author     Anton Altaparmakov <aia21@cantab.net>   2005-10-31 05:06:46 -0500
committer  Anton Altaparmakov <aia21@cantab.net>   2005-10-31 05:06:46 -0500
commit     1f04c0a24b2f3cfe89c802a24396263623e3512d (patch)
tree       d7e2216b6e65b833c0c2b79b478d13ce17dbf296
parent     07b188ab773e183871e57b33ae37bf635c9f12ba (diff)
parent     e2f2e58e7968f8446b1078a20a18bf8ea12b4fbc (diff)
Merge branch 'master' of /usr/src/ntfs-2.6/
-rw-r--r--Documentation/RCU/torture.txt122
-rw-r--r--Documentation/cpusets.txt2
-rw-r--r--Documentation/firmware_class/firmware_sample_driver.c1
-rw-r--r--Documentation/firmware_class/firmware_sample_firmware_class.c2
-rw-r--r--Documentation/i2c/writing-clients1
-rw-r--r--Documentation/keys.txt22
-rw-r--r--MAINTAINERS5
-rw-r--r--README4
-rw-r--r--arch/alpha/kernel/time.c4
-rw-r--r--arch/arm/common/amba.c2
-rw-r--r--arch/arm/common/dmabounce.c165
-rw-r--r--arch/arm/common/scoop.c3
-rw-r--r--arch/arm/configs/ixdp2400_defconfig2
-rw-r--r--arch/arm/configs/ixdp2800_defconfig2
-rw-r--r--arch/arm/kernel/arthur.c1
-rw-r--r--arch/arm/kernel/ptrace.c2
-rw-r--r--arch/arm/kernel/time.c4
-rw-r--r--arch/arm/kernel/traps.c29
-rw-r--r--arch/arm/lib/ashldi3.S48
-rw-r--r--arch/arm/lib/ashldi3.c56
-rw-r--r--arch/arm/lib/ashrdi3.S48
-rw-r--r--arch/arm/lib/ashrdi3.c57
-rw-r--r--arch/arm/lib/gcclib.h22
-rw-r--r--arch/arm/lib/lshrdi3.S48
-rw-r--r--arch/arm/lib/lshrdi3.c56
-rw-r--r--arch/arm/lib/muldi3.S44
-rw-r--r--arch/arm/lib/muldi3.c72
-rw-r--r--arch/arm/lib/ucmpdi2.S35
-rw-r--r--arch/arm/lib/ucmpdi2.c49
-rw-r--r--arch/arm/mach-imx/generic.c2
-rw-r--r--arch/arm/mach-integrator/clock.c1
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c1
-rw-r--r--arch/arm/mach-integrator/lm.c1
-rw-r--r--arch/arm/mach-iop3xx/iq31244-pci.c2
-rw-r--r--arch/arm/mach-iop3xx/iq80321-pci.c2
-rw-r--r--arch/arm/mach-iop3xx/iq80331-pci.c2
-rw-r--r--arch/arm/mach-iop3xx/iq80332-pci.c2
-rw-r--r--arch/arm/mach-pxa/corgi.c20
-rw-r--r--arch/arm/mach-pxa/generic.c1
-rw-r--r--arch/arm/mach-pxa/poodle.c21
-rw-r--r--arch/arm/mach-pxa/spitz.c19
-rw-r--r--arch/arm/mach-sa1100/generic.c1
-rw-r--r--arch/arm/mach-versatile/clock.c1
-rw-r--r--arch/arm/mm/copypage-v6.c16
-rw-r--r--arch/arm/plat-omap/clock.c1
-rw-r--r--arch/arm26/kernel/ptrace.c2
-rw-r--r--arch/arm26/kernel/time.c4
-rw-r--r--arch/cris/arch-v10/drivers/axisflashmap.c1
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c1
-rw-r--r--arch/cris/kernel/time.c5
-rw-r--r--arch/frv/kernel/ptrace.c2
-rw-r--r--arch/frv/kernel/time.c3
-rw-r--r--arch/h8300/kernel/ptrace.c2
-rw-r--r--arch/h8300/kernel/time.c4
-rw-r--r--arch/i386/Kconfig310
-rw-r--r--arch/i386/Kconfig.cpu309
-rw-r--r--arch/i386/Makefile31
-rw-r--r--arch/i386/Makefile.cpu41
-rw-r--r--arch/i386/kernel/apic.c82
-rw-r--r--arch/i386/kernel/apm.c40
-rw-r--r--arch/i386/kernel/cpu/common.c15
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/p4-clockmod.c1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.c1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c1
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c87
-rw-r--r--arch/i386/kernel/cpu/mcheck/p6.c11
-rw-r--r--arch/i386/kernel/cpu/mtrr/if.c119
-rw-r--r--arch/i386/kernel/cpu/proc.c2
-rw-r--r--arch/i386/kernel/crash.c7
-rw-r--r--arch/i386/kernel/i8259.c4
-rw-r--r--arch/i386/kernel/io_apic.c153
-rw-r--r--arch/i386/kernel/irq.c8
-rw-r--r--arch/i386/kernel/mpparse.c41
-rw-r--r--arch/i386/kernel/nmi.c39
-rw-r--r--arch/i386/kernel/ptrace.c2
-rw-r--r--arch/i386/kernel/reboot_fixups.c2
-rw-r--r--arch/i386/kernel/setup.c24
-rw-r--r--arch/i386/kernel/smpboot.c72
-rw-r--r--arch/i386/kernel/srat.c7
-rw-r--r--arch/i386/kernel/time.c16
-rw-r--r--arch/i386/kernel/time_hpet.c20
-rw-r--r--arch/i386/kernel/timers/timer_hpet.c17
-rw-r--r--arch/i386/kernel/timers/timer_tsc.c21
-rw-r--r--arch/i386/kernel/traps.c1
-rw-r--r--arch/i386/mach-es7000/es7000.h11
-rw-r--r--arch/i386/mach-es7000/es7000plat.c11
-rw-r--r--arch/i386/mm/fault.c2
-rw-r--r--arch/i386/pci/irq.c55
-rw-r--r--arch/i386/power/cpu.c12
-rw-r--r--arch/ia64/ia32/sys_ia32.c1
-rw-r--r--arch/ia64/kernel/cyclone.c1
-rw-r--r--arch/ia64/kernel/time.c4
-rw-r--r--arch/m32r/kernel/entry.S2
-rw-r--r--arch/m32r/kernel/io_m32700ut.c6
-rw-r--r--arch/m32r/kernel/io_mappi.c2
-rw-r--r--arch/m32r/kernel/io_mappi2.c11
-rw-r--r--arch/m32r/kernel/io_mappi3.c7
-rw-r--r--arch/m32r/kernel/io_oaks32r.c2
-rw-r--r--arch/m32r/kernel/io_opsput.c8
-rw-r--r--arch/m32r/kernel/io_usrv.c2
-rw-r--r--arch/m32r/kernel/ptrace.c2
-rw-r--r--arch/m32r/kernel/setup.c24
-rw-r--r--arch/m32r/kernel/time.c4
-rw-r--r--arch/m32r/lib/csum_partial_copy.c2
-rw-r--r--arch/m68k/kernel/ptrace.c2
-rw-r--r--arch/m68k/kernel/time.c4
-rw-r--r--arch/m68knommu/kernel/ptrace.c2
-rw-r--r--arch/m68knommu/kernel/time.c4
-rw-r--r--arch/mips/kernel/irixelf.c17
-rw-r--r--arch/mips/kernel/ptrace.c2
-rw-r--r--arch/mips/kernel/time.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-berr.c1
-rw-r--r--arch/parisc/kernel/ioctl32.c5
-rw-r--r--arch/parisc/kernel/ptrace.c2
-rw-r--r--arch/parisc/kernel/time.c4
-rw-r--r--arch/powerpc/Kconfig900
-rw-r--r--arch/powerpc/Kconfig.debug128
-rw-r--r--arch/powerpc/Makefile222
-rw-r--r--arch/powerpc/kernel/Makefile56
-rw-r--r--arch/powerpc/kernel/asm-offsets.c273
-rw-r--r--arch/powerpc/kernel/binfmt_elf32.c (renamed from arch/ppc64/kernel/binfmt_elf32.c)3
-rw-r--r--arch/powerpc/kernel/btext.c853
-rw-r--r--arch/powerpc/kernel/cputable.c (renamed from arch/ppc/kernel/cputable.c)797
-rw-r--r--arch/powerpc/kernel/entry_32.S1000
-rw-r--r--arch/powerpc/kernel/entry_64.S (renamed from arch/ppc64/kernel/entry.S)47
-rw-r--r--arch/powerpc/kernel/fpu.S (renamed from arch/ppc/kernel/fpu.S)105
-rw-r--r--arch/powerpc/kernel/head_32.S1381
-rw-r--r--arch/powerpc/kernel/head_44x.S782
-rw-r--r--arch/powerpc/kernel/head_4xx.S1022
-rw-r--r--arch/powerpc/kernel/head_64.S1957
-rw-r--r--arch/powerpc/kernel/head_8xx.S860
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S1063
-rw-r--r--arch/powerpc/kernel/idle_6xx.S233
-rw-r--r--arch/powerpc/kernel/idle_power4.S (renamed from arch/ppc64/kernel/idle_power4.S)9
-rw-r--r--arch/powerpc/kernel/init_task.c (renamed from arch/ppc64/kernel/init_task.c)0
-rw-r--r--arch/powerpc/kernel/lparmap.c (renamed from arch/ppc64/kernel/lparmap.c)0
-rw-r--r--arch/powerpc/kernel/misc_32.S1037
-rw-r--r--arch/powerpc/kernel/misc_64.S880
-rw-r--r--arch/powerpc/kernel/of_device.c (renamed from arch/ppc64/kernel/of_device.c)6
-rw-r--r--arch/powerpc/kernel/pmc.c (renamed from arch/ppc64/kernel/pmc.c)30
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c273
-rw-r--r--arch/powerpc/kernel/process.c (renamed from arch/ppc64/kernel/process.c)486
-rw-r--r--arch/powerpc/kernel/prom.c2170
-rw-r--r--arch/powerpc/kernel/prom_init.c2109
-rw-r--r--arch/powerpc/kernel/ptrace.c (renamed from arch/ppc/kernel/ptrace.c)172
-rw-r--r--arch/powerpc/kernel/ptrace32.c (renamed from arch/ppc64/kernel/ptrace32.c)9
-rw-r--r--arch/powerpc/kernel/rtas.c (renamed from arch/ppc64/kernel/rtas.c)254
-rw-r--r--arch/powerpc/kernel/semaphore.c135
-rw-r--r--arch/powerpc/kernel/setup-common.c410
-rw-r--r--arch/powerpc/kernel/setup_32.c372
-rw-r--r--arch/powerpc/kernel/setup_64.c (renamed from arch/ppc64/kernel/setup.c)352
-rw-r--r--arch/powerpc/kernel/signal_32.c (renamed from arch/ppc64/kernel/signal32.c)993
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c (renamed from arch/ppc64/kernel/sys_ppc32.c)320
-rw-r--r--arch/powerpc/kernel/syscalls.c (renamed from arch/ppc64/kernel/syscalls.c)187
-rw-r--r--arch/powerpc/kernel/systbl.S321
-rw-r--r--arch/powerpc/kernel/time.c (renamed from arch/ppc64/kernel/time.c)574
-rw-r--r--arch/powerpc/kernel/traps.c1101
-rw-r--r--arch/powerpc/kernel/vecemu.c (renamed from arch/ppc/kernel/vecemu.c)0
-rw-r--r--arch/powerpc/kernel/vector.S (renamed from arch/ppc64/kernel/vector.S)71
-rw-r--r--arch/powerpc/kernel/vio.c (renamed from arch/ppc64/kernel/vio.c)14
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S279
-rw-r--r--arch/powerpc/lib/Makefile19
-rw-r--r--arch/powerpc/lib/checksum_32.S225
-rw-r--r--arch/powerpc/lib/checksum_64.S (renamed from arch/ppc64/lib/checksum.S)0
-rw-r--r--arch/powerpc/lib/copy_32.S543
-rw-r--r--arch/powerpc/lib/copypage_64.S (renamed from arch/ppc64/lib/copypage.S)0
-rw-r--r--arch/powerpc/lib/copyuser_64.S (renamed from arch/ppc64/lib/copyuser.S)0
-rw-r--r--arch/powerpc/lib/div64.S59
-rw-r--r--arch/powerpc/lib/e2a.c (renamed from arch/ppc64/lib/e2a.c)0
-rw-r--r--arch/powerpc/lib/locks.c (renamed from arch/ppc64/lib/locks.c)5
-rw-r--r--arch/powerpc/lib/mem_64.S119
-rw-r--r--arch/powerpc/lib/memcpy_64.S (renamed from arch/ppc64/lib/memcpy.S)0
-rw-r--r--arch/powerpc/lib/rheap.c693
-rw-r--r--arch/powerpc/lib/sstep.c (renamed from arch/ppc64/lib/sstep.c)17
-rw-r--r--arch/powerpc/lib/strcase.c (renamed from arch/ppc64/lib/strcase.c)8
-rw-r--r--arch/powerpc/lib/string.S198
-rw-r--r--arch/powerpc/lib/usercopy_64.c (renamed from arch/ppc64/lib/usercopy.c)0
-rw-r--r--arch/powerpc/mm/44x_mmu.c120
-rw-r--r--arch/powerpc/mm/4xx_mmu.c141
-rw-r--r--arch/powerpc/mm/Makefile21
-rw-r--r--arch/powerpc/mm/fault.c (renamed from arch/ppc64/mm/fault.c)104
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c237
-rw-r--r--arch/powerpc/mm/hash_low_32.S618
-rw-r--r--arch/powerpc/mm/hash_low_64.S (renamed from arch/ppc64/mm/hash_low.S)2
-rw-r--r--arch/powerpc/mm/hash_native_64.c (renamed from arch/ppc64/mm/hash_native.c)13
-rw-r--r--arch/powerpc/mm/hash_utils_64.c (renamed from arch/ppc64/mm/hash_utils.c)61
-rw-r--r--arch/powerpc/mm/hugetlbpage.c (renamed from arch/ppc64/mm/hugetlbpage.c)0
-rw-r--r--arch/powerpc/mm/imalloc.c (renamed from arch/ppc64/mm/imalloc.c)0
-rw-r--r--arch/powerpc/mm/init_32.c254
-rw-r--r--arch/powerpc/mm/init_64.c223
-rw-r--r--arch/powerpc/mm/lmb.c (renamed from arch/ppc64/kernel/lmb.c)105
-rw-r--r--arch/powerpc/mm/mem.c564
-rw-r--r--arch/powerpc/mm/mmap.c (renamed from arch/ppc64/mm/mmap.c)0
-rw-r--r--arch/powerpc/mm/mmu_context_32.c86
-rw-r--r--arch/powerpc/mm/mmu_context_64.c63
-rw-r--r--arch/powerpc/mm/mmu_decl.h87
-rw-r--r--arch/powerpc/mm/numa.c (renamed from arch/ppc64/mm/numa.c)2
-rw-r--r--arch/powerpc/mm/pgtable_32.c467
-rw-r--r--arch/powerpc/mm/pgtable_64.c347
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c285
-rw-r--r--arch/powerpc/mm/slb.c (renamed from arch/ppc64/mm/slb.c)0
-rw-r--r--arch/powerpc/mm/slb_low.S (renamed from arch/ppc64/mm/slb_low.S)0
-rw-r--r--arch/powerpc/mm/stab.c (renamed from arch/ppc64/mm/stab.c)0
-rw-r--r--arch/powerpc/mm/tlb_32.c183
-rw-r--r--arch/powerpc/mm/tlb_64.c (renamed from arch/ppc64/mm/tlb.c)23
-rw-r--r--arch/powerpc/oprofile/Kconfig (renamed from arch/ppc/oprofile/Kconfig)0
-rw-r--r--arch/powerpc/oprofile/Makefile (renamed from arch/ppc/oprofile/Makefile)7
-rw-r--r--arch/powerpc/oprofile/common.c (renamed from arch/ppc64/oprofile/common.c)84
-rw-r--r--arch/powerpc/oprofile/op_model_fsl_booke.c (renamed from arch/ppc/oprofile/op_model_fsl_booke.c)7
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c (renamed from arch/ppc64/oprofile/op_model_power4.c)2
-rw-r--r--arch/powerpc/oprofile/op_model_rs64.c (renamed from arch/ppc64/oprofile/op_model_rs64.c)2
-rw-r--r--arch/powerpc/platforms/4xx/Kconfig280
-rw-r--r--arch/powerpc/platforms/4xx/Makefile1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig86
-rw-r--r--arch/powerpc/platforms/85xx/Makefile1
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig352
-rw-r--r--arch/powerpc/platforms/Makefile13
-rw-r--r--arch/powerpc/platforms/apus/Kconfig130
-rw-r--r--arch/powerpc/platforms/chrp/Makefile4
-rw-r--r--arch/powerpc/platforms/chrp/chrp.h12
-rw-r--r--arch/powerpc/platforms/chrp/nvram.c84
-rw-r--r--arch/powerpc/platforms/chrp/pci.c310
-rw-r--r--arch/powerpc/platforms/chrp/pegasos_eth.c213
-rw-r--r--arch/powerpc/platforms/chrp/setup.c522
-rw-r--r--arch/powerpc/platforms/chrp/smp.c122
-rw-r--r--arch/powerpc/platforms/chrp/time.c188
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig318
-rw-r--r--arch/powerpc/platforms/iseries/Kconfig31
-rw-r--r--arch/powerpc/platforms/iseries/Makefile9
-rw-r--r--arch/powerpc/platforms/iseries/call_hpt.h (renamed from include/asm-ppc64/iSeries/HvCallHpt.h)7
-rw-r--r--arch/powerpc/platforms/iseries/call_pci.h (renamed from include/asm-ppc64/iSeries/HvCallPci.h)249
-rw-r--r--arch/powerpc/platforms/iseries/call_sm.h (renamed from include/asm-ppc64/iSeries/HvCallSm.h)7
-rw-r--r--arch/powerpc/platforms/iseries/htab.c (renamed from arch/ppc64/kernel/iSeries_htab.c)47
-rw-r--r--arch/powerpc/platforms/iseries/hvcall.S (renamed from arch/ppc64/kernel/hvCall.S)22
-rw-r--r--arch/powerpc/platforms/iseries/hvlog.c (renamed from arch/ppc64/kernel/HvCall.c)1
-rw-r--r--arch/powerpc/platforms/iseries/hvlpconfig.c (renamed from arch/ppc64/kernel/HvLpConfig.c)1
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c (renamed from arch/ppc64/kernel/iSeries_iommu.c)44
-rw-r--r--arch/powerpc/platforms/iseries/ipl_parms.h (renamed from include/asm-ppc64/iSeries/ItIplParmsReal.h)7
-rw-r--r--arch/powerpc/platforms/iseries/irq.c (renamed from arch/ppc64/kernel/iSeries_irq.c)17
-rw-r--r--arch/powerpc/platforms/iseries/irq.h (renamed from include/asm-ppc64/iSeries/iSeries_irq.h)6
-rw-r--r--arch/powerpc/platforms/iseries/ksyms.c27
-rw-r--r--arch/powerpc/platforms/iseries/lpardata.c (renamed from arch/ppc64/kernel/LparData.c)28
-rw-r--r--arch/powerpc/platforms/iseries/lpevents.c (renamed from arch/ppc64/kernel/ItLpQueue.c)77
-rw-r--r--arch/powerpc/platforms/iseries/main_store.h (renamed from include/asm-ppc64/iSeries/IoHriMainStore.h)7
-rw-r--r--arch/powerpc/platforms/iseries/mf.c (renamed from arch/ppc64/kernel/mf.c)98
-rw-r--r--arch/powerpc/platforms/iseries/misc.S55
-rw-r--r--arch/powerpc/platforms/iseries/pci.c (renamed from arch/ppc64/kernel/iSeries_pci.c)173
-rw-r--r--arch/powerpc/platforms/iseries/pci.h (renamed from include/asm-ppc64/iSeries/iSeries_pci.h)49
-rw-r--r--arch/powerpc/platforms/iseries/proc.c (renamed from arch/ppc64/kernel/iSeries_proc.c)15
-rw-r--r--arch/powerpc/platforms/iseries/processor_vpd.h (renamed from include/asm-ppc64/iSeries/IoHriProcessorVpd.h)7
-rw-r--r--arch/powerpc/platforms/iseries/release_data.h (renamed from include/asm-ppc64/iSeries/HvReleaseData.h)7
-rw-r--r--arch/powerpc/platforms/iseries/setup.c (renamed from arch/ppc64/kernel/iSeries_setup.c)495
-rw-r--r--arch/powerpc/platforms/iseries/setup.h (renamed from arch/ppc64/kernel/iSeries_setup.h)4
-rw-r--r--arch/powerpc/platforms/iseries/smp.c (renamed from arch/ppc64/kernel/iSeries_smp.c)46
-rw-r--r--arch/powerpc/platforms/iseries/spcomm_area.h (renamed from include/asm-ppc64/iSeries/ItSpCommArea.h)7
-rw-r--r--arch/powerpc/platforms/iseries/vio.c (renamed from arch/ppc64/kernel/iSeries_vio.c)1
-rw-r--r--arch/powerpc/platforms/iseries/viopath.c (renamed from arch/ppc64/kernel/viopath.c)3
-rw-r--r--arch/powerpc/platforms/iseries/vpd_areas.h (renamed from include/asm-ppc64/iSeries/ItVpdAreas.h)7
-rw-r--r--arch/powerpc/platforms/iseries/vpdinfo.c (renamed from arch/ppc64/kernel/iSeries_VpdInfo.c)21
-rw-r--r--arch/powerpc/platforms/maple/Makefile1
-rw-r--r--arch/powerpc/platforms/maple/maple.h12
-rw-r--r--arch/powerpc/platforms/maple/pci.c (renamed from arch/ppc64/kernel/maple_pci.c)7
-rw-r--r--arch/powerpc/platforms/maple/setup.c (renamed from arch/ppc64/kernel/maple_setup.c)13
-rw-r--r--arch/powerpc/platforms/maple/time.c (renamed from arch/ppc64/kernel/maple_time.c)9
-rw-r--r--arch/powerpc/platforms/powermac/Makefile8
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c202
-rw-r--r--arch/powerpc/platforms/powermac/cache.S359
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq.c726
-rw-r--r--arch/powerpc/platforms/powermac/feature.c3063
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c (renamed from arch/ppc64/kernel/pmac_low_i2c.c)0
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c (renamed from arch/ppc64/kernel/pmac_nvram.c)282
-rw-r--r--arch/powerpc/platforms/powermac/pci.c1170
-rw-r--r--arch/powerpc/platforms/powermac/pic.c678
-rw-r--r--arch/powerpc/platforms/powermac/pic.h11
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h51
-rw-r--r--arch/powerpc/platforms/powermac/setup.c794
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S396
-rw-r--r--arch/powerpc/platforms/powermac/smp.c865
-rw-r--r--arch/powerpc/platforms/powermac/time.c360
-rw-r--r--arch/powerpc/platforms/prep/Kconfig22
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig42
-rw-r--r--arch/powerpc/platforms/pseries/Makefile5
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S (renamed from arch/ppc64/kernel/pSeries_hvCall.S)0
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c (renamed from arch/ppc64/kernel/pSeries_iommu.c)28
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c (renamed from arch/ppc64/kernel/pSeries_lpar.c)5
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c (renamed from arch/ppc64/kernel/pSeries_nvram.c)0
-rw-r--r--arch/powerpc/platforms/pseries/pci.c (renamed from arch/ppc64/kernel/pSeries_pci.c)3
-rw-r--r--arch/powerpc/platforms/pseries/ras.c (renamed from arch/ppc64/kernel/ras.c)11
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c (renamed from arch/ppc64/kernel/pSeries_reconfig.c)0
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fw.c138
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fw.h3
-rw-r--r--arch/powerpc/platforms/pseries/setup.c (renamed from arch/ppc64/kernel/pSeries_setup.c)57
-rw-r--r--arch/powerpc/platforms/pseries/smp.c (renamed from arch/ppc64/kernel/pSeries_smp.c)52
-rw-r--r--arch/powerpc/platforms/pseries/vio.c (renamed from arch/ppc64/kernel/pSeries_vio.c)1
-rw-r--r--arch/powerpc/platforms/pseries/xics.c (renamed from arch/ppc64/kernel/xics.c)30
-rw-r--r--arch/powerpc/platforms/pseries/xics.h (renamed from include/asm-ppc64/xics.h)10
-rw-r--r--arch/powerpc/sysdev/Makefile7
-rw-r--r--arch/powerpc/sysdev/dcr.S (renamed from arch/ppc/syslib/dcr.S)0
-rw-r--r--arch/powerpc/sysdev/grackle.c64
-rw-r--r--arch/powerpc/sysdev/i8259.c (renamed from arch/ppc/syslib/i8259.c)65
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c (renamed from arch/ppc/syslib/indirect_pci.c)0
-rw-r--r--arch/powerpc/sysdev/mpic.c (renamed from arch/ppc64/kernel/mpic.c)53
-rw-r--r--arch/powerpc/sysdev/u3_iommu.c (renamed from arch/ppc64/kernel/u3_iommu.c)50
-rw-r--r--arch/powerpc/xmon/Makefile11
-rw-r--r--arch/powerpc/xmon/ansidecl.h (renamed from arch/ppc64/xmon/ansidecl.h)0
-rw-r--r--arch/powerpc/xmon/nonstdio.h (renamed from arch/ppc64/xmon/nonstdio.h)0
-rw-r--r--arch/powerpc/xmon/ppc-dis.c (renamed from arch/ppc64/xmon/ppc-dis.c)0
-rw-r--r--arch/powerpc/xmon/ppc-opc.c (renamed from arch/ppc64/xmon/ppc-opc.c)0
-rw-r--r--arch/powerpc/xmon/ppc.h (renamed from arch/ppc64/xmon/ppc.h)0
-rw-r--r--arch/powerpc/xmon/setjmp.S135
-rw-r--r--arch/powerpc/xmon/start_32.c624
-rw-r--r--arch/powerpc/xmon/start_64.c (renamed from arch/ppc64/xmon/start.c)0
-rw-r--r--arch/powerpc/xmon/start_8xx.c287
-rw-r--r--arch/powerpc/xmon/subr_prf.c (renamed from arch/ppc64/xmon/subr_prf.c)11
-rw-r--r--arch/powerpc/xmon/xmon.c (renamed from arch/ppc64/xmon/xmon.c)399
-rw-r--r--arch/ppc/8xx_io/commproc.c20
-rw-r--r--arch/ppc/Kconfig40
-rw-r--r--arch/ppc/Makefile14
-rw-r--r--arch/ppc/boot/of1275/claim.c1
-rw-r--r--arch/ppc/boot/openfirmware/chrpmain.c2
-rw-r--r--arch/ppc/boot/openfirmware/coffmain.c2
-rw-r--r--arch/ppc/kernel/Makefile27
-rw-r--r--arch/ppc/kernel/align.c4
-rw-r--r--arch/ppc/kernel/asm-offsets.c3
-rw-r--r--arch/ppc/kernel/cpu_setup_6xx.S6
-rw-r--r--arch/ppc/kernel/cpu_setup_power4.S6
-rw-r--r--arch/ppc/kernel/entry.S12
-rw-r--r--arch/ppc/kernel/head.S100
-rw-r--r--arch/ppc/kernel/head_44x.S32
-rw-r--r--arch/ppc/kernel/head_4xx.S68
-rw-r--r--arch/ppc/kernel/head_8xx.S42
-rw-r--r--arch/ppc/kernel/head_booke.h4
-rw-r--r--arch/ppc/kernel/head_fsl_booke.S47
-rw-r--r--arch/ppc/kernel/idle.c3
-rw-r--r--arch/ppc/kernel/irq.c1
-rw-r--r--arch/ppc/kernel/l2cr.S2
-rw-r--r--arch/ppc/kernel/misc.S235
-rw-r--r--arch/ppc/kernel/pci.c33
-rw-r--r--arch/ppc/kernel/perfmon.c96
-rw-r--r--arch/ppc/kernel/perfmon_fsl_booke.c2
-rw-r--r--arch/ppc/kernel/ppc_ksyms.c34
-rw-r--r--arch/ppc/kernel/process.c142
-rw-r--r--arch/ppc/kernel/setup.c39
-rw-r--r--arch/ppc/kernel/signal.c771
-rw-r--r--arch/ppc/kernel/smp.c22
-rw-r--r--arch/ppc/kernel/syscalls.c268
-rw-r--r--arch/ppc/kernel/time.c14
-rw-r--r--arch/ppc/kernel/traps.c42
-rw-r--r--arch/ppc/kernel/vector.S217
-rw-r--r--arch/ppc/kernel/vmlinux.lds.S26
-rw-r--r--arch/ppc/lib/string.S24
-rw-r--r--arch/ppc/math-emu/sfp-machine.h2
-rw-r--r--arch/ppc/mm/init.c23
-rw-r--r--arch/ppc/oprofile/common.c161
-rw-r--r--arch/ppc/oprofile/op_impl.h45
-rw-r--r--arch/ppc/platforms/4xx/bamboo.c14
-rw-r--r--arch/ppc/platforms/4xx/ebony.c15
-rw-r--r--arch/ppc/platforms/4xx/luan.c13
-rw-r--r--arch/ppc/platforms/4xx/ocotea.c31
-rw-r--r--arch/ppc/platforms/83xx/mpc834x_sys.h1
-rw-r--r--arch/ppc/platforms/85xx/mpc8540_ads.c30
-rw-r--r--arch/ppc/platforms/85xx/mpc8560_ads.c25
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_ads_common.h1
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_cds_common.c39
-rw-r--r--arch/ppc/platforms/85xx/sbc8560.c22
-rw-r--r--arch/ppc/platforms/85xx/stx_gp3.c21
-rw-r--r--arch/ppc/platforms/85xx/stx_gp3.h1
-rw-r--r--arch/ppc/platforms/Makefile3
-rw-r--r--arch/ppc/platforms/chestnut.c1
-rw-r--r--arch/ppc/platforms/chrp_nvram.c83
-rw-r--r--arch/ppc/platforms/chrp_pci.c10
-rw-r--r--arch/ppc/platforms/chrp_pegasos_eth.c124
-rw-r--r--arch/ppc/platforms/chrp_setup.c33
-rw-r--r--arch/ppc/platforms/chrp_smp.c3
-rw-r--r--arch/ppc/platforms/chrp_time.c8
-rw-r--r--arch/ppc/platforms/ev64360.c1
-rw-r--r--arch/ppc/platforms/fads.h2
-rw-r--r--arch/ppc/platforms/gemini_setup.c4
-rw-r--r--arch/ppc/platforms/hdpu.c9
-rw-r--r--arch/ppc/platforms/katana.c3
-rw-r--r--arch/ppc/platforms/lite5200.c1
-rw-r--r--arch/ppc/platforms/lopec.c17
-rw-r--r--arch/ppc/platforms/mpc885ads.h2
-rw-r--r--arch/ppc/platforms/mvme5100.c6
-rw-r--r--arch/ppc/platforms/pal4_setup.c1
-rw-r--r--arch/ppc/platforms/pmac_backlight.c16
-rw-r--r--arch/ppc/platforms/pmac_cpufreq.c36
-rw-r--r--arch/ppc/platforms/pmac_feature.c176
-rw-r--r--arch/ppc/platforms/pmac_nvram.c42
-rw-r--r--arch/ppc/platforms/pmac_pci.c28
-rw-r--r--arch/ppc/platforms/pmac_pic.c27
-rw-r--r--arch/ppc/platforms/pmac_setup.c19
-rw-r--r--arch/ppc/platforms/pmac_sleep.S4
-rw-r--r--arch/ppc/platforms/pmac_smp.c11
-rw-r--r--arch/ppc/platforms/pmac_time.c8
-rw-r--r--arch/ppc/platforms/pplus.c17
-rw-r--r--arch/ppc/platforms/prep_pci.c64
-rw-r--r--arch/ppc/platforms/prep_setup.c70
-rw-r--r--arch/ppc/platforms/radstone_ppc7d.c15
-rw-r--r--arch/ppc/platforms/residual.c2
-rw-r--r--arch/ppc/platforms/sandpoint.c21
-rw-r--r--arch/ppc/syslib/Makefile57
-rw-r--r--arch/ppc/syslib/btext.c6
-rw-r--r--arch/ppc/syslib/gt64260_pic.c1
-rw-r--r--arch/ppc/syslib/ibm440gx_common.c6
-rw-r--r--arch/ppc/syslib/ibm44x_common.c37
-rw-r--r--arch/ppc/syslib/ibm44x_common.h3
-rw-r--r--arch/ppc/syslib/m8260_setup.c4
-rw-r--r--arch/ppc/syslib/m82xx_pci.c4
-rw-r--r--arch/ppc/syslib/m8xx_setup.c48
-rw-r--r--arch/ppc/syslib/m8xx_wdt.c14
-rw-r--r--arch/ppc/syslib/mpc52xx_pci.c3
-rw-r--r--arch/ppc/syslib/mpc83xx_devices.c1
-rw-r--r--arch/ppc/syslib/mpc85xx_devices.c17
-rw-r--r--arch/ppc/syslib/mpc85xx_sys.c44
-rw-r--r--arch/ppc/syslib/mpc8xx_sys.c4
-rw-r--r--arch/ppc/syslib/mv64360_pic.c1
-rw-r--r--arch/ppc/syslib/mv64x60.c2
-rw-r--r--arch/ppc/syslib/mv64x60_dbg.c1
-rw-r--r--arch/ppc/syslib/of_device.c276
-rw-r--r--arch/ppc/syslib/open_pic.c3
-rw-r--r--arch/ppc/syslib/open_pic2.c1
-rw-r--r--arch/ppc/syslib/ppc403_pic.c1
-rw-r--r--arch/ppc/syslib/ppc4xx_pic.c1
-rw-r--r--arch/ppc/syslib/ppc4xx_setup.c2
-rw-r--r--arch/ppc/syslib/ppc83xx_setup.c1
-rw-r--r--arch/ppc/syslib/ppc85xx_setup.c1
-rw-r--r--arch/ppc/syslib/ppc8xx_pic.c17
-rw-r--r--arch/ppc/syslib/ppc_sys.c3
-rw-r--r--arch/ppc/syslib/pq2_devices.c1
-rw-r--r--arch/ppc/syslib/prep_nvram.c13
-rw-r--r--arch/ppc/syslib/prom.c18
-rw-r--r--arch/ppc/syslib/xilinx_pic.c1
-rw-r--r--arch/ppc/xmon/start.c3
-rw-r--r--arch/ppc/xmon/xmon.c9
-rw-r--r--arch/ppc64/Kconfig33
-rw-r--r--arch/ppc64/Makefile18
-rw-r--r--arch/ppc64/boot/Makefile67
-rw-r--r--arch/ppc64/boot/crt0.S53
-rw-r--r--arch/ppc64/boot/install.sh2
-rw-r--r--arch/ppc64/boot/main.c268
-rw-r--r--arch/ppc64/boot/string.S4
-rw-r--r--arch/ppc64/boot/string.h1
-rw-r--r--arch/ppc64/boot/zImage.lds64
-rw-r--r--arch/ppc64/boot/zlib.c2195
-rw-r--r--arch/ppc64/boot/zlib.h432
-rw-r--r--arch/ppc64/defconfig4
-rw-r--r--arch/ppc64/kernel/HvLpEvent.c88
-rw-r--r--arch/ppc64/kernel/Makefile75
-rw-r--r--arch/ppc64/kernel/align.c4
-rw-r--r--arch/ppc64/kernel/asm-offsets.c3
-rw-r--r--arch/ppc64/kernel/bpa_iommu.c2
-rw-r--r--arch/ppc64/kernel/bpa_setup.c7
-rw-r--r--arch/ppc64/kernel/btext.c42
-rw-r--r--arch/ppc64/kernel/cputable.c308
-rw-r--r--arch/ppc64/kernel/eeh.c2
-rw-r--r--arch/ppc64/kernel/head.S290
-rw-r--r--arch/ppc64/kernel/hvcserver.c2
-rw-r--r--arch/ppc64/kernel/i8259.c177
-rw-r--r--arch/ppc64/kernel/i8259.h17
-rw-r--r--arch/ppc64/kernel/idle.c8
-rw-r--r--arch/ppc64/kernel/ioctl32.c4
-rw-r--r--arch/ppc64/kernel/kprobes.c1
-rw-r--r--arch/ppc64/kernel/misc.S662
-rw-r--r--arch/ppc64/kernel/pci.c46
-rw-r--r--arch/ppc64/kernel/pci_direct_iommu.c3
-rw-r--r--arch/ppc64/kernel/pci_dn.c3
-rw-r--r--arch/ppc64/kernel/pci_iommu.c21
-rw-r--r--arch/ppc64/kernel/pmac.h31
-rw-r--r--arch/ppc64/kernel/pmac_feature.c767
-rw-r--r--arch/ppc64/kernel/pmac_pci.c793
-rw-r--r--arch/ppc64/kernel/pmac_setup.c525
-rw-r--r--arch/ppc64/kernel/pmac_smp.c330
-rw-r--r--arch/ppc64/kernel/pmac_time.c195
-rw-r--r--arch/ppc64/kernel/ppc_ksyms.c20
-rw-r--r--arch/ppc64/kernel/prom.c7
-rw-r--r--arch/ppc64/kernel/prom_init.c1
-rw-r--r--arch/ppc64/kernel/ptrace.c363
-rw-r--r--arch/ppc64/kernel/rtas-proc.c1
-rw-r--r--arch/ppc64/kernel/rtas_pci.c9
-rw-r--r--arch/ppc64/kernel/rtc.c48
-rw-r--r--arch/ppc64/kernel/signal.c2
-rw-r--r--arch/ppc64/kernel/smp.c40
-rw-r--r--arch/ppc64/kernel/traps.c568
-rw-r--r--arch/ppc64/kernel/vdso64/sigtramp.S1
-rw-r--r--arch/ppc64/kernel/vecemu.c346
-rw-r--r--arch/ppc64/kernel/vmlinux.lds.S17
-rw-r--r--arch/ppc64/lib/Makefile15
-rw-r--r--arch/ppc64/lib/string.S106
-rw-r--r--arch/ppc64/mm/Makefile11
-rw-r--r--arch/ppc64/mm/init.c950
-rw-r--r--arch/ppc64/oprofile/Kconfig23
-rw-r--r--arch/ppc64/oprofile/Makefile9
-rw-r--r--arch/ppc64/xmon/Makefile5
-rw-r--r--arch/ppc64/xmon/setjmp.S73
-rw-r--r--arch/s390/kernel/compat_ioctl.c9
-rw-r--r--arch/s390/kernel/head.S72
-rw-r--r--arch/s390/kernel/head64.S66
-rw-r--r--arch/s390/kernel/setup.c186
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/s390/kernel/vtime.c18
-rw-r--r--arch/sh/drivers/dma/dma-sysfs.c1
-rw-r--r--arch/sh/kernel/cpufreq.c1
-rw-r--r--arch/sh/kernel/ptrace.c2
-rw-r--r--arch/sh/kernel/time.c4
-rw-r--r--arch/sh64/kernel/ptrace.c2
-rw-r--r--arch/sh64/kernel/time.c2
-rw-r--r--arch/sparc/kernel/pcic.c4
-rw-r--r--arch/sparc/kernel/time.c4
-rw-r--r--arch/sparc64/kernel/ioctl32.c3
-rw-r--r--arch/sparc64/kernel/time.c4
-rw-r--r--arch/um/Kconfig10
-rw-r--r--arch/um/Kconfig.x86_645
-rw-r--r--arch/um/Makefile-i38612
-rw-r--r--arch/um/include/sysdep-i386/syscalls.h1
-rw-r--r--arch/um/kernel/time_kern.c4
-rw-r--r--arch/v850/kernel/ptrace.c2
-rw-r--r--arch/v850/kernel/time.c4
-rw-r--r--arch/x86_64/ia32/ia32_ioctl.c125
-rw-r--r--arch/x86_64/kernel/i8259.c8
-rw-r--r--arch/x86_64/kernel/setup.c2
-rw-r--r--arch/x86_64/kernel/suspend.c95
-rw-r--r--arch/x86_64/kernel/time.c32
-rw-r--r--arch/xtensa/kernel/platform.c1
-rw-r--r--arch/xtensa/kernel/ptrace.c2
-rw-r--r--arch/xtensa/kernel/time.c3
-rw-r--r--drivers/acorn/char/pcf8583.c80
-rw-r--r--drivers/acpi/processor_idle.c1
-rw-r--r--drivers/acpi/sleep/main.c8
-rw-r--r--drivers/base/class.c1
-rw-r--r--drivers/base/cpu.c16
-rw-r--r--drivers/base/firmware_class.c69
-rw-r--r--drivers/base/platform.c1
-rw-r--r--drivers/base/sys.c1
-rw-r--r--drivers/block/Kconfig.iosched28
-rw-r--r--drivers/block/as-iosched.c2
-rw-r--r--drivers/block/cciss_scsi.c10
-rw-r--r--drivers/block/cfq-iosched.c22
-rw-r--r--drivers/block/elevator.c50
-rw-r--r--drivers/block/paride/paride.c1
-rw-r--r--drivers/block/paride/pf.c4
-rw-r--r--drivers/block/paride/pg.c2
-rw-r--r--drivers/block/paride/pt.c1
-rw-r--r--drivers/block/viodasd.c9
-rw-r--r--drivers/cdrom/viocd.c9
-rw-r--r--drivers/char/Kconfig16
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/Kconfig16
-rw-r--r--drivers/char/agp/ali-agp.c1
-rw-r--r--drivers/char/agp/amd64-agp.c1
-rw-r--r--drivers/char/agp/ati-agp.c2
-rw-r--r--drivers/char/agp/i460-agp.c2
-rw-r--r--drivers/char/agp/isoch.c1
-rw-r--r--drivers/char/agp/sworks-agp.c2
-rw-r--r--drivers/char/cyclades.c2
-rw-r--r--drivers/char/drm/drm_sysfs.c2
-rw-r--r--drivers/char/epca.c1
-rw-r--r--drivers/char/hangcheck-timer.c2
-rw-r--r--drivers/char/hpet.c168
-rw-r--r--drivers/char/hvc_vio.c2
-rw-r--r--drivers/char/hvcs.c5
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/mwave/3780i.c2
-rw-r--r--drivers/char/rocket.c10
-rw-r--r--drivers/char/ser_a2232.c2
-rw-r--r--drivers/char/specialix.c342
-rw-r--r--drivers/char/synclink.c1
-rw-r--r--drivers/char/synclinkmp.c1
-rw-r--r--drivers/char/tlclk.c896
-rw-r--r--drivers/char/tpm/tpm.c104
-rw-r--r--drivers/char/tpm/tpm.h11
-rw-r--r--drivers/char/tpm/tpm_atmel.c146
-rw-r--r--drivers/char/tpm/tpm_infineon.c179
-rw-r--r--drivers/char/tpm/tpm_nsc.c164
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/char/viotape.c9
-rw-r--r--drivers/char/vt_ioctl.c3
-rw-r--r--drivers/char/watchdog/cpu5wdt.c1
-rw-r--r--drivers/char/watchdog/mixcomwd.c2
-rw-r--r--drivers/char/watchdog/pcwd.c2
-rw-r--r--drivers/char/watchdog/pcwd_pci.c1
-rw-r--r--drivers/char/watchdog/sc520_wdt.c1
-rw-r--r--drivers/char/watchdog/softdog.c2
-rw-r--r--drivers/char/watchdog/wdt_pci.c1
-rw-r--r--drivers/cpufreq/cpufreq.c85
-rw-r--r--drivers/cpufreq/cpufreq_stats.c42
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/firmware/Kconfig3
-rw-r--r--drivers/ide/ide-cd.c56
-rw-r--r--drivers/ide/ppc/pmac.c80
-rw-r--r--drivers/infiniband/core/cache.c1
-rw-r--r--drivers/infiniband/core/sa_query.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mcg.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_profile.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_uar.c2
-rw-r--r--drivers/input/gameport/gameport.c1
-rw-r--r--drivers/input/joystick/a3d.c1
-rw-r--r--drivers/input/joystick/adi.c1
-rw-r--r--drivers/input/joystick/analog.c1
-rw-r--r--drivers/input/joystick/cobra.c1
-rw-r--r--drivers/input/joystick/gf2k.c1
-rw-r--r--drivers/input/joystick/grip.c1
-rw-r--r--drivers/input/joystick/grip_mp.c1
-rw-r--r--drivers/input/joystick/guillemot.c1
-rw-r--r--drivers/input/joystick/interact.c1
-rw-r--r--drivers/input/joystick/joydump.c1
-rw-r--r--drivers/input/joystick/sidewinder.c1
-rw-r--r--drivers/input/joystick/tmdc.c1
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/serio/hp_sdc_mlc.c1
-rw-r--r--drivers/isdn/capi/capifs.c1
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c10
-rw-r--r--drivers/macintosh/ans-lcd.c10
-rw-r--r--drivers/macintosh/apm_emu.c8
-rw-r--r--drivers/macintosh/macio_asic.c2
-rw-r--r--drivers/macintosh/macio_sysfs.c26
-rw-r--r--drivers/macintosh/mediabay.c56
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/via-cuda.c1
-rw-r--r--drivers/macintosh/via-pmu.c142
-rw-r--r--drivers/macintosh/via-pmu68k.c15
-rw-r--r--drivers/mca/mca-device.c1
-rw-r--r--drivers/media/common/ir-common.c1
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ca_en50221.c1
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c4
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c2
-rw-r--r--drivers/media/dvb/frontends/bcm3510.c3
-rw-r--r--drivers/media/dvb/frontends/dib3000mb.c2
-rw-r--r--drivers/media/dvb/frontends/dib3000mc.c2
-rw-r--r--drivers/media/dvb/frontends/dvb_dummy_fe.c2
-rw-r--r--drivers/media/dvb/frontends/lgdt330x.c2
-rw-r--r--drivers/media/dvb/frontends/mt312.c2
-rw-r--r--drivers/media/dvb/frontends/mt352.c2
-rw-r--r--drivers/media/dvb/frontends/nxt2002.c2
-rw-r--r--drivers/media/dvb/frontends/or51132.c2
-rw-r--r--drivers/media/dvb/frontends/or51211.c2
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c2
-rw-r--r--drivers/media/dvb/frontends/sp8870.c2
-rw-r--r--drivers/media/dvb/frontends/sp887x.c2
-rw-r--r--drivers/media/dvb/frontends/stv0297.c2
-rw-r--r--drivers/media/dvb/frontends/stv0299.c1
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c4
-rw-r--r--drivers/media/dvb/frontends/tda8083.c1
-rw-r--r--drivers/media/radio/miropcm20-rds.c1
-rw-r--r--drivers/message/i2o/debug.c2
-rw-r--r--drivers/message/i2o/device.c2
-rw-r--r--drivers/message/i2o/driver.c3
-rw-r--r--drivers/message/i2o/exec-osm.c4
-rw-r--r--drivers/message/i2o/iop.c1
-rw-r--r--drivers/mfd/ucb1x00-ts.c74
-rw-r--r--drivers/mmc/mmc_block.c10
-rw-r--r--drivers/mmc/pxamci.c1
-rw-r--r--drivers/mtd/chips/jedec.c1
-rw-r--r--drivers/mtd/devices/lart.c1
-rw-r--r--drivers/mtd/devices/phram.c1
-rw-r--r--drivers/mtd/maps/bast-flash.c1
-rw-r--r--drivers/mtd/maps/ceiva.c1
-rw-r--r--drivers/mtd/maps/dc21285.c1
-rw-r--r--drivers/mtd/maps/dilnetpc.c5
-rw-r--r--drivers/mtd/maps/epxa10db-flash.c5
-rw-r--r--drivers/mtd/maps/fortunet.c5
-rw-r--r--drivers/mtd/maps/ixp2000.c6
-rw-r--r--drivers/mtd/maps/ixp4xx.c7
-rw-r--r--drivers/mtd/maps/lubbock-flash.c3
-rw-r--r--drivers/mtd/maps/mainstone-flash.c3
-rw-r--r--drivers/mtd/maps/omap-toto-flash.c2
-rw-r--r--drivers/mtd/maps/omap_nor.c2
-rw-r--r--drivers/mtd/maps/pci.c1
-rw-r--r--drivers/mtd/maps/plat-ram.c1
-rw-r--r--drivers/mtd/maps/tqm8xxl.c4
-rw-r--r--drivers/mtd/mtdblock.c1
-rw-r--r--drivers/mtd/mtdchar.c1
-rw-r--r--drivers/mtd/mtdconcat.c2
-rw-r--r--drivers/mtd/nand/s3c2410.c1
-rw-r--r--drivers/net/bmac.c1
-rw-r--r--drivers/net/eepro.c50
-rw-r--r--drivers/net/ibmveth.c14
-rw-r--r--drivers/net/irda/pxaficp_ir.c14
-rw-r--r--drivers/net/irda/smsc-ircc2.c129
-rw-r--r--drivers/net/iseries_veth.c18
-rw-r--r--drivers/net/mace.c1
-rw-r--r--drivers/net/mv643xx_eth.c3
-rw-r--r--drivers/net/skfp/smt.c2
-rw-r--r--drivers/net/smc91x.h12
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c2
-rw-r--r--drivers/pci/hotplug/cpcihp_generic.c1
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c1
-rw-r--r--drivers/pci/hotplug/fakephp.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c2
-rw-r--r--drivers/pci/hotplug/pciehprm_nonacpi.c3
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c3
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c4
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c3
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/hotplug/shpchprm_nonacpi.c2
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci.c1
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pci/pcie/portdrv_pci.c1
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pci/rom.c1
-rw-r--r--drivers/pcmcia/Kconfig10
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c1290
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c31
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c2
-rw-r--r--drivers/pcmcia/sa1100_generic.c7
-rw-r--r--drivers/pnp/manager.c2
-rw-r--r--drivers/pnp/pnpbios/rsparser.c2
-rw-r--r--drivers/s390/char/con3270.c7
-rw-r--r--drivers/s390/char/fs3270.c234
-rw-r--r--drivers/s390/char/raw3270.c100
-rw-r--r--drivers/s390/char/raw3270.h7
-rw-r--r--drivers/s390/char/tty3270.c27
-rw-r--r--drivers/s390/cio/cmf.c3
-rw-r--r--drivers/s390/cio/device.c20
-rw-r--r--drivers/s390/cio/device_fsm.c2
-rw-r--r--drivers/scsi/ahci.c55
-rw-r--r--drivers/scsi/ata_piix.c15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c9
-rw-r--r--drivers/scsi/ide-scsi.c38
-rw-r--r--drivers/scsi/libata-core.c80
-rw-r--r--drivers/scsi/libata-scsi.c46
-rw-r--r--drivers/scsi/libata.h2
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/pdc_adma.c26
-rw-r--r--drivers/scsi/sata_mv.c41
-rw-r--r--drivers/scsi/sata_nv.c3
-rw-r--r--drivers/scsi/sata_promise.c19
-rw-r--r--drivers/scsi/sata_qstor.c25
-rw-r--r--drivers/scsi/sata_sil.c7
-rw-r--r--drivers/scsi/sata_sil24.c41
-rw-r--r--drivers/scsi/sata_sis.c13
-rw-r--r--drivers/scsi/sata_svw.c3
-rw-r--r--drivers/scsi/sata_sx4.c13
-rw-r--r--drivers/scsi/sata_uli.c5
-rw-r--r--drivers/scsi/sata_via.c38
-rw-r--r--drivers/scsi/sata_vsc.c3
-rw-r--r--drivers/scsi/scsi_transport_fc.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h2
-rw-r--r--drivers/serial/ioc4_serial.c80
-rw-r--r--drivers/serial/mpsc.c2
-rw-r--r--drivers/serial/serial_core.c3
-rw-r--r--drivers/sh/superhyway/superhyway.c2
-rw-r--r--drivers/usb/host/ohci-omap.c3
-rw-r--r--drivers/usb/host/ohci-pci.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/input/pid.c2
-rw-r--r--drivers/video/Kconfig6
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbmem.c2
-rw-r--r--drivers/w1/w1_family.c1
-rw-r--r--drivers/zorro/zorro-sysfs.c1
-rw-r--r--drivers/zorro/zorro.c2
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/Kconfig.binfmt2
-rw-r--r--fs/attr.c3
-rw-r--r--fs/binfmt_elf.c4
-rw-r--r--fs/buffer.c19
-rw-r--r--fs/compat_ioctl.c4
-rw-r--r--fs/dquot.c2
-rw-r--r--fs/exec.c39
-rw-r--r--fs/ext2/inode.c4
-rw-r--r--fs/ext3/balloc.c7
-rw-r--r--fs/ext3/bitmap.c2
-rw-r--r--fs/ext3/bitmap.h8
-rw-r--r--fs/ext3/ialloc.c3
-rw-r--r--fs/ext3/inode.c11
-rw-r--r--fs/ext3/namei.c2
-rw-r--r--fs/ext3/namei.h8
-rw-r--r--fs/ext3/resize.c10
-rw-r--r--fs/ext3/super.c4
-rw-r--r--fs/ext3/xattr.c8
-rw-r--r--fs/fat/dir.c230
-rw-r--r--fs/file_table.c14
-rw-r--r--fs/filesystems.c1
-rw-r--r--fs/fs-writeback.c28
-rw-r--r--fs/fuse/dev.c6
-rw-r--r--fs/fuse/dir.c5
-rw-r--r--fs/fuse/fuse_i.h12
-rw-r--r--fs/inode.c1
-rw-r--r--fs/jffs2/background.c1
-rw-r--r--fs/jffs2/wbuf.c2
-rw-r--r--fs/msdos/namei.c14
-rw-r--r--fs/namei.c6
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/inode.c17
-rw-r--r--fs/proc/proc_misc.c8
-rw-r--r--fs/reiserfs/super.c25
-rw-r--r--fs/reiserfs/xattr_acl.c3
-rw-r--r--fs/super.c2
-rw-r--r--fs/vfat/namei.c20
-rw-r--r--fs/xattr.c14
-rw-r--r--include/asm-alpha/semaphore.h3
-rw-r--r--include/asm-arm/arch-ixp2000/ixdp2x01.h2
-rw-r--r--include/asm-arm/arch-ixp4xx/io.h6
-rw-r--r--include/asm-arm/pgtable.h3
-rw-r--r--include/asm-arm/semaphore.h2
-rw-r--r--include/asm-arm/unistd.h1
-rw-r--r--include/asm-arm26/pgtable.h2
-rw-r--r--include/asm-arm26/semaphore.h3
-rw-r--r--include/asm-arm26/unistd.h1
-rw-r--r--include/asm-cris/semaphore.h3
-rw-r--r--include/asm-cris/unistd.h1
-rw-r--r--include/asm-frv/pgtable.h2
-rw-r--r--include/asm-frv/semaphore.h3
-rw-r--r--include/asm-h8300/semaphore.h3
-rw-r--r--include/asm-h8300/unistd.h1
-rw-r--r--include/asm-i386/apic.h3
-rw-r--r--include/asm-i386/desc.h8
-rw-r--r--include/asm-i386/hw_irq.h1
-rw-r--r--include/asm-i386/mach-default/smpboot_hooks.h15
-rw-r--r--include/asm-i386/mach-es7000/mach_mpparse.h2
-rw-r--r--include/asm-i386/mach-visws/smpboot_hooks.h7
-rw-r--r--include/asm-i386/pgtable-2level.h5
-rw-r--r--include/asm-i386/pgtable-3level.h5
-rw-r--r--include/asm-i386/pgtable.h7
-rw-r--r--include/asm-i386/semaphore.h3
-rw-r--r--include/asm-i386/system.h33
-rw-r--r--include/asm-i386/unistd.h1
-rw-r--r--include/asm-ia64/pgtable.h3
-rw-r--r--include/asm-ia64/semaphore.h2
-rw-r--r--include/asm-ia64/unistd.h2
-rw-r--r--include/asm-m32r/pgtable.h2
-rw-r--r--include/asm-m32r/semaphore.h3
-rw-r--r--include/asm-m32r/thread_info.h2
-rw-r--r--include/asm-m32r/unistd.h1
-rw-r--r--include/asm-m68k/semaphore.h3
-rw-r--r--include/asm-m68k/sun3xflop.h2
-rw-r--r--include/asm-m68k/unistd.h1
-rw-r--r--include/asm-m68knommu/ide.h7
-rw-r--r--include/asm-m68knommu/semaphore.h3
-rw-r--r--include/asm-m68knommu/unistd.h1
-rw-r--r--include/asm-mips/pgtable-64.h1
-rw-r--r--include/asm-mips/pgtable.h1
-rw-r--r--include/asm-mips/semaphore.h3
-rw-r--r--include/asm-mips/unistd.h1
-rw-r--r--include/asm-parisc/ide.h1
-rw-r--r--include/asm-parisc/semaphore.h3
-rw-r--r--include/asm-parisc/unistd.h1
-rw-r--r--include/asm-powerpc/a.out.h (renamed from include/asm-ppc64/a.out.h)21
-rw-r--r--include/asm-powerpc/atomic.h (renamed from include/asm-ppc/atomic.h)45
-rw-r--r--include/asm-powerpc/auxvec.h (renamed from include/asm-ppc64/auxvec.h)8
-rw-r--r--include/asm-powerpc/backlight.h (renamed from include/asm-ppc/backlight.h)9
-rw-r--r--include/asm-powerpc/bug.h (renamed from include/asm-ppc64/bug.h)38
-rw-r--r--include/asm-powerpc/byteorder.h (renamed from include/asm-ppc64/byteorder.h)11
-rw-r--r--include/asm-powerpc/checksum.h (renamed from include/asm-ppc64/checksum.h)47
-rw-r--r--include/asm-powerpc/cputable.h427
-rw-r--r--include/asm-powerpc/dbdma.h (renamed from include/asm-ppc/dbdma.h)0
-rw-r--r--include/asm-powerpc/dma.h (renamed from include/asm-ppc/dma.h)91
-rw-r--r--include/asm-powerpc/elf.h (renamed from include/asm-ppc64/elf.h)96
-rw-r--r--include/asm-powerpc/firmware.h (renamed from include/asm-ppc64/firmware.h)10
-rw-r--r--include/asm-powerpc/grackle.h7
-rw-r--r--include/asm-powerpc/hardirq.h (renamed from include/asm-ppc/hardirq.h)14
-rw-r--r--include/asm-powerpc/heathrow.h (renamed from include/asm-ppc/heathrow.h)0
-rw-r--r--include/asm-powerpc/hw_irq.h (renamed from include/asm-ppc64/hw_irq.h)66
-rw-r--r--include/asm-powerpc/i8259.h12
-rw-r--r--include/asm-powerpc/iommu.h (renamed from include/asm-ppc64/iommu.h)41
-rw-r--r--include/asm-powerpc/irq.h (renamed from include/asm-ppc/irq.h)168
-rw-r--r--include/asm-powerpc/kdebug.h (renamed from include/asm-ppc64/kdebug.h)11
-rw-r--r--include/asm-powerpc/keylargo.h (renamed from include/asm-ppc/keylargo.h)0
-rw-r--r--include/asm-powerpc/kmap_types.h33
-rw-r--r--include/asm-powerpc/kprobes.h (renamed from include/asm-ppc64/kprobes.h)7
-rw-r--r--include/asm-powerpc/lmb.h (renamed from include/asm-ppc64/lmb.h)2
-rw-r--r--include/asm-powerpc/machdep.h (renamed from include/asm-ppc64/machdep.h)127
-rw-r--r--include/asm-powerpc/macio.h (renamed from include/asm-ppc/macio.h)0
-rw-r--r--include/asm-powerpc/mediabay.h (renamed from include/asm-ppc/mediabay.h)0
-rw-r--r--include/asm-powerpc/mpic.h (renamed from arch/ppc64/kernel/mpic.h)14
-rw-r--r--include/asm-powerpc/of_device.h (renamed from include/asm-ppc/of_device.h)7
-rw-r--r--include/asm-powerpc/ohare.h (renamed from include/asm-ppc/ohare.h)0
-rw-r--r--include/asm-powerpc/oprofile_impl.h (renamed from include/asm-ppc64/oprofile_impl.h)24
-rw-r--r--include/asm-powerpc/pSeries_reconfig.h (renamed from include/asm-ppc64/pSeries_reconfig.h)0
-rw-r--r--include/asm-powerpc/parport.h (renamed from include/asm-ppc/parport.h)6
-rw-r--r--include/asm-powerpc/pmac_feature.h (renamed from include/asm-ppc/pmac_feature.h)0
-rw-r--r--include/asm-powerpc/pmac_low_i2c.h (renamed from include/asm-ppc/pmac_low_i2c.h)0
-rw-r--r--include/asm-powerpc/pmc.h (renamed from include/asm-ppc64/pmc.h)21
-rw-r--r--include/asm-powerpc/posix_types.h (renamed from include/asm-ppc64/posix_types.h)40
-rw-r--r--include/asm-powerpc/ppc-pci.h (renamed from arch/ppc64/kernel/pci.h)6
-rw-r--r--include/asm-powerpc/ppc_asm.h (renamed from include/asm-ppc/ppc_asm.h)303
-rw-r--r--include/asm-powerpc/processor.h (renamed from include/asm-ppc/processor.h)190
-rw-r--r--include/asm-powerpc/prom.h219
-rw-r--r--include/asm-powerpc/reg.h (renamed from include/asm-ppc/reg.h)297
-rw-r--r--include/asm-powerpc/rtas.h (renamed from include/asm-ppc64/rtas.h)8
-rw-r--r--include/asm-powerpc/rtc.h78
-rw-r--r--include/asm-powerpc/rwsem.h (renamed from include/asm-ppc64/rwsem.h)18
-rw-r--r--include/asm-powerpc/scatterlist.h (renamed from include/asm-ppc64/scatterlist.h)24
-rw-r--r--include/asm-powerpc/seccomp.h (renamed from include/asm-ppc64/seccomp.h)11
-rw-r--r--include/asm-powerpc/sections.h (renamed from include/asm-ppc64/sections.h)21
-rw-r--r--include/asm-powerpc/semaphore.h (renamed from include/asm-ppc64/semaphore.h)9
-rw-r--r--include/asm-powerpc/smu.h (renamed from include/asm-ppc64/smu.h)0
-rw-r--r--include/asm-powerpc/spinlock_types.h (renamed from include/asm-ppc64/spinlock_types.h)4
-rw-r--r--include/asm-powerpc/sstep.h (renamed from include/asm-ppc64/sstep.h)4
-rw-r--r--include/asm-powerpc/statfs.h (renamed from include/asm-ppc64/statfs.h)19
-rw-r--r--include/asm-powerpc/synch.h51
-rw-r--r--include/asm-powerpc/system.h363
-rw-r--r--include/asm-powerpc/thread_info.h (renamed from include/asm-ppc64/thread_info.h)59
-rw-r--r--include/asm-powerpc/time.h226
-rw-r--r--include/asm-powerpc/types.h (renamed from include/asm-ppc64/types.h)37
-rw-r--r--include/asm-powerpc/uninorth.h (renamed from include/asm-ppc/uninorth.h)0
-rw-r--r--include/asm-powerpc/unistd.h (renamed from include/asm-ppc/unistd.h)92
-rw-r--r--include/asm-powerpc/vga.h (renamed from include/asm-ppc64/vga.h)20
-rw-r--r--include/asm-powerpc/vio.h (renamed from include/asm-ppc64/vio.h)22
-rw-r--r--include/asm-powerpc/xmon.h12
-rw-r--r--include/asm-ppc/a.out.h26
-rw-r--r--include/asm-ppc/auxvec.h14
-rw-r--r--include/asm-ppc/bug.h58
-rw-r--r--include/asm-ppc/byteorder.h76
-rw-r--r--include/asm-ppc/cache.h13
-rw-r--r--include/asm-ppc/checksum.h107
-rw-r--r--include/asm-ppc/cpm2.h3
-rw-r--r--include/asm-ppc/cputable.h129
-rw-r--r--include/asm-ppc/elf.h151
-rw-r--r--include/asm-ppc/hw_irq.h74
-rw-r--r--include/asm-ppc/i8259.h11
-rw-r--r--include/asm-ppc/io.h11
-rw-r--r--include/asm-ppc/kmap_types.h25
-rw-r--r--include/asm-ppc/machdep.h4
-rw-r--r--include/asm-ppc/mmu_context.h6
-rw-r--r--include/asm-ppc/mpc8260.h4
-rw-r--r--include/asm-ppc/mpc85xx.h3
-rw-r--r--include/asm-ppc/mpc8xx.h4
-rw-r--r--include/asm-ppc/open_pic.h3
-rw-r--r--include/asm-ppc/page.h18
-rw-r--r--include/asm-ppc/pci-bridge.h5
-rw-r--r--include/asm-ppc/pci.h6
-rw-r--r--include/asm-ppc/perfmon.h22
-rw-r--r--include/asm-ppc/pgtable.h2
-rw-r--r--include/asm-ppc/posix_types.h111
-rw-r--r--include/asm-ppc/ptrace.h2
-rw-r--r--include/asm-ppc/rwsem.h177
-rw-r--r--include/asm-ppc/scatterlist.h25
-rw-r--r--include/asm-ppc/seccomp.h10
-rw-r--r--include/asm-ppc/sections.h33
-rw-r--r--include/asm-ppc/semaphore.h111
-rw-r--r--include/asm-ppc/smp.h28
-rw-r--r--include/asm-ppc/spinlock.h8
-rw-r--r--include/asm-ppc/spinlock_types.h20
-rw-r--r--include/asm-ppc/statfs.h8
-rw-r--r--include/asm-ppc/system.h28
-rw-r--r--include/asm-ppc/thread_info.h107
-rw-r--r--include/asm-ppc/types.h69
-rw-r--r--include/asm-ppc/vga.h46
-rw-r--r--include/asm-ppc/xmon.h17
-rw-r--r--include/asm-ppc64/abs_addr.h7
-rw-r--r--include/asm-ppc64/atomic.h197
-rw-r--r--include/asm-ppc64/bitops.h2
-rw-r--r--include/asm-ppc64/bootinfo.h70
-rw-r--r--include/asm-ppc64/btext.h1
-rw-r--r--include/asm-ppc64/cputable.h167
-rw-r--r--include/asm-ppc64/dart.h59
-rw-r--r--include/asm-ppc64/dbdma.h2
-rw-r--r--include/asm-ppc64/dma.h329
-rw-r--r--include/asm-ppc64/futex.h2
-rw-r--r--include/asm-ppc64/hardirq.h27
-rw-r--r--include/asm-ppc64/io.h2
-rw-r--r--include/asm-ppc64/irq.h120
-rw-r--r--include/asm-ppc64/keylargo.h2
-rw-r--r--include/asm-ppc64/kmap_types.h23
-rw-r--r--include/asm-ppc64/macio.h2
-rw-r--r--include/asm-ppc64/memory.h61
-rw-r--r--include/asm-ppc64/mmu.h7
-rw-r--r--include/asm-ppc64/of_device.h2
-rw-r--r--include/asm-ppc64/page.h8
-rw-r--r--include/asm-ppc64/parport.h18
-rw-r--r--include/asm-ppc64/pci-bridge.h22
-rw-r--r--include/asm-ppc64/pci.h2
-rw-r--r--include/asm-ppc64/pgtable.h2
-rw-r--r--include/asm-ppc64/pmac_feature.h2
-rw-r--r--include/asm-ppc64/pmac_low_i2c.h2
-rw-r--r--include/asm-ppc64/ppc32.h14
-rw-r--r--include/asm-ppc64/ppc_asm.h242
-rw-r--r--include/asm-ppc64/processor.h558
-rw-r--r--include/asm-ppc64/prom.h4
-rw-r--r--include/asm-ppc64/smp.h1
-rw-r--r--include/asm-ppc64/system.h20
-rw-r--r--include/asm-ppc64/tce.h64
-rw-r--r--include/asm-ppc64/time.h124
-rw-r--r--include/asm-ppc64/tlbflush.h7
-rw-r--r--include/asm-ppc64/udbg.h3
-rw-r--r--include/asm-ppc64/uninorth.h2
-rw-r--r--include/asm-ppc64/unistd.h487
-rw-r--r--include/asm-s390/semaphore.h3
-rw-r--r--include/asm-s390/setup.h50
-rw-r--r--include/asm-s390/unistd.h1
-rw-r--r--include/asm-sh/pgtable.h2
-rw-r--r--include/asm-sh/semaphore.h3
-rw-r--r--include/asm-sh/unistd.h1
-rw-r--r--include/asm-sh64/pgtable.h3
-rw-r--r--include/asm-sh64/semaphore.h3
-rw-r--r--include/asm-sparc/floppy.h2
-rw-r--r--include/asm-sparc/pgtable.h2
-rw-r--r--include/asm-sparc/semaphore.h3
-rw-r--r--include/asm-sparc64/pgtable.h3
-rw-r--r--include/asm-sparc64/semaphore.h3
-rw-r--r--include/asm-um/cache.h19
-rw-r--r--include/asm-um/linkage.h8
-rw-r--r--include/asm-v850/semaphore.h3
-rw-r--r--include/asm-v850/unistd.h1
-rw-r--r--include/asm-x86_64/mtrr.h33
-rw-r--r--include/asm-x86_64/pgtable.h2
-rw-r--r--include/asm-x86_64/semaphore.h3
-rw-r--r--include/asm-x86_64/unistd.h2
-rw-r--r--include/asm-xtensa/semaphore.h3
-rw-r--r--include/keys/user-type.h47
-rw-r--r--include/linux/bitmap.h6
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/cpu.h1
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/cpumask.h20
-rw-r--r--include/linux/dmi.h2
-rw-r--r--include/linux/fs.h10
-rw-r--r--include/linux/fsl_devices.h13
-rw-r--r--include/linux/fuse.h1
-rw-r--r--include/linux/gameport.h1
-rw-r--r--include/linux/i2c.h1
-rw-r--r--include/linux/i2o.h8
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/key-ui.h3
-rw-r--r--include/linux/key.h13
-rw-r--r--include/linux/kobj_map.h2
-rw-r--r--include/linux/kthread.h12
-rw-r--r--include/linux/libata.h30
-rw-r--r--include/linux/mempolicy.h6
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/msdos_fs.h11
-rw-r--r--include/linux/mtd/map.h3
-rw-r--r--include/linux/nodemask.h20
-rw-r--r--include/linux/pm.h1
-rw-r--r--include/linux/rcupdate.h1
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/linux/security.h84
-rw-r--r--include/linux/serial.h1
-rw-r--r--include/linux/signal.h1
-rw-r--r--include/linux/spinlock.h31
-rw-r--r--include/linux/suspend.h9
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/linux/textsearch.h1
-rw-r--r--include/linux/timer.h17
-rw-r--r--include/linux/timex.h7
-rw-r--r--include/linux/zutil.h1
-rw-r--r--include/pcmcia/ss.h1
-rw-r--r--include/scsi/scsi_cmnd.h1
-rw-r--r--include/scsi/scsi_transport_fc.h1
-rw-r--r--init/Kconfig4
-rw-r--r--init/main.c11
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/cpu.c1
-rw-r--r--kernel/cpuset.c466
-rw-r--r--kernel/exit.c29
-rw-r--r--kernel/irq/handle.c6
-rw-r--r--kernel/kallsyms.c1
-rw-r--r--kernel/kmod.c6
-rw-r--r--kernel/kprobes.c1
-rw-r--r--kernel/kthread.c13
-rw-r--r--kernel/params.c1
-rw-r--r--kernel/posix-cpu-timers.c16
-rw-r--r--kernel/posix-timers.c19
-rw-r--r--kernel/power/Makefile2
-rw-r--r--kernel/power/disk.c22
-rw-r--r--kernel/power/main.c5
-rw-r--r--kernel/power/power.h17
-rw-r--r--kernel/power/snapshot.c435
-rw-r--r--kernel/power/swsusp.c576
-rw-r--r--kernel/printk.c78
-rw-r--r--kernel/ptrace.c7
-rw-r--r--kernel/rcupdate.c10
-rw-r--r--kernel/rcutorture.c492
-rw-r--r--kernel/sched.c1
-rw-r--r--kernel/signal.c148
-rw-r--r--kernel/time.c25
-rw-r--r--kernel/timer.c342
-rw-r--r--kernel/workqueue.c33
-rw-r--r--lib/Kconfig.debug27
-rw-r--r--lib/bitmap.c166
-rw-r--r--lib/extable.c3
-rw-r--r--lib/idr.c35
-rw-r--r--lib/kobject.c1
-rw-r--r--lib/smp_processor_id.c1
-rw-r--r--lib/sort.c1
-rw-r--r--lib/string.c125
-rw-r--r--lib/vsprintf.c1
-rw-r--r--lib/zlib_inflate/inflate.c1
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/mempolicy.c64
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/pdflush.c13
-rw-r--r--mm/swap.c1
-rw-r--r--mm/tiny-shmem.c5
-rw-r--r--mm/truncate.c11
-rw-r--r--net/ipv4/netfilter/ipt_addrtype.c2
-rw-r--r--scripts/kconfig/Makefile9
-rw-r--r--scripts/kconfig/lkc.h8
-rw-r--r--scripts/kconfig/mconf.c3
-rw-r--r--security/dummy.c25
-rw-r--r--security/keys/key.c56
-rw-r--r--security/keys/keyctl.c13
-rw-r--r--security/keys/keyring.c21
-rw-r--r--security/keys/permission.c7
-rw-r--r--security/keys/process_keys.c9
-rw-r--r--security/keys/user_defined.c49
-rw-r--r--security/selinux/hooks.c78
-rw-r--r--security/selinux/netif.c3
-rw-r--r--security/selinux/selinuxfs.c50
-rw-r--r--security/selinux/ss/conditional.c12
-rw-r--r--security/selinux/ss/ebitmap.c9
-rw-r--r--security/selinux/ss/hashtab.c6
-rw-r--r--security/selinux/ss/policydb.c51
-rw-r--r--security/selinux/ss/services.c11
-rw-r--r--sound/oss/ac97_codec.c1
-rw-r--r--sound/oss/awe_wave.c2
-rw-r--r--sound/oss/cs4232.c6
-rw-r--r--sound/oss/wavfront.c38
1122 files changed, 56389 insertions, 25599 deletions
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
new file mode 100644
index 000000000000..e4c38152f7f7
--- /dev/null
+++ b/Documentation/RCU/torture.txt
@@ -0,0 +1,122 @@
1RCU Torture Test Operation
2
3
4CONFIG_RCU_TORTURE_TEST
5
6The CONFIG_RCU_TORTURE_TEST config option is available for all RCU
7implementations. It creates an rcutorture kernel module that can
8be loaded to run a torture test. The test periodically outputs
9status messages via printk(), which can be examined via the dmesg
10command (perhaps grepping for "rcutorture"). The test is started
11when the module is loaded, and stops when the module is unloaded.
12
13However, actually setting this config option to "y" results in the system
14running the test immediately upon boot, and ending only when the system
15is taken down. Normally, one will instead want to build the system
16with CONFIG_RCU_TORTURE_TEST=m and to use modprobe and rmmod to control
17the test, perhaps using a script similar to the one shown at the end of
18this document. Note that you will need CONFIG_MODULE_UNLOAD in order
19to be able to end the test.
20
21
22MODULE PARAMETERS
23
24This module has the following parameters:
25
26nreaders This is the number of RCU reading threads supported.
27 The default is twice the number of CPUs. Why twice?
28 To properly exercise RCU implementations with preemptible
29 read-side critical sections.
30
31stat_interval The number of seconds between output of torture
32 statistics (via printk()). Regardless of the interval,
33 statistics are printed when the module is unloaded.
34 Setting the interval to zero causes the statistics to
35 be printed -only- when the module is unloaded, and this
36 is the default.
37
38verbose Enable debug printk()s. Default is disabled.
39
40
41OUTPUT
42
43The statistics output is as follows:
44
45 rcutorture: --- Start of test: nreaders=16 stat_interval=0 verbose=0
46 rcutorture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915
47 rcutorture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0
48 rcutorture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0
49 rcutorture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0
50 rcutorture: --- End of test
51
52The command "dmesg | grep rcutorture:" will extract this information on
53most systems. On more esoteric configurations, it may be necessary to
54use other commands to access the output of the printk()s used by
55the RCU torture test. The printk()s use KERN_ALERT, so they should
56be evident. ;-)
57
58The entries are as follows:
59
60o "ggp": The number of counter flips (or batches) since boot.
61
62o "rtc": The hexadecimal address of the structure currently visible
63 to readers.
64
65o "ver": The number of times since boot that the rcutw writer task
66 has changed the structure visible to readers.
67
68o "tfle": If non-zero, indicates that the "torture freelist"
69 containing structure to be placed into the "rtc" area is empty.
70 This condition is important, since it can fool you into thinking
71 that RCU is working when it is not. :-/
72
73o "rta": Number of structures allocated from the torture freelist.
74
75o "rtaf": Number of allocations from the torture freelist that have
76 failed due to the list being empty.
77
78o "rtf": Number of frees into the torture freelist.
79
80o "Reader Pipe": Histogram of "ages" of structures seen by readers.
81 If any entries past the first two are non-zero, RCU is broken.
82 And rcutorture prints the error flag string "!!!" to make sure
83 you notice. The age of a newly allocated structure is zero,
84 it becomes one when removed from reader visibility, and is
85 incremented once per grace period subsequently -- and is freed
86 after passing through (RCU_TORTURE_PIPE_LEN-2) grace periods.
87
88 The output displayed above was taken from a correctly working
89 RCU. If you want to see what it looks like when broken, break
90 it yourself. ;-)
91
92o "Reader Batch": Another histogram of "ages" of structures seen
93 by readers, but in terms of counter flips (or batches) rather
94 than in terms of grace periods. The legal number of non-zero
95 entries is again two. The reason for this separate view is
96 that it is easier to get the third entry to show up in the
97 "Reader Batch" list than in the "Reader Pipe" list.
98
99o "Free-Block Circulation": Shows the number of torture structures
100 that have reached a given point in the pipeline. The first element
101 should closely correspond to the number of structures allocated,
102 the second to the number that have been removed from reader view,
103 and all but the last remaining to the corresponding number of
104 passes through a grace period. The last entry should be zero,
105 as it is only incremented if a torture structure's counter
106 somehow gets incremented farther than it should.
107
108
109USAGE
110
111The following script may be used to torture RCU:
112
113 #!/bin/sh
114
115 modprobe rcutorture
116 sleep 100
117 rmmod rcutorture
118 dmesg | grep rcutorture:
119
120The output can be manually inspected for the error flag of "!!!".
121One could of course create a more elaborate script that automatically
122checked for such errors.
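
For instance, a slightly more elaborate sketch along those lines (the
100-second run time is as arbitrary here as in the script above) might be:

	#!/bin/sh

	modprobe rcutorture
	sleep 100
	rmmod rcutorture
	if dmesg | grep rcutorture: | grep -q '!!!'
	then
		echo rcutorture: errors detected
		exit 1
	else
		echo rcutorture: no errors detected
	fi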
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index d17b7d2dd771..a09a8eb80665 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -94,7 +94,7 @@ the available CPU and Memory resources amongst the requesting tasks.
94But larger systems, which benefit more from careful processor and 94But larger systems, which benefit more from careful processor and
95memory placement to reduce memory access times and contention, 95memory placement to reduce memory access times and contention,
96and which typically represent a larger investment for the customer, 96and which typically represent a larger investment for the customer,
97can benefit from explictly placing jobs on properly sized subsets of 97can benefit from explicitly placing jobs on properly sized subsets of
98the system. 98the system.
99 99
100This can be especially valuable on: 100This can be especially valuable on:
diff --git a/Documentation/firmware_class/firmware_sample_driver.c b/Documentation/firmware_class/firmware_sample_driver.c
index 4bef8c25172c..d3ad2c24490a 100644
--- a/Documentation/firmware_class/firmware_sample_driver.c
+++ b/Documentation/firmware_class/firmware_sample_driver.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/string.h>
16 17
17#include "linux/firmware.h" 18#include "linux/firmware.h"
18 19
diff --git a/Documentation/firmware_class/firmware_sample_firmware_class.c b/Documentation/firmware_class/firmware_sample_firmware_class.c
index 09eab2f1b373..57b956aecbc5 100644
--- a/Documentation/firmware_class/firmware_sample_firmware_class.c
+++ b/Documentation/firmware_class/firmware_sample_firmware_class.c
@@ -14,6 +14,8 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/timer.h> 16#include <linux/timer.h>
17#include <linux/slab.h>
18#include <linux/string.h>
17#include <linux/firmware.h> 19#include <linux/firmware.h>
18 20
19 21
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index e94d9c6cc522..cff7b652588a 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -273,6 +273,7 @@ For now, you can ignore the `flags' parameter. It is there for future use.
273 if (is_isa) { 273 if (is_isa) {
274 274
275 /* Discard immediately if this ISA range is already used */ 275 /* Discard immediately if this ISA range is already used */
276 /* FIXME: never use check_region(), only request_region() */
276 if (check_region(address,FOO_EXTENT)) 277 if (check_region(address,FOO_EXTENT))
277 goto ERROR0; 278 goto ERROR0;
278 279
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index 4afe03a58c5b..31154882000a 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -196,7 +196,7 @@ KEY ACCESS PERMISSIONS
196 196
197Keys have an owner user ID, a group access ID, and a permissions mask. The mask 197Keys have an owner user ID, a group access ID, and a permissions mask. The mask
198has up to eight bits each for possessor, user, group and other access. Only 198has up to eight bits each for possessor, user, group and other access. Only
199five of each set of eight bits are defined. These permissions granted are: 199six of each set of eight bits are defined. These permissions granted are:
200 200
201 (*) View 201 (*) View
202 202
@@ -224,6 +224,10 @@ five of each set of eight bits are defined. These permissions granted are:
224 keyring to a key, a process must have Write permission on the keyring and 224 keyring to a key, a process must have Write permission on the keyring and
225 Link permission on the key. 225 Link permission on the key.
226 226
227 (*) Set Attribute
228
229 This permits a key's UID, GID and permissions mask to be changed.
230
227For changing the ownership, group ID or permissions mask, being the owner of 231For changing the ownership, group ID or permissions mask, being the owner of
228the key or having the sysadmin capability is sufficient. 232the key or having the sysadmin capability is sufficient.
229 233
@@ -242,15 +246,15 @@ about the status of the key service:
242 this way: 246 this way:
243 247
244 SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY 248 SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY
245 00000001 I----- 39 perm 1f1f0000 0 0 keyring _uid_ses.0: 1/4 249 00000001 I----- 39 perm 1f3f0000 0 0 keyring _uid_ses.0: 1/4
246 00000002 I----- 2 perm 1f1f0000 0 0 keyring _uid.0: empty 250 00000002 I----- 2 perm 1f3f0000 0 0 keyring _uid.0: empty
247 00000007 I----- 1 perm 1f1f0000 0 0 keyring _pid.1: empty 251 00000007 I----- 1 perm 1f3f0000 0 0 keyring _pid.1: empty
248 0000018d I----- 1 perm 1f1f0000 0 0 keyring _pid.412: empty 252 0000018d I----- 1 perm 1f3f0000 0 0 keyring _pid.412: empty
249 000004d2 I--Q-- 1 perm 1f1f0000 32 -1 keyring _uid.32: 1/4 253 000004d2 I--Q-- 1 perm 1f3f0000 32 -1 keyring _uid.32: 1/4
250 000004d3 I--Q-- 3 perm 1f1f0000 32 -1 keyring _uid_ses.32: empty 254 000004d3 I--Q-- 3 perm 1f3f0000 32 -1 keyring _uid_ses.32: empty
251 00000892 I--QU- 1 perm 1f000000 0 0 user metal:copper: 0 255 00000892 I--QU- 1 perm 1f000000 0 0 user metal:copper: 0
252 00000893 I--Q-N 1 35s 1f1f0000 0 0 user metal:silver: 0 256 00000893 I--Q-N 1 35s 1f3f0000 0 0 user metal:silver: 0
253 00000894 I--Q-- 1 10h 001f0000 0 0 user metal:gold: 0 257 00000894 I--Q-- 1 10h 003f0000 0 0 user metal:gold: 0
254 258
255 The flags are: 259 The flags are:
256 260
diff --git a/MAINTAINERS b/MAINTAINERS
index e88d193d42f8..983f9e9aed61 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2286,6 +2286,11 @@ W: http://tpmdd.sourceforge.net
2286L: tpmdd-devel@lists.sourceforge.net 2286L: tpmdd-devel@lists.sourceforge.net
2287S: Maintained 2287S: Maintained
2288 2288
2289Telecom Clock Driver for MCPL0010
2290P: Mark Gross
2291M: mark.gross@intel.com
2292S: Supported
2293
2289TENSILICA XTENSA PORT (xtensa): 2294TENSILICA XTENSA PORT (xtensa):
2290P: Chris Zankel 2295P: Chris Zankel
2291M: chris@zankel.net 2296M: chris@zankel.net
diff --git a/README b/README
index d1edcc7adabe..4ee7dda88ba3 100644
--- a/README
+++ b/README
@@ -54,6 +54,10 @@ INSTALLING the kernel:
54 54
55 gzip -cd linux-2.6.XX.tar.gz | tar xvf - 55 gzip -cd linux-2.6.XX.tar.gz | tar xvf -
56 56
57 or
58 bzip2 -dc linux-2.6.XX.tar.bz2 | tar xvf -
59
60
57 Replace "XX" with the version number of the latest kernel. 61 Replace "XX" with the version number of the latest kernel.
58 62
59 Do NOT use the /usr/src/linux area! This area has a (usually 63 Do NOT use the /usr/src/linux area! This area has a (usually
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 67be50b7d80a..6b2921be1909 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -55,10 +55,6 @@
55#include "proto.h" 55#include "proto.h"
56#include "irq_impl.h" 56#include "irq_impl.h"
57 57
58u64 jiffies_64 = INITIAL_JIFFIES;
59
60EXPORT_SYMBOL(jiffies_64);
61
62extern unsigned long wall_jiffies; /* kernel/timer.c */ 58extern unsigned long wall_jiffies; /* kernel/timer.c */
63 59
64static int set_rtc_mmss(unsigned long); 60static int set_rtc_mmss(unsigned long);
diff --git a/arch/arm/common/amba.c b/arch/arm/common/amba.c
index c6beb751f2a9..e1013112c354 100644
--- a/arch/arm/common/amba.c
+++ b/arch/arm/common/amba.c
@@ -10,6 +10,8 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/string.h>
14#include <linux/slab.h>
13 15
14#include <asm/io.h> 16#include <asm/io.h>
15#include <asm/irq.h> 17#include <asm/irq.h>
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cbf2165476b0..ad6c89a555bb 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -33,8 +33,8 @@
33#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
34 34
35#undef DEBUG 35#undef DEBUG
36
37#undef STATS 36#undef STATS
37
38#ifdef STATS 38#ifdef STATS
39#define DO_STATS(X) do { X ; } while (0) 39#define DO_STATS(X) do { X ; } while (0)
40#else 40#else
@@ -52,26 +52,31 @@ struct safe_buffer {
52 int direction; 52 int direction;
53 53
54 /* safe buffer info */ 54 /* safe buffer info */
55 struct dma_pool *pool; 55 struct dmabounce_pool *pool;
56 void *safe; 56 void *safe;
57 dma_addr_t safe_dma_addr; 57 dma_addr_t safe_dma_addr;
58}; 58};
59 59
60struct dmabounce_pool {
61 unsigned long size;
62 struct dma_pool *pool;
63#ifdef STATS
64 unsigned long allocs;
65#endif
66};
67
60struct dmabounce_device_info { 68struct dmabounce_device_info {
61 struct list_head node; 69 struct list_head node;
62 70
63 struct device *dev; 71 struct device *dev;
64 struct dma_pool *small_buffer_pool;
65 struct dma_pool *large_buffer_pool;
66 struct list_head safe_buffers; 72 struct list_head safe_buffers;
67 unsigned long small_buffer_size, large_buffer_size;
68#ifdef STATS 73#ifdef STATS
69 unsigned long sbp_allocs;
70 unsigned long lbp_allocs;
71 unsigned long total_allocs; 74 unsigned long total_allocs;
72 unsigned long map_op_count; 75 unsigned long map_op_count;
73 unsigned long bounce_count; 76 unsigned long bounce_count;
74#endif 77#endif
78 struct dmabounce_pool small;
79 struct dmabounce_pool large;
75}; 80};
76 81
77static LIST_HEAD(dmabounce_devs); 82static LIST_HEAD(dmabounce_devs);
@@ -82,9 +87,9 @@ static void print_alloc_stats(struct dmabounce_device_info *device_info)
82 printk(KERN_INFO 87 printk(KERN_INFO
83 "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n", 88 "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
84 device_info->dev->bus_id, 89 device_info->dev->bus_id,
85 device_info->sbp_allocs, device_info->lbp_allocs, 90 device_info->small.allocs, device_info->large.allocs,
86 device_info->total_allocs - device_info->sbp_allocs - 91 device_info->total_allocs - device_info->small.allocs -
87 device_info->lbp_allocs, 92 device_info->large.allocs,
88 device_info->total_allocs); 93 device_info->total_allocs);
89} 94}
90#endif 95#endif
@@ -106,18 +111,22 @@ find_dmabounce_dev(struct device *dev)
106/* allocate a 'safe' buffer and keep track of it */ 111/* allocate a 'safe' buffer and keep track of it */
107static inline struct safe_buffer * 112static inline struct safe_buffer *
108alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr, 113alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
109 size_t size, enum dma_data_direction dir) 114 size_t size, enum dma_data_direction dir)
110{ 115{
111 struct safe_buffer *buf; 116 struct safe_buffer *buf;
112 struct dma_pool *pool; 117 struct dmabounce_pool *pool;
113 struct device *dev = device_info->dev; 118 struct device *dev = device_info->dev;
114 void *safe;
115 dma_addr_t safe_dma_addr;
116 119
117 dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n", 120 dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
118 __func__, ptr, size, dir); 121 __func__, ptr, size, dir);
119 122
120 DO_STATS ( device_info->total_allocs++ ); 123 if (size <= device_info->small.size) {
124 pool = &device_info->small;
125 } else if (size <= device_info->large.size) {
126 pool = &device_info->large;
127 } else {
128 pool = NULL;
129 }
121 130
122 buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC); 131 buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
123 if (buf == NULL) { 132 if (buf == NULL) {
@@ -125,41 +134,35 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
125 return NULL; 134 return NULL;
126 } 135 }
127 136
128 if (size <= device_info->small_buffer_size) { 137 buf->ptr = ptr;
129 pool = device_info->small_buffer_pool; 138 buf->size = size;
130 safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr); 139 buf->direction = dir;
131 140 buf->pool = pool;
132 DO_STATS ( device_info->sbp_allocs++ );
133 } else if (size <= device_info->large_buffer_size) {
134 pool = device_info->large_buffer_pool;
135 safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
136 141
137 DO_STATS ( device_info->lbp_allocs++ ); 142 if (pool) {
143 buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
144 &buf->safe_dma_addr);
138 } else { 145 } else {
139 pool = NULL; 146 buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
140 safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC); 147 GFP_ATOMIC);
141 } 148 }
142 149
143 if (safe == NULL) { 150 if (buf->safe == NULL) {
144 dev_warn(device_info->dev, 151 dev_warn(dev,
145 "%s: could not alloc dma memory (size=%d)\n", 152 "%s: could not alloc dma memory (size=%d)\n",
146 __func__, size); 153 __func__, size);
147 kfree(buf); 154 kfree(buf);
148 return NULL; 155 return NULL;
149 } 156 }
150 157
151#ifdef STATS 158#ifdef STATS
159 if (pool)
160 pool->allocs++;
161 device_info->total_allocs++;
152 if (device_info->total_allocs % 1000 == 0) 162 if (device_info->total_allocs % 1000 == 0)
153 print_alloc_stats(device_info); 163 print_alloc_stats(device_info);
154#endif 164#endif
155 165
156 buf->ptr = ptr;
157 buf->size = size;
158 buf->direction = dir;
159 buf->pool = pool;
160 buf->safe = safe;
161 buf->safe_dma_addr = safe_dma_addr;
162
163 list_add(&buf->node, &device_info->safe_buffers); 166 list_add(&buf->node, &device_info->safe_buffers);
164 167
165 return buf; 168 return buf;
@@ -186,7 +189,7 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
186 list_del(&buf->node); 189 list_del(&buf->node);
187 190
188 if (buf->pool) 191 if (buf->pool)
189 dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr); 192 dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
190 else 193 else
191 dma_free_coherent(device_info->dev, buf->size, buf->safe, 194 dma_free_coherent(device_info->dev, buf->size, buf->safe,
192 buf->safe_dma_addr); 195 buf->safe_dma_addr);
@@ -197,12 +200,10 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
197/* ************************************************** */ 200/* ************************************************** */
198 201
199#ifdef STATS 202#ifdef STATS
200
201static void print_map_stats(struct dmabounce_device_info *device_info) 203static void print_map_stats(struct dmabounce_device_info *device_info)
202{ 204{
203 printk(KERN_INFO 205 dev_info(device_info->dev,
204 "%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n", 206 "dmabounce: map_op_count=%lu, bounce_count=%lu\n",
205 device_info->dev->bus_id,
206 device_info->map_op_count, device_info->bounce_count); 207 device_info->map_op_count, device_info->bounce_count);
207} 208}
208#endif 209#endif
@@ -258,13 +259,13 @@ map_single(struct device *dev, void *ptr, size_t size,
258 __func__, ptr, buf->safe, size); 259 __func__, ptr, buf->safe, size);
259 memcpy(buf->safe, ptr, size); 260 memcpy(buf->safe, ptr, size);
260 } 261 }
261 consistent_sync(buf->safe, size, dir); 262 ptr = buf->safe;
262 263
263 dma_addr = buf->safe_dma_addr; 264 dma_addr = buf->safe_dma_addr;
264 } else {
265 consistent_sync(ptr, size, dir);
266 } 265 }
267 266
267 consistent_sync(ptr, size, dir);
268
268 return dma_addr; 269 return dma_addr;
269} 270}
270 271
@@ -278,7 +279,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
278 /* 279 /*
279 * Trying to unmap an invalid mapping 280 * Trying to unmap an invalid mapping
280 */ 281 */
281 if (dma_addr == ~0) { 282 if (dma_mapping_error(dma_addr)) {
282 dev_err(dev, "Trying to unmap invalid mapping\n"); 283 dev_err(dev, "Trying to unmap invalid mapping\n");
283 return; 284 return;
284 } 285 }
@@ -570,11 +571,25 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
570 local_irq_restore(flags); 571 local_irq_restore(flags);
571} 572}
572 573
574static int
575dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
576 unsigned long size)
577{
578 pool->size = size;
579 DO_STATS(pool->allocs = 0);
580 pool->pool = dma_pool_create(name, dev, size,
581 0 /* byte alignment */,
582 0 /* no page-crossing issues */);
583
584 return pool->pool ? 0 : -ENOMEM;
585}
586
573int 587int
574dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, 588dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
575 unsigned long large_buffer_size) 589 unsigned long large_buffer_size)
576{ 590{
577 struct dmabounce_device_info *device_info; 591 struct dmabounce_device_info *device_info;
592 int ret;
578 593
579 device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC); 594 device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
580 if (!device_info) { 595 if (!device_info) {
@@ -584,45 +599,31 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
584 return -ENOMEM; 599 return -ENOMEM;
585 } 600 }
586 601
587 device_info->small_buffer_pool = 602 ret = dmabounce_init_pool(&device_info->small, dev,
588 dma_pool_create("small_dmabounce_pool", 603 "small_dmabounce_pool", small_buffer_size);
589 dev, 604 if (ret) {
590 small_buffer_size, 605 dev_err(dev,
591 0 /* byte alignment */, 606 "dmabounce: could not allocate DMA pool for %ld byte objects\n",
592 0 /* no page-crossing issues */); 607 small_buffer_size);
593 if (!device_info->small_buffer_pool) { 608 goto err_free;
594 printk(KERN_ERR
595 "dmabounce: could not allocate small DMA pool for %s\n",
596 dev->bus_id);
597 kfree(device_info);
598 return -ENOMEM;
599 } 609 }
600 610
601 if (large_buffer_size) { 611 if (large_buffer_size) {
602 device_info->large_buffer_pool = 612 ret = dmabounce_init_pool(&device_info->large, dev,
603 dma_pool_create("large_dmabounce_pool", 613 "large_dmabounce_pool",
604 dev, 614 large_buffer_size);
605 large_buffer_size, 615 if (ret) {
606 0 /* byte alignment */, 616 dev_err(dev,
607 0 /* no page-crossing issues */); 617 "dmabounce: could not allocate DMA pool for %ld byte objects\n",
608 if (!device_info->large_buffer_pool) { 618 large_buffer_size);
609 printk(KERN_ERR 619 goto err_destroy;
610 "dmabounce: could not allocate large DMA pool for %s\n",
611 dev->bus_id);
612 dma_pool_destroy(device_info->small_buffer_pool);
613
614 return -ENOMEM;
615 } 620 }
616 } 621 }
617 622
618 device_info->dev = dev; 623 device_info->dev = dev;
619 device_info->small_buffer_size = small_buffer_size;
620 device_info->large_buffer_size = large_buffer_size;
621 INIT_LIST_HEAD(&device_info->safe_buffers); 624 INIT_LIST_HEAD(&device_info->safe_buffers);
622 625
623#ifdef STATS 626#ifdef STATS
624 device_info->sbp_allocs = 0;
625 device_info->lbp_allocs = 0;
626 device_info->total_allocs = 0; 627 device_info->total_allocs = 0;
627 device_info->map_op_count = 0; 628 device_info->map_op_count = 0;
628 device_info->bounce_count = 0; 629 device_info->bounce_count = 0;
@@ -634,6 +635,12 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
634 dev->bus_id, dev->bus->name); 635 dev->bus_id, dev->bus->name);
635 636
636 return 0; 637 return 0;
638
639 err_destroy:
640 dma_pool_destroy(device_info->small.pool);
641 err_free:
642 kfree(device_info);
643 return ret;
637} 644}
638 645
639void 646void
@@ -655,10 +662,10 @@ dmabounce_unregister_dev(struct device *dev)
655 BUG(); 662 BUG();
656 } 663 }
657 664
658 if (device_info->small_buffer_pool) 665 if (device_info->small.pool)
659 dma_pool_destroy(device_info->small_buffer_pool); 666 dma_pool_destroy(device_info->small.pool);
660 if (device_info->large_buffer_pool) 667 if (device_info->large.pool)
661 dma_pool_destroy(device_info->large_buffer_pool); 668 dma_pool_destroy(device_info->large.pool);
662 669
663#ifdef STATS 670#ifdef STATS
664 print_alloc_stats(device_info); 671 print_alloc_stats(device_info);
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index e8356b76d7c6..4af0cf5f3bfb 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -12,6 +12,9 @@
12 */ 12 */
13 13
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/string.h>
16#include <linux/slab.h>
17
15#include <asm/io.h> 18#include <asm/io.h>
16#include <asm/hardware/scoop.h> 19#include <asm/hardware/scoop.h>
17 20
diff --git a/arch/arm/configs/ixdp2400_defconfig b/arch/arm/configs/ixdp2400_defconfig
index 678720fa2e2e..ddeb9f99d662 100644
--- a/arch/arm/configs/ixdp2400_defconfig
+++ b/arch/arm/configs/ixdp2400_defconfig
@@ -559,7 +559,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
559# 559#
560CONFIG_SERIAL_8250=y 560CONFIG_SERIAL_8250=y
561CONFIG_SERIAL_8250_CONSOLE=y 561CONFIG_SERIAL_8250_CONSOLE=y
562CONFIG_SERIAL_8250_NR_UARTS=2 562CONFIG_SERIAL_8250_NR_UARTS=1
563# CONFIG_SERIAL_8250_EXTENDED is not set 563# CONFIG_SERIAL_8250_EXTENDED is not set
564 564
565# 565#
diff --git a/arch/arm/configs/ixdp2800_defconfig b/arch/arm/configs/ixdp2800_defconfig
index 261e2343903b..81d3a0606f95 100644
--- a/arch/arm/configs/ixdp2800_defconfig
+++ b/arch/arm/configs/ixdp2800_defconfig
@@ -559,7 +559,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
559# 559#
560CONFIG_SERIAL_8250=y 560CONFIG_SERIAL_8250=y
561CONFIG_SERIAL_8250_CONSOLE=y 561CONFIG_SERIAL_8250_CONSOLE=y
562CONFIG_SERIAL_8250_NR_UARTS=2 562CONFIG_SERIAL_8250_NR_UARTS=1
563# CONFIG_SERIAL_8250_EXTENDED is not set 563# CONFIG_SERIAL_8250_EXTENDED is not set
564 564
565# 565#
diff --git a/arch/arm/kernel/arthur.c b/arch/arm/kernel/arthur.c
index a418dad6692c..0ee2e9819631 100644
--- a/arch/arm/kernel/arthur.c
+++ b/arch/arm/kernel/arthur.c
@@ -18,6 +18,7 @@
18#include <linux/stddef.h> 18#include <linux/stddef.h>
19#include <linux/signal.h> 19#include <linux/signal.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/sched.h>
21 22
22#include <asm/ptrace.h> 23#include <asm/ptrace.h>
23 24
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index cd99b83f14c2..9bd8609a2926 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -782,7 +782,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
782 return ret; 782 return ret;
783} 783}
784 784
785asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 785asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
786{ 786{
787 struct task_struct *child; 787 struct task_struct *child;
788 int ret; 788 int ret;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 69449a818dcc..fc4729106a32 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -36,10 +36,6 @@
36#include <asm/thread_info.h> 36#include <asm/thread_info.h>
37#include <asm/mach/time.h> 37#include <asm/mach/time.h>
38 38
39u64 jiffies_64 = INITIAL_JIFFIES;
40
41EXPORT_SYMBOL(jiffies_64);
42
43/* 39/*
44 * Our system timer. 40 * Our system timer.
45 */ 41 */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 66e5a0516f23..45e9ea6cd2a5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -198,25 +198,16 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
198 barrier(); 198 barrier();
199} 199}
200 200
201DEFINE_SPINLOCK(die_lock); 201static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
202
203/*
204 * This function is protected against re-entrancy.
205 */
206NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
207{ 202{
208 struct task_struct *tsk = current; 203 struct task_struct *tsk = thread->task;
209 static int die_counter; 204 static int die_counter;
210 205
211 console_verbose();
212 spin_lock_irq(&die_lock);
213 bust_spinlocks(1);
214
215 printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter); 206 printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
216 print_modules(); 207 print_modules();
217 __show_regs(regs); 208 __show_regs(regs);
218 printk("Process %s (pid: %d, stack limit = 0x%p)\n", 209 printk("Process %s (pid: %d, stack limit = 0x%p)\n",
219 tsk->comm, tsk->pid, tsk->thread_info + 1); 210 tsk->comm, tsk->pid, thread + 1);
220 211
221 if (!user_mode(regs) || in_interrupt()) { 212 if (!user_mode(regs) || in_interrupt()) {
222 dump_mem("Stack: ", regs->ARM_sp, 213 dump_mem("Stack: ", regs->ARM_sp,
@@ -224,7 +215,21 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
224 dump_backtrace(regs, tsk); 215 dump_backtrace(regs, tsk);
225 dump_instr(regs); 216 dump_instr(regs);
226 } 217 }
218}
227 219
220DEFINE_SPINLOCK(die_lock);
221
222/*
223 * This function is protected against re-entrancy.
224 */
225NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
226{
227 struct thread_info *thread = current_thread_info();
228
229 console_verbose();
230 spin_lock_irq(&die_lock);
231 bust_spinlocks(1);
232 __die(str, err, thread, regs);
228 bust_spinlocks(0); 233 bust_spinlocks(0);
229 spin_unlock_irq(&die_lock); 234 spin_unlock_irq(&die_lock);
230 do_exit(SIGSEGV); 235 do_exit(SIGSEGV);
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
new file mode 100644
index 000000000000..561e20717b30
--- /dev/null
+++ b/arch/arm/lib/ashldi3.S
@@ -0,0 +1,48 @@
1/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
2 Free Software Foundation, Inc.
3
4This file is free software; you can redistribute it and/or modify it
5under the terms of the GNU General Public License as published by the
6Free Software Foundation; either version 2, or (at your option) any
7later version.
8
9In addition to the permissions in the GNU General Public License, the
10Free Software Foundation gives you unlimited permission to link the
11compiled version of this file into combinations with other programs,
12and to distribute those combinations without any restriction coming
13from the use of this file. (The General Public License restrictions
14do apply in other respects; for example, they cover modification of
15the file, and distribution when not linked into a combine
16executable.)
17
18This file is distributed in the hope that it will be useful, but
19WITHOUT ANY WARRANTY; without even the implied warranty of
20MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21General Public License for more details.
22
23You should have received a copy of the GNU General Public License
24along with this program; see the file COPYING. If not, write to
25the Free Software Foundation, 51 Franklin Street, Fifth Floor,
26Boston, MA 02110-1301, USA. */
27
28
29#include <linux/linkage.h>
30
31#ifdef __ARMEB__
32#define al r1
33#define ah r0
34#else
35#define al r0
36#define ah r1
37#endif
38
39ENTRY(__ashldi3)
40
41 subs r3, r2, #32
42 rsb ip, r2, #32
43 movmi ah, ah, lsl r2
44 movpl ah, al, lsl r3
45 orrmi ah, ah, al, lsr ip
46 mov al, al, lsl r2
47 mov pc, lr
48
diff --git a/arch/arm/lib/ashldi3.c b/arch/arm/lib/ashldi3.c
deleted file mode 100644
index b62875cfd8f8..000000000000
--- a/arch/arm/lib/ashldi3.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34s64 __ashldi3(s64 u, int b)
35{
36 DIunion w;
37 int bm;
38 DIunion uu;
39
40 if (b == 0)
41 return u;
42
43 uu.ll = u;
44
45 bm = (sizeof(s32) * BITS_PER_UNIT) - b;
46 if (bm <= 0) {
47 w.s.low = 0;
48 w.s.high = (u32) uu.s.low << -bm;
49 } else {
50 u32 carries = (u32) uu.s.low >> bm;
51 w.s.low = (u32) uu.s.low << b;
52 w.s.high = ((u32) uu.s.high << b) | carries;
53 }
54
55 return w.ll;
56}
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
new file mode 100644
index 000000000000..86fb2a90c301
--- /dev/null
+++ b/arch/arm/lib/ashrdi3.S
@@ -0,0 +1,48 @@
1/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
2 Free Software Foundation, Inc.
3
4This file is free software; you can redistribute it and/or modify it
5under the terms of the GNU General Public License as published by the
6Free Software Foundation; either version 2, or (at your option) any
7later version.
8
9In addition to the permissions in the GNU General Public License, the
10Free Software Foundation gives you unlimited permission to link the
11compiled version of this file into combinations with other programs,
12and to distribute those combinations without any restriction coming
13from the use of this file. (The General Public License restrictions
14do apply in other respects; for example, they cover modification of
15the file, and distribution when not linked into a combine
16executable.)
17
18This file is distributed in the hope that it will be useful, but
19WITHOUT ANY WARRANTY; without even the implied warranty of
20MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21General Public License for more details.
22
23You should have received a copy of the GNU General Public License
24along with this program; see the file COPYING. If not, write to
25the Free Software Foundation, 51 Franklin Street, Fifth Floor,
26Boston, MA 02110-1301, USA. */
27
28
29#include <linux/linkage.h>
30
31#ifdef __ARMEB__
32#define al r1
33#define ah r0
34#else
35#define al r0
36#define ah r1
37#endif
38
39ENTRY(__ashrdi3)
40
41 subs r3, r2, #32
42 rsb ip, r2, #32
43 movmi al, al, lsr r2
44 movpl al, ah, asr r3
45 orrmi al, al, ah, lsl ip
46 mov ah, ah, asr r2
47 mov pc, lr
48
diff --git a/arch/arm/lib/ashrdi3.c b/arch/arm/lib/ashrdi3.c
deleted file mode 100644
index 9a8600a7543f..000000000000
--- a/arch/arm/lib/ashrdi3.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34s64 __ashrdi3(s64 u, int b)
35{
36 DIunion w;
37 int bm;
38 DIunion uu;
39
40 if (b == 0)
41 return u;
42
43 uu.ll = u;
44
45 bm = (sizeof(s32) * BITS_PER_UNIT) - b;
46 if (bm <= 0) {
47 /* w.s.high = 1..1 or 0..0 */
48 w.s.high = uu.s.high >> (sizeof(s32) * BITS_PER_UNIT - 1);
49 w.s.low = uu.s.high >> -bm;
50 } else {
51 u32 carries = (u32) uu.s.high << bm;
52 w.s.high = uu.s.high >> b;
53 w.s.low = ((u32) uu.s.low >> b) | carries;
54 }
55
56 return w.ll;
57}
diff --git a/arch/arm/lib/gcclib.h b/arch/arm/lib/gcclib.h
deleted file mode 100644
index 8b6dcc656de7..000000000000
--- a/arch/arm/lib/gcclib.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/* gcclib.h -- definitions for various functions 'borrowed' from gcc-2.95.3 */
2/* I Molton 29/07/01 */
3
4#include <linux/types.h>
5
6#define BITS_PER_UNIT 8
7#define SI_TYPE_SIZE (sizeof(s32) * BITS_PER_UNIT)
8
9#ifdef __ARMEB__
10struct DIstruct {
11 s32 high, low;
12};
13#else
14struct DIstruct {
15 s32 low, high;
16};
17#endif
18
19typedef union {
20 struct DIstruct s;
21 s64 ll;
22} DIunion;
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
new file mode 100644
index 000000000000..46c2ed19ec95
--- /dev/null
+++ b/arch/arm/lib/lshrdi3.S
@@ -0,0 +1,48 @@
1/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
2 Free Software Foundation, Inc.
3
4This file is free software; you can redistribute it and/or modify it
5under the terms of the GNU General Public License as published by the
6Free Software Foundation; either version 2, or (at your option) any
7later version.
8
9In addition to the permissions in the GNU General Public License, the
10Free Software Foundation gives you unlimited permission to link the
11compiled version of this file into combinations with other programs,
12and to distribute those combinations without any restriction coming
13from the use of this file. (The General Public License restrictions
14do apply in other respects; for example, they cover modification of
15the file, and distribution when not linked into a combine
16executable.)
17
18This file is distributed in the hope that it will be useful, but
19WITHOUT ANY WARRANTY; without even the implied warranty of
20MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21General Public License for more details.
22
23You should have received a copy of the GNU General Public License
24along with this program; see the file COPYING. If not, write to
25the Free Software Foundation, 51 Franklin Street, Fifth Floor,
26Boston, MA 02110-1301, USA. */
27
28
29#include <linux/linkage.h>
30
31#ifdef __ARMEB__
32#define al r1
33#define ah r0
34#else
35#define al r0
36#define ah r1
37#endif
38
39ENTRY(__lshrdi3)
40
41 subs r3, r2, #32
42 rsb ip, r2, #32
43 movmi al, al, lsr r2
44 movpl al, ah, lsr r3
45 orrmi al, al, ah, lsl ip
46 mov ah, ah, lsr r2
47 mov pc, lr
48
diff --git a/arch/arm/lib/lshrdi3.c b/arch/arm/lib/lshrdi3.c
deleted file mode 100644
index 3681f49d2b6e..000000000000
--- a/arch/arm/lib/lshrdi3.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34s64 __lshrdi3(s64 u, int b)
35{
36 DIunion w;
37 int bm;
38 DIunion uu;
39
40 if (b == 0)
41 return u;
42
43 uu.ll = u;
44
45 bm = (sizeof(s32) * BITS_PER_UNIT) - b;
46 if (bm <= 0) {
47 w.s.high = 0;
48 w.s.low = (u32) uu.s.high >> -bm;
49 } else {
50 u32 carries = (u32) uu.s.high << bm;
51 w.s.high = (u32) uu.s.high >> b;
52 w.s.low = ((u32) uu.s.low >> b) | carries;
53 }
54
55 return w.ll;
56}
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
new file mode 100644
index 000000000000..c7fbdf005319
--- /dev/null
+++ b/arch/arm/lib/muldi3.S
@@ -0,0 +1,44 @@
1/*
2 * linux/arch/arm/lib/muldi3.S
3 *
4 * Author: Nicolas Pitre
5 * Created: Oct 19, 2005
6 * Copyright: Monta Vista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/linkage.h>
14
15#ifdef __ARMEB__
16#define xh r0
17#define xl r1
18#define yh r2
19#define yl r3
20#else
21#define xl r0
22#define xh r1
23#define yl r2
24#define yh r3
25#endif
26
27ENTRY(__muldi3)
28
29 mul xh, yl, xh
30 mla xh, xl, yh, xh
31 mov ip, xl, asr #16
32 mov yh, yl, asr #16
33 bic xl, xl, ip, lsl #16
34 bic yl, yl, yh, lsl #16
35 mla xh, yh, ip, xh
36 mul yh, xl, yh
37 mul xl, yl, xl
38 mul ip, yl, ip
39 adds xl, xl, yh, lsl #16
40 adc xh, xh, yh, lsr #16
41 adds xl, xl, ip, lsl #16
42 adc xh, xh, ip, lsr #16
43 mov pc, lr
44
diff --git a/arch/arm/lib/muldi3.c b/arch/arm/lib/muldi3.c
deleted file mode 100644
index 0a3b93313f18..000000000000
--- a/arch/arm/lib/muldi3.c
+++ /dev/null
@@ -1,72 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34#define umul_ppmm(xh, xl, a, b) \
35{register u32 __t0, __t1, __t2; \
36 __asm__ ("%@ Inlined umul_ppmm \n\
37 mov %2, %5, lsr #16 \n\
38 mov %0, %6, lsr #16 \n\
39 bic %3, %5, %2, lsl #16 \n\
40 bic %4, %6, %0, lsl #16 \n\
41 mul %1, %3, %4 \n\
42 mul %4, %2, %4 \n\
43 mul %3, %0, %3 \n\
44 mul %0, %2, %0 \n\
45 adds %3, %4, %3 \n\
46 addcs %0, %0, #65536 \n\
47 adds %1, %1, %3, lsl #16 \n\
48 adc %0, %0, %3, lsr #16" \
49 : "=&r" ((u32) (xh)), \
50 "=r" ((u32) (xl)), \
51 "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
52 : "r" ((u32) (a)), \
53 "r" ((u32) (b)));}
54
55#define __umulsidi3(u, v) \
56 ({DIunion __w; \
57 umul_ppmm (__w.s.high, __w.s.low, u, v); \
58 __w.ll; })
59
60s64 __muldi3(s64 u, s64 v)
61{
62 DIunion w;
63 DIunion uu, vv;
64
65 uu.ll = u, vv.ll = v;
66
67 w.ll = __umulsidi3(uu.s.low, vv.s.low);
68 w.s.high += ((u32) uu.s.low * (u32) vv.s.high
69 + (u32) uu.s.high * (u32) vv.s.low);
70
71 return w.ll;
72}
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
new file mode 100644
index 000000000000..112630f93e5d
--- /dev/null
+++ b/arch/arm/lib/ucmpdi2.S
@@ -0,0 +1,35 @@
1/*
2 * linux/arch/arm/lib/ucmpdi2.S
3 *
4 * Author: Nicolas Pitre
5 * Created: Oct 19, 2005
6 * Copyright: Monta Vista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/linkage.h>
14
15#ifdef __ARMEB__
16#define xh r0
17#define xl r1
18#define yh r2
19#define yl r3
20#else
21#define xl r0
22#define xh r1
23#define yl r2
24#define yh r3
25#endif
26
27ENTRY(__ucmpdi2)
28
29 cmp xh, yh
30 cmpeq xl, yl
31 movlo r0, #0
32 moveq r0, #1
33 movhi r0, #2
34 mov pc, lr
35
diff --git a/arch/arm/lib/ucmpdi2.c b/arch/arm/lib/ucmpdi2.c
deleted file mode 100644
index 57f3f2df3850..000000000000
--- a/arch/arm/lib/ucmpdi2.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34int __ucmpdi2(s64 a, s64 b)
35{
36 DIunion au, bu;
37
38 au.ll = a, bu.ll = b;
39
40 if ((u32) au.s.high < (u32) bu.s.high)
41 return 0;
42 else if ((u32) au.s.high > (u32) bu.s.high)
43 return 2;
44 if ((u32) au.s.low < (u32) bu.s.low)
45 return 0;
46 else if ((u32) au.s.low > (u32) bu.s.low)
47 return 2;
48 return 1;
49}
diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c
index cb14b0682cef..837d7f0bda4c 100644
--- a/arch/arm/mach-imx/generic.c
+++ b/arch/arm/mach-imx/generic.c
@@ -26,6 +26,8 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/string.h>
30
29#include <asm/arch/imxfb.h> 31#include <asm/arch/imxfb.h>
30#include <asm/hardware.h> 32#include <asm/hardware.h>
31#include <asm/arch/imx-regs.h> 33#include <asm/arch/imx-regs.h>
diff --git a/arch/arm/mach-integrator/clock.c b/arch/arm/mach-integrator/clock.c
index 56200594db3c..73c360685cad 100644
--- a/arch/arm/mach-integrator/clock.c
+++ b/arch/arm/mach-integrator/clock.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/string.h>
16 17
17#include <asm/semaphore.h> 18#include <asm/semaphore.h>
18#include <asm/hardware/clock.h> 19#include <asm/hardware/clock.h>
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index f368b85f0447..764ceb49470a 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -30,6 +30,7 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/setup.h> 32#include <asm/setup.h>
33#include <asm/param.h> /* HZ */
33#include <asm/mach-types.h> 34#include <asm/mach-types.h>
34#include <asm/hardware/amba.h> 35#include <asm/hardware/amba.h>
35#include <asm/hardware/amba_kmi.h> 36#include <asm/hardware/amba_kmi.h>
diff --git a/arch/arm/mach-integrator/lm.c b/arch/arm/mach-integrator/lm.c
index c5f19d160598..5b41e3a724e1 100644
--- a/arch/arm/mach-integrator/lm.c
+++ b/arch/arm/mach-integrator/lm.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/slab.h>
13 14
14#include <asm/arch/lm.h> 15#include <asm/arch/lm.h>
15 16
diff --git a/arch/arm/mach-iop3xx/iq31244-pci.c b/arch/arm/mach-iop3xx/iq31244-pci.c
index f997daa800bf..c6a973ba8fc6 100644
--- a/arch/arm/mach-iop3xx/iq31244-pci.c
+++ b/arch/arm/mach-iop3xx/iq31244-pci.c
@@ -14,6 +14,8 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/string.h>
18#include <linux/slab.h>
17 19
18#include <asm/hardware.h> 20#include <asm/hardware.h>
19#include <asm/irq.h> 21#include <asm/irq.h>
diff --git a/arch/arm/mach-iop3xx/iq80321-pci.c b/arch/arm/mach-iop3xx/iq80321-pci.c
index 79fea3d20b66..802f6d091b75 100644
--- a/arch/arm/mach-iop3xx/iq80321-pci.c
+++ b/arch/arm/mach-iop3xx/iq80321-pci.c
@@ -14,6 +14,8 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/string.h>
18#include <linux/slab.h>
17 19
18#include <asm/hardware.h> 20#include <asm/hardware.h>
19#include <asm/irq.h> 21#include <asm/irq.h>
diff --git a/arch/arm/mach-iop3xx/iq80331-pci.c b/arch/arm/mach-iop3xx/iq80331-pci.c
index f37a0e26b466..654e450a1311 100644
--- a/arch/arm/mach-iop3xx/iq80331-pci.c
+++ b/arch/arm/mach-iop3xx/iq80331-pci.c
@@ -13,6 +13,8 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/string.h>
17#include <linux/slab.h>
16 18
17#include <asm/hardware.h> 19#include <asm/hardware.h>
18#include <asm/irq.h> 20#include <asm/irq.h>
diff --git a/arch/arm/mach-iop3xx/iq80332-pci.c b/arch/arm/mach-iop3xx/iq80332-pci.c
index b9807aa2aade..65951ffe4631 100644
--- a/arch/arm/mach-iop3xx/iq80332-pci.c
+++ b/arch/arm/mach-iop3xx/iq80332-pci.c
@@ -13,6 +13,8 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/string.h>
17#include <linux/slab.h>
16 18
17#include <asm/hardware.h> 19#include <asm/hardware.h>
18#include <asm/irq.h> 20#include <asm/irq.h>
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 60c8b9d8bb9c..656f73bbcb5a 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -33,6 +33,7 @@
33 33
34#include <asm/arch/pxa-regs.h> 34#include <asm/arch/pxa-regs.h>
35#include <asm/arch/irq.h> 35#include <asm/arch/irq.h>
36#include <asm/arch/irda.h>
36#include <asm/arch/mmc.h> 37#include <asm/arch/mmc.h>
37#include <asm/arch/udc.h> 38#include <asm/arch/udc.h>
38#include <asm/arch/corgi.h> 39#include <asm/arch/corgi.h>
@@ -224,6 +225,22 @@ static struct pxamci_platform_data corgi_mci_platform_data = {
224}; 225};
225 226
226 227
228/*
229 * Irda
230 */
231static void corgi_irda_transceiver_mode(struct device *dev, int mode)
232{
233 if (mode & IR_OFF)
234 GPSR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON);
235 else
236 GPCR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON);
237}
238
239static struct pxaficp_platform_data corgi_ficp_platform_data = {
240 .transceiver_cap = IR_SIRMODE | IR_OFF,
241 .transceiver_mode = corgi_irda_transceiver_mode,
242};
243
227 244
228/* 245/*
229 * USB Device Controller 246 * USB Device Controller
@@ -269,10 +286,13 @@ static void __init corgi_init(void)
269 286
270 corgi_ssp_set_machinfo(&corgi_ssp_machinfo); 287 corgi_ssp_set_machinfo(&corgi_ssp_machinfo);
271 288
289 pxa_gpio_mode(CORGI_GPIO_IR_ON | GPIO_OUT);
272 pxa_gpio_mode(CORGI_GPIO_USB_PULLUP | GPIO_OUT); 290 pxa_gpio_mode(CORGI_GPIO_USB_PULLUP | GPIO_OUT);
273 pxa_gpio_mode(CORGI_GPIO_HSYNC | GPIO_IN); 291 pxa_gpio_mode(CORGI_GPIO_HSYNC | GPIO_IN);
292
274 pxa_set_udc_info(&udc_info); 293 pxa_set_udc_info(&udc_info);
275 pxa_set_mci_info(&corgi_mci_platform_data); 294 pxa_set_mci_info(&corgi_mci_platform_data);
295 pxa_set_ficp_info(&corgi_ficp_platform_data);
276 296
277 scoop_num = 1; 297 scoop_num = 1;
278 scoop_devs = &corgi_pcmcia_scoop[0]; 298 scoop_devs = &corgi_pcmcia_scoop[0];
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 3248bc9b9495..9c0289333301 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -23,6 +23,7 @@
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/pm.h> 25#include <linux/pm.h>
26#include <linux/string.h>
26 27
27#include <asm/hardware.h> 28#include <asm/hardware.h>
28#include <asm/irq.h> 29#include <asm/irq.h>
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index f25638810017..6d413f6701a7 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -32,6 +32,7 @@
32#include <asm/arch/irq.h> 32#include <asm/arch/irq.h>
33#include <asm/arch/mmc.h> 33#include <asm/arch/mmc.h>
34#include <asm/arch/udc.h> 34#include <asm/arch/udc.h>
35#include <asm/arch/irda.h>
35#include <asm/arch/poodle.h> 36#include <asm/arch/poodle.h>
36#include <asm/arch/pxafb.h> 37#include <asm/arch/pxafb.h>
37 38
@@ -152,6 +153,24 @@ static struct pxamci_platform_data poodle_mci_platform_data = {
152 153
153 154
154/* 155/*
156 * Irda
157 */
158static void poodle_irda_transceiver_mode(struct device *dev, int mode)
159{
160 if (mode & IR_OFF) {
161 GPSR(POODLE_GPIO_IR_ON) = GPIO_bit(POODLE_GPIO_IR_ON);
162 } else {
163 GPCR(POODLE_GPIO_IR_ON) = GPIO_bit(POODLE_GPIO_IR_ON);
164 }
165}
166
167static struct pxaficp_platform_data poodle_ficp_platform_data = {
168 .transceiver_cap = IR_SIRMODE | IR_OFF,
169 .transceiver_mode = poodle_irda_transceiver_mode,
170};
171
172
173/*
155 * USB Device Controller 174 * USB Device Controller
156 */ 175 */
157static void poodle_udc_command(int cmd) 176static void poodle_udc_command(int cmd)
@@ -244,8 +263,10 @@ static void __init poodle_init(void)
244 263
245 set_pxa_fb_info(&poodle_fb_info); 264 set_pxa_fb_info(&poodle_fb_info);
246 pxa_gpio_mode(POODLE_GPIO_USB_PULLUP | GPIO_OUT); 265 pxa_gpio_mode(POODLE_GPIO_USB_PULLUP | GPIO_OUT);
266 pxa_gpio_mode(POODLE_GPIO_IR_ON | GPIO_OUT);
247 pxa_set_udc_info(&udc_info); 267 pxa_set_udc_info(&udc_info);
248 pxa_set_mci_info(&poodle_mci_platform_data); 268 pxa_set_mci_info(&poodle_mci_platform_data);
269 pxa_set_ficp_info(&poodle_ficp_platform_data);
249 270
250 scoop_num = 1; 271 scoop_num = 1;
251 scoop_devs = &poodle_pcmcia_scoop[0]; 272 scoop_devs = &poodle_pcmcia_scoop[0];
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index d0ab428c2d7d..b838842b6a20 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -34,6 +34,7 @@
34 34
35#include <asm/arch/pxa-regs.h> 35#include <asm/arch/pxa-regs.h>
36#include <asm/arch/irq.h> 36#include <asm/arch/irq.h>
37#include <asm/arch/irda.h>
37#include <asm/arch/mmc.h> 38#include <asm/arch/mmc.h>
38#include <asm/arch/udc.h> 39#include <asm/arch/udc.h>
39#include <asm/arch/pxafb.h> 40#include <asm/arch/pxafb.h>
@@ -277,6 +278,23 @@ static struct pxamci_platform_data spitz_mci_platform_data = {
277 278
278 279
279/* 280/*
281 * Irda
282 */
283static void spitz_irda_transceiver_mode(struct device *dev, int mode)
284{
285 if (mode & IR_OFF)
286 set_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON);
287 else
288 reset_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON);
289}
290
291static struct pxaficp_platform_data spitz_ficp_platform_data = {
292 .transceiver_cap = IR_SIRMODE | IR_OFF,
293 .transceiver_mode = spitz_irda_transceiver_mode,
294};
295
296
297/*
280 * Spitz PXA Framebuffer 298 * Spitz PXA Framebuffer
281 */ 299 */
282static struct pxafb_mach_info spitz_pxafb_info __initdata = { 300static struct pxafb_mach_info spitz_pxafb_info __initdata = {
@@ -326,6 +344,7 @@ static void __init common_init(void)
326 344
327 platform_add_devices(devices, ARRAY_SIZE(devices)); 345 platform_add_devices(devices, ARRAY_SIZE(devices));
328 pxa_set_mci_info(&spitz_mci_platform_data); 346 pxa_set_mci_info(&spitz_mci_platform_data);
347 pxa_set_ficp_info(&spitz_ficp_platform_data);
329 set_pxa_fb_parent(&spitzssp_device.dev); 348 set_pxa_fb_parent(&spitzssp_device.dev);
330 set_pxa_fb_info(&spitz_pxafb_info); 349 set_pxa_fb_info(&spitz_pxafb_info);
331} 350}
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index f94b0fbcdcc8..83eba8b54816 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -17,6 +17,7 @@
17#include <linux/pm.h> 17#include <linux/pm.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/ioport.h> 19#include <linux/ioport.h>
20#include <linux/sched.h> /* just for sched_clock() - funny that */
20 21
21#include <asm/div64.h> 22#include <asm/div64.h>
22#include <asm/hardware.h> 23#include <asm/hardware.h>
diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c
index 48025c2b9987..b96a2ea15d41 100644
--- a/arch/arm/mach-versatile/clock.c
+++ b/arch/arm/mach-versatile/clock.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/string.h>
16 17
17#include <asm/semaphore.h> 18#include <asm/semaphore.h>
18#include <asm/hardware/clock.h> 19#include <asm/hardware/clock.h>
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 27d041574ea7..269ce6913ee9 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -22,9 +22,7 @@
22#endif 22#endif
23 23
24#define from_address (0xffff8000) 24#define from_address (0xffff8000)
25#define from_pgprot PAGE_KERNEL
26#define to_address (0xffffc000) 25#define to_address (0xffffc000)
27#define to_pgprot PAGE_KERNEL
28 26
29#define TOP_PTE(x) pte_offset_kernel(top_pmd, x) 27#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
30 28
@@ -34,7 +32,7 @@ static DEFINE_SPINLOCK(v6_lock);
34 * Copy the user page. No aliasing to deal with so we can just 32 * Copy the user page. No aliasing to deal with so we can just
35 * attack the kernel's existing mapping of these pages. 33 * attack the kernel's existing mapping of these pages.
36 */ 34 */
37void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr) 35static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
38{ 36{
39 copy_page(kto, kfrom); 37 copy_page(kto, kfrom);
40} 38}
@@ -43,7 +41,7 @@ void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long v
43 * Clear the user page. No aliasing to deal with so we can just 41 * Clear the user page. No aliasing to deal with so we can just
44 * attack the kernel's existing mapping of this page. 42 * attack the kernel's existing mapping of this page.
45 */ 43 */
46void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr) 44static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
47{ 45{
48 clear_page(kaddr); 46 clear_page(kaddr);
49} 47}
@@ -51,7 +49,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
51/* 49/*
52 * Copy the page, taking account of the cache colour. 50 * Copy the page, taking account of the cache colour.
53 */ 51 */
54void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr) 52static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
55{ 53{
56 unsigned int offset = CACHE_COLOUR(vaddr); 54 unsigned int offset = CACHE_COLOUR(vaddr);
57 unsigned long from, to; 55 unsigned long from, to;
@@ -72,8 +70,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
72 */ 70 */
73 spin_lock(&v6_lock); 71 spin_lock(&v6_lock);
74 72
75 set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot)); 73 set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL));
76 set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot)); 74 set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL));
77 75
78 from = from_address + (offset << PAGE_SHIFT); 76 from = from_address + (offset << PAGE_SHIFT);
79 to = to_address + (offset << PAGE_SHIFT); 77 to = to_address + (offset << PAGE_SHIFT);
@@ -91,7 +89,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
91 * so remap the kernel page into the same cache colour as the user 89 * so remap the kernel page into the same cache colour as the user
92 * page. 90 * page.
93 */ 91 */
94void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) 92static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
95{ 93{
96 unsigned int offset = CACHE_COLOUR(vaddr); 94 unsigned int offset = CACHE_COLOUR(vaddr);
97 unsigned long to = to_address + (offset << PAGE_SHIFT); 95 unsigned long to = to_address + (offset << PAGE_SHIFT);
@@ -112,7 +110,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
112 */ 110 */
113 spin_lock(&v6_lock); 111 spin_lock(&v6_lock);
114 112
115 set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot)); 113 set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL));
116 flush_tlb_kernel_page(to); 114 flush_tlb_kernel_page(to);
117 clear_page((void *)to); 115 clear_page((void *)to);
118 116
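
The aliasing copy/clear paths above pick a temporary kernel mapping whose cache colour matches the user address, then do the copy through that mapping. A minimal user-space sketch of just the address arithmetic, assuming four page colours (a 16K-aliasing VIPT D-cache) and the 0xffff8000/0xffffc000 bases from the file; it is illustrative only, not kernel code:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define FROM_ADDRESS    0xffff8000UL
    #define TO_ADDRESS      0xffffc000UL
    #define CACHE_COLOUR(v) (((v) >> PAGE_SHIFT) & 3)   /* assumed: four page colours */

    int main(void)
    {
        unsigned long vaddr  = 0x4000b000UL;             /* example user address */
        unsigned long offset = CACHE_COLOUR(vaddr);
        unsigned long from   = FROM_ADDRESS + (offset << PAGE_SHIFT);
        unsigned long to     = TO_ADDRESS   + (offset << PAGE_SHIFT);

        printf("colour %lu -> from %#lx, to %#lx\n", offset, from, to);
        return 0;
    }
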
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 52a58b2da288..a020fe16428f 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/string.h>
16 17
17#include <asm/io.h> 18#include <asm/io.h>
18#include <asm/semaphore.h> 19#include <asm/semaphore.h>
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
index 8a52124de0e1..cf7e977d18c8 100644
--- a/arch/arm26/kernel/ptrace.c
+++ b/arch/arm26/kernel/ptrace.c
@@ -665,7 +665,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
665 return ret; 665 return ret;
666} 666}
667 667
668asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 668asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
669{ 669{
670 struct task_struct *child; 670 struct task_struct *child;
671 int ret; 671 int ret;
diff --git a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c
index e66aedd02fad..335525339ad6 100644
--- a/arch/arm26/kernel/time.c
+++ b/arch/arm26/kernel/time.c
@@ -34,10 +34,6 @@
34#include <asm/irq.h> 34#include <asm/irq.h>
35#include <asm/ioc.h> 35#include <asm/ioc.h>
36 36
37u64 jiffies_64 = INITIAL_JIFFIES;
38
39EXPORT_SYMBOL(jiffies_64);
40
41extern unsigned long wall_jiffies; 37extern unsigned long wall_jiffies;
42 38
43/* this needs a better home */ 39/* this needs a better home */
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index 11ab3836aac6..56b038c8d482 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -140,6 +140,7 @@
140#include <linux/kernel.h> 140#include <linux/kernel.h>
141#include <linux/config.h> 141#include <linux/config.h>
142#include <linux/init.h> 142#include <linux/init.h>
143#include <linux/slab.h>
143 144
144#include <linux/mtd/concat.h> 145#include <linux/mtd/concat.h>
145#include <linux/mtd/map.h> 146#include <linux/mtd/map.h>
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 78ed52b1cdac..b679f983b90a 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -20,6 +20,7 @@
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/config.h> 21#include <linux/config.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/slab.h>
23 24
24#include <linux/mtd/concat.h> 25#include <linux/mtd/concat.h>
25#include <linux/mtd/map.h> 26#include <linux/mtd/map.h>
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index a2d99b4aedcd..66ba8898db07 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -31,10 +31,7 @@
31#include <linux/timex.h> 31#include <linux/timex.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/profile.h> 33#include <linux/profile.h>
34 34#include <linux/sched.h> /* just for sched_clock() - funny that */
35u64 jiffies_64 = INITIAL_JIFFIES;
36
37EXPORT_SYMBOL(jiffies_64);
38 35
39int have_rtc; /* used to remember if we have an RTC or not */; 36int have_rtc; /* used to remember if we have an RTC or not */;
40 37
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index cbe03cba9f02..cb335a14a315 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -106,7 +106,7 @@ void ptrace_enable(struct task_struct *child)
106 child->thread.frame0->__status |= REG__STATUS_STEP; 106 child->thread.frame0->__status |= REG__STATUS_STEP;
107} 107}
108 108
109asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 109asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
110{ 110{
111 struct task_struct *child; 111 struct task_struct *child;
112 unsigned long tmp; 112 unsigned long tmp;
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index f43b734482e3..2e9741227b73 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -34,9 +34,6 @@
34 34
35extern unsigned long wall_jiffies; 35extern unsigned long wall_jiffies;
36 36
37u64 jiffies_64 = INITIAL_JIFFIES;
38EXPORT_SYMBOL(jiffies_64);
39
40unsigned long __nongprelbss __clkin_clock_speed_HZ; 37unsigned long __nongprelbss __clkin_clock_speed_HZ;
41unsigned long __nongprelbss __ext_bus_clock_speed_HZ; 38unsigned long __nongprelbss __ext_bus_clock_speed_HZ;
42unsigned long __nongprelbss __res_bus_clock_speed_HZ; 39unsigned long __nongprelbss __res_bus_clock_speed_HZ;
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 05c15e869777..a569fe4aa284 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -57,7 +57,7 @@ void ptrace_disable(struct task_struct *child)
57 h8300_disable_trace(child); 57 h8300_disable_trace(child);
58} 58}
59 59
60asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 60asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
61{ 61{
62 struct task_struct *child; 62 struct task_struct *child;
63 int ret; 63 int ret;
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index af8c5d2057dd..688a5100604c 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -32,10 +32,6 @@
32 32
33#define TICK_SIZE (tick_nsec / 1000) 33#define TICK_SIZE (tick_nsec / 1000)
34 34
35u64 jiffies_64;
36
37EXPORT_SYMBOL(jiffies_64);
38
39/* 35/*
40 * timer_interrupt() needs to keep up the real-time clock, 36 * timer_interrupt() needs to keep up the real-time clock,
41 * as well as call the "do_timer()" routine every clocktick 37 * as well as call the "do_timer()" routine every clocktick
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d2703cda61ea..5383e5e2d9b7 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -5,7 +5,7 @@
5 5
6mainmenu "Linux Kernel Configuration" 6mainmenu "Linux Kernel Configuration"
7 7
8config X86 8config X86_32
9 bool 9 bool
10 default y 10 default y
11 help 11 help
@@ -18,6 +18,10 @@ config SEMAPHORE_SLEEPERS
18 bool 18 bool
19 default y 19 default y
20 20
21config X86
22 bool
23 default y
24
21config MMU 25config MMU
22 bool 26 bool
23 default y 27 default y
@@ -151,304 +155,7 @@ config ES7000_CLUSTERED_APIC
151 default y 155 default y
152 depends on SMP && X86_ES7000 && MPENTIUMIII 156 depends on SMP && X86_ES7000 && MPENTIUMIII
153 157
154if !X86_ELAN 158source "arch/i386/Kconfig.cpu"
155
156choice
157 prompt "Processor family"
158 default M686
159
160config M386
161 bool "386"
162 ---help---
163 This is the processor type of your CPU. This information is used for
164 optimizing purposes. In order to compile a kernel that can run on
165 all x86 CPU types (albeit not optimally fast), you can specify
166 "386" here.
167
168 The kernel will not necessarily run on earlier architectures than
169 the one you have chosen, e.g. a Pentium optimized kernel will run on
170 a PPro, but not necessarily on a i486.
171
172 Here are the settings recommended for greatest speed:
173 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
174 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
175 will run on a 386 class machine.
176 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
177 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
178 - "586" for generic Pentium CPUs lacking the TSC
179 (time stamp counter) register.
180 - "Pentium-Classic" for the Intel Pentium.
181 - "Pentium-MMX" for the Intel Pentium MMX.
182 - "Pentium-Pro" for the Intel Pentium Pro.
183 - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
184 - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
185 - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
186 - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
187 - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
188 - "Crusoe" for the Transmeta Crusoe series.
189 - "Efficeon" for the Transmeta Efficeon series.
190 - "Winchip-C6" for original IDT Winchip.
191 - "Winchip-2" for IDT Winchip 2.
192 - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
193 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
194 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
195 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
196
197 If you don't know what to do, choose "386".
198
199config M486
200 bool "486"
201 help
202 Select this for a 486 series processor, either Intel or one of the
203 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
204 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
205 U5S.
206
207config M586
208 bool "586/K5/5x86/6x86/6x86MX"
209 help
210 Select this for an 586 or 686 series processor such as the AMD K5,
211 the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
212 assume the RDTSC (Read Time Stamp Counter) instruction.
213
214config M586TSC
215 bool "Pentium-Classic"
216 help
217 Select this for a Pentium Classic processor with the RDTSC (Read
218 Time Stamp Counter) instruction for benchmarking.
219
220config M586MMX
221 bool "Pentium-MMX"
222 help
223 Select this for a Pentium with the MMX graphics/multimedia
224 extended instructions.
225
226config M686
227 bool "Pentium-Pro"
228 help
229 Select this for Intel Pentium Pro chips. This enables the use of
230 Pentium Pro extended instructions, and disables the init-time guard
231 against the f00f bug found in earlier Pentiums.
232
233config MPENTIUMII
234 bool "Pentium-II/Celeron(pre-Coppermine)"
235 help
236 Select this for Intel chips based on the Pentium-II and
237 pre-Coppermine Celeron core. This option enables an unaligned
238 copy optimization, compiles the kernel with optimization flags
239 tailored for the chip, and applies any applicable Pentium Pro
240 optimizations.
241
242config MPENTIUMIII
243 bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
244 help
245 Select this for Intel chips based on the Pentium-III and
246 Celeron-Coppermine core. This option enables use of some
247 extended prefetch instructions in addition to the Pentium II
248 extensions.
249
250config MPENTIUMM
251 bool "Pentium M"
252 help
253 Select this for Intel Pentium M (not Pentium-4 M)
254 notebook chips.
255
256config MPENTIUM4
257 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
258 help
259 Select this for Intel Pentium 4 chips. This includes the
260 Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
261 (not Pentium M) chips. This option enables compile flags
262 optimized for the chip, uses the correct cache shift, and
263 applies any applicable Pentium III optimizations.
264
265config MK6
266 bool "K6/K6-II/K6-III"
267 help
268 Select this for an AMD K6-family processor. Enables use of
269 some extended instructions, and passes appropriate optimization
270 flags to GCC.
271
272config MK7
273 bool "Athlon/Duron/K7"
274 help
275 Select this for an AMD Athlon K7-family processor. Enables use of
276 some extended instructions, and passes appropriate optimization
277 flags to GCC.
278
279config MK8
280 bool "Opteron/Athlon64/Hammer/K8"
281 help
282 Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
283 use of some extended instructions, and passes appropriate optimization
284 flags to GCC.
285
286config MCRUSOE
287 bool "Crusoe"
288 help
289 Select this for a Transmeta Crusoe processor. Treats the processor
290 like a 586 with TSC, and sets some GCC optimization flags (like a
291 Pentium Pro with no alignment requirements).
292
293config MEFFICEON
294 bool "Efficeon"
295 help
296 Select this for a Transmeta Efficeon processor.
297
298config MWINCHIPC6
299 bool "Winchip-C6"
300 help
301 Select this for an IDT Winchip C6 chip. Linux and GCC
302 treat this chip as a 586TSC with some extended instructions
303 and alignment requirements.
304
305config MWINCHIP2
306 bool "Winchip-2"
307 help
308 Select this for an IDT Winchip-2. Linux and GCC
309 treat this chip as a 586TSC with some extended instructions
310 and alignment requirements.
311
312config MWINCHIP3D
313 bool "Winchip-2A/Winchip-3"
314 help
315 Select this for an IDT Winchip-2A or 3. Linux and GCC
316 treat this chip as a 586TSC with some extended instructions
317 and alignment reqirements. Also enable out of order memory
318 stores for this CPU, which can increase performance of some
319 operations.
320
321config MGEODEGX1
322 bool "GeodeGX1"
323 help
324 Select this for a Geode GX1 (Cyrix MediaGX) chip.
325
326config MCYRIXIII
327 bool "CyrixIII/VIA-C3"
328 help
329 Select this for a Cyrix III or C3 chip. Presently Linux and GCC
330 treat this chip as a generic 586. Whilst the CPU is 686 class,
331 it lacks the cmov extension which gcc assumes is present when
332 generating 686 code.
333 Note that Nehemiah (Model 9) and above will not boot with this
334 kernel due to them lacking the 3DNow! instructions used in earlier
335 incarnations of the CPU.
336
337config MVIAC3_2
338 bool "VIA C3-2 (Nehemiah)"
339 help
340 Select this for a VIA C3 "Nehemiah". Selecting this enables usage
341 of SSE and tells gcc to treat the CPU as a 686.
342 Note, this kernel will not boot on older (pre model 9) C3s.
343
344endchoice
345
346config X86_GENERIC
347 bool "Generic x86 support"
348 help
349 Instead of just including optimizations for the selected
350 x86 variant (e.g. PII, Crusoe or Athlon), include some more
351 generic optimizations as well. This will make the kernel
352 perform better on x86 CPUs other than that selected.
353
354 This is really intended for distributors who need more
355 generic optimizations.
356
357endif
358
359#
360# Define implied options from the CPU selection here
361#
362config X86_CMPXCHG
363 bool
364 depends on !M386
365 default y
366
367config X86_XADD
368 bool
369 depends on !M386
370 default y
371
372config X86_L1_CACHE_SHIFT
373 int
374 default "7" if MPENTIUM4 || X86_GENERIC
375 default "4" if X86_ELAN || M486 || M386
376 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
377 default "6" if MK7 || MK8 || MPENTIUMM
378
379config RWSEM_GENERIC_SPINLOCK
380 bool
381 depends on M386
382 default y
383
384config RWSEM_XCHGADD_ALGORITHM
385 bool
386 depends on !M386
387 default y
388
389config GENERIC_CALIBRATE_DELAY
390 bool
391 default y
392
393config X86_PPRO_FENCE
394 bool
395 depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
396 default y
397
398config X86_F00F_BUG
399 bool
400 depends on M586MMX || M586TSC || M586 || M486 || M386
401 default y
402
403config X86_WP_WORKS_OK
404 bool
405 depends on !M386
406 default y
407
408config X86_INVLPG
409 bool
410 depends on !M386
411 default y
412
413config X86_BSWAP
414 bool
415 depends on !M386
416 default y
417
418config X86_POPAD_OK
419 bool
420 depends on !M386
421 default y
422
423config X86_ALIGNMENT_16
424 bool
425 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
426 default y
427
428config X86_GOOD_APIC
429 bool
430 depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
431 default y
432
433config X86_INTEL_USERCOPY
434 bool
435 depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
436 default y
437
438config X86_USE_PPRO_CHECKSUM
439 bool
440 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
441 default y
442
443config X86_USE_3DNOW
444 bool
445 depends on MCYRIXIII || MK7
446 default y
447
448config X86_OOSTORE
449 bool
450 depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
451 default y
452 159
453config HPET_TIMER 160config HPET_TIMER
454 bool "HPET Timer Support" 161 bool "HPET Timer Support"
@@ -561,11 +268,6 @@ config X86_VISWS_APIC
561 depends on X86_VISWS 268 depends on X86_VISWS
562 default y 269 default y
563 270
564config X86_TSC
565 bool
566 depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ
567 default y
568
569config X86_MCE 271config X86_MCE
570 bool "Machine Check Exception" 272 bool "Machine Check Exception"
571 depends on !X86_VOYAGER 273 depends on !X86_VOYAGER
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
new file mode 100644
index 000000000000..53bbb3c008ee
--- /dev/null
+++ b/arch/i386/Kconfig.cpu
@@ -0,0 +1,309 @@
1# Put here option for CPU selection and depending optimization
2if !X86_ELAN
3
4choice
5 prompt "Processor family"
6 default M686
7
8config M386
9 bool "386"
10 ---help---
11 This is the processor type of your CPU. This information is used for
12 optimizing purposes. In order to compile a kernel that can run on
13 all x86 CPU types (albeit not optimally fast), you can specify
14 "386" here.
15
16 The kernel will not necessarily run on earlier architectures than
17 the one you have chosen, e.g. a Pentium optimized kernel will run on
18 a PPro, but not necessarily on a i486.
19
20 Here are the settings recommended for greatest speed:
21 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
22 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
23 will run on a 386 class machine.
24 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
25 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
26 - "586" for generic Pentium CPUs lacking the TSC
27 (time stamp counter) register.
28 - "Pentium-Classic" for the Intel Pentium.
29 - "Pentium-MMX" for the Intel Pentium MMX.
30 - "Pentium-Pro" for the Intel Pentium Pro.
31 - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
32 - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
33 - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
34 - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
35 - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
36 - "Crusoe" for the Transmeta Crusoe series.
37 - "Efficeon" for the Transmeta Efficeon series.
38 - "Winchip-C6" for original IDT Winchip.
39 - "Winchip-2" for IDT Winchip 2.
40 - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
42 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
43	  - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
44
45 If you don't know what to do, choose "386".
46
47config M486
48 bool "486"
49 help
50 Select this for a 486 series processor, either Intel or one of the
51 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
52 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
53 U5S.
54
55config M586
56 bool "586/K5/5x86/6x86/6x86MX"
57 help
58 Select this for an 586 or 686 series processor such as the AMD K5,
59 the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
60 assume the RDTSC (Read Time Stamp Counter) instruction.
61
62config M586TSC
63 bool "Pentium-Classic"
64 help
65 Select this for a Pentium Classic processor with the RDTSC (Read
66 Time Stamp Counter) instruction for benchmarking.
67
68config M586MMX
69 bool "Pentium-MMX"
70 help
71 Select this for a Pentium with the MMX graphics/multimedia
72 extended instructions.
73
74config M686
75 bool "Pentium-Pro"
76 help
77 Select this for Intel Pentium Pro chips. This enables the use of
78 Pentium Pro extended instructions, and disables the init-time guard
79 against the f00f bug found in earlier Pentiums.
80
81config MPENTIUMII
82 bool "Pentium-II/Celeron(pre-Coppermine)"
83 help
84 Select this for Intel chips based on the Pentium-II and
85 pre-Coppermine Celeron core. This option enables an unaligned
86 copy optimization, compiles the kernel with optimization flags
87 tailored for the chip, and applies any applicable Pentium Pro
88 optimizations.
89
90config MPENTIUMIII
91 bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
92 help
93 Select this for Intel chips based on the Pentium-III and
94 Celeron-Coppermine core. This option enables use of some
95 extended prefetch instructions in addition to the Pentium II
96 extensions.
97
98config MPENTIUMM
99 bool "Pentium M"
100 help
101 Select this for Intel Pentium M (not Pentium-4 M)
102 notebook chips.
103
104config MPENTIUM4
105 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
106 help
107 Select this for Intel Pentium 4 chips. This includes the
108 Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
109 (not Pentium M) chips. This option enables compile flags
110 optimized for the chip, uses the correct cache shift, and
111 applies any applicable Pentium III optimizations.
112
113config MK6
114 bool "K6/K6-II/K6-III"
115 help
116 Select this for an AMD K6-family processor. Enables use of
117 some extended instructions, and passes appropriate optimization
118 flags to GCC.
119
120config MK7
121 bool "Athlon/Duron/K7"
122 help
123 Select this for an AMD Athlon K7-family processor. Enables use of
124 some extended instructions, and passes appropriate optimization
125 flags to GCC.
126
127config MK8
128 bool "Opteron/Athlon64/Hammer/K8"
129 help
130 Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
131 use of some extended instructions, and passes appropriate optimization
132 flags to GCC.
133
134config MCRUSOE
135 bool "Crusoe"
136 help
137 Select this for a Transmeta Crusoe processor. Treats the processor
138 like a 586 with TSC, and sets some GCC optimization flags (like a
139 Pentium Pro with no alignment requirements).
140
141config MEFFICEON
142 bool "Efficeon"
143 help
144 Select this for a Transmeta Efficeon processor.
145
146config MWINCHIPC6
147 bool "Winchip-C6"
148 help
149 Select this for an IDT Winchip C6 chip. Linux and GCC
150 treat this chip as a 586TSC with some extended instructions
151 and alignment requirements.
152
153config MWINCHIP2
154 bool "Winchip-2"
155 help
156 Select this for an IDT Winchip-2. Linux and GCC
157 treat this chip as a 586TSC with some extended instructions
158 and alignment requirements.
159
160config MWINCHIP3D
161 bool "Winchip-2A/Winchip-3"
162 help
163 Select this for an IDT Winchip-2A or 3. Linux and GCC
164 treat this chip as a 586TSC with some extended instructions
165	  and alignment requirements. Also enable out of order memory
166 stores for this CPU, which can increase performance of some
167 operations.
168
169config MGEODEGX1
170 bool "GeodeGX1"
171 help
172 Select this for a Geode GX1 (Cyrix MediaGX) chip.
173
174config MCYRIXIII
175 bool "CyrixIII/VIA-C3"
176 help
177 Select this for a Cyrix III or C3 chip. Presently Linux and GCC
178 treat this chip as a generic 586. Whilst the CPU is 686 class,
179 it lacks the cmov extension which gcc assumes is present when
180 generating 686 code.
181 Note that Nehemiah (Model 9) and above will not boot with this
182 kernel due to them lacking the 3DNow! instructions used in earlier
183 incarnations of the CPU.
184
185config MVIAC3_2
186 bool "VIA C3-2 (Nehemiah)"
187 help
188 Select this for a VIA C3 "Nehemiah". Selecting this enables usage
189 of SSE and tells gcc to treat the CPU as a 686.
190 Note, this kernel will not boot on older (pre model 9) C3s.
191
192endchoice
193
194config X86_GENERIC
195 bool "Generic x86 support"
196 help
197 Instead of just including optimizations for the selected
198 x86 variant (e.g. PII, Crusoe or Athlon), include some more
199 generic optimizations as well. This will make the kernel
200 perform better on x86 CPUs other than that selected.
201
202 This is really intended for distributors who need more
203 generic optimizations.
204
205endif
206
207#
208# Define implied options from the CPU selection here
209#
210config X86_CMPXCHG
211 bool
212 depends on !M386
213 default y
214
215config X86_XADD
216 bool
217 depends on !M386
218 default y
219
220config X86_L1_CACHE_SHIFT
221 int
222 default "7" if MPENTIUM4 || X86_GENERIC
223 default "4" if X86_ELAN || M486 || M386
224 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
225 default "6" if MK7 || MK8 || MPENTIUMM
226
227config RWSEM_GENERIC_SPINLOCK
228 bool
229 depends on M386
230 default y
231
232config RWSEM_XCHGADD_ALGORITHM
233 bool
234 depends on !M386
235 default y
236
237config GENERIC_CALIBRATE_DELAY
238 bool
239 default y
240
241config X86_PPRO_FENCE
242 bool
243 depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
244 default y
245
246config X86_F00F_BUG
247 bool
248 depends on M586MMX || M586TSC || M586 || M486 || M386
249 default y
250
251config X86_WP_WORKS_OK
252 bool
253 depends on !M386
254 default y
255
256config X86_INVLPG
257 bool
258 depends on !M386
259 default y
260
261config X86_BSWAP
262 bool
263 depends on !M386
264 default y
265
266config X86_POPAD_OK
267 bool
268 depends on !M386
269 default y
270
271config X86_CMPXCHG64
272 bool
273 depends on !M386 && !M486
274 default y
275
276config X86_ALIGNMENT_16
277 bool
278 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
279 default y
280
281config X86_GOOD_APIC
282 bool
283 depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
284 default y
285
286config X86_INTEL_USERCOPY
287 bool
288 depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
289 default y
290
291config X86_USE_PPRO_CHECKSUM
292 bool
293 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
294 default y
295
296config X86_USE_3DNOW
297 bool
298 depends on MCYRIXIII || MK7
299 default y
300
301config X86_OOSTORE
302 bool
303 depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
304 default y
305
306config X86_TSC
307 bool
308 depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ
309 default y
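
X86_L1_CACHE_SHIFT above stores the log2 of the L1 line size the kernel aligns for; the byte value is simply 1 << shift. A small sketch, assuming plain user-space C, that prints what each Kconfig default selects:

    #include <stdio.h>

    int main(void)
    {
        int shifts[] = { 4, 5, 6, 7 };   /* the defaults chosen by X86_L1_CACHE_SHIFT */

        for (int i = 0; i < 4; i++)
            printf("shift %d -> %d-byte L1 cache lines\n", shifts[i], 1 << shifts[i]);
        return 0;
    }
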
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 09951990a622..d121ea18460f 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -34,35 +34,8 @@ CFLAGS += -pipe -msoft-float
34# prevent gcc from keeping the stack 16 byte aligned 34# prevent gcc from keeping the stack 16 byte aligned
35CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) 35CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
36 36
37align := $(cc-option-align) 37# CPU-specific tuning. Anything which can be shared with UML should go here.
38cflags-$(CONFIG_M386) += -march=i386 38include $(srctree)/arch/i386/Makefile.cpu
39cflags-$(CONFIG_M486) += -march=i486
40cflags-$(CONFIG_M586) += -march=i586
41cflags-$(CONFIG_M586TSC) += -march=i586
42cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586)
43cflags-$(CONFIG_M686) += -march=i686
44cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call cc-option,-mtune=pentium2)
45cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call cc-option,-mtune=pentium3)
46cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call cc-option,-mtune=pentium3)
47cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call cc-option,-mtune=pentium4)
48cflags-$(CONFIG_MK6) += -march=k6
49# Please note, that patches that add -march=athlon-xp and friends are pointless.
50# They make zero difference whatsosever to performance at this time.
51cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
52cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
53cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
54cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call cc-option,-mtune=pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
55cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
56cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
57cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
58cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
59cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
60
61# AMD Elan support
62cflags-$(CONFIG_X86_ELAN) += -march=i486
63
64# Geode GX1 support
65cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486)
66 39
67# -mregparm=3 works ok on gcc-3.0 and later 40# -mregparm=3 works ok on gcc-3.0 and later
68# 41#
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
new file mode 100644
index 000000000000..8e51456df23d
--- /dev/null
+++ b/arch/i386/Makefile.cpu
@@ -0,0 +1,41 @@
1# CPU tuning section - shared with UML.
2# Must change only cflags-y (or [yn]), not CFLAGS! That makes a difference for UML.
3
4#-mtune exists since gcc 3.4, and some -mcpu flavors didn't exist in gcc 2.95.
5HAS_MTUNE := $(call cc-option-yn, -mtune=i386)
6ifeq ($(HAS_MTUNE),y)
7tune = $(call cc-option,-mtune=$(1),)
8else
9tune = $(call cc-option,-mcpu=$(1),)
10endif
11
12align := $(cc-option-align)
13cflags-$(CONFIG_M386) += -march=i386
14cflags-$(CONFIG_M486) += -march=i486
15cflags-$(CONFIG_M586) += -march=i586
16cflags-$(CONFIG_M586TSC) += -march=i586
17cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586)
18cflags-$(CONFIG_M686) += -march=i686
19cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2)
20cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3)
21cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call tune,pentium3)
22cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call tune,pentium4)
23cflags-$(CONFIG_MK6) += -march=k6
24# Please note, that patches that add -march=athlon-xp and friends are pointless.
25# They make zero difference whatsoever to performance at this time.
26cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
27cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
28cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
29cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
30cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
31cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
32cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
33cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
34cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
35
36# AMD Elan support
37cflags-$(CONFIG_X86_ELAN) += -march=i486
38
39# Geode GX1 support
40cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486)
41
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5546ddebec33..9204be6eedb3 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -803,6 +803,7 @@ no_apic:
803 803
804void __init init_apic_mappings(void) 804void __init init_apic_mappings(void)
805{ 805{
806 unsigned int orig_apicid;
806 unsigned long apic_phys; 807 unsigned long apic_phys;
807 808
808 /* 809 /*
@@ -824,8 +825,11 @@ void __init init_apic_mappings(void)
824 * Fetch the APIC ID of the BSP in case we have a 825 * Fetch the APIC ID of the BSP in case we have a
825 * default configuration (or the MP table is broken). 826 * default configuration (or the MP table is broken).
826 */ 827 */
827 if (boot_cpu_physical_apicid == -1U) 828 orig_apicid = boot_cpu_physical_apicid;
828 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); 829 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
830 if ((orig_apicid != -1U) && (orig_apicid != boot_cpu_physical_apicid))
831 printk(KERN_WARNING "Boot APIC ID in local APIC unexpected (%d vs %d)",
832 orig_apicid, boot_cpu_physical_apicid);
829 833
830#ifdef CONFIG_X86_IO_APIC 834#ifdef CONFIG_X86_IO_APIC
831 { 835 {
@@ -1046,10 +1050,11 @@ static unsigned int calibration_result;
1046 1050
1047void __init setup_boot_APIC_clock(void) 1051void __init setup_boot_APIC_clock(void)
1048{ 1052{
1053 unsigned long flags;
1049 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); 1054 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n");
1050 using_apic_timer = 1; 1055 using_apic_timer = 1;
1051 1056
1052 local_irq_disable(); 1057 local_irq_save(flags);
1053 1058
1054 calibration_result = calibrate_APIC_clock(); 1059 calibration_result = calibrate_APIC_clock();
1055 /* 1060 /*
@@ -1057,7 +1062,7 @@ void __init setup_boot_APIC_clock(void)
1057 */ 1062 */
1058 setup_APIC_timer(calibration_result); 1063 setup_APIC_timer(calibration_result);
1059 1064
1060 local_irq_enable(); 1065 local_irq_restore(flags);
1061} 1066}
1062 1067
1063void __devinit setup_secondary_APIC_clock(void) 1068void __devinit setup_secondary_APIC_clock(void)
@@ -1254,40 +1259,81 @@ fastcall void smp_error_interrupt(struct pt_regs *regs)
1254} 1259}
1255 1260
1256/* 1261/*
1257 * This initializes the IO-APIC and APIC hardware if this is 1262 * This initializes the IO-APIC and APIC hardware.
1258 * a UP kernel.
1259 */ 1263 */
1260int __init APIC_init_uniprocessor (void) 1264int __init APIC_init(void)
1261{ 1265{
1262 if (enable_local_apic < 0) 1266 if (enable_local_apic < 0) {
1263 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); 1267 printk(KERN_INFO "APIC disabled\n");
1268 return -1;
1269 }
1264 1270
1265 if (!smp_found_config && !cpu_has_apic) 1271 /* See if we have a SMP configuration or have forced enabled
1272 * the local apic.
1273 */
1274 if (!smp_found_config && !acpi_lapic && !cpu_has_apic) {
1275 enable_local_apic = -1;
1266 return -1; 1276 return -1;
1277 }
1267 1278
1268 /* 1279 /*
1269 * Complain if the BIOS pretends there is one. 1280 * Complain if the BIOS pretends there is an apic.
1281	 * Then get out because we don't have a local apic.
1270 */ 1282 */
1271 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { 1283 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1272 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", 1284 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1273 boot_cpu_physical_apicid); 1285 boot_cpu_physical_apicid);
1286 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
1287 enable_local_apic = -1;
1274 return -1; 1288 return -1;
1275 } 1289 }
1276 1290
1277 verify_local_APIC(); 1291 verify_local_APIC();
1278 1292
1293 /*
1294 * Should not be necessary because the MP table should list the boot
1295 * CPU too, but we do it for the sake of robustness anyway.
1296 * Makes no sense to do this check in clustered apic mode, so skip it
1297 */
1298 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1299 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
1300 boot_cpu_physical_apicid);
1301 physid_set(boot_cpu_physical_apicid, phys_cpu_present_map);
1302 }
1303
1304 /*
1305 * Switch from PIC to APIC mode.
1306 */
1279 connect_bsp_APIC(); 1307 connect_bsp_APIC();
1308 setup_local_APIC();
1280 1309
1281 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 1310#ifdef CONFIG_X86_IO_APIC
1311 /*
1312 * Now start the IO-APICs
1313 */
1314 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1315 setup_IO_APIC();
1316#endif
1317 return 0;
1318}
1282 1319
1283 setup_local_APIC(); 1320void __init APIC_late_time_init(void)
1321{
1322 /* Improve our loops per jiffy estimate */
1323 loops_per_jiffy = ((1000 + HZ - 1)/HZ)*cpu_khz;
1324 boot_cpu_data.loops_per_jiffy = loops_per_jiffy;
1325 cpu_data[0].loops_per_jiffy = loops_per_jiffy;
1326
1327 /* setup_apic_nmi_watchdog doesn't work properly before cpu_khz is
1328 * initialized. So redo it here to ensure the boot cpu is setup
1329 * properly.
1330 */
1331 if (nmi_watchdog == NMI_LOCAL_APIC)
1332 setup_apic_nmi_watchdog();
1284 1333
1285#ifdef CONFIG_X86_IO_APIC 1334#ifdef CONFIG_X86_IO_APIC
1286 if (smp_found_config) 1335 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1287 if (!skip_ioapic_setup && nr_ioapics) 1336 IO_APIC_late_time_init();
1288 setup_IO_APIC();
1289#endif 1337#endif
1290 setup_boot_APIC_clock(); 1338 setup_boot_APIC_clock();
1291
1292 return 0;
1293} 1339}
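
setup_boot_APIC_clock() now brackets calibration with local_irq_save()/local_irq_restore() instead of disable/enable, so it no longer turns interrupts back on behind a caller that had them off. A rough user-space analogy of why save/restore nests safely (this is a model of the behaviour, not the kernel API itself):

    #include <stdio.h>

    static int irqs_enabled = 0;                  /* the caller already has IRQs off */

    static void save(unsigned long *flags)   { *flags = irqs_enabled; irqs_enabled = 0; }
    static void restore(unsigned long flags) { irqs_enabled = (int)flags; }

    int main(void)
    {
        unsigned long flags;

        save(&flags);                             /* like local_irq_save(flags) */
        /* ... calibrate_APIC_clock() would run here ... */
        restore(flags);                           /* like local_irq_restore(flags) */

        printf("irqs_enabled = %d (still off, as the caller expects)\n", irqs_enabled);
        return 0;
    }
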
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index d7811c4e8b50..d2ef0c2aa93e 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -597,12 +597,14 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
597 cpumask_t cpus; 597 cpumask_t cpus;
598 int cpu; 598 int cpu;
599 struct desc_struct save_desc_40; 599 struct desc_struct save_desc_40;
600 struct desc_struct *gdt;
600 601
601 cpus = apm_save_cpus(); 602 cpus = apm_save_cpus();
602 603
603 cpu = get_cpu(); 604 cpu = get_cpu();
604 save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8]; 605 gdt = get_cpu_gdt_table(cpu);
605 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc; 606 save_desc_40 = gdt[0x40 / 8];
607 gdt[0x40 / 8] = bad_bios_desc;
606 608
607 local_save_flags(flags); 609 local_save_flags(flags);
608 APM_DO_CLI; 610 APM_DO_CLI;
@@ -610,7 +612,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
610 apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi); 612 apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
611 APM_DO_RESTORE_SEGS; 613 APM_DO_RESTORE_SEGS;
612 local_irq_restore(flags); 614 local_irq_restore(flags);
613 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = save_desc_40; 615 gdt[0x40 / 8] = save_desc_40;
614 put_cpu(); 616 put_cpu();
615 apm_restore_cpus(cpus); 617 apm_restore_cpus(cpus);
616 618
@@ -639,13 +641,14 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
639 cpumask_t cpus; 641 cpumask_t cpus;
640 int cpu; 642 int cpu;
641 struct desc_struct save_desc_40; 643 struct desc_struct save_desc_40;
642 644 struct desc_struct *gdt;
643 645
644 cpus = apm_save_cpus(); 646 cpus = apm_save_cpus();
645 647
646 cpu = get_cpu(); 648 cpu = get_cpu();
647 save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8]; 649 gdt = get_cpu_gdt_table(cpu);
648 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc; 650 save_desc_40 = gdt[0x40 / 8];
651 gdt[0x40 / 8] = bad_bios_desc;
649 652
650 local_save_flags(flags); 653 local_save_flags(flags);
651 APM_DO_CLI; 654 APM_DO_CLI;
@@ -653,7 +656,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
653 error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax); 656 error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
654 APM_DO_RESTORE_SEGS; 657 APM_DO_RESTORE_SEGS;
655 local_irq_restore(flags); 658 local_irq_restore(flags);
656 __get_cpu_var(cpu_gdt_table)[0x40 / 8] = save_desc_40; 659 gdt[0x40 / 8] = save_desc_40;
657 put_cpu(); 660 put_cpu();
658 apm_restore_cpus(cpus); 661 apm_restore_cpus(cpus);
659 return error; 662 return error;
@@ -2295,35 +2298,36 @@ static int __init apm_init(void)
2295 apm_bios_entry.segment = APM_CS; 2298 apm_bios_entry.segment = APM_CS;
2296 2299
2297 for (i = 0; i < NR_CPUS; i++) { 2300 for (i = 0; i < NR_CPUS; i++) {
2298 set_base(per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 2301 struct desc_struct *gdt = get_cpu_gdt_table(i);
2302 set_base(gdt[APM_CS >> 3],
2299 __va((unsigned long)apm_info.bios.cseg << 4)); 2303 __va((unsigned long)apm_info.bios.cseg << 4));
2300 set_base(per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 2304 set_base(gdt[APM_CS_16 >> 3],
2301 __va((unsigned long)apm_info.bios.cseg_16 << 4)); 2305 __va((unsigned long)apm_info.bios.cseg_16 << 4));
2302 set_base(per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 2306 set_base(gdt[APM_DS >> 3],
2303 __va((unsigned long)apm_info.bios.dseg << 4)); 2307 __va((unsigned long)apm_info.bios.dseg << 4));
2304#ifndef APM_RELAX_SEGMENTS 2308#ifndef APM_RELAX_SEGMENTS
2305 if (apm_info.bios.version == 0x100) { 2309 if (apm_info.bios.version == 0x100) {
2306#endif 2310#endif
2307 /* For ASUS motherboard, Award BIOS rev 110 (and others?) */ 2311 /* For ASUS motherboard, Award BIOS rev 110 (and others?) */
2308 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 - 1); 2312 _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1);
2309 /* For some unknown machine. */ 2313 /* For some unknown machine. */
2310 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 64 * 1024 - 1); 2314 _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
2311 /* For the DEC Hinote Ultra CT475 (and others?) */ 2315 /* For the DEC Hinote Ultra CT475 (and others?) */
2312 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 64 * 1024 - 1); 2316 _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
2313#ifndef APM_RELAX_SEGMENTS 2317#ifndef APM_RELAX_SEGMENTS
2314 } else { 2318 } else {
2315 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 2319 _set_limit((char *)&gdt[APM_CS >> 3],
2316 (apm_info.bios.cseg_len - 1) & 0xffff); 2320 (apm_info.bios.cseg_len - 1) & 0xffff);
2317 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 2321 _set_limit((char *)&gdt[APM_CS_16 >> 3],
2318 (apm_info.bios.cseg_16_len - 1) & 0xffff); 2322 (apm_info.bios.cseg_16_len - 1) & 0xffff);
2319 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 2323 _set_limit((char *)&gdt[APM_DS >> 3],
2320 (apm_info.bios.dseg_len - 1) & 0xffff); 2324 (apm_info.bios.dseg_len - 1) & 0xffff);
2321 /* workaround for broken BIOSes */ 2325 /* workaround for broken BIOSes */
2322 if (apm_info.bios.cseg_len <= apm_info.bios.offset) 2326 if (apm_info.bios.cseg_len <= apm_info.bios.offset)
2323 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 -1); 2327 _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
2324 if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */ 2328 if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
2325 /* for the BIOS that assumes granularity = 1 */ 2329 /* for the BIOS that assumes granularity = 1 */
2326 per_cpu(cpu_gdt_table, i)[APM_DS >> 3].b |= 0x800000; 2330 gdt[APM_DS >> 3].b |= 0x800000;
2327 printk(KERN_NOTICE "apm: we set the granularity of dseg.\n"); 2331 printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
2328 } 2332 }
2329 } 2333 }
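
The apm.c changes cache the per-CPU GDT pointer from get_cpu_gdt_table() once per call and keep the existing save/override/restore dance on descriptor 0x40 around the BIOS entry. A plain-C sketch of that dance (the bad_bios_desc value and table size are placeholders, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t gdt[16] = { 0 };
        const int slot = 0x40 / 8;                /* same descriptor index as the patch */
        const uint64_t bad_bios_desc = 0x40UL;    /* placeholder value, assumption */

        uint64_t saved = gdt[slot];               /* save_desc_40 */
        gdt[slot] = bad_bios_desc;                /* shield against buggy BIOS accesses */
        /* ... the APM BIOS call would happen here ... */
        gdt[slot] = saved;                        /* restore before anyone else looks */

        printf("slot %d restored to %#llx\n", slot, (unsigned long long)gdt[slot]);
        return 0;
    }
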
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 9ad43be9a01f..74145a33cb0f 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -573,6 +573,7 @@ void __devinit cpu_init(void)
573 int cpu = smp_processor_id(); 573 int cpu = smp_processor_id();
574 struct tss_struct * t = &per_cpu(init_tss, cpu); 574 struct tss_struct * t = &per_cpu(init_tss, cpu);
575 struct thread_struct *thread = &current->thread; 575 struct thread_struct *thread = &current->thread;
576 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
576 __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu); 577 __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
577 578
578 if (cpu_test_and_set(cpu, cpu_initialized)) { 579 if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -594,24 +595,16 @@ void __devinit cpu_init(void)
594 * Initialize the per-CPU GDT with the boot GDT, 595 * Initialize the per-CPU GDT with the boot GDT,
595 * and set up the GDT descriptor: 596 * and set up the GDT descriptor:
596 */ 597 */
597 memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table, 598 memcpy(gdt, cpu_gdt_table, GDT_SIZE);
598 GDT_SIZE);
599 599
600 /* Set up GDT entry for 16bit stack */ 600 /* Set up GDT entry for 16bit stack */
601 *(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |= 601 *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
602 ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) | 602 ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
603 ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) | 603 ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
604 (CPU_16BIT_STACK_SIZE - 1); 604 (CPU_16BIT_STACK_SIZE - 1);
605 605
606 cpu_gdt_descr[cpu].size = GDT_SIZE - 1; 606 cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
607 cpu_gdt_descr[cpu].address = 607 cpu_gdt_descr[cpu].address = (unsigned long)gdt;
608 (unsigned long)&per_cpu(cpu_gdt_table, cpu);
609
610 /*
611 * Set up the per-thread TLS descriptor cache:
612 */
613 memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
614 GDT_ENTRY_TLS_ENTRIES * 8);
615 608
616 load_gdt(&cpu_gdt_descr[cpu]); 609 load_gdt(&cpu_gdt_descr[cpu]);
617 load_idt(&idt_descr); 610 load_idt(&idt_descr);
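
cpu_init() builds the 16-bit-stack (ESPFIX) descriptor by OR-ing the stack base into the GDT entry: base[23:0] lands in descriptor bits 16..39, base[31:24] in bits 56..63, and the limit sits in bits 15:0. A runnable sketch of that packing, assuming an arbitrary example base and a 4K stack (the CPU_16BIT_STACK_SIZE value here is an assumption):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t base  = 0xc12ab000;                   /* example per-CPU stack address */
        uint32_t limit = 0x1000 - 1;                   /* CPU_16BIT_STACK_SIZE - 1, assumed 4K */

        uint64_t desc = 0;
        desc |= ((uint64_t)base << 16) & 0x000000ffffff0000ULL;   /* base 23:0  */
        desc |= ((uint64_t)base << 32) & 0xff00000000000000ULL;   /* base 31:24 */
        desc |= limit;                                            /* limit 15:0 */

        /* unpack the base again to show the round trip */
        uint32_t unpacked = (uint32_t)((desc >> 16) & 0xffffff) |
                            (uint32_t)((desc >> 56) << 24);
        printf("base %#x -> descriptor %#llx (round-trip %#x)\n",
               base, (unsigned long long)desc, unpacked);
        return 0;
    }
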
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 822c8ce9d1f1..caa9f7711343 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/compiler.h> 34#include <linux/compiler.h>
35#include <linux/sched.h> /* current */
35#include <asm/io.h> 36#include <asm/io.h>
36#include <asm/delay.h> 37#include <asm/delay.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index aa622d52c6e5..270f2188d68b 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -28,6 +28,7 @@
28#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/cpumask.h> 30#include <linux/cpumask.h>
31#include <linux/sched.h> /* current / set_cpus_allowed() */
31 32
32#include <asm/processor.h> 33#include <asm/processor.h>
33#include <asm/msr.h> 34#include <asm/msr.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 58ca98fdc2ca..2d5c9adba0cd 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/cpumask.h> 34#include <linux/cpumask.h>
35#include <linux/sched.h> /* for current / set_cpus_allowed() */
35 36
36#include <asm/msr.h> 37#include <asm/msr.h>
37#include <asm/io.h> 38#include <asm/io.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index c397b6220430..1465974256c9 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/cpufreq.h> 23#include <linux/cpufreq.h>
24#include <linux/config.h> 24#include <linux/config.h>
25#include <linux/sched.h> /* current */
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/compiler.h> 27#include <linux/compiler.h>
27 28
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 9e0d5f83cb9f..4dc42a189ae5 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Changes: 4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4) 5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
6 */ 7 */
7 8
8#include <linux/init.h> 9#include <linux/init.h>
@@ -10,6 +11,7 @@
10#include <linux/device.h> 11#include <linux/device.h>
11#include <linux/compiler.h> 12#include <linux/compiler.h>
12#include <linux/cpu.h> 13#include <linux/cpu.h>
14#include <linux/sched.h>
13 15
14#include <asm/processor.h> 16#include <asm/processor.h>
15#include <asm/smp.h> 17#include <asm/smp.h>
@@ -28,7 +30,7 @@ struct _cache_table
28}; 30};
29 31
30/* all the cache descriptor types we care about (no TLB or trace cache entries) */ 32/* all the cache descriptor types we care about (no TLB or trace cache entries) */
31static struct _cache_table cache_table[] __devinitdata = 33static struct _cache_table cache_table[] __cpuinitdata =
32{ 34{
33 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 35 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
34 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 36 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -117,10 +119,9 @@ struct _cpuid4_info {
117 cpumask_t shared_cpu_map; 119 cpumask_t shared_cpu_map;
118}; 120};
119 121
120#define MAX_CACHE_LEAVES 4
121static unsigned short num_cache_leaves; 122static unsigned short num_cache_leaves;
122 123
123static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) 124static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
124{ 125{
125 unsigned int eax, ebx, ecx, edx; 126 unsigned int eax, ebx, ecx, edx;
126 union _cpuid4_leaf_eax cache_eax; 127 union _cpuid4_leaf_eax cache_eax;
@@ -144,23 +145,18 @@ static int __init find_num_cache_leaves(void)
144{ 145{
145 unsigned int eax, ebx, ecx, edx; 146 unsigned int eax, ebx, ecx, edx;
146 union _cpuid4_leaf_eax cache_eax; 147 union _cpuid4_leaf_eax cache_eax;
147 int i; 148 int i = -1;
148 int retval;
149 149
150 retval = MAX_CACHE_LEAVES; 150 do {
151 /* Do cpuid(4) loop to find out num_cache_leaves */ 151 ++i;
152 for (i = 0; i < MAX_CACHE_LEAVES; i++) { 152 /* Do cpuid(4) loop to find out num_cache_leaves */
153 cpuid_count(4, i, &eax, &ebx, &ecx, &edx); 153 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
154 cache_eax.full = eax; 154 cache_eax.full = eax;
155 if (cache_eax.split.type == CACHE_TYPE_NULL) { 155 } while (cache_eax.split.type != CACHE_TYPE_NULL);
156 retval = i; 156 return i;
157 break;
158 }
159 }
160 return retval;
161} 157}
162 158
163unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c) 159unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
164{ 160{
165 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ 161 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
166 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 162 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
@@ -284,13 +280,7 @@ unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
284 if ( l3 ) 280 if ( l3 )
285 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); 281 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
286 282
287 /* 283 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
288 * This assumes the L3 cache is shared; it typically lives in
289 * the northbridge. The L1 caches are included by the L2
290 * cache, and so should not be included for the purpose of
291 * SMP switching weights.
292 */
293 c->x86_cache_size = l2 ? l2 : (l1i+l1d);
294 } 284 }
295 285
296 return l2; 286 return l2;
@@ -301,7 +291,7 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
301#define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y])) 291#define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y]))
302 292
303#ifdef CONFIG_SMP 293#ifdef CONFIG_SMP
304static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 294static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
305{ 295{
306 struct _cpuid4_info *this_leaf; 296 struct _cpuid4_info *this_leaf;
307 unsigned long num_threads_sharing; 297 unsigned long num_threads_sharing;
@@ -334,7 +324,7 @@ static void free_cache_attributes(unsigned int cpu)
334 cpuid4_info[cpu] = NULL; 324 cpuid4_info[cpu] = NULL;
335} 325}
336 326
337static int __devinit detect_cache_attributes(unsigned int cpu) 327static int __cpuinit detect_cache_attributes(unsigned int cpu)
338{ 328{
339 struct _cpuid4_info *this_leaf; 329 struct _cpuid4_info *this_leaf;
340 unsigned long j; 330 unsigned long j;
@@ -511,7 +501,7 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
511 free_cache_attributes(cpu); 501 free_cache_attributes(cpu);
512} 502}
513 503
514static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu) 504static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
515{ 505{
516 506
517 if (num_cache_leaves == 0) 507 if (num_cache_leaves == 0)
@@ -542,7 +532,7 @@ err_out:
542} 532}
543 533
544/* Add/Remove cache interface for CPU device */ 534/* Add/Remove cache interface for CPU device */
545static int __devinit cache_add_dev(struct sys_device * sys_dev) 535static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
546{ 536{
547 unsigned int cpu = sys_dev->id; 537 unsigned int cpu = sys_dev->id;
548 unsigned long i, j; 538 unsigned long i, j;
@@ -579,7 +569,7 @@ static int __devinit cache_add_dev(struct sys_device * sys_dev)
579 return retval; 569 return retval;
580} 570}
581 571
582static int __devexit cache_remove_dev(struct sys_device * sys_dev) 572static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
583{ 573{
584 unsigned int cpu = sys_dev->id; 574 unsigned int cpu = sys_dev->id;
585 unsigned long i; 575 unsigned long i;
@@ -588,24 +578,49 @@ static int __devexit cache_remove_dev(struct sys_device * sys_dev)
588 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); 578 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
589 kobject_unregister(cache_kobject[cpu]); 579 kobject_unregister(cache_kobject[cpu]);
590 cpuid4_cache_sysfs_exit(cpu); 580 cpuid4_cache_sysfs_exit(cpu);
591 return 0; 581 return;
582}
583
584static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
585 unsigned long action, void *hcpu)
586{
587 unsigned int cpu = (unsigned long)hcpu;
588 struct sys_device *sys_dev;
589
590 sys_dev = get_cpu_sysdev(cpu);
591 switch (action) {
592 case CPU_ONLINE:
593 cache_add_dev(sys_dev);
594 break;
595 case CPU_DEAD:
596 cache_remove_dev(sys_dev);
597 break;
598 }
599 return NOTIFY_OK;
592} 600}
593 601
594static struct sysdev_driver cache_sysdev_driver = { 602static struct notifier_block cacheinfo_cpu_notifier =
595 .add = cache_add_dev, 603{
596 .remove = __devexit_p(cache_remove_dev), 604 .notifier_call = cacheinfo_cpu_callback,
597}; 605};
598 606
599/* Register/Unregister the cpu_cache driver */ 607static int __cpuinit cache_sysfs_init(void)
600static int __devinit cache_register_driver(void)
601{ 608{
609 int i;
610
602 if (num_cache_leaves == 0) 611 if (num_cache_leaves == 0)
603 return 0; 612 return 0;
604 613
605 return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver); 614 register_cpu_notifier(&cacheinfo_cpu_notifier);
615
616 for_each_online_cpu(i) {
617 cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
618 (void *)(long)i);
619 }
620
621 return 0;
606} 622}
607 623
608device_initcall(cache_register_driver); 624device_initcall(cache_sysfs_init);
609 625
610#endif 626#endif
611
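The intel_cacheinfo.c hunk above drops the sysdev add/remove driver and instead registers a CPU hotplug notifier, so the per-CPU cache sysfs entries are created on CPU_ONLINE and torn down on CPU_DEAD. The following is a minimal sketch of that 2.6-era notifier pattern, not the patch itself; the example_* names and the per-CPU hooks are invented for illustration.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

/* Hypothetical per-CPU setup/teardown hooks. */
static void example_cpu_up(unsigned int cpu)   { /* allocate per-CPU state */ }
static void example_cpu_down(unsigned int cpu) { /* release per-CPU state */ }

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		example_cpu_up(cpu);
		break;
	case CPU_DEAD:
		example_cpu_down(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	int i;

	register_cpu_notifier(&example_cpu_notifier);
	/* CPUs already online when the notifier is registered get the
	 * ONLINE event replayed by hand, as the patch does above. */
	for_each_online_cpu(i)
		example_cpu_callback(&example_cpu_notifier, CPU_ONLINE,
				     (void *)(long)i);
	return 0;
}
device_initcall(example_init);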
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c
index 3c035b8fa3d9..979b18bc95c1 100644
--- a/arch/i386/kernel/cpu/mcheck/p6.c
+++ b/arch/i386/kernel/cpu/mcheck/p6.c
@@ -102,11 +102,16 @@ void __devinit intel_p6_mcheck_init(struct cpuinfo_x86 *c)
102 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 102 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
103 nr_mce_banks = l & 0xff; 103 nr_mce_banks = l & 0xff;
104 104
105 /* Don't enable bank 0 on intel P6 cores, it goes bang quickly. */ 105 /*
106 for (i=1; i<nr_mce_banks; i++) { 106 * Following the example in IA-32 SDM Vol 3:
107 * - MC0_CTL should not be written
108 * - Status registers on all banks should be cleared on reset
109 */
110 for (i=1; i<nr_mce_banks; i++)
107 wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); 111 wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
112
113 for (i=0; i<nr_mce_banks; i++)
108 wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); 114 wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
109 }
110 115
111 set_in_cr4 (X86_CR4_MCE); 116 set_in_cr4 (X86_CR4_MCE);
112 printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", 117 printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index 1923e0aed26a..cf39e205d33c 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -149,60 +149,89 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
149 return -EINVAL; 149 return -EINVAL;
150} 150}
151 151
152static int 152static long
153mtrr_ioctl(struct inode *inode, struct file *file, 153mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
154 unsigned int cmd, unsigned long __arg)
155{ 154{
156 int err; 155 int err = 0;
157 mtrr_type type; 156 mtrr_type type;
158 struct mtrr_sentry sentry; 157 struct mtrr_sentry sentry;
159 struct mtrr_gentry gentry; 158 struct mtrr_gentry gentry;
160 void __user *arg = (void __user *) __arg; 159 void __user *arg = (void __user *) __arg;
161 160
162 switch (cmd) { 161 switch (cmd) {
162 case MTRRIOC_ADD_ENTRY:
163 case MTRRIOC_SET_ENTRY:
164 case MTRRIOC_DEL_ENTRY:
165 case MTRRIOC_KILL_ENTRY:
166 case MTRRIOC_ADD_PAGE_ENTRY:
167 case MTRRIOC_SET_PAGE_ENTRY:
168 case MTRRIOC_DEL_PAGE_ENTRY:
169 case MTRRIOC_KILL_PAGE_ENTRY:
170 if (copy_from_user(&sentry, arg, sizeof sentry))
171 return -EFAULT;
172 break;
173 case MTRRIOC_GET_ENTRY:
174 case MTRRIOC_GET_PAGE_ENTRY:
175 if (copy_from_user(&gentry, arg, sizeof gentry))
176 return -EFAULT;
177 break;
178#ifdef CONFIG_COMPAT
179 case MTRRIOC32_ADD_ENTRY:
180 case MTRRIOC32_SET_ENTRY:
181 case MTRRIOC32_DEL_ENTRY:
182 case MTRRIOC32_KILL_ENTRY:
183 case MTRRIOC32_ADD_PAGE_ENTRY:
184 case MTRRIOC32_SET_PAGE_ENTRY:
185 case MTRRIOC32_DEL_PAGE_ENTRY:
186 case MTRRIOC32_KILL_PAGE_ENTRY: {
187 struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg;
188 err = get_user(sentry.base, &s32->base);
189 err |= get_user(sentry.size, &s32->size);
190 err |= get_user(sentry.type, &s32->type);
191 if (err)
192 return err;
193 break;
194 }
195 case MTRRIOC32_GET_ENTRY:
196 case MTRRIOC32_GET_PAGE_ENTRY: {
197 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
198 err = get_user(gentry.regnum, &g32->regnum);
199 err |= get_user(gentry.base, &g32->base);
200 err |= get_user(gentry.size, &g32->size);
201 err |= get_user(gentry.type, &g32->type);
202 if (err)
203 return err;
204 break;
205 }
206#endif
207 }
208
209 switch (cmd) {
163 default: 210 default:
164 return -ENOTTY; 211 return -ENOTTY;
165 case MTRRIOC_ADD_ENTRY: 212 case MTRRIOC_ADD_ENTRY:
166 if (!capable(CAP_SYS_ADMIN)) 213 if (!capable(CAP_SYS_ADMIN))
167 return -EPERM; 214 return -EPERM;
168 if (copy_from_user(&sentry, arg, sizeof sentry))
169 return -EFAULT;
170 err = 215 err =
171 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, 216 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
172 file, 0); 217 file, 0);
173 if (err < 0)
174 return err;
175 break; 218 break;
176 case MTRRIOC_SET_ENTRY: 219 case MTRRIOC_SET_ENTRY:
177 if (!capable(CAP_SYS_ADMIN)) 220 if (!capable(CAP_SYS_ADMIN))
178 return -EPERM; 221 return -EPERM;
179 if (copy_from_user(&sentry, arg, sizeof sentry))
180 return -EFAULT;
181 err = mtrr_add(sentry.base, sentry.size, sentry.type, 0); 222 err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
182 if (err < 0)
183 return err;
184 break; 223 break;
185 case MTRRIOC_DEL_ENTRY: 224 case MTRRIOC_DEL_ENTRY:
186 if (!capable(CAP_SYS_ADMIN)) 225 if (!capable(CAP_SYS_ADMIN))
187 return -EPERM; 226 return -EPERM;
188 if (copy_from_user(&sentry, arg, sizeof sentry))
189 return -EFAULT;
190 err = mtrr_file_del(sentry.base, sentry.size, file, 0); 227 err = mtrr_file_del(sentry.base, sentry.size, file, 0);
191 if (err < 0)
192 return err;
193 break; 228 break;
194 case MTRRIOC_KILL_ENTRY: 229 case MTRRIOC_KILL_ENTRY:
195 if (!capable(CAP_SYS_ADMIN)) 230 if (!capable(CAP_SYS_ADMIN))
196 return -EPERM; 231 return -EPERM;
197 if (copy_from_user(&sentry, arg, sizeof sentry))
198 return -EFAULT;
199 err = mtrr_del(-1, sentry.base, sentry.size); 232 err = mtrr_del(-1, sentry.base, sentry.size);
200 if (err < 0)
201 return err;
202 break; 233 break;
203 case MTRRIOC_GET_ENTRY: 234 case MTRRIOC_GET_ENTRY:
204 if (copy_from_user(&gentry, arg, sizeof gentry))
205 return -EFAULT;
206 if (gentry.regnum >= num_var_ranges) 235 if (gentry.regnum >= num_var_ranges)
207 return -EINVAL; 236 return -EINVAL;
208 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); 237 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
@@ -217,60 +246,59 @@ mtrr_ioctl(struct inode *inode, struct file *file,
217 gentry.type = type; 246 gentry.type = type;
218 } 247 }
219 248
220 if (copy_to_user(arg, &gentry, sizeof gentry))
221 return -EFAULT;
222 break; 249 break;
223 case MTRRIOC_ADD_PAGE_ENTRY: 250 case MTRRIOC_ADD_PAGE_ENTRY:
224 if (!capable(CAP_SYS_ADMIN)) 251 if (!capable(CAP_SYS_ADMIN))
225 return -EPERM; 252 return -EPERM;
226 if (copy_from_user(&sentry, arg, sizeof sentry))
227 return -EFAULT;
228 err = 253 err =
229 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, 254 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
230 file, 1); 255 file, 1);
231 if (err < 0)
232 return err;
233 break; 256 break;
234 case MTRRIOC_SET_PAGE_ENTRY: 257 case MTRRIOC_SET_PAGE_ENTRY:
235 if (!capable(CAP_SYS_ADMIN)) 258 if (!capable(CAP_SYS_ADMIN))
236 return -EPERM; 259 return -EPERM;
237 if (copy_from_user(&sentry, arg, sizeof sentry))
238 return -EFAULT;
239 err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0); 260 err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
240 if (err < 0)
241 return err;
242 break; 261 break;
243 case MTRRIOC_DEL_PAGE_ENTRY: 262 case MTRRIOC_DEL_PAGE_ENTRY:
244 if (!capable(CAP_SYS_ADMIN)) 263 if (!capable(CAP_SYS_ADMIN))
245 return -EPERM; 264 return -EPERM;
246 if (copy_from_user(&sentry, arg, sizeof sentry))
247 return -EFAULT;
248 err = mtrr_file_del(sentry.base, sentry.size, file, 1); 265 err = mtrr_file_del(sentry.base, sentry.size, file, 1);
249 if (err < 0)
250 return err;
251 break; 266 break;
252 case MTRRIOC_KILL_PAGE_ENTRY: 267 case MTRRIOC_KILL_PAGE_ENTRY:
253 if (!capable(CAP_SYS_ADMIN)) 268 if (!capable(CAP_SYS_ADMIN))
254 return -EPERM; 269 return -EPERM;
255 if (copy_from_user(&sentry, arg, sizeof sentry))
256 return -EFAULT;
257 err = mtrr_del_page(-1, sentry.base, sentry.size); 270 err = mtrr_del_page(-1, sentry.base, sentry.size);
258 if (err < 0)
259 return err;
260 break; 271 break;
261 case MTRRIOC_GET_PAGE_ENTRY: 272 case MTRRIOC_GET_PAGE_ENTRY:
262 if (copy_from_user(&gentry, arg, sizeof gentry))
263 return -EFAULT;
264 if (gentry.regnum >= num_var_ranges) 273 if (gentry.regnum >= num_var_ranges)
265 return -EINVAL; 274 return -EINVAL;
266 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); 275 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
267 gentry.type = type; 276 gentry.type = type;
277 break;
278 }
279
280 if (err)
281 return err;
268 282
283 switch(cmd) {
284 case MTRRIOC_GET_ENTRY:
285 case MTRRIOC_GET_PAGE_ENTRY:
269 if (copy_to_user(arg, &gentry, sizeof gentry)) 286 if (copy_to_user(arg, &gentry, sizeof gentry))
270 return -EFAULT; 287 err = -EFAULT;
288 break;
289#ifdef CONFIG_COMPAT
290 case MTRRIOC32_GET_ENTRY:
291 case MTRRIOC32_GET_PAGE_ENTRY: {
292 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
293 err = put_user(gentry.base, &g32->base);
294 err |= put_user(gentry.size, &g32->size);
295 err |= put_user(gentry.regnum, &g32->regnum);
296 err |= put_user(gentry.type, &g32->type);
271 break; 297 break;
272 } 298 }
273 return 0; 299#endif
300 }
301 return err;
274} 302}
275 303
276static int 304static int
@@ -310,7 +338,8 @@ static struct file_operations mtrr_fops = {
310 .read = seq_read, 338 .read = seq_read,
311 .llseek = seq_lseek, 339 .llseek = seq_lseek,
312 .write = mtrr_write, 340 .write = mtrr_write,
313 .ioctl = mtrr_ioctl, 341 .unlocked_ioctl = mtrr_ioctl,
342 .compat_ioctl = mtrr_ioctl,
314 .release = mtrr_close, 343 .release = mtrr_close,
315}; 344};
316 345
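The mtrr_ioctl() rewrite above turns the old .ioctl handler into a single .unlocked_ioctl/.compat_ioctl entry point with three phases: copy the user argument in once up front (with 32-bit variants under CONFIG_COMPAT), dispatch on the command, and do one copy-out at the end for the commands that return data. Below is a stripped-down, fragment-level sketch of that shape; FOO_GET/FOO_SET, struct foo_arg and the foo_* helpers are hypothetical names, not part of the MTRR interface.

/* Sketch only: FOO_GET/FOO_SET and struct foo_arg are hypothetical. */
static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
{
	void __user *arg = (void __user *)__arg;
	struct foo_arg a;
	int err = 0;

	/* Phase 1: copy the argument in, once, for every command that has one. */
	switch (cmd) {
	case FOO_SET:
	case FOO_GET:
		if (copy_from_user(&a, arg, sizeof a))
			return -EFAULT;
		break;
	}

	/* Phase 2: act on the command. */
	switch (cmd) {
	default:
		return -ENOTTY;
	case FOO_SET:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		err = foo_apply(&a);	/* hypothetical helper */
		break;
	case FOO_GET:
		foo_fill(&a);		/* hypothetical helper */
		break;
	}
	if (err)
		return err;

	/* Phase 3: copy results back only for commands that return data. */
	switch (cmd) {
	case FOO_GET:
		if (copy_to_user(arg, &a, sizeof a))
			err = -EFAULT;
		break;
	}
	return err;
}

Because nothing in the handler needs the inode argument or the BKL, the same function can serve both the native and the compat ioctl paths, which is how mtrr_fops is wired up in the hunk above.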
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 8bd77d948a84..41b871ecf4b3 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -44,7 +44,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
45 45
46 /* Intel-defined (#2) */ 46 /* Intel-defined (#2) */
47 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est", 47 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
48 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, 48 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
49 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 49 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
50 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 50 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 0248e084017c..af809ccf5fbe 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -21,7 +21,6 @@
21#include <asm/hardirq.h> 21#include <asm/hardirq.h>
22#include <asm/nmi.h> 22#include <asm/nmi.h>
23#include <asm/hw_irq.h> 23#include <asm/hw_irq.h>
24#include <asm/apic.h>
25#include <mach_ipi.h> 24#include <mach_ipi.h>
26 25
27 26
@@ -148,7 +147,6 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
148 regs = &fixed_regs; 147 regs = &fixed_regs;
149 } 148 }
150 crash_save_this_cpu(regs, cpu); 149 crash_save_this_cpu(regs, cpu);
151 disable_local_APIC();
152 atomic_dec(&waiting_for_crash_ipi); 150 atomic_dec(&waiting_for_crash_ipi);
153 /* Assume hlt works */ 151 /* Assume hlt works */
154 halt(); 152 halt();
@@ -188,7 +186,6 @@ static void nmi_shootdown_cpus(void)
188 } 186 }
189 187
190 /* Leave the nmi callback set */ 188 /* Leave the nmi callback set */
191 disable_local_APIC();
192} 189}
193#else 190#else
194static void nmi_shootdown_cpus(void) 191static void nmi_shootdown_cpus(void)
@@ -213,9 +210,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
213 /* Make a note of crashing cpu. Will be used in NMI callback.*/ 210 /* Make a note of crashing cpu. Will be used in NMI callback.*/
214 crashing_cpu = smp_processor_id(); 211 crashing_cpu = smp_processor_id();
215 nmi_shootdown_cpus(); 212 nmi_shootdown_cpus();
216 lapic_shutdown();
217#if defined(CONFIG_X86_IO_APIC)
218 disable_IO_APIC();
219#endif
220 crash_save_self(regs); 213 crash_save_self(regs);
221} 214}
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 323ef8ab3244..d86f24909284 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -435,4 +435,8 @@ void __init init_IRQ(void)
435 setup_irq(FPU_IRQ, &fpu_irq); 435 setup_irq(FPU_IRQ, &fpu_irq);
436 436
437 irq_ctx_init(smp_processor_id()); 437 irq_ctx_init(smp_processor_id());
438
439#ifdef CONFIG_X86_LOCAL_APIC
440 APIC_init();
441#endif
438} 442}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fb3991e8229e..5a77c52b20a9 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -46,6 +46,9 @@
46int (*ioapic_renumber_irq)(int ioapic, int irq); 46int (*ioapic_renumber_irq)(int ioapic, int irq);
47atomic_t irq_mis_count; 47atomic_t irq_mis_count;
48 48
 49/* Where, if anywhere, the i8259 is connected in external int mode */

50static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
51
49static DEFINE_SPINLOCK(ioapic_lock); 52static DEFINE_SPINLOCK(ioapic_lock);
50 53
51/* 54/*
@@ -738,7 +741,7 @@ static int find_irq_entry(int apic, int pin, int type)
738/* 741/*
739 * Find the pin to which IRQ[irq] (ISA) is connected 742 * Find the pin to which IRQ[irq] (ISA) is connected
740 */ 743 */
741static int find_isa_irq_pin(int irq, int type) 744static int __init find_isa_irq_pin(int irq, int type)
742{ 745{
743 int i; 746 int i;
744 747
@@ -758,6 +761,33 @@ static int find_isa_irq_pin(int irq, int type)
758 return -1; 761 return -1;
759} 762}
760 763
764static int __init find_isa_irq_apic(int irq, int type)
765{
766 int i;
767
768 for (i = 0; i < mp_irq_entries; i++) {
769 int lbus = mp_irqs[i].mpc_srcbus;
770
771 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
772 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
773 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
774 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
775 ) &&
776 (mp_irqs[i].mpc_irqtype == type) &&
777 (mp_irqs[i].mpc_srcbusirq == irq))
778 break;
779 }
780 if (i < mp_irq_entries) {
781 int apic;
782 for(apic = 0; apic < nr_ioapics; apic++) {
783 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
784 return apic;
785 }
786 }
787
788 return -1;
789}
790
761/* 791/*
762 * Find a specific PCI IRQ entry. 792 * Find a specific PCI IRQ entry.
763 * Not an __init, possibly needed by modules 793 * Not an __init, possibly needed by modules
@@ -1253,7 +1283,7 @@ static void __init setup_IO_APIC_irqs(void)
1253/* 1283/*
1254 * Set up the 8259A-master output pin: 1284 * Set up the 8259A-master output pin:
1255 */ 1285 */
1256static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector) 1286static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
1257{ 1287{
1258 struct IO_APIC_route_entry entry; 1288 struct IO_APIC_route_entry entry;
1259 unsigned long flags; 1289 unsigned long flags;
@@ -1287,8 +1317,8 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
1287 * Add it to the IO-APIC irq-routing table: 1317 * Add it to the IO-APIC irq-routing table:
1288 */ 1318 */
1289 spin_lock_irqsave(&ioapic_lock, flags); 1319 spin_lock_irqsave(&ioapic_lock, flags);
1290 io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1)); 1320 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
1291 io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0)); 1321 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
1292 spin_unlock_irqrestore(&ioapic_lock, flags); 1322 spin_unlock_irqrestore(&ioapic_lock, flags);
1293 1323
1294 enable_8259A_irq(0); 1324 enable_8259A_irq(0);
@@ -1595,7 +1625,8 @@ void /*__init*/ print_PIC(void)
1595static void __init enable_IO_APIC(void) 1625static void __init enable_IO_APIC(void)
1596{ 1626{
1597 union IO_APIC_reg_01 reg_01; 1627 union IO_APIC_reg_01 reg_01;
1598 int i; 1628 int i8259_apic, i8259_pin;
1629 int i, apic;
1599 unsigned long flags; 1630 unsigned long flags;
1600 1631
1601 for (i = 0; i < PIN_MAP_SIZE; i++) { 1632 for (i = 0; i < PIN_MAP_SIZE; i++) {
@@ -1609,11 +1640,52 @@ static void __init enable_IO_APIC(void)
1609 /* 1640 /*
1610 * The number of IO-APIC IRQ registers (== #pins): 1641 * The number of IO-APIC IRQ registers (== #pins):
1611 */ 1642 */
1612 for (i = 0; i < nr_ioapics; i++) { 1643 for (apic = 0; apic < nr_ioapics; apic++) {
1613 spin_lock_irqsave(&ioapic_lock, flags); 1644 spin_lock_irqsave(&ioapic_lock, flags);
1614 reg_01.raw = io_apic_read(i, 1); 1645 reg_01.raw = io_apic_read(apic, 1);
1615 spin_unlock_irqrestore(&ioapic_lock, flags); 1646 spin_unlock_irqrestore(&ioapic_lock, flags);
1616 nr_ioapic_registers[i] = reg_01.bits.entries+1; 1647 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1648 }
1649 for(apic = 0; apic < nr_ioapics; apic++) {
1650 int pin;
1651 /* See if any of the pins is in ExtINT mode */
 1652 for(pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1653 struct IO_APIC_route_entry entry;
1654 spin_lock_irqsave(&ioapic_lock, flags);
1655 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1656 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1657 spin_unlock_irqrestore(&ioapic_lock, flags);
1658
1659
1660 /* If the interrupt line is enabled and in ExtInt mode
1661 * I have found the pin where the i8259 is connected.
1662 */
1663 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1664 ioapic_i8259.apic = apic;
1665 ioapic_i8259.pin = pin;
1666 goto found_i8259;
1667 }
1668 }
1669 }
1670 found_i8259:
 1671 /* Look to see if the MP table has reported the ExtINT */
1672 /* If we could not find the appropriate pin by looking at the ioapic
 1673  * the i8259 probably is not connected to the ioapic, but give the
1674 * mptable a chance anyway.
1675 */
1676 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1677 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
 1678 /* Trust the MP table if nothing is set up in the hardware */
1679 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1680 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1681 ioapic_i8259.pin = i8259_pin;
1682 ioapic_i8259.apic = i8259_apic;
1683 }
1684 /* Complain if the MP table and the hardware disagree */
1685 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1686 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1687 {
1688 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1617 } 1689 }
1618 1690
1619 /* 1691 /*
@@ -1627,7 +1699,6 @@ static void __init enable_IO_APIC(void)
1627 */ 1699 */
1628void disable_IO_APIC(void) 1700void disable_IO_APIC(void)
1629{ 1701{
1630 int pin;
1631 /* 1702 /*
1632 * Clear the IO-APIC before rebooting: 1703 * Clear the IO-APIC before rebooting:
1633 */ 1704 */
@@ -1638,8 +1709,7 @@ void disable_IO_APIC(void)
1638 * Put that IOAPIC in virtual wire mode 1709 * Put that IOAPIC in virtual wire mode
1639 * so legacy interrupts can be delivered. 1710 * so legacy interrupts can be delivered.
1640 */ 1711 */
1641 pin = find_isa_irq_pin(0, mp_ExtINT); 1712 if (ioapic_i8259.pin != -1) {
1642 if (pin != -1) {
1643 struct IO_APIC_route_entry entry; 1713 struct IO_APIC_route_entry entry;
1644 unsigned long flags; 1714 unsigned long flags;
1645 1715
@@ -1650,7 +1720,7 @@ void disable_IO_APIC(void)
1650 entry.polarity = 0; /* High */ 1720 entry.polarity = 0; /* High */
1651 entry.delivery_status = 0; 1721 entry.delivery_status = 0;
1652 entry.dest_mode = 0; /* Physical */ 1722 entry.dest_mode = 0; /* Physical */
1653 entry.delivery_mode = 7; /* ExtInt */ 1723 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1654 entry.vector = 0; 1724 entry.vector = 0;
1655 entry.dest.physical.physical_dest = 0; 1725 entry.dest.physical.physical_dest = 0;
1656 1726
@@ -1659,11 +1729,13 @@ void disable_IO_APIC(void)
1659 * Add it to the IO-APIC irq-routing table: 1729 * Add it to the IO-APIC irq-routing table:
1660 */ 1730 */
1661 spin_lock_irqsave(&ioapic_lock, flags); 1731 spin_lock_irqsave(&ioapic_lock, flags);
1662 io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1)); 1732 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1663 io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0)); 1733 *(((int *)&entry)+1));
1734 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1735 *(((int *)&entry)+0));
1664 spin_unlock_irqrestore(&ioapic_lock, flags); 1736 spin_unlock_irqrestore(&ioapic_lock, flags);
1665 } 1737 }
1666 disconnect_bsp_APIC(pin != -1); 1738 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1667} 1739}
1668 1740
1669/* 1741/*
@@ -2113,20 +2185,21 @@ static void setup_nmi (void)
2113 */ 2185 */
2114static inline void unlock_ExtINT_logic(void) 2186static inline void unlock_ExtINT_logic(void)
2115{ 2187{
2116 int pin, i; 2188 int apic, pin, i;
2117 struct IO_APIC_route_entry entry0, entry1; 2189 struct IO_APIC_route_entry entry0, entry1;
2118 unsigned char save_control, save_freq_select; 2190 unsigned char save_control, save_freq_select;
2119 unsigned long flags; 2191 unsigned long flags;
2120 2192
2121 pin = find_isa_irq_pin(8, mp_INT); 2193 pin = find_isa_irq_pin(8, mp_INT);
2194 apic = find_isa_irq_apic(8, mp_INT);
2122 if (pin == -1) 2195 if (pin == -1)
2123 return; 2196 return;
2124 2197
2125 spin_lock_irqsave(&ioapic_lock, flags); 2198 spin_lock_irqsave(&ioapic_lock, flags);
2126 *(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin); 2199 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
2127 *(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin); 2200 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
2128 spin_unlock_irqrestore(&ioapic_lock, flags); 2201 spin_unlock_irqrestore(&ioapic_lock, flags);
2129 clear_IO_APIC_pin(0, pin); 2202 clear_IO_APIC_pin(apic, pin);
2130 2203
2131 memset(&entry1, 0, sizeof(entry1)); 2204 memset(&entry1, 0, sizeof(entry1));
2132 2205
@@ -2139,8 +2212,8 @@ static inline void unlock_ExtINT_logic(void)
2139 entry1.vector = 0; 2212 entry1.vector = 0;
2140 2213
2141 spin_lock_irqsave(&ioapic_lock, flags); 2214 spin_lock_irqsave(&ioapic_lock, flags);
2142 io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); 2215 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
2143 io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); 2216 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
2144 spin_unlock_irqrestore(&ioapic_lock, flags); 2217 spin_unlock_irqrestore(&ioapic_lock, flags);
2145 2218
2146 save_control = CMOS_READ(RTC_CONTROL); 2219 save_control = CMOS_READ(RTC_CONTROL);
@@ -2158,11 +2231,11 @@ static inline void unlock_ExtINT_logic(void)
2158 2231
2159 CMOS_WRITE(save_control, RTC_CONTROL); 2232 CMOS_WRITE(save_control, RTC_CONTROL);
2160 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2233 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2161 clear_IO_APIC_pin(0, pin); 2234 clear_IO_APIC_pin(apic, pin);
2162 2235
2163 spin_lock_irqsave(&ioapic_lock, flags); 2236 spin_lock_irqsave(&ioapic_lock, flags);
2164 io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); 2237 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
2165 io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); 2238 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
2166 spin_unlock_irqrestore(&ioapic_lock, flags); 2239 spin_unlock_irqrestore(&ioapic_lock, flags);
2167} 2240}
2168 2241
@@ -2174,7 +2247,7 @@ static inline void unlock_ExtINT_logic(void)
2174 */ 2247 */
2175static inline void check_timer(void) 2248static inline void check_timer(void)
2176{ 2249{
2177 int pin1, pin2; 2250 int apic1, pin1, apic2, pin2;
2178 int vector; 2251 int vector;
2179 2252
2180 /* 2253 /*
@@ -2196,10 +2269,13 @@ static inline void check_timer(void)
2196 timer_ack = 1; 2269 timer_ack = 1;
2197 enable_8259A_irq(0); 2270 enable_8259A_irq(0);
2198 2271
2199 pin1 = find_isa_irq_pin(0, mp_INT); 2272 pin1 = find_isa_irq_pin(0, mp_INT);
2200 pin2 = find_isa_irq_pin(0, mp_ExtINT); 2273 apic1 = find_isa_irq_apic(0, mp_INT);
2274 pin2 = ioapic_i8259.pin;
2275 apic2 = ioapic_i8259.apic;
2201 2276
2202 printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2); 2277 printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
2278 vector, apic1, pin1, apic2, pin2);
2203 2279
2204 if (pin1 != -1) { 2280 if (pin1 != -1) {
2205 /* 2281 /*
@@ -2216,8 +2292,9 @@ static inline void check_timer(void)
2216 clear_IO_APIC_pin(0, pin1); 2292 clear_IO_APIC_pin(0, pin1);
2217 return; 2293 return;
2218 } 2294 }
2219 clear_IO_APIC_pin(0, pin1); 2295 clear_IO_APIC_pin(apic1, pin1);
2220 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n"); 2296 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
2297 "IO-APIC\n");
2221 } 2298 }
2222 2299
2223 printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); 2300 printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
@@ -2226,13 +2303,13 @@ static inline void check_timer(void)
2226 /* 2303 /*
2227 * legacy devices should be connected to IO APIC #0 2304 * legacy devices should be connected to IO APIC #0
2228 */ 2305 */
2229 setup_ExtINT_IRQ0_pin(pin2, vector); 2306 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
2230 if (timer_irq_works()) { 2307 if (timer_irq_works()) {
2231 printk("works.\n"); 2308 printk("works.\n");
2232 if (pin1 != -1) 2309 if (pin1 != -1)
2233 replace_pin_at_irq(0, 0, pin1, 0, pin2); 2310 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2234 else 2311 else
2235 add_pin_to_irq(0, 0, pin2); 2312 add_pin_to_irq(0, apic2, pin2);
2236 if (nmi_watchdog == NMI_IO_APIC) { 2313 if (nmi_watchdog == NMI_IO_APIC) {
2237 setup_nmi(); 2314 setup_nmi();
2238 } 2315 }
@@ -2241,7 +2318,7 @@ static inline void check_timer(void)
2241 /* 2318 /*
2242 * Cleanup, just in case ... 2319 * Cleanup, just in case ...
2243 */ 2320 */
2244 clear_IO_APIC_pin(0, pin2); 2321 clear_IO_APIC_pin(apic2, pin2);
2245 } 2322 }
2246 printk(" failed.\n"); 2323 printk(" failed.\n");
2247 2324
@@ -2310,11 +2387,15 @@ void __init setup_IO_APIC(void)
2310 sync_Arb_IDs(); 2387 sync_Arb_IDs();
2311 setup_IO_APIC_irqs(); 2388 setup_IO_APIC_irqs();
2312 init_IO_APIC_traps(); 2389 init_IO_APIC_traps();
2313 check_timer();
2314 if (!acpi_ioapic) 2390 if (!acpi_ioapic)
2315 print_IO_APIC(); 2391 print_IO_APIC();
2316} 2392}
2317 2393
2394void __init IO_APIC_late_time_init(void)
2395{
2396 check_timer();
2397}
2398
2318/* 2399/*
 2319 * Called after all the initialization is done. If we didn't find any 2400
2320 * APIC bugs then we can allow the modify fast path 2401 * APIC bugs then we can allow the modify fast path
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index ce66dcc26d90..1a201a932865 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -218,7 +218,7 @@ int show_interrupts(struct seq_file *p, void *v)
218 218
219 if (i == 0) { 219 if (i == 0) {
220 seq_printf(p, " "); 220 seq_printf(p, " ");
221 for_each_cpu(j) 221 for_each_online_cpu(j)
222 seq_printf(p, "CPU%d ",j); 222 seq_printf(p, "CPU%d ",j);
223 seq_putc(p, '\n'); 223 seq_putc(p, '\n');
224 } 224 }
@@ -232,7 +232,7 @@ int show_interrupts(struct seq_file *p, void *v)
232#ifndef CONFIG_SMP 232#ifndef CONFIG_SMP
233 seq_printf(p, "%10u ", kstat_irqs(i)); 233 seq_printf(p, "%10u ", kstat_irqs(i));
234#else 234#else
235 for_each_cpu(j) 235 for_each_online_cpu(j)
236 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 236 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
237#endif 237#endif
238 seq_printf(p, " %14s", irq_desc[i].handler->typename); 238 seq_printf(p, " %14s", irq_desc[i].handler->typename);
@@ -246,12 +246,12 @@ skip:
246 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 246 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
247 } else if (i == NR_IRQS) { 247 } else if (i == NR_IRQS) {
248 seq_printf(p, "NMI: "); 248 seq_printf(p, "NMI: ");
249 for_each_cpu(j) 249 for_each_online_cpu(j)
250 seq_printf(p, "%10u ", nmi_count(j)); 250 seq_printf(p, "%10u ", nmi_count(j));
251 seq_putc(p, '\n'); 251 seq_putc(p, '\n');
252#ifdef CONFIG_X86_LOCAL_APIC 252#ifdef CONFIG_X86_LOCAL_APIC
253 seq_printf(p, "LOC: "); 253 seq_printf(p, "LOC: ");
254 for_each_cpu(j) 254 for_each_online_cpu(j)
255 seq_printf(p, "%10u ", 255 seq_printf(p, "%10u ",
256 per_cpu(irq_stat,j).apic_timer_irqs); 256 per_cpu(irq_stat,j).apic_timer_irqs);
257 seq_putc(p, '\n'); 257 seq_putc(p, '\n');
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 27aabfceb67e..8f767d9aa45d 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -69,7 +69,7 @@ unsigned int def_to_bigsmp = 0;
69/* Processor that is doing the boot up */ 69/* Processor that is doing the boot up */
70unsigned int boot_cpu_physical_apicid = -1U; 70unsigned int boot_cpu_physical_apicid = -1U;
71/* Internal processor count */ 71/* Internal processor count */
72static unsigned int __initdata num_processors; 72static unsigned int __devinitdata num_processors;
73 73
74/* Bitmask of physically existing CPUs */ 74/* Bitmask of physically existing CPUs */
75physid_mask_t phys_cpu_present_map; 75physid_mask_t phys_cpu_present_map;
@@ -119,7 +119,7 @@ static int MP_valid_apicid(int apicid, int version)
119} 119}
120#endif 120#endif
121 121
122static void __init MP_processor_info (struct mpc_config_processor *m) 122static void __devinit MP_processor_info (struct mpc_config_processor *m)
123{ 123{
124 int ver, apicid; 124 int ver, apicid;
125 physid_mask_t phys_cpu; 125 physid_mask_t phys_cpu;
@@ -182,17 +182,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
182 boot_cpu_physical_apicid = m->mpc_apicid; 182 boot_cpu_physical_apicid = m->mpc_apicid;
183 } 183 }
184 184
185 if (num_processors >= NR_CPUS) {
186 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
187 " Processor ignored.\n", NR_CPUS);
188 return;
189 }
190
191 if (num_processors >= maxcpus) {
192 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
193 " Processor ignored.\n", maxcpus);
194 return;
195 }
196 ver = m->mpc_apicver; 185 ver = m->mpc_apicver;
197 186
198 if (!MP_valid_apicid(apicid, ver)) { 187 if (!MP_valid_apicid(apicid, ver)) {
@@ -201,11 +190,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
201 return; 190 return;
202 } 191 }
203 192
204 cpu_set(num_processors, cpu_possible_map);
205 num_processors++;
206 phys_cpu = apicid_to_cpu_present(apicid);
207 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
208
209 /* 193 /*
210 * Validate version 194 * Validate version
211 */ 195 */
@@ -216,6 +200,25 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
216 ver = 0x10; 200 ver = 0x10;
217 } 201 }
218 apic_version[m->mpc_apicid] = ver; 202 apic_version[m->mpc_apicid] = ver;
203
204 phys_cpu = apicid_to_cpu_present(apicid);
205 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
206
207 if (num_processors >= NR_CPUS) {
208 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
209 " Processor ignored.\n", NR_CPUS);
210 return;
211 }
212
213 if (num_processors >= maxcpus) {
214 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
215 " Processor ignored.\n", maxcpus);
216 return;
217 }
218
219 cpu_set(num_processors, cpu_possible_map);
220 num_processors++;
221
219 if ((num_processors > 8) && 222 if ((num_processors > 8) &&
220 APIC_XAPIC(ver) && 223 APIC_XAPIC(ver) &&
221 (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) 224 (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL))
@@ -834,7 +837,7 @@ void __init mp_register_lapic_address (
834} 837}
835 838
836 839
837void __init mp_register_lapic ( 840void __devinit mp_register_lapic (
838 u8 id, 841 u8 id,
839 u8 enabled) 842 u8 enabled)
840{ 843{
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 72515b8a1b12..d661703ac1cb 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -100,16 +100,44 @@ int nmi_active;
100 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ 100 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
101 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) 101 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
102 102
103#ifdef CONFIG_SMP
104/* The performance counters used by NMI_LOCAL_APIC don't trigger when
105 * the CPU is idle. To make sure the NMI watchdog really ticks on all
106 * CPUs during the test make them busy.
107 */
108static __init void nmi_cpu_busy(void *data)
109{
110 volatile int *endflag = data;
111 local_irq_enable();
112 /* Intentionally don't use cpu_relax here. This is
113 to make sure that the performance counter really ticks,
114 even if there is a simulator or similar that catches the
115 pause instruction. On a real HT machine this is fine because
116 all other CPUs are busy with "useless" delay loops and don't
 117 care if they get somewhat fewer cycles. */
118 while (*endflag == 0)
119 barrier();
120}
121#endif
122
103static int __init check_nmi_watchdog(void) 123static int __init check_nmi_watchdog(void)
104{ 124{
105 unsigned int prev_nmi_count[NR_CPUS]; 125 volatile int endflag = 0;
126 unsigned int *prev_nmi_count;
106 int cpu; 127 int cpu;
107 128
108 if (nmi_watchdog == NMI_NONE) 129 if (nmi_watchdog == NMI_NONE)
109 return 0; 130 return 0;
110 131
132 prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
133 if (!prev_nmi_count)
134 return -1;
135
111 printk(KERN_INFO "Testing NMI watchdog ... "); 136 printk(KERN_INFO "Testing NMI watchdog ... ");
112 137
138 if (nmi_watchdog == NMI_LOCAL_APIC)
139 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
140
113 for (cpu = 0; cpu < NR_CPUS; cpu++) 141 for (cpu = 0; cpu < NR_CPUS; cpu++)
114 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; 142 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
115 local_irq_enable(); 143 local_irq_enable();
@@ -123,12 +151,18 @@ static int __init check_nmi_watchdog(void)
123 continue; 151 continue;
124#endif 152#endif
125 if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { 153 if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
126 printk("CPU#%d: NMI appears to be stuck!\n", cpu); 154 endflag = 1;
155 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
156 cpu,
157 prev_nmi_count[cpu],
158 nmi_count(cpu));
127 nmi_active = 0; 159 nmi_active = 0;
128 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG; 160 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
161 kfree(prev_nmi_count);
129 return -1; 162 return -1;
130 } 163 }
131 } 164 }
165 endflag = 1;
132 printk("OK.\n"); 166 printk("OK.\n");
133 167
134 /* now that we know it works we can reduce NMI frequency to 168 /* now that we know it works we can reduce NMI frequency to
@@ -136,6 +170,7 @@ static int __init check_nmi_watchdog(void)
136 if (nmi_watchdog == NMI_LOCAL_APIC) 170 if (nmi_watchdog == NMI_LOCAL_APIC)
137 nmi_hz = 1; 171 nmi_hz = 1;
138 172
173 kfree(prev_nmi_count);
139 return 0; 174 return 0;
140} 175}
141/* This needs to happen later in boot so counters are working */ 176/* This needs to happen later in boot so counters are working */
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 7b6368bf8974..efd11f09c996 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -354,7 +354,7 @@ ptrace_set_thread_area(struct task_struct *child,
354 return 0; 354 return 0;
355} 355}
356 356
357asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 357asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
358{ 358{
359 struct task_struct *child; 359 struct task_struct *child;
360 struct user * dummy = NULL; 360 struct user * dummy = NULL;
diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c
index 1b183b378c2c..c9b87330aeea 100644
--- a/arch/i386/kernel/reboot_fixups.c
+++ b/arch/i386/kernel/reboot_fixups.c
@@ -44,7 +44,7 @@ void mach_reboot_fixups(void)
44 44
45 for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) { 45 for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) {
46 cur = &(fixups_table[i]); 46 cur = &(fixups_table[i]);
47 dev = pci_get_device(cur->vendor, cur->device, 0); 47 dev = pci_get_device(cur->vendor, cur->device, NULL);
48 if (!dev) 48 if (!dev)
49 continue; 49 continue;
50 50
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 9b8c8a19824d..b48ac635f3c1 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -389,14 +389,24 @@ static void __init limit_regions(unsigned long long size)
389 } 389 }
390 } 390 }
391 for (i = 0; i < e820.nr_map; i++) { 391 for (i = 0; i < e820.nr_map; i++) {
392 if (e820.map[i].type == E820_RAM) { 392 current_addr = e820.map[i].addr + e820.map[i].size;
393 current_addr = e820.map[i].addr + e820.map[i].size; 393 if (current_addr < size)
394 if (current_addr >= size) { 394 continue;
395 e820.map[i].size -= current_addr-size; 395
396 e820.nr_map = i + 1; 396 if (e820.map[i].type != E820_RAM)
397 return; 397 continue;
398 } 398
399 if (e820.map[i].addr >= size) {
400 /*
401 * This region starts past the end of the
402 * requested size, skip it completely.
403 */
404 e820.nr_map = i;
405 } else {
406 e820.nr_map = i + 1;
407 e820.map[i].size -= current_addr - size;
399 } 408 }
409 return;
400 } 410 }
401} 411}
402 412
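The restructured limit_regions() above walks the e820 map until it finds the first RAM entry that reaches past the requested size; if that entry also starts past the limit it is dropped along with everything after it, otherwise it is shrunk and the map is cut off just behind it. The following is a small user-space sketch of the same truncation logic on a toy map, using a simplified entry layout rather than the real e820 structures:

#include <stdio.h>

#define TOY_RAM 1

struct toy_entry { unsigned long long addr, size; int type; };

/* Truncate a toy memory map so that usable RAM ends at 'limit'. */
static void toy_limit_regions(struct toy_entry *map, int *nr,
			      unsigned long long limit)
{
	int i;

	for (i = 0; i < *nr; i++) {
		unsigned long long end = map[i].addr + map[i].size;

		if (end < limit || map[i].type != TOY_RAM)
			continue;
		if (map[i].addr >= limit) {
			*nr = i;	/* starts past the limit: drop it and the rest */
		} else {
			map[i].size -= end - limit;	/* shrink to the limit */
			*nr = i + 1;
		}
		return;
	}
}

int main(void)
{
	struct toy_entry map[] = {
		{ 0x00000000ULL, 0x0009f000ULL, TOY_RAM },
		{ 0x00100000ULL, 0x3ff00000ULL, TOY_RAM },	/* 1MB..1GB */
	};
	int nr = 2;

	toy_limit_regions(map, &nr, 0x20000000ULL);	/* cap RAM at 512MB */
	for (int i = 0; i < nr; i++)
		printf("%#llx + %#llx\n", map[i].addr, map[i].size);
	return 0;
}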
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 1fb26d0e30b6..5a2bbe0c4fff 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -87,7 +87,11 @@ EXPORT_SYMBOL(cpu_online_map);
87cpumask_t cpu_callin_map; 87cpumask_t cpu_callin_map;
88cpumask_t cpu_callout_map; 88cpumask_t cpu_callout_map;
89EXPORT_SYMBOL(cpu_callout_map); 89EXPORT_SYMBOL(cpu_callout_map);
90#ifdef CONFIG_HOTPLUG_CPU
91cpumask_t cpu_possible_map = CPU_MASK_ALL;
92#else
90cpumask_t cpu_possible_map; 93cpumask_t cpu_possible_map;
94#endif
91EXPORT_SYMBOL(cpu_possible_map); 95EXPORT_SYMBOL(cpu_possible_map);
92static cpumask_t smp_commenced_mask; 96static cpumask_t smp_commenced_mask;
93 97
@@ -1074,6 +1078,16 @@ void *xquad_portio;
1074EXPORT_SYMBOL(xquad_portio); 1078EXPORT_SYMBOL(xquad_portio);
1075#endif 1079#endif
1076 1080
1081/*
 1082 * Fall back to non-SMP mode after errors.
1083 *
1084 */
1085static __init void disable_smp(void)
1086{
1087 cpu_set(0, cpu_sibling_map[0]);
1088 cpu_set(0, cpu_core_map[0]);
1089}
1090
1077static void __init smp_boot_cpus(unsigned int max_cpus) 1091static void __init smp_boot_cpus(unsigned int max_cpus)
1078{ 1092{
1079 int apicid, cpu, bit, kicked; 1093 int apicid, cpu, bit, kicked;
@@ -1086,7 +1100,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1086 printk("CPU%d: ", 0); 1100 printk("CPU%d: ", 0);
1087 print_cpu_info(&cpu_data[0]); 1101 print_cpu_info(&cpu_data[0]);
1088 1102
1089 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
1090 boot_cpu_logical_apicid = logical_smp_processor_id(); 1103 boot_cpu_logical_apicid = logical_smp_processor_id();
1091 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; 1104 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
1092 1105
@@ -1098,68 +1111,27 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1098 cpus_clear(cpu_core_map[0]); 1111 cpus_clear(cpu_core_map[0]);
1099 cpu_set(0, cpu_core_map[0]); 1112 cpu_set(0, cpu_core_map[0]);
1100 1113
1114 map_cpu_to_logical_apicid();
1115
1101 /* 1116 /*
1102 * If we couldn't find an SMP configuration at boot time, 1117 * If we couldn't find an SMP configuration at boot time,
1103 * get out of here now! 1118 * get out of here now!
1104 */ 1119 */
1105 if (!smp_found_config && !acpi_lapic) { 1120 if (!smp_found_config && !acpi_lapic) {
1106 printk(KERN_NOTICE "SMP motherboard not detected.\n"); 1121 printk(KERN_NOTICE "SMP motherboard not detected.\n");
1107 smpboot_clear_io_apic_irqs(); 1122 disable_smp();
1108 phys_cpu_present_map = physid_mask_of_physid(0);
1109 if (APIC_init_uniprocessor())
1110 printk(KERN_NOTICE "Local APIC not detected."
1111 " Using dummy APIC emulation.\n");
1112 map_cpu_to_logical_apicid();
1113 cpu_set(0, cpu_sibling_map[0]);
1114 cpu_set(0, cpu_core_map[0]);
1115 return; 1123 return;
1116 } 1124 }
1117 1125
1118 /* 1126 /*
1119 * Should not be necessary because the MP table should list the boot
1120 * CPU too, but we do it for the sake of robustness anyway.
1121 * Makes no sense to do this check in clustered apic mode, so skip it
1122 */
1123 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1124 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
1125 boot_cpu_physical_apicid);
1126 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1127 }
1128
1129 /*
1130 * If we couldn't find a local APIC, then get out of here now!
1131 */
1132 if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
1133 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1134 boot_cpu_physical_apicid);
1135 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
1136 smpboot_clear_io_apic_irqs();
1137 phys_cpu_present_map = physid_mask_of_physid(0);
1138 cpu_set(0, cpu_sibling_map[0]);
1139 cpu_set(0, cpu_core_map[0]);
1140 return;
1141 }
1142
1143 verify_local_APIC();
1144
1145 /*
1146 * If SMP should be disabled, then really disable it! 1127 * If SMP should be disabled, then really disable it!
1147 */ 1128 */
1148 if (!max_cpus) { 1129 if (!max_cpus || (enable_local_apic < 0)) {
1149 smp_found_config = 0; 1130 printk(KERN_INFO "SMP mode deactivated.\n");
1150 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); 1131 disable_smp();
1151 smpboot_clear_io_apic_irqs();
1152 phys_cpu_present_map = physid_mask_of_physid(0);
1153 cpu_set(0, cpu_sibling_map[0]);
1154 cpu_set(0, cpu_core_map[0]);
1155 return; 1132 return;
1156 } 1133 }
1157 1134
1158 connect_bsp_APIC();
1159 setup_local_APIC();
1160 map_cpu_to_logical_apicid();
1161
1162
1163 setup_portio_remap(); 1135 setup_portio_remap();
1164 1136
1165 /* 1137 /*
@@ -1240,10 +1212,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1240 cpu_set(0, cpu_sibling_map[0]); 1212 cpu_set(0, cpu_sibling_map[0]);
1241 cpu_set(0, cpu_core_map[0]); 1213 cpu_set(0, cpu_core_map[0]);
1242 1214
1243 smpboot_setup_io_apic();
1244
1245 setup_boot_APIC_clock();
1246
1247 /* 1215 /*
1248 * Synchronize the TSC with the AP 1216 * Synchronize the TSC with the AP
1249 */ 1217 */
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index 516bf5653b02..8de658db8146 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -327,7 +327,12 @@ int __init get_memcfg_from_srat(void)
327 int tables = 0; 327 int tables = 0;
328 int i = 0; 328 int i = 0;
329 329
330 acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, rsdp_address); 330 if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING,
331 rsdp_address))) {
332 printk("%s: System description tables not found\n",
333 __FUNCTION__);
334 goto out_err;
335 }
331 336
332 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { 337 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) {
333 printk("%s: assigning address to rsdp\n", __FUNCTION__); 338 printk("%s: assigning address to rsdp\n", __FUNCTION__);
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 2883a4d4f01f..07471bba2dc6 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -74,10 +74,6 @@ int pit_latch_buggy; /* extern */
74 74
75#include "do_timer.h" 75#include "do_timer.h"
76 76
77u64 jiffies_64 = INITIAL_JIFFIES;
78
79EXPORT_SYMBOL(jiffies_64);
80
81unsigned int cpu_khz; /* Detected as we calibrate the TSC */ 77unsigned int cpu_khz; /* Detected as we calibrate the TSC */
82EXPORT_SYMBOL(cpu_khz); 78EXPORT_SYMBOL(cpu_khz);
83 79
@@ -444,8 +440,8 @@ static int time_init_device(void)
444 440
445device_initcall(time_init_device); 441device_initcall(time_init_device);
446 442
447#ifdef CONFIG_HPET_TIMER
448extern void (*late_time_init)(void); 443extern void (*late_time_init)(void);
444#ifdef CONFIG_HPET_TIMER
449/* Duplicate of time_init() below, with hpet_enable part added */ 445/* Duplicate of time_init() below, with hpet_enable part added */
450static void __init hpet_time_init(void) 446static void __init hpet_time_init(void)
451{ 447{
@@ -462,6 +458,11 @@ static void __init hpet_time_init(void)
462 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); 458 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
463 459
464 time_init_hook(); 460 time_init_hook();
461
462#ifdef CONFIG_X86_LOCAL_APIC
463 if (enable_local_apic >= 0)
464 APIC_late_time_init();
465#endif
465} 466}
466#endif 467#endif
467 468
@@ -486,4 +487,9 @@ void __init time_init(void)
486 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); 487 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
487 488
488 time_init_hook(); 489 time_init_hook();
490
491#ifdef CONFIG_X86_LOCAL_APIC
492 if (enable_local_apic >= 0)
493 late_time_init = APIC_late_time_init;
494#endif
489} 495}
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 658c0629ba6a..9caeaa315cd7 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -275,6 +275,7 @@ static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
275static unsigned long PIE_count; 275static unsigned long PIE_count;
276 276
277static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ 277static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
278static unsigned int hpet_t1_cmp; /* cached comparator register */
278 279
279/* 280/*
280 * Timer 1 for RTC, we do not use periodic interrupt feature, 281 * Timer 1 for RTC, we do not use periodic interrupt feature,
@@ -306,10 +307,12 @@ int hpet_rtc_timer_init(void)
306 cnt = hpet_readl(HPET_COUNTER); 307 cnt = hpet_readl(HPET_COUNTER);
307 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); 308 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
308 hpet_writel(cnt, HPET_T1_CMP); 309 hpet_writel(cnt, HPET_T1_CMP);
310 hpet_t1_cmp = cnt;
309 local_irq_restore(flags); 311 local_irq_restore(flags);
310 312
311 cfg = hpet_readl(HPET_T1_CFG); 313 cfg = hpet_readl(HPET_T1_CFG);
312 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT; 314 cfg &= ~HPET_TN_PERIODIC;
315 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
313 hpet_writel(cfg, HPET_T1_CFG); 316 hpet_writel(cfg, HPET_T1_CFG);
314 317
315 return 1; 318 return 1;
@@ -319,8 +322,12 @@ static void hpet_rtc_timer_reinit(void)
319{ 322{
320 unsigned int cfg, cnt; 323 unsigned int cfg, cnt;
321 324
322 if (!(PIE_on | AIE_on | UIE_on)) 325 if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
326 cfg = hpet_readl(HPET_T1_CFG);
327 cfg &= ~HPET_TN_ENABLE;
328 hpet_writel(cfg, HPET_T1_CFG);
323 return; 329 return;
330 }
324 331
325 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) 332 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
326 hpet_rtc_int_freq = PIE_freq; 333 hpet_rtc_int_freq = PIE_freq;
@@ -328,15 +335,10 @@ static void hpet_rtc_timer_reinit(void)
328 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; 335 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
329 336
330 /* It is more accurate to use the comparator value than current count.*/ 337 /* It is more accurate to use the comparator value than current count.*/
331 cnt = hpet_readl(HPET_T1_CMP); 338 cnt = hpet_t1_cmp;
332 cnt += hpet_tick*HZ/hpet_rtc_int_freq; 339 cnt += hpet_tick*HZ/hpet_rtc_int_freq;
333 hpet_writel(cnt, HPET_T1_CMP); 340 hpet_writel(cnt, HPET_T1_CMP);
334 341 hpet_t1_cmp = cnt;
335 cfg = hpet_readl(HPET_T1_CFG);
336 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
337 hpet_writel(cfg, HPET_T1_CFG);
338
339 return;
340} 342}
341 343
342/* 344/*
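The time_hpet.c changes above keep a software copy of the last value written to the timer-1 comparator (hpet_t1_cmp) and advance from that copy in hpet_rtc_timer_reinit(), instead of reading HPET_T1_CMP back over MMIO each time; they also stop rewriting the config register on every tick and explicitly disable the timer when no RTC interrupt source is active. Below is a generic sketch of the shadow-register idea; the timer1_arm_* helpers and the delta parameter are invented for illustration around the existing hpet_readl()/hpet_writel() accessors.

static unsigned int t1_cmp_shadow;	/* last comparator value we programmed */

static void timer1_arm_first(unsigned int delta)
{
	unsigned int cnt = hpet_readl(HPET_COUNTER) + delta;

	hpet_writel(cnt, HPET_T1_CMP);
	t1_cmp_shadow = cnt;		/* remember what was written */
}

static void timer1_arm_next(unsigned int delta)
{
	/* Advance from the shadow; no MMIO read-back of HPET_T1_CMP needed. */
	unsigned int cnt = t1_cmp_shadow + delta;

	hpet_writel(cnt, HPET_T1_CMP);
	t1_cmp_shadow = cnt;
}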
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index d973a8b681fd..be242723c339 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -30,23 +30,28 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
30 * basic equation: 30 * basic equation:
31 * ns = cycles / (freq / ns_per_sec) 31 * ns = cycles / (freq / ns_per_sec)
32 * ns = cycles * (ns_per_sec / freq) 32 * ns = cycles * (ns_per_sec / freq)
33 * ns = cycles * (10^9 / (cpu_mhz * 10^6)) 33 * ns = cycles * (10^9 / (cpu_khz * 10^3))
34 * ns = cycles * (10^3 / cpu_mhz) 34 * ns = cycles * (10^6 / cpu_khz)
35 * 35 *
36 * Then we use scaling math (suggested by george@mvista.com) to get: 36 * Then we use scaling math (suggested by george@mvista.com) to get:
37 * ns = cycles * (10^3 * SC / cpu_mhz) / SC 37 * ns = cycles * (10^6 * SC / cpu_khz) / SC
38 * ns = cycles * cyc2ns_scale / SC 38 * ns = cycles * cyc2ns_scale / SC
39 * 39 *
40 * And since SC is a constant power of two, we can convert the div 40 * And since SC is a constant power of two, we can convert the div
41 * into a shift. 41 * into a shift.
42 *
 43 * We can use a khz divisor instead of mhz to keep better precision, since
44 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
45 * (mathieu.desnoyers@polymtl.ca)
46 *
42 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 47 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
43 */ 48 */
44static unsigned long cyc2ns_scale; 49static unsigned long cyc2ns_scale;
45#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 50#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
46 51
47static inline void set_cyc2ns_scale(unsigned long cpu_mhz) 52static inline void set_cyc2ns_scale(unsigned long cpu_khz)
48{ 53{
49 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; 54 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
50} 55}
51 56
52static inline unsigned long long cycles_2_ns(unsigned long long cyc) 57static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -163,7 +168,7 @@ static int __init init_hpet(char* override)
163 printk("Detected %u.%03u MHz processor.\n", 168 printk("Detected %u.%03u MHz processor.\n",
164 cpu_khz / 1000, cpu_khz % 1000); 169 cpu_khz / 1000, cpu_khz % 1000);
165 } 170 }
166 set_cyc2ns_scale(cpu_khz/1000); 171 set_cyc2ns_scale(cpu_khz);
167 } 172 }
168 /* set this only when cpu_has_tsc */ 173 /* set this only when cpu_has_tsc */
169 timer_hpet.read_timer = read_timer_tsc; 174 timer_hpet.read_timer = read_timer_tsc;
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 6dd470cc9f72..d395e3b42485 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -49,23 +49,28 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
49 * basic equation: 49 * basic equation:
50 * ns = cycles / (freq / ns_per_sec) 50 * ns = cycles / (freq / ns_per_sec)
51 * ns = cycles * (ns_per_sec / freq) 51 * ns = cycles * (ns_per_sec / freq)
52 * ns = cycles * (10^9 / (cpu_mhz * 10^6)) 52 * ns = cycles * (10^9 / (cpu_khz * 10^3))
53 * ns = cycles * (10^3 / cpu_mhz) 53 * ns = cycles * (10^6 / cpu_khz)
54 * 54 *
55 * Then we use scaling math (suggested by george@mvista.com) to get: 55 * Then we use scaling math (suggested by george@mvista.com) to get:
56 * ns = cycles * (10^3 * SC / cpu_mhz) / SC 56 * ns = cycles * (10^6 * SC / cpu_khz) / SC
57 * ns = cycles * cyc2ns_scale / SC 57 * ns = cycles * cyc2ns_scale / SC
58 * 58 *
59 * And since SC is a constant power of two, we can convert the div 59 * And since SC is a constant power of two, we can convert the div
60 * into a shift. 60 * into a shift.
61 *
 62 * We can use a khz divisor instead of mhz to keep better precision, since
63 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
64 * (mathieu.desnoyers@polymtl.ca)
65 *
61 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 66 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
62 */ 67 */
63static unsigned long cyc2ns_scale; 68static unsigned long cyc2ns_scale;
64#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 69#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
65 70
66static inline void set_cyc2ns_scale(unsigned long cpu_mhz) 71static inline void set_cyc2ns_scale(unsigned long cpu_khz)
67{ 72{
68 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; 73 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
69} 74}
70 75
71static inline unsigned long long cycles_2_ns(unsigned long long cyc) 76static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -286,7 +291,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
286 if (use_tsc) { 291 if (use_tsc) {
287 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { 292 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
288 fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq); 293 fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
289 set_cyc2ns_scale(cpu_khz/1000); 294 set_cyc2ns_scale(cpu_khz);
290 } 295 }
291 } 296 }
292#endif 297#endif
@@ -536,7 +541,7 @@ static int __init init_tsc(char* override)
536 printk("Detected %u.%03u MHz processor.\n", 541 printk("Detected %u.%03u MHz processor.\n",
537 cpu_khz / 1000, cpu_khz % 1000); 542 cpu_khz / 1000, cpu_khz % 1000);
538 } 543 }
539 set_cyc2ns_scale(cpu_khz/1000); 544 set_cyc2ns_scale(cpu_khz);
540 return 0; 545 return 0;
541 } 546 }
542 } 547 }
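Both cyc2ns comment blocks above derive ns = cycles * (10^6 * SC / cpu_khz) / SC with SC = 2^10, and the accompanying code change passes cpu_khz instead of cpu_khz/1000, so the measured frequency is no longer truncated to whole MHz before the scale is computed; the numerator 10^6 << 10 still fits in 32 bits. A small stand-alone check of the arithmetic, assuming a 2.4 GHz part:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10		/* SC = 2^10 */

int main(void)
{
	unsigned long cpu_khz = 2400000;	/* assume a 2.4 GHz CPU */
	unsigned long cyc2ns_scale =
		(1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
	unsigned long long cycles = 2400000000ULL;	/* one second of cycles */

	/* ns = cycles * cyc2ns_scale >> SC; expect roughly 10^9 ns. */
	printf("scale=%lu ns=%llu\n", cyc2ns_scale,
	       (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR);
	return 0;
}

For one second's worth of cycles the result comes out a fraction of a percent under 10^9 ns; the residual error is the truncation of cyc2ns_scale to an integer, not a truncation of the frequency itself.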
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 19e90bdd84ea..c34d1bfc5161 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -488,6 +488,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
488 tss->io_bitmap_max - thread->io_bitmap_max); 488 tss->io_bitmap_max - thread->io_bitmap_max);
489 tss->io_bitmap_max = thread->io_bitmap_max; 489 tss->io_bitmap_max = thread->io_bitmap_max;
490 tss->io_bitmap_base = IO_BITMAP_OFFSET; 490 tss->io_bitmap_base = IO_BITMAP_OFFSET;
491 tss->io_bitmap_owner = thread;
491 put_cpu(); 492 put_cpu();
492 return; 493 return;
493 } 494 }
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
index 898ed905e119..f1e3204f5dec 100644
--- a/arch/i386/mach-es7000/es7000.h
+++ b/arch/i386/mach-es7000/es7000.h
@@ -24,6 +24,15 @@
24 * http://www.unisys.com 24 * http://www.unisys.com
25 */ 25 */
26 26
27/*
28 * ES7000 chipsets
29 */
30
31#define NON_UNISYS 0
32#define ES7000_CLASSIC 1
33#define ES7000_ZORRO 2
34
35
27#define MIP_REG 1 36#define MIP_REG 1
28#define MIP_PSAI_REG 4 37#define MIP_PSAI_REG 4
29 38
@@ -106,6 +115,6 @@ struct mip_reg {
106 115
107extern int parse_unisys_oem (char *oemptr); 116extern int parse_unisys_oem (char *oemptr);
108extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); 117extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
109extern void setup_unisys (); 118extern void setup_unisys(void);
110extern int es7000_start_cpu(int cpu, unsigned long eip); 119extern int es7000_start_cpu(int cpu, unsigned long eip);
111extern void es7000_sw_apic(void); 120extern void es7000_sw_apic(void);
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
index dc6660511b07..a9ab0644f403 100644
--- a/arch/i386/mach-es7000/es7000plat.c
+++ b/arch/i386/mach-es7000/es7000plat.c
@@ -62,6 +62,9 @@ static unsigned int base;
62static int 62static int
63es7000_rename_gsi(int ioapic, int gsi) 63es7000_rename_gsi(int ioapic, int gsi)
64{ 64{
65 if (es7000_plat == ES7000_ZORRO)
66 return gsi;
67
65 if (!base) { 68 if (!base) {
66 int i; 69 int i;
67 for (i = 0; i < nr_ioapics; i++) 70 for (i = 0; i < nr_ioapics; i++)
@@ -76,7 +79,7 @@ es7000_rename_gsi(int ioapic, int gsi)
76#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */ 79#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */
77 80
78void __init 81void __init
79setup_unisys () 82setup_unisys(void)
80{ 83{
81 /* 84 /*
82 * Determine the generation of the ES7000 currently running. 85 * Determine the generation of the ES7000 currently running.
@@ -86,9 +89,9 @@ setup_unisys ()
86 * 89 *
87 */ 90 */
88 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2)) 91 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
89 es7000_plat = 2; 92 es7000_plat = ES7000_ZORRO;
90 else 93 else
91 es7000_plat = 1; 94 es7000_plat = ES7000_CLASSIC;
92 ioapic_renumber_irq = es7000_rename_gsi; 95 ioapic_renumber_irq = es7000_rename_gsi;
93} 96}
94 97
@@ -151,7 +154,7 @@ parse_unisys_oem (char *oemptr)
151 } 154 }
152 155
153 if (success < 2) { 156 if (success < 2) {
154 es7000_plat = 0; 157 es7000_plat = NON_UNISYS;
155 } else 158 } else
156 setup_unisys(); 159 setup_unisys();
157 return es7000_plat; 160 return es7000_plat;
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 9edd4485b91e..cf572d9a3b6e 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -108,7 +108,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
108 desc = (void *)desc + (seg & ~7); 108 desc = (void *)desc + (seg & ~7);
109 } else { 109 } else {
110 /* Must disable preemption while reading the GDT. */ 110 /* Must disable preemption while reading the GDT. */
111 desc = (u32 *)&per_cpu(cpu_gdt_table, get_cpu()); 111 desc = (u32 *)get_cpu_gdt_table(get_cpu());
112 desc = (void *)desc + (seg & ~7); 112 desc = (void *)desc + (seg & ~7);
113 } 113 }
114 114
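The fault.c hunk above replaces an open-coded per_cpu() lookup of the GDT with a helper, so callers stop depending on how the GDT is stored. Presumably get_cpu_gdt_table() is just a thin wrapper around the same per-CPU array; a plausible definition, stated as an assumption rather than taken from this patch:

	/* Hypothetical helper definition, mirroring the old open-coded lookup. */
	#define get_cpu_gdt_table(_cpu)  (per_cpu(cpu_gdt_table, _cpu))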
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index cddafe33ff7c..19e6f4871d1e 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -547,31 +547,48 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
547 return 0; 547 return 0;
548} 548}
549 549
550static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) 550static __init int via_router_probe(struct irq_router *r,
551 struct pci_dev *router, u16 device)
551{ 552{
552 /* FIXME: We should move some of the quirk fixup stuff here */ 553 /* FIXME: We should move some of the quirk fixup stuff here */
553 554
554 if (router->device == PCI_DEVICE_ID_VIA_82C686 && 555 /*
555 device == PCI_DEVICE_ID_VIA_82C586_0) { 556 * work arounds for some buggy BIOSes
556 /* Asus k7m bios wrongly reports 82C686A as 586-compatible */ 557 */
557 device = PCI_DEVICE_ID_VIA_82C686; 558 if (device == PCI_DEVICE_ID_VIA_82C586_0) {
559 switch(router->device) {
560 case PCI_DEVICE_ID_VIA_82C686:
561 /*
562 * Asus k7m bios wrongly reports 82C686A
563 * as 586-compatible
564 */
565 device = PCI_DEVICE_ID_VIA_82C686;
566 break;
567 case PCI_DEVICE_ID_VIA_8235:
 568			/*
569 * Asus a7v-x bios wrongly reports 8235
570 * as 586-compatible
571 */
572 device = PCI_DEVICE_ID_VIA_8235;
573 break;
574 }
558 } 575 }
559 576
560 switch(device) 577 switch(device) {
561 { 578 case PCI_DEVICE_ID_VIA_82C586_0:
562 case PCI_DEVICE_ID_VIA_82C586_0: 579 r->name = "VIA";
563 r->name = "VIA"; 580 r->get = pirq_via586_get;
564 r->get = pirq_via586_get; 581 r->set = pirq_via586_set;
565 r->set = pirq_via586_set; 582 return 1;
566 return 1; 583 case PCI_DEVICE_ID_VIA_82C596:
567 case PCI_DEVICE_ID_VIA_82C596: 584 case PCI_DEVICE_ID_VIA_82C686:
568 case PCI_DEVICE_ID_VIA_82C686: 585 case PCI_DEVICE_ID_VIA_8231:
569 case PCI_DEVICE_ID_VIA_8231: 586 case PCI_DEVICE_ID_VIA_8235:
570 /* FIXME: add new ones for 8233/5 */ 587 /* FIXME: add new ones for 8233/5 */
571 r->name = "VIA"; 588 r->name = "VIA";
572 r->get = pirq_via_get; 589 r->get = pirq_via_get;
573 r->set = pirq_via_set; 590 r->set = pirq_via_set;
574 return 1; 591 return 1;
575 } 592 }
576 return 0; 593 return 0;
577} 594}
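The rework of via_router_probe() above first normalizes the device ID that buggy Asus BIOSes report (an 82C686 or 8235 claiming to be 586-compatible) back to the router's real ID, then dispatches on the result. A condensed restatement of that normalization step; the helper name and factoring are hypothetical, the patch keeps the logic inline:

	#include <linux/types.h>
	#include <linux/pci_ids.h>

	static u16 via_normalize_id(u16 reported, u16 router_id)
	{
		/* Trust the router's own ID when the BIOS misreports the bridge. */
		if (reported == PCI_DEVICE_ID_VIA_82C586_0 &&
		    (router_id == PCI_DEVICE_ID_VIA_82C686 ||
		     router_id == PCI_DEVICE_ID_VIA_8235))
			return router_id;
		return reported;
	}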
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index b27c5acc79d0..1f1572692e0b 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -51,16 +51,14 @@ void save_processor_state(void)
51 __save_processor_state(&saved_context); 51 __save_processor_state(&saved_context);
52} 52}
53 53
54static void 54static void do_fpu_end(void)
55do_fpu_end(void)
56{ 55{
57 /* restore FPU regs if necessary */ 56 /*
58 /* Do it out of line so that gcc does not move cr0 load to some stupid place */ 57 * Restore FPU regs if necessary.
59 kernel_fpu_end(); 58 */
60 mxcsr_feature_mask_init(); 59 kernel_fpu_end();
61} 60}
62 61
63
64static void fix_processor_context(void) 62static void fix_processor_context(void)
65{ 63{
66 int cpu = smp_processor_id(); 64 int cpu = smp_processor_id();
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 3fa67ecebc83..dc282710421a 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -36,6 +36,7 @@
36#include <linux/uio.h> 36#include <linux/uio.h>
37#include <linux/nfs_fs.h> 37#include <linux/nfs_fs.h>
38#include <linux/quota.h> 38#include <linux/quota.h>
39#include <linux/syscalls.h>
39#include <linux/sunrpc/svc.h> 40#include <linux/sunrpc/svc.h>
40#include <linux/nfsd/nfsd.h> 41#include <linux/nfsd/nfsd.h>
41#include <linux/nfsd/cache.h> 42#include <linux/nfsd/cache.h>
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 768c7e46957c..6ade3790ce07 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -2,6 +2,7 @@
2#include <linux/smp.h> 2#include <linux/smp.h>
3#include <linux/time.h> 3#include <linux/time.h>
4#include <linux/errno.h> 4#include <linux/errno.h>
5#include <linux/timex.h>
5#include <asm/io.h> 6#include <asm/io.h>
6 7
7/* IBM Summit (EXA) Cyclone counter code*/ 8/* IBM Summit (EXA) Cyclone counter code*/
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 8b8a5a45b621..5b7e736f3b49 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -32,10 +32,6 @@
32 32
33extern unsigned long wall_jiffies; 33extern unsigned long wall_jiffies;
34 34
35u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
36
37EXPORT_SYMBOL(jiffies_64);
38
39#define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */ 35#define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */
40 36
41#ifdef CONFIG_IA64_DEBUG_IRQ 37#ifdef CONFIG_IA64_DEBUG_IRQ
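This hunk removes ia64's private definition of jiffies_64, and the m32r, m68k, m68knommu, mips and parisc time.c hunks further down do the same. They are presumably superseded by a single shared definition in generic code (most likely kernel/timer.c), roughly:

	/* Assumed common definition; it is not part of this diff. */
	u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
	EXPORT_SYMBOL(jiffies_64);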
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 85920fb8d08c..396c94218cc2 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -653,8 +653,6 @@ ENTRY(rie_handler)
653 SAVE_ALL 653 SAVE_ALL
654 mvfc r0, bpc 654 mvfc r0, bpc
655 ld r1, @r0 655 ld r1, @r0
656 seth r0, #0xa0f0
657 st r1, @r0
658 ldi r1, #0x20 ; error_code 656 ldi r1, #0x20 ; error_code
659 mv r0, sp ; pt_regs 657 mv r0, sp ; pt_regs
660 bl do_rie_handler 658 bl do_rie_handler
diff --git a/arch/m32r/kernel/io_m32700ut.c b/arch/m32r/kernel/io_m32700ut.c
index e545b065f7e9..eda9f963c1eb 100644
--- a/arch/m32r/kernel/io_m32700ut.c
+++ b/arch/m32r/kernel/io_m32700ut.c
@@ -64,11 +64,11 @@ static inline void *__port2addr_ata(unsigned long port)
64 * from 0x10000000 to 0x13ffffff on physical address. 64 * from 0x10000000 to 0x13ffffff on physical address.
65 * The base address of LAN controller(LAN91C111) is 0x300. 65 * The base address of LAN controller(LAN91C111) is 0x300.
66 */ 66 */
67#define LAN_IOSTART 0x300 67#define LAN_IOSTART 0xa0000300
68#define LAN_IOEND 0x320 68#define LAN_IOEND 0xa0000320
69static inline void *_port2addr_ne(unsigned long port) 69static inline void *_port2addr_ne(unsigned long port)
70{ 70{
71 return (void *)(port + NONCACHE_OFFSET + 0x10000000); 71 return (void *)(port + 0x10000000);
72} 72}
73static inline void *_port2addr_usb(unsigned long port) 73static inline void *_port2addr_usb(unsigned long port)
74{ 74{
diff --git a/arch/m32r/kernel/io_mappi.c b/arch/m32r/kernel/io_mappi.c
index 78033165fb5c..3c3da042fbd1 100644
--- a/arch/m32r/kernel/io_mappi.c
+++ b/arch/m32r/kernel/io_mappi.c
@@ -31,7 +31,7 @@ extern void pcc_iowrite(int, unsigned long, void *, size_t, size_t, int);
31 31
32static inline void *_port2addr(unsigned long port) 32static inline void *_port2addr(unsigned long port)
33{ 33{
34 return (void *)(port + NONCACHE_OFFSET); 34 return (void *)(port | (NONCACHE_OFFSET));
35} 35}
36 36
37static inline void *_port2addr_ne(unsigned long port) 37static inline void *_port2addr_ne(unsigned long port)
diff --git a/arch/m32r/kernel/io_mappi2.c b/arch/m32r/kernel/io_mappi2.c
index 5c03504bf653..df3c729cb3e0 100644
--- a/arch/m32r/kernel/io_mappi2.c
+++ b/arch/m32r/kernel/io_mappi2.c
@@ -33,12 +33,9 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
33 33
34static inline void *_port2addr(unsigned long port) 34static inline void *_port2addr(unsigned long port)
35{ 35{
36 return (void *)(port + NONCACHE_OFFSET); 36 return (void *)(port | (NONCACHE_OFFSET));
37} 37}
38 38
39#define LAN_IOSTART 0x300
40#define LAN_IOEND 0x320
41
42#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) 39#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
43static inline void *__port2addr_ata(unsigned long port) 40static inline void *__port2addr_ata(unsigned long port)
44{ 41{
@@ -59,15 +56,17 @@ static inline void *__port2addr_ata(unsigned long port)
59} 56}
60#endif 57#endif
61 58
59#define LAN_IOSTART 0xa0000300
60#define LAN_IOEND 0xa0000320
62#ifdef CONFIG_CHIP_OPSP 61#ifdef CONFIG_CHIP_OPSP
63static inline void *_port2addr_ne(unsigned long port) 62static inline void *_port2addr_ne(unsigned long port)
64{ 63{
65 return (void *)(port + NONCACHE_OFFSET + 0x10000000); 64 return (void *)(port + 0x10000000);
66} 65}
67#else 66#else
68static inline void *_port2addr_ne(unsigned long port) 67static inline void *_port2addr_ne(unsigned long port)
69{ 68{
70 return (void *)(port + NONCACHE_OFFSET + 0x04000000); 69 return (void *)(port + 0x04000000);
71} 70}
72#endif 71#endif
73static inline void *_port2addr_usb(unsigned long port) 72static inline void *_port2addr_usb(unsigned long port)
diff --git a/arch/m32r/kernel/io_mappi3.c b/arch/m32r/kernel/io_mappi3.c
index c80bde657854..6716ffea769a 100644
--- a/arch/m32r/kernel/io_mappi3.c
+++ b/arch/m32r/kernel/io_mappi3.c
@@ -36,9 +36,6 @@ static inline void *_port2addr(unsigned long port)
36 return (void *)(port + NONCACHE_OFFSET); 36 return (void *)(port + NONCACHE_OFFSET);
37} 37}
38 38
39#define LAN_IOSTART 0x300
40#define LAN_IOEND 0x320
41
42#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) 39#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
43static inline void *__port2addr_ata(unsigned long port) 40static inline void *__port2addr_ata(unsigned long port)
44{ 41{
@@ -59,9 +56,11 @@ static inline void *__port2addr_ata(unsigned long port)
59} 56}
60#endif 57#endif
61 58
59#define LAN_IOSTART 0xa0000300
60#define LAN_IOEND 0xa0000320
62static inline void *_port2addr_ne(unsigned long port) 61static inline void *_port2addr_ne(unsigned long port)
63{ 62{
64 return (void *)(port + NONCACHE_OFFSET + 0x10000000); 63 return (void *)(port + 0x10000000);
65} 64}
66 65
67static inline void *_port2addr_usb(unsigned long port) 66static inline void *_port2addr_usb(unsigned long port)
diff --git a/arch/m32r/kernel/io_oaks32r.c b/arch/m32r/kernel/io_oaks32r.c
index 9997dddd24d7..8be323931e4a 100644
--- a/arch/m32r/kernel/io_oaks32r.c
+++ b/arch/m32r/kernel/io_oaks32r.c
@@ -16,7 +16,7 @@
16 16
17static inline void *_port2addr(unsigned long port) 17static inline void *_port2addr(unsigned long port)
18{ 18{
19 return (void *)(port + NONCACHE_OFFSET); 19 return (void *)(port | (NONCACHE_OFFSET));
20} 20}
21 21
22static inline void *_port2addr_ne(unsigned long port) 22static inline void *_port2addr_ne(unsigned long port)
diff --git a/arch/m32r/kernel/io_opsput.c b/arch/m32r/kernel/io_opsput.c
index e34951e8156f..4793bd18e115 100644
--- a/arch/m32r/kernel/io_opsput.c
+++ b/arch/m32r/kernel/io_opsput.c
@@ -36,7 +36,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
36 36
37static inline void *_port2addr(unsigned long port) 37static inline void *_port2addr(unsigned long port)
38{ 38{
39 return (void *)(port + NONCACHE_OFFSET); 39 return (void *)(port | (NONCACHE_OFFSET));
40} 40}
41 41
42/* 42/*
@@ -44,11 +44,11 @@ static inline void *_port2addr(unsigned long port)
44 * from 0x10000000 to 0x13ffffff on physical address. 44 * from 0x10000000 to 0x13ffffff on physical address.
45 * The base address of LAN controller(LAN91C111) is 0x300. 45 * The base address of LAN controller(LAN91C111) is 0x300.
46 */ 46 */
47#define LAN_IOSTART 0x300 47#define LAN_IOSTART 0xa0000300
48#define LAN_IOEND 0x320 48#define LAN_IOEND 0xa0000320
49static inline void *_port2addr_ne(unsigned long port) 49static inline void *_port2addr_ne(unsigned long port)
50{ 50{
51 return (void *)(port + NONCACHE_OFFSET + 0x10000000); 51 return (void *)(port + 0x10000000);
52} 52}
53static inline void *_port2addr_usb(unsigned long port) 53static inline void *_port2addr_usb(unsigned long port)
54{ 54{
diff --git a/arch/m32r/kernel/io_usrv.c b/arch/m32r/kernel/io_usrv.c
index 9eb161dcc104..39a379af40bc 100644
--- a/arch/m32r/kernel/io_usrv.c
+++ b/arch/m32r/kernel/io_usrv.c
@@ -47,7 +47,7 @@ static inline void *_port2addr(unsigned long port)
47 else if (port >= UART1_IOSTART && port <= UART1_IOEND) 47 else if (port >= UART1_IOSTART && port <= UART1_IOEND)
48 port = ((port - UART1_IOSTART) << 1) + UART1_REGSTART; 48 port = ((port - UART1_IOSTART) << 1) + UART1_REGSTART;
49#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */ 49#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */
50 return (void *)(port + NONCACHE_OFFSET); 50 return (void *)(port | (NONCACHE_OFFSET));
51} 51}
52 52
53static inline void delay(void) 53static inline void delay(void)
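Across the m32r io_*.c hunks above, port + NONCACHE_OFFSET becomes port | NONCACHE_OFFSET and the LAN window constants turn into absolute noncached addresses (0xa0000300). Assuming NONCACHE_OFFSET is m32r's 0xa0000000 uncached alias, the OR form is idempotent for callers that already pass absolute addresses, while adding the offset would relocate them a second time; a small standalone check of that difference:

	#include <assert.h>

	#define NONCACHE_OFFSET 0xa0000000UL	/* assumption: m32r uncached alias */

	int main(void)
	{
		unsigned long io       = 0x300;		/* plain I/O port number */
		unsigned long absolute = 0xa0000300UL;	/* already an absolute address */

		/* OR maps both forms to the same uncached address... */
		assert((io       | NONCACHE_OFFSET) == 0xa0000300UL);
		assert((absolute | NONCACHE_OFFSET) == 0xa0000300UL);

		/* ...whereas adding the offset again would corrupt the absolute one. */
		assert((absolute + NONCACHE_OFFSET) != 0xa0000300UL);
		return 0;
	}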
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 124f7c1b775e..078d2a0e71c2 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -756,7 +756,7 @@ do_ptrace(long request, struct task_struct *child, long addr, long data)
756 return ret; 756 return ret;
757} 757}
758 758
759asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 759asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
760{ 760{
761 struct task_struct *child; 761 struct task_struct *child;
762 int ret; 762 int ret;
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index ec5674727e7f..f722ec8eb021 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -305,19 +305,19 @@ static int show_cpuinfo(struct seq_file *m, void *v)
305 305
306 seq_printf(m, "processor\t: %ld\n", cpu); 306 seq_printf(m, "processor\t: %ld\n", cpu);
307 307
308#ifdef CONFIG_CHIP_VDEC2 308#if defined(CONFIG_CHIP_VDEC2)
309 seq_printf(m, "cpu family\t: VDEC2\n" 309 seq_printf(m, "cpu family\t: VDEC2\n"
310 "cache size\t: Unknown\n"); 310 "cache size\t: Unknown\n");
311#elif CONFIG_CHIP_M32700 311#elif defined(CONFIG_CHIP_M32700)
312 seq_printf(m,"cpu family\t: M32700\n" 312 seq_printf(m,"cpu family\t: M32700\n"
313 "cache size\t: I-8KB/D-8KB\n"); 313 "cache size\t: I-8KB/D-8KB\n");
314#elif CONFIG_CHIP_M32102 314#elif defined(CONFIG_CHIP_M32102)
315 seq_printf(m,"cpu family\t: M32102\n" 315 seq_printf(m,"cpu family\t: M32102\n"
316 "cache size\t: I-8KB\n"); 316 "cache size\t: I-8KB\n");
317#elif CONFIG_CHIP_OPSP 317#elif defined(CONFIG_CHIP_OPSP)
318 seq_printf(m,"cpu family\t: OPSP\n" 318 seq_printf(m,"cpu family\t: OPSP\n"
319 "cache size\t: I-8KB/D-8KB\n"); 319 "cache size\t: I-8KB/D-8KB\n");
320#elif CONFIG_CHIP_MP 320#elif defined(CONFIG_CHIP_MP)
321 seq_printf(m, "cpu family\t: M32R-MP\n" 321 seq_printf(m, "cpu family\t: M32R-MP\n"
322 "cache size\t: I-xxKB/D-xxKB\n"); 322 "cache size\t: I-xxKB/D-xxKB\n");
323#else 323#else
@@ -326,19 +326,19 @@ static int show_cpuinfo(struct seq_file *m, void *v)
326 seq_printf(m, "bogomips\t: %lu.%02lu\n", 326 seq_printf(m, "bogomips\t: %lu.%02lu\n",
327 c->loops_per_jiffy/(500000/HZ), 327 c->loops_per_jiffy/(500000/HZ),
328 (c->loops_per_jiffy/(5000/HZ)) % 100); 328 (c->loops_per_jiffy/(5000/HZ)) % 100);
329#ifdef CONFIG_PLAT_MAPPI 329#if defined(CONFIG_PLAT_MAPPI)
330 seq_printf(m, "Machine\t\t: Mappi Evaluation board\n"); 330 seq_printf(m, "Machine\t\t: Mappi Evaluation board\n");
331#elif CONFIG_PLAT_MAPPI2 331#elif defined(CONFIG_PLAT_MAPPI2)
332 seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n"); 332 seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n");
333#elif CONFIG_PLAT_MAPPI3 333#elif defined(CONFIG_PLAT_MAPPI3)
334 seq_printf(m, "Machine\t\t: Mappi-III Evaluation board\n"); 334 seq_printf(m, "Machine\t\t: Mappi-III Evaluation board\n");
335#elif CONFIG_PLAT_M32700UT 335#elif defined(CONFIG_PLAT_M32700UT)
336 seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n"); 336 seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n");
337#elif CONFIG_PLAT_OPSPUT 337#elif defined(CONFIG_PLAT_OPSPUT)
338 seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n"); 338 seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n");
339#elif CONFIG_PLAT_USRV 339#elif defined(CONFIG_PLAT_USRV)
340 seq_printf(m, "Machine\t\t: uServer\n"); 340 seq_printf(m, "Machine\t\t: uServer\n");
341#elif CONFIG_PLAT_OAKS32R 341#elif defined(CONFIG_PLAT_OAKS32R)
342 seq_printf(m, "Machine\t\t: OAKS32R\n"); 342 seq_printf(m, "Machine\t\t: OAKS32R\n");
343#else 343#else
344 seq_printf(m, "Machine\t\t: Unknown\n"); 344 seq_printf(m, "Machine\t\t: Unknown\n");
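The setup.c hunk above converts bare #elif CONFIG_FOO tests into #elif defined(CONFIG_FOO). In the preprocessor an undefined identifier silently evaluates to 0 (and trips -Wundef), so the defined() form states the intent explicitly and keeps the build warning-free; a tiny standalone illustration:

	/* Build with -Wundef; only CONFIG_A is defined here. */
	#define CONFIG_A 1

	#if CONFIG_B			/* undefined: treated as 0, warns under -Wundef */
	#error "never reached"
	#endif

	#if defined(CONFIG_B)		/* explicit test, no warning */
	#error "never reached"
	#endif

	int main(void) { return 0; }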
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 539c562cd54d..2ebce2063fea 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -39,10 +39,6 @@ extern void send_IPI_allbutself(int, int);
39extern void smp_local_timer_interrupt(struct pt_regs *); 39extern void smp_local_timer_interrupt(struct pt_regs *);
40#endif 40#endif
41 41
42u64 jiffies_64 = INITIAL_JIFFIES;
43
44EXPORT_SYMBOL(jiffies_64);
45
46extern unsigned long wall_jiffies; 42extern unsigned long wall_jiffies;
47#define TICK_SIZE (tick_nsec / 1000) 43#define TICK_SIZE (tick_nsec / 1000)
48 44
diff --git a/arch/m32r/lib/csum_partial_copy.c b/arch/m32r/lib/csum_partial_copy.c
index ddb16a83a8ce..3d5f06145854 100644
--- a/arch/m32r/lib/csum_partial_copy.c
+++ b/arch/m32r/lib/csum_partial_copy.c
@@ -18,10 +18,10 @@
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/string.h>
21 22
22#include <net/checksum.h> 23#include <net/checksum.h>
23#include <asm/byteorder.h> 24#include <asm/byteorder.h>
24#include <asm/string.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26 26
27/* 27/*
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 8ed1b01a6a87..f7f1d2e5b90b 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -121,7 +121,7 @@ void ptrace_disable(struct task_struct *child)
121 child->thread.work.syscall_trace = 0; 121 child->thread.work.syscall_trace = 0;
122} 122}
123 123
124asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 124asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
125{ 125{
126 struct task_struct *child; 126 struct task_struct *child;
127 unsigned long tmp; 127 unsigned long tmp;
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 4ec95e3cb874..98e4b1adfa29 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -27,10 +27,6 @@
27#include <linux/timex.h> 27#include <linux/timex.h>
28#include <linux/profile.h> 28#include <linux/profile.h>
29 29
30u64 jiffies_64 = INITIAL_JIFFIES;
31
32EXPORT_SYMBOL(jiffies_64);
33
34static inline int set_rtc_mmss(unsigned long nowtime) 30static inline int set_rtc_mmss(unsigned long nowtime)
35{ 31{
36 if (mach_set_clock_mmss) 32 if (mach_set_clock_mmss)
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index 9724e1cd82e5..621d7b91ccfe 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -101,7 +101,7 @@ void ptrace_disable(struct task_struct *child)
101 put_reg(child, PT_SR, tmp); 101 put_reg(child, PT_SR, tmp);
102} 102}
103 103
104asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 104asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
105{ 105{
106 struct task_struct *child; 106 struct task_struct *child;
107 int ret; 107 int ret;
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index b17c1ecba966..b9d8abb45430 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -27,10 +27,6 @@
27 27
28#define TICK_SIZE (tick_nsec / 1000) 28#define TICK_SIZE (tick_nsec / 1000)
29 29
30u64 jiffies_64 = INITIAL_JIFFIES;
31
32EXPORT_SYMBOL(jiffies_64);
33
34extern unsigned long wall_jiffies; 30extern unsigned long wall_jiffies;
35 31
36 32
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 7ce34d4aa220..10d3644e3608 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -1077,8 +1077,8 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1077 struct elfhdr elf; 1077 struct elfhdr elf;
1078 off_t offset = 0, dataoff; 1078 off_t offset = 0, dataoff;
1079 int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; 1079 int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1080 int numnote = 4; 1080 int numnote = 3;
1081 struct memelfnote notes[4]; 1081 struct memelfnote notes[3];
1082 struct elf_prstatus prstatus; /* NT_PRSTATUS */ 1082 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1083 elf_fpregset_t fpu; /* NT_PRFPREG */ 1083 elf_fpregset_t fpu; /* NT_PRFPREG */
1084 struct elf_prpsinfo psinfo; /* NT_PRPSINFO */ 1084 struct elf_prpsinfo psinfo; /* NT_PRPSINFO */
@@ -1211,20 +1211,15 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1211 } 1211 }
1212 strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname)); 1212 strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
1213 1213
1214 notes[2].name = "CORE";
1215 notes[2].type = NT_TASKSTRUCT;
1216 notes[2].datasz = sizeof(*current);
1217 notes[2].data = current;
1218
1219 /* Try to dump the FPU. */ 1214 /* Try to dump the FPU. */
1220 prstatus.pr_fpvalid = dump_fpu (regs, &fpu); 1215 prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
1221 if (!prstatus.pr_fpvalid) { 1216 if (!prstatus.pr_fpvalid) {
1222 numnote--; 1217 numnote--;
1223 } else { 1218 } else {
1224 notes[3].name = "CORE"; 1219 notes[2].name = "CORE";
1225 notes[3].type = NT_PRFPREG; 1220 notes[2].type = NT_PRFPREG;
1226 notes[3].datasz = sizeof(fpu); 1221 notes[2].datasz = sizeof(fpu);
1227 notes[3].data = &fpu; 1222 notes[2].data = &fpu;
1228 } 1223 }
1229 1224
1230 /* Write notes phdr entry. */ 1225 /* Write notes phdr entry. */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index fcceab8f2e00..f1b0f3e1f95b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -174,7 +174,7 @@ int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
174 return 0; 174 return 0;
175} 175}
176 176
177asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 177asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
178{ 178{
179 struct task_struct *child; 179 struct task_struct *child;
180 int ret; 180 int ret;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index a24651dfaaba..787ed541d442 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -45,10 +45,6 @@
45 45
46#define TICK_SIZE (tick_nsec / 1000) 46#define TICK_SIZE (tick_nsec / 1000)
47 47
48u64 jiffies_64 = INITIAL_JIFFIES;
49
50EXPORT_SYMBOL(jiffies_64);
51
52/* 48/*
53 * forward reference 49 * forward reference
54 */ 50 */
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index e1829a5d3b19..07631a97670b 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -10,6 +10,7 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/signal.h> /* for SIGBUS */
13 14
14#include <asm/module.h> 15#include <asm/module.h>
15#include <asm/sn/addrs.h> 16#include <asm/sn/addrs.h>
diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c
index 8cad8f004f00..0a331104ad56 100644
--- a/arch/parisc/kernel/ioctl32.c
+++ b/arch/parisc/kernel/ioctl32.c
@@ -561,11 +561,6 @@ IOCTL_TABLE_START
561#define DECLARES 561#define DECLARES
562#include "compat_ioctl.c" 562#include "compat_ioctl.c"
563 563
564/* Might be moved to compat_ioctl.h with some ifdefs... */
565COMPATIBLE_IOCTL(TIOCSTART)
566COMPATIBLE_IOCTL(TIOCSTOP)
567COMPATIBLE_IOCTL(TIOCSLTC)
568
569/* PA-specific ioctls */ 564/* PA-specific ioctls */
570COMPATIBLE_IOCTL(PA_PERF_ON) 565COMPATIBLE_IOCTL(PA_PERF_ON)
571COMPATIBLE_IOCTL(PA_PERF_OFF) 566COMPATIBLE_IOCTL(PA_PERF_OFF)
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index f3428e5e86fb..18130c3748f3 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -78,7 +78,7 @@ void ptrace_disable(struct task_struct *child)
78 pa_psw(child)->l = 0; 78 pa_psw(child)->l = 0;
79} 79}
80 80
81long sys_ptrace(long request, pid_t pid, long addr, long data) 81long sys_ptrace(long request, long pid, long addr, long data)
82{ 82{
83 struct task_struct *child; 83 struct task_struct *child;
84 long ret; 84 long ret;
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index bc979e1abdec..cded25680787 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -33,10 +33,6 @@
33 33
34#include <linux/timex.h> 34#include <linux/timex.h>
35 35
36u64 jiffies_64 = INITIAL_JIFFIES;
37
38EXPORT_SYMBOL(jiffies_64);
39
40/* xtime and wall_jiffies keep wall-clock time */ 36/* xtime and wall_jiffies keep wall-clock time */
41extern unsigned long wall_jiffies; 37extern unsigned long wall_jiffies;
42 38
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
new file mode 100644
index 000000000000..967ecf92d6a7
--- /dev/null
+++ b/arch/powerpc/Kconfig
@@ -0,0 +1,900 @@
1# For a description of the syntax of this configuration file,
2# see Documentation/kbuild/kconfig-language.txt.
3#
4
5mainmenu "Linux/PowerPC Kernel Configuration"
6
7config PPC64
8 bool "64-bit kernel"
9 default n
10 help
11 This option selects whether a 32-bit or a 64-bit kernel
12 will be built.
13
14config PPC32
15 bool
16 default y if !PPC64
17
18config 64BIT
19 bool
20 default y if PPC64
21
22config PPC_MERGE
23 def_bool y
24
25config MMU
26 bool
27 default y
28
29config UID16
30 bool
31
32config GENERIC_HARDIRQS
33 bool
34 default y
35
36config RWSEM_GENERIC_SPINLOCK
37 bool
38
39config RWSEM_XCHGADD_ALGORITHM
40 bool
41 default y
42
43config GENERIC_CALIBRATE_DELAY
44 bool
45 default y
46
47config PPC
48 bool
49 default y
50
51config EARLY_PRINTK
52 bool
53 default y if PPC64
54
55config COMPAT
56 bool
57 default y if PPC64
58
59config SYSVIPC_COMPAT
60 bool
61 depends on COMPAT && SYSVIPC
62 default y
63
64# All PPC32s use generic nvram driver through ppc_md
65config GENERIC_NVRAM
66 bool
67 default y if PPC32
68
69config SCHED_NO_NO_OMIT_FRAME_POINTER
70 bool
71 default y
72
73config ARCH_MAY_HAVE_PC_FDC
74 bool
75 default y
76
77menu "Processor support"
78choice
79 prompt "Processor Type"
80 depends on PPC32
81 default 6xx
82
83config 6xx
84 bool "6xx/7xx/74xx"
85 select PPC_FPU
86 help
87 There are four families of PowerPC chips supported. The more common
88 types (601, 603, 604, 740, 750, 7400), the Motorola embedded
89 versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
90 embedded versions (403 and 405) and the high end 64 bit Power
91 processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
92
93 Unless you are building a kernel for one of the embedded processor
94 systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
95 Note that the kernel runs in 32-bit mode even on 64-bit chips.
96
97config PPC_52xx
98 bool "Freescale 52xx"
99
100config PPC_82xx
101 bool "Freescale 82xx"
102
103config PPC_83xx
104 bool "Freescale 83xx"
105
106config 40x
107 bool "AMCC 40x"
108
109config 44x
110 bool "AMCC 44x"
111
112config 8xx
113 bool "Freescale 8xx"
114
115config E200
116 bool "Freescale e200"
117
118config E500
119 bool "Freescale e500"
120endchoice
121
122config POWER4_ONLY
123 bool "Optimize for POWER4"
124 depends on PPC64
125 default n
126 ---help---
127 Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
128 The resulting binary will not work on POWER3 or RS64 processors
129 when compiled with binutils 2.15 or later.
130
131config POWER3
132 bool
133 depends on PPC64
134 default y if !POWER4_ONLY
135
136config POWER4
137 depends on PPC64
138 def_bool y
139
140config PPC_FPU
141 bool
142 default y if PPC64
143
144config BOOKE
145 bool
146 depends on E200 || E500
147 default y
148
149config FSL_BOOKE
150 bool
151 depends on E200 || E500
152 default y
153
154config PTE_64BIT
155 bool
156 depends on 44x || E500
157 default y if 44x
158 default y if E500 && PHYS_64BIT
159
160config PHYS_64BIT
161 bool 'Large physical address support' if E500
162 depends on 44x || E500
163 default y if 44x
164 ---help---
165 This option enables kernel support for larger than 32-bit physical
166	  addresses. This feature is not available on all e500 cores.
167
168 If in doubt, say N here.
169
170config ALTIVEC
171 bool "AltiVec Support"
172 depends on 6xx || POWER4
173 ---help---
174 This option enables kernel support for the Altivec extensions to the
175 PowerPC processor. The kernel currently supports saving and restoring
176 altivec registers, and turning on the 'altivec enable' bit so user
177 processes can execute altivec instructions.
178
179	  This option is only useful if you have a processor that supports
180	  altivec (G4, otherwise known as 74xx series), but does not have
181	  any effect on a non-altivec cpu (it does, however, add code to the
182 kernel).
183
184 If in doubt, say Y here.
185
186config SPE
187 bool "SPE Support"
188 depends on E200 || E500
189 ---help---
190 This option enables kernel support for the Signal Processing
191 Extensions (SPE) to the PowerPC processor. The kernel currently
192 supports saving and restoring SPE registers, and turning on the
193 'spe enable' bit so user processes can execute SPE instructions.
194
195 This option is only useful if you have a processor that supports
196 SPE (e500, otherwise known as 85xx series), but does not have any
197	  effect on a non-spe cpu (it does, however, add code to the kernel).
198
199 If in doubt, say Y here.
200
201config PPC_STD_MMU
202 bool
203 depends on 6xx || POWER3 || POWER4 || PPC64
204 default y
205
206config PPC_STD_MMU_32
207 def_bool y
208 depends on PPC_STD_MMU && PPC32
209
210config SMP
211 depends on PPC_STD_MMU
212 bool "Symmetric multi-processing support"
213 ---help---
214 This enables support for systems with more than one CPU. If you have
215 a system with only one CPU, say N. If you have a system with more
216 than one CPU, say Y. Note that the kernel does not currently
217 support SMP machines with 603/603e/603ev or PPC750 ("G3") processors
218 since they have inadequate hardware support for multiprocessor
219 operation.
220
221 If you say N here, the kernel will run on single and multiprocessor
222 machines, but will use only one CPU of a multiprocessor machine. If
223 you say Y here, the kernel will run on single-processor machines.
224 On a single-processor machine, the kernel will run faster if you say
225 N here.
226
227 If you don't know what to do here, say N.
228
229config NR_CPUS
230	int "Maximum number of CPUs (2-128)"
231 range 2 128
232 depends on SMP
233 default "32" if PPC64
234 default "4"
235
236config NOT_COHERENT_CACHE
237 bool
238 depends on 4xx || 8xx || E200
239 default y
240endmenu
241
242source "init/Kconfig"
243
244menu "Platform support"
245 depends on PPC64 || 6xx
246
247choice
248 prompt "Machine type"
249 default PPC_MULTIPLATFORM
250
251config PPC_MULTIPLATFORM
252 bool "Generic desktop/server/laptop"
253 help
254 Select this option if configuring for an IBM pSeries or
255 RS/6000 machine, an Apple machine, or a PReP, CHRP,
256 Maple or Cell-based machine.
257
258config PPC_ISERIES
259 bool "IBM Legacy iSeries"
260 depends on PPC64
261
262config EMBEDDED6xx
263 bool "Embedded 6xx/7xx/7xxx-based board"
264 depends on PPC32
265
266config APUS
267 bool "Amiga-APUS"
268 depends on PPC32 && BROKEN
269 help
270 Select APUS if configuring for a PowerUP Amiga.
271 More information is available at:
272 <http://linux-apus.sourceforge.net/>.
273endchoice
274
275config PPC_PSERIES
276 depends on PPC_MULTIPLATFORM && PPC64
277 bool " IBM pSeries & new (POWER5-based) iSeries"
278 select PPC_I8259
279 select PPC_RTAS
280 select RTAS_ERROR_LOGGING
281 default y
282
283config PPC_CHRP
284 bool " Common Hardware Reference Platform (CHRP) based machines"
285 depends on PPC_MULTIPLATFORM && PPC32
286 select PPC_I8259
287 select PPC_INDIRECT_PCI
288 select PPC_RTAS
289 select PPC_MPC106
290 default y
291
292config PPC_PMAC
293 bool " Apple PowerMac based machines"
294 depends on PPC_MULTIPLATFORM
295 select PPC_INDIRECT_PCI if PPC32
296 select PPC_MPC106 if PPC32
297 default y
298
299config PPC_PMAC64
300 bool
301 depends on PPC_PMAC && POWER4
302 select U3_DART
303 default y
304
305config PPC_PREP
306 bool " PowerPC Reference Platform (PReP) based machines"
307 depends on PPC_MULTIPLATFORM && PPC32
308 select PPC_I8259
309 select PPC_INDIRECT_PCI
310 default y
311
312config PPC_MAPLE
313 depends on PPC_MULTIPLATFORM && PPC64
314 bool " Maple 970FX Evaluation Board"
315 select U3_DART
316 select MPIC_BROKEN_U3
317 default n
318 help
319 This option enables support for the Maple 970FX Evaluation Board.
320 For more informations, refer to <http://www.970eval.com>
321
322config PPC_BPA
323 bool " Broadband Processor Architecture"
324 depends on PPC_MULTIPLATFORM && PPC64
325 select PPC_RTAS
326
327config PPC_OF
328 bool
329 depends on PPC_MULTIPLATFORM # for now
330 default y
331
332config XICS
333 depends on PPC_PSERIES
334 bool
335 default y
336
337config U3_DART
338 bool
339 depends on PPC_MULTIPLATFORM && PPC64
340 default n
341
342config MPIC
343 depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE || PPC_CHRP
344 bool
345 default y
346
347config PPC_RTAS
348 bool
349 default n
350
351config RTAS_ERROR_LOGGING
352 bool
353 depends on PPC_RTAS
354 default n
355
356config MPIC_BROKEN_U3
357 bool
358 depends on PPC_MAPLE
359 default y
360
361config BPA_IIC
362 depends on PPC_BPA
363 bool
364 default y
365
366config IBMVIO
367 depends on PPC_PSERIES || PPC_ISERIES
368 bool
369 default y
370
371config PPC_MPC106
372 bool
373 default n
374
375source "drivers/cpufreq/Kconfig"
376
377config CPU_FREQ_PMAC
378 bool "Support for Apple PowerBooks"
379 depends on CPU_FREQ && ADB_PMU && PPC32
380 select CPU_FREQ_TABLE
381 help
382	  This adds support for frequency switching on Apple PowerBooks;
383 this currently includes some models of iBook & Titanium
384 PowerBook.
385
386config PPC601_SYNC_FIX
387 bool "Workarounds for PPC601 bugs"
388 depends on 6xx && (PPC_PREP || PPC_PMAC)
389 help
390 Some versions of the PPC601 (the first PowerPC chip) have bugs which
391 mean that extra synchronization instructions are required near
392 certain instructions, typically those that make major changes to the
393 CPU state. These extra instructions reduce performance slightly.
394 If you say N here, these extra instructions will not be included,
395 resulting in a kernel which will run faster but may not run at all
396 on some systems with the PPC601 chip.
397
398 If in doubt, say Y here.
399
400config TAU
401 bool "Thermal Management Support"
402 depends on 6xx
403 help
404 G3 and G4 processors have an on-chip temperature sensor called the
405 'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die
406 temperature within 2-4 degrees Celsius. This option shows the current
407 on-die temperature in /proc/cpuinfo if the cpu supports it.
408
409 Unfortunately, on some chip revisions, this sensor is very inaccurate
410 and in some cases, does not work at all, so don't assume the cpu
411 temp is actually what /proc/cpuinfo says it is.
412
413config TAU_INT
414 bool "Interrupt driven TAU driver (DANGEROUS)"
415 depends on TAU
416 ---help---
417 The TAU supports an interrupt driven mode which causes an interrupt
418 whenever the temperature goes out of range. This is the fastest way
419 to get notified the temp has exceeded a range. With this option off,
420 a timer is used to re-check the temperature periodically.
421
422 However, on some cpus it appears that the TAU interrupt hardware
423	  is buggy and can cause a situation which would lead to unexplained hard
424 lockups.
425
426 Unless you are extending the TAU driver, or enjoy kernel/hardware
427 debugging, leave this option off.
428
429config TAU_AVERAGE
430 bool "Average high and low temp"
431 depends on TAU
432 ---help---
433 The TAU hardware can compare the temperature to an upper and lower
434 bound. The default behavior is to show both the upper and lower
435 bound in /proc/cpuinfo. If the range is large, the temperature is
436 either changing a lot, or the TAU hardware is broken (likely on some
437 G4's). If the range is small (around 4 degrees), the temperature is
438 relatively stable. If you say Y here, a single temperature value,
439 halfway between the upper and lower bounds, will be reported in
440 /proc/cpuinfo.
441
442 If in doubt, say N here.
443endmenu
444
445source arch/powerpc/platforms/embedded6xx/Kconfig
446source arch/powerpc/platforms/4xx/Kconfig
447source arch/powerpc/platforms/85xx/Kconfig
448source arch/powerpc/platforms/8xx/Kconfig
449
450menu "Kernel options"
451
452config HIGHMEM
453 bool "High memory support"
454 depends on PPC32
455
456source kernel/Kconfig.hz
457source kernel/Kconfig.preempt
458source "fs/Kconfig.binfmt"
459
460# We optimistically allocate largepages from the VM, so make the limit
461# large enough (16MB). This badly named config option is actually
462# max order + 1
463config FORCE_MAX_ZONEORDER
464 int
465 depends on PPC64
466 default "13"
467
468config MATH_EMULATION
469 bool "Math emulation"
470 depends on 4xx || 8xx || E200 || E500
471 ---help---
472 Some PowerPC chips designed for embedded applications do not have
473 a floating-point unit and therefore do not implement the
474 floating-point instructions in the PowerPC instruction set. If you
475 say Y here, the kernel will include code to emulate a floating-point
476 unit, which will allow programs that use floating-point
477 instructions to run.
478
479config IOMMU_VMERGE
480 bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
481 depends on EXPERIMENTAL && PPC64
482 default n
483 help
484 Cause IO segments sent to a device for DMA to be merged virtually
485 by the IOMMU when they happen to have been allocated contiguously.
486 This doesn't add pressure to the IOMMU allocator. However, some
487 drivers don't support getting large merged segments coming back
488 from *_map_sg(). Say Y if you know the drivers you are using are
489 properly handling this case.
490
491config HOTPLUG_CPU
492 bool "Support for enabling/disabling CPUs"
493 depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
494 ---help---
495 Say Y here to be able to disable and re-enable individual
496 CPUs at runtime on SMP machines.
497
498 Say N if you are unsure.
499
500config KEXEC
501 bool "kexec system call (EXPERIMENTAL)"
502 depends on PPC_MULTIPLATFORM && EXPERIMENTAL
503 help
504	  kexec is a system call that implements the ability to shut down your
505 current kernel, and to start another kernel. It is like a reboot
506	  but it is independent of the system firmware. And like a reboot
507 you can start any kernel with it, not just Linux.
508
509	  The name comes from the similarity to the exec system call.
510
511 It is an ongoing process to be certain the hardware in a machine
512	  is properly shut down, so do not be surprised if this code does not
513 initially work for you. It may help to enable device hotplugging
514 support. As of this writing the exact hardware interface is
515 strongly in flux, so no good recommendation can be made.
516
517config EMBEDDEDBOOT
518 bool
519 depends on 8xx || 8260
520 default y
521
522config PC_KEYBOARD
523 bool "PC PS/2 style Keyboard"
524 depends on 4xx || CPM2
525
526config PPCBUG_NVRAM
527 bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC
528 default y if PPC_PREP
529
530config IRQ_ALL_CPUS
531 bool "Distribute interrupts on all CPUs by default"
532 depends on SMP && !MV64360
533 help
534 This option gives the kernel permission to distribute IRQs across
535 multiple CPUs. Saying N here will route all IRQs to the first
536 CPU. Generally saying Y is safe, although some problems have been
537 reported with SMP Power Macintoshes with this option enabled.
538
539source "arch/powerpc/platforms/pseries/Kconfig"
540
541config NUMA
542 bool "NUMA support"
543 depends on PPC64
544 default y if SMP && PPC_PSERIES
545
546config ARCH_SELECT_MEMORY_MODEL
547 def_bool y
548 depends on PPC64
549
550config ARCH_FLATMEM_ENABLE
551 def_bool y
552 depends on PPC64 && !NUMA
553
554config ARCH_DISCONTIGMEM_ENABLE
555 def_bool y
556 depends on SMP && PPC_PSERIES
557
558config ARCH_DISCONTIGMEM_DEFAULT
559 def_bool y
560 depends on ARCH_DISCONTIGMEM_ENABLE
561
562config ARCH_SPARSEMEM_ENABLE
563 def_bool y
564 depends on ARCH_DISCONTIGMEM_ENABLE
565
566source "mm/Kconfig"
567
568config HAVE_ARCH_EARLY_PFN_TO_NID
569 def_bool y
570 depends on NEED_MULTIPLE_NODES
571
572# Some NUMA nodes have memory ranges that span
573# other nodes. Even though a pfn is valid and
574# between a node's start and end pfns, it may not
575# reside on that node.
576#
577# This is a relatively temporary hack that should
578# be able to go away when sparsemem is fully in
579# place
580
581config NODES_SPAN_OTHER_NODES
582 def_bool y
583 depends on NEED_MULTIPLE_NODES
584
585config SCHED_SMT
586 bool "SMT (Hyperthreading) scheduler support"
587 depends on PPC64 && SMP
588	default n
589 help
590 SMT scheduler support improves the CPU scheduler's decision making
591 when dealing with POWER5 cpus at a cost of slightly increased
592 overhead in some places. If unsure say N here.
593
594config PROC_DEVICETREE
595 bool "Support for device tree in /proc"
596 depends on PROC_FS
597 help
598 This option adds a device-tree directory under /proc which contains
599 an image of the device tree that the kernel copies from Open
600 Firmware or other boot firmware. If unsure, say Y here.
601
602source "arch/powerpc/platforms/prep/Kconfig"
603
604config CMDLINE_BOOL
605 bool "Default bootloader kernel arguments"
606 depends on !PPC_ISERIES
607
608config CMDLINE
609 string "Initial kernel command string"
610 depends on CMDLINE_BOOL
611 default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
612 help
613 On some platforms, there is currently no way for the boot loader to
614 pass arguments to the kernel. For these platforms, you can supply
615 some command-line options at build time by entering them here. In
616 most cases you will need to specify the root device here.
617
618if !44x || BROKEN
619source kernel/power/Kconfig
620endif
621
622config SECCOMP
623 bool "Enable seccomp to safely compute untrusted bytecode"
624 depends on PROC_FS
625 default y
626 help
627 This kernel feature is useful for number crunching applications
628 that may need to compute untrusted bytecode during their
629 execution. By using pipes or other transports made available to
630 the process as file descriptors supporting the read/write
631 syscalls, it's possible to isolate those applications in
632 their own address space using seccomp. Once seccomp is
633 enabled via /proc/<pid>/seccomp, it cannot be disabled
634 and the task is only allowed to execute a few safe syscalls
635 defined by each seccomp mode.
636
637	  If unsure, say Y. Only embedded systems should say N here.
638
639endmenu
640
641config ISA_DMA_API
642 bool
643 default y
644
645menu "Bus options"
646
647config ISA
648 bool "Support for ISA-bus hardware"
649 depends on PPC_PREP || PPC_CHRP
650 select PPC_I8259
651 help
652 Find out whether you have ISA slots on your motherboard. ISA is the
653 name of a bus system, i.e. the way the CPU talks to the other stuff
654 inside your box. If you have an Apple machine, say N here; if you
655 have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
656 you have an embedded board, consult your board documentation.
657
658config GENERIC_ISA_DMA
659 bool
660 depends on PPC64 || POWER4 || 6xx && !CPM2
661 default y
662
663config PPC_I8259
664 bool
665 default y if 85xx
666 default n
667
668config PPC_INDIRECT_PCI
669 bool
670 depends on PCI
671 default y if 40x || 44x || 85xx || 83xx
672 default n
673
674config EISA
675 bool
676
677config SBUS
678 bool
679
680# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
681config MCA
682 bool
683
684config PCI
685 bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
686 default y if !40x && !CPM2 && !8xx && !APUS && !83xx && !85xx
687 default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
688 default PCI_QSPAN if !4xx && !CPM2 && 8xx
689 help
690 Find out whether your system includes a PCI bus. PCI is the name of
691 a bus system, i.e. the way the CPU talks to the other stuff inside
692 your box. If you say Y here, the kernel will include drivers and
693 infrastructure code to support PCI bus devices.
694
695config PCI_DOMAINS
696 bool
697 default PCI
698
699config MPC83xx_PCI2
700	bool " Support for 2nd PCI host controller"
701 depends on PCI && MPC834x
702 default y if MPC834x_SYS
703
704config PCI_QSPAN
705 bool "QSpan PCI"
706 depends on !4xx && !CPM2 && 8xx
707 select PPC_I8259
708 help
709 Say Y here if you have a system based on a Motorola 8xx-series
710 embedded processor with a QSPAN PCI interface, otherwise say N.
711
712config PCI_8260
713 bool
714 depends on PCI && 8260
715 select PPC_INDIRECT_PCI
716 default y
717
718config 8260_PCI9
719 bool " Enable workaround for MPC826x erratum PCI 9"
720 depends on PCI_8260 && !ADS8272
721 default y
722
723choice
724 prompt " IDMA channel for PCI 9 workaround"
725 depends on 8260_PCI9
726
727config 8260_PCI9_IDMA1
728 bool "IDMA1"
729
730config 8260_PCI9_IDMA2
731 bool "IDMA2"
732
733config 8260_PCI9_IDMA3
734 bool "IDMA3"
735
736config 8260_PCI9_IDMA4
737 bool "IDMA4"
738
739endchoice
740
741source "drivers/pci/Kconfig"
742
743source "drivers/pcmcia/Kconfig"
744
745source "drivers/pci/hotplug/Kconfig"
746
747endmenu
748
749menu "Advanced setup"
750 depends on PPC32
751
752config ADVANCED_OPTIONS
753 bool "Prompt for advanced kernel configuration options"
754 help
755 This option will enable prompting for a variety of advanced kernel
756 configuration options. These options can cause the kernel to not
757 work if they are set incorrectly, but can be used to optimize certain
758 aspects of kernel memory management.
759
760 Unless you know what you are doing, say N here.
761
762comment "Default settings for advanced configuration options are used"
763 depends on !ADVANCED_OPTIONS
764
765config HIGHMEM_START_BOOL
766 bool "Set high memory pool address"
767 depends on ADVANCED_OPTIONS && HIGHMEM
768 help
769 This option allows you to set the base address of the kernel virtual
770 area used to map high memory pages. This can be useful in
771 optimizing the layout of kernel virtual memory.
772
773 Say N here unless you know what you are doing.
774
775config HIGHMEM_START
776 hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
777 default "0xfe000000"
778
779config LOWMEM_SIZE_BOOL
780 bool "Set maximum low memory"
781 depends on ADVANCED_OPTIONS
782 help
783 This option allows you to set the maximum amount of memory which
784 will be used as "low memory", that is, memory which the kernel can
785 access directly, without having to set up a kernel virtual mapping.
786 This can be useful in optimizing the layout of kernel virtual
787 memory.
788
789 Say N here unless you know what you are doing.
790
791config LOWMEM_SIZE
792 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
793 default "0x30000000"
794
795config KERNEL_START_BOOL
796 bool "Set custom kernel base address"
797 depends on ADVANCED_OPTIONS
798 help
799 This option allows you to set the kernel virtual address at which
800 the kernel will map low memory (the kernel image will be linked at
801 this address). This can be useful in optimizing the virtual memory
802 layout of the system.
803
804 Say N here unless you know what you are doing.
805
806config KERNEL_START
807 hex "Virtual address of kernel base" if KERNEL_START_BOOL
808 default "0xc0000000"
809
810config TASK_SIZE_BOOL
811 bool "Set custom user task size"
812 depends on ADVANCED_OPTIONS
813 help
814 This option allows you to set the amount of virtual address space
815 allocated to user tasks. This can be useful in optimizing the
816 virtual memory layout of the system.
817
818 Say N here unless you know what you are doing.
819
820config TASK_SIZE
821 hex "Size of user task space" if TASK_SIZE_BOOL
822 default "0x80000000"
823
824config CONSISTENT_START_BOOL
825 bool "Set custom consistent memory pool address"
826 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
827 help
828 This option allows you to set the base virtual address
829	  of the consistent memory pool. This pool of virtual
830 memory is used to make consistent memory allocations.
831
832config CONSISTENT_START
833 hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
834 default "0xff100000" if NOT_COHERENT_CACHE
835
836config CONSISTENT_SIZE_BOOL
837 bool "Set custom consistent memory pool size"
838 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
839 help
840	  This option allows you to set the size of the
841 consistent memory pool. This pool of virtual memory
842 is used to make consistent memory allocations.
843
844config CONSISTENT_SIZE
845 hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
846 default "0x00200000" if NOT_COHERENT_CACHE
847
848config BOOT_LOAD_BOOL
849 bool "Set the boot link/load address"
850 depends on ADVANCED_OPTIONS && !PPC_MULTIPLATFORM
851 help
852 This option allows you to set the initial load address of the zImage
853 or zImage.initrd file. This can be useful if you are on a board
854 which has a small amount of memory.
855
856 Say N here unless you know what you are doing.
857
858config BOOT_LOAD
859 hex "Link/load address for booting" if BOOT_LOAD_BOOL
860 default "0x00400000" if 40x || 8xx || 8260
861 default "0x01000000" if 44x
862 default "0x00800000"
863
864config PIN_TLB
865 bool "Pinned Kernel TLBs (860 ONLY)"
866 depends on ADVANCED_OPTIONS && 8xx
867endmenu
868
869if PPC64
870config KERNEL_START
871 hex
872 default "0xc000000000000000"
873endif
874
875source "net/Kconfig"
876
877source "drivers/Kconfig"
878
879source "fs/Kconfig"
880
881# XXX source "arch/ppc/8xx_io/Kconfig"
882
883# XXX source "arch/ppc/8260_io/Kconfig"
884
885source "arch/powerpc/platforms/iseries/Kconfig"
886
887source "lib/Kconfig"
888
889source "arch/powerpc/oprofile/Kconfig"
890
891source "arch/powerpc/Kconfig.debug"
892
893source "security/Kconfig"
894
895config KEYS_COMPAT
896 bool
897 depends on COMPAT && KEYS
898 default y
899
900source "crypto/Kconfig"
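The SECCOMP entry in the Kconfig above describes enabling strict syscall filtering through /proc/<pid>/seccomp. A minimal user-space sketch of a task opting in and then living within the small allowed set the help text mentions (read, write, exit, sigreturn); treat the write of "1" and the exact allowed set as assumptions drawn from that help text, not from this patch:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/self/seccomp", O_WRONLY);
		if (fd < 0)
			return 1;		/* kernel built without CONFIG_SECCOMP */
		write(fd, "1", 1);		/* assumption: writing "1" enables strict mode */

		/* From here on only read/write/exit/sigreturn are allowed, so the
		 * descriptor is deliberately never close()d. */
		write(1, "sandboxed\n", 10);
		_exit(0);			/* assumption: libc issues the plain exit syscall */
	}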
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
new file mode 100644
index 000000000000..0baf64ec80d0
--- /dev/null
+++ b/arch/powerpc/Kconfig.debug
@@ -0,0 +1,128 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5config DEBUG_STACKOVERFLOW
6 bool "Check for stack overflows"
7 depends on DEBUG_KERNEL && PPC64
8 help
9 This option will cause messages to be printed if free stack space
10 drops below a certain limit.
11
12config KPROBES
13 bool "Kprobes"
14 depends on DEBUG_KERNEL && PPC64
15 help
16 Kprobes allows you to trap at almost any kernel address and
17 execute a callback function. register_kprobe() establishes
18 a probepoint and specifies the callback. Kprobes is useful
19 for kernel debugging, non-intrusive instrumentation and testing.
20 If in doubt, say "N".
21
22config DEBUG_STACK_USAGE
23 bool "Stack utilization instrumentation"
24 depends on DEBUG_KERNEL && PPC64
25 help
26 Enables the display of the minimum amount of free stack which each
27 task has ever had available in the sysrq-T and sysrq-P debug output.
28
29 This option will slow down process creation somewhat.
30
31config DEBUGGER
32 bool "Enable debugger hooks"
33 depends on DEBUG_KERNEL
34 help
35 Include in-kernel hooks for kernel debuggers. Unless you are
36 intending to debug the kernel, say N here.
37
38config KGDB
39 bool "Include kgdb kernel debugger"
40 depends on DEBUGGER && (BROKEN || PPC_GEN550 || 4xx)
41 select DEBUG_INFO
42 help
43 Include in-kernel hooks for kgdb, the Linux kernel source level
44 debugger. See <http://kgdb.sourceforge.net/> for more information.
45 Unless you are intending to debug the kernel, say N here.
46
47choice
48 prompt "Serial Port"
49 depends on KGDB
50 default KGDB_TTYS1
51
52config KGDB_TTYS0
53 bool "ttyS0"
54
55config KGDB_TTYS1
56 bool "ttyS1"
57
58config KGDB_TTYS2
59 bool "ttyS2"
60
61config KGDB_TTYS3
62 bool "ttyS3"
63
64endchoice
65
66config KGDB_CONSOLE
67 bool "Enable serial console thru kgdb port"
68 depends on KGDB && 8xx || CPM2
69 help
70 If you enable this, all serial console messages will be sent
71 over the gdb stub.
72 If unsure, say N.
73
74config XMON
75 bool "Include xmon kernel debugger"
76 depends on DEBUGGER && !PPC_ISERIES
77 help
78 Include in-kernel hooks for the xmon kernel monitor/debugger.
79 Unless you are intending to debug the kernel, say N here.
80	  Make sure to also enable CONFIG_BOOTX_TEXT on Macs. Otherwise
81 nothing will appear on the screen (xmon writes directly to the
82 framebuffer memory).
83 The cmdline option 'xmon' or 'xmon=early' will drop into xmon
84 very early during boot. 'xmon=on' will just enable the xmon
85 debugger hooks. 'xmon=off' will disable the debugger hooks
86 if CONFIG_XMON_DEFAULT is set.
87
88config XMON_DEFAULT
89 bool "Enable xmon by default"
90 depends on XMON
91 help
92 xmon is normally disabled unless booted with 'xmon=on'.
93 Use 'xmon=off' to disable xmon init during runtime.
94
95config IRQSTACKS
96 bool "Use separate kernel stacks when processing interrupts"
97 depends on PPC64
98 help
99 If you say Y here the kernel will use separate kernel stacks
100 for handling hard and soft interrupts. This can help avoid
101 overflowing the process kernel stacks.
102
103config BDI_SWITCH
104 bool "Include BDI-2000 user context switcher"
105 depends on DEBUG_KERNEL && PPC32
106 help
107 Include in-kernel support for the Abatron BDI2000 debugger.
108 Unless you are intending to debug the kernel with one of these
109 machines, say N here.
110
111config BOOTX_TEXT
112 bool "Support for early boot text console (BootX or OpenFirmware only)"
113	depends on PPC_OF && !PPC_ISERIES
114 help
115 Say Y here to see progress messages from the boot firmware in text
116 mode. Requires either BootX or Open Firmware.
117
118config SERIAL_TEXT_DEBUG
119 bool "Support for early boot texts over serial port"
120 depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
121 PPC_GEN550 || PPC_MPC52xx
122
123config PPC_OCP
124 bool
125 depends on IBM_OCP || XILINX_OCP
126 default y
127
128endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
new file mode 100644
index 000000000000..2f4cce06a7e5
--- /dev/null
+++ b/arch/powerpc/Makefile
@@ -0,0 +1,222 @@
1# This file is included by the global makefile so that you can add your own
2# architecture-specific flags and dependencies. Remember to have actions
3# for "archclean" and "archdep" for cleaning up and making dependencies for
4# this architecture.
5#
6# This file is subject to the terms and conditions of the GNU General Public
7# License. See the file "COPYING" in the main directory of this archive
8# for more details.
9#
10# Copyright (C) 1994 by Linus Torvalds
11# Changes for PPC by Gary Thomas
12# Rewritten by Cort Dougan and Paul Mackerras
13#
14
15# This must match PAGE_OFFSET in include/asm-powerpc/page.h.
16KERNELLOAD := $(CONFIG_KERNEL_START)
17
18HAS_BIARCH := $(call cc-option-yn, -m32)
19
20ifeq ($(CONFIG_PPC64),y)
21OLDARCH := ppc64
22SZ := 64
23
24# Set default 32 bits cross compilers for vdso and boot wrapper
25CROSS32_COMPILE ?=
26
27CROSS32CC := $(CROSS32_COMPILE)gcc
28CROSS32AS := $(CROSS32_COMPILE)as
29CROSS32LD := $(CROSS32_COMPILE)ld
30CROSS32OBJCOPY := $(CROSS32_COMPILE)objcopy
31
32ifeq ($(HAS_BIARCH),y)
33ifeq ($(CROSS32_COMPILE),)
34CROSS32CC := $(CC) -m32
35CROSS32AS := $(AS) -a32
36CROSS32LD := $(LD) -m elf32ppc
37CROSS32OBJCOPY := $(OBJCOPY)
38endif
39endif
40
41export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
42
43new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
44
45ifeq ($(new_nm),y)
46NM := $(NM) --synthetic
47endif
48
49else
50OLDARCH := ppc
51SZ := 32
52endif
53
54UTS_MACHINE := $(OLDARCH)
55
56ifeq ($(HAS_BIARCH),y)
57override AS += -a$(SZ)
58override LD += -m elf$(SZ)ppc
59override CC += -m$(SZ)
60endif
61
62LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD)
63
64# The -Iarch/$(ARCH)/include is temporary while we are merging
65CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
66AFLAGS += -Iarch/$(ARCH)
67CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe
68CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc
69CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
70CFLAGS += $(CFLAGS-y)
71CPP = $(CC) -E $(CFLAGS)
72# Temporary hack until we have migrated to asm-powerpc
73LINUXINCLUDE += -Iarch/$(ARCH)/include
74
75CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
76
77ifeq ($(CONFIG_PPC64),y)
78GCC_VERSION := $(call cc-version)
79GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi)
80
81ifeq ($(CONFIG_POWER4_ONLY),y)
82ifeq ($(CONFIG_ALTIVEC),y)
83ifeq ($(GCC_BROKEN_VEC),y)
84 CFLAGS += $(call cc-option,-mcpu=970)
85else
86 CFLAGS += $(call cc-option,-mcpu=power4)
87endif
88else
89 CFLAGS += $(call cc-option,-mcpu=power4)
90endif
91else
92 CFLAGS += $(call cc-option,-mtune=power4)
93endif
94endif
95
 96# No AltiVec instructions when building the kernel
97CFLAGS += $(call cc-option,-mno-altivec)
98
99# Enable unit-at-a-time mode when possible. It shrinks the
100# kernel considerably.
101CFLAGS += $(call cc-option,-funit-at-a-time)
102
103ifndef CONFIG_FSL_BOOKE
104CFLAGS += -mstring
105endif
106
107cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
108cpu-as-$(CONFIG_4xx) += -Wa,-m405
109cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
110cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
111cpu-as-$(CONFIG_E500) += -Wa,-me500
112cpu-as-$(CONFIG_E200) += -Wa,-me200
113
114AFLAGS += $(cpu-as-y)
115CFLAGS += $(cpu-as-y)
116
117# Default to the common case.
118KBUILD_DEFCONFIG := common_defconfig
119
120head-y := arch/powerpc/kernel/head_32.o
121head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o
122head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
123head-$(CONFIG_4xx) := arch/powerpc/kernel/head_4xx.o
124head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
125head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
126
127head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
128head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
129
130core-y += arch/powerpc/kernel/ \
131 arch/$(OLDARCH)/kernel/ \
132 arch/powerpc/mm/ \
133 arch/powerpc/lib/ \
134 arch/powerpc/sysdev/ \
135 arch/powerpc/platforms/
136core-$(CONFIG_MATH_EMULATION) += arch/ppc/math-emu/
137core-$(CONFIG_XMON) += arch/powerpc/xmon/
138core-$(CONFIG_APUS) += arch/ppc/amiga/
139drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/
140drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/
141drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
142
143drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
144
145defaultimage-$(CONFIG_PPC32) := uImage zImage
146defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
147defaultimage-$(CONFIG_PPC_PSERIES) := zImage
148KBUILD_IMAGE := $(defaultimage-y)
149all: $(KBUILD_IMAGE)
150
151CPPFLAGS_vmlinux.lds := -Upowerpc
152
153# All the instructions talk about "make bzImage".
154bzImage: zImage
155
156BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
157
158.PHONY: $(BOOT_TARGETS)
159
160boot := arch/$(OLDARCH)/boot
161
162# urk
163ifeq ($(CONFIG_PPC64),y)
164$(BOOT_TARGETS): vmlinux
165 $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
166else
167$(BOOT_TARGETS): vmlinux
168 $(Q)$(MAKE) ARCH=ppc $(build)=$(boot) $@
169endif
170
171uImage: vmlinux
172 $(Q)$(MAKE) ARCH=$(OLDARCH) $(build)=$(boot)/images $(boot)/images/$@
173
174define archhelp
175 @echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/images/zImage.*)'
176 @echo ' uImage - Create a bootable image for U-Boot / PPCBoot'
177 @echo ' install - Install kernel using'
178 @echo ' (your) ~/bin/installkernel or'
179 @echo ' (distribution) /sbin/installkernel or'
180 @echo ' install to $$(INSTALL_PATH) and run lilo'
181 @echo ' *_defconfig - Select default config from arch/$(ARCH)/ppc/configs'
182endef
183
184archclean:
185 $(Q)$(MAKE) $(clean)=$(boot)
186 # Temporary hack until we have migrated to asm-powerpc
187 $(Q)rm -rf arch/$(ARCH)/include
188
189archprepare: checkbin
190
191# Temporary hack until we have migrated to asm-powerpc
192include/asm: arch/$(ARCH)/include/asm
193arch/$(ARCH)/include/asm:
194 $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
195 $(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
196
197# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
198# to stdout and these checks are run even on install targets.
199TOUT := .tmp_gas_check
200# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
201# instructions.
202# gcc-3.4 and binutils-2.14 are a fatal combination.
203GCC_VERSION := $(call cc-version)
204
205checkbin:
206 @if test "$(GCC_VERSION)" = "0304" ; then \
207 if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
208 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
209 echo 'correctly with gcc-3.4 and your version of binutils.'; \
210 echo '*** Please upgrade your binutils or downgrade your gcc'; \
211 false; \
212 fi ; \
213 fi
214 @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
215 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
216 echo 'correctly with old versions of binutils.' ; \
217 echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
218 false ; \
219 fi
220
221CLEAN_FILES += $(TOUT)
222
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
new file mode 100644
index 000000000000..572d4f5eaacb
--- /dev/null
+++ b/arch/powerpc/kernel/Makefile
@@ -0,0 +1,56 @@
1#
2# Makefile for the linux kernel.
3#
4
5ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc
7endif
8ifeq ($(CONFIG_PPC32),y)
9CFLAGS_prom_init.o += -fPIC
10CFLAGS_btext.o += -fPIC
11endif
12
13obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
14 signal_32.o pmc.o
15obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
16 ptrace32.o systbl.o
17obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
18obj-$(CONFIG_POWER4) += idle_power4.o
19obj-$(CONFIG_PPC_OF) += of_device.o
20obj-$(CONFIG_PPC_RTAS) += rtas.o
21obj-$(CONFIG_IBMVIO) += vio.o
22
23ifeq ($(CONFIG_PPC_MERGE),y)
24
25extra-$(CONFIG_PPC_STD_MMU) := head_32.o
26extra-$(CONFIG_PPC64) := head_64.o
27extra-$(CONFIG_40x) := head_4xx.o
28extra-$(CONFIG_44x) := head_44x.o
29extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
30extra-$(CONFIG_8xx) := head_8xx.o
31extra-y += vmlinux.lds
32
33obj-y += process.o init_task.o time.o \
34 prom.o traps.o setup-common.o
35obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o
36obj-$(CONFIG_PPC64) += misc_64.o
37obj-$(CONFIG_PPC_OF) += prom_init.o
38obj-$(CONFIG_MODULES) += ppc_ksyms.o
39obj-$(CONFIG_BOOTX_TEXT) += btext.o
40obj-$(CONFIG_6xx) += idle_6xx.o
41
42ifeq ($(CONFIG_PPC_ISERIES),y)
43$(obj)/head_64.o: $(obj)/lparmap.s
44AFLAGS_head_64.o += -I$(obj)
45endif
46
47else
48# stuff used from here for ARCH=ppc or ARCH=ppc64
49obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
50 setup-common.o
51
52
53endif
54
55extra-$(CONFIG_PPC_FPU) += fpu.o
56extra-$(CONFIG_PPC64) += entry_64.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..330cd783206f
--- /dev/null
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -0,0 +1,273 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
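/*
 * A rough sketch of the mechanism described above (the numeric offset
 * and the exact extraction rule are assumptions for illustration):
 *
 *     DEFINE(THREAD, offsetof(struct task_struct, thread));
 *
 * expands to an asm() statement whose compiled (-S) output contains a
 * marker line along the lines of
 *
 *     ->THREAD 1234 offsetof(struct task_struct, thread)
 *
 * which the build system post-processes (typically with a sed rule)
 * into
 *
 *     #define THREAD 1234
 *
 * in asm-offsets.h, so assembly sources can use the offset directly.
 */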
15
16#include <linux/config.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/mman.h>
24#include <linux/mm.h>
25#ifdef CONFIG_PPC64
26#include <linux/time.h>
27#include <linux/hardirq.h>
28#else
29#include <linux/ptrace.h>
30#include <linux/suspend.h>
31#endif
32
33#include <asm/io.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/processor.h>
37#include <asm/cputable.h>
38#include <asm/thread_info.h>
39#include <asm/rtas.h>
40#ifdef CONFIG_PPC64
41#include <asm/paca.h>
42#include <asm/lppaca.h>
43#include <asm/iSeries/HvLpEvent.h>
44#include <asm/cache.h>
45#include <asm/systemcfg.h>
46#include <asm/compat.h>
47#endif
48
49#define DEFINE(sym, val) \
50 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
51
52#define BLANK() asm volatile("\n->" : : )
53
54int main(void)
55{
56 DEFINE(THREAD, offsetof(struct task_struct, thread));
57 DEFINE(MM, offsetof(struct task_struct, mm));
58#ifdef CONFIG_PPC64
59 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
60#else
61 DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
62 DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
63#endif /* CONFIG_PPC64 */
64
65 DEFINE(KSP, offsetof(struct thread_struct, ksp));
66 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
67 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
68 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
69 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
70#ifdef CONFIG_ALTIVEC
71 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
72 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
73 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
74 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
75#endif /* CONFIG_ALTIVEC */
76#ifdef CONFIG_PPC64
77 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
78#else /* CONFIG_PPC64 */
79 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
80 DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
81#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
82 DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
83 DEFINE(PT_PTRACED, PT_PTRACED);
84#endif
85#ifdef CONFIG_SPE
86 DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
87 DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
88 DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
89 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
90#endif /* CONFIG_SPE */
91#endif /* CONFIG_PPC64 */
92
93 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
94 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
95 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
96#ifdef CONFIG_PPC32
97 DEFINE(TI_TASK, offsetof(struct thread_info, task));
98 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
99 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
100#endif /* CONFIG_PPC32 */
101
102#ifdef CONFIG_PPC64
103 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
104 DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
105 DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
106 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
107 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
108 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
109 DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
110 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
111
112 /* paca */
113 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
114 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
115 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
116 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
117 DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
118 DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
119 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
120 DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
121 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
122 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
123 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
124 DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
125 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
126 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
127 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
128#ifdef CONFIG_HUGETLB_PAGE
129 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
130 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
131#endif /* CONFIG_HUGETLB_PAGE */
132 DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
133 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
134 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
135 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
136 DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
137 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
138 DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
139 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
140
141 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
142 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
143 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
144 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
145#endif /* CONFIG_PPC64 */
146
147 /* RTAS */
148 DEFINE(RTASBASE, offsetof(struct rtas_t, base));
149 DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
150
151 /* Interrupt register frame */
152 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
153#ifndef CONFIG_PPC64
154 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
155#else /* CONFIG_PPC64 */
156 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
157 /* 288 = # of volatile regs, int & fp, for leaf routines */
158 /* which do not stack a frame. See the PPC64 ABI. */
159 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
160 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
161 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
162 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
163#endif /* CONFIG_PPC64 */
164 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
165 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
166 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
167 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
168 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
169 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
170 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
171 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
172 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
173 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
174 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
175 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
176 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
177 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
178#ifndef CONFIG_PPC64
179 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
180 DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
181 DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
182 DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
183 DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
184 DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
185 DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
186 DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
187 DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
188 DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
189 DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
190 DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
191 DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
192 DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
193 DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
194 DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
195 DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
196 DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
197#endif /* CONFIG_PPC64 */
198 /*
199 * Note: these symbols include _ because they overlap with special
200 * register names
201 */
202 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
203 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
204 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
205 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
206 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
207 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
208 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
209 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
210 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
211 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
212 DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
213#ifndef CONFIG_PPC64
214 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
215 /*
216 * The PowerPC 400-class & Book-E processors have neither the DAR
217 * nor the DSISR SPRs. Hence, we overload them to hold the similar
218 * DEAR and ESR SPRs for such processors. For critical interrupts
219 * we use them to hold SRR0 and SRR1.
220 */
221 DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
222 DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
223#else /* CONFIG_PPC64 */
224 DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
225
226 /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
227 DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
228 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
229#endif /* CONFIG_PPC64 */
230
231 DEFINE(CLONE_VM, CLONE_VM);
232 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
233
234#ifndef CONFIG_PPC64
235 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
236#endif /* ! CONFIG_PPC64 */
237
238 /* About the CPU features table */
239 DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
240 DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
241 DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
242 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
243 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
244
245#ifndef CONFIG_PPC64
246 DEFINE(pbe_address, offsetof(struct pbe, address));
247 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
248 DEFINE(pbe_next, offsetof(struct pbe, next));
249
250 DEFINE(TASK_SIZE, TASK_SIZE);
251 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
252#else /* CONFIG_PPC64 */
253 /* systemcfg offsets for use by vdso */
254 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
255 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
256 DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
257 DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
258 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
259 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
260 DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
261 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
262 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
263
264 /* timeval/timezone offsets for use by vdso */
265 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
266 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
267 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
268 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
269 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
270 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
271#endif /* CONFIG_PPC64 */
272 return 0;
273}
diff --git a/arch/ppc64/kernel/binfmt_elf32.c b/arch/powerpc/kernel/binfmt_elf32.c
index fadc699a0497..8ad6b0f33651 100644
--- a/arch/ppc64/kernel/binfmt_elf32.c
+++ b/arch/powerpc/kernel/binfmt_elf32.c
@@ -70,9 +70,6 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
 	value->tv_sec = jiffies / HZ;
 }
 
-extern void start_thread32(struct pt_regs *, unsigned long, unsigned long);
-#undef start_thread
-#define start_thread start_thread32
 #define init_elf_binfmt init_elf32_binfmt
 
 #include "../../../fs/binfmt_elf.c"
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
new file mode 100644
index 000000000000..bdfba92b2b38
--- /dev/null
+++ b/arch/powerpc/kernel/btext.c
@@ -0,0 +1,853 @@
1/*
2 * Procedures for drawing on the screen early on in the boot process.
3 *
4 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 */
6#include <linux/config.h>
7#include <linux/kernel.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/module.h>
11
12#include <asm/sections.h>
13#include <asm/prom.h>
14#include <asm/btext.h>
15#include <asm/prom.h>
16#include <asm/page.h>
17#include <asm/mmu.h>
18#include <asm/pgtable.h>
19#include <asm/io.h>
20#include <asm/lmb.h>
21#include <asm/processor.h>
22
23#define NO_SCROLL
24
25#ifndef NO_SCROLL
26static void scrollscreen(void);
27#endif
28
29static void draw_byte(unsigned char c, long locX, long locY);
30static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
31static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
32static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
33
34static int g_loc_X;
35static int g_loc_Y;
36static int g_max_loc_X;
37static int g_max_loc_Y;
38
39static int dispDeviceRowBytes;
40static int dispDeviceDepth;
41static int dispDeviceRect[4];
42static unsigned char *dispDeviceBase, *logicalDisplayBase;
43
44unsigned long disp_BAT[2] __initdata = {0, 0};
45
46#define cmapsz (16*256)
47
48static unsigned char vga_font[cmapsz];
49
50int boot_text_mapped;
51int force_printk_to_btext = 0;
52
53#ifdef CONFIG_PPC32
54/* Calc BAT values for mapping the display and store them
55 * in disp_BAT. Those values are then used from head.S to map
56 * the display during identify_machine() and MMU_Init()
57 *
58 * The display is mapped to virtual address 0xD0000000, rather
 59 * than 1:1, because some CHRP machines put the frame buffer
60 * in the region starting at 0xC0000000 (KERNELBASE).
61 * This mapping is temporary and will disappear as soon as the
62 * setup done by MMU_Init() is applied.
63 *
64 * For now, we align the BAT and then map 8Mb on 601 and 16Mb
65 * on other PPCs. This may cause trouble if the framebuffer
 66 * is really badly aligned, but I haven't encountered this case
67 * yet.
68 */
69void __init
70btext_prepare_BAT(void)
71{
72 unsigned long vaddr = KERNELBASE + 0x10000000;
73 unsigned long addr;
74 unsigned long lowbits;
75
76 addr = (unsigned long)dispDeviceBase;
77 if (!addr) {
78 boot_text_mapped = 0;
79 return;
80 }
81 if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
82 /* 603, 604, G3, G4, ... */
83 lowbits = addr & ~0xFF000000UL;
84 addr &= 0xFF000000UL;
85 disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
86 disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
87 } else {
88 /* 601 */
89 lowbits = addr & ~0xFF800000UL;
90 addr &= 0xFF800000UL;
91 disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4;
92 disp_BAT[1] = addr | BL_8M | 0x40;
93 }
94 logicalDisplayBase = (void *) (vaddr + lowbits);
95}
96#endif
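/*
 * Worked example for btext_prepare_BAT() (the frame-buffer address is
 * an assumption chosen for illustration): with dispDeviceBase ==
 * 0x81F00000 on a non-601 CPU, addr is rounded down to 0x81000000,
 * lowbits == 0x00F00000, and the BAT pair maps a 16MB window of that
 * region at vaddr == KERNELBASE + 0x10000000 == 0xD0000000, so
 * logicalDisplayBase ends up at 0xD0F00000.
 */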
97
98/* This function will enable the early boot text when doing OF booting. This
99 * way, xmon output should work too
100 */
101void __init
102btext_setup_display(int width, int height, int depth, int pitch,
103 unsigned long address)
104{
105 g_loc_X = 0;
106 g_loc_Y = 0;
107 g_max_loc_X = width / 8;
108 g_max_loc_Y = height / 16;
109 logicalDisplayBase = (unsigned char *)address;
110 dispDeviceBase = (unsigned char *)address;
111 dispDeviceRowBytes = pitch;
112 dispDeviceDepth = depth;
113 dispDeviceRect[0] = dispDeviceRect[1] = 0;
114 dispDeviceRect[2] = width;
115 dispDeviceRect[3] = height;
116 boot_text_mapped = 1;
117}
118
119/* Here's a small text engine to use during early boot
120 * or for debugging purposes
121 *
122 * todo:
123 *
124 * - build some kind of vgacon with it to enable early printk
125 * - move to a separate file
126 * - add a few video driver hooks to keep in sync with display
127 * changes.
128 */
129
130void map_boot_text(void)
131{
132 unsigned long base, offset, size;
133 unsigned char *vbase;
134
135 /* By default, we are no longer mapped */
136 boot_text_mapped = 0;
137 if (dispDeviceBase == 0)
138 return;
139 base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
140 offset = ((unsigned long) dispDeviceBase) - base;
141 size = dispDeviceRowBytes * dispDeviceRect[3] + offset
142 + dispDeviceRect[0];
143 vbase = __ioremap(base, size, _PAGE_NO_CACHE);
144 if (vbase == 0)
145 return;
146 logicalDisplayBase = vbase + offset;
147 boot_text_mapped = 1;
148}
149
150int btext_initialize(struct device_node *np)
151{
152 unsigned int width, height, depth, pitch;
153 unsigned long address = 0;
154 u32 *prop;
155
156 prop = (u32 *)get_property(np, "width", NULL);
157 if (prop == NULL)
158 return -EINVAL;
159 width = *prop;
160 prop = (u32 *)get_property(np, "height", NULL);
161 if (prop == NULL)
162 return -EINVAL;
163 height = *prop;
164 prop = (u32 *)get_property(np, "depth", NULL);
165 if (prop == NULL)
166 return -EINVAL;
167 depth = *prop;
168 pitch = width * ((depth + 7) / 8);
169 prop = (u32 *)get_property(np, "linebytes", NULL);
170 if (prop)
171 pitch = *prop;
172 if (pitch == 1)
173 pitch = 0x1000;
174 prop = (u32 *)get_property(np, "address", NULL);
175 if (prop)
176 address = *prop;
177
178 /* FIXME: Add support for PCI reg properties */
179
180 if (address == 0)
181 return -EINVAL;
182
183 g_loc_X = 0;
184 g_loc_Y = 0;
185 g_max_loc_X = width / 8;
186 g_max_loc_Y = height / 16;
187 logicalDisplayBase = (unsigned char *)address;
188 dispDeviceBase = (unsigned char *)address;
189 dispDeviceRowBytes = pitch;
190 dispDeviceDepth = depth;
191 dispDeviceRect[0] = dispDeviceRect[1] = 0;
192 dispDeviceRect[2] = width;
193 dispDeviceRect[3] = height;
194
195 map_boot_text();
196
197 return 0;
198}
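/*
 * For illustration, btext_initialize() above expects the chosen display
 * node to carry properties roughly like the following (all values are
 * hypothetical):
 *
 *     width     = <0x400>       (1024 pixels)
 *     height    = <0x300>       (768 pixels)
 *     depth     = <0x8>         (8 bits per pixel)
 *     linebytes = <0x400>       (pitch; defaults to width * bytes/pixel)
 *     address   = <0x9c008000>  (physical frame-buffer address)
 */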
199
200void __init init_boot_display(void)
201{
202 char *name;
203 struct device_node *np = NULL;
204 int rc = -ENODEV;
205
206 printk("trying to initialize btext ...\n");
207
208 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
209 if (name != NULL) {
210 np = of_find_node_by_path(name);
211 if (np != NULL) {
212 if (strcmp(np->type, "display") != 0) {
213 printk("boot stdout isn't a display !\n");
214 of_node_put(np);
215 np = NULL;
216 }
217 }
218 }
219 if (np)
220 rc = btext_initialize(np);
221 if (rc == 0)
222 return;
223
224 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
225 if (get_property(np, "linux,opened", NULL)) {
226 printk("trying %s ...\n", np->full_name);
227 rc = btext_initialize(np);
228 printk("result: %d\n", rc);
229 }
230 if (rc == 0)
231 return;
232 }
233}
234
235/* Calc the base address of a given point (x,y) */
236static unsigned char * calc_base(int x, int y)
237{
238 unsigned char *base;
239
240 base = logicalDisplayBase;
241 if (base == 0)
242 base = dispDeviceBase;
243 base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
244 base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
245 return base;
246}
247
248/* Adjust the display to a new resolution */
249void btext_update_display(unsigned long phys, int width, int height,
250 int depth, int pitch)
251{
252 if (dispDeviceBase == 0)
253 return;
254
255 /* check it's the same frame buffer (within 256MB) */
256 if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
257 return;
258
259 dispDeviceBase = (__u8 *) phys;
260 dispDeviceRect[0] = 0;
261 dispDeviceRect[1] = 0;
262 dispDeviceRect[2] = width;
263 dispDeviceRect[3] = height;
264 dispDeviceDepth = depth;
265 dispDeviceRowBytes = pitch;
266 if (boot_text_mapped) {
267 iounmap(logicalDisplayBase);
268 boot_text_mapped = 0;
269 }
270 map_boot_text();
271 g_loc_X = 0;
272 g_loc_Y = 0;
273 g_max_loc_X = width / 8;
274 g_max_loc_Y = height / 16;
275}
276EXPORT_SYMBOL(btext_update_display);
277
278void btext_clearscreen(void)
279{
280 unsigned long *base = (unsigned long *)calc_base(0, 0);
281 unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
282 (dispDeviceDepth >> 3)) >> 3;
283 int i,j;
284
285 for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
286 {
287 unsigned long *ptr = base;
288 for(j=width; j; --j)
289 *(ptr++) = 0;
290 base += (dispDeviceRowBytes >> 3);
291 }
292}
293
294#ifndef NO_SCROLL
295static void scrollscreen(void)
296{
297 unsigned long *src = (unsigned long *)calc_base(0,16);
298 unsigned long *dst = (unsigned long *)calc_base(0,0);
299 unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
300 (dispDeviceDepth >> 3)) >> 3;
301 int i,j;
302
303 for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
304 {
305 unsigned long *src_ptr = src;
306 unsigned long *dst_ptr = dst;
307 for(j=width; j; --j)
308 *(dst_ptr++) = *(src_ptr++);
309 src += (dispDeviceRowBytes >> 3);
310 dst += (dispDeviceRowBytes >> 3);
311 }
312 for (i=0; i<16; i++)
313 {
314 unsigned long *dst_ptr = dst;
315 for(j=width; j; --j)
316 *(dst_ptr++) = 0;
317 dst += (dispDeviceRowBytes >> 3);
318 }
319}
320#endif /* ndef NO_SCROLL */
321
322void btext_drawchar(char c)
323{
324 int cline = 0;
325#ifdef NO_SCROLL
326 int x;
327#endif
328 if (!boot_text_mapped)
329 return;
330
331 switch (c) {
332 case '\b':
333 if (g_loc_X > 0)
334 --g_loc_X;
335 break;
336 case '\t':
337 g_loc_X = (g_loc_X & -8) + 8;
338 break;
339 case '\r':
340 g_loc_X = 0;
341 break;
342 case '\n':
343 g_loc_X = 0;
344 g_loc_Y++;
345 cline = 1;
346 break;
347 default:
348 draw_byte(c, g_loc_X++, g_loc_Y);
349 }
350 if (g_loc_X >= g_max_loc_X) {
351 g_loc_X = 0;
352 g_loc_Y++;
353 cline = 1;
354 }
355#ifndef NO_SCROLL
356 while (g_loc_Y >= g_max_loc_Y) {
357 scrollscreen();
358 g_loc_Y--;
359 }
360#else
361 /* wrap around from bottom to top of screen so we don't
362 waste time scrolling each line. -- paulus. */
363 if (g_loc_Y >= g_max_loc_Y)
364 g_loc_Y = 0;
365 if (cline) {
366 for (x = 0; x < g_max_loc_X; ++x)
367 draw_byte(' ', x, g_loc_Y);
368 }
369#endif
370}
371
372void btext_drawstring(const char *c)
373{
374 if (!boot_text_mapped)
375 return;
376 while (*c)
377 btext_drawchar(*c++);
378}
379
380void btext_drawhex(unsigned long v)
381{
382 char *hex_table = "0123456789abcdef";
383
384 if (!boot_text_mapped)
385 return;
386#ifdef CONFIG_PPC64
387 btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
388 btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
389 btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
390 btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
391 btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
392 btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
393 btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
394 btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
395#endif
396 btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
397 btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
398 btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
399 btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
400 btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
401 btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]);
402 btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]);
403 btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]);
404 btext_drawchar(' ');
405}
406
407static void draw_byte(unsigned char c, long locX, long locY)
408{
409 unsigned char *base = calc_base(locX << 3, locY << 4);
410 unsigned char *font = &vga_font[((unsigned int)c) * 16];
411 int rb = dispDeviceRowBytes;
412
413 switch(dispDeviceDepth) {
414 case 24:
415 case 32:
416 draw_byte_32(font, (unsigned int *)base, rb);
417 break;
418 case 15:
419 case 16:
420 draw_byte_16(font, (unsigned int *)base, rb);
421 break;
422 case 8:
423 draw_byte_8(font, (unsigned int *)base, rb);
424 break;
425 }
426}
427
428static unsigned int expand_bits_8[16] = {
429 0x00000000,
430 0x000000ff,
431 0x0000ff00,
432 0x0000ffff,
433 0x00ff0000,
434 0x00ff00ff,
435 0x00ffff00,
436 0x00ffffff,
437 0xff000000,
438 0xff0000ff,
439 0xff00ff00,
440 0xff00ffff,
441 0xffff0000,
442 0xffff00ff,
443 0xffffff00,
444 0xffffffff
445};
446
447static unsigned int expand_bits_16[4] = {
448 0x00000000,
449 0x0000ffff,
450 0xffff0000,
451 0xffffffff
452};
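/*
 * Worked example of the tables above: each entry expands a small group
 * of font bits into whole pixels, most-significant bit leftmost.  At
 * 8bpp, a high nibble of 0b1010 indexes expand_bits_8[10] == 0xff00ff00,
 * i.e. on, off, on, off packed as four byte-pixels per word.  At
 * 15/16bpp, a bit pair of 0b10 indexes expand_bits_16[2] == 0xffff0000,
 * i.e. two halfword pixels with only the left one set.
 */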
453
454
455static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
456{
457 int l, bits;
458 int fg = 0xFFFFFFFFUL;
459 int bg = 0x00000000UL;
460
461 for (l = 0; l < 16; ++l)
462 {
463 bits = *font++;
464 base[0] = (-(bits >> 7) & fg) ^ bg;
465 base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
466 base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
467 base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
468 base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
469 base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
470 base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
471 base[7] = (-(bits & 1) & fg) ^ bg;
472 base = (unsigned int *) ((char *)base + rb);
473 }
474}
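/*
 * A note on the idiom in draw_byte_32() above: (bits >> 7) is either 0
 * or 1, so negating it yields 0x00000000 or 0xffffffff.  ANDing that
 * mask with fg and XORing with bg selects the foreground or background
 * value per pixel without branching.  For a glyph row of bits == 0x81,
 * only base[0] and base[7] come out as fg ^ bg (the foreground, since
 * bg is zero here); the six middle pixels come out as bg.
 */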
475
476static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
477{
478 int l, bits;
479 int fg = 0xFFFFFFFFUL;
480 int bg = 0x00000000UL;
481 unsigned int *eb = (int *)expand_bits_16;
482
483 for (l = 0; l < 16; ++l)
484 {
485 bits = *font++;
486 base[0] = (eb[bits >> 6] & fg) ^ bg;
487 base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
488 base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
489 base[3] = (eb[bits & 3] & fg) ^ bg;
490 base = (unsigned int *) ((char *)base + rb);
491 }
492}
493
494static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
495{
496 int l, bits;
497 int fg = 0x0F0F0F0FUL;
498 int bg = 0x00000000UL;
499 unsigned int *eb = (int *)expand_bits_8;
500
501 for (l = 0; l < 16; ++l)
502 {
503 bits = *font++;
504 base[0] = (eb[bits >> 4] & fg) ^ bg;
505 base[1] = (eb[bits & 0xf] & fg) ^ bg;
506 base = (unsigned int *) ((char *)base + rb);
507 }
508}
509
510static unsigned char vga_font[cmapsz] = {
5110x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
5130x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
5140xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
5150x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
5160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
5170x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
5180x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
5190x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
5200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
5210x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
5220xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
5230x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
5240x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
5250xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
5260x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
5270x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
5280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
5290x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
5300x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
5310x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
5320x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
5330xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
5340x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
5350x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
5360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
5370x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
5380xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
5390x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
5400x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5410xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
5420x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
5430x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
5440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
5450x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5460x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5470x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
5480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
5490xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5500x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5510x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
5520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
5530x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5540x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5550x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5560x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
5570x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
5580x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
5590x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
5600x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
5610x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
5620x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
5630x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
5650x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
5660x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5670x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
5680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
5690x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5700x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
5710x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
5720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5730x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5740x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
5750x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
5760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
5770x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5780x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
5790x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
5800x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
5810x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
5820xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5830x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
5840x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
5850x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5860xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5870x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
5880x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
5890x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5900x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5910x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
5920x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
5930x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
5940x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
5950x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
5970xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
5980x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
5990x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
6000x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
6010xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
6020x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
6030x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
6040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
6050x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6060xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
6070x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
6090x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
6100x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
6110x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
6120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
6130x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
6140xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
6150x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
6160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6170xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
6180x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
6190x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
6200x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
6210x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
6220xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6230x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6240x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6250xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
6260xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
6270x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
6280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
6290x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
6300xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6310x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
6320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
6330x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
6340xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
6350x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
6360x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
6370x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6380x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
6390x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
6410xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
6420x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
6430x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
6440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
6450xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6460x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6470x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
6480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
6490xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
6500x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
6510x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
6530x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
6540x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
6550x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6560x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
6570xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6580x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
6590x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
6600x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
6610x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
6620x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
6630x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
6640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
6650x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
6660x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
6670x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
6690xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6700x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
6710x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
6720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
6730xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
6740x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
6750x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
6760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
6770x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
6780x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
6790x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6800x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
6810xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6820xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
6830x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6840x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
6850xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
6860x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6870x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
6880x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
6890xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
6900x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6910x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
6920x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
6930xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
6940x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6950x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
6960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
6970x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
6980x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6990x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
7000x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
7010xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
7020x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
7030x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
7040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
7050x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
7060xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
7070x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
7090xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
7100x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
7110x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
7120x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
7130xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
7140x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
7150x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7160x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
7170xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
7180xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
7190x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
7200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
7210xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
7220x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
7230x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
7240xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
7250xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
7260x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
7270x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7280x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
7290xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
7300x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
7310x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
7320x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
7330x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
7340x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7350x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
7360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
7370xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7380x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
7390x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
7400x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
7410x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
7420x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
7430x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
7440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
7450x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
7460x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
7470x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
7480x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
7490xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
7500x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7510x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
7520x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
7530x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
7540x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7550x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
7560x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
7570x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
7580x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7590x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7600x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
};
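The hex block that ends above is raw bitmap data; it reads like a console-font table with one byte per scan line and the most significant bit as the leftmost pixel, although the patch itself does not spell out the geometry, so treat that layout as an assumption. Under that assumption, a small stand-alone C sketch (not part of the patch; the function name, glyph height and sample bytes are illustrative only) shows how such rows decode to pixels:

#include <stdio.h>

/* Hedged sketch, not from the patch: render an 8-pixel-wide glyph as
 * ASCII art, assuming one byte per scan line, MSB = leftmost pixel. */
static void dump_glyph(const unsigned char *glyph, int height)
{
	int row, bit;

	for (row = 0; row < height; row++) {
		for (bit = 7; bit >= 0; bit--)
			putchar((glyph[row] >> bit) & 1 ? '#' : '.');
		putchar('\n');
	}
}

int main(void)
{
	/* Two scan lines copied from the table above: 0x36 = 00110110. */
	const unsigned char sample[] = { 0x36, 0x36 };

	dump_glyph(sample, 2);
	return 0;
}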
diff --git a/arch/ppc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6b76cf58d9e0..b91345fa0805 100644
--- a/arch/ppc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1,8 +1,9 @@
 /*
- * arch/ppc/kernel/cputable.c
- *
  * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
  *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -14,96 +15,302 @@
 #include <linux/sched.h>
 #include <linux/threads.h>
 #include <linux/init.h>
-#include <asm/cputable.h>
+#include <linux/module.h>
 
-struct cpu_spec* cur_cpu_spec[NR_CPUS];
+#include <asm/oprofile_impl.h>
+#include <asm/cputable.h>
 
-extern void __setup_cpu_601(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_603(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_604(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750cx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_7400(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_7410(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_745x(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_power3(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_power4(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_ppc970(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+struct cpu_spec* cur_cpu_spec = NULL;
+EXPORT_SYMBOL(cur_cpu_spec);
 
-#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
-		     !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
-		     !defined(CONFIG_BOOKE))
+/* NOTE:
+ * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
+ * the responsibility of the appropriate CPU save/restore functions to
+ * eventually copy these settings over. Those save/restore aren't yet
+ * part of the cputable though. That has to be fixed for both ppc32
+ * and ppc64
+ */
+#ifdef CONFIG_PPC64
+extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
+#else
+extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
+#endif /* CONFIG_PPC32 */
+extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
 
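The declarations above give the shape of what follows: each cpu_specs[] entry carries a pvr_mask/pvr_value pair, user-visible feature bits, cache geometry and an optional cpu_setup() hook, and cur_cpu_spec ends up pointing at whichever entry matches the running processor's PVR. As a rough stand-alone illustration of that matching (a sketch only; the function and the trimmed table are invented for the example and are not the kernel's identification code, though the three rows are copied from the table below):

#include <stdio.h>

/* Sketch of PVR matching against a cpu_specs[]-style table.  The zero
 * mask on the last row is the same trick as the "default match" entry
 * in the patch: it matches any PVR, so a fallback is always found. */
struct spec_sketch {
	unsigned int pvr_mask;
	unsigned int pvr_value;
	const char *cpu_name;
};

static const struct spec_sketch table[] = {
	{ 0xffff0000, 0x00400000, "POWER3 (630)" },
	{ 0xffff0000, 0x00410000, "POWER3 (630+)" },
	{ 0x00000000, 0x00000000, "POWER4 (compatible)" },
};

static const struct spec_sketch *match_pvr(unsigned int pvr)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if ((pvr & table[i].pvr_mask) == table[i].pvr_value)
			return &table[i];	/* first hit wins */
	return NULL;	/* unreachable while the catch-all row is last */
}

int main(void)
{
	/* 0x00410105 & 0xffff0000 == 0x00410000, so this prints the 630+ row. */
	printf("%s\n", match_pvr(0x00410105)->cpu_name);
	return 0;
}

The same first-match rule is why, in the real table, entries with narrow masks (0xffffffff, 0xfffffff0) for specific revisions are listed before the broad 0xffff0000 family entries.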
39/* This table only contains "desktop" CPUs, it need to be filled with embedded 49/* This table only contains "desktop" CPUs, it need to be filled with embedded
40 * ones as well... 50 * ones as well...
41 */ 51 */
42#define COMMON_PPC (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ 52#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
43 PPC_FEATURE_HAS_MMU) 53 PPC_FEATURE_HAS_MMU)
54#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
44 55
45/* We only set the altivec features if the kernel was compiled with altivec
46 * support
47 */
48#ifdef CONFIG_ALTIVEC
49#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
50#define PPC_FEATURE_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
51#else
52#define CPU_FTR_ALTIVEC_COMP 0
53#define PPC_FEATURE_ALTIVEC_COMP 0
54#endif
55 56
56/* We only set the spe features if the kernel was compiled with 57/* We only set the spe features if the kernel was compiled with
57 * spe support 58 * spe support
58 */ 59 */
59#ifdef CONFIG_SPE 60#ifdef CONFIG_SPE
60#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE 61#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE
61#else 62#else
62#define PPC_FEATURE_SPE_COMP 0 63#define PPC_FEATURE_SPE_COMP 0
63#endif 64#endif
64 65
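The _COMP macros above make the AltiVec and SPE bits in .cpu_user_features depend on the kernel configuration, and those bits are what user programs later see as their hardware-capability word: on powerpc the selected entry's cpu_user_features is exported through the ELF auxiliary vector as AT_HWCAP, so a feature compiled out of the kernel simply never appears there. A hedged userspace sketch of testing one of these bits follows; note that getauxval() is a much later glibc convenience than this patch, and the constant is duplicated inline rather than taken from the kernel headers so the example stands alone:

#include <stdio.h>
#include <sys/auxv.h>

/* PPC_FEATURE_HAS_ALTIVEC as defined by the powerpc ABI headers,
 * copied here so the sketch compiles without kernel headers. */
#define PPC_FEATURE_HAS_ALTIVEC	0x10000000UL

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("AltiVec %s\n",
	       (hwcap & PPC_FEATURE_HAS_ALTIVEC) ? "reported" : "not reported");
	return 0;
}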
65/* We need to mark all pages as being coherent if we're SMP or we 66struct cpu_spec cpu_specs[] = {
66 * have a 74[45]x and an MPC107 host bridge. 67#ifdef CONFIG_PPC64
67 */ 68 { /* Power3 */
68#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) 69 .pvr_mask = 0xffff0000,
69#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT 70 .pvr_value = 0x00400000,
70#else 71 .cpu_name = "POWER3 (630)",
71#define CPU_FTR_COMMON 0 72 .cpu_features = CPU_FTRS_POWER3,
73 .cpu_user_features = COMMON_USER_PPC64,
74 .icache_bsize = 128,
75 .dcache_bsize = 128,
76 .num_pmcs = 8,
77 .cpu_setup = __setup_cpu_power3,
78#ifdef CONFIG_OPROFILE
79 .oprofile_cpu_type = "ppc64/power3",
80 .oprofile_model = &op_model_rs64,
72#endif 81#endif
73 82 },
74/* The powersave features NAP & DOZE seems to confuse BDI when 83 { /* Power3+ */
75 debugging. So if a BDI is used, disable theses 84 .pvr_mask = 0xffff0000,
76 */ 85 .pvr_value = 0x00410000,
77#ifndef CONFIG_BDI_SWITCH 86 .cpu_name = "POWER3 (630+)",
78#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE 87 .cpu_features = CPU_FTRS_POWER3,
79#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP 88 .cpu_user_features = COMMON_USER_PPC64,
89 .icache_bsize = 128,
90 .dcache_bsize = 128,
91 .num_pmcs = 8,
92 .cpu_setup = __setup_cpu_power3,
93#ifdef CONFIG_OPROFILE
94 .oprofile_cpu_type = "ppc64/power3",
95 .oprofile_model = &op_model_rs64,
96#endif
97 },
98 { /* Northstar */
99 .pvr_mask = 0xffff0000,
100 .pvr_value = 0x00330000,
101 .cpu_name = "RS64-II (northstar)",
102 .cpu_features = CPU_FTRS_RS64,
103 .cpu_user_features = COMMON_USER_PPC64,
104 .icache_bsize = 128,
105 .dcache_bsize = 128,
106 .num_pmcs = 8,
107 .cpu_setup = __setup_cpu_power3,
108#ifdef CONFIG_OPROFILE
109 .oprofile_cpu_type = "ppc64/rs64",
110 .oprofile_model = &op_model_rs64,
111#endif
112 },
113 { /* Pulsar */
114 .pvr_mask = 0xffff0000,
115 .pvr_value = 0x00340000,
116 .cpu_name = "RS64-III (pulsar)",
117 .cpu_features = CPU_FTRS_RS64,
118 .cpu_user_features = COMMON_USER_PPC64,
119 .icache_bsize = 128,
120 .dcache_bsize = 128,
121 .num_pmcs = 8,
122 .cpu_setup = __setup_cpu_power3,
123#ifdef CONFIG_OPROFILE
124 .oprofile_cpu_type = "ppc64/rs64",
125 .oprofile_model = &op_model_rs64,
126#endif
127 },
128 { /* I-star */
129 .pvr_mask = 0xffff0000,
130 .pvr_value = 0x00360000,
131 .cpu_name = "RS64-III (icestar)",
132 .cpu_features = CPU_FTRS_RS64,
133 .cpu_user_features = COMMON_USER_PPC64,
134 .icache_bsize = 128,
135 .dcache_bsize = 128,
136 .num_pmcs = 8,
137 .cpu_setup = __setup_cpu_power3,
138#ifdef CONFIG_OPROFILE
139 .oprofile_cpu_type = "ppc64/rs64",
140 .oprofile_model = &op_model_rs64,
141#endif
142 },
143 { /* S-star */
144 .pvr_mask = 0xffff0000,
145 .pvr_value = 0x00370000,
146 .cpu_name = "RS64-IV (sstar)",
147 .cpu_features = CPU_FTRS_RS64,
148 .cpu_user_features = COMMON_USER_PPC64,
149 .icache_bsize = 128,
150 .dcache_bsize = 128,
151 .num_pmcs = 8,
152 .cpu_setup = __setup_cpu_power3,
153#ifdef CONFIG_OPROFILE
154 .oprofile_cpu_type = "ppc64/rs64",
155 .oprofile_model = &op_model_rs64,
156#endif
157 },
158 { /* Power4 */
159 .pvr_mask = 0xffff0000,
160 .pvr_value = 0x00350000,
161 .cpu_name = "POWER4 (gp)",
162 .cpu_features = CPU_FTRS_POWER4,
163 .cpu_user_features = COMMON_USER_PPC64,
164 .icache_bsize = 128,
165 .dcache_bsize = 128,
166 .num_pmcs = 8,
167 .cpu_setup = __setup_cpu_power4,
168#ifdef CONFIG_OPROFILE
169 .oprofile_cpu_type = "ppc64/power4",
170 .oprofile_model = &op_model_rs64,
171#endif
172 },
173 { /* Power4+ */
174 .pvr_mask = 0xffff0000,
175 .pvr_value = 0x00380000,
176 .cpu_name = "POWER4+ (gq)",
177 .cpu_features = CPU_FTRS_POWER4,
178 .cpu_user_features = COMMON_USER_PPC64,
179 .icache_bsize = 128,
180 .dcache_bsize = 128,
181 .num_pmcs = 8,
182 .cpu_setup = __setup_cpu_power4,
183#ifdef CONFIG_OPROFILE
184 .oprofile_cpu_type = "ppc64/power4",
185 .oprofile_model = &op_model_power4,
186#endif
187 },
188 { /* PPC970 */
189 .pvr_mask = 0xffff0000,
190 .pvr_value = 0x00390000,
191 .cpu_name = "PPC970",
192 .cpu_features = CPU_FTRS_PPC970,
193 .cpu_user_features = COMMON_USER_PPC64 |
194 PPC_FEATURE_HAS_ALTIVEC_COMP,
195 .icache_bsize = 128,
196 .dcache_bsize = 128,
197 .num_pmcs = 8,
198 .cpu_setup = __setup_cpu_ppc970,
199#ifdef CONFIG_OPROFILE
200 .oprofile_cpu_type = "ppc64/970",
201 .oprofile_model = &op_model_power4,
202#endif
203 },
204#endif /* CONFIG_PPC64 */
205#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
206 { /* PPC970FX */
207 .pvr_mask = 0xffff0000,
208 .pvr_value = 0x003c0000,
209 .cpu_name = "PPC970FX",
210#ifdef CONFIG_PPC32
211 .cpu_features = CPU_FTRS_970_32,
80#else 212#else
81#define CPU_FTR_MAYBE_CAN_DOZE 0 213 .cpu_features = CPU_FTRS_PPC970,
82#define CPU_FTR_MAYBE_CAN_NAP 0
83#endif 214#endif
84 215 .cpu_user_features = COMMON_USER_PPC64 |
85struct cpu_spec cpu_specs[] = { 216 PPC_FEATURE_HAS_ALTIVEC_COMP,
217 .icache_bsize = 128,
218 .dcache_bsize = 128,
219 .num_pmcs = 8,
220 .cpu_setup = __setup_cpu_ppc970,
221#ifdef CONFIG_OPROFILE
222 .oprofile_cpu_type = "ppc64/970",
223 .oprofile_model = &op_model_power4,
224#endif
225 },
226#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
227#ifdef CONFIG_PPC64
228 { /* PPC970MP */
229 .pvr_mask = 0xffff0000,
230 .pvr_value = 0x00440000,
231 .cpu_name = "PPC970MP",
232 .cpu_features = CPU_FTRS_PPC970,
233 .cpu_user_features = COMMON_USER_PPC64 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .icache_bsize = 128,
236 .dcache_bsize = 128,
237 .cpu_setup = __setup_cpu_ppc970,
238#ifdef CONFIG_OPROFILE
239 .oprofile_cpu_type = "ppc64/970",
240 .oprofile_model = &op_model_power4,
241#endif
242 },
243 { /* Power5 */
244 .pvr_mask = 0xffff0000,
245 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)",
247 .cpu_features = CPU_FTRS_POWER5,
248 .cpu_user_features = COMMON_USER_PPC64,
249 .icache_bsize = 128,
250 .dcache_bsize = 128,
251 .num_pmcs = 6,
252 .cpu_setup = __setup_cpu_power4,
253#ifdef CONFIG_OPROFILE
254 .oprofile_cpu_type = "ppc64/power5",
255 .oprofile_model = &op_model_power4,
256#endif
257 },
258 { /* Power5 */
259 .pvr_mask = 0xffff0000,
260 .pvr_value = 0x003b0000,
261 .cpu_name = "POWER5 (gs)",
262 .cpu_features = CPU_FTRS_POWER5,
263 .cpu_user_features = COMMON_USER_PPC64,
264 .icache_bsize = 128,
265 .dcache_bsize = 128,
266 .num_pmcs = 6,
267 .cpu_setup = __setup_cpu_power4,
268#ifdef CONFIG_OPROFILE
269 .oprofile_cpu_type = "ppc64/power5",
270 .oprofile_model = &op_model_power4,
271#endif
272 },
273 { /* BE DD1.x */
274 .pvr_mask = 0xffff0000,
275 .pvr_value = 0x00700000,
276 .cpu_name = "Cell Broadband Engine",
277 .cpu_features = CPU_FTRS_CELL,
278 .cpu_user_features = COMMON_USER_PPC64 |
279 PPC_FEATURE_HAS_ALTIVEC_COMP,
280 .icache_bsize = 128,
281 .dcache_bsize = 128,
282 .cpu_setup = __setup_cpu_be,
283 },
284 { /* default match */
285 .pvr_mask = 0x00000000,
286 .pvr_value = 0x00000000,
287 .cpu_name = "POWER4 (compatible)",
288 .cpu_features = CPU_FTRS_COMPATIBLE,
289 .cpu_user_features = COMMON_USER_PPC64,
290 .icache_bsize = 128,
291 .dcache_bsize = 128,
292 .num_pmcs = 6,
293 .cpu_setup = __setup_cpu_power4,
294 }
295#endif /* CONFIG_PPC64 */
296#ifdef CONFIG_PPC32
86#if CLASSIC_PPC 297#if CLASSIC_PPC
87 { /* 601 */ 298 { /* 601 */
88 .pvr_mask = 0xffff0000, 299 .pvr_mask = 0xffff0000,
89 .pvr_value = 0x00010000, 300 .pvr_value = 0x00010000,
90 .cpu_name = "601", 301 .cpu_name = "601",
91 .cpu_features = CPU_FTR_COMMON | CPU_FTR_601 | 302 .cpu_features = CPU_FTRS_PPC601,
92 CPU_FTR_HPTE_TABLE, 303 .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
93 .cpu_user_features = COMMON_PPC | PPC_FEATURE_601_INSTR |
94 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, 304 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
95 .icache_bsize = 32, 305 .icache_bsize = 32,
96 .dcache_bsize = 32, 306 .dcache_bsize = 32,
97 .cpu_setup = __setup_cpu_601
98 }, 307 },
99 { /* 603 */ 308 { /* 603 */
100 .pvr_mask = 0xffff0000, 309 .pvr_mask = 0xffff0000,
101 .pvr_value = 0x00030000, 310 .pvr_value = 0x00030000,
102 .cpu_name = "603", 311 .cpu_name = "603",
103 .cpu_features = CPU_FTR_COMMON | 312 .cpu_features = CPU_FTRS_603,
104 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 313 .cpu_user_features = COMMON_USER,
105 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
106 .cpu_user_features = COMMON_PPC,
107 .icache_bsize = 32, 314 .icache_bsize = 32,
108 .dcache_bsize = 32, 315 .dcache_bsize = 32,
109 .cpu_setup = __setup_cpu_603 316 .cpu_setup = __setup_cpu_603
@@ -112,10 +319,8 @@ struct cpu_spec cpu_specs[] = {
112 .pvr_mask = 0xffff0000, 319 .pvr_mask = 0xffff0000,
113 .pvr_value = 0x00060000, 320 .pvr_value = 0x00060000,
114 .cpu_name = "603e", 321 .cpu_name = "603e",
115 .cpu_features = CPU_FTR_COMMON | 322 .cpu_features = CPU_FTRS_603,
116 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 323 .cpu_user_features = COMMON_USER,
117 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
118 .cpu_user_features = COMMON_PPC,
119 .icache_bsize = 32, 324 .icache_bsize = 32,
120 .dcache_bsize = 32, 325 .dcache_bsize = 32,
121 .cpu_setup = __setup_cpu_603 326 .cpu_setup = __setup_cpu_603
@@ -124,10 +329,8 @@ struct cpu_spec cpu_specs[] = {
124 .pvr_mask = 0xffff0000, 329 .pvr_mask = 0xffff0000,
125 .pvr_value = 0x00070000, 330 .pvr_value = 0x00070000,
126 .cpu_name = "603ev", 331 .cpu_name = "603ev",
127 .cpu_features = CPU_FTR_COMMON | 332 .cpu_features = CPU_FTRS_603,
128 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 333 .cpu_user_features = COMMON_USER,
129 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
130 .cpu_user_features = COMMON_PPC,
131 .icache_bsize = 32, 334 .icache_bsize = 32,
132 .dcache_bsize = 32, 335 .dcache_bsize = 32,
133 .cpu_setup = __setup_cpu_603 336 .cpu_setup = __setup_cpu_603
@@ -136,10 +339,8 @@ struct cpu_spec cpu_specs[] = {
136 .pvr_mask = 0xffff0000, 339 .pvr_mask = 0xffff0000,
137 .pvr_value = 0x00040000, 340 .pvr_value = 0x00040000,
138 .cpu_name = "604", 341 .cpu_name = "604",
139 .cpu_features = CPU_FTR_COMMON | 342 .cpu_features = CPU_FTRS_604,
140 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 343 .cpu_user_features = COMMON_USER,
141 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
142 .cpu_user_features = COMMON_PPC,
143 .icache_bsize = 32, 344 .icache_bsize = 32,
144 .dcache_bsize = 32, 345 .dcache_bsize = 32,
145 .num_pmcs = 2, 346 .num_pmcs = 2,
@@ -149,10 +350,8 @@ struct cpu_spec cpu_specs[] = {
149 .pvr_mask = 0xfffff000, 350 .pvr_mask = 0xfffff000,
150 .pvr_value = 0x00090000, 351 .pvr_value = 0x00090000,
151 .cpu_name = "604e", 352 .cpu_name = "604e",
152 .cpu_features = CPU_FTR_COMMON | 353 .cpu_features = CPU_FTRS_604,
153 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 354 .cpu_user_features = COMMON_USER,
154 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
155 .cpu_user_features = COMMON_PPC,
156 .icache_bsize = 32, 355 .icache_bsize = 32,
157 .dcache_bsize = 32, 356 .dcache_bsize = 32,
158 .num_pmcs = 4, 357 .num_pmcs = 4,
@@ -162,10 +361,8 @@ struct cpu_spec cpu_specs[] = {
162 .pvr_mask = 0xffff0000, 361 .pvr_mask = 0xffff0000,
163 .pvr_value = 0x00090000, 362 .pvr_value = 0x00090000,
164 .cpu_name = "604r", 363 .cpu_name = "604r",
165 .cpu_features = CPU_FTR_COMMON | 364 .cpu_features = CPU_FTRS_604,
166 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 365 .cpu_user_features = COMMON_USER,
167 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
168 .cpu_user_features = COMMON_PPC,
169 .icache_bsize = 32, 366 .icache_bsize = 32,
170 .dcache_bsize = 32, 367 .dcache_bsize = 32,
171 .num_pmcs = 4, 368 .num_pmcs = 4,
@@ -175,10 +372,8 @@ struct cpu_spec cpu_specs[] = {
175 .pvr_mask = 0xffff0000, 372 .pvr_mask = 0xffff0000,
176 .pvr_value = 0x000a0000, 373 .pvr_value = 0x000a0000,
177 .cpu_name = "604ev", 374 .cpu_name = "604ev",
178 .cpu_features = CPU_FTR_COMMON | 375 .cpu_features = CPU_FTRS_604,
179 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 376 .cpu_user_features = COMMON_USER,
180 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
181 .cpu_user_features = COMMON_PPC,
182 .icache_bsize = 32, 377 .icache_bsize = 32,
183 .dcache_bsize = 32, 378 .dcache_bsize = 32,
184 .num_pmcs = 4, 379 .num_pmcs = 4,
@@ -188,11 +383,8 @@ struct cpu_spec cpu_specs[] = {
188 .pvr_mask = 0xffffffff, 383 .pvr_mask = 0xffffffff,
189 .pvr_value = 0x00084202, 384 .pvr_value = 0x00084202,
190 .cpu_name = "740/750", 385 .cpu_name = "740/750",
191 .cpu_features = CPU_FTR_COMMON | 386 .cpu_features = CPU_FTRS_740_NOTAU,
192 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 387 .cpu_user_features = COMMON_USER,
193 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_HPTE_TABLE |
194 CPU_FTR_MAYBE_CAN_NAP,
195 .cpu_user_features = COMMON_PPC,
196 .icache_bsize = 32, 388 .icache_bsize = 32,
197 .dcache_bsize = 32, 389 .dcache_bsize = 32,
198 .num_pmcs = 4, 390 .num_pmcs = 4,
@@ -202,11 +394,8 @@ struct cpu_spec cpu_specs[] = {
202 .pvr_mask = 0xfffffff0, 394 .pvr_mask = 0xfffffff0,
203 .pvr_value = 0x00080100, 395 .pvr_value = 0x00080100,
204 .cpu_name = "750CX", 396 .cpu_name = "750CX",
205 .cpu_features = CPU_FTR_COMMON | 397 .cpu_features = CPU_FTRS_750,
206 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 398 .cpu_user_features = COMMON_USER,
207 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
208 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
209 .cpu_user_features = COMMON_PPC,
210 .icache_bsize = 32, 399 .icache_bsize = 32,
211 .dcache_bsize = 32, 400 .dcache_bsize = 32,
212 .num_pmcs = 4, 401 .num_pmcs = 4,
@@ -216,11 +405,8 @@ struct cpu_spec cpu_specs[] = {
216 .pvr_mask = 0xfffffff0, 405 .pvr_mask = 0xfffffff0,
217 .pvr_value = 0x00082200, 406 .pvr_value = 0x00082200,
218 .cpu_name = "750CX", 407 .cpu_name = "750CX",
219 .cpu_features = CPU_FTR_COMMON | 408 .cpu_features = CPU_FTRS_750,
220 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 409 .cpu_user_features = COMMON_USER,
221 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
222 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
223 .cpu_user_features = COMMON_PPC,
224 .icache_bsize = 32, 410 .icache_bsize = 32,
225 .dcache_bsize = 32, 411 .dcache_bsize = 32,
226 .num_pmcs = 4, 412 .num_pmcs = 4,
@@ -230,11 +416,8 @@ struct cpu_spec cpu_specs[] = {
230 .pvr_mask = 0xfffffff0, 416 .pvr_mask = 0xfffffff0,
231 .pvr_value = 0x00082210, 417 .pvr_value = 0x00082210,
232 .cpu_name = "750CXe", 418 .cpu_name = "750CXe",
233 .cpu_features = CPU_FTR_COMMON | 419 .cpu_features = CPU_FTRS_750,
234 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 420 .cpu_user_features = COMMON_USER,
235 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
236 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
237 .cpu_user_features = COMMON_PPC,
238 .icache_bsize = 32, 421 .icache_bsize = 32,
239 .dcache_bsize = 32, 422 .dcache_bsize = 32,
240 .num_pmcs = 4, 423 .num_pmcs = 4,
@@ -244,11 +427,8 @@ struct cpu_spec cpu_specs[] = {
244 .pvr_mask = 0xffffffff, 427 .pvr_mask = 0xffffffff,
245 .pvr_value = 0x00083214, 428 .pvr_value = 0x00083214,
246 .cpu_name = "750CXe", 429 .cpu_name = "750CXe",
247 .cpu_features = CPU_FTR_COMMON | 430 .cpu_features = CPU_FTRS_750,
248 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 431 .cpu_user_features = COMMON_USER,
249 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
250 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
251 .cpu_user_features = COMMON_PPC,
252 .icache_bsize = 32, 432 .icache_bsize = 32,
253 .dcache_bsize = 32, 433 .dcache_bsize = 32,
254 .num_pmcs = 4, 434 .num_pmcs = 4,
@@ -258,11 +438,8 @@ struct cpu_spec cpu_specs[] = {
258 .pvr_mask = 0xfffff000, 438 .pvr_mask = 0xfffff000,
259 .pvr_value = 0x00083000, 439 .pvr_value = 0x00083000,
260 .cpu_name = "745/755", 440 .cpu_name = "745/755",
261 .cpu_features = CPU_FTR_COMMON | 441 .cpu_features = CPU_FTRS_750,
262 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 442 .cpu_user_features = COMMON_USER,
263 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
264 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
265 .cpu_user_features = COMMON_PPC,
266 .icache_bsize = 32, 443 .icache_bsize = 32,
267 .dcache_bsize = 32, 444 .dcache_bsize = 32,
268 .num_pmcs = 4, 445 .num_pmcs = 4,
@@ -272,12 +449,8 @@ struct cpu_spec cpu_specs[] = {
272 .pvr_mask = 0xffffff00, 449 .pvr_mask = 0xffffff00,
273 .pvr_value = 0x70000100, 450 .pvr_value = 0x70000100,
274 .cpu_name = "750FX", 451 .cpu_name = "750FX",
275 .cpu_features = CPU_FTR_COMMON | 452 .cpu_features = CPU_FTRS_750FX1,
276 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 453 .cpu_user_features = COMMON_USER,
277 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
278 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
279 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
280 .cpu_user_features = COMMON_PPC,
281 .icache_bsize = 32, 454 .icache_bsize = 32,
282 .dcache_bsize = 32, 455 .dcache_bsize = 32,
283 .num_pmcs = 4, 456 .num_pmcs = 4,
@@ -287,12 +460,8 @@ struct cpu_spec cpu_specs[] = {
287 .pvr_mask = 0xffffffff, 460 .pvr_mask = 0xffffffff,
288 .pvr_value = 0x70000200, 461 .pvr_value = 0x70000200,
289 .cpu_name = "750FX", 462 .cpu_name = "750FX",
290 .cpu_features = CPU_FTR_COMMON | 463 .cpu_features = CPU_FTRS_750FX2,
291 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 464 .cpu_user_features = COMMON_USER,
292 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
293 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
294 CPU_FTR_NO_DPM,
295 .cpu_user_features = COMMON_PPC,
296 .icache_bsize = 32, 465 .icache_bsize = 32,
297 .dcache_bsize = 32, 466 .dcache_bsize = 32,
298 .num_pmcs = 4, 467 .num_pmcs = 4,
@@ -302,12 +471,8 @@ struct cpu_spec cpu_specs[] = {
302 .pvr_mask = 0xffff0000, 471 .pvr_mask = 0xffff0000,
303 .pvr_value = 0x70000000, 472 .pvr_value = 0x70000000,
304 .cpu_name = "750FX", 473 .cpu_name = "750FX",
305 .cpu_features = CPU_FTR_COMMON | 474 .cpu_features = CPU_FTRS_750FX,
306 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 475 .cpu_user_features = COMMON_USER,
307 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
308 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
309 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
310 .cpu_user_features = COMMON_PPC,
311 .icache_bsize = 32, 476 .icache_bsize = 32,
312 .dcache_bsize = 32, 477 .dcache_bsize = 32,
313 .num_pmcs = 4, 478 .num_pmcs = 4,
@@ -317,12 +482,8 @@ struct cpu_spec cpu_specs[] = {
317 .pvr_mask = 0xffff0000, 482 .pvr_mask = 0xffff0000,
318 .pvr_value = 0x70020000, 483 .pvr_value = 0x70020000,
319 .cpu_name = "750GX", 484 .cpu_name = "750GX",
320 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 485 .cpu_features = CPU_FTRS_750GX,
321 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 486 .cpu_user_features = COMMON_USER,
322 CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE |
323 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_DUAL_PLL_750FX |
324 CPU_FTR_HAS_HIGH_BATS,
325 .cpu_user_features = COMMON_PPC,
326 .icache_bsize = 32, 487 .icache_bsize = 32,
327 .dcache_bsize = 32, 488 .dcache_bsize = 32,
328 .num_pmcs = 4, 489 .num_pmcs = 4,
@@ -332,11 +493,8 @@ struct cpu_spec cpu_specs[] = {
332 .pvr_mask = 0xffff0000, 493 .pvr_mask = 0xffff0000,
333 .pvr_value = 0x00080000, 494 .pvr_value = 0x00080000,
334 .cpu_name = "740/750", 495 .cpu_name = "740/750",
335 .cpu_features = CPU_FTR_COMMON | 496 .cpu_features = CPU_FTRS_740,
336 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 497 .cpu_user_features = COMMON_USER,
337 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
338 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
339 .cpu_user_features = COMMON_PPC,
340 .icache_bsize = 32, 498 .icache_bsize = 32,
341 .dcache_bsize = 32, 499 .dcache_bsize = 32,
342 .num_pmcs = 4, 500 .num_pmcs = 4,
@@ -346,11 +504,8 @@ struct cpu_spec cpu_specs[] = {
346 .pvr_mask = 0xffffffff, 504 .pvr_mask = 0xffffffff,
347 .pvr_value = 0x000c1101, 505 .pvr_value = 0x000c1101,
348 .cpu_name = "7400 (1.1)", 506 .cpu_name = "7400 (1.1)",
349 .cpu_features = CPU_FTR_COMMON | 507 .cpu_features = CPU_FTRS_7400_NOTAU,
350 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 508 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
351 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
352 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
353 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
354 .icache_bsize = 32, 509 .icache_bsize = 32,
355 .dcache_bsize = 32, 510 .dcache_bsize = 32,
356 .num_pmcs = 4, 511 .num_pmcs = 4,
@@ -360,12 +515,8 @@ struct cpu_spec cpu_specs[] = {
360 .pvr_mask = 0xffff0000, 515 .pvr_mask = 0xffff0000,
361 .pvr_value = 0x000c0000, 516 .pvr_value = 0x000c0000,
362 .cpu_name = "7400", 517 .cpu_name = "7400",
363 .cpu_features = CPU_FTR_COMMON | 518 .cpu_features = CPU_FTRS_7400,
364 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 519 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
365 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
366 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
367 CPU_FTR_MAYBE_CAN_NAP,
368 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
369 .icache_bsize = 32, 520 .icache_bsize = 32,
370 .dcache_bsize = 32, 521 .dcache_bsize = 32,
371 .num_pmcs = 4, 522 .num_pmcs = 4,
@@ -375,12 +526,8 @@ struct cpu_spec cpu_specs[] = {
375 .pvr_mask = 0xffff0000, 526 .pvr_mask = 0xffff0000,
376 .pvr_value = 0x800c0000, 527 .pvr_value = 0x800c0000,
377 .cpu_name = "7410", 528 .cpu_name = "7410",
378 .cpu_features = CPU_FTR_COMMON | 529 .cpu_features = CPU_FTRS_7400,
379 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 530 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
380 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
381 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
382 CPU_FTR_MAYBE_CAN_NAP,
383 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
384 .icache_bsize = 32, 531 .icache_bsize = 32,
385 .dcache_bsize = 32, 532 .dcache_bsize = 32,
386 .num_pmcs = 4, 533 .num_pmcs = 4,
@@ -390,12 +537,8 @@ struct cpu_spec cpu_specs[] = {
390 .pvr_mask = 0xffffffff, 537 .pvr_mask = 0xffffffff,
391 .pvr_value = 0x80000200, 538 .pvr_value = 0x80000200,
392 .cpu_name = "7450", 539 .cpu_name = "7450",
393 .cpu_features = CPU_FTR_COMMON | 540 .cpu_features = CPU_FTRS_7450_20,
394 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 541 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
395 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
396 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
397 CPU_FTR_NEED_COHERENT,
398 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
399 .icache_bsize = 32, 542 .icache_bsize = 32,
400 .dcache_bsize = 32, 543 .dcache_bsize = 32,
401 .num_pmcs = 6, 544 .num_pmcs = 6,
@@ -405,14 +548,8 @@ struct cpu_spec cpu_specs[] = {
405 .pvr_mask = 0xffffffff, 548 .pvr_mask = 0xffffffff,
406 .pvr_value = 0x80000201, 549 .pvr_value = 0x80000201,
407 .cpu_name = "7450", 550 .cpu_name = "7450",
408 .cpu_features = CPU_FTR_COMMON | 551 .cpu_features = CPU_FTRS_7450_21,
409 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 552 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
410 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
411 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
412 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
413 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
414 CPU_FTR_NEED_COHERENT,
415 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
416 .icache_bsize = 32, 553 .icache_bsize = 32,
417 .dcache_bsize = 32, 554 .dcache_bsize = 32,
418 .num_pmcs = 6, 555 .num_pmcs = 6,
@@ -422,13 +559,8 @@ struct cpu_spec cpu_specs[] = {
422 .pvr_mask = 0xffff0000, 559 .pvr_mask = 0xffff0000,
423 .pvr_value = 0x80000000, 560 .pvr_value = 0x80000000,
424 .cpu_name = "7450", 561 .cpu_name = "7450",
425 .cpu_features = CPU_FTR_COMMON | 562 .cpu_features = CPU_FTRS_7450_23,
426 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 563 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
427 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
428 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
429 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
430 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
431 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
432 .icache_bsize = 32, 564 .icache_bsize = 32,
433 .dcache_bsize = 32, 565 .dcache_bsize = 32,
434 .num_pmcs = 6, 566 .num_pmcs = 6,
@@ -438,12 +570,8 @@ struct cpu_spec cpu_specs[] = {
438 .pvr_mask = 0xffffff00, 570 .pvr_mask = 0xffffff00,
439 .pvr_value = 0x80010100, 571 .pvr_value = 0x80010100,
440 .cpu_name = "7455", 572 .cpu_name = "7455",
441 .cpu_features = CPU_FTR_COMMON | 573 .cpu_features = CPU_FTRS_7455_1,
442 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 574 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
443 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
444 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
445 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
446 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
447 .icache_bsize = 32, 575 .icache_bsize = 32,
448 .dcache_bsize = 32, 576 .dcache_bsize = 32,
449 .num_pmcs = 6, 577 .num_pmcs = 6,
@@ -453,14 +581,8 @@ struct cpu_spec cpu_specs[] = {
453 .pvr_mask = 0xffffffff, 581 .pvr_mask = 0xffffffff,
454 .pvr_value = 0x80010200, 582 .pvr_value = 0x80010200,
455 .cpu_name = "7455", 583 .cpu_name = "7455",
456 .cpu_features = CPU_FTR_COMMON | 584 .cpu_features = CPU_FTRS_7455_20,
457 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 585 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
458 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
459 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
460 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
461 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
462 CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
463 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
464 .icache_bsize = 32, 586 .icache_bsize = 32,
465 .dcache_bsize = 32, 587 .dcache_bsize = 32,
466 .num_pmcs = 6, 588 .num_pmcs = 6,
@@ -470,14 +592,8 @@ struct cpu_spec cpu_specs[] = {
470 .pvr_mask = 0xffff0000, 592 .pvr_mask = 0xffff0000,
471 .pvr_value = 0x80010000, 593 .pvr_value = 0x80010000,
472 .cpu_name = "7455", 594 .cpu_name = "7455",
473 .cpu_features = CPU_FTR_COMMON | 595 .cpu_features = CPU_FTRS_7455,
474 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 596 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
475 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
476 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
477 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
478 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
479 CPU_FTR_NEED_COHERENT,
480 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
481 .icache_bsize = 32, 597 .icache_bsize = 32,
482 .dcache_bsize = 32, 598 .dcache_bsize = 32,
483 .num_pmcs = 6, 599 .num_pmcs = 6,
@@ -487,14 +603,8 @@ struct cpu_spec cpu_specs[] = {
487 .pvr_mask = 0xffffffff, 603 .pvr_mask = 0xffffffff,
488 .pvr_value = 0x80020100, 604 .pvr_value = 0x80020100,
489 .cpu_name = "7447/7457", 605 .cpu_name = "7447/7457",
490 .cpu_features = CPU_FTR_COMMON | 606 .cpu_features = CPU_FTRS_7447_10,
491 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 607 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
492 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
493 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
494 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
495 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
496 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
497 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
498 .icache_bsize = 32, 608 .icache_bsize = 32,
499 .dcache_bsize = 32, 609 .dcache_bsize = 32,
500 .num_pmcs = 6, 610 .num_pmcs = 6,
@@ -504,14 +614,8 @@ struct cpu_spec cpu_specs[] = {
504 .pvr_mask = 0xffffffff, 614 .pvr_mask = 0xffffffff,
505 .pvr_value = 0x80020101, 615 .pvr_value = 0x80020101,
506 .cpu_name = "7447/7457", 616 .cpu_name = "7447/7457",
507 .cpu_features = CPU_FTR_COMMON | 617 .cpu_features = CPU_FTRS_7447_10,
508 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 618 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
509 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
510 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
511 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
512 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
513 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
514 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
515 .icache_bsize = 32, 619 .icache_bsize = 32,
516 .dcache_bsize = 32, 620 .dcache_bsize = 32,
517 .num_pmcs = 6, 621 .num_pmcs = 6,
@@ -521,14 +625,8 @@ struct cpu_spec cpu_specs[] = {
521 .pvr_mask = 0xffff0000, 625 .pvr_mask = 0xffff0000,
522 .pvr_value = 0x80020000, 626 .pvr_value = 0x80020000,
523 .cpu_name = "7447/7457", 627 .cpu_name = "7447/7457",
524 .cpu_features = CPU_FTR_COMMON | 628 .cpu_features = CPU_FTRS_7447,
525 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 629 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
526 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
527 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
528 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
529 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
530 CPU_FTR_NEED_COHERENT,
531 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
532 .icache_bsize = 32, 630 .icache_bsize = 32,
533 .dcache_bsize = 32, 631 .dcache_bsize = 32,
534 .num_pmcs = 6, 632 .num_pmcs = 6,
@@ -538,13 +636,8 @@ struct cpu_spec cpu_specs[] = {
538 .pvr_mask = 0xffff0000, 636 .pvr_mask = 0xffff0000,
539 .pvr_value = 0x80030000, 637 .pvr_value = 0x80030000,
540 .cpu_name = "7447A", 638 .cpu_name = "7447A",
541 .cpu_features = CPU_FTR_COMMON | 639 .cpu_features = CPU_FTRS_7447A,
542 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 640 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
543 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
544 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
545 CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
546 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
547 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
548 .icache_bsize = 32, 641 .icache_bsize = 32,
549 .dcache_bsize = 32, 642 .dcache_bsize = 32,
550 .num_pmcs = 6, 643 .num_pmcs = 6,
@@ -554,13 +647,8 @@ struct cpu_spec cpu_specs[] = {
554 .pvr_mask = 0xffff0000, 647 .pvr_mask = 0xffff0000,
555 .pvr_value = 0x80040000, 648 .pvr_value = 0x80040000,
556 .cpu_name = "7448", 649 .cpu_name = "7448",
557 .cpu_features = CPU_FTR_COMMON | 650 .cpu_features = CPU_FTRS_7447A,
558 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 651 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
559 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
560 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
561 CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
562 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
563 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
564 .icache_bsize = 32, 652 .icache_bsize = 32,
565 .dcache_bsize = 32, 653 .dcache_bsize = 32,
566 .num_pmcs = 6, 654 .num_pmcs = 6,
@@ -570,10 +658,8 @@ struct cpu_spec cpu_specs[] = {
570 .pvr_mask = 0x7fff0000, 658 .pvr_mask = 0x7fff0000,
571 .pvr_value = 0x00810000, 659 .pvr_value = 0x00810000,
572 .cpu_name = "82xx", 660 .cpu_name = "82xx",
573 .cpu_features = CPU_FTR_COMMON | 661 .cpu_features = CPU_FTRS_82XX,
574 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 662 .cpu_user_features = COMMON_USER,
575 CPU_FTR_USE_TB,
576 .cpu_user_features = COMMON_PPC,
577 .icache_bsize = 32, 663 .icache_bsize = 32,
578 .dcache_bsize = 32, 664 .dcache_bsize = 32,
579 .cpu_setup = __setup_cpu_603 665 .cpu_setup = __setup_cpu_603
@@ -582,10 +668,8 @@ struct cpu_spec cpu_specs[] = {
582 .pvr_mask = 0x7fff0000, 668 .pvr_mask = 0x7fff0000,
583 .pvr_value = 0x00820000, 669 .pvr_value = 0x00820000,
584 .cpu_name = "G2_LE", 670 .cpu_name = "G2_LE",
585 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 671 .cpu_features = CPU_FTRS_G2_LE,
586 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 672 .cpu_user_features = COMMON_USER,
587 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
588 .cpu_user_features = COMMON_PPC,
589 .icache_bsize = 32, 673 .icache_bsize = 32,
590 .dcache_bsize = 32, 674 .dcache_bsize = 32,
591 .cpu_setup = __setup_cpu_603 675 .cpu_setup = __setup_cpu_603
@@ -594,10 +678,8 @@ struct cpu_spec cpu_specs[] = {
594 .pvr_mask = 0x7fff0000, 678 .pvr_mask = 0x7fff0000,
595 .pvr_value = 0x00830000, 679 .pvr_value = 0x00830000,
596 .cpu_name = "e300", 680 .cpu_name = "e300",
597 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 681 .cpu_features = CPU_FTRS_E300,
598 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 682 .cpu_user_features = COMMON_USER,
599 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
600 .cpu_user_features = COMMON_PPC,
601 .icache_bsize = 32, 683 .icache_bsize = 32,
602 .dcache_bsize = 32, 684 .dcache_bsize = 32,
603 .cpu_setup = __setup_cpu_603 685 .cpu_setup = __setup_cpu_603
@@ -606,114 +688,12 @@ struct cpu_spec cpu_specs[] = {
606 .pvr_mask = 0x00000000, 688 .pvr_mask = 0x00000000,
607 .pvr_value = 0x00000000, 689 .pvr_value = 0x00000000,
608 .cpu_name = "(generic PPC)", 690 .cpu_name = "(generic PPC)",
609 .cpu_features = CPU_FTR_COMMON | 691 .cpu_features = CPU_FTRS_CLASSIC32,
610 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 692 .cpu_user_features = COMMON_USER,
611 CPU_FTR_HPTE_TABLE,
612 .cpu_user_features = COMMON_PPC,
613 .icache_bsize = 32, 693 .icache_bsize = 32,
614 .dcache_bsize = 32, 694 .dcache_bsize = 32,
615 .cpu_setup = __setup_cpu_generic
616 }, 695 },
617#endif /* CLASSIC_PPC */ 696#endif /* CLASSIC_PPC */
618#ifdef CONFIG_PPC64BRIDGE
619 { /* Power3 */
620 .pvr_mask = 0xffff0000,
621 .pvr_value = 0x00400000,
622 .cpu_name = "Power3 (630)",
623 .cpu_features = CPU_FTR_COMMON |
624 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
625 CPU_FTR_HPTE_TABLE,
626 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
627 .icache_bsize = 128,
628 .dcache_bsize = 128,
629 .num_pmcs = 8,
630 .cpu_setup = __setup_cpu_power3
631 },
632 { /* Power3+ */
633 .pvr_mask = 0xffff0000,
634 .pvr_value = 0x00410000,
635 .cpu_name = "Power3 (630+)",
636 .cpu_features = CPU_FTR_COMMON |
637 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
638 CPU_FTR_HPTE_TABLE,
639 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
640 .icache_bsize = 128,
641 .dcache_bsize = 128,
642 .num_pmcs = 8,
643 .cpu_setup = __setup_cpu_power3
644 },
645 { /* I-star */
646 .pvr_mask = 0xffff0000,
647 .pvr_value = 0x00360000,
648 .cpu_name = "I-star",
649 .cpu_features = CPU_FTR_COMMON |
650 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
651 CPU_FTR_HPTE_TABLE,
652 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
653 .icache_bsize = 128,
654 .dcache_bsize = 128,
655 .num_pmcs = 8,
656 .cpu_setup = __setup_cpu_power3
657 },
658 { /* S-star */
659 .pvr_mask = 0xffff0000,
660 .pvr_value = 0x00370000,
661 .cpu_name = "S-star",
662 .cpu_features = CPU_FTR_COMMON |
663 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
664 CPU_FTR_HPTE_TABLE,
665 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
666 .icache_bsize = 128,
667 .dcache_bsize = 128,
668 .num_pmcs = 8,
669 .cpu_setup = __setup_cpu_power3
670 },
671#endif /* CONFIG_PPC64BRIDGE */
672#ifdef CONFIG_POWER4
673 { /* Power4 */
674 .pvr_mask = 0xffff0000,
675 .pvr_value = 0x00350000,
676 .cpu_name = "Power4",
677 .cpu_features = CPU_FTR_COMMON |
678 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
679 CPU_FTR_HPTE_TABLE,
680 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
681 .icache_bsize = 128,
682 .dcache_bsize = 128,
683 .num_pmcs = 8,
684 .cpu_setup = __setup_cpu_power4
685 },
686 { /* PPC970 */
687 .pvr_mask = 0xffff0000,
688 .pvr_value = 0x00390000,
689 .cpu_name = "PPC970",
690 .cpu_features = CPU_FTR_COMMON |
691 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
692 CPU_FTR_HPTE_TABLE |
693 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
694 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 |
695 PPC_FEATURE_ALTIVEC_COMP,
696 .icache_bsize = 128,
697 .dcache_bsize = 128,
698 .num_pmcs = 8,
699 .cpu_setup = __setup_cpu_ppc970
700 },
701 { /* PPC970FX */
702 .pvr_mask = 0xffff0000,
703 .pvr_value = 0x003c0000,
704 .cpu_name = "PPC970FX",
705 .cpu_features = CPU_FTR_COMMON |
706 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
707 CPU_FTR_HPTE_TABLE |
708 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
709 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 |
710 PPC_FEATURE_ALTIVEC_COMP,
711 .icache_bsize = 128,
712 .dcache_bsize = 128,
713 .num_pmcs = 8,
714 .cpu_setup = __setup_cpu_ppc970
715 },
716#endif /* CONFIG_POWER4 */
717#ifdef CONFIG_8xx 697#ifdef CONFIG_8xx
718 { /* 8xx */ 698 { /* 8xx */
719 .pvr_mask = 0xffff0000, 699 .pvr_mask = 0xffff0000,
@@ -721,8 +701,7 @@ struct cpu_spec cpu_specs[] = {
721 .cpu_name = "8xx", 701 .cpu_name = "8xx",
722 /* CPU_FTR_MAYBE_CAN_DOZE is possible, 702 /* CPU_FTR_MAYBE_CAN_DOZE is possible,
723 * if the 8xx code is there.... */ 703 * if the 8xx code is there.... */
724 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 704 .cpu_features = CPU_FTRS_8XX,
725 CPU_FTR_USE_TB,
726 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 705 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
727 .icache_bsize = 16, 706 .icache_bsize = 16,
728 .dcache_bsize = 16, 707 .dcache_bsize = 16,
@@ -733,8 +712,7 @@ struct cpu_spec cpu_specs[] = {
733 .pvr_mask = 0xffffff00, 712 .pvr_mask = 0xffffff00,
734 .pvr_value = 0x00200200, 713 .pvr_value = 0x00200200,
735 .cpu_name = "403GC", 714 .cpu_name = "403GC",
736 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 715 .cpu_features = CPU_FTRS_40X,
737 CPU_FTR_USE_TB,
738 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 716 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
739 .icache_bsize = 16, 717 .icache_bsize = 16,
740 .dcache_bsize = 16, 718 .dcache_bsize = 16,
@@ -743,8 +721,7 @@ struct cpu_spec cpu_specs[] = {
743 .pvr_mask = 0xffffff00, 721 .pvr_mask = 0xffffff00,
744 .pvr_value = 0x00201400, 722 .pvr_value = 0x00201400,
745 .cpu_name = "403GCX", 723 .cpu_name = "403GCX",
746 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 724 .cpu_features = CPU_FTRS_40X,
747 CPU_FTR_USE_TB,
748 .cpu_user_features = PPC_FEATURE_32 | 725 .cpu_user_features = PPC_FEATURE_32 |
749 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, 726 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
750 .icache_bsize = 16, 727 .icache_bsize = 16,
@@ -754,8 +731,7 @@ struct cpu_spec cpu_specs[] = {
754 .pvr_mask = 0xffff0000, 731 .pvr_mask = 0xffff0000,
755 .pvr_value = 0x00200000, 732 .pvr_value = 0x00200000,
756 .cpu_name = "403G ??", 733 .cpu_name = "403G ??",
757 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 734 .cpu_features = CPU_FTRS_40X,
758 CPU_FTR_USE_TB,
759 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 735 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
760 .icache_bsize = 16, 736 .icache_bsize = 16,
761 .dcache_bsize = 16, 737 .dcache_bsize = 16,
@@ -764,8 +740,7 @@ struct cpu_spec cpu_specs[] = {
764 .pvr_mask = 0xffff0000, 740 .pvr_mask = 0xffff0000,
765 .pvr_value = 0x40110000, 741 .pvr_value = 0x40110000,
766 .cpu_name = "405GP", 742 .cpu_name = "405GP",
767 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 743 .cpu_features = CPU_FTRS_40X,
768 CPU_FTR_USE_TB,
769 .cpu_user_features = PPC_FEATURE_32 | 744 .cpu_user_features = PPC_FEATURE_32 |
770 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 745 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
771 .icache_bsize = 32, 746 .icache_bsize = 32,
@@ -775,8 +750,7 @@ struct cpu_spec cpu_specs[] = {
775 .pvr_mask = 0xffff0000, 750 .pvr_mask = 0xffff0000,
776 .pvr_value = 0x40130000, 751 .pvr_value = 0x40130000,
777 .cpu_name = "STB03xxx", 752 .cpu_name = "STB03xxx",
778 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 753 .cpu_features = CPU_FTRS_40X,
779 CPU_FTR_USE_TB,
780 .cpu_user_features = PPC_FEATURE_32 | 754 .cpu_user_features = PPC_FEATURE_32 |
781 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 755 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
782 .icache_bsize = 32, 756 .icache_bsize = 32,
@@ -786,8 +760,7 @@ struct cpu_spec cpu_specs[] = {
786 .pvr_mask = 0xffff0000, 760 .pvr_mask = 0xffff0000,
787 .pvr_value = 0x41810000, 761 .pvr_value = 0x41810000,
788 .cpu_name = "STB04xxx", 762 .cpu_name = "STB04xxx",
789 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 763 .cpu_features = CPU_FTRS_40X,
790 CPU_FTR_USE_TB,
791 .cpu_user_features = PPC_FEATURE_32 | 764 .cpu_user_features = PPC_FEATURE_32 |
792 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 765 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
793 .icache_bsize = 32, 766 .icache_bsize = 32,
@@ -797,8 +770,7 @@ struct cpu_spec cpu_specs[] = {
797 .pvr_mask = 0xffff0000, 770 .pvr_mask = 0xffff0000,
798 .pvr_value = 0x41610000, 771 .pvr_value = 0x41610000,
799 .cpu_name = "NP405L", 772 .cpu_name = "NP405L",
800 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 773 .cpu_features = CPU_FTRS_40X,
801 CPU_FTR_USE_TB,
802 .cpu_user_features = PPC_FEATURE_32 | 774 .cpu_user_features = PPC_FEATURE_32 |
803 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 775 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
804 .icache_bsize = 32, 776 .icache_bsize = 32,
@@ -808,8 +780,7 @@ struct cpu_spec cpu_specs[] = {
808 .pvr_mask = 0xffff0000, 780 .pvr_mask = 0xffff0000,
809 .pvr_value = 0x40B10000, 781 .pvr_value = 0x40B10000,
810 .cpu_name = "NP4GS3", 782 .cpu_name = "NP4GS3",
811 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 783 .cpu_features = CPU_FTRS_40X,
812 CPU_FTR_USE_TB,
813 .cpu_user_features = PPC_FEATURE_32 | 784 .cpu_user_features = PPC_FEATURE_32 |
814 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 785 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
815 .icache_bsize = 32, 786 .icache_bsize = 32,
@@ -819,8 +790,7 @@ struct cpu_spec cpu_specs[] = {
819 .pvr_mask = 0xffff0000, 790 .pvr_mask = 0xffff0000,
820 .pvr_value = 0x41410000, 791 .pvr_value = 0x41410000,
821 .cpu_name = "NP405H", 792 .cpu_name = "NP405H",
822 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 793 .cpu_features = CPU_FTRS_40X,
823 CPU_FTR_USE_TB,
824 .cpu_user_features = PPC_FEATURE_32 | 794 .cpu_user_features = PPC_FEATURE_32 |
825 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 795 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
826 .icache_bsize = 32, 796 .icache_bsize = 32,
@@ -830,8 +800,7 @@ struct cpu_spec cpu_specs[] = {
830 .pvr_mask = 0xffff0000, 800 .pvr_mask = 0xffff0000,
831 .pvr_value = 0x50910000, 801 .pvr_value = 0x50910000,
832 .cpu_name = "405GPr", 802 .cpu_name = "405GPr",
833 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 803 .cpu_features = CPU_FTRS_40X,
834 CPU_FTR_USE_TB,
835 .cpu_user_features = PPC_FEATURE_32 | 804 .cpu_user_features = PPC_FEATURE_32 |
836 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 805 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
837 .icache_bsize = 32, 806 .icache_bsize = 32,
@@ -841,8 +810,7 @@ struct cpu_spec cpu_specs[] = {
841 .pvr_mask = 0xffff0000, 810 .pvr_mask = 0xffff0000,
842 .pvr_value = 0x51510000, 811 .pvr_value = 0x51510000,
843 .cpu_name = "STBx25xx", 812 .cpu_name = "STBx25xx",
844 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 813 .cpu_features = CPU_FTRS_40X,
845 CPU_FTR_USE_TB,
846 .cpu_user_features = PPC_FEATURE_32 | 814 .cpu_user_features = PPC_FEATURE_32 |
847 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 815 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
848 .icache_bsize = 32, 816 .icache_bsize = 32,
@@ -852,8 +820,7 @@ struct cpu_spec cpu_specs[] = {
852 .pvr_mask = 0xffff0000, 820 .pvr_mask = 0xffff0000,
853 .pvr_value = 0x41F10000, 821 .pvr_value = 0x41F10000,
854 .cpu_name = "405LP", 822 .cpu_name = "405LP",
855 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 823 .cpu_features = CPU_FTRS_40X,
856 CPU_FTR_USE_TB,
857 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 824 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
858 .icache_bsize = 32, 825 .icache_bsize = 32,
859 .dcache_bsize = 32, 826 .dcache_bsize = 32,
@@ -862,8 +829,7 @@ struct cpu_spec cpu_specs[] = {
862 .pvr_mask = 0xffff0000, 829 .pvr_mask = 0xffff0000,
863 .pvr_value = 0x20010000, 830 .pvr_value = 0x20010000,
864 .cpu_name = "Virtex-II Pro", 831 .cpu_name = "Virtex-II Pro",
865 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 832 .cpu_features = CPU_FTRS_40X,
866 CPU_FTR_USE_TB,
867 .cpu_user_features = PPC_FEATURE_32 | 833 .cpu_user_features = PPC_FEATURE_32 |
868 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 834 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
869 .icache_bsize = 32, 835 .icache_bsize = 32,
@@ -873,8 +839,7 @@ struct cpu_spec cpu_specs[] = {
873 .pvr_mask = 0xffff0000, 839 .pvr_mask = 0xffff0000,
874 .pvr_value = 0x51210000, 840 .pvr_value = 0x51210000,
875 .cpu_name = "405EP", 841 .cpu_name = "405EP",
876 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 842 .cpu_features = CPU_FTRS_40X,
877 CPU_FTR_USE_TB,
878 .cpu_user_features = PPC_FEATURE_32 | 843 .cpu_user_features = PPC_FEATURE_32 |
879 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 844 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
880 .icache_bsize = 32, 845 .icache_bsize = 32,
@@ -887,9 +852,8 @@ struct cpu_spec cpu_specs[] = {
887 .pvr_mask = 0xf0000fff, 852 .pvr_mask = 0xf0000fff,
888 .pvr_value = 0x40000850, 853 .pvr_value = 0x40000850,
889 .cpu_name = "440EP Rev. A", 854 .cpu_name = "440EP Rev. A",
890 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 855 .cpu_features = CPU_FTRS_44X,
891 CPU_FTR_USE_TB, 856 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
892 .cpu_user_features = COMMON_PPC, /* 440EP has an FPU */
893 .icache_bsize = 32, 857 .icache_bsize = 32,
894 .dcache_bsize = 32, 858 .dcache_bsize = 32,
895 }, 859 },
@@ -897,28 +861,25 @@ struct cpu_spec cpu_specs[] = {
897 .pvr_mask = 0xf0000fff, 861 .pvr_mask = 0xf0000fff,
898 .pvr_value = 0x400008d3, 862 .pvr_value = 0x400008d3,
899 .cpu_name = "440EP Rev. B", 863 .cpu_name = "440EP Rev. B",
900 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 864 .cpu_features = CPU_FTRS_44X,
901 CPU_FTR_USE_TB, 865 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
902 .cpu_user_features = COMMON_PPC, /* 440EP has an FPU */
903 .icache_bsize = 32, 866 .icache_bsize = 32,
904 .dcache_bsize = 32, 867 .dcache_bsize = 32,
905 }, 868 },
906 { /* 440GP Rev. B */ 869 { /* 440GP Rev. B */
907 .pvr_mask = 0xf0000fff, 870 .pvr_mask = 0xf0000fff,
908 .pvr_value = 0x40000440, 871 .pvr_value = 0x40000440,
909 .cpu_name = "440GP Rev. B", 872 .cpu_name = "440GP Rev. B",
910 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 873 .cpu_features = CPU_FTRS_44X,
911 CPU_FTR_USE_TB,
912 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 874 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
913 .icache_bsize = 32, 875 .icache_bsize = 32,
914 .dcache_bsize = 32, 876 .dcache_bsize = 32,
915 }, 877 },
916 { /* 440GP Rev. C */ 878 { /* 440GP Rev. C */
917 .pvr_mask = 0xf0000fff, 879 .pvr_mask = 0xf0000fff,
918 .pvr_value = 0x40000481, 880 .pvr_value = 0x40000481,
919 .cpu_name = "440GP Rev. C", 881 .cpu_name = "440GP Rev. C",
920 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 882 .cpu_features = CPU_FTRS_44X,
921 CPU_FTR_USE_TB,
922 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 883 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
923 .icache_bsize = 32, 884 .icache_bsize = 32,
924 .dcache_bsize = 32, 885 .dcache_bsize = 32,
@@ -927,8 +888,7 @@ struct cpu_spec cpu_specs[] = {
927 .pvr_mask = 0xf0000fff, 888 .pvr_mask = 0xf0000fff,
928 .pvr_value = 0x50000850, 889 .pvr_value = 0x50000850,
929 .cpu_name = "440GX Rev. A", 890 .cpu_name = "440GX Rev. A",
930 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 891 .cpu_features = CPU_FTRS_44X,
931 CPU_FTR_USE_TB,
932 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 892 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
933 .icache_bsize = 32, 893 .icache_bsize = 32,
934 .dcache_bsize = 32, 894 .dcache_bsize = 32,
@@ -937,8 +897,7 @@ struct cpu_spec cpu_specs[] = {
937 .pvr_mask = 0xf0000fff, 897 .pvr_mask = 0xf0000fff,
938 .pvr_value = 0x50000851, 898 .pvr_value = 0x50000851,
939 .cpu_name = "440GX Rev. B", 899 .cpu_name = "440GX Rev. B",
940 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 900 .cpu_features = CPU_FTRS_44X,
941 CPU_FTR_USE_TB,
942 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 901 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
943 .icache_bsize = 32, 902 .icache_bsize = 32,
944 .dcache_bsize = 32, 903 .dcache_bsize = 32,
@@ -947,8 +906,7 @@ struct cpu_spec cpu_specs[] = {
947 .pvr_mask = 0xf0000fff, 906 .pvr_mask = 0xf0000fff,
948 .pvr_value = 0x50000892, 907 .pvr_value = 0x50000892,
949 .cpu_name = "440GX Rev. C", 908 .cpu_name = "440GX Rev. C",
950 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 909 .cpu_features = CPU_FTRS_44X,
951 CPU_FTR_USE_TB,
952 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 910 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
953 .icache_bsize = 32, 911 .icache_bsize = 32,
954 .dcache_bsize = 32, 912 .dcache_bsize = 32,
@@ -957,8 +915,7 @@ struct cpu_spec cpu_specs[] = {
957 .pvr_mask = 0xf0000fff, 915 .pvr_mask = 0xf0000fff,
958 .pvr_value = 0x50000894, 916 .pvr_value = 0x50000894,
959 .cpu_name = "440GX Rev. F", 917 .cpu_name = "440GX Rev. F",
960 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 918 .cpu_features = CPU_FTRS_44X,
961 CPU_FTR_USE_TB,
962 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 919 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
963 .icache_bsize = 32, 920 .icache_bsize = 32,
964 .dcache_bsize = 32, 921 .dcache_bsize = 32,
@@ -967,44 +924,42 @@ struct cpu_spec cpu_specs[] = {
967 .pvr_mask = 0xff000fff, 924 .pvr_mask = 0xff000fff,
968 .pvr_value = 0x53000891, 925 .pvr_value = 0x53000891,
969 .cpu_name = "440SP Rev. A", 926 .cpu_name = "440SP Rev. A",
970 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 927 .cpu_features = CPU_FTRS_44X,
971 CPU_FTR_USE_TB,
972 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 928 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
973 .icache_bsize = 32, 929 .icache_bsize = 32,
974 .dcache_bsize = 32, 930 .dcache_bsize = 32,
975 }, 931 },
976#endif /* CONFIG_44x */ 932#endif /* CONFIG_44x */
977#ifdef CONFIG_FSL_BOOKE 933#ifdef CONFIG_FSL_BOOKE
978 { /* e200z5 */ 934 { /* e200z5 */
979 .pvr_mask = 0xfff00000, 935 .pvr_mask = 0xfff00000,
980 .pvr_value = 0x81000000, 936 .pvr_value = 0x81000000,
981 .cpu_name = "e200z5", 937 .cpu_name = "e200z5",
982 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 938 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
983 .cpu_features = CPU_FTR_USE_TB, 939 .cpu_features = CPU_FTRS_E200,
984 .cpu_user_features = PPC_FEATURE_32 | 940 .cpu_user_features = PPC_FEATURE_32 |
985 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE | 941 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
986 PPC_FEATURE_UNIFIED_CACHE, 942 PPC_FEATURE_UNIFIED_CACHE,
987 .dcache_bsize = 32, 943 .dcache_bsize = 32,
988 }, 944 },
989 { /* e200z6 */ 945 { /* e200z6 */
990 .pvr_mask = 0xfff00000, 946 .pvr_mask = 0xfff00000,
991 .pvr_value = 0x81100000, 947 .pvr_value = 0x81100000,
992 .cpu_name = "e200z6", 948 .cpu_name = "e200z6",
993 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 949 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
994 .cpu_features = CPU_FTR_USE_TB, 950 .cpu_features = CPU_FTRS_E200,
995 .cpu_user_features = PPC_FEATURE_32 | 951 .cpu_user_features = PPC_FEATURE_32 |
996 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 952 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
997 PPC_FEATURE_HAS_EFP_SINGLE | 953 PPC_FEATURE_HAS_EFP_SINGLE |
998 PPC_FEATURE_UNIFIED_CACHE, 954 PPC_FEATURE_UNIFIED_CACHE,
999 .dcache_bsize = 32, 955 .dcache_bsize = 32,
1000 }, 956 },
1001 { /* e500 */ 957 { /* e500 */
1002 .pvr_mask = 0xffff0000, 958 .pvr_mask = 0xffff0000,
1003 .pvr_value = 0x80200000, 959 .pvr_value = 0x80200000,
1004 .cpu_name = "e500", 960 .cpu_name = "e500",
1005 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 961 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1006 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 962 .cpu_features = CPU_FTRS_E500,
1007 CPU_FTR_USE_TB,
1008 .cpu_user_features = PPC_FEATURE_32 | 963 .cpu_user_features = PPC_FEATURE_32 |
1009 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 964 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
1010 PPC_FEATURE_HAS_EFP_SINGLE, 965 PPC_FEATURE_HAS_EFP_SINGLE,
@@ -1012,13 +967,12 @@ struct cpu_spec cpu_specs[] = {
1012 .dcache_bsize = 32, 967 .dcache_bsize = 32,
1013 .num_pmcs = 4, 968 .num_pmcs = 4,
1014 }, 969 },
1015 { /* e500v2 */ 970 { /* e500v2 */
1016 .pvr_mask = 0xffff0000, 971 .pvr_mask = 0xffff0000,
1017 .pvr_value = 0x80210000, 972 .pvr_value = 0x80210000,
1018 .cpu_name = "e500v2", 973 .cpu_name = "e500v2",
1019 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 974 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1020 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 975 .cpu_features = CPU_FTRS_E500_2,
1021 CPU_FTR_USE_TB | CPU_FTR_BIG_PHYS,
1022 .cpu_user_features = PPC_FEATURE_32 | 976 .cpu_user_features = PPC_FEATURE_32 |
1023 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 977 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
1024 PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE, 978 PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
@@ -1032,10 +986,11 @@ struct cpu_spec cpu_specs[] = {
1032 .pvr_mask = 0x00000000, 986 .pvr_mask = 0x00000000,
1033 .pvr_value = 0x00000000, 987 .pvr_value = 0x00000000,
1034 .cpu_name = "(generic PPC)", 988 .cpu_name = "(generic PPC)",
1035 .cpu_features = CPU_FTR_COMMON, 989 .cpu_features = CPU_FTRS_GENERIC_32,
1036 .cpu_user_features = PPC_FEATURE_32, 990 .cpu_user_features = PPC_FEATURE_32,
1037 .icache_bsize = 32, 991 .icache_bsize = 32,
1038 .dcache_bsize = 32, 992 .dcache_bsize = 32,
1039 } 993 }
1040#endif /* !CLASSIC_PPC */ 994#endif /* !CLASSIC_PPC */
995#endif /* CONFIG_PPC32 */
1041}; 996};
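
The table above is consumed by matching the processor version register (PVR) against each entry: the PVR is ANDed with pvr_mask and compared to pvr_value, and the first hit wins, which is why the catch-all "(generic PPC)" entry with an all-zero mask sits last. A minimal C sketch of that selection, using an illustrative struct and helper name rather than the kernel's actual declarations:

/* Sketch only: just the fields needed for matching. */
struct cpu_spec_sketch {
        unsigned int pvr_mask;
        unsigned int pvr_value;
        const char  *cpu_name;
};

static const struct cpu_spec_sketch *
identify_cpu_sketch(const struct cpu_spec_sketch *specs, int n, unsigned int pvr)
{
        int i;

        for (i = 0; i < n; i++)
                if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
                        return &specs[i];       /* first match wins */
        return 0;                               /* nothing matched  */
}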
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
new file mode 100644
index 000000000000..2e99ae41723c
--- /dev/null
+++ b/arch/powerpc/kernel/entry_32.S
@@ -0,0 +1,1000 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/errno.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x) li r,(x)
45#endif
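
LOAD_MSR_KERNEL above exists because li only carries a 16-bit signed immediate: once MSR_KERNEL has bits set above bit 15 (the 4xx/Book-E case with MSR_CE), the constant has to be assembled from two halves. A hedged C illustration of what the lis/ori pair computes; this is a restatement, not kernel code:

/* lis writes the upper 16 bits, ori merges in the lower 16 bits. */
static unsigned int build_const32(unsigned int x)
{
        unsigned int r;

        r  = (x >> 16) << 16;   /* lis r,(x)@h   */
        r |= x & 0xffff;        /* ori r,r,(x)@l */
        return r;
}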
46
47#ifdef CONFIG_BOOKE
48#include "head_booke.h"
49#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
50 mtspr exc_level##_SPRG,r8; \
51 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
52 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
53 stw r0,GPR10(r11); \
54 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
55 stw r0,GPR11(r11); \
56 mfspr r8,exc_level##_SPRG
57
58 .globl mcheck_transfer_to_handler
59mcheck_transfer_to_handler:
60 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61 b transfer_to_handler_full
62
63 .globl debug_transfer_to_handler
64debug_transfer_to_handler:
65 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66 b transfer_to_handler_full
67
68 .globl crit_transfer_to_handler
69crit_transfer_to_handler:
70 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71 /* fall through */
72#endif
73
74#ifdef CONFIG_40x
75 .globl crit_transfer_to_handler
76crit_transfer_to_handler:
77 lwz r0,crit_r10@l(0)
78 stw r0,GPR10(r11)
79 lwz r0,crit_r11@l(0)
80 stw r0,GPR11(r11)
81 /* fall through */
82#endif
83
84/*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
91 .globl transfer_to_handler_full
92transfer_to_handler_full:
93 SAVE_NVGPRS(r11)
94 /* fall through */
95
96 .globl transfer_to_handler
97transfer_to_handler:
98 stw r2,GPR2(r11)
99 stw r12,_NIP(r11)
100 stw r9,_MSR(r11)
101 andi. r2,r9,MSR_PR
102 mfctr r12
103 mfspr r2,SPRN_XER
104 stw r12,_CTR(r11)
105 stw r2,_XER(r11)
106 mfspr r12,SPRN_SPRG3
107 addi r2,r12,-THREAD
108 tovirt(r2,r2) /* set r2 to current */
109 beq 2f /* if from user, fix up THREAD.regs */
110 addi r11,r1,STACK_FRAME_OVERHEAD
111 stw r11,PT_REGS(r12)
112#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113 /* Check to see if the dbcr0 register is set up to debug. Use the
114 single-step bit to do this. */
115 lwz r12,THREAD_DBCR0(r12)
116 andis. r12,r12,DBCR0_IC@h
117 beq+ 3f
118 /* From user and task is ptraced - load up global dbcr0 */
119 li r12,-1 /* clear all pending debug events */
120 mtspr SPRN_DBSR,r12
121 lis r11,global_dbcr0@ha
122 tophys(r11,r11)
123 addi r11,r11,global_dbcr0@l
124 lwz r12,0(r11)
125 mtspr SPRN_DBCR0,r12
126 lwz r12,4(r11)
127 addi r12,r12,-1
128 stw r12,4(r11)
129#endif
130 b 3f
1312: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow
133 */
134#ifdef CONFIG_6xx
135 mfspr r11,SPRN_HID0
136 mtcr r11
137BEGIN_FTR_SECTION
138 bt- 8,power_save_6xx_restore /* Check DOZE */
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141 bt- 9,power_save_6xx_restore /* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont
145transfer_to_handler_cont:
146 lwz r11,THREAD_INFO-THREAD(r12)
147 cmplw r1,r11 /* if r1 <= current->thread_info */
148 ble- stack_ovf /* then the kernel stack overflowed */
1493:
150 mflr r9
151 lwz r11,0(r9) /* virtual address of handler */
152 lwz r9,4(r9) /* where to go when done */
153 FIX_SRR1(r10,r12)
154 mtspr SPRN_SRR0,r11
155 mtspr SPRN_SRR1,r10
156 mtlr r9
157 SYNC
158 RFI /* jump to handler, enable MMU */
159
160/*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164stack_ovf:
165 /* sometimes we use a statically-allocated stack, which is OK. */
166 lis r11,_end@h
167 ori r11,r11,_end@l
168 cmplw r1,r11
169 ble 3b /* r1 <= &_end is OK */
170 SAVE_NVGPRS(r11)
171 addi r3,r1,STACK_FRAME_OVERHEAD
172 lis r1,init_thread_union@ha
173 addi r1,r1,init_thread_union@l
174 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175 lis r9,StackOverflow@ha
176 addi r9,r9,StackOverflow@l
177 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178 FIX_SRR1(r10,r12)
179 mtspr SPRN_SRR0,r9
180 mtspr SPRN_SRR1,r10
181 SYNC
182 RFI
183
184/*
185 * Handle a system call.
186 */
187 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
188 .stabs "entry_32.S",N_SO,0,0,0f
1890:
190
191_GLOBAL(DoSyscall)
192 stw r0,THREAD+LAST_SYSCALL(r2)
193 stw r3,ORIG_GPR3(r1)
194 li r12,0
195 stw r12,RESULT(r1)
196 lwz r11,_CCR(r1) /* Clear SO bit in CR */
197 rlwinm r11,r11,0,4,2
198 stw r11,_CCR(r1)
199#ifdef SHOW_SYSCALLS
200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
203 li r11,0
204 stb r11,TI_SC_NOERR(r10)
205 lwz r11,TI_FLAGS(r10)
206 andi. r11,r11,_TIF_SYSCALL_T_OR_A
207 bne- syscall_dotrace
208syscall_dotrace_cont:
209 cmplwi 0,r0,NR_syscalls
210 lis r10,sys_call_table@h
211 ori r10,r10,sys_call_table@l
212 slwi r0,r0,2
213 bge- 66f
214 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
215 mtlr r10
216 addi r9,r1,STACK_FRAME_OVERHEAD
217 PPC440EP_ERR42
218 blrl /* Call handler */
219 .globl ret_from_syscall
220ret_from_syscall:
221#ifdef SHOW_SYSCALLS
222 bl do_show_syscall_exit
223#endif
224 mr r6,r3
225 li r11,-_LAST_ERRNO
226 cmplw 0,r3,r11
227 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
228 blt+ 30f
229 lbz r11,TI_SC_NOERR(r12)
230 cmpwi r11,0
231 bne 30f
232 neg r3,r3
233 lwz r10,_CCR(r1) /* Set SO bit in CR */
234 oris r10,r10,0x1000
235 stw r10,_CCR(r1)
236
237 /* disable interrupts so current_thread_info()->flags can't change */
23830: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
239 SYNC
240 MTMSRD(r10)
241 lwz r9,TI_FLAGS(r12)
242 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
243 bne- syscall_exit_work
244syscall_exit_cont:
245#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
246 /* If the process has its own DBCR0 value, load it up. The single
247 step bit tells us that dbcr0 should be loaded. */
248 lwz r0,THREAD+THREAD_DBCR0(r2)
249 andis. r10,r0,DBCR0_IC@h
250 bnel- load_dbcr0
251#endif
252 stwcx. r0,0,r1 /* to clear the reservation */
253 lwz r4,_LINK(r1)
254 lwz r5,_CCR(r1)
255 mtlr r4
256 mtcr r5
257 lwz r7,_NIP(r1)
258 lwz r8,_MSR(r1)
259 FIX_SRR1(r8, r0)
260 lwz r2,GPR2(r1)
261 lwz r1,GPR1(r1)
262 mtspr SPRN_SRR0,r7
263 mtspr SPRN_SRR1,r8
264 SYNC
265 RFI
266
26766: li r3,-ENOSYS
268 b ret_from_syscall
269
270 .globl ret_from_fork
271ret_from_fork:
272 REST_NVGPRS(r1)
273 bl schedule_tail
274 li r3,0
275 b ret_from_syscall
276
277/* Traced system call support */
278syscall_dotrace:
279 SAVE_NVGPRS(r1)
280 li r0,0xc00
281 stw r0,_TRAP(r1)
282 addi r3,r1,STACK_FRAME_OVERHEAD
283 bl do_syscall_trace_enter
284 lwz r0,GPR0(r1) /* Restore original registers */
285 lwz r3,GPR3(r1)
286 lwz r4,GPR4(r1)
287 lwz r5,GPR5(r1)
288 lwz r6,GPR6(r1)
289 lwz r7,GPR7(r1)
290 lwz r8,GPR8(r1)
291 REST_NVGPRS(r1)
292 b syscall_dotrace_cont
293
294syscall_exit_work:
295 stw r6,RESULT(r1) /* Save result */
296 stw r3,GPR3(r1) /* Update return value */
297 andi. r0,r9,_TIF_SYSCALL_T_OR_A
298 beq 5f
299 ori r10,r10,MSR_EE
300 SYNC
301 MTMSRD(r10) /* re-enable interrupts */
302 lwz r4,_TRAP(r1)
303 andi. r4,r4,1
304 beq 4f
305 SAVE_NVGPRS(r1)
306 li r4,0xc00
307 stw r4,_TRAP(r1)
3084:
309 addi r3,r1,STACK_FRAME_OVERHEAD
310 bl do_syscall_trace_leave
311 REST_NVGPRS(r1)
3122:
313 lwz r3,GPR3(r1)
314 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
315 SYNC
316 MTMSRD(r10) /* disable interrupts again */
317 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
318 lwz r9,TI_FLAGS(r12)
3195:
320 andi. r0,r9,_TIF_NEED_RESCHED
321 bne 1f
322 lwz r5,_MSR(r1)
323 andi. r5,r5,MSR_PR
324 beq syscall_exit_cont
325 andi. r0,r9,_TIF_SIGPENDING
326 beq syscall_exit_cont
327 b do_user_signal
3281:
329 ori r10,r10,MSR_EE
330 SYNC
331 MTMSRD(r10) /* re-enable interrupts */
332 bl schedule
333 b 2b
334
335#ifdef SHOW_SYSCALLS
336do_show_syscall:
337#ifdef SHOW_SYSCALLS_TASK
338 lis r11,show_syscalls_task@ha
339 lwz r11,show_syscalls_task@l(r11)
340 cmp 0,r2,r11
341 bnelr
342#endif
343 stw r31,GPR31(r1)
344 mflr r31
345 lis r3,7f@ha
346 addi r3,r3,7f@l
347 lwz r4,GPR0(r1)
348 lwz r5,GPR3(r1)
349 lwz r6,GPR4(r1)
350 lwz r7,GPR5(r1)
351 lwz r8,GPR6(r1)
352 lwz r9,GPR7(r1)
353 bl printk
354 lis r3,77f@ha
355 addi r3,r3,77f@l
356 lwz r4,GPR8(r1)
357 mr r5,r2
358 bl printk
359 lwz r0,GPR0(r1)
360 lwz r3,GPR3(r1)
361 lwz r4,GPR4(r1)
362 lwz r5,GPR5(r1)
363 lwz r6,GPR6(r1)
364 lwz r7,GPR7(r1)
365 lwz r8,GPR8(r1)
366 mtlr r31
367 lwz r31,GPR31(r1)
368 blr
369
370do_show_syscall_exit:
371#ifdef SHOW_SYSCALLS_TASK
372 lis r11,show_syscalls_task@ha
373 lwz r11,show_syscalls_task@l(r11)
374 cmp 0,r2,r11
375 bnelr
376#endif
377 stw r31,GPR31(r1)
378 mflr r31
379 stw r3,RESULT(r1) /* Save result */
380 mr r4,r3
381 lis r3,79f@ha
382 addi r3,r3,79f@l
383 bl printk
384 lwz r3,RESULT(r1)
385 mtlr r31
386 lwz r31,GPR31(r1)
387 blr
388
3897: .string "syscall %d(%x, %x, %x, %x, %x, "
39077: .string "%x), current=%p\n"
39179: .string " -> %x\n"
392 .align 2,0
393
394#ifdef SHOW_SYSCALLS_TASK
395 .data
396 .globl show_syscalls_task
397show_syscalls_task:
398 .long -1
399 .text
400#endif
401#endif /* SHOW_SYSCALLS */
402
403/*
404 * The sigsuspend and rt_sigsuspend system calls can call do_signal
405 * and thus put the process into the stopped state where we might
406 * want to examine its user state with ptrace. Therefore we need
407 * to save all the nonvolatile registers (r13 - r31) before calling
408 * the C code.
409 */
410 .globl ppc_sigsuspend
411ppc_sigsuspend:
412 SAVE_NVGPRS(r1)
413 lwz r0,_TRAP(r1)
414 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
415 stw r0,_TRAP(r1) /* register set saved */
416 b sys_sigsuspend
417
418 .globl ppc_rt_sigsuspend
419ppc_rt_sigsuspend:
420 SAVE_NVGPRS(r1)
421 lwz r0,_TRAP(r1)
422 rlwinm r0,r0,0,0,30
423 stw r0,_TRAP(r1)
424 b sys_rt_sigsuspend
425
426 .globl ppc_fork
427ppc_fork:
428 SAVE_NVGPRS(r1)
429 lwz r0,_TRAP(r1)
430 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
431 stw r0,_TRAP(r1) /* register set saved */
432 b sys_fork
433
434 .globl ppc_vfork
435ppc_vfork:
436 SAVE_NVGPRS(r1)
437 lwz r0,_TRAP(r1)
438 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
439 stw r0,_TRAP(r1) /* register set saved */
440 b sys_vfork
441
442 .globl ppc_clone
443ppc_clone:
444 SAVE_NVGPRS(r1)
445 lwz r0,_TRAP(r1)
446 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
447 stw r0,_TRAP(r1) /* register set saved */
448 b sys_clone
449
450 .globl ppc_swapcontext
451ppc_swapcontext:
452 SAVE_NVGPRS(r1)
453 lwz r0,_TRAP(r1)
454 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
455 stw r0,_TRAP(r1) /* register set saved */
456 b sys_swapcontext
457
458/*
459 * Top-level page fault handling.
460 * This is in assembler because if do_page_fault tells us that
461 * it is a bad kernel page fault, we want to save the non-volatile
462 * registers before calling bad_page_fault.
463 */
464 .globl handle_page_fault
465handle_page_fault:
466 stw r4,_DAR(r1)
467 addi r3,r1,STACK_FRAME_OVERHEAD
468 bl do_page_fault
469 cmpwi r3,0
470 beq+ ret_from_except
471 SAVE_NVGPRS(r1)
472 lwz r0,_TRAP(r1)
473 clrrwi r0,r0,1
474 stw r0,_TRAP(r1)
475 mr r5,r3
476 addi r3,r1,STACK_FRAME_OVERHEAD
477 lwz r4,_DAR(r1)
478 bl bad_page_fault
479 b ret_from_except_full
480
481/*
482 * This routine switches between two different tasks. The process
483 * state of one is saved on its kernel stack. Then the state
484 * of the other is restored from its kernel stack. The memory
485 * management hardware is updated to the second process's state.
486 * Finally, we can return to the second process.
487 * On entry, r3 points to the THREAD for the current task, r4
488 * points to the THREAD for the new task.
489 *
490 * This routine is always called with interrupts disabled.
491 *
492 * Note: there are two ways to get to the "going out" portion
493 * of this code; either by coming in via the entry (_switch)
494 * or via "fork" which must set up an environment equivalent
495 * to the "_switch" path. If you change this, you'll have to
496 * change the fork code also.
497 *
498 * The code which creates the new task context is in 'copy_thread'
499 * in arch/ppc/kernel/process.c
500 */
501_GLOBAL(_switch)
502 stwu r1,-INT_FRAME_SIZE(r1)
503 mflr r0
504 stw r0,INT_FRAME_SIZE+4(r1)
505 /* r3-r12 are caller saved -- Cort */
506 SAVE_NVGPRS(r1)
507 stw r0,_NIP(r1) /* Return to switch caller */
508 mfmsr r11
509 li r0,MSR_FP /* Disable floating-point */
510#ifdef CONFIG_ALTIVEC
511BEGIN_FTR_SECTION
512 oris r0,r0,MSR_VEC@h /* Disable altivec */
513 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
514 stw r12,THREAD+THREAD_VRSAVE(r2)
515END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
516#endif /* CONFIG_ALTIVEC */
517#ifdef CONFIG_SPE
518 oris r0,r0,MSR_SPE@h /* Disable SPE */
519 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
520 stw r12,THREAD+THREAD_SPEFSCR(r2)
521#endif /* CONFIG_SPE */
522 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
523 beq+ 1f
524 andc r11,r11,r0
525 MTMSRD(r11)
526 isync
5271: stw r11,_MSR(r1)
528 mfcr r10
529 stw r10,_CCR(r1)
530 stw r1,KSP(r3) /* Set old stack pointer */
531
532#ifdef CONFIG_SMP
533 /* We need a sync somewhere here to make sure that if the
534 * previous task gets rescheduled on another CPU, it sees all
535 * stores it has performed on this one.
536 */
537 sync
538#endif /* CONFIG_SMP */
539
540 tophys(r0,r4)
541 CLR_TOP32(r0)
542 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
543 lwz r1,KSP(r4) /* Load new stack pointer */
544
545 /* save the old current 'last' for return value */
546 mr r3,r2
547 addi r2,r4,-THREAD /* Update current */
548
549#ifdef CONFIG_ALTIVEC
550BEGIN_FTR_SECTION
551 lwz r0,THREAD+THREAD_VRSAVE(r2)
552 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
553END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
554#endif /* CONFIG_ALTIVEC */
555#ifdef CONFIG_SPE
556 lwz r0,THREAD+THREAD_SPEFSCR(r2)
557 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
558#endif /* CONFIG_SPE */
559
560 lwz r0,_CCR(r1)
561 mtcrf 0xFF,r0
562 /* r3-r12 are destroyed -- Cort */
563 REST_NVGPRS(r1)
564
565 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
566 mtlr r4
567 addi r1,r1,INT_FRAME_SIZE
568 blr
569
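
As the comment before _switch describes, the routine parks the outgoing task's state on its kernel stack, loads the incoming task's state, and hands back the task that was running so the caller can finish its bookkeeping. A hedged C-level sketch of how a switch_to-style caller might use it; the prototype and macro name are illustrative, not the kernel's exact glue code:

struct thread_struct;
struct task_struct;

/* Illustrative prototype for the assembler routine above. */
extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);

#define switch_to_sketch(prev, next, last)                              \
        do {                                                            \
                (last) = _switch(&(prev)->thread, &(next)->thread);     \
        } while (0)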
570 .globl fast_exception_return
571fast_exception_return:
572#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
573 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
574 beq 1f /* if not, we've got problems */
575#endif
576
5772: REST_4GPRS(3, r11)
578 lwz r10,_CCR(r11)
579 REST_GPR(1, r11)
580 mtcr r10
581 lwz r10,_LINK(r11)
582 mtlr r10
583 REST_GPR(10, r11)
584 mtspr SPRN_SRR1,r9
585 mtspr SPRN_SRR0,r12
586 REST_GPR(9, r11)
587 REST_GPR(12, r11)
588 lwz r11,GPR11(r11)
589 SYNC
590 RFI
591
592#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
593/* check if the exception happened in a restartable section */
5941: lis r3,exc_exit_restart_end@ha
595 addi r3,r3,exc_exit_restart_end@l
596 cmplw r12,r3
597 bge 3f
598 lis r4,exc_exit_restart@ha
599 addi r4,r4,exc_exit_restart@l
600 cmplw r12,r4
601 blt 3f
602 lis r3,fee_restarts@ha
603 tophys(r3,r3)
604 lwz r5,fee_restarts@l(r3)
605 addi r5,r5,1
606 stw r5,fee_restarts@l(r3)
607 mr r12,r4 /* restart at exc_exit_restart */
608 b 2b
609
610 .comm fee_restarts,4
611
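
The path above treats an otherwise non-recoverable exception as harmless when the interrupted PC lies inside the short exception-exit sequence: it bumps fee_restarts and simply restarts that sequence. A hedged C rendering of the test, using the label names from the code:

extern char exc_exit_restart[], exc_exit_restart_end[];
extern unsigned int fee_restarts;

/* Sketch: returns 1 and rewinds *pc if the exit sequence can be restarted. */
static int maybe_restart_exit(unsigned long *pc)
{
        if (*pc >= (unsigned long)exc_exit_restart &&
            *pc <  (unsigned long)exc_exit_restart_end) {
                fee_restarts++;
                *pc = (unsigned long)exc_exit_restart;
                return 1;
        }
        return 0;
}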
612/* aargh, a nonrecoverable interrupt, panic */
613/* aargh, we don't know which trap this is */
614/* but the 601 doesn't implement the RI bit, so assume it's OK */
6153:
616BEGIN_FTR_SECTION
617 b 2b
618END_FTR_SECTION_IFSET(CPU_FTR_601)
619 li r10,-1
620 stw r10,_TRAP(r11)
621 addi r3,r1,STACK_FRAME_OVERHEAD
622 lis r10,MSR_KERNEL@h
623 ori r10,r10,MSR_KERNEL@l
624 bl transfer_to_handler_full
625 .long nonrecoverable_exception
626 .long ret_from_except
627#endif
628
629 .globl sigreturn_exit
630sigreturn_exit:
631 subi r1,r3,STACK_FRAME_OVERHEAD
632 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
633 lwz r9,TI_FLAGS(r12)
634 andi. r0,r9,_TIF_SYSCALL_T_OR_A
635 beq+ ret_from_except_full
636 bl do_syscall_trace_leave
637 /* fall through */
638
639 .globl ret_from_except_full
640ret_from_except_full:
641 REST_NVGPRS(r1)
642 /* fall through */
643
644 .globl ret_from_except
645ret_from_except:
646 /* Hard-disable interrupts so that current_thread_info()->flags
647 * can't change between when we test it and when we return
648 * from the interrupt. */
649 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
650 SYNC /* Some chip revs have problems here... */
651 MTMSRD(r10) /* disable interrupts */
652
653 lwz r3,_MSR(r1) /* Returning to user mode? */
654 andi. r0,r3,MSR_PR
655 beq resume_kernel
656
657user_exc_return: /* r10 contains MSR_KERNEL here */
658 /* Check current_thread_info()->flags */
659 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
660 lwz r9,TI_FLAGS(r9)
661 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
662 bne do_work
663
664restore_user:
665#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
666 /* Check whether this process has its own DBCR0 value. The single
667 step bit tells us that dbcr0 should be loaded. */
668 lwz r0,THREAD+THREAD_DBCR0(r2)
669 andis. r10,r0,DBCR0_IC@h
670 bnel- load_dbcr0
671#endif
672
673#ifdef CONFIG_PREEMPT
674 b restore
675
676/* N.B. the only way to get here is from the beq following ret_from_except. */
677resume_kernel:
678 /* check current_thread_info->preempt_count */
679 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
680 lwz r0,TI_PREEMPT(r9)
681 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
682 bne restore
683 lwz r0,TI_FLAGS(r9)
684 andi. r0,r0,_TIF_NEED_RESCHED
685 beq+ restore
686 andi. r0,r3,MSR_EE /* interrupts off? */
687 beq restore /* don't schedule if so */
6881: bl preempt_schedule_irq
689 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
690 lwz r3,TI_FLAGS(r9)
691 andi. r0,r3,_TIF_NEED_RESCHED
692 bne- 1b
693#else
694resume_kernel:
695#endif /* CONFIG_PREEMPT */
696
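
Under CONFIG_PREEMPT, resume_kernel above only reschedules when the preempt count is zero, TIF_NEED_RESCHED is set, and the interrupted kernel context had interrupts enabled, and it keeps calling preempt_schedule_irq until the flag clears. A hedged C paraphrase of that decision; the struct is illustrative and the MSR_EE/_TIF_NEED_RESCHED constants are assumed from the kernel headers:

struct ti_sketch { int preempt_count; unsigned long flags; };
extern void preempt_schedule_irq(void);         /* the "bl" target above */

static void maybe_preempt(struct ti_sketch *ti, unsigned long msr)
{
        if (ti->preempt_count != 0)
                return;                         /* just restore and return */
        if (!(ti->flags & _TIF_NEED_RESCHED))
                return;
        if (!(msr & MSR_EE))
                return;                         /* interrupts were off */
        do {
                preempt_schedule_irq();
        } while (ti->flags & _TIF_NEED_RESCHED);
}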
697 /* interrupts are hard-disabled at this point */
698restore:
699 lwz r0,GPR0(r1)
700 lwz r2,GPR2(r1)
701 REST_4GPRS(3, r1)
702 REST_2GPRS(7, r1)
703
704 lwz r10,_XER(r1)
705 lwz r11,_CTR(r1)
706 mtspr SPRN_XER,r10
707 mtctr r11
708
709 PPC405_ERR77(0,r1)
710 stwcx. r0,0,r1 /* to clear the reservation */
711
712#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
713 lwz r9,_MSR(r1)
714 andi. r10,r9,MSR_RI /* check if this exception occurred */
715 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
716
717 lwz r10,_CCR(r1)
718 lwz r11,_LINK(r1)
719 mtcrf 0xFF,r10
720 mtlr r11
721
722 /*
723 * Once we put values in SRR0 and SRR1, we are in a state
724 * where exceptions are not recoverable, since taking an
725 * exception will trash SRR0 and SRR1. Therefore we clear the
726 * MSR:RI bit to indicate this. If we do take an exception,
727 * we can't return to the point of the exception but we
728 * can restart the exception exit path at the label
729 * exc_exit_restart below. -- paulus
730 */
731 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
732 SYNC
733 MTMSRD(r10) /* clear the RI bit */
734 .globl exc_exit_restart
735exc_exit_restart:
736 lwz r9,_MSR(r1)
737 lwz r12,_NIP(r1)
738 FIX_SRR1(r9,r10)
739 mtspr SPRN_SRR0,r12
740 mtspr SPRN_SRR1,r9
741 REST_4GPRS(9, r1)
742 lwz r1,GPR1(r1)
743 .globl exc_exit_restart_end
744exc_exit_restart_end:
745 SYNC
746 RFI
747
748#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
749 /*
750 * This is a bit different on 4xx/Book-E because it doesn't have
751 * the RI bit in the MSR.
752 * The TLB miss handler checks if we have interrupted
753 * the exception exit path and restarts it if so
754 * (well maybe one day it will... :).
755 */
756 lwz r11,_LINK(r1)
757 mtlr r11
758 lwz r10,_CCR(r1)
759 mtcrf 0xff,r10
760 REST_2GPRS(9, r1)
761 .globl exc_exit_restart
762exc_exit_restart:
763 lwz r11,_NIP(r1)
764 lwz r12,_MSR(r1)
765exc_exit_start:
766 mtspr SPRN_SRR0,r11
767 mtspr SPRN_SRR1,r12
768 REST_2GPRS(11, r1)
769 lwz r1,GPR1(r1)
770 .globl exc_exit_restart_end
771exc_exit_restart_end:
772 PPC405_ERR77_SYNC
773 rfi
774 b . /* prevent prefetch past rfi */
775
776/*
777 * Returning from a critical interrupt in user mode doesn't need
778 * to be any different from a normal exception. For a critical
779 * interrupt in the kernel, we just return (without checking for
780 * preemption) since the interrupt may have happened at some crucial
781 * place (e.g. inside the TLB miss handler), and because we will be
782 * running with r1 pointing into critical_stack, not the current
783 * process's kernel stack (and therefore current_thread_info() will
784 * give the wrong answer).
785 * We have to restore various SPRs that may have been in use at the
786 * time of the critical interrupt.
787 *
788 */
789#ifdef CONFIG_40x
790#define PPC_40x_TURN_OFF_MSR_DR \
791 /* avoid any possible TLB misses here by turning off MSR.DR, we \
792 * assume the instructions here are mapped by a pinned TLB entry */ \
793 li r10,MSR_IR; \
794 mtmsr r10; \
795 isync; \
796 tophys(r1, r1);
797#else
798#define PPC_40x_TURN_OFF_MSR_DR
799#endif
800
801#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
802 REST_NVGPRS(r1); \
803 lwz r3,_MSR(r1); \
804 andi. r3,r3,MSR_PR; \
805 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
806 bne user_exc_return; \
807 lwz r0,GPR0(r1); \
808 lwz r2,GPR2(r1); \
809 REST_4GPRS(3, r1); \
810 REST_2GPRS(7, r1); \
811 lwz r10,_XER(r1); \
812 lwz r11,_CTR(r1); \
813 mtspr SPRN_XER,r10; \
814 mtctr r11; \
815 PPC405_ERR77(0,r1); \
816 stwcx. r0,0,r1; /* to clear the reservation */ \
817 lwz r11,_LINK(r1); \
818 mtlr r11; \
819 lwz r10,_CCR(r1); \
820 mtcrf 0xff,r10; \
821 PPC_40x_TURN_OFF_MSR_DR; \
822 lwz r9,_DEAR(r1); \
823 lwz r10,_ESR(r1); \
824 mtspr SPRN_DEAR,r9; \
825 mtspr SPRN_ESR,r10; \
826 lwz r11,_NIP(r1); \
827 lwz r12,_MSR(r1); \
828 mtspr exc_lvl_srr0,r11; \
829 mtspr exc_lvl_srr1,r12; \
830 lwz r9,GPR9(r1); \
831 lwz r12,GPR12(r1); \
832 lwz r10,GPR10(r1); \
833 lwz r11,GPR11(r1); \
834 lwz r1,GPR1(r1); \
835 PPC405_ERR77_SYNC; \
836 exc_lvl_rfi; \
837 b .; /* prevent prefetch past exc_lvl_rfi */
838
839 .globl ret_from_crit_exc
840ret_from_crit_exc:
841 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
842
843#ifdef CONFIG_BOOKE
844 .globl ret_from_debug_exc
845ret_from_debug_exc:
846 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
847
848 .globl ret_from_mcheck_exc
849ret_from_mcheck_exc:
850 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
851#endif /* CONFIG_BOOKE */
852
853/*
854 * Load the DBCR0 value for a task that is being ptraced,
855 * having first saved away the global DBCR0. Note that r0
856 * has the dbcr0 value to set upon entry to this.
857 */
858load_dbcr0:
859 mfmsr r10 /* first disable debug exceptions */
860 rlwinm r10,r10,0,~MSR_DE
861 mtmsr r10
862 isync
863 mfspr r10,SPRN_DBCR0
864 lis r11,global_dbcr0@ha
865 addi r11,r11,global_dbcr0@l
866 stw r10,0(r11)
867 mtspr SPRN_DBCR0,r0
868 lwz r10,4(r11)
869 addi r10,r10,1
870 stw r10,4(r11)
871 li r11,-1
872 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
873 blr
874
875 .comm global_dbcr0,8
876#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
877
878do_work: /* r10 contains MSR_KERNEL here */
879 andi. r0,r9,_TIF_NEED_RESCHED
880 beq do_user_signal
881
882do_resched: /* r10 contains MSR_KERNEL here */
883 ori r10,r10,MSR_EE
884 SYNC
885 MTMSRD(r10) /* hard-enable interrupts */
886 bl schedule
887recheck:
888 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
889 SYNC
890 MTMSRD(r10) /* disable interrupts */
891 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
892 lwz r9,TI_FLAGS(r9)
893 andi. r0,r9,_TIF_NEED_RESCHED
894 bne- do_resched
895 andi. r0,r9,_TIF_SIGPENDING
896 beq restore_user
897do_user_signal: /* r10 contains MSR_KERNEL here */
898 ori r10,r10,MSR_EE
899 SYNC
900 MTMSRD(r10) /* hard-enable interrupts */
901 /* save r13-r31 in the exception frame, if not already done */
902 lwz r3,_TRAP(r1)
903 andi. r0,r3,1
904 beq 2f
905 SAVE_NVGPRS(r1)
906 rlwinm r3,r3,0,0,30
907 stw r3,_TRAP(r1)
9082: li r3,0
909 addi r4,r1,STACK_FRAME_OVERHEAD
910 bl do_signal
911 REST_NVGPRS(r1)
912 b recheck
913
914/*
915 * We come here when we are at the end of handling an exception
916 * that occurred at a place where taking an exception will lose
917 * state information, such as the contents of SRR0 and SRR1.
918 */
919nonrecoverable:
920 lis r10,exc_exit_restart_end@ha
921 addi r10,r10,exc_exit_restart_end@l
922 cmplw r12,r10
923 bge 3f
924 lis r11,exc_exit_restart@ha
925 addi r11,r11,exc_exit_restart@l
926 cmplw r12,r11
927 blt 3f
928 lis r10,ee_restarts@ha
929 lwz r12,ee_restarts@l(r10)
930 addi r12,r12,1
931 stw r12,ee_restarts@l(r10)
932 mr r12,r11 /* restart at exc_exit_restart */
933 blr
9343: /* OK, we can't recover, kill this process */
935 /* but the 601 doesn't implement the RI bit, so assume it's OK */
936BEGIN_FTR_SECTION
937 blr
938END_FTR_SECTION_IFSET(CPU_FTR_601)
939 lwz r3,_TRAP(r1)
940 andi. r0,r3,1
941 beq 4f
942 SAVE_NVGPRS(r1)
943 rlwinm r3,r3,0,0,30
944 stw r3,_TRAP(r1)
9454: addi r3,r1,STACK_FRAME_OVERHEAD
946 bl nonrecoverable_exception
947 /* shouldn't return */
948 b 4b
949
950 .comm ee_restarts,4
951
952/*
953 * PROM code for specific machines follows. Put it
954 * here so it's easy to add arch-specific sections later.
955 * -- Cort
956 */
957#ifdef CONFIG_PPC_RTAS
958/*
959 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
960 * called with the MMU off.
961 */
962_GLOBAL(enter_rtas)
963 stwu r1,-INT_FRAME_SIZE(r1)
964 mflr r0
965 stw r0,INT_FRAME_SIZE+4(r1)
966 LOADADDR(r4, rtas)
967 lis r6,1f@ha /* physical return address for rtas */
968 addi r6,r6,1f@l
969 tophys(r6,r6)
970 tophys(r7,r1)
971 lwz r8,RTASENTRY(r4)
972 lwz r4,RTASBASE(r4)
973 mfmsr r9
974 stw r9,8(r1)
975 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
976 SYNC /* disable interrupts so SRR0/1 */
977 MTMSRD(r0) /* don't get trashed */
978 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
979 mtlr r6
980 mtspr SPRN_SPRG2,r7
981 mtspr SPRN_SRR0,r8
982 mtspr SPRN_SRR1,r9
983 RFI
9841: tophys(r9,r1)
985 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
986 lwz r9,8(r9) /* original msr value */
987 FIX_SRR1(r9,r0)
988 addi r1,r1,INT_FRAME_SIZE
989 li r0,0
990 mtspr SPRN_SPRG2,r0
991 mtspr SPRN_SRR0,r8
992 mtspr SPRN_SRR1,r9
993 RFI /* return to caller */
994
995 .globl machine_check_in_rtas
996machine_check_in_rtas:
997 twi 31,0,0
998 /* XXX load up BATs and panic */
999
1000#endif /* CONFIG_PPC_RTAS */
diff --git a/arch/ppc64/kernel/entry.S b/arch/powerpc/kernel/entry_64.S
index e8c0bbf4d000..984a10630714 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -42,9 +42,6 @@
42.SYS_CALL_TABLE: 42.SYS_CALL_TABLE:
43 .tc .sys_call_table[TC],.sys_call_table 43 .tc .sys_call_table[TC],.sys_call_table
44 44
45.SYS_CALL_TABLE32:
46 .tc .sys_call_table32[TC],.sys_call_table32
47
48/* This value is used to mark exception frames on the stack. */ 45/* This value is used to mark exception frames on the stack. */
49exception_marker: 46exception_marker:
50 .tc ID_72656773_68657265[TC],0x7265677368657265 47 .tc ID_72656773_68657265[TC],0x7265677368657265
@@ -133,7 +130,7 @@ system_call: /* label this so stack traces look sane */
133 ld r11,.SYS_CALL_TABLE@toc(2) 130 ld r11,.SYS_CALL_TABLE@toc(2)
134 andi. r10,r10,_TIF_32BIT 131 andi. r10,r10,_TIF_32BIT
135 beq 15f 132 beq 15f
136 ld r11,.SYS_CALL_TABLE32@toc(2) 133 addi r11,r11,8 /* use 32-bit syscall entries */
137 clrldi r3,r3,32 134 clrldi r3,r3,32
138 clrldi r4,r4,32 135 clrldi r4,r4,32
139 clrldi r5,r5,32 136 clrldi r5,r5,32
@@ -141,7 +138,7 @@ system_call: /* label this so stack traces look sane */
141 clrldi r7,r7,32 138 clrldi r7,r7,32
142 clrldi r8,r8,32 139 clrldi r8,r8,32
14315: 14015:
144 slwi r0,r0,3 141 slwi r0,r0,4
145 ldx r10,r11,r0 /* Fetch system call handler [ptr] */ 142 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
146 mtctr r10 143 mtctr r10
147 bctrl /* Call handler */ 144 bctrl /* Call handler */
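
The change above folds the 32-bit table into the 64-bit one: the compat path now adds 8 to the single table pointer and the syscall number is shifted by 4 instead of 3, so each slot is 16 bytes wide. A hedged C sketch of the layout that indexing implies; the struct and helper are illustrative, not actual kernel declarations:

/* 16 bytes per syscall: native handler at +0, compat handler at +8. */
struct syscall_pair {
        void *native;
        void *compat;
};

static void *pick_handler(const struct syscall_pair *table,
                          unsigned long nr, int is_32bit)
{
        return is_32bit ? table[nr].compat : table[nr].native;
}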
@@ -191,8 +188,8 @@ syscall_exit_trace_cont:
191 ld r1,GPR1(r1) 188 ld r1,GPR1(r1)
192 mtlr r4 189 mtlr r4
193 mtcr r5 190 mtcr r5
194 mtspr SRR0,r7 191 mtspr SPRN_SRR0,r7
195 mtspr SRR1,r8 192 mtspr SPRN_SRR1,r8
196 rfid 193 rfid
197 b . /* prevent speculative execution */ 194 b . /* prevent speculative execution */
198 195
@@ -265,7 +262,7 @@ _GLOBAL(save_nvgprs)
265 */ 262 */
266_GLOBAL(ppc32_sigsuspend) 263_GLOBAL(ppc32_sigsuspend)
267 bl .save_nvgprs 264 bl .save_nvgprs
268 bl .sys32_sigsuspend 265 bl .compat_sys_sigsuspend
269 b 70f 266 b 70f
270 267
271_GLOBAL(ppc64_rt_sigsuspend) 268_GLOBAL(ppc64_rt_sigsuspend)
@@ -275,7 +272,7 @@ _GLOBAL(ppc64_rt_sigsuspend)
275 272
276_GLOBAL(ppc32_rt_sigsuspend) 273_GLOBAL(ppc32_rt_sigsuspend)
277 bl .save_nvgprs 274 bl .save_nvgprs
278 bl .sys32_rt_sigsuspend 275 bl .compat_sys_rt_sigsuspend
27970: cmpdi 0,r3,0 27670: cmpdi 0,r3,0
280 /* If it returned an error, we need to return via syscall_exit to set 277 /* If it returned an error, we need to return via syscall_exit to set
281 the SO bit in cr0 and potentially stop for ptrace. */ 278 the SO bit in cr0 and potentially stop for ptrace. */
@@ -310,7 +307,7 @@ _GLOBAL(ppc_clone)
310 307
311_GLOBAL(ppc32_swapcontext) 308_GLOBAL(ppc32_swapcontext)
312 bl .save_nvgprs 309 bl .save_nvgprs
313 bl .sys32_swapcontext 310 bl .compat_sys_swapcontext
314 b 80f 311 b 80f
315 312
316_GLOBAL(ppc64_swapcontext) 313_GLOBAL(ppc64_swapcontext)
@@ -319,11 +316,11 @@ _GLOBAL(ppc64_swapcontext)
319 b 80f 316 b 80f
320 317
321_GLOBAL(ppc32_sigreturn) 318_GLOBAL(ppc32_sigreturn)
322 bl .sys32_sigreturn 319 bl .compat_sys_sigreturn
323 b 80f 320 b 80f
324 321
325_GLOBAL(ppc32_rt_sigreturn) 322_GLOBAL(ppc32_rt_sigreturn)
326 bl .sys32_rt_sigreturn 323 bl .compat_sys_rt_sigreturn
327 b 80f 324 b 80f
328 325
329_GLOBAL(ppc64_rt_sigreturn) 326_GLOBAL(ppc64_rt_sigreturn)
@@ -531,7 +528,7 @@ restore:
531 mtctr r3 528 mtctr r3
532 mtlr r0 529 mtlr r0
533 ld r3,_XER(r1) 530 ld r3,_XER(r1)
534 mtspr XER,r3 531 mtspr SPRN_XER,r3
535 532
536 REST_8GPRS(5, r1) 533 REST_8GPRS(5, r1)
537 534
@@ -543,12 +540,12 @@ restore:
543 mtmsrd r0,1 540 mtmsrd r0,1
544 541
545 ld r0,_MSR(r1) 542 ld r0,_MSR(r1)
546 mtspr SRR1,r0 543 mtspr SPRN_SRR1,r0
547 544
548 ld r2,_CCR(r1) 545 ld r2,_CCR(r1)
549 mtcrf 0xFF,r2 546 mtcrf 0xFF,r2
550 ld r2,_NIP(r1) 547 ld r2,_NIP(r1)
551 mtspr SRR0,r2 548 mtspr SPRN_SRR0,r2
552 549
553 ld r0,GPR0(r1) 550 ld r0,GPR0(r1)
554 ld r2,GPR2(r1) 551 ld r2,GPR2(r1)
@@ -643,7 +640,7 @@ _GLOBAL(enter_rtas)
643 std r4,_CCR(r1) 640 std r4,_CCR(r1)
644 mfctr r5 641 mfctr r5
645 std r5,_CTR(r1) 642 std r5,_CTR(r1)
646 mfspr r6,XER 643 mfspr r6,SPRN_XER
647 std r6,_XER(r1) 644 std r6,_XER(r1)
648 mfdar r7 645 mfdar r7
649 std r7,_DAR(r1) 646 std r7,_DAR(r1)
@@ -697,14 +694,14 @@ _GLOBAL(enter_rtas)
697 ld r5,RTASENTRY(r4) /* get the rtas->entry value */ 694 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
698 ld r4,RTASBASE(r4) /* get the rtas->base value */ 695 ld r4,RTASBASE(r4) /* get the rtas->base value */
699 696
700 mtspr SRR0,r5 697 mtspr SPRN_SRR0,r5
701 mtspr SRR1,r6 698 mtspr SPRN_SRR1,r6
702 rfid 699 rfid
703 b . /* prevent speculative execution */ 700 b . /* prevent speculative execution */
704 701
705_STATIC(rtas_return_loc) 702_STATIC(rtas_return_loc)
706 /* relocation is off at this point */ 703 /* relocation is off at this point */
707 mfspr r4,SPRG3 /* Get PACA */ 704 mfspr r4,SPRN_SPRG3 /* Get PACA */
708 SET_REG_TO_CONST(r5, KERNELBASE) 705 SET_REG_TO_CONST(r5, KERNELBASE)
709 sub r4,r4,r5 /* RELOC the PACA base pointer */ 706 sub r4,r4,r5 /* RELOC the PACA base pointer */
710 707
@@ -718,8 +715,8 @@ _STATIC(rtas_return_loc)
718 LOADADDR(r3,.rtas_restore_regs) 715 LOADADDR(r3,.rtas_restore_regs)
719 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ 716 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
720 717
721 mtspr SRR0,r3 718 mtspr SPRN_SRR0,r3
722 mtspr SRR1,r4 719 mtspr SPRN_SRR1,r4
723 rfid 720 rfid
724 b . /* prevent speculative execution */ 721 b . /* prevent speculative execution */
725 722
@@ -730,14 +727,14 @@ _STATIC(rtas_restore_regs)
730 REST_8GPRS(14, r1) /* Restore the non-volatiles */ 727 REST_8GPRS(14, r1) /* Restore the non-volatiles */
731 REST_10GPRS(22, r1) /* ditto */ 728 REST_10GPRS(22, r1) /* ditto */
732 729
733 mfspr r13,SPRG3 730 mfspr r13,SPRN_SPRG3
734 731
735 ld r4,_CCR(r1) 732 ld r4,_CCR(r1)
736 mtcr r4 733 mtcr r4
737 ld r5,_CTR(r1) 734 ld r5,_CTR(r1)
738 mtctr r5 735 mtctr r5
739 ld r6,_XER(r1) 736 ld r6,_XER(r1)
740 mtspr XER,r6 737 mtspr SPRN_XER,r6
741 ld r7,_DAR(r1) 738 ld r7,_DAR(r1)
742 mtdar r7 739 mtdar r7
743 ld r8,_DSISR(r1) 740 ld r8,_DSISR(r1)
@@ -774,7 +771,7 @@ _GLOBAL(enter_prom)
774 std r4,_CCR(r1) 771 std r4,_CCR(r1)
775 mfctr r5 772 mfctr r5
776 std r5,_CTR(r1) 773 std r5,_CTR(r1)
777 mfspr r6,XER 774 mfspr r6,SPRN_XER
778 std r6,_XER(r1) 775 std r6,_XER(r1)
779 mfdar r7 776 mfdar r7
780 std r7,_DAR(r1) 777 std r7,_DAR(r1)
@@ -827,7 +824,7 @@ _GLOBAL(enter_prom)
827 ld r5,_CTR(r1) 824 ld r5,_CTR(r1)
828 mtctr r5 825 mtctr r5
829 ld r6,_XER(r1) 826 ld r6,_XER(r1)
830 mtspr XER,r6 827 mtspr SPRN_XER,r6
831 ld r7,_DAR(r1) 828 ld r7,_DAR(r1)
832 mtdar r7 829 mtdar r7
833 ld r8,_DSISR(r1) 830 ld r8,_DSISR(r1)
diff --git a/arch/ppc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 665d7d34304c..4d6001fa1cf2 100644
--- a/arch/ppc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/config.h> 12#include <linux/config.h>
13#include <asm/processor.h> 13#include <asm/reg.h>
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/pgtable.h> 16#include <asm/pgtable.h>
@@ -27,13 +27,9 @@
27 * Load up this task's FP registers from its thread_struct, 27 * Load up this task's FP registers from its thread_struct,
28 * enable the FPU for the current task and return to the task. 28 * enable the FPU for the current task and return to the task.
29 */ 29 */
30 .globl load_up_fpu 30_GLOBAL(load_up_fpu)
31load_up_fpu:
32 mfmsr r5 31 mfmsr r5
33 ori r5,r5,MSR_FP 32 ori r5,r5,MSR_FP
34#ifdef CONFIG_PPC64BRIDGE
35 clrldi r5,r5,1 /* turn off 64-bit mode */
36#endif /* CONFIG_PPC64BRIDGE */
37 SYNC 33 SYNC
38 MTMSRD(r5) /* enable use of fpu now */ 34 MTMSRD(r5) /* enable use of fpu now */
39 isync 35 isync
@@ -43,67 +39,57 @@ load_up_fpu:
43 * to another. Instead we call giveup_fpu in switch_to. 39 * to another. Instead we call giveup_fpu in switch_to.
44 */ 40 */
45#ifndef CONFIG_SMP 41#ifndef CONFIG_SMP
46 tophys(r6,0) /* get __pa constant */ 42 LOADBASE(r3, last_task_used_math)
47 addis r3,r6,last_task_used_math@ha 43 toreal(r3)
48 lwz r4,last_task_used_math@l(r3) 44 LDL r4,OFF(last_task_used_math)(r3)
49 cmpwi 0,r4,0 45 CMPI 0,r4,0
50 beq 1f 46 beq 1f
51 add r4,r4,r6 47 toreal(r4)
52 addi r4,r4,THREAD /* want last_task_used_math->thread */ 48 addi r4,r4,THREAD /* want last_task_used_math->thread */
53 SAVE_32FPRS(0, r4) 49 SAVE_32FPRS(0, r4)
54 mffs fr0 50 mffs fr0
55 stfd fr0,THREAD_FPSCR-4(r4) 51 stfd fr0,THREAD_FPSCR(r4)
56 lwz r5,PT_REGS(r4) 52 LDL r5,PT_REGS(r4)
57 add r5,r5,r6 53 toreal(r5)
58 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 54 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
59 li r10,MSR_FP|MSR_FE0|MSR_FE1 55 li r10,MSR_FP|MSR_FE0|MSR_FE1
60 andc r4,r4,r10 /* disable FP for previous task */ 56 andc r4,r4,r10 /* disable FP for previous task */
61 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 57 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
621: 581:
63#endif /* CONFIG_SMP */ 59#endif /* CONFIG_SMP */
64 /* enable use of FP after return */ 60 /* enable use of FP after return */
61#ifdef CONFIG_PPC32
65 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ 62 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
66 lwz r4,THREAD_FPEXC_MODE(r5) 63 lwz r4,THREAD_FPEXC_MODE(r5)
67 ori r9,r9,MSR_FP /* enable FP for current */ 64 ori r9,r9,MSR_FP /* enable FP for current */
68 or r9,r9,r4 65 or r9,r9,r4
69 lfd fr0,THREAD_FPSCR-4(r5) 66#else
67 ld r4,PACACURRENT(r13)
68 addi r5,r4,THREAD /* Get THREAD */
69 ld r4,THREAD_FPEXC_MODE(r5)
70 ori r12,r12,MSR_FP
71 or r12,r12,r4
72 std r12,_MSR(r1)
73#endif
74 lfd fr0,THREAD_FPSCR(r5)
70 mtfsf 0xff,fr0 75 mtfsf 0xff,fr0
71 REST_32FPRS(0, r5) 76 REST_32FPRS(0, r5)
72#ifndef CONFIG_SMP 77#ifndef CONFIG_SMP
73 subi r4,r5,THREAD 78 subi r4,r5,THREAD
74 sub r4,r4,r6 79 fromreal(r4)
75 stw r4,last_task_used_math@l(r3) 80 STL r4,OFF(last_task_used_math)(r3)
76#endif /* CONFIG_SMP */ 81#endif /* CONFIG_SMP */
77 /* restore registers and return */ 82 /* restore registers and return */
78 /* we haven't used ctr or xer or lr */ 83 /* we haven't used ctr or xer or lr */
79 b fast_exception_return 84 b fast_exception_return
80 85
81/* 86/*
82 * FP unavailable trap from kernel - print a message, but let
83 * the task use FP in the kernel until it returns to user mode.
84 */
85 .globl KernelFP
86KernelFP:
87 lwz r3,_MSR(r1)
88 ori r3,r3,MSR_FP
89 stw r3,_MSR(r1) /* enable use of FP after return */
90 lis r3,86f@h
91 ori r3,r3,86f@l
92 mr r4,r2 /* current */
93 lwz r5,_NIP(r1)
94 bl printk
95 b ret_from_except
9686: .string "floating point used in kernel (task=%p, pc=%x)\n"
97 .align 4,0
98
99/*
100 * giveup_fpu(tsk) 87 * giveup_fpu(tsk)
101 * Disable FP for the task given as the argument, 88 * Disable FP for the task given as the argument,
102 * and save the floating-point registers in its thread_struct. 89 * and save the floating-point registers in its thread_struct.
103 * Enables the FPU for use in the kernel on return. 90 * Enables the FPU for use in the kernel on return.
104 */ 91 */
105 .globl giveup_fpu 92_GLOBAL(giveup_fpu)
106giveup_fpu:
107 mfmsr r5 93 mfmsr r5
108 ori r5,r5,MSR_FP 94 ori r5,r5,MSR_FP
109 SYNC_601 95 SYNC_601
@@ -111,23 +97,48 @@ giveup_fpu:
111 MTMSRD(r5) /* enable use of fpu now */ 97 MTMSRD(r5) /* enable use of fpu now */
112 SYNC_601 98 SYNC_601
113 isync 99 isync
114 cmpwi 0,r3,0 100 CMPI 0,r3,0
115 beqlr- /* if no previous owner, done */ 101 beqlr- /* if no previous owner, done */
116 addi r3,r3,THREAD /* want THREAD of task */ 102 addi r3,r3,THREAD /* want THREAD of task */
117 lwz r5,PT_REGS(r3) 103 LDL r5,PT_REGS(r3)
118 cmpwi 0,r5,0 104 CMPI 0,r5,0
119 SAVE_32FPRS(0, r3) 105 SAVE_32FPRS(0, r3)
120 mffs fr0 106 mffs fr0
121 stfd fr0,THREAD_FPSCR-4(r3) 107 stfd fr0,THREAD_FPSCR(r3)
122 beq 1f 108 beq 1f
123 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 109 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
124 li r3,MSR_FP|MSR_FE0|MSR_FE1 110 li r3,MSR_FP|MSR_FE0|MSR_FE1
125 andc r4,r4,r3 /* disable FP for previous task */ 111 andc r4,r4,r3 /* disable FP for previous task */
126 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 112 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1271: 1131:
128#ifndef CONFIG_SMP 114#ifndef CONFIG_SMP
129 li r5,0 115 li r5,0
130 lis r4,last_task_used_math@ha 116 LOADBASE(r4,last_task_used_math)
131 stw r5,last_task_used_math@l(r4) 117 STL r5,OFF(last_task_used_math)(r4)
132#endif /* CONFIG_SMP */ 118#endif /* CONFIG_SMP */
133 blr 119 blr
120
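
load_up_fpu and giveup_fpu above implement lazy FP context switching on UP kernels: the registers of the last task that used math are written back only when another task traps on an FP instruction (or when switch_to forces a giveup). A hedged C outline of the bookkeeping; the task struct and the save_fprs/restore_fprs helpers are illustrative stand-ins for the real structures and the SAVE_32FPRS/REST_32FPRS macros, with the MSR_* constants as used in the assembler:

struct task_sketch { unsigned long msr; };
static struct task_sketch *last_task_used_math;

extern void save_fprs(struct task_sketch *t);     /* ~ SAVE_32FPRS + mffs  */
extern void restore_fprs(struct task_sketch *t);  /* ~ REST_32FPRS + mtfsf */

static void lazy_fpu_load(struct task_sketch *cur)
{
        if (last_task_used_math && last_task_used_math != cur) {
                save_fprs(last_task_used_math);
                /* make the old owner trap again next time it touches FP */
                last_task_used_math->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
        }
        restore_fprs(cur);
        cur->msr |= MSR_FP;                   /* enable FP for the new owner */
        last_task_used_math = cur;
}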
121/*
122 * These are used in the alignment trap handler when emulating
123 * single-precision loads and stores.
124 * We restore and save the fpscr so the task gets the same result
125 * and exceptions as if the cpu had performed the load or store.
126 */
127
128_GLOBAL(cvt_fd)
129 lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
130 mtfsf 0xff,0
131 lfs 0,0(r3)
132 stfd 0,0(r4)
133 mffs 0
134 stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
135 blr
136
137_GLOBAL(cvt_df)
138 lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
139 mtfsf 0xff,0
140 lfd 0,0(r3)
141 stfs 0,0(r4)
142 mffs 0
143 stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
144 blr
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
new file mode 100644
index 000000000000..b102e3a2415e
--- /dev/null
+++ b/arch/powerpc/kernel/head_32.S
@@ -0,0 +1,1381 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/config.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/cache.h>
32#include <asm/thread_info.h>
33#include <asm/ppc_asm.h>
34#include <asm/asm-offsets.h>
35
36#ifdef CONFIG_APUS
37#include <asm/amigappc.h>
38#endif
39
40/* 601 only has IBAT; cr0.eq is set on 601 when using this macro */
41#define LOAD_BAT(n, reg, RA, RB) \
42 /* see the comment for clear_bats() -- Cort */ \
43 li RA,0; \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_DBAT##n##U,RA; \
46 lwz RA,(n*16)+0(reg); \
47 lwz RB,(n*16)+4(reg); \
48 mtspr SPRN_IBAT##n##U,RA; \
49 mtspr SPRN_IBAT##n##L,RB; \
50 beq 1f; \
51 lwz RA,(n*16)+8(reg); \
52 lwz RB,(n*16)+12(reg); \
53 mtspr SPRN_DBAT##n##U,RA; \
54 mtspr SPRN_DBAT##n##L,RB; \
551:
56
57 .text
58 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
59 .stabs "head_32.S",N_SO,0,0,0f
600:
61 .globl _stext
62_stext:
63
64/*
65 * _start is defined this way because the XCOFF loader in the OpenFirmware
66 * on the powermac expects the entry point to be a procedure descriptor.
67 */
68 .text
69 .globl _start
70_start:
71 /*
72 * These are here for legacy reasons, the kernel used to
73 * need to look like a coff function entry for the pmac
74 * but we're always started by some kind of bootloader now.
75 * -- Cort
76 */
77 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
78 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
79 nop
80
81/* PMAC
82 * Enter here with the kernel text, data and bss loaded starting at
83 * 0, running with virtual == physical mapping.
84 * r5 points to the prom entry point (the client interface handler
85 * address). Address translation is turned on, with the prom
86 * managing the hash table. Interrupts are disabled. The stack
87 * pointer (r1) points to just below the end of the half-meg region
88 * from 0x380000 - 0x400000, which is mapped in already.
89 *
90 * If we are booted from MacOS via BootX, we enter with the kernel
91 * image loaded somewhere, and the following values in registers:
92 * r3: 'BooX' (0x426f6f58)
93 * r4: virtual address of boot_infos_t
94 * r5: 0
95 *
96 * APUS
97 * r3: 'APUS'
98 * r4: physical address of memory base
99 * Linux/m68k style BootInfo structure at &_end.
100 *
101 * PREP
102 * This is jumped to on prep systems right after the kernel is relocated
103 * to its proper place in memory by the boot loader. The expected layout
104 * of the regs is:
105 * r3: ptr to residual data
106 * r4: initrd_start or if no initrd then 0
107 * r5: initrd_end - unused if r4 is 0
108 * r6: Start of command line string
109 * r7: End of command line string
110 *
111 * This just gets a minimal mmu environment setup so we can call
112 * start_here() to do the real work.
113 * -- Cort
114 */
115
116 .globl __start
117__start:
118/*
119 * We have to do any OF calls before we map ourselves to KERNELBASE,
120 * because OF may have I/O devices mapped into that area
121 * (particularly on CHRP).
122 */
123 cmpwi 0,r5,0
124 beq 1f
125 bl prom_init
126 trap
127
1281: mr r31,r3 /* save parameters */
129 mr r30,r4
130 li r24,0 /* cpu # */
131
132/*
133 * early_init() does the early machine identification and does
134 * the necessary low-level setup and clears the BSS
135 * -- Cort <cort@fsmlabs.com>
136 */
137 bl early_init
138
139#ifdef CONFIG_APUS
140/* On APUS the __va/__pa constants need to be set to the correct
141 * values before continuing.
142 */
143 mr r4,r30
144 bl fix_mem_constants
145#endif /* CONFIG_APUS */
146
147/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
148 * the physical address we are running at, returned by early_init()
149 */
150 bl mmu_off
151__after_mmu_off:
152 bl clear_bats
153 bl flush_tlbs
154
155 bl initial_bats
156
157/*
158 * Call setup_cpu for CPU 0 and initialize 6xx Idle
159 */
160 bl reloc_offset
161 li r24,0 /* cpu# */
162 bl call_setup_cpu /* Call setup_cpu for this CPU */
163#ifdef CONFIG_6xx
164 bl reloc_offset
165 bl init_idle_6xx
166#endif /* CONFIG_6xx */
167
168
169#ifndef CONFIG_APUS
170/*
171 * We need to run with _start at physical address 0.
172 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
173 * the exception vectors at 0 (and therefore this copy
174 * overwrites OF's exception vectors with our own).
175 * The MMU is off at this point.
176 */
177 bl reloc_offset
178 mr r26,r3
179 addis r4,r3,KERNELBASE@h /* current address of _start */
180 cmpwi 0,r4,0 /* are we already running at 0? */
181 bne relocate_kernel
182#endif /* CONFIG_APUS */
183/*
184 * we now have the 1st 16M of ram mapped with the bats.
185 * prep needs the mmu to be turned on here, but pmac already has it on.
186 * this shouldn't bother the pmac since it just gets turned on again
187 * as we jump to our code at KERNELBASE. -- Cort
188 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
189 * off, and in other cases, we now turn it off before changing BATs above.
190 */
191turn_on_mmu:
192 mfmsr r0
193 ori r0,r0,MSR_DR|MSR_IR
194 mtspr SPRN_SRR1,r0
195 lis r0,start_here@h
196 ori r0,r0,start_here@l
197 mtspr SPRN_SRR0,r0
198 SYNC
199 RFI /* enables MMU */
200
201/*
202 * We need __secondary_hold as a place to hold the other cpus on
203 * an SMP machine, even when we are running a UP kernel.
204 */
205 . = 0xc0 /* for prep bootloader */
206 li r3,1 /* MTX only has 1 cpu */
207 .globl __secondary_hold
208__secondary_hold:
209 /* tell the master we're here */
210 stw r3,__secondary_hold_acknowledge@l(0)
211#ifdef CONFIG_SMP
212100: lwz r4,0(0)
213 /* wait until we're told to start */
214 cmpw 0,r4,r3
215 bne 100b
216 /* our cpu # was at addr 0 - go */
217 mr r24,r3 /* cpu # */
218 b __secondary_start
219#else
220 b .
221#endif /* CONFIG_SMP */
222
223 .globl __secondary_hold_spinloop
224__secondary_hold_spinloop:
225 .long 0
226 .globl __secondary_hold_acknowledge
227__secondary_hold_acknowledge:
228 .long -1
229
230/*
231 * Exception entry code. This code runs with address translation
232 * turned off, i.e. using physical addresses.
233 * We assume sprg3 has the physical address of the current
234 * task's thread_struct.
235 */
236#define EXCEPTION_PROLOG \
237 mtspr SPRN_SPRG0,r10; \
238 mtspr SPRN_SPRG1,r11; \
239 mfcr r10; \
240 EXCEPTION_PROLOG_1; \
241 EXCEPTION_PROLOG_2
242
243#define EXCEPTION_PROLOG_1 \
244 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
245 andi. r11,r11,MSR_PR; \
246 tophys(r11,r1); /* use tophys(r1) if kernel */ \
247 beq 1f; \
248 mfspr r11,SPRN_SPRG3; \
249 lwz r11,THREAD_INFO-THREAD(r11); \
250 addi r11,r11,THREAD_SIZE; \
251 tophys(r11,r11); \
2521: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
253
254
255#define EXCEPTION_PROLOG_2 \
256 CLR_TOP32(r11); \
257 stw r10,_CCR(r11); /* save registers */ \
258 stw r12,GPR12(r11); \
259 stw r9,GPR9(r11); \
260 mfspr r10,SPRN_SPRG0; \
261 stw r10,GPR10(r11); \
262 mfspr r12,SPRN_SPRG1; \
263 stw r12,GPR11(r11); \
264 mflr r10; \
265 stw r10,_LINK(r11); \
266 mfspr r12,SPRN_SRR0; \
267 mfspr r9,SPRN_SRR1; \
268 stw r1,GPR1(r11); \
269 stw r1,0(r11); \
270 tovirt(r1,r11); /* set new kernel sp */ \
271 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
272 MTMSRD(r10); /* (except for mach check in rtas) */ \
273 stw r0,GPR0(r11); \
274 lis r10,0x7265; /* put exception frame marker */ \
275 addi r10,r10,0x6773; \
276 stw r10,8(r11); \
277 SAVE_4GPRS(3, r11); \
278 SAVE_2GPRS(7, r11)
279
280/*
281 * Note: code which follows this uses cr0.eq (set if from kernel),
282 * r11, r12 (SRR0), and r9 (SRR1).
283 *
284 * Note2: once we have set r1 we are in a position to take exceptions
285 * again, and we could thus set MSR:RI at that point.
286 */
287
288/*
289 * Exception vectors.
290 */
291#define EXCEPTION(n, label, hdlr, xfer) \
292 . = n; \
293label: \
294 EXCEPTION_PROLOG; \
295 addi r3,r1,STACK_FRAME_OVERHEAD; \
296 xfer(n, hdlr)
297
298#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
299 li r10,trap; \
300 stw r10,_TRAP(r11); \
301 li r10,MSR_KERNEL; \
302 copyee(r10, r9); \
303 bl tfer; \
304i##n: \
305 .long hdlr; \
306 .long ret
307
308#define COPY_EE(d, s) rlwimi d,s,0,16,16
309#define NOCOPY(d, s)
310
311#define EXC_XFER_STD(n, hdlr) \
312 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
313 ret_from_except_full)
314
315#define EXC_XFER_LITE(n, hdlr) \
316 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
317 ret_from_except)
318
319#define EXC_XFER_EE(n, hdlr) \
320 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
321 ret_from_except_full)
322
323#define EXC_XFER_EE_LITE(n, hdlr) \
324 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
325 ret_from_except)
326
327/* System reset */
328/* core99 pmac starts the secondary here by changing the vector, and
329 putting it back to what it was (unknown_exception) when done. */
330#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
331 . = 0x100
332 b __secondary_start_gemini
333#else
334 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
335#endif
336
337/* Machine check */
338/*
339 * On CHRP, this is complicated by the fact that we could get a
340 * machine check inside RTAS, and we have no guarantee that certain
341 * critical registers will have the values we expect. The set of
342 * registers that might have bad values includes all the GPRs
343 * and all the BATs. We indicate that we are in RTAS by putting
344 * a non-zero value, the address of the exception frame to use,
345 * in SPRG2. The machine check handler checks SPRG2 and uses its
346 * value if it is non-zero. If we ever needed to free up SPRG2,
347 * we could use a field in the thread_info or thread_struct instead.
348 * (Other exception handlers assume that r1 is a valid kernel stack
349 * pointer when we take an exception from supervisor mode.)
350 * -- paulus.
351 */
352 . = 0x200
353 mtspr SPRN_SPRG0,r10
354 mtspr SPRN_SPRG1,r11
355 mfcr r10
356#ifdef CONFIG_PPC_CHRP
357 mfspr r11,SPRN_SPRG2
358 cmpwi 0,r11,0
359 bne 7f
360#endif /* CONFIG_PPC_CHRP */
361 EXCEPTION_PROLOG_1
3627: EXCEPTION_PROLOG_2
363 addi r3,r1,STACK_FRAME_OVERHEAD
364#ifdef CONFIG_PPC_CHRP
365 mfspr r4,SPRN_SPRG2
366 cmpwi cr1,r4,0
367 bne cr1,1f
368#endif
369 EXC_XFER_STD(0x200, machine_check_exception)
370#ifdef CONFIG_PPC_CHRP
3711: b machine_check_in_rtas
372#endif
373
374/* Data access exception. */
375 . = 0x300
376DataAccess:
377 EXCEPTION_PROLOG
378 mfspr r10,SPRN_DSISR
379 andis. r0,r10,0xa470 /* weird error? */
380 bne 1f /* if not, try to put a PTE */
381 mfspr r4,SPRN_DAR /* into the hash table */
382 rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
383 bl hash_page
3841: stw r10,_DSISR(r11)
385 mr r5,r10
386 mfspr r4,SPRN_DAR
387 EXC_XFER_EE_LITE(0x300, handle_page_fault)
388
389
390/* Instruction access exception. */
391 . = 0x400
392InstructionAccess:
393 EXCEPTION_PROLOG
394 andis. r0,r9,0x4000 /* no pte found? */
395 beq 1f /* if so, try to put a PTE */
396 li r3,0 /* into the hash table */
397 mr r4,r12 /* SRR0 is fault address */
398 bl hash_page
3991: mr r4,r12
400 mr r5,r9
401 EXC_XFER_EE_LITE(0x400, handle_page_fault)
402
403/* External interrupt */
404 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
405
406/* Alignment exception */
407 . = 0x600
408Alignment:
409 EXCEPTION_PROLOG
410 mfspr r4,SPRN_DAR
411 stw r4,_DAR(r11)
412 mfspr r5,SPRN_DSISR
413 stw r5,_DSISR(r11)
414 addi r3,r1,STACK_FRAME_OVERHEAD
415 EXC_XFER_EE(0x600, alignment_exception)
416
417/* Program check exception */
418 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
419
420/* Floating-point unavailable */
421 . = 0x800
422FPUnavailable:
423 EXCEPTION_PROLOG
424 bne load_up_fpu /* if from user, just load it up */
425 addi r3,r1,STACK_FRAME_OVERHEAD
426 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
427
428/* Decrementer */
429 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
430
431 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
432 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
433
434/* System call */
435 . = 0xc00
436SystemCall:
437 EXCEPTION_PROLOG
438 EXC_XFER_EE_LITE(0xc00, DoSyscall)
439
440/* Single step - not used on 601 */
441 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
442 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
443
444/*
445 * The Altivec unavailable trap is at 0x0f20. Foo.
446 * We effectively remap it to 0x3000.
447 * We include an altivec unavailable exception vector even if
448 * not configured for Altivec, so that you can't panic a
449 * non-altivec kernel running on a machine with altivec just
450 * by executing an altivec instruction.
451 */
452 . = 0xf00
453 b Trap_0f
454
455 . = 0xf20
456 b AltiVecUnavailable
457
458Trap_0f:
459 EXCEPTION_PROLOG
460 addi r3,r1,STACK_FRAME_OVERHEAD
461 EXC_XFER_EE(0xf00, unknown_exception)
462
463/*
464 * Handle TLB miss for instruction on 603/603e.
465 * Note: we get an alternate set of r0 - r3 to use automatically.
466 */
467 . = 0x1000
468InstructionTLBMiss:
469/*
470 * r0: stored ctr
471 * r1: linux style pte ( later becomes ppc hardware pte )
472 * r2: ptr to linux-style pte
473 * r3: scratch
474 */
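/*
 * The miss is handled entirely in software: walk the two-level page
 * table (swapper_pg_dir for kernel addresses, otherwise the pgdir
 * field of the current thread_struct whose physical address is kept
 * in SPRG3), check the Linux PTE against the required permissions,
 * set _PAGE_ACCESSED, convert the PTE to the hardware format and
 * load it via SPRN_RPA and tlbli.
 */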
475 mfctr r0
476 /* Get PTE (linux-style) and check access */
477 mfspr r3,SPRN_IMISS
478 lis r1,KERNELBASE@h /* check if kernel address */
479 cmplw 0,r3,r1
480 mfspr r2,SPRN_SPRG3
481 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
482 lwz r2,PGDIR(r2)
483 blt+ 112f
484 lis r2,swapper_pg_dir@ha /* if kernel address, use */
485 addi r2,r2,swapper_pg_dir@l /* kernel page table */
486 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
487 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
488112: tophys(r2,r2)
489 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
490 lwz r2,0(r2) /* get pmd entry */
491 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
492 beq- InstructionAddressInvalid /* return if no mapping */
493 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
494 lwz r3,0(r2) /* get linux-style pte */
495 andc. r1,r1,r3 /* check access & ~permission */
496 bne- InstructionAddressInvalid /* return if access not permitted */
497 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
498 /*
499 * NOTE! We are assuming this is not an SMP system, otherwise
500 * we would need to update the pte atomically with lwarx/stwcx.
501 */
502 stw r3,0(r2) /* update PTE (accessed bit) */
503 /* Convert linux-style PTE to low word of PPC-style PTE */
504 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
505 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
506 and r1,r1,r2 /* writable if _RW and _DIRTY */
507 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
508 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
509 ori r1,r1,0xe14 /* clear out reserved bits and M */
510 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
511 mtspr SPRN_RPA,r1
512 mfspr r3,SPRN_IMISS
513 tlbli r3
514 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
515 mtcrf 0x80,r3
516 rfi
517InstructionAddressInvalid:
518 mfspr r3,SPRN_SRR1
519 rlwinm r1,r3,9,6,6 /* Get load/store bit */
520
521 addis r1,r1,0x2000
522 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
523 mtctr r0 /* Restore CTR */
524 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
525 or r2,r2,r1
526 mtspr SPRN_SRR1,r2
527 mfspr r1,SPRN_IMISS /* Get failing address */
528 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
529 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
530 xor r1,r1,r2
531 mtspr SPRN_DAR,r1 /* Set fault address */
532 mfmsr r0 /* Restore "normal" registers */
533 xoris r0,r0,MSR_TGPR>>16
534 mtcrf 0x80,r3 /* Restore CR0 */
535 mtmsr r0
536 b InstructionAccess
537
538/*
539 * Handle TLB miss for DATA Load operation on 603/603e
540 */
541 . = 0x1100
542DataLoadTLBMiss:
543/*
544 * r0: stored ctr
545 * r1: linux style pte ( later becomes ppc hardware pte )
546 * r2: ptr to linux-style pte
547 * r3: scratch
548 */
549 mfctr r0
550 /* Get PTE (linux-style) and check access */
551 mfspr r3,SPRN_DMISS
552 lis r1,KERNELBASE@h /* check if kernel address */
553 cmplw 0,r3,r1
554 mfspr r2,SPRN_SPRG3
555 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
556 lwz r2,PGDIR(r2)
557 blt+ 112f
558 lis r2,swapper_pg_dir@ha /* if kernel address, use */
559 addi r2,r2,swapper_pg_dir@l /* kernel page table */
560 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
561 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
562112: tophys(r2,r2)
563 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
564 lwz r2,0(r2) /* get pmd entry */
565 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
566 beq- DataAddressInvalid /* return if no mapping */
567 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
568 lwz r3,0(r2) /* get linux-style pte */
569 andc. r1,r1,r3 /* check access & ~permission */
570 bne- DataAddressInvalid /* return if access not permitted */
571 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
572 /*
573 * NOTE! We are assuming this is not an SMP system, otherwise
574 * we would need to update the pte atomically with lwarx/stwcx.
575 */
576 stw r3,0(r2) /* update PTE (accessed bit) */
577 /* Convert linux-style PTE to low word of PPC-style PTE */
578 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
579 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
580 and r1,r1,r2 /* writable if _RW and _DIRTY */
581 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
582 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
583 ori r1,r1,0xe14 /* clear out reserved bits and M */
584 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
585 mtspr SPRN_RPA,r1
586 mfspr r3,SPRN_DMISS
587 tlbld r3
588 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
589 mtcrf 0x80,r3
590 rfi
591DataAddressInvalid:
592 mfspr r3,SPRN_SRR1
593 rlwinm r1,r3,9,6,6 /* Get load/store bit */
594 addis r1,r1,0x2000
595 mtspr SPRN_DSISR,r1
596 mtctr r0 /* Restore CTR */
597 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
598 mtspr SPRN_SRR1,r2
599 mfspr r1,SPRN_DMISS /* Get failing address */
600 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
601 beq 20f /* Jump if big endian */
602 xori r1,r1,3
60320: mtspr SPRN_DAR,r1 /* Set fault address */
604 mfmsr r0 /* Restore "normal" registers */
605 xoris r0,r0,MSR_TGPR>>16
606 mtcrf 0x80,r3 /* Restore CR0 */
607 mtmsr r0
608 b DataAccess
609
610/*
611 * Handle TLB miss for DATA Store on 603/603e
612 */
613 . = 0x1200
614DataStoreTLBMiss:
615/*
616 * r0: stored ctr
617 * r1: linux style pte ( later becomes ppc hardware pte )
618 * r2: ptr to linux-style pte
619 * r3: scratch
620 */
621 mfctr r0
622 /* Get PTE (linux-style) and check access */
623 mfspr r3,SPRN_DMISS
624 lis r1,KERNELBASE@h /* check if kernel address */
625 cmplw 0,r3,r1
626 mfspr r2,SPRN_SPRG3
627 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
628 lwz r2,PGDIR(r2)
629 blt+ 112f
630 lis r2,swapper_pg_dir@ha /* if kernel address, use */
631 addi r2,r2,swapper_pg_dir@l /* kernel page table */
632 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
633 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
634112: tophys(r2,r2)
635 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
636 lwz r2,0(r2) /* get pmd entry */
637 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
638 beq- DataAddressInvalid /* return if no mapping */
639 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
640 lwz r3,0(r2) /* get linux-style pte */
641 andc. r1,r1,r3 /* check access & ~permission */
642 bne- DataAddressInvalid /* return if access not permitted */
643 ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
644 /*
645 * NOTE! We are assuming this is not an SMP system, otherwise
646 * we would need to update the pte atomically with lwarx/stwcx.
647 */
648 stw r3,0(r2) /* update PTE (accessed/dirty bits) */
649 /* Convert linux-style PTE to low word of PPC-style PTE */
650 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
651 li r1,0xe15 /* clear out reserved bits and M */
652 andc r1,r3,r1 /* PP = user? 2: 0 */
653 mtspr SPRN_RPA,r1
654 mfspr r3,SPRN_DMISS
655 tlbld r3
656 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
657 mtcrf 0x80,r3
658 rfi
659
660#ifndef CONFIG_ALTIVEC
661#define altivec_assist_exception unknown_exception
662#endif
663
664 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
665 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
666 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
667 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
668 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
669 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
670 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
678 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
679 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
680 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
681 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
682 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
683 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
684 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
685 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
686 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
687 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
688 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
689 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
690 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
691 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
692 EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
693
694 .globl mol_trampoline
695 .set mol_trampoline, i0x2f00
696
697 . = 0x3000
698
699AltiVecUnavailable:
700 EXCEPTION_PROLOG
701#ifdef CONFIG_ALTIVEC
702 bne load_up_altivec /* if from user, just load it up */
703#endif /* CONFIG_ALTIVEC */
704 EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
705
706#ifdef CONFIG_ALTIVEC
707/* Note that the AltiVec support is closely modeled after the FP
708 * support. Changes to one are likely to be applicable to the
709 * other! */
710load_up_altivec:
711/*
712 * Disable AltiVec for the task which had AltiVec previously,
713 * and save its AltiVec registers in its thread_struct.
714 * Enables AltiVec for use in the kernel on return.
715 * On SMP we know the AltiVec units are free, since we give them up on every
716 * switch. -- Kumar
717 */
718 mfmsr r5
719 oris r5,r5,MSR_VEC@h
720 MTMSRD(r5) /* enable use of AltiVec now */
721 isync
722/*
723 * For SMP, we don't do lazy AltiVec switching because it just gets too
724 * horrendously complex, especially when a task switches from one CPU
725 * to another. Instead we call giveup_altivec in switch_to.
726 */
727#ifndef CONFIG_SMP
728 tophys(r6,0)
729 addis r3,r6,last_task_used_altivec@ha
730 lwz r4,last_task_used_altivec@l(r3)
731 cmpwi 0,r4,0
732 beq 1f
733 add r4,r4,r6
734 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
735 SAVE_32VRS(0,r10,r4)
736 mfvscr vr0
737 li r10,THREAD_VSCR
738 stvx vr0,r10,r4
739 lwz r5,PT_REGS(r4)
740 add r5,r5,r6
741 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
742 lis r10,MSR_VEC@h
743 andc r4,r4,r10 /* disable altivec for previous task */
744 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7451:
746#endif /* CONFIG_SMP */
747 /* enable use of AltiVec after return */
748 oris r9,r9,MSR_VEC@h
749 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
750 li r4,1
751 li r10,THREAD_VSCR
752 stw r4,THREAD_USED_VR(r5)
753 lvx vr0,r10,r5
754 mtvscr vr0
755 REST_32VRS(0,r10,r5)
756#ifndef CONFIG_SMP
757 subi r4,r5,THREAD
758 sub r4,r4,r6
759 stw r4,last_task_used_altivec@l(r3)
760#endif /* CONFIG_SMP */
761 /* restore registers and return */
762 /* we haven't used ctr or xer or lr */
763 b fast_exception_return
764
765/*
766 * AltiVec unavailable trap from kernel - print a message, but let
767 * the task use AltiVec in the kernel until it returns to user mode.
768 */
769KernelAltiVec:
770 lwz r3,_MSR(r1)
771 oris r3,r3,MSR_VEC@h
772 stw r3,_MSR(r1) /* enable use of AltiVec after return */
773 lis r3,87f@h
774 ori r3,r3,87f@l
775 mr r4,r2 /* current */
776 lwz r5,_NIP(r1)
777 bl printk
778 b ret_from_except
77987: .string "AltiVec used in kernel (task=%p, pc=%x) \n"
780 .align 4,0
781
782/*
783 * giveup_altivec(tsk)
784 * Disable AltiVec for the task given as the argument,
785 * and save the AltiVec registers in its thread_struct.
786 * Enables AltiVec for use in the kernel on return.
787 */
788
789 .globl giveup_altivec
790giveup_altivec:
791 mfmsr r5
792 oris r5,r5,MSR_VEC@h
793 SYNC
794 MTMSRD(r5) /* enable use of AltiVec now */
795 isync
796 cmpwi 0,r3,0
797 beqlr- /* if no previous owner, done */
798 addi r3,r3,THREAD /* want THREAD of task */
799 lwz r5,PT_REGS(r3)
800 cmpwi 0,r5,0
801 SAVE_32VRS(0, r4, r3)
802 mfvscr vr0
803 li r4,THREAD_VSCR
804 stvx vr0,r4,r3
805 beq 1f
806 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
807 lis r3,MSR_VEC@h
808 andc r4,r4,r3 /* disable AltiVec for previous task */
809 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8101:
811#ifndef CONFIG_SMP
812 li r5,0
813 lis r4,last_task_used_altivec@ha
814 stw r5,last_task_used_altivec@l(r4)
815#endif /* CONFIG_SMP */
816 blr
817#endif /* CONFIG_ALTIVEC */
818
819/*
820 * This code is jumped to from the startup code to copy
821 * the kernel image to physical address 0.
822 */
823relocate_kernel:
824 addis r9,r26,klimit@ha /* fetch klimit */
825 lwz r25,klimit@l(r9)
826 addis r25,r25,-KERNELBASE@h
827 li r3,0 /* Destination base address */
828 li r6,0 /* Destination offset */
829 li r5,0x4000 /* # bytes of memory to copy */
830 bl copy_and_flush /* copy the first 0x4000 bytes */
831 addi r0,r3,4f@l /* jump to the address of 4f */
832 mtctr r0 /* in copy and do the rest. */
833 bctr /* jump to the copy */
8344: mr r5,r25
835 bl copy_and_flush /* copy the rest */
836 b turn_on_mmu
837
838/*
839 * Copy routine used to copy the kernel to start at physical address 0
840 * and flush and invalidate the caches as needed.
841 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
842 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
843 */
844_GLOBAL(copy_and_flush)
845 addi r5,r5,-4
846 addi r6,r6,-4
8474: li r0,L1_CACHE_BYTES/4
848 mtctr r0
8493: addi r6,r6,4 /* copy a cache line */
850 lwzx r0,r6,r4
851 stwx r0,r6,r3
852 bdnz 3b
853 dcbst r6,r3 /* write it to memory */
854 sync
855 icbi r6,r3 /* flush the icache line */
856 cmplw 0,r6,r5
857 blt 4b
858 sync /* additional sync needed on g4 */
859 isync
860 addi r5,r5,4
861 addi r6,r6,4
862 blr
863
864#ifdef CONFIG_APUS
865/*
866 * On APUS the physical base address of the kernel is not known at compile
867 * time, which means the __pa/__va constants used are incorrect. The
868 * __init section records the virtual addresses of the instructions that use
869 * these constants, so all that has to be done is to fix them up before
870 * continuing the kernel boot.
871 *
872 * r4 = The physical address of the kernel base.
873 */
874fix_mem_constants:
875 mr r10,r4
876 addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */
877 neg r11,r10 /* phys_to_virt constant */
878
879 lis r12,__vtop_table_begin@h
880 ori r12,r12,__vtop_table_begin@l
881 add r12,r12,r10 /* table begin phys address */
882 lis r13,__vtop_table_end@h
883 ori r13,r13,__vtop_table_end@l
884 add r13,r13,r10 /* table end phys address */
885 subi r12,r12,4
886 subi r13,r13,4
8871: lwzu r14,4(r12) /* virt address of instruction */
888 add r14,r14,r10 /* phys address of instruction */
889 lwz r15,0(r14) /* instruction, now insert top */
890 rlwimi r15,r10,16,16,31 /* half of vp const in low half */
891 stw r15,0(r14) /* of instruction and restore. */
892 dcbst r0,r14 /* write it to memory */
893 sync
894 icbi r0,r14 /* flush the icache line */
895 cmpw r12,r13
896 bne 1b
897 sync /* additional sync needed on g4 */
898 isync
899
900/*
901 * Map the memory where the exception handlers will
902 * be copied to when hash constants have been patched.
903 */
904#ifdef CONFIG_APUS_FAST_EXCEPT
905 lis r8,0xfff0
906#else
907 lis r8,0
908#endif
909 ori r8,r8,0x2 /* 128KB, supervisor */
910 mtspr SPRN_DBAT3U,r8
911 mtspr SPRN_DBAT3L,r8
912
913 lis r12,__ptov_table_begin@h
914 ori r12,r12,__ptov_table_begin@l
915 add r12,r12,r10 /* table begin phys address */
916 lis r13,__ptov_table_end@h
917 ori r13,r13,__ptov_table_end@l
918 add r13,r13,r10 /* table end phys address */
919 subi r12,r12,4
920 subi r13,r13,4
9211: lwzu r14,4(r12) /* virt address of instruction */
922 add r14,r14,r10 /* phys address of instruction */
923 lwz r15,0(r14) /* instruction, now insert top */
924 rlwimi r15,r11,16,16,31 /* half of pv const in low half*/
925 stw r15,0(r14) /* of instruction and restore. */
926 dcbst r0,r14 /* write it to memory */
927 sync
928 icbi r0,r14 /* flush the icache line */
929 cmpw r12,r13
930 bne 1b
931
932 sync /* additional sync needed on g4 */
933 isync /* No speculative loading until now */
934 blr
935
936/***********************************************************************
937 * Please note that on APUS the exception handlers are located at the
938 * physical address 0xfff0000. For this reason, the exception handlers
939 * cannot use relative branches to access the code below.
940 ***********************************************************************/
941#endif /* CONFIG_APUS */
942
943#ifdef CONFIG_SMP
944#ifdef CONFIG_GEMINI
945 .globl __secondary_start_gemini
946__secondary_start_gemini:
947 mfspr r4,SPRN_HID0
948 ori r4,r4,HID0_ICFI
949 li r3,0
950 ori r3,r3,HID0_ICE
951 andc r4,r4,r3
952 mtspr SPRN_HID0,r4
953 sync
954 b __secondary_start
955#endif /* CONFIG_GEMINI */
956
957 .globl __secondary_start_pmac_0
958__secondary_start_pmac_0:
959 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
960 li r24,0
961 b 1f
962 li r24,1
963 b 1f
964 li r24,2
965 b 1f
966 li r24,3
9671:
968 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
969 set to map the 0xf0000000 - 0xffffffff region */
970 mfmsr r0
971 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
972 SYNC
973 mtmsr r0
974 isync
975
976 .globl __secondary_start
977__secondary_start:
978 /* Copy some CPU settings from CPU 0 */
979 bl __restore_cpu_setup
980
981 lis r3,-KERNELBASE@h
982 mr r4,r24
983 bl call_setup_cpu /* Call setup_cpu for this CPU */
984#ifdef CONFIG_6xx
985 lis r3,-KERNELBASE@h
986 bl init_idle_6xx
987#endif /* CONFIG_6xx */
988
989 /* get current_thread_info and current */
990 lis r1,secondary_ti@ha
991 tophys(r1,r1)
992 lwz r1,secondary_ti@l(r1)
993 tophys(r2,r1)
994 lwz r2,TI_TASK(r2)
995
996 /* stack */
997 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
998 li r0,0
999 tophys(r3,r1)
1000 stw r0,0(r3)
1001
1002 /* load up the MMU */
1003 bl load_up_mmu
1004
1005 /* ptr to phys current thread */
1006 tophys(r4,r2)
1007 addi r4,r4,THREAD /* phys address of our thread_struct */
1008 CLR_TOP32(r4)
1009 mtspr SPRN_SPRG3,r4
1010 li r3,0
1011 mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */
1012
1013 /* enable MMU and jump to start_secondary */
1014 li r4,MSR_KERNEL
1015 FIX_SRR1(r4,r5)
1016 lis r3,start_secondary@h
1017 ori r3,r3,start_secondary@l
1018 mtspr SPRN_SRR0,r3
1019 mtspr SPRN_SRR1,r4
1020 SYNC
1021 RFI
1022#endif /* CONFIG_SMP */
1023
1024/*
1025 * These generic dummy functions are kept for CPUs not
1026 * included in CONFIG_6xx
1027 */
1028#if !defined(CONFIG_6xx)
1029_GLOBAL(__save_cpu_setup)
1030 blr
1031_GLOBAL(__restore_cpu_setup)
1032 blr
1033#endif /* !defined(CONFIG_6xx) */
1034
1035
1036/*
1037 * Load stuff into the MMU. Intended to be called with
1038 * IR=0 and DR=0.
1039 */
1040load_up_mmu:
1041 sync /* Force all PTE updates to finish */
1042 isync
1043 tlbia /* Clear all TLB entries */
1044 sync /* wait for tlbia/tlbie to finish */
1045 TLBSYNC /* ... on all CPUs */
1046 /* Load the SDR1 register (hash table base & size) */
1047 lis r6,_SDR1@ha
1048 tophys(r6,r6)
1049 lwz r6,_SDR1@l(r6)
1050 mtspr SPRN_SDR1,r6
1051 li r0,16 /* load up segment register values */
1052 mtctr r0 /* for context 0 */
1053 lis r3,0x2000 /* Ku = 1, VSID = 0 */
1054 li r4,0
10553: mtsrin r3,r4
1056 addi r3,r3,0x111 /* increment VSID */
1057 addis r4,r4,0x1000 /* address of next segment */
1058 bdnz 3b
1059
1060/* Load the BAT registers with the values set up by MMU_init.
1061 MMU_init takes care of whether we're on a 601 or not. */
1062 mfpvr r3
1063 srwi r3,r3,16
1064 cmpwi r3,1
1065 lis r3,BATS@ha
1066 addi r3,r3,BATS@l
1067 tophys(r3,r3)
1068 LOAD_BAT(0,r3,r4,r5)
1069 LOAD_BAT(1,r3,r4,r5)
1070 LOAD_BAT(2,r3,r4,r5)
1071 LOAD_BAT(3,r3,r4,r5)
1072
1073 blr
1074
1075/*
1076 * This is where the main kernel code starts.
1077 */
1078start_here:
1079 /* ptr to current */
1080 lis r2,init_task@h
1081 ori r2,r2,init_task@l
1082 /* Set up for using our exception vectors */
1083 /* ptr to phys current thread */
1084 tophys(r4,r2)
1085 addi r4,r4,THREAD /* init task's THREAD */
1086 CLR_TOP32(r4)
1087 mtspr SPRN_SPRG3,r4
1088 li r3,0
1089 mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */
1090
1091 /* stack */
1092 lis r1,init_thread_union@ha
1093 addi r1,r1,init_thread_union@l
1094 li r0,0
1095 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
1096/*
1097 * Do early platform-specific initialization,
1098 * and set up the MMU.
1099 */
1100 mr r3,r31
1101 mr r4,r30
1102 bl machine_init
1103 bl MMU_init
1104
1105#ifdef CONFIG_APUS
1106 /* Copy exception code to exception vector base on APUS. */
1107 lis r4,KERNELBASE@h
1108#ifdef CONFIG_APUS_FAST_EXCEPT
1109 lis r3,0xfff0 /* Copy to 0xfff00000 */
1110#else
1111 lis r3,0 /* Copy to 0x00000000 */
1112#endif
1113 li r5,0x4000 /* # bytes of memory to copy */
1114 li r6,0
1115 bl copy_and_flush /* copy the first 0x4000 bytes */
1116#endif /* CONFIG_APUS */
1117
1118/*
1119 * Go back to running unmapped so we can load up new values
1120 * for SDR1 (hash table pointer) and the segment registers
1121 * and change to using our exception vectors.
1122 */
1123 lis r4,2f@h
1124 ori r4,r4,2f@l
1125 tophys(r4,r4)
1126 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1127 FIX_SRR1(r3,r5)
1128 mtspr SPRN_SRR0,r4
1129 mtspr SPRN_SRR1,r3
1130 SYNC
1131 RFI
1132/* Load up the kernel context */
11332: bl load_up_mmu
1134
1135#ifdef CONFIG_BDI_SWITCH
1136 /* Add helper information for the Abatron bdiGDB debugger.
1137 * We do this here because we know the mmu is disabled, and
1138 * will be enabled for real in just a few instructions.
1139 */
1140 lis r5, abatron_pteptrs@h
1141 ori r5, r5, abatron_pteptrs@l
1142 stw r5, 0xf0(r0) /* This must match your Abatron config */
1143 lis r6, swapper_pg_dir@h
1144 ori r6, r6, swapper_pg_dir@l
1145 tophys(r5, r5)
1146 stw r6, 0(r5)
1147#endif /* CONFIG_BDI_SWITCH */
1148
1149/* Now turn on the MMU for real! */
1150 li r4,MSR_KERNEL
1151 FIX_SRR1(r4,r5)
1152 lis r3,start_kernel@h
1153 ori r3,r3,start_kernel@l
1154 mtspr SPRN_SRR0,r3
1155 mtspr SPRN_SRR1,r4
1156 SYNC
1157 RFI
1158
1159/*
1160 * Set up the segment registers for a new context.
1161 */
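/*
 * The multiply by 897 below skews successive contexts apart in the
 * VSID space, and the loop then steps the VSID by 0x111 for each
 * user segment register, so every segment of a context ends up with
 * its own VSID.
 */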
1162_GLOBAL(set_context)
1163 mulli r3,r3,897 /* multiply context by skew factor */
1164 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1165 addis r3,r3,0x6000 /* Set Ks, Ku bits */
1166 li r0,NUM_USER_SEGMENTS
1167 mtctr r0
1168
1169#ifdef CONFIG_BDI_SWITCH
1170 /* Context switch the PTE pointer for the Abatron BDI2000.
1171 * The PGDIR is passed as second argument.
1172 */
1173 lis r5, KERNELBASE@h
1174 lwz r5, 0xf0(r5)
1175 stw r4, 0x4(r5)
1176#endif
1177 li r4,0
1178 isync
11793:
1180 mtsrin r3,r4
1181 addi r3,r3,0x111 /* next VSID */
1182 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
1183 addis r4,r4,0x1000 /* address of next segment */
1184 bdnz 3b
1185 sync
1186 isync
1187 blr
1188
1189/*
1190 * An undocumented "feature" of 604e requires that the v bit
1191 * be cleared before changing BAT values.
1192 *
1193 * Also, newer IBM firmware does not clear BAT3 and BAT4, so
1194 * this makes sure it's done.
1195 * -- Cort
1196 */
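/*
 * The 601 has only four BATs and they are unified (no separate data
 * BATs), so the DBAT writes below are skipped when the PVR says 601
 * and only the IBAT set is cleared there.
 */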
1197clear_bats:
1198 li r10,0
1199 mfspr r9,SPRN_PVR
1200 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1201 cmpwi r9, 1
1202 beq 1f
1203
1204 mtspr SPRN_DBAT0U,r10
1205 mtspr SPRN_DBAT0L,r10
1206 mtspr SPRN_DBAT1U,r10
1207 mtspr SPRN_DBAT1L,r10
1208 mtspr SPRN_DBAT2U,r10
1209 mtspr SPRN_DBAT2L,r10
1210 mtspr SPRN_DBAT3U,r10
1211 mtspr SPRN_DBAT3L,r10
12121:
1213 mtspr SPRN_IBAT0U,r10
1214 mtspr SPRN_IBAT0L,r10
1215 mtspr SPRN_IBAT1U,r10
1216 mtspr SPRN_IBAT1L,r10
1217 mtspr SPRN_IBAT2U,r10
1218 mtspr SPRN_IBAT2L,r10
1219 mtspr SPRN_IBAT3U,r10
1220 mtspr SPRN_IBAT3L,r10
1221BEGIN_FTR_SECTION
1222 /* Here's a tweak: at this point, CPU setup has
1223 * not been called yet, so HIGH_BAT_EN may not be
1224 * set in HID0 for the 745x processors. However, it
1225 * seems that doesn't affect our ability to actually
1226 * write to these SPRs.
1227 */
1228 mtspr SPRN_DBAT4U,r10
1229 mtspr SPRN_DBAT4L,r10
1230 mtspr SPRN_DBAT5U,r10
1231 mtspr SPRN_DBAT5L,r10
1232 mtspr SPRN_DBAT6U,r10
1233 mtspr SPRN_DBAT6L,r10
1234 mtspr SPRN_DBAT7U,r10
1235 mtspr SPRN_DBAT7L,r10
1236 mtspr SPRN_IBAT4U,r10
1237 mtspr SPRN_IBAT4L,r10
1238 mtspr SPRN_IBAT5U,r10
1239 mtspr SPRN_IBAT5L,r10
1240 mtspr SPRN_IBAT6U,r10
1241 mtspr SPRN_IBAT6L,r10
1242 mtspr SPRN_IBAT7U,r10
1243 mtspr SPRN_IBAT7L,r10
1244END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
1245 blr
1246
1247flush_tlbs:
1248 lis r10, 0x40
12491: addic. r10, r10, -0x1000
1250 tlbie r10
1251 blt 1b
1252 sync
1253 blr
1254
1255mmu_off:
1256 addi r4, r3, __after_mmu_off - _start
1257 mfmsr r3
1258 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1259 beqlr
1260 andc r3,r3,r0
1261 mtspr SPRN_SRR0,r4
1262 mtspr SPRN_SRR1,r3
1263 sync
1264 RFI
1265
1266/*
1267 * Use the first pair of BAT registers to map the 1st 16MB
1268 * of RAM to KERNELBASE. From this point on we can't safely
1269 * call OF any more.
1270 */
1271initial_bats:
1272 lis r11,KERNELBASE@h
1273 mfspr r9,SPRN_PVR
1274 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1275 cmpwi 0,r9,1
1276 bne 4f
1277 ori r11,r11,4 /* set up BAT registers for 601 */
1278 li r8,0x7f /* valid, block length = 8MB */
1279 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
1280 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
1281 mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
1282 mtspr SPRN_IBAT0L,r8 /* lower BAT register */
1283 mtspr SPRN_IBAT1U,r9
1284 mtspr SPRN_IBAT1L,r10
1285 isync
1286 blr
1287
12884: tophys(r8,r11)
1289#ifdef CONFIG_SMP
1290 ori r8,r8,0x12 /* R/W access, M=1 */
1291#else
1292 ori r8,r8,2 /* R/W access */
1293#endif /* CONFIG_SMP */
1294#ifdef CONFIG_APUS
1295 ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
1296#else
1297 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1298#endif /* CONFIG_APUS */
1299
1300 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
1301 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1302 mtspr SPRN_IBAT0L,r8
1303 mtspr SPRN_IBAT0U,r11
1304 isync
1305 blr
1306
1307
1308#ifdef CONFIG_8260
1309/* Jump into the system reset for the rom.
1310 * We first disable the MMU, and then jump to the ROM reset address.
1311 *
1312 * r3 is the board info structure, r4 is the location for starting.
1313 * I use this for building a small kernel that can load other kernels,
1314 * rather than trying to write or rely on a rom monitor that can tftp load.
1315 */
1316 .globl m8260_gorom
1317m8260_gorom:
1318 mfmsr r0
1319 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1320 sync
1321 mtmsr r0
1322 sync
1323 mfspr r11, SPRN_HID0
1324 lis r10, 0
1325 ori r10,r10,HID0_ICE|HID0_DCE
1326 andc r11, r11, r10
1327 mtspr SPRN_HID0, r11
1328 isync
1329 li r5, MSR_ME|MSR_RI
1330 lis r6,2f@h
1331 addis r6,r6,-KERNELBASE@h
1332 ori r6,r6,2f@l
1333 mtspr SPRN_SRR0,r6
1334 mtspr SPRN_SRR1,r5
1335 isync
1336 sync
1337 rfi
13382:
1339 mtlr r4
1340 blr
1341#endif
1342
1343
1344/*
1345 * We put a few things here that have to be page-aligned.
1346 * This stuff goes at the beginning of the data segment,
1347 * which is page-aligned.
1348 */
1349 .data
1350 .globl sdata
1351sdata:
1352 .globl empty_zero_page
1353empty_zero_page:
1354 .space 4096
1355
1356 .globl swapper_pg_dir
1357swapper_pg_dir:
1358 .space 4096
1359
1360/*
1361 * This space gets a copy of optional info passed to us by the bootstrap
1362 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1363 */
1364 .globl cmd_line
1365cmd_line:
1366 .space 512
1367
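/*
 * Each non-zero entry below is the i0xNNN label emitted by
 * EXC_XFER_TEMPLATE for that vector, i.e. the address of the pair of
 * words holding the vector's handler and return addresses.
 */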
1368 .globl intercept_table
1369intercept_table:
1370 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
1371 .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
1372 .long 0, 0, 0, i0x1300, 0, 0, 0, 0
1373 .long 0, 0, 0, 0, 0, 0, 0, 0
1374 .long 0, 0, 0, 0, 0, 0, 0, 0
1375 .long 0, 0, 0, 0, 0, 0, 0, 0
1376
1377/* Room for two PTE pointers, usually the kernel and current user pointers
1378 * to their respective root page table.
1379 */
1380abatron_pteptrs:
1381 .space 8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
new file mode 100644
index 000000000000..8b49679fad54
--- /dev/null
+++ b/arch/powerpc/kernel/head_44x.S
@@ -0,0 +1,782 @@
1/*
2 * arch/ppc/kernel/head_44x.S
3 *
4 * Kernel execution entry point code.
5 *
6 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
7 * Initial PowerPC version.
8 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Rewritten for PReP
10 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
11 * Low-level exception handlers, MMU support, and rewrite.
12 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
13 * PowerPC 8xx modifications.
14 * Copyright (c) 1998-1999 TiVo, Inc.
15 * PowerPC 403GCX modifications.
16 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
17 * PowerPC 403GCX/405GP modifications.
18 * Copyright 2000 MontaVista Software Inc.
19 * PPC405 modifications
20 * PowerPC 403GCX/405GP modifications.
21 * Author: MontaVista Software, Inc.
22 * frank_rowand@mvista.com or source@mvista.com
23 * debbie_chu@mvista.com
24 * Copyright 2002-2005 MontaVista Software, Inc.
25 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
26 *
27 * This program is free software; you can redistribute it and/or modify it
28 * under the terms of the GNU General Public License as published by the
29 * Free Software Foundation; either version 2 of the License, or (at your
30 * option) any later version.
31 */
32
33#include <linux/config.h>
34#include <asm/processor.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/ibm4xx.h>
39#include <asm/ibm44x.h>
40#include <asm/cputable.h>
41#include <asm/thread_info.h>
42#include <asm/ppc_asm.h>
43#include <asm/asm-offsets.h>
44#include "head_booke.h"
45
46
47/* As with the other PowerPC ports, it is expected that when code
48 * execution begins here, the following registers contain valid, yet
49 * optional, information:
50 *
51 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
52 * r4 - Starting address of the init RAM disk
53 * r5 - Ending address of the init RAM disk
54 * r6 - Start of kernel command line string (e.g. "mem=128")
55 * r7 - End of kernel command line string
56 *
57 */
58 .text
59_GLOBAL(_stext)
60_GLOBAL(_start)
61 /*
62 * Reserve a word at a fixed location to store the address
63 * of abatron_pteptrs
64 */
65 nop
66/*
67 * Save parameters we are passed
68 */
69 mr r31,r3
70 mr r30,r4
71 mr r29,r5
72 mr r28,r6
73 mr r27,r7
74 li r24,0 /* CPU number */
75
76/*
77 * Set up the initial MMU state
78 *
79 * We are still executing code at the virtual address
80 * mappings set by the firmware for the base of RAM.
81 *
82 * We first invalidate all TLB entries but the one
83 * we are running from. We then load the KERNELBASE
84 * mappings so we can begin to use kernel addresses
85 * natively and so the interrupt vector locations are
86 * permanently pinned (necessary since Book E
87 * implementations always have translation enabled).
88 *
89 * TODO: Use the known TLB entry we are running from to
90 * determine which physical region we are located
91 * in. This can be used to determine where in RAM
92 * (on a shared CPU system) or PCI memory space
93 * (on a DRAMless system) we are located.
94 * For now, we assume a perfect world which means
95 * we are located at the base of DRAM (physical 0).
96 */
97
98/*
99 * Search TLB for entry that we are currently using.
100 * Invalidate all entries but the one we are using.
101 */
102 /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
103 mfspr r3,SPRN_PID /* Get PID */
104 mfmsr r4 /* Get MSR */
105 andi. r4,r4,MSR_IS@l /* TS=1? */
106 beq wmmucr /* If not, leave STS=0 */
107 oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
108wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
109 sync
110
111 bl invstr /* Find our address */
112invstr: mflr r5 /* Make it accessible */
113 tlbsx r23,0,r5 /* Find entry we are in */
114 li r4,0 /* Start at TLB entry 0 */
115 li r3,0 /* Set PAGEID inval value */
1161: cmpw r23,r4 /* Is this our entry? */
117 beq skpinv /* If so, skip the inval */
118 tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
119skpinv: addi r4,r4,1 /* Increment */
120 cmpwi r4,64 /* Are we done? */
121 bne 1b /* If not, repeat */
122 isync /* If so, context change */
123
124/*
125 * Configure and load pinned entry into TLB slot 63.
126 */
127
128 lis r3,KERNELBASE@h /* Load the kernel virtual address */
129 ori r3,r3,KERNELBASE@l
130
131 /* Kernel is at the base of RAM */
132 li r4, 0 /* Load the kernel physical address */
133
134 /* Load the kernel PID = 0 */
135 li r0,0
136 mtspr SPRN_PID,r0
137 sync
138
139 /* Initialize MMUCR */
140 li r5,0
141 mtspr SPRN_MMUCR,r5
142 sync
143
144 /* pageid fields */
145 clrrwi r3,r3,10 /* Mask off the effective page number */
146 ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
147
148 /* xlat fields */
149 clrrwi r4,r4,10 /* Mask off the real page number */
150 /* ERPN is 0 for first 4GB page */
151
152 /* attrib fields */
153 /* Added guarded bit to protect against speculative loads/stores */
154 li r5,0
155 ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
156
157 li r0,63 /* TLB slot 63 */
158
159 tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
160 tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
161 tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
162
163 /* Force context change */
164 mfmsr r0
165 mtspr SPRN_SRR1, r0
166 lis r0,3f@h
167 ori r0,r0,3f@l
168 mtspr SPRN_SRR0,r0
169 sync
170 rfi
171
172 /* If necessary, invalidate original entry we used */
1733: cmpwi r23,63
174 beq 4f
175 li r6,0
176 tlbwe r6,r23,PPC44x_TLB_PAGEID
177 isync
178
1794:
180#ifdef CONFIG_SERIAL_TEXT_DEBUG
181 /*
182 * Add temporary UART mapping for early debug.
183 * We can map UART registers wherever we want as long as they don't
184 * interfere with other system mappings (e.g. with pinned entries).
185 * For an example of how we handle this - see ocotea.h. --ebs
186 */
187 /* pageid fields */
188 lis r3,UART0_IO_BASE@h
189 ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
190
191 /* xlat fields */
192 lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */
193#ifndef CONFIG_440EP
194 ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */
195#endif
196
197 /* attrib fields */
198 li r5,0
199 ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
200
201 li r0,0 /* TLB slot 0 */
202
203 tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
204 tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
205 tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
206
207 /* Force context change */
208 isync
209#endif /* CONFIG_SERIAL_TEXT_DEBUG */
210
211 /* Establish the interrupt vector offsets */
212 SET_IVOR(0, CriticalInput);
213 SET_IVOR(1, MachineCheck);
214 SET_IVOR(2, DataStorage);
215 SET_IVOR(3, InstructionStorage);
216 SET_IVOR(4, ExternalInput);
217 SET_IVOR(5, Alignment);
218 SET_IVOR(6, Program);
219 SET_IVOR(7, FloatingPointUnavailable);
220 SET_IVOR(8, SystemCall);
221 SET_IVOR(9, AuxillaryProcessorUnavailable);
222 SET_IVOR(10, Decrementer);
223 SET_IVOR(11, FixedIntervalTimer);
224 SET_IVOR(12, WatchdogTimer);
225 SET_IVOR(13, DataTLBError);
226 SET_IVOR(14, InstructionTLBError);
227 SET_IVOR(15, Debug);
228
229 /* Establish the interrupt vector base */
230 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
231 mtspr SPRN_IVPR,r4
232
233#ifdef CONFIG_440EP
234 /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */
235 mfspr r2,SPRN_CCR0
236 lis r3,0xffef
237 ori r3,r3,0xffff
238 and r2,r2,r3
239 mtspr SPRN_CCR0,r2
240 isync
241#endif
242
243 /*
244 * This is where the main kernel code starts.
245 */
246
247 /* ptr to current */
248 lis r2,init_task@h
249 ori r2,r2,init_task@l
250
251 /* ptr to current thread */
252 addi r4,r2,THREAD /* init task's THREAD */
253 mtspr SPRN_SPRG3,r4
254
255 /* stack */
256 lis r1,init_thread_union@h
257 ori r1,r1,init_thread_union@l
258 li r0,0
259 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
260
261 bl early_init
262
263/*
264 * Decide what sort of machine this is and initialize the MMU.
265 */
266 mr r3,r31
267 mr r4,r30
268 mr r5,r29
269 mr r6,r28
270 mr r7,r27
271 bl machine_init
272 bl MMU_init
273
274 /* Setup PTE pointers for the Abatron bdiGDB */
275 lis r6, swapper_pg_dir@h
276 ori r6, r6, swapper_pg_dir@l
277 lis r5, abatron_pteptrs@h
278 ori r5, r5, abatron_pteptrs@l
279 lis r4, KERNELBASE@h
280 ori r4, r4, KERNELBASE@l
281 stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
282 stw r6, 0(r5)
283
284 /* Let's move on */
285 lis r4,start_kernel@h
286 ori r4,r4,start_kernel@l
287 lis r3,MSR_KERNEL@h
288 ori r3,r3,MSR_KERNEL@l
289 mtspr SPRN_SRR0,r4
290 mtspr SPRN_SRR1,r3
291 rfi /* change context and jump to start_kernel */
292
293/*
294 * Interrupt vector entry code
295 *
296 * The Book E MMUs are always on so we don't need to handle
297 * interrupts in real mode as with previous PPC processors. In
298 * this case we handle interrupts in the kernel virtual address
299 * space.
300 *
301 * Interrupt vectors are dynamically placed relative to the
302 * interrupt prefix as determined by the address of interrupt_base.
303 * The interrupt vectors offsets are programmed using the labels
304 * for each interrupt vector entry.
305 *
306 * Interrupt vectors must be aligned on a 16 byte boundary.
307 * We align on a 32 byte cache line boundary for good measure.
308 */
309
310interrupt_base:
311 /* Critical Input Interrupt */
312 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
313
314 /* Machine Check Interrupt */
315#ifdef CONFIG_440A
316 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
317#else
318 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
319#endif
320
321 /* Data Storage Interrupt */
322 START_EXCEPTION(DataStorage)
323 mtspr SPRN_SPRG0, r10 /* Save some working registers */
324 mtspr SPRN_SPRG1, r11
325 mtspr SPRN_SPRG4W, r12
326 mtspr SPRN_SPRG5W, r13
327 mfcr r11
328 mtspr SPRN_SPRG7W, r11
329
330 /*
331 * Check if it was a store fault; if not, bail out
332 * because a user tried to access a kernel or
333 * read-protected page. Otherwise, get the
334 * offending address and handle it.
335 */
336 mfspr r10, SPRN_ESR
337 andis. r10, r10, ESR_ST@h
338 beq 2f
339
340 mfspr r10, SPRN_DEAR /* Get faulting address */
341
342 /* If we are faulting a kernel address, we have to use the
343 * kernel page tables.
344 */
345 lis r11, TASK_SIZE@h
346 cmplw r10, r11
347 blt+ 3f
348 lis r11, swapper_pg_dir@h
349 ori r11, r11, swapper_pg_dir@l
350
351 mfspr r12,SPRN_MMUCR
352 rlwinm r12,r12,0,0,23 /* Clear TID */
353
354 b 4f
355
356 /* Get the PGD for the current thread */
3573:
358 mfspr r11,SPRN_SPRG3
359 lwz r11,PGDIR(r11)
360
361 /* Load PID into MMUCR TID */
362 mfspr r12,SPRN_MMUCR /* Get MMUCR */
363 mfspr r13,SPRN_PID /* Get PID */
364 rlwimi r12,r13,0,24,31 /* Set TID */
365
3664:
367 mtspr SPRN_MMUCR,r12
368
369 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
370 lwzx r11, r12, r11 /* Get pgd/pmd entry */
371 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
372 beq 2f /* Bail if no table */
373
374 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
375 lwz r11, 4(r12) /* Get pte entry */
376
377 andi. r13, r11, _PAGE_RW /* Is it writeable? */
378 beq 2f /* Bail if not */
379
380 /* Update 'changed'.
381 */
382 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
383 stw r11, 4(r12) /* Update Linux page table */
384
385 li r13, PPC44x_TLB_SR@l /* Set SR */
386 rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */
387 rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */
388 rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */
389 rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
390 rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */
391 and r12, r12, r11 /* HWEXEC/RW & USER */
392 rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */
393 rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */
394
395 rlwimi r11,r13,0,26,31 /* Insert static perms */
396
397 rlwinm r11,r11,0,20,15 /* Clear U0-U3 */
398
399 /* find the TLB index that caused the fault. It has to be here. */
400 tlbsx r10, 0, r10
401
402 tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */
403
404 /* Done...restore registers and get out of here.
405 */
406 mfspr r11, SPRN_SPRG7R
407 mtcr r11
408 mfspr r13, SPRN_SPRG5R
409 mfspr r12, SPRN_SPRG4R
410
411 mfspr r11, SPRN_SPRG1
412 mfspr r10, SPRN_SPRG0
413 rfi /* Force context change */
414
4152:
416 /*
417 * The bailout. Restore registers to pre-exception conditions
418 * and call the heavyweights to help us out.
419 */
420 mfspr r11, SPRN_SPRG7R
421 mtcr r11
422 mfspr r13, SPRN_SPRG5R
423 mfspr r12, SPRN_SPRG4R
424
425 mfspr r11, SPRN_SPRG1
426 mfspr r10, SPRN_SPRG0
427 b data_access
428
429 /* Instruction Storage Interrupt */
430 INSTRUCTION_STORAGE_EXCEPTION
431
432 /* External Input Interrupt */
433 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
434
435 /* Alignment Interrupt */
436 ALIGNMENT_EXCEPTION
437
438 /* Program Interrupt */
439 PROGRAM_EXCEPTION
440
441 /* Floating Point Unavailable Interrupt */
442#ifdef CONFIG_PPC_FPU
443 FP_UNAVAILABLE_EXCEPTION
444#else
445 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
446#endif
447
448 /* System Call Interrupt */
449 START_EXCEPTION(SystemCall)
450 NORMAL_EXCEPTION_PROLOG
451 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
452
453 /* Auxillary Processor Unavailable Interrupt */
454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
455
456 /* Decrementer Interrupt */
457 DECREMENTER_EXCEPTION
458
459 /* Fixed Interval Timer Interrupt */
460 /* TODO: Add FIT support */
461 EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
462
463 /* Watchdog Timer Interrupt */
464 /* TODO: Add watchdog support */
465#ifdef CONFIG_BOOKE_WDT
466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
467#else
468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
469#endif
470
471 /* Data TLB Error Interrupt */
472 START_EXCEPTION(DataTLBError)
473 mtspr SPRN_SPRG0, r10 /* Save some working registers */
474 mtspr SPRN_SPRG1, r11
475 mtspr SPRN_SPRG4W, r12
476 mtspr SPRN_SPRG5W, r13
477 mfcr r11
478 mtspr SPRN_SPRG7W, r11
479 mfspr r10, SPRN_DEAR /* Get faulting address */
480
481 /* If we are faulting a kernel address, we have to use the
482 * kernel page tables.
483 */
484 lis r11, TASK_SIZE@h
485 cmplw r10, r11
486 blt+ 3f
487 lis r11, swapper_pg_dir@h
488 ori r11, r11, swapper_pg_dir@l
489
490 mfspr r12,SPRN_MMUCR
491 rlwinm r12,r12,0,0,23 /* Clear TID */
492
493 b 4f
494
495 /* Get the PGD for the current thread */
4963:
497 mfspr r11,SPRN_SPRG3
498 lwz r11,PGDIR(r11)
499
500 /* Load PID into MMUCR TID */
501 mfspr r12,SPRN_MMUCR
502 mfspr r13,SPRN_PID /* Get PID */
503 rlwimi r12,r13,0,24,31 /* Set TID */
504
5054:
506 mtspr SPRN_MMUCR,r12
507
508 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
509 lwzx r11, r12, r11 /* Get pgd/pmd entry */
510 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
511 beq 2f /* Bail if no table */
512
513 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
514 lwz r11, 4(r12) /* Get pte entry */
515 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
516 beq 2f /* Bail if not present */
517
518 ori r11, r11, _PAGE_ACCESSED
519 stw r11, 4(r12)
520
521 /* Jump to common tlb load */
522 b finish_tlb_load
523
5242:
525 /* The bailout. Restore registers to pre-exception conditions
526 * and call the heavyweights to help us out.
527 */
528 mfspr r11, SPRN_SPRG7R
529 mtcr r11
530 mfspr r13, SPRN_SPRG5R
531 mfspr r12, SPRN_SPRG4R
532 mfspr r11, SPRN_SPRG1
533 mfspr r10, SPRN_SPRG0
534 b data_access
535
536 /* Instruction TLB Error Interrupt */
537 /*
538 * Nearly the same as above, except we get our
539 * information from different registers and bail out
540 * to a different point.
541 */
542 START_EXCEPTION(InstructionTLBError)
543 mtspr SPRN_SPRG0, r10 /* Save some working registers */
544 mtspr SPRN_SPRG1, r11
545 mtspr SPRN_SPRG4W, r12
546 mtspr SPRN_SPRG5W, r13
547 mfcr r11
548 mtspr SPRN_SPRG7W, r11
549 mfspr r10, SPRN_SRR0 /* Get faulting address */
550
551 /* If we are faulting a kernel address, we have to use the
552 * kernel page tables.
553 */
554 lis r11, TASK_SIZE@h
555 cmplw r10, r11
556 blt+ 3f
557 lis r11, swapper_pg_dir@h
558 ori r11, r11, swapper_pg_dir@l
559
560 mfspr r12,SPRN_MMUCR
561 rlwinm r12,r12,0,0,23 /* Clear TID */
562
563 b 4f
564
565 /* Get the PGD for the current thread */
5663:
567 mfspr r11,SPRN_SPRG3
568 lwz r11,PGDIR(r11)
569
570 /* Load PID into MMUCR TID */
571 mfspr r12,SPRN_MMUCR
572 mfspr r13,SPRN_PID /* Get PID */
573 rlwimi r12,r13,0,24,31 /* Set TID */
574
5754:
576 mtspr SPRN_MMUCR,r12
577
578 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
579 lwzx r11, r12, r11 /* Get pgd/pmd entry */
580 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
581 beq 2f /* Bail if no table */
582
583 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
584 lwz r11, 4(r12) /* Get pte entry */
585 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
586 beq 2f /* Bail if not present */
587
588 ori r11, r11, _PAGE_ACCESSED
589 stw r11, 4(r12)
590
591 /* Jump to common TLB load point */
592 b finish_tlb_load
593
5942:
595 /* The bailout. Restore registers to pre-exception conditions
596 * and call the heavyweights to help us out.
597 */
598 mfspr r11, SPRN_SPRG7R
599 mtcr r11
600 mfspr r13, SPRN_SPRG5R
601 mfspr r12, SPRN_SPRG4R
602 mfspr r11, SPRN_SPRG1
603 mfspr r10, SPRN_SPRG0
604 b InstructionStorage
605
606 /* Debug Interrupt */
607 DEBUG_EXCEPTION
608
609/*
610 * Local functions
611 */
612 /*
613 * Data TLB exceptions will bail out to this point
614 * if they can't resolve the lightweight TLB fault.
615 */
616data_access:
617 NORMAL_EXCEPTION_PROLOG
618 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
619 stw r5,_ESR(r11)
620 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
621 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
622
623/*
624
625 * Both the instruction and data TLB miss get to this
626 * point to load the TLB.
627 * r10 - EA of fault
628 * r11 - available to use
629 * r12 - Pointer to the 64-bit PTE
630 * r13 - available to use
631 * MMUCR - loaded with proper value when we get here
632 * Upon exit, we reload everything and RFI.
633 */
634finish_tlb_load:
635 /*
636 * We set execute, because we don't have the granularity to
637 * properly set this at the page level (Linux problem).
638 * If shared is set, we cause a zero PID->TID load.
639 * Many of these bits are software only. Bits we don't set
640 * here are assumed, as they properly should be, to have the appropriate value.
641 */
642
643 /* Load the next available TLB index */
644 lis r13, tlb_44x_index@ha
645 lwz r13, tlb_44x_index@l(r13)
646 /* Load the TLB high watermark */
647 lis r11, tlb_44x_hwater@ha
648 lwz r11, tlb_44x_hwater@l(r11)
649
650 /* Increment, rollover, and store TLB index */
651 addi r13, r13, 1
652 cmpw 0, r13, r11 /* reserve entries */
653 ble 7f
654 li r13, 0
6557:
656 /* Store the next available TLB index */
657 lis r11, tlb_44x_index@ha
658 stw r13, tlb_44x_index@l(r11)
659
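/*
 * tlb_44x_hwater marks the highest TLB slot the rollover above will
 * hand out; slots above it (such as the pinned kernel mapping loaded
 * into slot 63 earlier in this file) are never replaced.
 */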
660 lwz r11, 0(r12) /* Get MS word of PTE */
661 lwz r12, 4(r12) /* Get LS word of PTE */
662 rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */
663 tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */
664
665 /*
666 * Create PAGEID. This is the faulting address,
667 * page size, and valid flag.
668 */
669 li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
670 rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */
671 tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */
672
673 li r10, PPC44x_TLB_SR@l /* Set SR */
674 rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */
675 rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */
676 rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */
677 rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
678 and r11, r12, r11 /* HWEXEC & USER */
679 rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */
680
681 rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
682 rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */
683 tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
684
685 /* Done...restore registers and get out of here.
686 */
687 mfspr r11, SPRN_SPRG7R
688 mtcr r11
689 mfspr r13, SPRN_SPRG5R
690 mfspr r12, SPRN_SPRG4R
691 mfspr r11, SPRN_SPRG1
692 mfspr r10, SPRN_SPRG0
693 rfi /* Force context change */
694
695/*
696 * Global functions
697 */
698
699/*
700 * extern void giveup_altivec(struct task_struct *prev)
701 *
702 * The 44x core does not have an AltiVec unit.
703 */
704_GLOBAL(giveup_altivec)
705 blr
706
707/*
708 * extern void giveup_fpu(struct task_struct *prev)
709 *
710 * The 44x core does not have an FPU.
711 */
712#ifndef CONFIG_PPC_FPU
713_GLOBAL(giveup_fpu)
714 blr
715#endif
716
717/*
718 * extern void abort(void)
719 *
720 * At present, this routine just applies a system reset.
721 */
722_GLOBAL(abort)
723 mfspr r13,SPRN_DBCR0
724 oris r13,r13,DBCR0_RST_SYSTEM@h
725 mtspr SPRN_DBCR0,r13
726
727_GLOBAL(set_context)
728
729#ifdef CONFIG_BDI_SWITCH
730 /* Context switch the PTE pointer for the Abatron BDI2000.
731 * The PGDIR is the second parameter.
732 */
733 lis r5, abatron_pteptrs@h
734 ori r5, r5, abatron_pteptrs@l
735 stw r4, 0x4(r5)
736#endif
737 mtspr SPRN_PID,r3
738 isync /* Force context change */
739 blr
740
741/*
742 * We put a few things here that have to be page-aligned. This stuff
743 * goes at the beginning of the data segment, which is page-aligned.
744 */
745 .data
746 .align 12
747 .globl sdata
748sdata:
749 .globl empty_zero_page
750empty_zero_page:
751 .space 4096
752
753/*
754 * To support >32-bit physical addresses, we use an 8KB pgdir.
755 */
756 .globl swapper_pg_dir
757swapper_pg_dir:
758 .space 8192
759
760/* Reserved 4k for the critical exception stack & 4k for the machine
761 * check stack per CPU for kernel mode exceptions */
762 .section .bss
763 .align 12
764exception_stack_bottom:
765 .space BOOKE_EXCEPTION_STACK_SIZE
766 .globl exception_stack_top
767exception_stack_top:
768
769/*
770 * This space gets a copy of optional info passed to us by the bootstrap
771 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
772 */
773 .globl cmd_line
774cmd_line:
775 .space 512
776
777/*
778 * Room for two PTE pointers, usually the kernel and current user pointers
779 * to their respective root page table.
780 */
781abatron_pteptrs:
782 .space 8
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S
new file mode 100644
index 000000000000..2590e97f5539
--- /dev/null
+++ b/arch/powerpc/kernel/head_4xx.S
@@ -0,0 +1,1022 @@
1/*
2 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
3 * Initial PowerPC version.
4 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
5 * Rewritten for PReP
6 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
7 * Low-level exception handlers, MMU support, and rewrite.
8 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
9 * PowerPC 8xx modifications.
10 * Copyright (c) 1998-1999 TiVo, Inc.
11 * PowerPC 403GCX modifications.
12 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
13 * PowerPC 403GCX/405GP modifications.
14 * Copyright 2000 MontaVista Software Inc.
15 * PPC405 modifications
16 * PowerPC 403GCX/405GP modifications.
17 * Author: MontaVista Software, Inc.
18 * frank_rowand@mvista.com or source@mvista.com
19 * debbie_chu@mvista.com
20 *
21 *
22 * Module name: head_4xx.S
23 *
24 * Description:
25 * Kernel execution entry point code.
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
31 *
32 */
33
34#include <linux/config.h>
35#include <asm/processor.h>
36#include <asm/page.h>
37#include <asm/mmu.h>
38#include <asm/pgtable.h>
39#include <asm/ibm4xx.h>
40#include <asm/cputable.h>
41#include <asm/thread_info.h>
42#include <asm/ppc_asm.h>
43#include <asm/asm-offsets.h>
44
45/* As with the other PowerPC ports, it is expected that when code
46 * execution begins here, the following registers contain valid, yet
47 * optional, information:
48 *
49 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
50 * r4 - Starting address of the init RAM disk
51 * r5 - Ending address of the init RAM disk
52 * r6 - Start of kernel command line string (e.g. "mem=96m")
53 * r7 - End of kernel command line string
54 *
55 * This is all going to change RSN when we add bi_recs....... -- Dan
56 */
57 .text
58_GLOBAL(_stext)
59_GLOBAL(_start)
60
61 /* Save parameters we are passed.
62 */
63 mr r31,r3
64 mr r30,r4
65 mr r29,r5
66 mr r28,r6
67 mr r27,r7
68
69 /* We have to turn on the MMU right away so we get cache modes
70 * set correctly.
71 */
72 bl initial_mmu
73
74/* We now have the lower 16 Meg mapped into TLB entries, and the caches
75 * ready to work.
76 */
77turn_on_mmu:
78 lis r0,MSR_KERNEL@h
79 ori r0,r0,MSR_KERNEL@l
80 mtspr SPRN_SRR1,r0
81 lis r0,start_here@h
82 ori r0,r0,start_here@l
83 mtspr SPRN_SRR0,r0
84 SYNC
85 rfi /* enables MMU */
86 b . /* prevent prefetch past rfi */
87
88/*
89 * This area is used for temporarily saving registers during the
90 * critical exception prolog.
91 */
92 . = 0xc0
93crit_save:
94_GLOBAL(crit_r10)
95 .space 4
96_GLOBAL(crit_r11)
97 .space 4
98
99/*
100 * Exception vector entry code. This code runs with address translation
101 * turned off (i.e. using physical addresses). We assume SPRG3 has the
102 * physical address of the current task thread_struct.
103 * Note that we have to have decremented r1 before we write to any fields
104 * of the exception frame, since a critical interrupt could occur at any
105 * time, and it will write to the area immediately below the current r1.
106 */
107#define NORMAL_EXCEPTION_PROLOG \
108 mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
109 mtspr SPRN_SPRG1,r11; \
110 mtspr SPRN_SPRG2,r1; \
111 mfcr r10; /* save CR in r10 for now */\
112 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
113 andi. r11,r11,MSR_PR; \
114 beq 1f; \
115 mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
116 lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
117 addi r1,r1,THREAD_SIZE; \
1181: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
119 tophys(r11,r1); \
120 stw r10,_CCR(r11); /* save various registers */\
121 stw r12,GPR12(r11); \
122 stw r9,GPR9(r11); \
123 mfspr r10,SPRN_SPRG0; \
124 stw r10,GPR10(r11); \
125 mfspr r12,SPRN_SPRG1; \
126 stw r12,GPR11(r11); \
127 mflr r10; \
128 stw r10,_LINK(r11); \
129 mfspr r10,SPRN_SPRG2; \
130 mfspr r12,SPRN_SRR0; \
131 stw r10,GPR1(r11); \
132 mfspr r9,SPRN_SRR1; \
133 stw r10,0(r11); \
134 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
135 stw r0,GPR0(r11); \
136 SAVE_4GPRS(3, r11); \
137 SAVE_2GPRS(7, r11)
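/* On exit from NORMAL_EXCEPTION_PROLOG:
 *	r1  - kernel (virtual) stack pointer, pointing at the new frame
 *	r11 - physical address of the exception frame
 *	r9  - saved SRR1 (with MSR_WE cleared)
 *	r12 - saved SRR0
 *	r0, r3-r12, the old r1, LR and CR have been saved in the frame.
 */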
138
139/*
140 * Exception prolog for critical exceptions. This is a little different
141 * from the normal exception prolog above since a critical exception
142 * can potentially occur at any point during normal exception processing.
143 * Thus we cannot use the same SPRG registers as the normal prolog above.
144 * Instead we use a couple of words of memory at low physical addresses.
145 * This is OK since we don't support SMP on these processors.
146 */
147#define CRITICAL_EXCEPTION_PROLOG \
148 stw r10,crit_r10@l(0); /* save two registers to work with */\
149 stw r11,crit_r11@l(0); \
150 mfcr r10; /* save CR in r10 for now */\
151 mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
152 andi. r11,r11,MSR_PR; \
153 lis r11,critical_stack_top@h; \
154 ori r11,r11,critical_stack_top@l; \
155 beq 1f; \
156 /* COMING FROM USER MODE */ \
157 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
158 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
159 addi r11,r11,THREAD_SIZE; \
1601: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
161 tophys(r11,r11); \
162 stw r10,_CCR(r11); /* save various registers */\
163 stw r12,GPR12(r11); \
164 stw r9,GPR9(r11); \
165 mflr r10; \
166 stw r10,_LINK(r11); \
167 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
168 stw r12,_DEAR(r11); /* since they may have had stuff */\
169 mfspr r9,SPRN_ESR; /* in them at the point where the */\
170 stw r9,_ESR(r11); /* exception was taken */\
171 mfspr r12,SPRN_SRR2; \
172 stw r1,GPR1(r11); \
173 mfspr r9,SPRN_SRR3; \
174 stw r1,0(r11); \
175 tovirt(r1,r11); \
176 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
177 stw r0,GPR0(r11); \
178 SAVE_4GPRS(3, r11); \
179 SAVE_2GPRS(7, r11)
180
181 /*
182 * State at this point:
183 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
184 * r10 saved in crit_r10 and in stack frame, trashed
185 * r11 saved in crit_r11 and in stack frame,
186 * now phys stack/exception frame pointer
187 * r12 saved in stack frame, now saved SRR2
188 * CR saved in stack frame, CR0.EQ = !SRR3.PR
189 * LR, DEAR, ESR in stack frame
190 * r1 saved in stack frame, now virt stack/excframe pointer
191 * r0, r3-r8 saved in stack frame
192 */
193
194/*
195 * Exception vectors.
196 */
197#define START_EXCEPTION(n, label) \
198 . = n; \
199label:
200
201#define EXCEPTION(n, label, hdlr, xfer) \
202 START_EXCEPTION(n, label); \
203 NORMAL_EXCEPTION_PROLOG; \
204 addi r3,r1,STACK_FRAME_OVERHEAD; \
205 xfer(n, hdlr)
206
207#define CRITICAL_EXCEPTION(n, label, hdlr) \
208 START_EXCEPTION(n, label); \
209 CRITICAL_EXCEPTION_PROLOG; \
210 addi r3,r1,STACK_FRAME_OVERHEAD; \
211 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
212 NOCOPY, crit_transfer_to_handler, \
213 ret_from_crit_exc)
214
215#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
216 li r10,trap; \
217 stw r10,_TRAP(r11); \
218 lis r10,msr@h; \
219 ori r10,r10,msr@l; \
220 copyee(r10, r9); \
221 bl tfer; \
222 .long hdlr; \
223 .long ret
224
225#define COPY_EE(d, s) rlwimi d,s,0,16,16
226#define NOCOPY(d, s)
227
228#define EXC_XFER_STD(n, hdlr) \
229 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
230 ret_from_except_full)
231
232#define EXC_XFER_LITE(n, hdlr) \
233 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
234 ret_from_except)
235
236#define EXC_XFER_EE(n, hdlr) \
237 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
238 ret_from_except_full)
239
240#define EXC_XFER_EE_LITE(n, hdlr) \
241 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
242 ret_from_except)
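/* The EXC_XFER_* variants differ in two ways: the _STD/_EE forms go
 * through transfer_to_handler_full/ret_from_except_full, while the
 * _LITE forms use the lighter-weight transfer/return path and record
 * that by adding 1 to the trap number stored in the frame (critical
 * exceptions add 2).  The _EE forms use COPY_EE to propagate the
 * interrupted context's MSR_EE bit into the MSR the handler runs with,
 * whereas NOCOPY leaves MSR_KERNEL unmodified.
 */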
243
244
245/*
246 * 0x0100 - Critical Interrupt Exception
247 */
248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
249
250/*
251 * 0x0200 - Machine Check Exception
252 */
253 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
254
255/*
256 * 0x0300 - Data Storage Exception
257 * This happens for just a few reasons. U0 set (but we don't do that),
258 * or zone protection fault (user violation, write to protected page).
259 * If this is just an update of modified status, we do that quickly
260 * and exit.  Otherwise, we call heavyweight functions to do the work.
261 */
262 START_EXCEPTION(0x0300, DataStorage)
263 mtspr SPRN_SPRG0, r10 /* Save some working registers */
264 mtspr SPRN_SPRG1, r11
265#ifdef CONFIG_403GCX
266 stw r12, 0(r0)
267 stw r9, 4(r0)
268 mfcr r11
269 mfspr r12, SPRN_PID
270 stw r11, 8(r0)
271 stw r12, 12(r0)
272#else
273 mtspr SPRN_SPRG4, r12
274 mtspr SPRN_SPRG5, r9
275 mfcr r11
276 mfspr r12, SPRN_PID
277 mtspr SPRN_SPRG7, r11
278 mtspr SPRN_SPRG6, r12
279#endif
280
281 /* First, check if it was a zone fault (which means a user
282 * tried to access a kernel or read-protected page - always
283 * a SEGV). All other faults here must be stores, so no
284 * need to check ESR_DST as well. */
285 mfspr r10, SPRN_ESR
286 andis. r10, r10, ESR_DIZ@h
287 bne 2f
288
289 mfspr r10, SPRN_DEAR /* Get faulting address */
290
291 /* If we are faulting a kernel address, we have to use the
292 * kernel page tables.
293 */
294 lis r11, TASK_SIZE@h
295 cmplw r10, r11
296 blt+ 3f
297 lis r11, swapper_pg_dir@h
298 ori r11, r11, swapper_pg_dir@l
299 li r9, 0
300 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
301 b 4f
302
303 /* Get the PGD for the current thread.
304 */
3053:
306 mfspr r11,SPRN_SPRG3
307 lwz r11,PGDIR(r11)
3084:
309 tophys(r11, r11)
310 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
311 lwz r11, 0(r11) /* Get L1 entry */
312 rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
313 beq 2f /* Bail if no table */
314
315 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
316 lwz r11, 0(r12) /* Get Linux PTE */
317
318 andi. r9, r11, _PAGE_RW /* Is it writeable? */
319 beq 2f /* Bail if not */
320
321 /* Update 'changed'.
322 */
323 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
324 stw r11, 0(r12) /* Update Linux page table */
325
326 /* Most of the Linux PTE is ready to load into the TLB LO.
327 * We set ZSEL, where only the LS-bit determines user access.
328 * We set execute, because we don't have the granularity to
329 * properly set this at the page level (Linux problem).
330 * If shared is set, we cause a zero PID->TID load.
331 * Many of these bits are software only. Bits we don't set
332 * here we assume (as we properly should) have the appropriate value.
333 */
334 li r12, 0x0ce2
335 andc r11, r11, r12 /* Make sure 20, 21 are zero */
336
337 /* find the TLB index that caused the fault. It has to be here.
338 */
339 tlbsx r9, 0, r10
340
341 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
342
343 /* Done...restore registers and get out of here.
344 */
345#ifdef CONFIG_403GCX
346 lwz r12, 12(r0)
347 lwz r11, 8(r0)
348 mtspr SPRN_PID, r12
349 mtcr r11
350 lwz r9, 4(r0)
351 lwz r12, 0(r0)
352#else
353 mfspr r12, SPRN_SPRG6
354 mfspr r11, SPRN_SPRG7
355 mtspr SPRN_PID, r12
356 mtcr r11
357 mfspr r9, SPRN_SPRG5
358 mfspr r12, SPRN_SPRG4
359#endif
360 mfspr r11, SPRN_SPRG1
361 mfspr r10, SPRN_SPRG0
362 PPC405_ERR77_SYNC
363 rfi /* Should sync shadow TLBs */
364 b . /* prevent prefetch past rfi */
365
3662:
367 /* The bailout. Restore registers to pre-exception conditions
368 * and call the heavyweights to help us out.
369 */
370#ifdef CONFIG_403GCX
371 lwz r12, 12(r0)
372 lwz r11, 8(r0)
373 mtspr SPRN_PID, r12
374 mtcr r11
375 lwz r9, 4(r0)
376 lwz r12, 0(r0)
377#else
378 mfspr r12, SPRN_SPRG6
379 mfspr r11, SPRN_SPRG7
380 mtspr SPRN_PID, r12
381 mtcr r11
382 mfspr r9, SPRN_SPRG5
383 mfspr r12, SPRN_SPRG4
384#endif
385 mfspr r11, SPRN_SPRG1
386 mfspr r10, SPRN_SPRG0
387 b DataAccess
388
389/*
390 * 0x0400 - Instruction Storage Exception
391 * This is caused by a fetch from non-execute or guarded pages.
392 */
393 START_EXCEPTION(0x0400, InstructionAccess)
394 NORMAL_EXCEPTION_PROLOG
395 mr r4,r12 /* Pass SRR0 as arg2 */
396 li r5,0 /* Pass zero as arg3 */
397 EXC_XFER_EE_LITE(0x400, handle_page_fault)
398
399/* 0x0500 - External Interrupt Exception */
400 EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
401
402/* 0x0600 - Alignment Exception */
403 START_EXCEPTION(0x0600, Alignment)
404 NORMAL_EXCEPTION_PROLOG
405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
406 stw r4,_DEAR(r11)
407 addi r3,r1,STACK_FRAME_OVERHEAD
408 EXC_XFER_EE(0x600, alignment_exception)
409
410/* 0x0700 - Program Exception */
411 START_EXCEPTION(0x0700, ProgramCheck)
412 NORMAL_EXCEPTION_PROLOG
413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
414 stw r4,_ESR(r11)
415 addi r3,r1,STACK_FRAME_OVERHEAD
416 EXC_XFER_STD(0x700, program_check_exception)
417
418 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
421 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
422
423/* 0x0C00 - System Call Exception */
424 START_EXCEPTION(0x0C00, SystemCall)
425 NORMAL_EXCEPTION_PROLOG
426 EXC_XFER_EE_LITE(0xc00, DoSyscall)
427
428 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
430 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
431
432/* 0x1000 - Programmable Interval Timer (PIT) Exception */
433 START_EXCEPTION(0x1000, Decrementer)
434 NORMAL_EXCEPTION_PROLOG
435 lis r0,TSR_PIS@h
436 mtspr SPRN_TSR,r0 /* Clear the PIT exception */
437 addi r3,r1,STACK_FRAME_OVERHEAD
438 EXC_XFER_LITE(0x1000, timer_interrupt)
439
440#if 0
441/* NOTE:
442 * FIT and WDT handlers are not implemented yet.
443 */
444
445/* 0x1010 - Fixed Interval Timer (FIT) Exception
446*/
447 STND_EXCEPTION(0x1010, FITException, unknown_exception)
448
449/* 0x1020 - Watchdog Timer (WDT) Exception
450*/
451#ifdef CONFIG_BOOKE_WDT
452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
453#else
454 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
455#endif
456#endif
457
458/* 0x1100 - Data TLB Miss Exception
459 * As the name implies, translation is not in the MMU, so search the
460 * page tables and fix it. The only purpose of this function is to
461 * load TLB entries from the page table if they exist.
462 */
463 START_EXCEPTION(0x1100, DTLBMiss)
464 mtspr SPRN_SPRG0, r10 /* Save some working registers */
465 mtspr SPRN_SPRG1, r11
466#ifdef CONFIG_403GCX
467 stw r12, 0(r0)
468 stw r9, 4(r0)
469 mfcr r11
470 mfspr r12, SPRN_PID
471 stw r11, 8(r0)
472 stw r12, 12(r0)
473#else
474 mtspr SPRN_SPRG4, r12
475 mtspr SPRN_SPRG5, r9
476 mfcr r11
477 mfspr r12, SPRN_PID
478 mtspr SPRN_SPRG7, r11
479 mtspr SPRN_SPRG6, r12
480#endif
481 mfspr r10, SPRN_DEAR /* Get faulting address */
482
483 /* If we are faulting a kernel address, we have to use the
484 * kernel page tables.
485 */
486 lis r11, TASK_SIZE@h
487 cmplw r10, r11
488 blt+ 3f
489 lis r11, swapper_pg_dir@h
490 ori r11, r11, swapper_pg_dir@l
491 li r9, 0
492 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
493 b 4f
494
495 /* Get the PGD for the current thread.
496 */
4973:
498 mfspr r11,SPRN_SPRG3
499 lwz r11,PGDIR(r11)
5004:
501 tophys(r11, r11)
502 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
503 lwz r12, 0(r11) /* Get L1 entry */
504 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
505 beq 2f /* Bail if no table */
506
507 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
508 lwz r11, 0(r12) /* Get Linux PTE */
509 andi. r9, r11, _PAGE_PRESENT
510 beq 5f
511
512 ori r11, r11, _PAGE_ACCESSED
513 stw r11, 0(r12)
514
515 /* Create TLB tag. This is the faulting address plus a static
516 * set of bits. These are size, valid, E, U0.
517 */
518 li r12, 0x00c0
519 rlwimi r10, r12, 0, 20, 31
520
521 b finish_tlb_load
522
5232: /* Check for possible large-page pmd entry */
524 rlwinm. r9, r12, 2, 22, 24
525 beq 5f
526
527 /* Create TLB tag. This is the faulting address, plus a static
528 * set of bits (valid, E, U0) plus the size from the PMD.
529 */
530 ori r9, r9, 0x40
531 rlwimi r10, r9, 0, 20, 31
532 mr r11, r12
533
534 b finish_tlb_load
535
5365:
537 /* The bailout. Restore registers to pre-exception conditions
538 * and call the heavyweights to help us out.
539 */
540#ifdef CONFIG_403GCX
541 lwz r12, 12(r0)
542 lwz r11, 8(r0)
543 mtspr SPRN_PID, r12
544 mtcr r11
545 lwz r9, 4(r0)
546 lwz r12, 0(r0)
547#else
548 mfspr r12, SPRN_SPRG6
549 mfspr r11, SPRN_SPRG7
550 mtspr SPRN_PID, r12
551 mtcr r11
552 mfspr r9, SPRN_SPRG5
553 mfspr r12, SPRN_SPRG4
554#endif
555 mfspr r11, SPRN_SPRG1
556 mfspr r10, SPRN_SPRG0
557 b DataAccess
558
559/* 0x1200 - Instruction TLB Miss Exception
560 * Nearly the same as above, except we get our information from different
561 * registers and bail out to a different point.
562 */
563 START_EXCEPTION(0x1200, ITLBMiss)
564 mtspr SPRN_SPRG0, r10 /* Save some working registers */
565 mtspr SPRN_SPRG1, r11
566#ifdef CONFIG_403GCX
567 stw r12, 0(r0)
568 stw r9, 4(r0)
569 mfcr r11
570 mfspr r12, SPRN_PID
571 stw r11, 8(r0)
572 stw r12, 12(r0)
573#else
574 mtspr SPRN_SPRG4, r12
575 mtspr SPRN_SPRG5, r9
576 mfcr r11
577 mfspr r12, SPRN_PID
578 mtspr SPRN_SPRG7, r11
579 mtspr SPRN_SPRG6, r12
580#endif
581 mfspr r10, SPRN_SRR0 /* Get faulting address */
582
583 /* If we are faulting a kernel address, we have to use the
584 * kernel page tables.
585 */
586 lis r11, TASK_SIZE@h
587 cmplw r10, r11
588 blt+ 3f
589 lis r11, swapper_pg_dir@h
590 ori r11, r11, swapper_pg_dir@l
591 li r9, 0
592 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
593 b 4f
594
595 /* Get the PGD for the current thread.
596 */
5973:
598 mfspr r11,SPRN_SPRG3
599 lwz r11,PGDIR(r11)
6004:
601 tophys(r11, r11)
602 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
603 lwz r12, 0(r11) /* Get L1 entry */
604 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
605 beq 2f /* Bail if no table */
606
607 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
608 lwz r11, 0(r12) /* Get Linux PTE */
609 andi. r9, r11, _PAGE_PRESENT
610 beq 5f
611
612 ori r11, r11, _PAGE_ACCESSED
613 stw r11, 0(r12)
614
615 /* Create TLB tag. This is the faulting address plus a static
616 * set of bits. These are size, valid, E, U0.
617 */
618 li r12, 0x00c0
619 rlwimi r10, r12, 0, 20, 31
620
621 b finish_tlb_load
622
6232: /* Check for possible large-page pmd entry */
624 rlwinm. r9, r12, 2, 22, 24
625 beq 5f
626
627 /* Create TLB tag. This is the faulting address, plus a static
628 * set of bits (valid, E, U0) plus the size from the PMD.
629 */
630 ori r9, r9, 0x40
631 rlwimi r10, r9, 0, 20, 31
632 mr r11, r12
633
634 b finish_tlb_load
635
6365:
637 /* The bailout. Restore registers to pre-exception conditions
638 * and call the heavyweights to help us out.
639 */
640#ifdef CONFIG_403GCX
641 lwz r12, 12(r0)
642 lwz r11, 8(r0)
643 mtspr SPRN_PID, r12
644 mtcr r11
645 lwz r9, 4(r0)
646 lwz r12, 0(r0)
647#else
648 mfspr r12, SPRN_SPRG6
649 mfspr r11, SPRN_SPRG7
650 mtspr SPRN_PID, r12
651 mtcr r11
652 mfspr r9, SPRN_SPRG5
653 mfspr r12, SPRN_SPRG4
654#endif
655 mfspr r11, SPRN_SPRG1
656 mfspr r10, SPRN_SPRG0
657 b InstructionAccess
658
659 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
662 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
663#ifdef CONFIG_IBM405_ERR51
664 /* 405GP errata 51 */
665 START_EXCEPTION(0x1700, Trap_17)
666 b DTLBMiss
667#else
668 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
669#endif
670 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
678
679/* Check for a single step debug exception while in an exception
680 * handler before state has been saved. This is to catch the case
681 * where an instruction that we are trying to single step causes
682 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
683 * the exception handler generates a single step debug exception.
684 *
685 * If we get a debug trap on the first instruction of an exception handler,
686 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
687 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
688 * The exception handler was handling a non-critical interrupt, so it will
689 * save (and later restore) the MSR via SPRN_SRR1, which will still have
690 * the MSR_DE bit set.
691 */
692 /* 0x2000 - Debug Exception */
693 START_EXCEPTION(0x2000, DebugTrap)
694 CRITICAL_EXCEPTION_PROLOG
695
696 /*
697 * If this is a single step or branch-taken exception in an
698 * exception entry sequence, it was probably meant to apply to
699 * the code where the exception occurred (since exception entry
700 * doesn't turn off DE automatically). We simulate the effect
701 * of turning off DE on entry to an exception handler by turning
702 * off DE in the SRR3 value and clearing the debug status.
703 */
704 mfspr r10,SPRN_DBSR /* check single-step/branch taken */
705 andis. r10,r10,DBSR_IC@h
706 beq+ 2f
707
708 andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */
709 beq 1f /* branch and fix it up */
710
711 mfspr r10,SPRN_SRR2 /* Faulting instruction address */
712 cmplwi r10,0x2100
713 bgt+ 2f /* address above exception vectors */
714
715 /* here it looks like we got an inappropriate debug exception. */
7161: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */
717 lis r10,DBSR_IC@h /* clear the IC event */
718 mtspr SPRN_DBSR,r10
719 /* restore state and get out */
720 lwz r10,_CCR(r11)
721 lwz r0,GPR0(r11)
722 lwz r1,GPR1(r11)
723 mtcrf 0x80,r10
724 mtspr SPRN_SRR2,r12
725 mtspr SPRN_SRR3,r9
726 lwz r9,GPR9(r11)
727 lwz r12,GPR12(r11)
728 lwz r10,crit_r10@l(0)
729 lwz r11,crit_r11@l(0)
730 PPC405_ERR77_SYNC
731 rfci
732 b .
733
734 /* continue normal handling for a critical exception... */
7352: mfspr r4,SPRN_DBSR
736 addi r3,r1,STACK_FRAME_OVERHEAD
737 EXC_XFER_TEMPLATE(DebugException, 0x2002, \
738 (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
739 NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
740
741/*
742 * The other Data TLB exceptions bail out to this point
743 * if they can't resolve the lightweight TLB fault.
744 */
745DataAccess:
746 NORMAL_EXCEPTION_PROLOG
747 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
748 stw r5,_ESR(r11)
749 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
750 EXC_XFER_EE_LITE(0x300, handle_page_fault)
751
752/* Other PowerPC processors, namely those derived from the 6xx-series,
753 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
754 * However, for the 4xx-series processors these are neither defined nor
755 * reserved.
756 */
757
758 /* Damn, I came up one instruction too many to fit into the
759 * exception space :-). Both the instruction and data TLB
760 * miss get to this point to load the TLB.
761 * r10 - TLB_TAG value
762 * r11 - Linux PTE
763 * r12, r9 - available to use
764 * PID - loaded with proper value when we get here
765 * Upon exit, we reload everything and RFI.
766 * Actually, it will fit now, but oh well.....a common place
767 * to load the TLB.
768 */
769tlb_4xx_index:
770 .long 0
771finish_tlb_load:
772 /* load the next available TLB index.
773 */
774 lwz r9, tlb_4xx_index@l(0)
775 addi r9, r9, 1
776 andi. r9, r9, (PPC4XX_TLB_SIZE-1)
777 stw r9, tlb_4xx_index@l(0)
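/* tlb_4xx_index cycles through the TLB modulo PPC4XX_TLB_SIZE, so
 * software-loaded entries are replaced in simple round-robin order.
 */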
778
7796:
780 /*
781 * Clear out the software-only bits in the PTE to generate the
782 * TLB_DATA value.  These are the bottom 2 bits of the RPN, the
783 * top 3 bits of the zone field, and M.
784 */
785 li r12, 0x0ce2
786 andc r11, r11, r12
787
788 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
789 tlbwe r10, r9, TLB_TAG /* Load TLB HI */
790
791 /* Done...restore registers and get out of here.
792 */
793#ifdef CONFIG_403GCX
794 lwz r12, 12(r0)
795 lwz r11, 8(r0)
796 mtspr SPRN_PID, r12
797 mtcr r11
798 lwz r9, 4(r0)
799 lwz r12, 0(r0)
800#else
801 mfspr r12, SPRN_SPRG6
802 mfspr r11, SPRN_SPRG7
803 mtspr SPRN_PID, r12
804 mtcr r11
805 mfspr r9, SPRN_SPRG5
806 mfspr r12, SPRN_SPRG4
807#endif
808 mfspr r11, SPRN_SPRG1
809 mfspr r10, SPRN_SPRG0
810 PPC405_ERR77_SYNC
811 rfi /* Should sync shadow TLBs */
812 b . /* prevent prefetch past rfi */
813
814/* extern void giveup_fpu(struct task_struct *prev)
815 *
816 * The PowerPC 4xx family of processors does not have an FPU, so this just
817 * returns.
818 */
819_GLOBAL(giveup_fpu)
820 blr
821
822/* This is where the main kernel code starts.
823 */
824start_here:
825
826 /* ptr to current */
827 lis r2,init_task@h
828 ori r2,r2,init_task@l
829
830 /* ptr to phys current thread */
831 tophys(r4,r2)
832 addi r4,r4,THREAD /* init task's THREAD */
833 mtspr SPRN_SPRG3,r4
834
835 /* stack */
836 lis r1,init_thread_union@ha
837 addi r1,r1,init_thread_union@l
838 li r0,0
839 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
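/* The stwu both stores a zero back-chain word (terminating the frame
 * list for backtraces) and updates r1 to point STACK_FRAME_OVERHEAD
 * bytes below the top of init_thread_union's stack area.
 */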
840
841 bl early_init /* We have to do this with MMU on */
842
843/*
844 * Decide what sort of machine this is and initialize the MMU.
845 */
846 mr r3,r31
847 mr r4,r30
848 mr r5,r29
849 mr r6,r28
850 mr r7,r27
851 bl machine_init
852 bl MMU_init
853
854/* Go back to running unmapped so we can load up new values
855 * and change to using our exception vectors.
856 * On the 4xx, all we have to do is invalidate the TLB to clear
857 * the old 16M byte TLB mappings.
858 */
859 lis r4,2f@h
860 ori r4,r4,2f@l
861 tophys(r4,r4)
862 lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
863 ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
864 mtspr SPRN_SRR0,r4
865 mtspr SPRN_SRR1,r3
866 rfi
867 b . /* prevent prefetch past rfi */
868
869/* Load up the kernel context */
8702:
871 sync /* Flush to memory before changing TLB */
872 tlbia
873 isync /* Flush shadow TLBs */
874
875 /* set up the PTE pointers for the Abatron bdiGDB.
876 */
877 lis r6, swapper_pg_dir@h
878 ori r6, r6, swapper_pg_dir@l
879 lis r5, abatron_pteptrs@h
880 ori r5, r5, abatron_pteptrs@l
881 stw r5, 0xf0(r0) /* Must match your Abatron config file */
882 tophys(r5,r5)
883 stw r6, 0(r5)
884
885/* Now turn on the MMU for real! */
886 lis r4,MSR_KERNEL@h
887 ori r4,r4,MSR_KERNEL@l
888 lis r3,start_kernel@h
889 ori r3,r3,start_kernel@l
890 mtspr SPRN_SRR0,r3
891 mtspr SPRN_SRR1,r4
892 rfi /* enable MMU and jump to start_kernel */
893 b . /* prevent prefetch past rfi */
894
895/* Set up the initial MMU state so we can do the first level of
896 * kernel initialization. This maps the first 16 MBytes of memory 1:1
897 * virtual to physical and more importantly sets the cache mode.
898 */
899initial_mmu:
900 tlbia /* Invalidate all TLB entries */
901 isync
902
903 /* We should still be executing code at physical address 0x0000xxxx
904 * at this point. However, start_here is at virtual address
905 * 0xC000xxxx. So, set up a TLB mapping to cover this once
906 * translation is enabled.
907 */
908
909 lis r3,KERNELBASE@h /* Load the kernel virtual address */
910 ori r3,r3,KERNELBASE@l
911 tophys(r4,r3) /* Load the kernel physical address */
912
913 iccci r0,r3 /* Invalidate the i-cache before use */
914
915 /* Load the kernel PID.
916 */
917 li r0,0
918 mtspr SPRN_PID,r0
919 sync
920
921	/* Configure and load an entry into TLB slot 63.
922	 * In case we are pinning TLBs, slots 62 and 63 are reserved by the
923	 * other TLB functions.  If not reserving, then it doesn't
924	 * matter where the entry is loaded.
925 */
926 clrrwi r4,r4,10 /* Mask off the real page number */
927 ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
928
929 clrrwi r3,r3,10 /* Mask off the effective page number */
930 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
931
932 li r0,63 /* TLB slot 63 */
933
934 tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
935 tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
936
937#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
938
939 /* Load a TLB entry for the UART, so that ppc4xx_progress() can use
940 * the UARTs nice and early. We use a 4k real==virtual mapping. */
941
942 lis r3,SERIAL_DEBUG_IO_BASE@h
943 ori r3,r3,SERIAL_DEBUG_IO_BASE@l
944 mr r4,r3
945 clrrwi r4,r4,12
946 ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
947
948 clrrwi r3,r3,12
949 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
950
951 li r0,0 /* TLB slot 0 */
952 tlbwe r4,r0,TLB_DATA
953 tlbwe r3,r0,TLB_TAG
954#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */
955
956 isync
957
958 /* Establish the exception vector base
959 */
960 lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */
961 tophys(r0,r4) /* Use the physical address */
962 mtspr SPRN_EVPR,r0
963
964 blr
965
966_GLOBAL(abort)
967 mfspr r13,SPRN_DBCR0
968 oris r13,r13,DBCR0_RST_SYSTEM@h
969 mtspr SPRN_DBCR0,r13
970
971_GLOBAL(set_context)
972
973#ifdef CONFIG_BDI_SWITCH
974 /* Context switch the PTE pointer for the Abatron BDI2000.
975 * The PGDIR is the second parameter.
976 */
977 lis r5, KERNELBASE@h
978 lwz r5, 0xf0(r5)
979 stw r4, 0x4(r5)
980#endif
981 sync
982 mtspr SPRN_PID,r3
983 isync /* Need an isync to flush shadow */
984 /* TLBs after changing PID */
985 blr
986
987/* We put a few things here that have to be page-aligned. This stuff
988 * goes at the beginning of the data segment, which is page-aligned.
989 */
990 .data
991 .align 12
992 .globl sdata
993sdata:
994 .globl empty_zero_page
995empty_zero_page:
996 .space 4096
997 .globl swapper_pg_dir
998swapper_pg_dir:
999 .space 4096
1000
1001
1002/* Stack for handling critical exceptions from kernel mode */
1003 .section .bss
1004 .align 12
1005exception_stack_bottom:
1006 .space 4096
1007critical_stack_top:
1008 .globl exception_stack_top
1009exception_stack_top:
1010
1011/* This space gets a copy of optional info passed to us by the bootstrap
1012 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1013 */
1014 .globl cmd_line
1015cmd_line:
1016 .space 512
1017
1018/* Room for two PTE pointers, usually the kernel and current user pointers
1019 * to their respective root page table.
1020 */
1021abatron_pteptrs:
1022 .space 8
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
new file mode 100644
index 000000000000..147215a0d6c0
--- /dev/null
+++ b/arch/powerpc/kernel/head_64.S
@@ -0,0 +1,1957 @@
1/*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/config.h>
27#include <linux/threads.h>
28#include <asm/reg.h>
29#include <asm/page.h>
30#include <asm/mmu.h>
31#include <asm/systemcfg.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34#include <asm/bug.h>
35#include <asm/cputable.h>
36#include <asm/setup.h>
37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h>
39#include <asm/thread_info.h>
40
41#ifdef CONFIG_PPC_ISERIES
42#define DO_SOFT_DISABLE
43#endif
44
45/*
46 * We layout physical memory as follows:
47 * 0x0000 - 0x00ff : Secondary processor spin code
48 * 0x0100 - 0x2fff : pSeries Interrupt prologs
49 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
50 * 0x6000 - 0x6fff : Initial (CPU0) segment table
51 * 0x7000 - 0x7fff : FWNMI data area
52 * 0x8000 - : Early init and support code
53 */
54
55/*
56 * SPRG Usage
57 *
58 * Register Definition
59 *
60 * SPRG0 reserved for hypervisor
61 * SPRG1 temp - used to save gpr
62 * SPRG2 temp - used to save gpr
63 * SPRG3 virt addr of paca
64 */
65
66/*
67 * Entering into this code we make the following assumptions:
68 * For pSeries:
69 * 1. The MMU is off & open firmware is running in real mode.
70 * 2. The kernel is entered at __start
71 *
72 * For iSeries:
73 * 1. The MMU is on (as it always is for iSeries)
74 * 2. The kernel is entered at system_reset_iSeries
75 */
76
77 .text
78 .globl _stext
79_stext:
80#ifdef CONFIG_PPC_MULTIPLATFORM
81_GLOBAL(__start)
82 /* NOP this out unconditionally */
83BEGIN_FTR_SECTION
84 b .__start_initialization_multiplatform
85END_FTR_SECTION(0, 1)
86#endif /* CONFIG_PPC_MULTIPLATFORM */
87
88 /* Catch branch to 0 in real mode */
89 trap
90
91#ifdef CONFIG_PPC_ISERIES
92 /*
93 * At offset 0x20, there is a pointer to iSeries LPAR data.
94 * This is required by the hypervisor
95 */
96 . = 0x20
97 .llong hvReleaseData-KERNELBASE
98
99 /*
100 * At offset 0x28 and 0x30 are offsets to the mschunks_map
101 * array (used by the iSeries LPAR debugger to do translation
102 * between physical addresses and absolute addresses) and
103 * to the pidhash table (also used by the debugger)
104 */
105 .llong mschunks_map-KERNELBASE
106 .llong 0 /* pidhash-KERNELBASE SFRXXX */
107
108 /* Offset 0x38 - Pointer to start of embedded System.map */
109 .globl embedded_sysmap_start
110embedded_sysmap_start:
111 .llong 0
112 /* Offset 0x40 - Pointer to end of embedded System.map */
113 .globl embedded_sysmap_end
114embedded_sysmap_end:
115 .llong 0
116
117#endif /* CONFIG_PPC_ISERIES */
118
119 /* Secondary processors spin on this value until it goes to 1. */
120 .globl __secondary_hold_spinloop
121__secondary_hold_spinloop:
122 .llong 0x0
123
124 /* Secondary processors write this value with their cpu # */
125 /* after they enter the spin loop immediately below. */
126 .globl __secondary_hold_acknowledge
127__secondary_hold_acknowledge:
128 .llong 0x0
129
130 . = 0x60
131/*
132 * The following code is used on pSeries to hold secondary processors
133 * in a spin loop after they have been freed from OpenFirmware, but
134 * before the bulk of the kernel has been relocated. This code
135 * is relocated to physical address 0x60 before prom_init is run.
136 * All of it must fit below the first exception vector at 0x100.
137 */
138_GLOBAL(__secondary_hold)
139 mfmsr r24
140 ori r24,r24,MSR_RI
141 mtmsrd r24 /* RI on */
142
143 /* Grab our linux cpu number */
144 mr r24,r3
145
146 /* Tell the master cpu we're here */
147 /* Relocation is off & we are located at an address less */
148 /* than 0x100, so only need to grab low order offset. */
149 std r24,__secondary_hold_acknowledge@l(0)
150 sync
151
152 /* All secondary cpus wait here until told to start. */
153100: ld r4,__secondary_hold_spinloop@l(0)
154 cmpdi 0,r4,1
155 bne 100b
156
157#ifdef CONFIG_HMT
158 b .hmt_init
159#else
160#ifdef CONFIG_SMP
161 mr r3,r24
162 b .pSeries_secondary_smp_init
163#else
164 BUG_OPCODE
165#endif
166#endif
167
168/* This value is used to mark exception frames on the stack. */
169 .section ".toc","aw"
170exception_marker:
171 .tc ID_72656773_68657265[TC],0x7265677368657265
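/* 0x7265677368657265 is the ASCII string "regshere"; the value is
 * stored at STACK_FRAME_OVERHEAD-16 in each exception frame (see
 * EXCEPTION_PROLOG_COMMON below) so that stack-walking code can
 * recognise frames containing a full register set.
 */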
172 .text
173
174/*
175 * The following macros define the code that appears as
176 * the prologue to each of the exception handlers. They
177 * are split into two parts to allow a single kernel binary
178 * to be used for pSeries and iSeries.
179 * LOL. One day... - paulus
180 */
181
182/*
183 * We make as much of the exception code common between native
184 * exception handlers (including pSeries LPAR) and iSeries LPAR
185 * implementations as possible.
186 */
187
188/*
189 * This is the start of the interrupt handlers for pSeries
190 * This code runs with relocation off.
191 */
192#define EX_R9 0
193#define EX_R10 8
194#define EX_R11 16
195#define EX_R12 24
196#define EX_R13 32
197#define EX_SRR0 40
198#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
199#define EX_DAR 48
200#define EX_LR 48 /* SLB miss saves LR, but not DAR */
201#define EX_DSISR 56
202#define EX_CCR 60
203
204#define EXCEPTION_PROLOG_PSERIES(area, label) \
205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
206 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
207 std r10,area+EX_R10(r13); \
208 std r11,area+EX_R11(r13); \
209 std r12,area+EX_R12(r13); \
210 mfspr r9,SPRN_SPRG1; \
211 std r9,area+EX_R13(r13); \
212 mfcr r9; \
213 clrrdi r12,r13,32; /* get high part of &label */ \
214 mfmsr r10; \
215 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
216 ori r12,r12,(label)@l; /* virt addr of handler */ \
217 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
218 mtspr SPRN_SRR0,r12; \
219 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
220 mtspr SPRN_SRR1,r10; \
221 rfid; \
222 b . /* prevent speculative execution */
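/* EXCEPTION_PROLOG_PSERIES runs with relocation off: it stashes r9-r13
 * in the given PACA save area, copies the interrupted SRR0/SRR1 into
 * r11/r12, then loads SRR0 with the virtual address of the common
 * handler and SRR1 with an MSR that has IR/DR/RI set, so the rfid
 * switches translation on and lands in the handler.
 */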
223
224/*
225 * This is the start of the interrupt handlers for iSeries
226 * This code runs with relocation on.
227 */
228#define EXCEPTION_PROLOG_ISERIES_1(area) \
229 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
230 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
231 std r10,area+EX_R10(r13); \
232 std r11,area+EX_R11(r13); \
233 std r12,area+EX_R12(r13); \
234 mfspr r9,SPRN_SPRG1; \
235 std r9,area+EX_R13(r13); \
236 mfcr r9
237
238#define EXCEPTION_PROLOG_ISERIES_2 \
239 mfmsr r10; \
240 ld r11,PACALPPACA+LPPACASRR0(r13); \
241 ld r12,PACALPPACA+LPPACASRR1(r13); \
242 ori r10,r10,MSR_RI; \
243 mtmsrd r10,1
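/* On iSeries the MMU is already on, so no rfid is needed here: the
 * hypervisor has stashed the interrupted SRR0/SRR1 in the lppaca, and
 * _ISERIES_2 just fetches them into r11/r12 and turns MSR_RI back on
 * before the caller branches (with relocation on) to the common handler.
 */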
244
245/*
246 * The common exception prolog is used for all except a few exceptions
247 * such as a segment miss on a kernel address. We have to be prepared
248 * to take another exception from the point where we first touch the
249 * kernel stack onwards.
250 *
251 * On entry r13 points to the paca, r9-r13 are saved in the paca,
252 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
253 * SRR1, and relocation is on.
254 */
255#define EXCEPTION_PROLOG_COMMON(n, area) \
256 andi. r10,r12,MSR_PR; /* See if coming from user */ \
257 mr r10,r1; /* Save r1 */ \
258 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
259 beq- 1f; \
260 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2611: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
262 bge- cr1,bad_stack; /* abort if it is */ \
263 std r9,_CCR(r1); /* save CR in stackframe */ \
264 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
265 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
266 std r10,0(r1); /* make stack chain pointer */ \
267 std r0,GPR0(r1); /* save r0 in stackframe */ \
268 std r10,GPR1(r1); /* save r1 in stackframe */ \
269 std r2,GPR2(r1); /* save r2 in stackframe */ \
270 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
271 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
272 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
273 ld r10,area+EX_R10(r13); \
274 std r9,GPR9(r1); \
275 std r10,GPR10(r1); \
276 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
277 ld r10,area+EX_R12(r13); \
278 ld r11,area+EX_R13(r13); \
279 std r9,GPR11(r1); \
280 std r10,GPR12(r1); \
281 std r11,GPR13(r1); \
282 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
283 mflr r9; /* save LR in stackframe */ \
284 std r9,_LINK(r1); \
285 mfctr r10; /* save CTR in stackframe */ \
286 std r10,_CTR(r1); \
287 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
288 std r11,_XER(r1); \
289 li r9,(n)+1; \
290 std r9,_TRAP(r1); /* set trap number */ \
291 li r10,0; \
292 ld r11,exception_marker@toc(r2); \
293 std r10,RESULT(r1); /* clear regs->result */ \
294 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
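/* After EXCEPTION_PROLOG_COMMON a full pt_regs frame has been built on
 * the kernel stack: if we came from user mode r1 was switched to the
 * task's kernel stack (PACAKSAVE), a stack pointer that still looks
 * like a user address sends us to bad_stack, and the frame is tagged
 * with exception_marker at STACK_FRAME_OVERHEAD-16 so it can be
 * recognised later.
 */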
295
296/*
297 * Exception vectors.
298 */
299#define STD_EXCEPTION_PSERIES(n, label) \
300 . = n; \
301 .globl label##_pSeries; \
302label##_pSeries: \
303 HMT_MEDIUM; \
304 mtspr SPRN_SPRG1,r13; /* save r13 */ \
305 RUNLATCH_ON(r13); \
306 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
307
308#define STD_EXCEPTION_ISERIES(n, label, area) \
309 .globl label##_iSeries; \
310label##_iSeries: \
311 HMT_MEDIUM; \
312 mtspr SPRN_SPRG1,r13; /* save r13 */ \
313 RUNLATCH_ON(r13); \
314 EXCEPTION_PROLOG_ISERIES_1(area); \
315 EXCEPTION_PROLOG_ISERIES_2; \
316 b label##_common
317
318#define MASKABLE_EXCEPTION_ISERIES(n, label) \
319 .globl label##_iSeries; \
320label##_iSeries: \
321 HMT_MEDIUM; \
322 mtspr SPRN_SPRG1,r13; /* save r13 */ \
323 RUNLATCH_ON(r13); \
324 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
325 lbz r10,PACAPROCENABLED(r13); \
326 cmpwi 0,r10,0; \
327 beq- label##_iSeries_masked; \
328 EXCEPTION_PROLOG_ISERIES_2; \
329 b label##_common; \
330
331#ifdef DO_SOFT_DISABLE
332#define DISABLE_INTS \
333 lbz r10,PACAPROCENABLED(r13); \
334 li r11,0; \
335 std r10,SOFTE(r1); \
336 mfmsr r10; \
337 stb r11,PACAPROCENABLED(r13); \
338 ori r10,r10,MSR_EE; \
339 mtmsrd r10,1
340
341#define ENABLE_INTS \
342 lbz r10,PACAPROCENABLED(r13); \
343 mfmsr r11; \
344 std r10,SOFTE(r1); \
345 ori r11,r11,MSR_EE; \
346 mtmsrd r11,1
347
348#else /* hard enable/disable interrupts */
349#define DISABLE_INTS
350
351#define ENABLE_INTS \
352 ld r12,_MSR(r1); \
353 mfmsr r11; \
354 rlwimi r11,r12,0,MSR_EE; \
355 mtmsrd r11,1
356
357#endif
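/* With DO_SOFT_DISABLE (iSeries), DISABLE_INTS only marks the paca as
 * soft-disabled (saving the old state in the frame's SOFTE word) and
 * then hard-enables MSR_EE, so that e.g. hash_page can spin on its lock
 * on a shared processor.  Without it, DISABLE_INTS is a no-op (EE is
 * already off on exception entry) and ENABLE_INTS copies the EE bit
 * back from the MSR saved in the frame.
 */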
358
359#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
360 .align 7; \
361 .globl label##_common; \
362label##_common: \
363 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
364 DISABLE_INTS; \
365 bl .save_nvgprs; \
366 addi r3,r1,STACK_FRAME_OVERHEAD; \
367 bl hdlr; \
368 b .ret_from_except
369
370#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
371 .align 7; \
372 .globl label##_common; \
373label##_common: \
374 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
375 DISABLE_INTS; \
376 addi r3,r1,STACK_FRAME_OVERHEAD; \
377 bl hdlr; \
378 b .ret_from_except_lite
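/* The _LITE variant skips save_nvgprs, so the non-volatile GPRs are not
 * copied into the frame, and it returns through ret_from_except_lite;
 * it is used for handlers, such as the decrementer, that do not need
 * the non-volatile registers in pt_regs.
 */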
379
380/*
381 * Start of pSeries system interrupt routines
382 */
383 . = 0x100
384 .globl __start_interrupts
385__start_interrupts:
386
387 STD_EXCEPTION_PSERIES(0x100, system_reset)
388
389 . = 0x200
390_machine_check_pSeries:
391 HMT_MEDIUM
392 mtspr SPRN_SPRG1,r13 /* save r13 */
393 RUNLATCH_ON(r13)
394 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
395
396 . = 0x300
397 .globl data_access_pSeries
398data_access_pSeries:
399 HMT_MEDIUM
400 mtspr SPRN_SPRG1,r13
401BEGIN_FTR_SECTION
402 mtspr SPRN_SPRG2,r12
403 mfspr r13,SPRN_DAR
404 mfspr r12,SPRN_DSISR
405 srdi r13,r13,60
406 rlwimi r13,r12,16,0x20
407 mfcr r12
408 cmpwi r13,0x2c
409 beq .do_stab_bolted_pSeries
410 mtcrf 0x80,r12
411 mfspr r12,SPRN_SPRG2
412END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
413 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
414
415 . = 0x380
416 .globl data_access_slb_pSeries
417data_access_slb_pSeries:
418 HMT_MEDIUM
419 mtspr SPRN_SPRG1,r13
420 RUNLATCH_ON(r13)
421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
422 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
423 std r10,PACA_EXSLB+EX_R10(r13)
424 std r11,PACA_EXSLB+EX_R11(r13)
425 std r12,PACA_EXSLB+EX_R12(r13)
426 std r3,PACA_EXSLB+EX_R3(r13)
427 mfspr r9,SPRN_SPRG1
428 std r9,PACA_EXSLB+EX_R13(r13)
429 mfcr r9
430 mfspr r12,SPRN_SRR1 /* and SRR1 */
431 mfspr r3,SPRN_DAR
432 b .do_slb_miss /* Rel. branch works in real mode */
433
434 STD_EXCEPTION_PSERIES(0x400, instruction_access)
435
436 . = 0x480
437 .globl instruction_access_slb_pSeries
438instruction_access_slb_pSeries:
439 HMT_MEDIUM
440 mtspr SPRN_SPRG1,r13
441 RUNLATCH_ON(r13)
442 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
444 std r10,PACA_EXSLB+EX_R10(r13)
445 std r11,PACA_EXSLB+EX_R11(r13)
446 std r12,PACA_EXSLB+EX_R12(r13)
447 std r3,PACA_EXSLB+EX_R3(r13)
448 mfspr r9,SPRN_SPRG1
449 std r9,PACA_EXSLB+EX_R13(r13)
450 mfcr r9
451 mfspr r12,SPRN_SRR1 /* and SRR1 */
452 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
453 b .do_slb_miss /* Rel. branch works in real mode */
454
455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
456 STD_EXCEPTION_PSERIES(0x600, alignment)
457 STD_EXCEPTION_PSERIES(0x700, program_check)
458 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
459 STD_EXCEPTION_PSERIES(0x900, decrementer)
460 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
461 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
462
463 . = 0xc00
464 .globl system_call_pSeries
465system_call_pSeries:
466 HMT_MEDIUM
467 RUNLATCH_ON(r9)
468 mr r9,r13
469 mfmsr r10
470 mfspr r13,SPRN_SPRG3
471 mfspr r11,SPRN_SRR0
472 clrrdi r12,r13,32
473 oris r12,r12,system_call_common@h
474 ori r12,r12,system_call_common@l
475 mtspr SPRN_SRR0,r12
476 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
477 mfspr r12,SPRN_SRR1
478 mtspr SPRN_SRR1,r10
479 rfid
480 b . /* prevent speculative execution */
481
482 STD_EXCEPTION_PSERIES(0xd00, single_step)
483 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
484
485	/* We need to deal with the Altivec unavailable exception
486	 * here, which is at 0xf20 and thus in the middle of the
487	 * prolog code of the PerformanceMonitor one. A little
488	 * trickery is thus necessary.
489 */
490 . = 0xf00
491 b performance_monitor_pSeries
492
493 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
494
495 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
496 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
497
498 . = 0x3000
499
500/*** pSeries interrupt support ***/
501
502 /* moved from 0xf00 */
503 STD_EXCEPTION_PSERIES(., performance_monitor)
504
505 .align 7
506_GLOBAL(do_stab_bolted_pSeries)
507 mtcrf 0x80,r12
508 mfspr r12,SPRN_SPRG2
509 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
510
511/*
512 * Vectors for the FWNMI option. Share common code.
513 */
514 .globl system_reset_fwnmi
515system_reset_fwnmi:
516 HMT_MEDIUM
517 mtspr SPRN_SPRG1,r13 /* save r13 */
518 RUNLATCH_ON(r13)
519 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
520
521 .globl machine_check_fwnmi
522machine_check_fwnmi:
523 HMT_MEDIUM
524 mtspr SPRN_SPRG1,r13 /* save r13 */
525 RUNLATCH_ON(r13)
526 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
527
528#ifdef CONFIG_PPC_ISERIES
529/*** ISeries-LPAR interrupt handlers ***/
530
531 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
532
533 .globl data_access_iSeries
534data_access_iSeries:
535 mtspr SPRN_SPRG1,r13
536BEGIN_FTR_SECTION
537 mtspr SPRN_SPRG2,r12
538 mfspr r13,SPRN_DAR
539 mfspr r12,SPRN_DSISR
540 srdi r13,r13,60
541 rlwimi r13,r12,16,0x20
542 mfcr r12
543 cmpwi r13,0x2c
544 beq .do_stab_bolted_iSeries
545 mtcrf 0x80,r12
546 mfspr r12,SPRN_SPRG2
547END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
548 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
549 EXCEPTION_PROLOG_ISERIES_2
550 b data_access_common
551
552.do_stab_bolted_iSeries:
553 mtcrf 0x80,r12
554 mfspr r12,SPRN_SPRG2
555 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
556 EXCEPTION_PROLOG_ISERIES_2
557 b .do_stab_bolted
558
559 .globl data_access_slb_iSeries
560data_access_slb_iSeries:
561 mtspr SPRN_SPRG1,r13 /* save r13 */
562 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
563 std r3,PACA_EXSLB+EX_R3(r13)
564 ld r12,PACALPPACA+LPPACASRR1(r13)
565 mfspr r3,SPRN_DAR
566 b .do_slb_miss
567
568 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
569
570 .globl instruction_access_slb_iSeries
571instruction_access_slb_iSeries:
572 mtspr SPRN_SPRG1,r13 /* save r13 */
573 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
574 std r3,PACA_EXSLB+EX_R3(r13)
575 ld r12,PACALPPACA+LPPACASRR1(r13)
576 ld r3,PACALPPACA+LPPACASRR0(r13)
577 b .do_slb_miss
578
579 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
580 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
581 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
582 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
583 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
584 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
585 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
586
587 .globl system_call_iSeries
588system_call_iSeries:
589 mr r9,r13
590 mfspr r13,SPRN_SPRG3
591 EXCEPTION_PROLOG_ISERIES_2
592 b system_call_common
593
594 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
595 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
596 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
597
598 .globl system_reset_iSeries
599system_reset_iSeries:
600 mfspr r13,SPRN_SPRG3 /* Get paca address */
601 mfmsr r24
602 ori r24,r24,MSR_RI
603 mtmsrd r24 /* RI on */
604 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
605 cmpwi 0,r24,0 /* Are we processor 0? */
606 beq .__start_initialization_iSeries /* Start up the first processor */
607 mfspr r4,SPRN_CTRLF
608 li r5,CTRL_RUNLATCH /* Turn off the run light */
609 andc r4,r4,r5
610 mtspr SPRN_CTRLT,r4
611
6121:
613 HMT_LOW
614#ifdef CONFIG_SMP
615 lbz r23,PACAPROCSTART(r13) /* Test if this processor
616 * should start */
617 sync
618 LOADADDR(r3,current_set)
619 sldi r28,r24,3 /* get current_set[cpu#] */
620 ldx r3,r3,r28
621 addi r1,r3,THREAD_SIZE
622 subi r1,r1,STACK_FRAME_OVERHEAD
623
624 cmpwi 0,r23,0
625 beq iSeries_secondary_smp_loop /* Loop until told to go */
626	bne	.__secondary_start		/* We have been told to start */
627iSeries_secondary_smp_loop:
628 /* Let the Hypervisor know we are alive */
629 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
630 lis r3,0x8002
631	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
632#else /* CONFIG_SMP */
633 /* Yield the processor. This is required for non-SMP kernels
634 which are running on multi-threaded machines. */
635 lis r3,0x8000
636 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
637 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
638 li r4,0 /* "yield timed" */
639 li r5,-1 /* "yield forever" */
640#endif /* CONFIG_SMP */
641 li r0,-1 /* r0=-1 indicates a Hypervisor call */
642 sc /* Invoke the hypervisor via a system call */
643 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
644 b 1b /* If SMP not configured, secondaries
645 * loop forever */
646
647 .globl decrementer_iSeries_masked
648decrementer_iSeries_masked:
649 li r11,1
650 stb r11,PACALPPACA+LPPACADECRINT(r13)
651 lwz r12,PACADEFAULTDECR(r13)
652 mtspr SPRN_DEC,r12
653 /* fall through */
654
655 .globl hardware_interrupt_iSeries_masked
656hardware_interrupt_iSeries_masked:
657 mtcrf 0x80,r9 /* Restore regs */
658 ld r11,PACALPPACA+LPPACASRR0(r13)
659 ld r12,PACALPPACA+LPPACASRR1(r13)
660 mtspr SPRN_SRR0,r11
661 mtspr SPRN_SRR1,r12
662 ld r9,PACA_EXGEN+EX_R9(r13)
663 ld r10,PACA_EXGEN+EX_R10(r13)
664 ld r11,PACA_EXGEN+EX_R11(r13)
665 ld r12,PACA_EXGEN+EX_R12(r13)
666 ld r13,PACA_EXGEN+EX_R13(r13)
667 rfid
668 b . /* prevent speculative execution */
669#endif /* CONFIG_PPC_ISERIES */
670
671/*** Common interrupt handlers ***/
672
673 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
674
675 /*
676 * Machine check is different because we use a different
677 * save area: PACA_EXMC instead of PACA_EXGEN.
678 */
679 .align 7
680 .globl machine_check_common
681machine_check_common:
682 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
683 DISABLE_INTS
684 bl .save_nvgprs
685 addi r3,r1,STACK_FRAME_OVERHEAD
686 bl .machine_check_exception
687 b .ret_from_except
688
689 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
690 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
691 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
692 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
693 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
694 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
695 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
696#ifdef CONFIG_ALTIVEC
697 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
698#else
699 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
700#endif
701
702/*
703 * Here we have detected that the kernel stack pointer is bad.
704 * R9 contains the saved CR, r13 points to the paca,
705 * r10 contains the (bad) kernel stack pointer,
706 * r11 and r12 contain the saved SRR0 and SRR1.
707 * We switch to using an emergency stack, save the registers there,
708 * and call kernel_bad_stack(), which panics.
709 */
710bad_stack:
711 ld r1,PACAEMERGSP(r13)
712 subi r1,r1,64+INT_FRAME_SIZE
713 std r9,_CCR(r1)
714 std r10,GPR1(r1)
715 std r11,_NIP(r1)
716 std r12,_MSR(r1)
717 mfspr r11,SPRN_DAR
718 mfspr r12,SPRN_DSISR
719 std r11,_DAR(r1)
720 std r12,_DSISR(r1)
721 mflr r10
722 mfctr r11
723 mfxer r12
724 std r10,_LINK(r1)
725 std r11,_CTR(r1)
726 std r12,_XER(r1)
727 SAVE_GPR(0,r1)
728 SAVE_GPR(2,r1)
729 SAVE_4GPRS(3,r1)
730 SAVE_2GPRS(7,r1)
731 SAVE_10GPRS(12,r1)
732 SAVE_10GPRS(22,r1)
733 addi r11,r1,INT_FRAME_SIZE
734 std r11,0(r1)
735 li r12,0
736 std r12,0(r11)
737 ld r2,PACATOC(r13)
7381: addi r3,r1,STACK_FRAME_OVERHEAD
739 bl .kernel_bad_stack
740 b 1b
741
742/*
743 * Return from an exception with minimal checks.
744 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
745 * If interrupts have been enabled, or anything has been
746 * done that might have changed the scheduling status of
747 * any task or sent any task a signal, you should use
748 * ret_from_except or ret_from_except_lite instead of this.
749 */
750 .globl fast_exception_return
751fast_exception_return:
752 ld r12,_MSR(r1)
753 ld r11,_NIP(r1)
754 andi. r3,r12,MSR_RI /* check if RI is set */
755 beq- unrecov_fer
756 ld r3,_CCR(r1)
757 ld r4,_LINK(r1)
758 ld r5,_CTR(r1)
759 ld r6,_XER(r1)
760 mtcr r3
761 mtlr r4
762 mtctr r5
763 mtxer r6
764 REST_GPR(0, r1)
765 REST_8GPRS(2, r1)
766
767 mfmsr r10
768 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
769 mtmsrd r10,1
770
771 mtspr SPRN_SRR1,r12
772 mtspr SPRN_SRR0,r11
773 REST_4GPRS(10, r1)
774 ld r1,GPR1(r1)
775 rfid
776 b . /* prevent speculative execution */
777
778unrecov_fer:
779 bl .save_nvgprs
7801: addi r3,r1,STACK_FRAME_OVERHEAD
781 bl .unrecoverable_exception
782 b 1b
783
784/*
785 * Here r13 points to the paca, r9 contains the saved CR,
786 * SRR0 and SRR1 are saved in r11 and r12,
787 * r9 - r13 are saved in paca->exgen.
788 */
789 .align 7
790 .globl data_access_common
791data_access_common:
792	RUNLATCH_ON(r10)		/* It won't fit in the 0x300 handler */
793 mfspr r10,SPRN_DAR
794 std r10,PACA_EXGEN+EX_DAR(r13)
795 mfspr r10,SPRN_DSISR
796 stw r10,PACA_EXGEN+EX_DSISR(r13)
797 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
798 ld r3,PACA_EXGEN+EX_DAR(r13)
799 lwz r4,PACA_EXGEN+EX_DSISR(r13)
800 li r5,0x300
801 b .do_hash_page /* Try to handle as hpte fault */
802
803 .align 7
804 .globl instruction_access_common
805instruction_access_common:
806 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
807 ld r3,_NIP(r1)
808 andis. r4,r12,0x5820
809 li r5,0x400
810 b .do_hash_page /* Try to handle as hpte fault */
811
812 .align 7
813 .globl hardware_interrupt_common
814 .globl hardware_interrupt_entry
815hardware_interrupt_common:
816 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
817hardware_interrupt_entry:
818 DISABLE_INTS
819 addi r3,r1,STACK_FRAME_OVERHEAD
820 bl .do_IRQ
821 b .ret_from_except_lite
822
823 .align 7
824 .globl alignment_common
825alignment_common:
826 mfspr r10,SPRN_DAR
827 std r10,PACA_EXGEN+EX_DAR(r13)
828 mfspr r10,SPRN_DSISR
829 stw r10,PACA_EXGEN+EX_DSISR(r13)
830 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
831 ld r3,PACA_EXGEN+EX_DAR(r13)
832 lwz r4,PACA_EXGEN+EX_DSISR(r13)
833 std r3,_DAR(r1)
834 std r4,_DSISR(r1)
835 bl .save_nvgprs
836 addi r3,r1,STACK_FRAME_OVERHEAD
837 ENABLE_INTS
838 bl .alignment_exception
839 b .ret_from_except
840
841 .align 7
842 .globl program_check_common
843program_check_common:
844 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
845 bl .save_nvgprs
846 addi r3,r1,STACK_FRAME_OVERHEAD
847 ENABLE_INTS
848 bl .program_check_exception
849 b .ret_from_except
850
851 .align 7
852 .globl fp_unavailable_common
853fp_unavailable_common:
854 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
855 bne .load_up_fpu /* if from user, just load it up */
856 bl .save_nvgprs
857 addi r3,r1,STACK_FRAME_OVERHEAD
858 ENABLE_INTS
859 bl .kernel_fp_unavailable_exception
860 BUG_OPCODE
861
862 .align 7
863 .globl altivec_unavailable_common
864altivec_unavailable_common:
865 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
866#ifdef CONFIG_ALTIVEC
867BEGIN_FTR_SECTION
868 bne .load_up_altivec /* if from user, just load it up */
869END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
870#endif
871 bl .save_nvgprs
872 addi r3,r1,STACK_FRAME_OVERHEAD
873 ENABLE_INTS
874 bl .altivec_unavailable_exception
875 b .ret_from_except
876
877#ifdef CONFIG_ALTIVEC
878/*
879 * load_up_altivec(unused, unused, tsk)
880 * Disable VMX for the task which had it previously,
881 * and save its vector registers in its thread_struct.
882 * Enables the VMX for use in the kernel on return.
883 * On SMP we know the VMX is free, since we give it up every
884 * switch (ie, no lazy save of the vector registers).
885 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
886 */
887_STATIC(load_up_altivec)
888 mfmsr r5 /* grab the current MSR */
889 oris r5,r5,MSR_VEC@h
890 mtmsrd r5 /* enable use of VMX now */
891 isync
892
893/*
894 * For SMP, we don't do lazy VMX switching because it just gets too
895 * horrendously complex, especially when a task switches from one CPU
896 * to another.  Instead we call giveup_altivec in switch_to.
897 * VRSAVE isn't dealt with here, that is done in the normal context
898 * switch code. Note that we could rely on vrsave value to eventually
899 * avoid saving all of the VREGs here...
900 */
901#ifndef CONFIG_SMP
902 ld r3,last_task_used_altivec@got(r2)
903 ld r4,0(r3)
904 cmpdi 0,r4,0
905 beq 1f
906 /* Save VMX state to last_task_used_altivec's THREAD struct */
907 addi r4,r4,THREAD
908 SAVE_32VRS(0,r5,r4)
909 mfvscr vr0
910 li r10,THREAD_VSCR
911 stvx vr0,r10,r4
912 /* Disable VMX for last_task_used_altivec */
913 ld r5,PT_REGS(r4)
914 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
915 lis r6,MSR_VEC@h
916 andc r4,r4,r6
917 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9181:
919#endif /* CONFIG_SMP */
920 /* Hack: if we get an altivec unavailable trap with VRSAVE
921 * set to all zeros, we assume this is a broken application
922 * that fails to set it properly, and thus we switch it to
923 * all 1's
924 */
925 mfspr r4,SPRN_VRSAVE
926 cmpdi 0,r4,0
927 bne+ 1f
928 li r4,-1
929 mtspr SPRN_VRSAVE,r4
9301:
931 /* enable use of VMX after return */
932 ld r4,PACACURRENT(r13)
933 addi r5,r4,THREAD /* Get THREAD */
934 oris r12,r12,MSR_VEC@h
935 std r12,_MSR(r1)
936 li r4,1
937 li r10,THREAD_VSCR
938 stw r4,THREAD_USED_VR(r5)
939 lvx vr0,r10,r5
940 mtvscr vr0
941 REST_32VRS(0,r4,r5)
942#ifndef CONFIG_SMP
943	/* Update last_task_used_altivec to 'current' */
944 subi r4,r5,THREAD /* Back to 'current' */
945 std r4,0(r3)
946#endif /* CONFIG_SMP */
947 /* restore registers and return */
948 b fast_exception_return
949#endif /* CONFIG_ALTIVEC */
950
951/*
952 * Hash table stuff
953 */
954 .align 7
955_GLOBAL(do_hash_page)
956 std r3,_DAR(r1)
957 std r4,_DSISR(r1)
958
959 andis. r0,r4,0xa450 /* weird error? */
960 bne- .handle_page_fault /* if not, try to insert a HPTE */
961BEGIN_FTR_SECTION
962 andis. r0,r4,0x0020 /* Is it a segment table fault? */
963 bne- .do_ste_alloc /* If so handle it */
964END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
965
966 /*
967 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
968 * accessing a userspace segment (even from the kernel). We assume
969 * kernel addresses always have the high bit set.
970 */
971 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
972 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
973 orc r0,r12,r0 /* MSR_PR | ~high_bit */
974 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
975 ori r4,r4,1 /* add _PAGE_PRESENT */
976 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
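	/* r4 now holds the access flags (_PAGE_RW/_PAGE_USER/_PAGE_PRESENT,
	 * plus _PAGE_EXEC for 0x400 traps) that hash_page expects, derived
	 * from DSISR, the effective address, MSR_PR and the trap number.
	 */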
977
978 /*
979 * On iSeries, we soft-disable interrupts here, then
980 * hard-enable interrupts so that the hash_page code can spin on
981 * the hash_table_lock without problems on a shared processor.
982 */
983 DISABLE_INTS
984
985 /*
986 * r3 contains the faulting address
987 * r4 contains the required access permissions
988 * r5 contains the trap number
989 *
990 * at return r3 = 0 for success
991 */
992 bl .hash_page /* build HPTE if possible */
993 cmpdi r3,0 /* see if hash_page succeeded */
994
995#ifdef DO_SOFT_DISABLE
996 /*
997 * If we had interrupts soft-enabled at the point where the
998 * DSI/ISI occurred, and an interrupt came in during hash_page,
999 * handle it now.
1000 * We jump to ret_from_except_lite rather than fast_exception_return
1001 * because ret_from_except_lite will check for and handle pending
1002 * interrupts if necessary.
1003 */
1004 beq .ret_from_except_lite
1005 /* For a hash failure, we don't bother re-enabling interrupts */
1006 ble- 12f
1007
1008 /*
1009 * hash_page couldn't handle it, set soft interrupt enable back
1010 * to what it was before the trap. Note that .local_irq_restore
1011 * handles any interrupts pending at this point.
1012 */
1013 ld r3,SOFTE(r1)
1014 bl .local_irq_restore
1015 b 11f
1016#else
1017 beq fast_exception_return /* Return from exception on success */
1018 ble- 12f /* Failure return from hash_page */
1019
1020 /* fall through */
1021#endif
1022
1023/* Here we have a page fault that hash_page can't handle. */
1024_GLOBAL(handle_page_fault)
1025 ENABLE_INTS
102611: ld r4,_DAR(r1)
1027 ld r5,_DSISR(r1)
1028 addi r3,r1,STACK_FRAME_OVERHEAD
1029 bl .do_page_fault
1030 cmpdi r3,0
1031 beq+ .ret_from_except_lite
1032 bl .save_nvgprs
1033 mr r5,r3
1034 addi r3,r1,STACK_FRAME_OVERHEAD
1035 lwz r4,_DAR(r1)
1036 bl .bad_page_fault
1037 b .ret_from_except
1038
1039/* We have a page fault that hash_page could handle but HV refused
1040 * the PTE insertion
1041 */
104212: bl .save_nvgprs
1043 addi r3,r1,STACK_FRAME_OVERHEAD
1044 lwz r4,_DAR(r1)
1045 bl .low_hash_fault
1046 b .ret_from_except
1047
1048 /* here we have a segment miss */
1049_GLOBAL(do_ste_alloc)
1050 bl .ste_allocate /* try to insert stab entry */
1051 cmpdi r3,0
1052 beq+ fast_exception_return
1053 b .handle_page_fault
1054
1055/*
1056 * r13 points to the PACA, r9 contains the saved CR,
1057 * r11 and r12 contain the saved SRR0 and SRR1.
1058 * r9 - r13 are saved in paca->exslb.
1059 * We assume we aren't going to take any exceptions during this procedure.
1060 * We assume (DAR >> 60) == 0xc.
1061 */
1062 .align 7
1063_GLOBAL(do_stab_bolted)
1064 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1065 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1066
1067 /* Hash to the primary group */
1068 ld r10,PACASTABVIRT(r13)
1069 mfspr r11,SPRN_DAR
1070 srdi r11,r11,28
1071 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1072
1073 /* Calculate VSID */
1074 /* This is a kernel address, so protovsid = ESID */
1075 ASM_VSID_SCRAMBLE(r11, r9)
1076 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1077
1078 /* Search the primary group for a free entry */
10791: ld r11,0(r10) /* Test valid bit of the current ste */
1080 andi. r11,r11,0x80
1081 beq 2f
1082 addi r10,r10,16
1083 andi. r11,r10,0x70
1084 bne 1b
1085
1086	/* Stick to only searching the primary group for now. */
1087 /* At least for now, we use a very simple random castout scheme */
1088 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1089 mftb r11
1090 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1091 ori r11,r11,0x10
1092
1093 /* r10 currently points to an ste one past the group of interest */
1094 /* make it point to the randomly selected entry */
1095 subi r10,r10,128
1096 or r10,r10,r11 /* r10 is the entry to invalidate */
1097
1098 isync /* mark the entry invalid */
1099 ld r11,0(r10)
1100 rldicl r11,r11,56,1 /* clear the valid bit */
1101 rotldi r11,r11,8
1102 std r11,0(r10)
1103 sync
1104
1105 clrrdi r11,r11,28 /* Get the esid part of the ste */
1106 slbie r11
1107
11082: std r9,8(r10) /* Store the vsid part of the ste */
1109 eieio
1110
1111 mfspr r11,SPRN_DAR /* Get the new esid */
1112 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1113 ori r11,r11,0x90 /* Turn on valid and kp */
1114 std r11,0(r10) /* Put new entry back into the stab */
1115
1116 sync
1117
1118 /* All done -- return from exception. */
1119 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1120 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1121
1122 andi. r10,r12,MSR_RI
1123 beq- unrecov_slb
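	/* If MSR[RI] was clear in the interrupted context we cannot return
	 * from here safely, so treat the fault as unrecoverable.
	 */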
1124
1125 mtcrf 0x80,r9 /* restore CR */
1126
1127 mfmsr r10
1128 clrrdi r10,r10,2
1129 mtmsrd r10,1
1130
1131 mtspr SPRN_SRR0,r11
1132 mtspr SPRN_SRR1,r12
1133 ld r9,PACA_EXSLB+EX_R9(r13)
1134 ld r10,PACA_EXSLB+EX_R10(r13)
1135 ld r11,PACA_EXSLB+EX_R11(r13)
1136 ld r12,PACA_EXSLB+EX_R12(r13)
1137 ld r13,PACA_EXSLB+EX_R13(r13)
1138 rfid
1139 b . /* prevent speculative execution */
1140
1141/*
1142 * r13 points to the PACA, r9 contains the saved CR,
1143 * r11 and r12 contain the saved SRR0 and SRR1.
1144 * r3 has the faulting address
1145 * r9 - r13 are saved in paca->exslb.
1146 * r3 is saved in paca->slb_r3
1147 * We assume we aren't going to take any exceptions during this procedure.
1148 */
1149_GLOBAL(do_slb_miss)
1150 mflr r10
1151
1152 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1153 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1154
1155 bl .slb_allocate /* handle it */
1156
1157 /* All done -- return from exception. */
1158
1159 ld r10,PACA_EXSLB+EX_LR(r13)
1160 ld r3,PACA_EXSLB+EX_R3(r13)
1161 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1162#ifdef CONFIG_PPC_ISERIES
1163 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1164#endif /* CONFIG_PPC_ISERIES */
1165
1166 mtlr r10
1167
1168 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1169 beq- unrecov_slb
1170
1171.machine push
1172.machine "power4"
1173 mtcrf 0x80,r9
1174 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1175.machine pop
1176
1177#ifdef CONFIG_PPC_ISERIES
1178 mtspr SPRN_SRR0,r11
1179 mtspr SPRN_SRR1,r12
1180#endif /* CONFIG_PPC_ISERIES */
1181 ld r9,PACA_EXSLB+EX_R9(r13)
1182 ld r10,PACA_EXSLB+EX_R10(r13)
1183 ld r11,PACA_EXSLB+EX_R11(r13)
1184 ld r12,PACA_EXSLB+EX_R12(r13)
1185 ld r13,PACA_EXSLB+EX_R13(r13)
1186 rfid
1187 b . /* prevent speculative execution */
1188
1189unrecov_slb:
1190 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1191 DISABLE_INTS
1192 bl .save_nvgprs
11931: addi r3,r1,STACK_FRAME_OVERHEAD
1194 bl .unrecoverable_exception
1195 b 1b
1196
1197/*
1198 * Space for CPU0's segment table.
1199 *
1200 * On iSeries, the hypervisor must fill in at least one entry before
1201 * we get control (with relocation on). The address is given to the hv
1202 * as a page number (see xLparMap in lpardata.c), so this must be at a
1203 * fixed address (the linker can't compute (u64)&initial_stab >>
1204 * PAGE_SHIFT).
1205 */
1206 . = STAB0_PHYS_ADDR /* 0x6000 */
1207 .globl initial_stab
1208initial_stab:
1209 .space 4096
1210
1211/*
1212 * Data area reserved for FWNMI option.
1213 * This address (0x7000) is fixed by the RPA.
1214 */
1215	. = 0x7000
1216 .globl fwnmi_data_area
1217fwnmi_data_area:
1218
1219 /* iSeries does not use the FWNMI stuff, so it is safe to put
1220 * this here, even if we later allow kernels that will boot on
1221 * both pSeries and iSeries */
1222#ifdef CONFIG_PPC_ISERIES
1223 . = LPARMAP_PHYS
1224#include "lparmap.s"
1225/*
1226 * This ".text" is here for old compilers that generate a trailing
1227 * .note section when compiling .c files to .s
1228 */
1229 .text
1230#endif /* CONFIG_PPC_ISERIES */
1231
1232 . = 0x8000
1233
1234/*
1235 * On pSeries, secondary processors spin in the following code.
1236 * At entry, r3 = this processor's number (physical cpu id)
1237 */
1238_GLOBAL(pSeries_secondary_smp_init)
1239 mr r24,r3
1240
1241 /* turn on 64-bit mode */
1242 bl .enable_64b_mode
1243 isync
1244
1245 /* Copy some CPU settings from CPU 0 */
1246 bl .__restore_cpu_setup
1247
1248 /* Set up a paca value for this processor. Since we have the
1249 * physical cpu id in r24, we need to search the pacas to find
1250 * which logical id maps to our physical one.
1251 */
1252 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1253 li r5,0 /* logical cpu id */
12541: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1255 cmpw r6,r24 /* Compare to our id */
1256 beq 2f
1257 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1258 addi r5,r5,1
1259 cmpwi r5,NR_CPUS
1260 blt 1b
1261
1262 mr r3,r24 /* not found, copy phys to r3 */
1263 b .kexec_wait /* next kernel might do better */
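	/* The search above, roughly in C (illustrative; the hw cpu id field
	 * name is assumed):
	 *	for (cpu = 0; cpu < NR_CPUS; cpu++)
	 *		if (paca[cpu].hw_cpu_id == phys_id)
	 *			break;
	 *	if (cpu == NR_CPUS)
	 *		kexec_wait(phys_id);
	 */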
1264
12652: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1266 /* From now on, r24 is expected to be logical cpuid */
1267 mr r24,r5
12683: HMT_LOW
1269 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1270 /* start. */
1271 sync
1272
1273 /* Create a temp kernel stack for use before relocation is on. */
1274 ld r1,PACAEMERGSP(r13)
1275 subi r1,r1,STACK_FRAME_OVERHEAD
1276
1277 cmpwi 0,r23,0
1278#ifdef CONFIG_SMP
1279 bne .__secondary_start
1280#endif
1281 b 3b /* Loop until told to go */
1282
1283#ifdef CONFIG_PPC_ISERIES
1284_STATIC(__start_initialization_iSeries)
1285 /* Clear out the BSS */
1286 LOADADDR(r11,__bss_stop)
1287 LOADADDR(r8,__bss_start)
1288 sub r11,r11,r8 /* bss size */
1289 addi r11,r11,7 /* round up to an even double word */
1290 rldicl. r11,r11,61,3 /* shift right by 3 */
1291 beq 4f
1292 addi r8,r8,-8
1293 li r0,0
1294 mtctr r11 /* zero this many doublewords */
12953: stdu r0,8(r8)
1296 bdnz 3b
12974:
1298 LOADADDR(r1,init_thread_union)
1299 addi r1,r1,THREAD_SIZE
1300 li r0,0
1301 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1302
1303 LOADADDR(r3,cpu_specs)
1304 LOADADDR(r4,cur_cpu_spec)
1305 li r5,0
1306 bl .identify_cpu
1307
1308 LOADADDR(r2,__toc_start)
1309 addi r2,r2,0x4000
1310 addi r2,r2,0x4000
1311
1312 bl .iSeries_early_setup
1313 bl .early_setup
1314
1315 /* relocation is on at this point */
1316
1317 b .start_here_common
1318#endif /* CONFIG_PPC_ISERIES */
1319
1320#ifdef CONFIG_PPC_MULTIPLATFORM
1321
1322_STATIC(__mmu_off)
1323 mfmsr r3
1324 andi. r0,r3,MSR_IR|MSR_DR
1325 beqlr
1326 andc r3,r3,r0
1327 mtspr SPRN_SRR0,r4
1328 mtspr SPRN_SRR1,r3
1329 sync
1330 rfid
1331 b . /* prevent speculative execution */
1332
1333
1334/*
1335 * Here is our main kernel entry point. We currently support 2 kinds of entries
1336 * depending on the value of r5.
1337 *
1338 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1339 * in r3...r7
1340 *
1341 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1342 * DT block, r4 is a physical pointer to the kernel itself
1343 *
1344 */
1345_GLOBAL(__start_initialization_multiplatform)
1346 /*
1347	 * Are we booted from a PROM OF-type client interface?
1348 */
1349 cmpldi cr0,r5,0
1350 bne .__boot_from_prom /* yes -> prom */
1351
1352 /* Save parameters */
1353 mr r31,r3
1354 mr r30,r4
1355
1356	/* Make sure we are running in 64-bit mode */
1357 bl .enable_64b_mode
1358
1359 /* Setup some critical 970 SPRs before switching MMU off */
1360 bl .__970_cpu_preinit
1361
1362 /* cpu # */
1363 li r24,0
1364
1365 /* Switch off MMU if not already */
1366 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1367 add r4,r4,r30
1368 bl .__mmu_off
1369 b .__after_prom_start
1370
1371_STATIC(__boot_from_prom)
1372 /* Save parameters */
1373 mr r31,r3
1374 mr r30,r4
1375 mr r29,r5
1376 mr r28,r6
1377 mr r27,r7
1378
1379	/* Make sure we are running in 64-bit mode */
1380 bl .enable_64b_mode
1381
1382 /* put a relocation offset into r3 */
1383 bl .reloc_offset
1384
1385 LOADADDR(r2,__toc_start)
1386 addi r2,r2,0x4000
1387 addi r2,r2,0x4000
1388
1389 /* Relocate the TOC from a virt addr to a real addr */
1390 add r2,r2,r3
1391
1392 /* Restore parameters */
1393 mr r3,r31
1394 mr r4,r30
1395 mr r5,r29
1396 mr r6,r28
1397 mr r7,r27
1398
1399 /* Do all of the interaction with OF client interface */
1400 bl .prom_init
1401 /* We never return */
1402 trap
1403
1404/*
1405 * At this point, r3 contains the physical address we are running at,
1406 * returned by prom_init()
1407 */
1408_STATIC(__after_prom_start)
1409
1410/*
1411 * We need to run with __start at physical address 0.
1412 * This will leave some code in the first 256B of
1413 * real memory, which is reserved for software use.
1414 * The remainder of the first page is loaded with the fixed
1415 * interrupt vectors. The next two pages are filled with
1416 * unknown exception placeholders.
1417 *
1418 * Note: This process overwrites the OF exception vectors.
1419 * r26 == relocation offset
1420 * r27 == KERNELBASE
1421 */
1422 bl .reloc_offset
1423 mr r26,r3
1424 SET_REG_TO_CONST(r27,KERNELBASE)
1425
1426 li r3,0 /* target addr */
1427
1428 // XXX FIXME: Use phys returned by OF (r30)
1429 add r4,r27,r26 /* source addr */
1430 /* current address of _start */
1431 /* i.e. where we are running */
1432 /* the source addr */
1433
1434 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1435 sub r5,r5,r27
1436
1437 li r6,0x100 /* Start offset, the first 0x100 */
1438 /* bytes were copied earlier. */
1439
1440 bl .copy_and_flush /* copy the first n bytes */
1441 /* this includes the code being */
1442 /* executed here. */
1443
1444 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1445 mtctr r0 /* that we just made/relocated */
1446 bctr
1447
14484: LOADADDR(r5,klimit)
1449 add r5,r5,r26
1450 ld r5,0(r5) /* get the value of klimit */
1451 sub r5,r5,r27
1452 bl .copy_and_flush /* copy the rest */
1453 b .start_here_multiplatform
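	/* Net effect (illustrative): the image is copied down so that _start
	 * ends up at physical address 0; the first chunk (up to copy_to_here)
	 * contains the code running here, so we branch into the copy before
	 * copying the remainder up to klimit.
	 */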
1454
1455#endif /* CONFIG_PPC_MULTIPLATFORM */
1456
1457/*
1458 * Copy routine used to copy the kernel to start at physical address 0
1459 * and flush and invalidate the caches as needed.
1460 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1461 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1462 *
1463 * Note: this routine *only* clobbers r0, r6 and lr
1464 */
1465_GLOBAL(copy_and_flush)
1466 addi r5,r5,-8
1467 addi r6,r6,-8
14684: li r0,16 /* Use the least common */
1469 /* denominator cache line */
1470 /* size. This results in */
1471 /* extra cache line flushes */
1472 /* but operation is correct. */
1473 /* Can't get cache line size */
1474 /* from NACA as it is being */
1475 /* moved too. */
1476
1477 mtctr r0 /* put # words/line in ctr */
14783: addi r6,r6,8 /* copy a cache line */
1479 ldx r0,r6,r4
1480 stdx r0,r6,r3
1481 bdnz 3b
1482 dcbst r6,r3 /* write it to memory */
1483 sync
1484 icbi r6,r3 /* flush the icache line */
1485 cmpld 0,r6,r5
1486 blt 4b
1487 sync
1488 addi r5,r5,8
1489 addi r6,r6,8
1490 blr
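/* copy_and_flush, sketched in C (illustrative only; 16 doublewords are
 * copied per iteration, then the destination cache line is flushed):
 *	while (offset < limit) {
 *		for (i = 0; i < 16; i++, offset += 8)
 *			*(u64 *)(dest + offset) = *(u64 *)(src + offset);
 *		dcbst(dest + offset); sync(); icbi(dest + offset);
 *	}
 */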
1491
1492.align 8
1493copy_to_here:
1494
1495#ifdef CONFIG_SMP
1496#ifdef CONFIG_PPC_PMAC
1497/*
1498 * On PowerMac, secondary processors start from the reset vector, which
1499 * is temporarily turned into a call to one of the functions below.
1500 */
1501 .section ".text";
1502 .align 2 ;
1503
1504 .globl __secondary_start_pmac_0
1505__secondary_start_pmac_0:
1506 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1507 li r24,0
1508 b 1f
1509 li r24,1
1510 b 1f
1511 li r24,2
1512 b 1f
1513 li r24,3
15141:
1515
1516_GLOBAL(pmac_secondary_start)
1517 /* turn on 64-bit mode */
1518 bl .enable_64b_mode
1519 isync
1520
1521 /* Copy some CPU settings from CPU 0 */
1522 bl .__restore_cpu_setup
1523
1524	/* pSeries does this early, though I don't think we really need it */
1525 mfmsr r3
1526 ori r3,r3,MSR_RI
1527 mtmsrd r3 /* RI on */
1528
1529 /* Set up a paca value for this processor. */
1530 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1531 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1532 add r13,r13,r4 /* for this processor. */
1533 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1534
1535 /* Create a temp kernel stack for use before relocation is on. */
1536 ld r1,PACAEMERGSP(r13)
1537 subi r1,r1,STACK_FRAME_OVERHEAD
1538
1539 b .__secondary_start
1540
1541#endif /* CONFIG_PPC_PMAC */
1542
1543/*
1544 * This function is called after the master CPU has released the
1545 * secondary processors. The execution environment is relocation off.
1546 * The paca for this processor has the following fields initialized at
1547 * this point:
1548 * 1. Processor number
1549 * 2. Segment table pointer (virtual address)
1550 * On entry the following are set:
1551 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1552 * r24 = cpu# (in Linux terms)
1553 * r13 = paca virtual address
1554 * SPRG3 = paca virtual address
1555 */
1556_GLOBAL(__secondary_start)
1557
1558 HMT_MEDIUM /* Set thread priority to MEDIUM */
1559
1560 ld r2,PACATOC(r13)
1561 li r6,0
1562 stb r6,PACAPROCENABLED(r13)
1563
1564#ifndef CONFIG_PPC_ISERIES
1565 /* Initialize the page table pointer register. */
1566 LOADADDR(r6,_SDR1)
1567 ld r6,0(r6) /* get the value of _SDR1 */
1568 mtspr SPRN_SDR1,r6 /* set the htab location */
1569#endif
1570 /* Initialize the first segment table (or SLB) entry */
1571 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1572 bl .stab_initialize
1573
1574 /* Initialize the kernel stack. Just a repeat for iSeries. */
1575 LOADADDR(r3,current_set)
1576 sldi r28,r24,3 /* get current_set[cpu#] */
1577 ldx r1,r3,r28
1578 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1579 std r1,PACAKSAVE(r13)
1580
1581 ld r3,PACASTABREAL(r13) /* get raddr of segment table */
1582 ori r4,r3,1 /* turn on valid bit */
1583
1584#ifdef CONFIG_PPC_ISERIES
1585 li r0,-1 /* hypervisor call */
1586 li r3,1
1587 sldi r3,r3,63 /* 0x8000000000000000 */
1588 ori r3,r3,4 /* 0x8000000000000004 */
1589 sc /* HvCall_setASR */
1590#else
1591 /* set the ASR */
1592 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1593 ld r3,0(r3)
1594 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1595 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1596 beq 98f /* branch if result is 0 */
1597 mfspr r3,SPRN_PVR
1598 srwi r3,r3,16
1599 cmpwi r3,0x37 /* SStar */
1600 beq 97f
1601 cmpwi r3,0x36 /* IStar */
1602 beq 97f
1603 cmpwi r3,0x34 /* Pulsar */
1604 bne 98f
160597: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1606 HVSC /* Invoking hcall */
1607 b 99f
160898: /* !(rpa hypervisor) || !(star) */
1609 mtasr r4 /* set the stab location */
161099:
1611#endif
1612 li r7,0
1613 mtlr r7
1614
1615 /* enable MMU and jump to start_secondary */
1616 LOADADDR(r3,.start_secondary_prolog)
1617 SET_REG_TO_CONST(r4, MSR_KERNEL)
1618#ifdef DO_SOFT_DISABLE
1619 ori r4,r4,MSR_EE
1620#endif
1621 mtspr SPRN_SRR0,r3
1622 mtspr SPRN_SRR1,r4
1623 rfid
1624 b . /* prevent speculative execution */
1625
1626/*
1627 * Running with relocation on at this point. All we want to do is
1628 * zero the stack back-chain pointer before going into C code.
1629 */
1630_GLOBAL(start_secondary_prolog)
1631 li r3,0
1632 std r3,0(r1) /* Zero the stack frame pointer */
1633 bl .start_secondary
1634#endif
1635
1636/*
1637 * This subroutine clobbers r11 and r12
1638 */
1639_GLOBAL(enable_64b_mode)
1640 mfmsr r11 /* grab the current MSR */
1641 li r12,1
1642 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1643 or r11,r11,r12
1644 li r12,1
1645 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1646 or r11,r11,r12
1647 mtmsrd r11
1648 isync
1649 blr
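	/* Equivalent operation (illustrative): msr |= MSR_SF | MSR_ISF, then
	 * mtmsrd(msr), i.e. 64-bit mode for the kernel and for interrupt entry.
	 */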
1650
1651#ifdef CONFIG_PPC_MULTIPLATFORM
1652/*
1653 * This is where the main kernel code starts.
1654 */
1655_STATIC(start_here_multiplatform)
1656 /* get a new offset, now that the kernel has moved. */
1657 bl .reloc_offset
1658 mr r26,r3
1659
1660 /* Clear out the BSS. It may have been done in prom_init,
1661 * already but that's irrelevant since prom_init will soon
1662 * be detached from the kernel completely. Besides, we need
1663 * to clear it now for kexec-style entry.
1664 */
1665 LOADADDR(r11,__bss_stop)
1666 LOADADDR(r8,__bss_start)
1667 sub r11,r11,r8 /* bss size */
1668 addi r11,r11,7 /* round up to an even double word */
1669 rldicl. r11,r11,61,3 /* shift right by 3 */
1670 beq 4f
1671 addi r8,r8,-8
1672 li r0,0
1673 mtctr r11 /* zero this many doublewords */
16743: stdu r0,8(r8)
1675 bdnz 3b
16764:
1677
1678 mfmsr r6
1679 ori r6,r6,MSR_RI
1680 mtmsrd r6 /* RI on */
1681
1682#ifdef CONFIG_HMT
1683 /* Start up the second thread on cpu 0 */
1684 mfspr r3,SPRN_PVR
1685 srwi r3,r3,16
1686 cmpwi r3,0x34 /* Pulsar */
1687 beq 90f
1688 cmpwi r3,0x36 /* Icestar */
1689 beq 90f
1690 cmpwi r3,0x37 /* SStar */
1691 beq 90f
1692 b 91f /* HMT not supported */
169390: li r3,0
1694 bl .hmt_start_secondary
169591:
1696#endif
1697
1698 /* The following gets the stack and TOC set up with the regs */
1699 /* pointing to the real addr of the kernel stack. This is */
1700 /* all done to support the C function call below which sets */
1701 /* up the htab. This is done because we have relocated the */
1702 /* kernel but are still running in real mode. */
1703
1704 LOADADDR(r3,init_thread_union)
1705 add r3,r3,r26
1706
1707 /* set up a stack pointer (physical address) */
1708 addi r1,r3,THREAD_SIZE
1709 li r0,0
1710 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1711
1712 /* set up the TOC (physical address) */
1713 LOADADDR(r2,__toc_start)
1714 addi r2,r2,0x4000
1715 addi r2,r2,0x4000
1716 add r2,r2,r26
1717
1718 LOADADDR(r3,cpu_specs)
1719 add r3,r3,r26
1720 LOADADDR(r4,cur_cpu_spec)
1721 add r4,r4,r26
1722 mr r5,r26
1723 bl .identify_cpu
1724
1725 /* Save some low level config HIDs of CPU0 to be copied to
1726 * other CPUs later on, or used for suspend/resume
1727 */
1728 bl .__save_cpu_setup
1729 sync
1730
1731	/* Set up a valid physical PACA pointer in SPRG3 for early_setup;
1732 * note that boot_cpuid can always be 0 nowadays since there is
1733 * nowhere it can be initialized differently before we reach this
1734 * code
1735 */
1736 LOADADDR(r27, boot_cpuid)
1737 add r27,r27,r26
1738 lwz r27,0(r27)
1739
1740 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1741 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1742 add r13,r13,r24 /* for this processor. */
1743 add r13,r13,r26 /* convert to physical addr */
1744 mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
1745
1746 /* Do very early kernel initializations, including initial hash table,
1747 * stab and slb setup before we turn on relocation. */
1748
1749 /* Restore parameters passed from prom_init/kexec */
1750 mr r3,r31
1751 bl .early_setup
1752
1753 /* set the ASR */
1754 ld r3,PACASTABREAL(r13)
1755 ori r4,r3,1 /* turn on valid bit */
1756 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1757 ld r3,0(r3)
1758 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1759 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1760 beq 98f /* branch if result is 0 */
1761 mfspr r3,SPRN_PVR
1762 srwi r3,r3,16
1763 cmpwi r3,0x37 /* SStar */
1764 beq 97f
1765 cmpwi r3,0x36 /* IStar */
1766 beq 97f
1767 cmpwi r3,0x34 /* Pulsar */
1768 bne 98f
176997: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1770 HVSC /* Invoking hcall */
1771 b 99f
177298: /* !(rpa hypervisor) || !(star) */
1773 mtasr r4 /* set the stab location */
177499:
1775 /* Set SDR1 (hash table pointer) */
1776 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1777 ld r3,0(r3)
1778 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1779 /* Test if bit 0 is set (LPAR bit) */
1780 andi. r3,r3,PLATFORM_LPAR
1781 bne 98f /* branch if result is !0 */
1782 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1783 add r6,r6,r26
1784 ld r6,0(r6) /* get the value of _SDR1 */
1785 mtspr SPRN_SDR1,r6 /* set the htab location */
178698:
1787 LOADADDR(r3,.start_here_common)
1788 SET_REG_TO_CONST(r4, MSR_KERNEL)
1789 mtspr SPRN_SRR0,r3
1790 mtspr SPRN_SRR1,r4
1791 rfid
1792 b . /* prevent speculative execution */
1793#endif /* CONFIG_PPC_MULTIPLATFORM */
1794
1795 /* This is where all platforms converge execution */
1796_STATIC(start_here_common)
1797 /* relocation is on at this point */
1798
1799 /* The following code sets up the SP and TOC now that we are */
1800 /* running with translation enabled. */
1801
1802 LOADADDR(r3,init_thread_union)
1803
1804 /* set up the stack */
1805 addi r1,r3,THREAD_SIZE
1806 li r0,0
1807 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1808
1809	/* Apply the CPU-specific fixups (nop out sections not relevant
1810	 * to this CPU).
1811 */
1812 li r3,0
1813 bl .do_cpu_ftr_fixups
1814
1815 LOADADDR(r26, boot_cpuid)
1816 lwz r26,0(r26)
1817
1818 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1819 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1820 add r13,r13,r24 /* for this processor. */
1821 mtspr SPRN_SPRG3,r13
1822
1823 /* ptr to current */
1824 LOADADDR(r4,init_task)
1825 std r4,PACACURRENT(r13)
1826
1827 /* Load the TOC */
1828 ld r2,PACATOC(r13)
1829 std r1,PACAKSAVE(r13)
1830
1831 bl .setup_system
1832
1833 /* Load up the kernel context */
18345:
1835#ifdef DO_SOFT_DISABLE
1836 li r5,0
1837 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
1838 mfmsr r5
1839 ori r5,r5,MSR_EE /* Hard Enabled */
1840 mtmsrd r5
1841#endif
1842
1843 bl .start_kernel
1844
1845_GLOBAL(hmt_init)
1846#ifdef CONFIG_HMT
1847 LOADADDR(r5, hmt_thread_data)
1848 mfspr r7,SPRN_PVR
1849 srwi r7,r7,16
1850 cmpwi r7,0x34 /* Pulsar */
1851 beq 90f
1852 cmpwi r7,0x36 /* Icestar */
1853 beq 91f
1854 cmpwi r7,0x37 /* SStar */
1855 beq 91f
1856 b 101f
185790: mfspr r6,SPRN_PIR
1858 andi. r6,r6,0x1f
1859 b 92f
186091: mfspr r6,SPRN_PIR
1861 andi. r6,r6,0x3ff
186292: sldi r4,r24,3
1863 stwx r6,r5,r4
1864 bl .hmt_start_secondary
1865 b 101f
1866
1867__hmt_secondary_hold:
1868 LOADADDR(r5, hmt_thread_data)
1869 clrldi r5,r5,4
1870 li r7,0
1871 mfspr r6,SPRN_PIR
1872 mfspr r8,SPRN_PVR
1873 srwi r8,r8,16
1874 cmpwi r8,0x34
1875 bne 93f
1876 andi. r6,r6,0x1f
1877 b 103f
187893: andi. r6,r6,0x3f
1879
1880103: lwzx r8,r5,r7
1881 cmpw r8,r6
1882 beq 104f
1883 addi r7,r7,8
1884 b 103b
1885
1886104: addi r7,r7,4
1887 lwzx r9,r5,r7
1888 mr r24,r9
1889101:
1890#endif
1891 mr r3,r24
1892 b .pSeries_secondary_smp_init
1893
1894#ifdef CONFIG_HMT
1895_GLOBAL(hmt_start_secondary)
1896 LOADADDR(r4,__hmt_secondary_hold)
1897 clrldi r4,r4,4
1898 mtspr SPRN_NIADORM, r4
1899 mfspr r4, SPRN_MSRDORM
1900 li r5, -65
1901 and r4, r4, r5
1902 mtspr SPRN_MSRDORM, r4
1903 lis r4,0xffef
1904 ori r4,r4,0x7403
1905 mtspr SPRN_TSC, r4
1906 li r4,0x1f4
1907 mtspr SPRN_TST, r4
1908 mfspr r4, SPRN_HID0
1909 ori r4, r4, 0x1
1910 mtspr SPRN_HID0, r4
1911 mfspr r4, SPRN_CTRLF
1912 oris r4, r4, 0x40
1913 mtspr SPRN_CTRLT, r4
1914 blr
1915#endif
1916
1917#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
1918_GLOBAL(smp_release_cpus)
1919	/* All secondary cpus are spinning on a common
1920	 * spinloop; release them all now so they can start
1921	 * to spin on their individual paca spinloops.
1922	 * For non-SMP kernels, the secondary cpus never
1923 * get out of the common spinloop.
1924 * XXX This does nothing useful on iSeries, secondaries are
1925 * already waiting on their paca.
1926 */
1927 li r3,1
1928 LOADADDR(r5,__secondary_hold_spinloop)
1929 std r3,0(r5)
1930 sync
1931 blr
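	/* C equivalent (illustrative): __secondary_hold_spinloop = 1; sync.
	 * The secondaries polling that word in the low-memory spinloop then
	 * move on to spin on their own paca flags.
	 */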
1932#endif /* CONFIG_KEXEC || CONFIG_SMP */
1933
1934
1935/*
1936 * We put a few things here that have to be page-aligned.
1937 * This stuff goes at the beginning of the bss, which is page-aligned.
1938 */
1939 .section ".bss"
1940
1941 .align PAGE_SHIFT
1942
1943 .globl empty_zero_page
1944empty_zero_page:
1945 .space PAGE_SIZE
1946
1947 .globl swapper_pg_dir
1948swapper_pg_dir:
1949 .space PAGE_SIZE
1950
1951/*
1952 * This space gets a copy of optional info passed to us by the bootstrap
1953 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1954 */
1955 .globl cmd_line
1956cmd_line:
1957 .space COMMAND_LINE_SIZE
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
new file mode 100644
index 000000000000..bc6d1ac55235
--- /dev/null
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -0,0 +1,860 @@
1/*
2 * arch/ppc/kernel/except_8xx.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications by Dan Malek
12 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains low-level support and setup for PowerPC 8xx
15 * embedded processors, including trap and interrupt dispatch.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24#include <linux/config.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cache.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/thread_info.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34
35/* Macro to make the code more readable. */
36#ifdef CONFIG_8xx_CPU6
37#define DO_8xx_CPU6(val, reg) \
38 li reg, val; \
39 stw reg, 12(r0); \
40 lwz reg, 12(r0);
41#else
42#define DO_8xx_CPU6(val, reg)
43#endif
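/* The li/stw/lwz sequence writes an SPR-specific magic value to a scratch
 * word before the following mtspr; this is the workaround the CPU6 erratum
 * requires (compare set_context and set_dec_cpu6 later in this file, which
 * do the same through cpu6_errata_word).
 */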
44 .text
45 .globl _stext
46_stext:
47 .text
48 .globl _start
49_start:
50
51/* MPC8xx
52 * This port was done on an MBX board with an 860. Right now I only
53 * support an ELF compressed (zImage) boot from EPPC-Bug because the
54 * code there loads up some registers before calling us:
55 * r3: ptr to board info data
56 * r4: initrd_start or if no initrd then 0
57 * r5: initrd_end - unused if r4 is 0
58 * r6: Start of command line string
59 * r7: End of command line string
60 *
61 * I decided to use conditional compilation instead of checking PVR and
62 * adding more processor specific branches around code I don't need.
63 * Since this is an embedded processor, I also appreciate any memory
64 * savings I can get.
65 *
66 * The MPC8xx does not have any BATs, but it supports large page sizes.
67 * We first initialize the MMU to support 8M byte pages, then load one
68 * entry into each of the instruction and data TLBs to map the first
69 * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
70 * the "internal" processor registers before MMU_init is called.
71 *
72 * The TLB code currently contains a major hack. Since I use the condition
73 * code register, I have to save and restore it. I am out of registers, so
74 * I just store it in memory location 0 (the TLB handlers are not reentrant).
75 * To avoid making any decisions, I need to use the "segment" valid bit
76 * in the first level table, but that would require many changes to the
77 * Linux page directory/table functions that I don't want to do right now.
78 *
79 * I used to use SPRG2 for a temporary register in the TLB handler, but it
80 * has since been put to other uses. I now use a hack to save a register
81 * and the CCR at memory location 0.....Someday I'll fix this.....
82 * -- Dan
83 */
84 .globl __start
85__start:
86 mr r31,r3 /* save parameters */
87 mr r30,r4
88 mr r29,r5
89 mr r28,r6
90 mr r27,r7
91
92 /* We have to turn on the MMU right away so we get cache modes
93 * set correctly.
94 */
95 bl initial_mmu
96
97/* We now have the lower 8 Meg mapped into TLB entries, and the caches
98 * ready to work.
99 */
100
101turn_on_mmu:
102 mfmsr r0
103 ori r0,r0,MSR_DR|MSR_IR
104 mtspr SPRN_SRR1,r0
105 lis r0,start_here@h
106 ori r0,r0,start_here@l
107 mtspr SPRN_SRR0,r0
108 SYNC
109 rfi /* enables MMU */
110
111/*
112 * Exception entry code. This code runs with address translation
113 * turned off, i.e. using physical addresses.
114 * We assume sprg3 has the physical address of the current
115 * task's thread_struct.
116 */
117#define EXCEPTION_PROLOG \
118 mtspr SPRN_SPRG0,r10; \
119 mtspr SPRN_SPRG1,r11; \
120 mfcr r10; \
121 EXCEPTION_PROLOG_1; \
122 EXCEPTION_PROLOG_2
123
124#define EXCEPTION_PROLOG_1 \
125 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
126 andi. r11,r11,MSR_PR; \
127 tophys(r11,r1); /* use tophys(r1) if kernel */ \
128 beq 1f; \
129 mfspr r11,SPRN_SPRG3; \
130 lwz r11,THREAD_INFO-THREAD(r11); \
131 addi r11,r11,THREAD_SIZE; \
132 tophys(r11,r11); \
1331: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
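/* i.e. when coming from the kernel we keep using the current r1 (made
 * physical), otherwise we start from the top of the task's kernel stack;
 * either way an INT_FRAME_SIZE exception frame is carved out below it.
 */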
134
135
136#define EXCEPTION_PROLOG_2 \
137 CLR_TOP32(r11); \
138 stw r10,_CCR(r11); /* save registers */ \
139 stw r12,GPR12(r11); \
140 stw r9,GPR9(r11); \
141 mfspr r10,SPRN_SPRG0; \
142 stw r10,GPR10(r11); \
143 mfspr r12,SPRN_SPRG1; \
144 stw r12,GPR11(r11); \
145 mflr r10; \
146 stw r10,_LINK(r11); \
147 mfspr r12,SPRN_SRR0; \
148 mfspr r9,SPRN_SRR1; \
149 stw r1,GPR1(r11); \
150 stw r1,0(r11); \
151 tovirt(r1,r11); /* set new kernel sp */ \
152 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
153 MTMSRD(r10); /* (except for mach check in rtas) */ \
154 stw r0,GPR0(r11); \
155 SAVE_4GPRS(3, r11); \
156 SAVE_2GPRS(7, r11)
157
158/*
159 * Note: code which follows this uses cr0.eq (set if from kernel),
160 * r11, r12 (SRR0), and r9 (SRR1).
161 *
162 * Note2: once we have set r1 we are in a position to take exceptions
163 * again, and we could thus set MSR:RI at that point.
164 */
165
166/*
167 * Exception vectors.
168 */
169#define EXCEPTION(n, label, hdlr, xfer) \
170 . = n; \
171label: \
172 EXCEPTION_PROLOG; \
173 addi r3,r1,STACK_FRAME_OVERHEAD; \
174 xfer(n, hdlr)
175
176#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
177 li r10,trap; \
178 stw r10,_TRAP(r11); \
179 li r10,MSR_KERNEL; \
180 copyee(r10, r9); \
181 bl tfer; \
182i##n: \
183 .long hdlr; \
184 .long ret
185
186#define COPY_EE(d, s) rlwimi d,s,0,16,16
187#define NOCOPY(d, s)
188
189#define EXC_XFER_STD(n, hdlr) \
190 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
191 ret_from_except_full)
192
193#define EXC_XFER_LITE(n, hdlr) \
194 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
195 ret_from_except)
196
197#define EXC_XFER_EE(n, hdlr) \
198 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
199 ret_from_except_full)
200
201#define EXC_XFER_EE_LITE(n, hdlr) \
202 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
203 ret_from_except)
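/* As an illustrative expansion, EXCEPTION(0x100, Reset, unknown_exception,
 * EXC_XFER_STD) places a handler at offset 0x100 that runs EXCEPTION_PROLOG,
 * points r3 at the saved register area on the stack, and transfers to
 * unknown_exception() via transfer_to_handler_full / ret_from_except_full.
 */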
204
205/* System reset */
206 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
207
208/* Machine check */
209 . = 0x200
210MachineCheck:
211 EXCEPTION_PROLOG
212 mfspr r4,SPRN_DAR
213 stw r4,_DAR(r11)
214 mfspr r5,SPRN_DSISR
215 stw r5,_DSISR(r11)
216 addi r3,r1,STACK_FRAME_OVERHEAD
217 EXC_XFER_STD(0x200, machine_check_exception)
218
219/* Data access exception.
220 * This is "never generated" by the MPC8xx. We jump to it for other
221 * translation errors.
222 */
223 . = 0x300
224DataAccess:
225 EXCEPTION_PROLOG
226 mfspr r10,SPRN_DSISR
227 stw r10,_DSISR(r11)
228 mr r5,r10
229 mfspr r4,SPRN_DAR
230 EXC_XFER_EE_LITE(0x300, handle_page_fault)
231
232/* Instruction access exception.
233 * This is "never generated" by the MPC8xx. We jump to it for other
234 * translation errors.
235 */
236 . = 0x400
237InstructionAccess:
238 EXCEPTION_PROLOG
239 mr r4,r12
240 mr r5,r9
241 EXC_XFER_EE_LITE(0x400, handle_page_fault)
242
243/* External interrupt */
244 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
245
246/* Alignment exception */
247 . = 0x600
248Alignment:
249 EXCEPTION_PROLOG
250 mfspr r4,SPRN_DAR
251 stw r4,_DAR(r11)
252 mfspr r5,SPRN_DSISR
253 stw r5,_DSISR(r11)
254 addi r3,r1,STACK_FRAME_OVERHEAD
255 EXC_XFER_EE(0x600, alignment_exception)
256
257/* Program check exception */
258 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
259
260/* No FPU on MPC8xx. This exception is not supposed to happen.
261*/
262 EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
263
264/* Decrementer */
265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
266
267 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
268 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
269
270/* System call */
271 . = 0xc00
272SystemCall:
273 EXCEPTION_PROLOG
274 EXC_XFER_EE_LITE(0xc00, DoSyscall)
275
276/* Single step - not used on 601 */
277 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
278 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
279 EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
280
281/* On the MPC8xx, this is a software emulation interrupt. It occurs
282 * for all unimplemented and illegal instructions.
283 */
284 EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
285
286 . = 0x1100
287/*
288 * For the MPC8xx, this is a software tablewalk to load the instruction
289 * TLB. It is modelled after the example in the Motorola manual. The task
290 * switch loads the M_TWB register with the pointer to the first level table.
291 * If we discover there is no second level table (value is zero) or if there
292 * is an invalid pte, we load that into the TLB, which causes another fault
293 * into the TLB Error interrupt where we can handle such problems.
294 * We have to use the MD_xxx registers for the tablewalk because the
295 * equivalent MI_xxx registers only perform the attribute functions.
296 */
297InstructionTLBMiss:
298#ifdef CONFIG_8xx_CPU6
299 stw r3, 8(r0)
300#endif
301 DO_8xx_CPU6(0x3f80, r3)
302 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
303 mfcr r10
304 stw r10, 0(r0)
305 stw r11, 4(r0)
306 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
307 DO_8xx_CPU6(0x3780, r3)
308 mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
309 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
310
311 /* If we are faulting a kernel address, we have to use the
312 * kernel page tables.
313 */
314 andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
315 beq 3f
316 lis r11, swapper_pg_dir@h
317 ori r11, r11, swapper_pg_dir@l
318 rlwimi r10, r11, 0, 2, 19
3193:
320 lwz r11, 0(r10) /* Get the level 1 entry */
321 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
322 beq 2f /* If zero, don't try to find a pte */
323
324 /* We have a pte table, so load the MI_TWC with the attributes
325 * for this "segment."
326 */
327 ori r11,r11,1 /* Set valid bit */
328 DO_8xx_CPU6(0x2b80, r3)
329 mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
330 DO_8xx_CPU6(0x3b80, r3)
331 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
332 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
333 lwz r10, 0(r11) /* Get the pte */
334
335 ori r10, r10, _PAGE_ACCESSED
336 stw r10, 0(r11)
337
338 /* The Linux PTE won't go exactly into the MMU TLB.
339 * Software indicator bits 21, 22 and 28 must be clear.
340 * Software indicator bits 24, 25, 26, and 27 must be
341 * set. All other Linux PTE bits control the behavior
342 * of the MMU.
343 */
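	/* In C terms the fixup below is roughly pte = (pte & ~0xf8) | 0xf0
	 * (illustrative; big-endian bit 24 is 0x80 down to bit 28 at 0x08).
	 */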
3442: li r11, 0x00f0
345 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
346 DO_8xx_CPU6(0x2d80, r3)
347 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
348
349 mfspr r10, SPRN_M_TW /* Restore registers */
350 lwz r11, 0(r0)
351 mtcr r11
352 lwz r11, 4(r0)
353#ifdef CONFIG_8xx_CPU6
354 lwz r3, 8(r0)
355#endif
356 rfi
357
358 . = 0x1200
359DataStoreTLBMiss:
360#ifdef CONFIG_8xx_CPU6
361 stw r3, 8(r0)
362#endif
363 DO_8xx_CPU6(0x3f80, r3)
364 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
365 mfcr r10
366 stw r10, 0(r0)
367 stw r11, 4(r0)
368 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
369
370 /* If we are faulting a kernel address, we have to use the
371 * kernel page tables.
372 */
373 andi. r11, r10, 0x0800
374 beq 3f
375 lis r11, swapper_pg_dir@h
376 ori r11, r11, swapper_pg_dir@l
377 rlwimi r10, r11, 0, 2, 19
3783:
379 lwz r11, 0(r10) /* Get the level 1 entry */
380 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
381 beq 2f /* If zero, don't try to find a pte */
382
383	/* We have a pte table, so fetch the pte from the table.
384 */
385 ori r11, r11, 1 /* Set valid bit in physical L2 page */
386 DO_8xx_CPU6(0x3b80, r3)
387 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
388 mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
389 lwz r10, 0(r10) /* Get the pte */
390
391 /* Insert the Guarded flag into the TWC from the Linux PTE.
392 * It is bit 27 of both the Linux PTE and the TWC (at least
393 * I got that right :-). It will be better when we can put
394 * this into the Linux pgd/pmd and load it in the operation
395 * above.
396 */
397 rlwimi r11, r10, 0, 27, 27
398 DO_8xx_CPU6(0x3b80, r3)
399 mtspr SPRN_MD_TWC, r11
400
401 mfspr r11, SPRN_MD_TWC /* get the pte address again */
402 ori r10, r10, _PAGE_ACCESSED
403 stw r10, 0(r11)
404
405 /* The Linux PTE won't go exactly into the MMU TLB.
406 * Software indicator bits 21, 22 and 28 must be clear.
407 * Software indicator bits 24, 25, 26, and 27 must be
408 * set. All other Linux PTE bits control the behavior
409 * of the MMU.
410 */
4112: li r11, 0x00f0
412 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
413 DO_8xx_CPU6(0x3d80, r3)
414 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
415
416 mfspr r10, SPRN_M_TW /* Restore registers */
417 lwz r11, 0(r0)
418 mtcr r11
419 lwz r11, 4(r0)
420#ifdef CONFIG_8xx_CPU6
421 lwz r3, 8(r0)
422#endif
423 rfi
424
425/* This is an instruction TLB error on the MPC8xx. This could be due
426 * to many reasons, such as executing guarded memory or illegal instruction
427 * addresses. There is nothing to do but handle a big time error fault.
428 */
429 . = 0x1300
430InstructionTLBError:
431 b InstructionAccess
432
433/* This is the data TLB error on the MPC8xx. This could be due to
434 * many reasons, including a dirty update to a pte. We can catch that
435 * one here, but anything else is an error. First, we track down the
436 * Linux pte. If it is valid and write access is allowed, but the
437 * page dirty bit is not set, we set the dirty bit and reload the TLB. For
438 * any other case, we bail out to a higher level function that can
439 * handle it.
440 */
441 . = 0x1400
442DataTLBError:
443#ifdef CONFIG_8xx_CPU6
444 stw r3, 8(r0)
445#endif
446 DO_8xx_CPU6(0x3f80, r3)
447 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
448 mfcr r10
449 stw r10, 0(r0)
450 stw r11, 4(r0)
451
452 /* First, make sure this was a store operation.
453 */
454 mfspr r10, SPRN_DSISR
455 andis. r11, r10, 0x0200 /* If set, indicates store op */
456 beq 2f
457
458 /* The EA of a data TLB miss is automatically stored in the MD_EPN
459 * register. The EA of a data TLB error is automatically stored in
460 * the DAR, but not the MD_EPN register. We must copy the 20 most
461 * significant bits of the EA from the DAR to MD_EPN before we
462 * start walking the page tables. We also need to copy the CASID
463 * value from the M_CASID register.
464 * Addendum: The EA of a data TLB error is _supposed_ to be stored
465 * in DAR, but it seems that this doesn't happen in some cases, such
466 * as when the error is due to a dcbi instruction to a page with a
467 * TLB that doesn't have the changed bit set. In such cases, there
468 * does not appear to be any way to recover the EA of the error
469 * since it is neither in DAR nor MD_EPN. As a workaround, the
470 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
471 * are initialized in mapin_ram(). This will avoid the problem,
472 * assuming we only use the dcbi instruction on kernel addresses.
473 */
474 mfspr r10, SPRN_DAR
475 rlwinm r11, r10, 0, 0, 19
476 ori r11, r11, MD_EVALID
477 mfspr r10, SPRN_M_CASID
478 rlwimi r11, r10, 0, 28, 31
479 DO_8xx_CPU6(0x3780, r3)
480 mtspr SPRN_MD_EPN, r11
481
482 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
483
484 /* If we are faulting a kernel address, we have to use the
485 * kernel page tables.
486 */
487 andi. r11, r10, 0x0800
488 beq 3f
489 lis r11, swapper_pg_dir@h
490 ori r11, r11, swapper_pg_dir@l
491 rlwimi r10, r11, 0, 2, 19
4923:
493 lwz r11, 0(r10) /* Get the level 1 entry */
494 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
495 beq 2f /* If zero, bail */
496
497 /* We have a pte table, so fetch the pte from the table.
498 */
499 ori r11, r11, 1 /* Set valid bit in physical L2 page */
500 DO_8xx_CPU6(0x3b80, r3)
501 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
502 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
503 lwz r10, 0(r11) /* Get the pte */
504
505 andi. r11, r10, _PAGE_RW /* Is it writeable? */
506 beq 2f /* Bail out if not */
507
508 /* Update 'changed', among others.
509 */
510 ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
511 mfspr r11, SPRN_MD_TWC /* Get pte address again */
512 stw r10, 0(r11) /* and update pte in table */
513
514 /* The Linux PTE won't go exactly into the MMU TLB.
515 * Software indicator bits 21, 22 and 28 must be clear.
516 * Software indicator bits 24, 25, 26, and 27 must be
517 * set. All other Linux PTE bits control the behavior
518 * of the MMU.
519 */
520 li r11, 0x00f0
521 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
522 DO_8xx_CPU6(0x3d80, r3)
523 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
524
525 mfspr r10, SPRN_M_TW /* Restore registers */
526 lwz r11, 0(r0)
527 mtcr r11
528 lwz r11, 4(r0)
529#ifdef CONFIG_8xx_CPU6
530 lwz r3, 8(r0)
531#endif
532 rfi
5332:
534 mfspr r10, SPRN_M_TW /* Restore registers */
535 lwz r11, 0(r0)
536 mtcr r11
537 lwz r11, 4(r0)
538#ifdef CONFIG_8xx_CPU6
539 lwz r3, 8(r0)
540#endif
541 b DataAccess
542
543 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
544 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
545 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
546 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
547 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
548 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
549 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
550
551/* On the MPC8xx, these next four traps are used for development
552 * support of breakpoints and such. Someday I will get around to
553 * using them.
554 */
555 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
556 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
557 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
558 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
559
560 . = 0x2000
561
562 .globl giveup_fpu
563giveup_fpu:
564 blr
565
566/*
567 * This is where the main kernel code starts.
568 */
569start_here:
570 /* ptr to current */
571 lis r2,init_task@h
572 ori r2,r2,init_task@l
573
574 /* ptr to phys current thread */
575 tophys(r4,r2)
576 addi r4,r4,THREAD /* init task's THREAD */
577 mtspr SPRN_SPRG3,r4
578 li r3,0
579 mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
580
581 /* stack */
582 lis r1,init_thread_union@ha
583 addi r1,r1,init_thread_union@l
584 li r0,0
585 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
586
587 bl early_init /* We have to do this with MMU on */
588
589/*
590 * Decide what sort of machine this is and initialize the MMU.
591 */
592 mr r3,r31
593 mr r4,r30
594 mr r5,r29
595 mr r6,r28
596 mr r7,r27
597 bl machine_init
598 bl MMU_init
599
600/*
601 * Go back to running unmapped so we can load up new values
602 * and change to using our exception vectors.
603 * On the 8xx, all we have to do is invalidate the TLB to clear
604 * the old 8M byte TLB mappings and load the page table base register.
605 */
606 /* The right way to do this would be to track it down through
607 * init's THREAD like the context switch code does, but this is
608 * easier......until someone changes init's static structures.
609 */
610 lis r6, swapper_pg_dir@h
611 ori r6, r6, swapper_pg_dir@l
612 tophys(r6,r6)
613#ifdef CONFIG_8xx_CPU6
614 lis r4, cpu6_errata_word@h
615 ori r4, r4, cpu6_errata_word@l
616 li r3, 0x3980
617 stw r3, 12(r4)
618 lwz r3, 12(r4)
619#endif
620 mtspr SPRN_M_TWB, r6
621 lis r4,2f@h
622 ori r4,r4,2f@l
623 tophys(r4,r4)
624 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
625 mtspr SPRN_SRR0,r4
626 mtspr SPRN_SRR1,r3
627 rfi
628/* Load up the kernel context */
6292:
630 SYNC /* Force all PTE updates to finish */
631 tlbia /* Clear all TLB entries */
632 sync /* wait for tlbia/tlbie to finish */
633 TLBSYNC /* ... on all CPUs */
634
635 /* set up the PTE pointers for the Abatron bdiGDB.
636 */
637 tovirt(r6,r6)
638 lis r5, abatron_pteptrs@h
639 ori r5, r5, abatron_pteptrs@l
640 stw r5, 0xf0(r0) /* Must match your Abatron config file */
641 tophys(r5,r5)
642 stw r6, 0(r5)
643
644/* Now turn on the MMU for real! */
645 li r4,MSR_KERNEL
646 lis r3,start_kernel@h
647 ori r3,r3,start_kernel@l
648 mtspr SPRN_SRR0,r3
649 mtspr SPRN_SRR1,r4
650 rfi /* enable MMU and jump to start_kernel */
651
652/* Set up the initial MMU state so we can do the first level of
653 * kernel initialization. This maps the first 8 MBytes of memory 1:1
654 * virtual to physical. Also, set the cache mode since that is defined
655 * by TLB entries and perform any additional mapping (like of the IMMR).
656 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
657 * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by
658 * these mappings is mapped by page tables.
659 */
660initial_mmu:
661 tlbia /* Invalidate all TLB entries */
662#ifdef CONFIG_PIN_TLB
663 lis r8, MI_RSV4I@h
664 ori r8, r8, 0x1c00
665#else
666 li r8, 0
667#endif
668 mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
669
670#ifdef CONFIG_PIN_TLB
671 lis r10, (MD_RSV4I | MD_RESETVAL)@h
672 ori r10, r10, 0x1c00
673 mr r8, r10
674#else
675 lis r10, MD_RESETVAL@h
676#endif
677#ifndef CONFIG_8xx_COPYBACK
678 oris r10, r10, MD_WTDEF@h
679#endif
680 mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
681
682 /* Now map the lower 8 Meg into the TLBs. For this quick hack,
683 * we can load the instruction and data TLB registers with the
684 * same values.
685 */
686 lis r8, KERNELBASE@h /* Create vaddr for TLB */
687 ori r8, r8, MI_EVALID /* Mark it valid */
688 mtspr SPRN_MI_EPN, r8
689 mtspr SPRN_MD_EPN, r8
690 li r8, MI_PS8MEG /* Set 8M byte page */
691 ori r8, r8, MI_SVALID /* Make it valid */
692 mtspr SPRN_MI_TWC, r8
693 mtspr SPRN_MD_TWC, r8
694 li r8, MI_BOOTINIT /* Create RPN for address 0 */
695 mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
696 mtspr SPRN_MD_RPN, r8
697 lis r8, MI_Kp@h /* Set the protection mode */
698 mtspr SPRN_MI_AP, r8
699 mtspr SPRN_MD_AP, r8
700
701 /* Map another 8 MByte at the IMMR to get the processor
702 * internal registers (among other things).
703 */
704#ifdef CONFIG_PIN_TLB
705 addi r10, r10, 0x0100
706 mtspr SPRN_MD_CTR, r10
707#endif
708 mfspr r9, 638 /* Get current IMMR */
709 andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
710
711 mr r8, r9 /* Create vaddr for TLB */
712 ori r8, r8, MD_EVALID /* Mark it valid */
713 mtspr SPRN_MD_EPN, r8
714 li r8, MD_PS8MEG /* Set 8M byte page */
715 ori r8, r8, MD_SVALID /* Make it valid */
716 mtspr SPRN_MD_TWC, r8
717 mr r8, r9 /* Create paddr for TLB */
718 ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
719 mtspr SPRN_MD_RPN, r8
720
721#ifdef CONFIG_PIN_TLB
722 /* Map two more 8M kernel data pages.
723 */
724 addi r10, r10, 0x0100
725 mtspr SPRN_MD_CTR, r10
726
727 lis r8, KERNELBASE@h /* Create vaddr for TLB */
728 addis r8, r8, 0x0080 /* Add 8M */
729 ori r8, r8, MI_EVALID /* Mark it valid */
730 mtspr SPRN_MD_EPN, r8
731 li r9, MI_PS8MEG /* Set 8M byte page */
732 ori r9, r9, MI_SVALID /* Make it valid */
733 mtspr SPRN_MD_TWC, r9
734 li r11, MI_BOOTINIT /* Create RPN for address 0 */
735 addis r11, r11, 0x0080 /* Add 8M */
736 mtspr SPRN_MD_RPN, r8
737
738 addis r8, r8, 0x0080 /* Add 8M */
739 mtspr SPRN_MD_EPN, r8
740 mtspr SPRN_MD_TWC, r9
741 addis r11, r11, 0x0080 /* Add 8M */
742 mtspr SPRN_MD_RPN, r8
743#endif
744
745 /* Since the cache is enabled according to the information we
746 * just loaded into the TLB, invalidate and enable the caches here.
747 * We should probably check/set other modes....later.
748 */
749 lis r8, IDC_INVALL@h
750 mtspr SPRN_IC_CST, r8
751 mtspr SPRN_DC_CST, r8
752 lis r8, IDC_ENABLE@h
753 mtspr SPRN_IC_CST, r8
754#ifdef CONFIG_8xx_COPYBACK
755 mtspr SPRN_DC_CST, r8
756#else
757 /* For a debug option, I left this here to easily enable
758 * the write through cache mode
759 */
760 lis r8, DC_SFWT@h
761 mtspr SPRN_DC_CST, r8
762 lis r8, IDC_ENABLE@h
763 mtspr SPRN_DC_CST, r8
764#endif
765 blr
766
767
768/*
769 * Set up to use a given MMU context.
770 * r3 is context number, r4 is PGD pointer.
771 *
772 * We place the physical address of the new task page directory loaded
773 * into the MMU base register, and set the ASID compare register with
774 * the new "context."
775 */
776_GLOBAL(set_context)
777
778#ifdef CONFIG_BDI_SWITCH
779 /* Context switch the PTE pointer for the Abatron BDI2000.
780 * The PGDIR is passed as second argument.
781 */
782 lis r5, KERNELBASE@h
783 lwz r5, 0xf0(r5)
784 stw r4, 0x4(r5)
785#endif
786
787#ifdef CONFIG_8xx_CPU6
788 lis r6, cpu6_errata_word@h
789 ori r6, r6, cpu6_errata_word@l
790 tophys (r4, r4)
791 li r7, 0x3980
792 stw r7, 12(r6)
793 lwz r7, 12(r6)
794 mtspr SPRN_M_TWB, r4 /* Update MMU base address */
795 li r7, 0x3380
796 stw r7, 12(r6)
797 lwz r7, 12(r6)
798 mtspr SPRN_M_CASID, r3 /* Update context */
799#else
800 mtspr SPRN_M_CASID,r3 /* Update context */
801 tophys (r4, r4)
802 mtspr SPRN_M_TWB, r4 /* and pgd */
803#endif
804 SYNC
805 blr
806
807#ifdef CONFIG_8xx_CPU6
808/* It's here because it is unique to the 8xx.
809 * It is important we get called with interrupts disabled. I used to
810 * do that, but it appears that all code that calls this already had
811 * interrupts disabled.
812 */
813 .globl set_dec_cpu6
814set_dec_cpu6:
815 lis r7, cpu6_errata_word@h
816 ori r7, r7, cpu6_errata_word@l
817 li r4, 0x2c00
818 stw r4, 8(r7)
819 lwz r4, 8(r7)
820 mtspr 22, r3 /* Update Decrementer */
821 SYNC
822 blr
823#endif
824
825/*
826 * We put a few things here that have to be page-aligned.
827 * This stuff goes at the beginning of the data segment,
828 * which is page-aligned.
829 */
830 .data
831 .globl sdata
832sdata:
833 .globl empty_zero_page
834empty_zero_page:
835 .space 4096
836
837 .globl swapper_pg_dir
838swapper_pg_dir:
839 .space 4096
840
841/*
842 * This space gets a copy of optional info passed to us by the bootstrap
843 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
844 */
845 .globl cmd_line
846cmd_line:
847 .space 512
848
849/* Room for two PTE table pointers, usually the kernel and current user
850 * pointers to their respective root page tables (pgdir).
851 */
852abatron_pteptrs:
853 .space 8
854
855#ifdef CONFIG_8xx_CPU6
856 .globl cpu6_errata_word
857cpu6_errata_word:
858 .space 16
859#endif
860
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
new file mode 100644
index 000000000000..5063c603fad4
--- /dev/null
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -0,0 +1,1063 @@
1/*
2 * arch/ppc/kernel/head_fsl_booke.S
3 *
4 * Kernel execution entry point code.
5 *
6 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
7 * Initial PowerPC version.
8 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Rewritten for PReP
10 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
11 * Low-level exception handlers, MMU support, and rewrite.
12 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
13 * PowerPC 8xx modifications.
14 * Copyright (c) 1998-1999 TiVo, Inc.
15 * PowerPC 403GCX modifications.
16 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
17 * PowerPC 403GCX/405GP modifications.
18 * Copyright 2000 MontaVista Software Inc.
19 * PPC405 modifications
20 * PowerPC 403GCX/405GP modifications.
21 * Author: MontaVista Software, Inc.
22 * frank_rowand@mvista.com or source@mvista.com
23 * debbie_chu@mvista.com
24 * Copyright 2002-2004 MontaVista Software, Inc.
25 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
26 * Copyright 2004 Freescale Semiconductor, Inc
27 * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com>
28 *
29 * This program is free software; you can redistribute it and/or modify it
30 * under the terms of the GNU General Public License as published by the
31 * Free Software Foundation; either version 2 of the License, or (at your
32 * option) any later version.
33 */
34
35#include <linux/config.h>
36#include <linux/threads.h>
37#include <asm/processor.h>
38#include <asm/page.h>
39#include <asm/mmu.h>
40#include <asm/pgtable.h>
41#include <asm/cputable.h>
42#include <asm/thread_info.h>
43#include <asm/ppc_asm.h>
44#include <asm/asm-offsets.h>
45#include "head_booke.h"
46
47/* As with the other PowerPC ports, it is expected that when code
48 * execution begins here, the following registers contain valid, yet
49 * optional, information:
50 *
51 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
52 * r4 - Starting address of the init RAM disk
53 * r5 - Ending address of the init RAM disk
54 * r6 - Start of kernel command line string (e.g. "mem=128")
55 * r7 - End of kernel command line string
56 *
57 */
58 .text
59_GLOBAL(_stext)
60_GLOBAL(_start)
61 /*
62 * Reserve a word at a fixed location to store the address
63 * of abatron_pteptrs
64 */
65 nop
66/*
67 * Save parameters we are passed
68 */
69 mr r31,r3
70 mr r30,r4
71 mr r29,r5
72 mr r28,r6
73 mr r27,r7
74 li r24,0 /* CPU number */
75
76/* We try not to make any assumptions about how the boot loader
77 * set up or used the TLBs.  We invalidate all mappings from the
78 * boot loader and load a single entry in TLB1[0] to map the
79 * first 16M of kernel memory. Any boot info passed from the
80 * bootloader needs to live in this first 16M.
81 *
82 * Requirement on bootloader:
83 * - The page we're executing in needs to reside in TLB1 and
84 *    have IPROT=1.  If not, an invalidate broadcast could
85 * evict the entry we're currently executing in.
86 *
87 * r3 = Index of TLB1 we're executing in
88 * r4 = Current MSR[IS]
89 * r5 = Index of TLB1 temp mapping
90 *
91 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
92 * if needed
93 */
94
95/* 1. Find the index of the entry we're executing in */
96 bl invstr /* Find our address */
97invstr: mflr r6 /* Make it accessible */
98 mfmsr r7
99 rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
100 mfspr r7, SPRN_PID0
101 slwi r7,r7,16
102 or r7,r7,r4
103 mtspr SPRN_MAS6,r7
104 tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
105#ifndef CONFIG_E200
106 mfspr r7,SPRN_MAS1
107 andis. r7,r7,MAS1_VALID@h
108 bne match_TLB
109 mfspr r7,SPRN_PID1
110 slwi r7,r7,16
111 or r7,r7,r4
112 mtspr SPRN_MAS6,r7
113 tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
114 mfspr r7,SPRN_MAS1
115 andis. r7,r7,MAS1_VALID@h
116 bne match_TLB
117 mfspr r7, SPRN_PID2
118 slwi r7,r7,16
119 or r7,r7,r4
120 mtspr SPRN_MAS6,r7
121 tlbsx 0,r6 /* Fall through, we had to match */
122#endif
123match_TLB:
124 mfspr r7,SPRN_MAS0
125 rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */
126
127     mfspr   r7,SPRN_MAS1                    /* Ensure IPROT is set */
128 oris r7,r7,MAS1_IPROT@h
129 mtspr SPRN_MAS1,r7
130 tlbwe
131
132/* 2. Invalidate all entries except the entry we're executing in */
133 mfspr r9,SPRN_TLB1CFG
134 andi. r9,r9,0xfff
135 li r6,0 /* Set Entry counter to 0 */
1361: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
137 rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
138 mtspr SPRN_MAS0,r7
139 tlbre
140 mfspr r7,SPRN_MAS1
141 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
142 cmpw r3,r6
143     beq     skpinv                          /* Don't update the current execution TLB */
144 mtspr SPRN_MAS1,r7
145 tlbwe
146 isync
147skpinv: addi r6,r6,1 /* Increment */
148 cmpw r6,r9 /* Are we done? */
149 bne 1b /* If not, repeat */
150
151 /* Invalidate TLB0 */
152 li r6,0x04
153 tlbivax 0,r6
154#ifdef CONFIG_SMP
155 tlbsync
156#endif
157 /* Invalidate TLB1 */
158 li r6,0x0c
159 tlbivax 0,r6
160#ifdef CONFIG_SMP
161 tlbsync
162#endif
163 msync
164
165/* 3. Setup a temp mapping and jump to it */
166     andi.   r5, r3, 0x1     /* Pick an entry other than the one we're in (and non-zero) */
167 addi r5, r5, 0x1
168 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
169 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
170 mtspr SPRN_MAS0,r7
171 tlbre
172
173 /* Just modify the entry ID and EPN for the temp mapping */
174 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
175 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
176 mtspr SPRN_MAS0,r7
177 xori r6,r4,1 /* Setup TMP mapping in the other Address space */
178 slwi r6,r6,12
179 oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
180 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
181 mtspr SPRN_MAS1,r6
182 mfspr r6,SPRN_MAS2
183 li r7,0 /* temp EPN = 0 */
184 rlwimi r7,r6,0,20,31
185 mtspr SPRN_MAS2,r7
186 tlbwe
187
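/*
 * The bl/mflr pair just below obtains the run-time address of the "1:"
 * label; folding its low 12 bits into the temp mapping's EPN and adding
 * 24 (six 4-byte instructions past the mflr) gives the address of the
 * instruction immediately after the rfi, so the rfi resumes at the next
 * step but within the temporary mapping and address space.  Step 7
 * below uses the same trick to jump into the KERNELBASE mapping.
 */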
188 xori r6,r4,1
189 slwi r6,r6,5 /* setup new context with other address space */
190 bl 1f /* Find our address */
1911: mflr r9
192 rlwimi r7,r9,0,20,31
193 addi r7,r7,24
194 mtspr SPRN_SRR0,r7
195 mtspr SPRN_SRR1,r6
196 rfi
197
198/* 4. Clear out PIDs & Search info */
199 li r6,0
200 mtspr SPRN_PID0,r6
201#ifndef CONFIG_E200
202 mtspr SPRN_PID1,r6
203 mtspr SPRN_PID2,r6
204#endif
205 mtspr SPRN_MAS6,r6
206
207/* 5. Invalidate mapping we started in */
208 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
209 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
210 mtspr SPRN_MAS0,r7
211 tlbre
212 li r6,0
213 mtspr SPRN_MAS1,r6
214 tlbwe
215 /* Invalidate TLB1 */
216 li r9,0x0c
217 tlbivax 0,r9
218#ifdef CONFIG_SMP
219 tlbsync
220#endif
221 msync
222
223/* 6. Setup KERNELBASE mapping in TLB1[0] */
224 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
225 mtspr SPRN_MAS0,r6
226 lis r6,(MAS1_VALID|MAS1_IPROT)@h
227 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
228 mtspr SPRN_MAS1,r6
229 li r7,0
230 lis r6,KERNELBASE@h
231 ori r6,r6,KERNELBASE@l
232 rlwimi r6,r7,0,20,31
233 mtspr SPRN_MAS2,r6
234 li r7,(MAS3_SX|MAS3_SW|MAS3_SR)
235 mtspr SPRN_MAS3,r7
236 tlbwe
237
238/* 7. Jump to KERNELBASE mapping */
239 lis r7,MSR_KERNEL@h
240 ori r7,r7,MSR_KERNEL@l
241 bl 1f /* Find our address */
2421: mflr r9
243 rlwimi r6,r9,0,20,31
244 addi r6,r6,24
245 mtspr SPRN_SRR0,r6
246 mtspr SPRN_SRR1,r7
247 rfi /* start execution out of TLB1[0] entry */
248
249/* 8. Clear out the temp mapping */
250 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
251 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
252 mtspr SPRN_MAS0,r7
253 tlbre
254 mtspr SPRN_MAS1,r8
255 tlbwe
256 /* Invalidate TLB1 */
257 li r9,0x0c
258 tlbivax 0,r9
259#ifdef CONFIG_SMP
260 tlbsync
261#endif
262 msync
263
264 /* Establish the interrupt vector offsets */
265 SET_IVOR(0, CriticalInput);
266 SET_IVOR(1, MachineCheck);
267 SET_IVOR(2, DataStorage);
268 SET_IVOR(3, InstructionStorage);
269 SET_IVOR(4, ExternalInput);
270 SET_IVOR(5, Alignment);
271 SET_IVOR(6, Program);
272 SET_IVOR(7, FloatingPointUnavailable);
273 SET_IVOR(8, SystemCall);
274 SET_IVOR(9, AuxillaryProcessorUnavailable);
275 SET_IVOR(10, Decrementer);
276 SET_IVOR(11, FixedIntervalTimer);
277 SET_IVOR(12, WatchdogTimer);
278 SET_IVOR(13, DataTLBError);
279 SET_IVOR(14, InstructionTLBError);
280 SET_IVOR(15, Debug);
281 SET_IVOR(32, SPEUnavailable);
282 SET_IVOR(33, SPEFloatingPointData);
283 SET_IVOR(34, SPEFloatingPointRound);
284#ifndef CONFIG_E200
285 SET_IVOR(35, PerformanceMonitor);
286#endif
287
288 /* Establish the interrupt vector base */
289 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
290 mtspr SPRN_IVPR,r4
291
292 /* Setup the defaults for TLB entries */
293 li r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
294#ifdef CONFIG_E200
295 oris r2,r2,MAS4_TLBSELD(1)@h
296#endif
297 mtspr SPRN_MAS4, r2
298
299#if 0
300 /* Enable DOZE */
301 mfspr r2,SPRN_HID0
302 oris r2,r2,HID0_DOZE@h
303 mtspr SPRN_HID0, r2
304#endif
305#ifdef CONFIG_E200
306 /* enable dedicated debug exception handling resources (Debug APU) */
307 mfspr r2,SPRN_HID0
308 ori r2,r2,HID0_DAPUEN@l
309 mtspr SPRN_HID0,r2
310#endif
311
312#if !defined(CONFIG_BDI_SWITCH)
313 /*
314 * The Abatron BDI JTAG debugger does not tolerate others
315 * mucking with the debug registers.
316 */
317 lis r2,DBCR0_IDM@h
318 mtspr SPRN_DBCR0,r2
319 /* clear any residual debug events */
320 li r2,-1
321 mtspr SPRN_DBSR,r2
322#endif
323
324 /*
325 * This is where the main kernel code starts.
326 */
327
328 /* ptr to current */
329 lis r2,init_task@h
330 ori r2,r2,init_task@l
331
332 /* ptr to current thread */
333 addi r4,r2,THREAD /* init task's THREAD */
334 mtspr SPRN_SPRG3,r4
335
336 /* stack */
337 lis r1,init_thread_union@h
338 ori r1,r1,init_thread_union@l
339 li r0,0
340 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
341
342 bl early_init
343
344 mfspr r3,SPRN_TLB1CFG
345 andi. r3,r3,0xfff
346 lis r4,num_tlbcam_entries@ha
347 stw r3,num_tlbcam_entries@l(r4)
348/*
349 * Decide what sort of machine this is and initialize the MMU.
350 */
351 mr r3,r31
352 mr r4,r30
353 mr r5,r29
354 mr r6,r28
355 mr r7,r27
356 bl machine_init
357 bl MMU_init
358
359 /* Setup PTE pointers for the Abatron bdiGDB */
360 lis r6, swapper_pg_dir@h
361 ori r6, r6, swapper_pg_dir@l
362 lis r5, abatron_pteptrs@h
363 ori r5, r5, abatron_pteptrs@l
364 lis r4, KERNELBASE@h
365 ori r4, r4, KERNELBASE@l
366 stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
367 stw r6, 0(r5)
368
369 /* Let's move on */
370 lis r4,start_kernel@h
371 ori r4,r4,start_kernel@l
372 lis r3,MSR_KERNEL@h
373 ori r3,r3,MSR_KERNEL@l
374 mtspr SPRN_SRR0,r4
375 mtspr SPRN_SRR1,r3
376 rfi /* change context and jump to start_kernel */
377
378/* Macros to hide the PTE size differences
379 *
380 * FIND_PTE -- walks the page tables given EA & pgdir pointer
381 * r10 -- EA of fault
382 * r11 -- PGDIR pointer
383 * r12 -- free
384 * label 2: is the bailout case
385 *
386 * if we find the pte (fall through):
387 * r11 is low pte word
388 * r12 is pointer to the pte
389 */
390#ifdef CONFIG_PTE_64BIT
391#define PTE_FLAGS_OFFSET 4
392#define FIND_PTE \
393 rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \
394 lwzx r11, r12, r11; /* Get pgd/pmd entry */ \
395 rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \
396 beq 2f; /* Bail if no table */ \
397 rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \
398 lwz r11, 4(r12); /* Get pte entry */
399#else
400#define PTE_FLAGS_OFFSET 0
401#define FIND_PTE \
402 rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \
403 lwz r11, 0(r11); /* Get L1 entry */ \
404 rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \
405 beq 2f; /* Bail if no table */ \
406 rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \
407 lwz r11, 0(r12); /* Get Linux PTE */
408#endif
409
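/*
 * Roughly, the 32-bit FIND_PTE above performs the classic two-level
 * software walk sketched below in C (an illustrative sketch only; the
 * function name and types are not kernel API, and the 64-bit PTE
 * variant uses slightly different shifts and offsets).  The top 10 bits
 * of the address index the pgdir, the low 12 bits of the L1 entry are
 * masked off to get the PTE table base (bailing out if it is zero), and
 * the middle 10 bits index that table:
 *
 *	unsigned long *find_pte(unsigned long ea, unsigned long pgdir)
 *	{
 *		unsigned long l1, l2;
 *
 *		l1 = *(unsigned long *)(pgdir + ((ea >> 22) << 2));
 *		l2 = l1 & ~0xfffUL;
 *		if (!l2)
 *			return NULL;
 *		return (unsigned long *)(l2 + (((ea >> 12) & 0x3ff) << 2));
 *	}
 */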
410/*
411 * Interrupt vector entry code
412 *
413 * The Book E MMUs are always on so we don't need to handle
414 * interrupts in real mode as with previous PPC processors. In
415 * this case we handle interrupts in the kernel virtual address
416 * space.
417 *
418 * Interrupt vectors are dynamically placed relative to the
419 * interrupt prefix as determined by the address of interrupt_base.
420 * The interrupt vector offsets are programmed using the labels
421 * for each interrupt vector entry.
422 *
423 * Interrupt vectors must be aligned on a 16 byte boundary.
424 * We align on a 32 byte cache line boundary for good measure.
425 */
426
427interrupt_base:
428 /* Critical Input Interrupt */
429 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
430
431 /* Machine Check Interrupt */
432#ifdef CONFIG_E200
433 /* no RFMCI, MCSRRs on E200 */
434 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
435#else
436 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
437#endif
438
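/*
 * The hand-coded storage and TLB-miss handlers in this file stash
 * r10-r13 and CR in SPRG0/1/4W/5W/7W on entry, so their fast paths
 * never have to touch the kernel stack.
 */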
439 /* Data Storage Interrupt */
440 START_EXCEPTION(DataStorage)
441 mtspr SPRN_SPRG0, r10 /* Save some working registers */
442 mtspr SPRN_SPRG1, r11
443 mtspr SPRN_SPRG4W, r12
444 mtspr SPRN_SPRG5W, r13
445 mfcr r11
446 mtspr SPRN_SPRG7W, r11
447
448 /*
449      * Check if it was a store fault; if not, bail
450 * because a user tried to access a kernel or
451 * read-protected page. Otherwise, get the
452 * offending address and handle it.
453 */
454 mfspr r10, SPRN_ESR
455 andis. r10, r10, ESR_ST@h
456 beq 2f
457
458 mfspr r10, SPRN_DEAR /* Get faulting address */
459
460 /* If we are faulting a kernel address, we have to use the
461 * kernel page tables.
462 */
463 lis r11, TASK_SIZE@h
464 ori r11, r11, TASK_SIZE@l
465 cmplw 0, r10, r11
466 bge 2f
467
468 /* Get the PGD for the current thread */
4693:
470 mfspr r11,SPRN_SPRG3
471 lwz r11,PGDIR(r11)
4724:
473 FIND_PTE
474
475 /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
476 andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
477 cmpwi 0, r13, _PAGE_RW|_PAGE_USER
478 bne 2f /* Bail if not */
479
480 /* Update 'changed'. */
481 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
482 stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
483
484     /* MAS2 is not updated, as the entry already exists in the TLB; this
485      * fault is taken only to detect a state transition (e.g. COW -> DIRTY).
486      */
487 andi. r11, r11, _PAGE_HWEXEC
488 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
489 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
490
491 /* update search PID in MAS6, AS = 0 */
492 mfspr r12, SPRN_PID0
493 slwi r12, r12, 16
494 mtspr SPRN_MAS6, r12
495
496 /* find the TLB index that caused the fault. It has to be here. */
497 tlbsx 0, r10
498
499 /* only update the perm bits, assume the RPN is fine */
500 mfspr r12, SPRN_MAS3
501 rlwimi r12, r11, 0, 20, 31
502 mtspr SPRN_MAS3,r12
503 tlbwe
504
505 /* Done...restore registers and get out of here. */
506 mfspr r11, SPRN_SPRG7R
507 mtcr r11
508 mfspr r13, SPRN_SPRG5R
509 mfspr r12, SPRN_SPRG4R
510 mfspr r11, SPRN_SPRG1
511 mfspr r10, SPRN_SPRG0
512 rfi /* Force context change */
513
5142:
515 /*
516 * The bailout. Restore registers to pre-exception conditions
517 * and call the heavyweights to help us out.
518 */
519 mfspr r11, SPRN_SPRG7R
520 mtcr r11
521 mfspr r13, SPRN_SPRG5R
522 mfspr r12, SPRN_SPRG4R
523 mfspr r11, SPRN_SPRG1
524 mfspr r10, SPRN_SPRG0
525 b data_access
526
527 /* Instruction Storage Interrupt */
528 INSTRUCTION_STORAGE_EXCEPTION
529
530 /* External Input Interrupt */
531 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
532
533 /* Alignment Interrupt */
534 ALIGNMENT_EXCEPTION
535
536 /* Program Interrupt */
537 PROGRAM_EXCEPTION
538
539 /* Floating Point Unavailable Interrupt */
540#ifdef CONFIG_PPC_FPU
541 FP_UNAVAILABLE_EXCEPTION
542#else
543#ifdef CONFIG_E200
544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */
545 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
546#else
547 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
548#endif
549#endif
550
551 /* System Call Interrupt */
552 START_EXCEPTION(SystemCall)
553 NORMAL_EXCEPTION_PROLOG
554 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
555
556 /* Auxillary Processor Unavailable Interrupt */
557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
558
559 /* Decrementer Interrupt */
560 DECREMENTER_EXCEPTION
561
562     /* Fixed Interval Timer Interrupt */
563 /* TODO: Add FIT support */
564 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
565
566 /* Watchdog Timer Interrupt */
567#ifdef CONFIG_BOOKE_WDT
568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
569#else
570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
571#endif
572
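/*
 * The two TLB miss handlers below walk the Linux page tables in
 * software: they pick the kernel or current pgdir based on the
 * faulting address, locate the PTE with FIND_PTE, mark it accessed,
 * and branch to finish_tlb_load to write the hardware TLB entry.
 */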
573 /* Data TLB Error Interrupt */
574 START_EXCEPTION(DataTLBError)
575 mtspr SPRN_SPRG0, r10 /* Save some working registers */
576 mtspr SPRN_SPRG1, r11
577 mtspr SPRN_SPRG4W, r12
578 mtspr SPRN_SPRG5W, r13
579 mfcr r11
580 mtspr SPRN_SPRG7W, r11
581 mfspr r10, SPRN_DEAR /* Get faulting address */
582
583 /* If we are faulting a kernel address, we have to use the
584 * kernel page tables.
585 */
586 lis r11, TASK_SIZE@h
587 ori r11, r11, TASK_SIZE@l
588 cmplw 5, r10, r11
589 blt 5, 3f
590 lis r11, swapper_pg_dir@h
591 ori r11, r11, swapper_pg_dir@l
592
593 mfspr r12,SPRN_MAS1 /* Set TID to 0 */
594 rlwinm r12,r12,0,16,1
595 mtspr SPRN_MAS1,r12
596
597 b 4f
598
599 /* Get the PGD for the current thread */
6003:
601 mfspr r11,SPRN_SPRG3
602 lwz r11,PGDIR(r11)
603
6044:
605 FIND_PTE
606 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
607 beq 2f /* Bail if not present */
608
609#ifdef CONFIG_PTE_64BIT
610 lwz r13, 0(r12)
611#endif
612 ori r11, r11, _PAGE_ACCESSED
613 stw r11, PTE_FLAGS_OFFSET(r12)
614
615 /* Jump to common tlb load */
616 b finish_tlb_load
6172:
618 /* The bailout. Restore registers to pre-exception conditions
619 * and call the heavyweights to help us out.
620 */
621 mfspr r11, SPRN_SPRG7R
622 mtcr r11
623 mfspr r13, SPRN_SPRG5R
624 mfspr r12, SPRN_SPRG4R
625 mfspr r11, SPRN_SPRG1
626 mfspr r10, SPRN_SPRG0
627 b data_access
628
629 /* Instruction TLB Error Interrupt */
630 /*
631 * Nearly the same as above, except we get our
632      * information from different registers and bail out
633 * to a different point.
634 */
635 START_EXCEPTION(InstructionTLBError)
636 mtspr SPRN_SPRG0, r10 /* Save some working registers */
637 mtspr SPRN_SPRG1, r11
638 mtspr SPRN_SPRG4W, r12
639 mtspr SPRN_SPRG5W, r13
640 mfcr r11
641 mtspr SPRN_SPRG7W, r11
642 mfspr r10, SPRN_SRR0 /* Get faulting address */
643
644 /* If we are faulting a kernel address, we have to use the
645 * kernel page tables.
646 */
647 lis r11, TASK_SIZE@h
648 ori r11, r11, TASK_SIZE@l
649 cmplw 5, r10, r11
650 blt 5, 3f
651 lis r11, swapper_pg_dir@h
652 ori r11, r11, swapper_pg_dir@l
653
654 mfspr r12,SPRN_MAS1 /* Set TID to 0 */
655 rlwinm r12,r12,0,16,1
656 mtspr SPRN_MAS1,r12
657
658 b 4f
659
660 /* Get the PGD for the current thread */
6613:
662 mfspr r11,SPRN_SPRG3
663 lwz r11,PGDIR(r11)
664
6654:
666 FIND_PTE
667 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
668 beq 2f /* Bail if not present */
669
670#ifdef CONFIG_PTE_64BIT
671 lwz r13, 0(r12)
672#endif
673 ori r11, r11, _PAGE_ACCESSED
674 stw r11, PTE_FLAGS_OFFSET(r12)
675
676 /* Jump to common TLB load point */
677 b finish_tlb_load
678
6792:
680 /* The bailout. Restore registers to pre-exception conditions
681 * and call the heavyweights to help us out.
682 */
683 mfspr r11, SPRN_SPRG7R
684 mtcr r11
685 mfspr r13, SPRN_SPRG5R
686 mfspr r12, SPRN_SPRG4R
687 mfspr r11, SPRN_SPRG1
688 mfspr r10, SPRN_SPRG0
689 b InstructionStorage
690
691#ifdef CONFIG_SPE
692 /* SPE Unavailable */
693 START_EXCEPTION(SPEUnavailable)
694 NORMAL_EXCEPTION_PROLOG
695 bne load_up_spe
696 addi r3,r1,STACK_FRAME_OVERHEAD
697 EXC_XFER_EE_LITE(0x2010, KernelSPE)
698#else
699 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
700#endif /* CONFIG_SPE */
701
702 /* SPE Floating Point Data */
703#ifdef CONFIG_SPE
704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
705#else
706 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
707#endif /* CONFIG_SPE */
708
709 /* SPE Floating Point Round */
710 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
711
712 /* Performance Monitor */
713 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
714
715
716 /* Debug Interrupt */
717 DEBUG_EXCEPTION
718
719/*
720 * Local functions
721 */
722
723 /*
724 * Data TLB exceptions will bail out to this point
725 * if they can't resolve the lightweight TLB fault.
726 */
727data_access:
728 NORMAL_EXCEPTION_PROLOG
729 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
730 stw r5,_ESR(r11)
731 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
732 andis. r10,r5,(ESR_ILK|ESR_DLK)@h
733 bne 1f
734 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
7351:
736 addi r3,r1,STACK_FRAME_OVERHEAD
737 EXC_XFER_EE_LITE(0x0300, CacheLockingException)
738
739/*
740 *
741 * Both the instruction and data TLB misses get to this
742 * point to load the TLB.
743 * r10 - EA of fault
744 * r11 - TLB (info from Linux PTE)
745 * r12, r13 - available to use
746 * CR5 - results of addr < TASK_SIZE
747 * MAS0, MAS1 - loaded with proper value when we get here
748 * MAS2, MAS3 - will need additional info from Linux PTE
749 * Upon exit, we reload everything and RFI.
750 */
751finish_tlb_load:
752 /*
753 * We set execute, because we don't have the granularity to
754 * properly set this at the page level (Linux problem).
755      * Many of these bits are software only.  Bits we don't set
756      * here are assumed (as they properly should be) to have the appropriate value.
757 */
758
759 mfspr r12, SPRN_MAS2
760#ifdef CONFIG_PTE_64BIT
761 rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */
762#else
763 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
764#endif
765 mtspr SPRN_MAS2, r12
766
767 bge 5, 1f
768
769 /* is user addr */
770 andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
771 andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
772 srwi r10, r12, 1
773 or r12, r12, r10 /* Copy user perms into supervisor */
774 iseleq r12, 0, r12
775 b 2f
776
777 /* is kernel addr */
7781: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */
779 ori r12, r12, (MAS3_SX | MAS3_SR)
780
781#ifdef CONFIG_PTE_64BIT
7822: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
783 rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
784 mtspr SPRN_MAS3, r12
785BEGIN_FTR_SECTION
786 srwi r10, r13, 8 /* grab RPN[8:31] */
787 mtspr SPRN_MAS7, r10
788END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
789#else
7902: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
791 mtspr SPRN_MAS3, r11
792#endif
793#ifdef CONFIG_E200
794 /* Round robin TLB1 entries assignment */
795 mfspr r12, SPRN_MAS0
796
797 /* Extract TLB1CFG(NENTRY) */
798 mfspr r11, SPRN_TLB1CFG
799 andi. r11, r11, 0xfff
800
801 /* Extract MAS0(NV) */
802 andi. r13, r12, 0xfff
803 addi r13, r13, 1
804 cmpw 0, r13, r11
805 addi r12, r12, 1
806
807 /* check if we need to wrap */
808 blt 7f
809
810 /* wrap back to first free tlbcam entry */
811 lis r13, tlbcam_index@ha
812 lwz r13, tlbcam_index@l(r13)
813 rlwimi r12, r13, 0, 20, 31
8147:
815 mtspr SPRN_MAS0,r12
816#endif /* CONFIG_E200 */
817
818 tlbwe
819
820 /* Done...restore registers and get out of here. */
821 mfspr r11, SPRN_SPRG7R
822 mtcr r11
823 mfspr r13, SPRN_SPRG5R
824 mfspr r12, SPRN_SPRG4R
825 mfspr r11, SPRN_SPRG1
826 mfspr r10, SPRN_SPRG0
827 rfi /* Force context change */
828
829#ifdef CONFIG_SPE
830/* Note that the SPE support is closely modeled after the AltiVec
831 * support. Changes to one are likely to be applicable to the
832 * other! */
833load_up_spe:
834/*
835 * Disable SPE for the task which had SPE previously,
836 * and save its SPE registers in its thread_struct.
837 * Enables SPE for use in the kernel on return.
838 * On SMP we know the SPE units are free, since we give them up on every
839 * context switch.  -- Kumar
840 */
841 mfmsr r5
842 oris r5,r5,MSR_SPE@h
843 mtmsr r5 /* enable use of SPE now */
844 isync
845/*
846 * For SMP, we don't do lazy SPE switching because it just gets too
847 * horrendously complex, especially when a task switches from one CPU
848 * to another. Instead we call giveup_spe in switch_to.
849 */
850#ifndef CONFIG_SMP
851 lis r3,last_task_used_spe@ha
852 lwz r4,last_task_used_spe@l(r3)
853 cmpi 0,r4,0
854 beq 1f
855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
856 SAVE_32EVRS(0,r10,r4)
857 evxor evr10, evr10, evr10 /* clear out evr10 */
858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
859 li r5,THREAD_ACC
860 evstddx evr10, r4, r5 /* save off accumulator */
861 lwz r5,PT_REGS(r4)
862 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
863 lis r10,MSR_SPE@h
864 andc r4,r4,r10 /* disable SPE for previous task */
865 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8661:
867#endif /* CONFIG_SMP */
868 /* enable use of SPE after return */
869 oris r9,r9,MSR_SPE@h
870 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
871 li r4,1
872 li r10,THREAD_ACC
873 stw r4,THREAD_USED_SPE(r5)
874 evlddx evr4,r10,r5
875 evmra evr4,evr4
876 REST_32EVRS(0,r10,r5)
877#ifndef CONFIG_SMP
878 subi r4,r5,THREAD
879 stw r4,last_task_used_spe@l(r3)
880#endif /* CONFIG_SMP */
881 /* restore registers and return */
8822: REST_4GPRS(3, r11)
883 lwz r10,_CCR(r11)
884 REST_GPR(1, r11)
885 mtcr r10
886 lwz r10,_LINK(r11)
887 mtlr r10
888 REST_GPR(10, r11)
889 mtspr SPRN_SRR1,r9
890 mtspr SPRN_SRR0,r12
891 REST_GPR(9, r11)
892 REST_GPR(12, r11)
893 lwz r11,GPR11(r11)
894 SYNC
895 rfi
896
897/*
898 * SPE unavailable trap from kernel - print a message, but let
899 * the task use SPE in the kernel until it returns to user mode.
900 */
901KernelSPE:
902 lwz r3,_MSR(r1)
903 oris r3,r3,MSR_SPE@h
904 stw r3,_MSR(r1) /* enable use of SPE after return */
905 lis r3,87f@h
906 ori r3,r3,87f@l
907 mr r4,r2 /* current */
908 lwz r5,_NIP(r1)
909 bl printk
910 b ret_from_except
91187: .string "SPE used in kernel (task=%p, pc=%x) \n"
912 .align 4,0
913
914#endif /* CONFIG_SPE */
915
916/*
917 * Global functions
918 */
919
920/*
921 * extern void loadcam_entry(unsigned int index)
922 *
923 * Load TLBCAM[index] entry into the L2 CAM MMU
924 */
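/*
 * Note: the code below assumes each TLBCAM[] entry is 20 bytes, with
 * the MAS0-MAS3 images to be programmed at offsets 0, 4, 8 and 12
 * (hence the mulli by 20 and the lwz offsets).
 */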
925_GLOBAL(loadcam_entry)
926 lis r4,TLBCAM@ha
927 addi r4,r4,TLBCAM@l
928 mulli r5,r3,20
929 add r3,r5,r4
930 lwz r4,0(r3)
931 mtspr SPRN_MAS0,r4
932 lwz r4,4(r3)
933 mtspr SPRN_MAS1,r4
934 lwz r4,8(r3)
935 mtspr SPRN_MAS2,r4
936 lwz r4,12(r3)
937 mtspr SPRN_MAS3,r4
938 tlbwe
939 isync
940 blr
941
942/*
943 * extern void giveup_altivec(struct task_struct *prev)
944 *
945 * The e500 core does not have an AltiVec unit.
946 */
947_GLOBAL(giveup_altivec)
948 blr
949
950#ifdef CONFIG_SPE
951/*
952 * extern void giveup_spe(struct task_struct *prev)
953 *
954 */
955_GLOBAL(giveup_spe)
956 mfmsr r5
957 oris r5,r5,MSR_SPE@h
958 SYNC
959 mtmsr r5 /* enable use of SPE now */
960 isync
961 cmpi 0,r3,0
962 beqlr- /* if no previous owner, done */
963 addi r3,r3,THREAD /* want THREAD of task */
964 lwz r5,PT_REGS(r3)
965 cmpi 0,r5,0
966 SAVE_32EVRS(0, r4, r3)
967 evxor evr6, evr6, evr6 /* clear out evr6 */
968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
969 li r4,THREAD_ACC
970 evstddx evr6, r4, r3 /* save off accumulator */
971 mfspr r6,SPRN_SPEFSCR
972 stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
973 beq 1f
974 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
975 lis r3,MSR_SPE@h
976 andc r4,r4,r3 /* disable SPE for previous task */
977 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9781:
979#ifndef CONFIG_SMP
980 li r5,0
981 lis r4,last_task_used_spe@ha
982 stw r5,last_task_used_spe@l(r4)
983#endif /* CONFIG_SMP */
984 blr
985#endif /* CONFIG_SPE */
986
987/*
988 * extern void giveup_fpu(struct task_struct *prev)
989 *
990 * Not all FSL Book-E cores have an FPU
991 */
992#ifndef CONFIG_PPC_FPU
993_GLOBAL(giveup_fpu)
994 blr
995#endif
996
997/*
998 * extern void abort(void)
999 *
1000 * At present, this routine just applies a system reset.
1001 */
1002_GLOBAL(abort)
1003 li r13,0
1004 mtspr SPRN_DBCR0,r13 /* disable all debug events */
1005 mfmsr r13
1006 ori r13,r13,MSR_DE@l /* Enable Debug Events */
1007 mtmsr r13
1008 mfspr r13,SPRN_DBCR0
1009 lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
1010 mtspr SPRN_DBCR0,r13
1011
1012_GLOBAL(set_context)
1013
1014#ifdef CONFIG_BDI_SWITCH
1015 /* Context switch the PTE pointer for the Abatron BDI2000.
1016 * The PGDIR is the second parameter.
1017 */
1018 lis r5, abatron_pteptrs@h
1019 ori r5, r5, abatron_pteptrs@l
1020 stw r4, 0x4(r5)
1021#endif
1022 mtspr SPRN_PID,r3
1023 isync /* Force context change */
1024 blr
1025
1026/*
1027 * We put a few things here that have to be page-aligned. This stuff
1028 * goes at the beginning of the data segment, which is page-aligned.
1029 */
1030 .data
1031 .align 12
1032 .globl sdata
1033sdata:
1034 .globl empty_zero_page
1035empty_zero_page:
1036 .space 4096
1037 .globl swapper_pg_dir
1038swapper_pg_dir:
1039 .space 4096
1040
1041/* Reserve 4k for the critical exception stack & 4k for the machine
1042 * check stack per CPU for kernel mode exceptions */
1043 .section .bss
1044 .align 12
1045exception_stack_bottom:
1046 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1047 .globl exception_stack_top
1048exception_stack_top:
1049
1050/*
1051 * This space gets a copy of optional info passed to us by the bootstrap
1052 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1053 */
1054 .globl cmd_line
1055cmd_line:
1056 .space 512
1057
1058/*
1059 * Room for two PTE pointers, usually the kernel and current user pointers
1060 * to their respective root page table.
1061 */
1062abatron_pteptrs:
1063 .space 8
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
new file mode 100644
index 000000000000..444fdcc769f1
--- /dev/null
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -0,0 +1,233 @@
1/*
2 * This file contains the power_save function for 6xx & 7xxx CPUs
3 * rewritten in assembler
4 *
5 * Warning! This code assumes that if your machine has a 750fx
6 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
7 * If this is not the case, some additional changes will have to
8 * be made to check a runtime variable (a bit like powersave-nap).
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/threads.h>
18#include <asm/reg.h>
19#include <asm/page.h>
20#include <asm/cputable.h>
21#include <asm/thread_info.h>
22#include <asm/ppc_asm.h>
23#include <asm/asm-offsets.h>
24
25#undef DEBUG
26
27 .text
28
29/*
30 * Init idle, called at early CPU setup time from head.S for each CPU
31 * Make sure no remnant of NAP mode remains in HID0, save default
32 * values for some CPU specific registers. Called with r24
33 * containing CPU number and r3 reloc offset
34 */
35_GLOBAL(init_idle_6xx)
36BEGIN_FTR_SECTION
37 mfspr r4,SPRN_HID0
38 rlwinm r4,r4,0,10,8 /* Clear NAP */
39 mtspr SPRN_HID0, r4
40 b 1f
41END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
42 blr
431:
44 slwi r5,r24,2
45 add r5,r5,r3
46BEGIN_FTR_SECTION
47 mfspr r4,SPRN_MSSCR0
48 addis r6,r5, nap_save_msscr0@ha
49 stw r4,nap_save_msscr0@l(r6)
50END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
51BEGIN_FTR_SECTION
52 mfspr r4,SPRN_HID1
53 addis r6,r5,nap_save_hid1@ha
54 stw r4,nap_save_hid1@l(r6)
55END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
56 blr
57
58/*
59 * Here is the power_save_6xx function. This could eventually be
60 * split into several functions & changing the function pointer
61 * depending on the various features.
62 */
63_GLOBAL(ppc6xx_idle)
64 /* Check if we can nap or doze, put HID0 mask in r3
65 */
66 lis r3, 0
67BEGIN_FTR_SECTION
68 lis r3,HID0_DOZE@h
69END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
70BEGIN_FTR_SECTION
71 /* We must dynamically check for the NAP feature as it
72 * can be cleared by CPU init after the fixups are done
73 */
74 lis r4,cur_cpu_spec@ha
75 lwz r4,cur_cpu_spec@l(r4)
76 lwz r4,CPU_SPEC_FEATURES(r4)
77 andi. r0,r4,CPU_FTR_CAN_NAP
78 beq 1f
79 /* Now check if user or arch enabled NAP mode */
80 lis r4,powersave_nap@ha
81 lwz r4,powersave_nap@l(r4)
82 cmpwi 0,r4,0
83 beq 1f
84 lis r3,HID0_NAP@h
851:
86END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
87 cmpwi 0,r3,0
88 beqlr
89
90 /* Clear MSR:EE */
91 mfmsr r7
92 rlwinm r0,r7,0,17,15
93 mtmsr r0
94
95 /* Check current_thread_info()->flags */
96 rlwinm r4,r1,0,0,18
97 lwz r4,TI_FLAGS(r4)
98 andi. r0,r4,_TIF_NEED_RESCHED
99 beq 1f
100 mtmsr r7 /* out of line this ? */
101 blr
1021:
103 /* Some pre-nap cleanups needed on some CPUs */
104 andis. r0,r3,HID0_NAP@h
105 beq 2f
106BEGIN_FTR_SECTION
107 /* Disable L2 prefetch on some 745x and try to ensure
108 * L2 prefetch engines are idle. As explained by errata
109      * text, we can't be sure they are; we just hope very hard
110      * that it will be enough.  At least I noticed Apple
111 * doesn't even bother doing the dcbf's here...
112 */
113 mfspr r4,SPRN_MSSCR0
114 rlwinm r4,r4,0,0,29
115 sync
116 mtspr SPRN_MSSCR0,r4
117 sync
118 isync
119 lis r4,KERNELBASE@h
120 dcbf 0,r4
121 dcbf 0,r4
122 dcbf 0,r4
123 dcbf 0,r4
124END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
125#ifdef DEBUG
126 lis r6,nap_enter_count@ha
127 lwz r4,nap_enter_count@l(r6)
128 addi r4,r4,1
129 stw r4,nap_enter_count@l(r6)
130#endif
1312:
132BEGIN_FTR_SECTION
133 /* Go to low speed mode on some 750FX */
134 lis r4,powersave_lowspeed@ha
135 lwz r4,powersave_lowspeed@l(r4)
136 cmpwi 0,r4,0
137 beq 1f
138 mfspr r4,SPRN_HID1
139 oris r4,r4,0x0001
140 mtspr SPRN_HID1,r4
1411:
142END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
143
144 /* Go to NAP or DOZE now */
145 mfspr r4,SPRN_HID0
146 lis r5,(HID0_NAP|HID0_SLEEP)@h
147BEGIN_FTR_SECTION
148 oris r5,r5,HID0_DOZE@h
149END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
150 andc r4,r4,r5
151 or r4,r4,r3
152BEGIN_FTR_SECTION
153 oris r4,r4,HID0_DPM@h /* that should be done once for all */
154END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
155 mtspr SPRN_HID0,r4
156BEGIN_FTR_SECTION
157 DSSALL
158 sync
159END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
160     ori     r7,r7,MSR_EE /* Could be omitted (already set) */
161 oris r7,r7,MSR_POW@h
162 sync
163 isync
164 mtmsr r7
165 isync
166 sync
167 blr
168
169/*
170 * Return from NAP/DOZE mode, restore some CPU specific registers,
171 * we are called with DR/IR still off and r2 containing physical
172 * address of current.
173 */
174_GLOBAL(power_save_6xx_restore)
175 mfspr r11,SPRN_HID0
176 rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
177 cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
178BEGIN_FTR_SECTION
179 rlwinm r11,r11,0,9,7 /* Clear DOZE */
180END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
181 mtspr SPRN_HID0, r11
182
183#ifdef DEBUG
184 beq cr1,1f
185 lis r11,(nap_return_count-KERNELBASE)@ha
186 lwz r9,nap_return_count@l(r11)
187 addi r9,r9,1
188 stw r9,nap_return_count@l(r11)
1891:
190#endif
191
192 rlwinm r9,r1,0,0,18
193 tophys(r9,r9)
194 lwz r11,TI_CPU(r9)
195 slwi r11,r11,2
196     /* Todo: make sure all these are in the same page
197 * and load r22 (@ha part + CPU offset) only once
198 */
199BEGIN_FTR_SECTION
200 beq cr1,1f
201 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
202 lwz r9,nap_save_msscr0@l(r9)
203 mtspr SPRN_MSSCR0, r9
204 sync
205 isync
2061:
207END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
208BEGIN_FTR_SECTION
209 addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
210 lwz r9,nap_save_hid1@l(r9)
211 mtspr SPRN_HID1, r9
212END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
213 b transfer_to_handler_cont
214
215 .data
216
217_GLOBAL(nap_save_msscr0)
218 .space 4*NR_CPUS
219
220_GLOBAL(nap_save_hid1)
221 .space 4*NR_CPUS
222
223_GLOBAL(powersave_nap)
224 .long 0
225_GLOBAL(powersave_lowspeed)
226 .long 0
227
228#ifdef DEBUG
229_GLOBAL(nap_enter_count)
230 .space 4
231_GLOBAL(nap_return_count)
232 .space 4
233#endif
diff --git a/arch/ppc64/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index ca02afe2a795..1494e2f177f7 100644
--- a/arch/ppc64/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -39,13 +39,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
39 * can be cleared by CPU init after the fixups are done 39 * can be cleared by CPU init after the fixups are done
40 */ 40 */
41 LOADBASE(r3,cur_cpu_spec) 41 LOADBASE(r3,cur_cpu_spec)
42 ld r4,cur_cpu_spec@l(r3) 42 ld r4,OFF(cur_cpu_spec)(r3)
43 ld r4,CPU_SPEC_FEATURES(r4) 43 ld r4,CPU_SPEC_FEATURES(r4)
44 andi. r0,r4,CPU_FTR_CAN_NAP 44 andi. r0,r4,CPU_FTR_CAN_NAP
45 beqlr 45 beqlr
46 /* Now check if user or arch enabled NAP mode */ 46 /* Now check if user or arch enabled NAP mode */
47 LOADBASE(r3,powersave_nap) 47 LOADBASE(r3,powersave_nap)
48 lwz r4,powersave_nap@l(r3) 48 lwz r4,OFF(powersave_nap)(r3)
49 cmpwi 0,r4,0 49 cmpwi 0,r4,0
50 beqlr 50 beqlr
51 51
@@ -63,8 +63,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
63 beq 1f 63 beq 1f
64 mtmsrd r7 /* out of line this ? */ 64 mtmsrd r7 /* out of line this ? */
65 blr 65 blr
661: 661:
67 /* Go to NAP now */ 67 /* Go to NAP now */
68BEGIN_FTR_SECTION 68BEGIN_FTR_SECTION
69 DSSALL 69 DSSALL
70 sync 70 sync
@@ -76,4 +76,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
76 isync 76 isync
77 sync 77 sync
78 blr 78 blr
79
diff --git a/arch/ppc64/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index 941043ae040f..941043ae040f 100644
--- a/arch/ppc64/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
diff --git a/arch/ppc64/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index b81de286df5e..b81de286df5e 100644
--- a/arch/ppc64/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
new file mode 100644
index 000000000000..3bedb532aed9
--- /dev/null
+++ b/arch/powerpc/kernel/misc_32.S
@@ -0,0 +1,1037 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 */
14
15#include <linux/config.h>
16#include <linux/sys.h>
17#include <asm/unistd.h>
18#include <asm/errno.h>
19#include <asm/reg.h>
20#include <asm/page.h>
21#include <asm/cache.h>
22#include <asm/cputable.h>
23#include <asm/mmu.h>
24#include <asm/ppc_asm.h>
25#include <asm/thread_info.h>
26#include <asm/asm-offsets.h>
27
28 .text
29
30 .align 5
31_GLOBAL(__delay)
32 cmpwi 0,r3,0
33 mtctr r3
34 beqlr
351: bdnz 1b
36 blr
37
38/*
39 * This returns the high 64 bits of the product of two 64-bit numbers.
40 */
41_GLOBAL(mulhdu)
42 cmpwi r6,0
43 cmpwi cr1,r3,0
44 mr r10,r4
45 mulhwu r4,r4,r5
46 beq 1f
47 mulhwu r0,r10,r6
48 mullw r7,r10,r5
49 addc r7,r0,r7
50 addze r4,r4
511: beqlr cr1 /* all done if high part of A is 0 */
52 mr r10,r3
53 mullw r9,r3,r5
54 mulhwu r3,r3,r5
55 beq 2f
56 mullw r0,r10,r6
57 mulhwu r8,r10,r6
58 addc r7,r0,r7
59 adde r4,r4,r8
60 addze r3,r3
612: addc r4,r4,r9
62 addze r3,r3
63 blr
64
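/*
 * Semantically (an illustrative C sketch only, not kernel code): with
 * the 64-bit operands split across register pairs per the 32-bit ABI,
 * mulhdu returns the upper 64 bits of the 128-bit product.  The
 * assembly above is the same schoolbook decomposition, with the
 * partial products involving A's high word or B's low word skipped
 * when those words are zero:
 *
 *	unsigned long long mulhdu(unsigned long long a, unsigned long long b)
 *	{
 *		unsigned long long ah = a >> 32, al = a & 0xffffffffULL;
 *		unsigned long long bh = b >> 32, bl = b & 0xffffffffULL;
 *		unsigned long long mid = (al * bl >> 32)
 *					 + (ah * bl & 0xffffffffULL)
 *					 + (al * bh & 0xffffffffULL);
 *
 *		return ah * bh + (ah * bl >> 32) + (al * bh >> 32) + (mid >> 32);
 *	}
 */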
65/*
66 * Returns (address we're running at) - (address we were linked at)
67 * for use before the text and data are mapped to KERNELBASE.
68 */
69_GLOBAL(reloc_offset)
70 mflr r0
71 bl 1f
721: mflr r3
73 LOADADDR(r4,1b)
74 subf r3,r4,r3
75 mtlr r0
76 blr
77
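/*
 * The bl/mflr pair above yields the address the code is actually
 * executing at, while LOADADDR(r4,1b) produces the link-time address
 * of the same label; their difference is the relocation offset.
 */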
78/*
79 * add_reloc_offset(x) returns x + reloc_offset().
80 */
81_GLOBAL(add_reloc_offset)
82 mflr r0
83 bl 1f
841: mflr r5
85 LOADADDR(r4,1b)
86 subf r5,r4,r5
87 add r3,r3,r5
88 mtlr r0
89 blr
90
91/*
92 * sub_reloc_offset(x) returns x - reloc_offset().
93 */
94_GLOBAL(sub_reloc_offset)
95 mflr r0
96 bl 1f
971: mflr r5
98 lis r4,1b@ha
99 addi r4,r4,1b@l
100 subf r5,r4,r5
101 subf r3,r5,r3
102 mtlr r0
103 blr
104
105/*
106 * reloc_got2 runs through the .got2 section adding an offset
107 * to each entry.
108 */
109_GLOBAL(reloc_got2)
110 mflr r11
111 lis r7,__got2_start@ha
112 addi r7,r7,__got2_start@l
113 lis r8,__got2_end@ha
114 addi r8,r8,__got2_end@l
115 subf r8,r7,r8
116 srwi. r8,r8,2
117 beqlr
118 mtctr r8
119 bl 1f
1201: mflr r0
121 lis r4,1b@ha
122 addi r4,r4,1b@l
123 subf r0,r4,r0
124 add r7,r0,r7
1252: lwz r0,0(r7)
126 add r0,r0,r3
127 stw r0,0(r7)
128 addi r7,r7,4
129 bdnz 2b
130 mtlr r11
131 blr
132
133/*
134 * identify_cpu,
135 * called with r3 = data offset and r4 = CPU number
136 * doesn't change r3
137 */
138_GLOBAL(identify_cpu)
139 addis r8,r3,cpu_specs@ha
140 addi r8,r8,cpu_specs@l
141 mfpvr r7
1421:
143 lwz r5,CPU_SPEC_PVR_MASK(r8)
144 and r5,r5,r7
145 lwz r6,CPU_SPEC_PVR_VALUE(r8)
146 cmplw 0,r6,r5
147 beq 1f
148 addi r8,r8,CPU_SPEC_ENTRY_SIZE
149 b 1b
1501:
151 addis r6,r3,cur_cpu_spec@ha
152 addi r6,r6,cur_cpu_spec@l
153 sub r8,r8,r3
154 stw r8,0(r6)
155 blr
156
157/*
158 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
159 * and writes nops over sections of code that don't apply to this cpu.
160 * r3 = data offset (not changed)
161 */
162_GLOBAL(do_cpu_ftr_fixups)
163 /* Get CPU 0 features */
164 addis r6,r3,cur_cpu_spec@ha
165 addi r6,r6,cur_cpu_spec@l
166 lwz r4,0(r6)
167 add r4,r4,r3
168 lwz r4,CPU_SPEC_FEATURES(r4)
169
170 /* Get the fixup table */
171 addis r6,r3,__start___ftr_fixup@ha
172 addi r6,r6,__start___ftr_fixup@l
173 addis r7,r3,__stop___ftr_fixup@ha
174 addi r7,r7,__stop___ftr_fixup@l
175
176 /* Do the fixup */
1771: cmplw 0,r6,r7
178 bgelr
179 addi r6,r6,16
180 lwz r8,-16(r6) /* mask */
181 and r8,r8,r4
182 lwz r9,-12(r6) /* value */
183 cmplw 0,r8,r9
184 beq 1b
185 lwz r8,-8(r6) /* section begin */
186 lwz r9,-4(r6) /* section end */
187 subf. r9,r8,r9
188 beq 1b
189 /* write nops over the section of code */
190 /* todo: if large section, add a branch at the start of it */
191 srwi r9,r9,2
192 mtctr r9
193 add r8,r8,r3
194 lis r0,0x60000000@h /* nop */
1953: stw r0,0(r8)
196 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
197 beq 2f
198 dcbst 0,r8 /* suboptimal, but simpler */
199 sync
200 icbi 0,r8
2012: addi r8,r8,4
202 bdnz 3b
203 sync /* additional sync needed on g4 */
204 isync
205 b 1b
206
207/*
208 * call_setup_cpu - call the setup_cpu function for this cpu
209 * r3 = data offset, r24 = cpu number
210 *
211 * Setup function is called with:
212 * r3 = data offset
213 * r4 = ptr to CPU spec (relocated)
214 */
215_GLOBAL(call_setup_cpu)
216 addis r4,r3,cur_cpu_spec@ha
217 addi r4,r4,cur_cpu_spec@l
218 lwz r4,0(r4)
219 add r4,r4,r3
220 lwz r5,CPU_SPEC_SETUP(r4)
221 cmpi 0,r5,0
222 add r5,r5,r3
223 beqlr
224 mtctr r5
225 bctr
226
227#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
228
229/* This gets called by via-pmu.c to switch the PLL selection
230 * on 750fx CPU. This function should really be moved to some
231 * other place (as should most of the cpufreq code in via-pmu).
232 */
233_GLOBAL(low_choose_750fx_pll)
234 /* Clear MSR:EE */
235 mfmsr r7
236 rlwinm r0,r7,0,17,15
237 mtmsr r0
238
239 /* If switching to PLL1, disable HID0:BTIC */
240 cmplwi cr0,r3,0
241 beq 1f
242 mfspr r5,SPRN_HID0
243 rlwinm r5,r5,0,27,25
244 sync
245 mtspr SPRN_HID0,r5
246 isync
247 sync
248
2491:
250 /* Calc new HID1 value */
251 mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */
252 rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */
253     rlwinm  r4,r4,0,16,14   /* Could I have used rlwimi here? */
254 or r4,r4,r5
255 mtspr SPRN_HID1,r4
256
257 /* Store new HID1 image */
258 rlwinm r6,r1,0,0,18
259 lwz r6,TI_CPU(r6)
260 slwi r6,r6,2
261 addis r6,r6,nap_save_hid1@ha
262 stw r4,nap_save_hid1@l(r6)
263
264 /* If switching to PLL0, enable HID0:BTIC */
265 cmplwi cr0,r3,0
266 bne 1f
267 mfspr r5,SPRN_HID0
268 ori r5,r5,HID0_BTIC
269 sync
270 mtspr SPRN_HID0,r5
271 isync
272 sync
273
2741:
275 /* Return */
276 mtmsr r7
277 blr
278
279_GLOBAL(low_choose_7447a_dfs)
280 /* Clear MSR:EE */
281 mfmsr r7
282 rlwinm r0,r7,0,17,15
283 mtmsr r0
284
285 /* Calc new HID1 value */
286 mfspr r4,SPRN_HID1
287 insrwi r4,r3,1,9 /* insert parameter into bit 9 */
288 sync
289 mtspr SPRN_HID1,r4
290 sync
291 isync
292
293 /* Return */
294 mtmsr r7
295 blr
296
297#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
298
299/*
300 * Clear the MSR bits set in nmask, then "or" on some values:
301 * _nmask_and_or_msr(nmask, value_to_or)
302 */
303_GLOBAL(_nmask_and_or_msr)
304 mfmsr r0 /* Get current msr */
305 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
306 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
307 SYNC /* Some chip revs have problems here... */
308 mtmsr r0 /* Update machine state */
309 isync
310 blr /* Done */
311
312
313/*
314 * Flush MMU TLB
315 */
316_GLOBAL(_tlbia)
317#if defined(CONFIG_40x)
318 sync /* Flush to memory before changing mapping */
319 tlbia
320 isync /* Flush shadow TLB */
321#elif defined(CONFIG_44x)
322 li r3,0
323 sync
324
325 /* Load high watermark */
326 lis r4,tlb_44x_hwater@ha
327 lwz r5,tlb_44x_hwater@l(r4)
328
3291: tlbwe r3,r3,PPC44x_TLB_PAGEID
330 addi r3,r3,1
331 cmpw 0,r3,r5
332 ble 1b
333
334 isync
335#elif defined(CONFIG_FSL_BOOKE)
336 /* Invalidate all entries in TLB0 */
337 li r3, 0x04
338 tlbivax 0,3
339 /* Invalidate all entries in TLB1 */
340 li r3, 0x0c
341 tlbivax 0,3
342 /* Invalidate all entries in TLB2 */
343 li r3, 0x14
344 tlbivax 0,3
345 /* Invalidate all entries in TLB3 */
346 li r3, 0x1c
347 tlbivax 0,3
348 msync
349#ifdef CONFIG_SMP
350 tlbsync
351#endif /* CONFIG_SMP */
352#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
353#if defined(CONFIG_SMP)
354 rlwinm r8,r1,0,0,18
355 lwz r8,TI_CPU(r8)
356 oris r8,r8,10
357 mfmsr r10
358 SYNC
359 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
360 rlwinm r0,r0,0,28,26 /* clear DR */
361 mtmsr r0
362 SYNC_601
363 isync
364 lis r9,mmu_hash_lock@h
365 ori r9,r9,mmu_hash_lock@l
366 tophys(r9,r9)
36710: lwarx r7,0,r9
368 cmpwi 0,r7,0
369 bne- 10b
370 stwcx. r8,0,r9
371 bne- 10b
372 sync
373 tlbia
374 sync
375 TLBSYNC
376 li r0,0
377 stw r0,0(r9) /* clear mmu_hash_lock */
378 mtmsr r10
379 SYNC_601
380 isync
381#else /* CONFIG_SMP */
382 sync
383 tlbia
384 sync
385#endif /* CONFIG_SMP */
386#endif /* ! defined(CONFIG_40x) */
387 blr
388
389/*
390 * Flush MMU TLB for a particular address
391 */
392_GLOBAL(_tlbie)
393#if defined(CONFIG_40x)
394 tlbsx. r3, 0, r3
395 bne 10f
396 sync
397 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
398 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
399 * the TLB entry. */
400 tlbwe r3, r3, TLB_TAG
401 isync
40210:
403#elif defined(CONFIG_44x)
404 mfspr r4,SPRN_MMUCR
405 mfspr r5,SPRN_PID /* Get PID */
406 rlwimi r4,r5,0,24,31 /* Set TID */
407 mtspr SPRN_MMUCR,r4
408
409 tlbsx. r3, 0, r3
410 bne 10f
411 sync
412 /* There are only 64 TLB entries, so r3 < 64,
413      * which means bit 22 is clear.  Since 22 is
414 * the V bit in the TLB_PAGEID, loading this
415 * value will invalidate the TLB entry.
416 */
417 tlbwe r3, r3, PPC44x_TLB_PAGEID
418 isync
41910:
420#elif defined(CONFIG_FSL_BOOKE)
421 rlwinm r4, r3, 0, 0, 19
422 ori r5, r4, 0x08 /* TLBSEL = 1 */
423 ori r6, r4, 0x10 /* TLBSEL = 2 */
424 ori r7, r4, 0x18 /* TLBSEL = 3 */
425 tlbivax 0, r4
426 tlbivax 0, r5
427 tlbivax 0, r6
428 tlbivax 0, r7
429 msync
430#if defined(CONFIG_SMP)
431 tlbsync
432#endif /* CONFIG_SMP */
433#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
434#if defined(CONFIG_SMP)
435 rlwinm r8,r1,0,0,18
436 lwz r8,TI_CPU(r8)
437 oris r8,r8,11
438 mfmsr r10
439 SYNC
440 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
441 rlwinm r0,r0,0,28,26 /* clear DR */
442 mtmsr r0
443 SYNC_601
444 isync
445 lis r9,mmu_hash_lock@h
446 ori r9,r9,mmu_hash_lock@l
447 tophys(r9,r9)
44810: lwarx r7,0,r9
449 cmpwi 0,r7,0
450 bne- 10b
451 stwcx. r8,0,r9
452 bne- 10b
453 eieio
454 tlbie r3
455 sync
456 TLBSYNC
457 li r0,0
458 stw r0,0(r9) /* clear mmu_hash_lock */
459 mtmsr r10
460 SYNC_601
461 isync
462#else /* CONFIG_SMP */
463 tlbie r3
464 sync
465#endif /* CONFIG_SMP */
466#endif /* ! CONFIG_40x */
467 blr
468
469/*
470 * Flush instruction cache.
471 * This is a no-op on the 601.
472 */
473_GLOBAL(flush_instruction_cache)
474#if defined(CONFIG_8xx)
475 isync
476 lis r5, IDC_INVALL@h
477 mtspr SPRN_IC_CST, r5
478#elif defined(CONFIG_4xx)
479#ifdef CONFIG_403GCX
480 li r3, 512
481 mtctr r3
482 lis r4, KERNELBASE@h
4831: iccci 0, r4
484 addi r4, r4, 16
485 bdnz 1b
486#else
487 lis r3, KERNELBASE@h
488 iccci 0,r3
489#endif
490#elif defined(CONFIG_FSL_BOOKE)
491BEGIN_FTR_SECTION
492 mfspr r3,SPRN_L1CSR0
493 ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
494 /* msync; isync recommended here */
495 mtspr SPRN_L1CSR0,r3
496 isync
497 blr
498END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
499 mfspr r3,SPRN_L1CSR1
500 ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
501 mtspr SPRN_L1CSR1,r3
502#else
503 mfspr r3,SPRN_PVR
504 rlwinm r3,r3,16,16,31
505 cmpwi 0,r3,1
506 beqlr /* for 601, do nothing */
507 /* 603/604 processor - use invalidate-all bit in HID0 */
508 mfspr r3,SPRN_HID0
509 ori r3,r3,HID0_ICFI
510 mtspr SPRN_HID0,r3
511#endif /* CONFIG_8xx/4xx */
512 isync
513 blr
514
515/*
516 * Write any modified data cache blocks out to memory
517 * and invalidate the corresponding instruction cache blocks.
518 * This is a no-op on the 601.
519 *
520 * flush_icache_range(unsigned long start, unsigned long stop)
521 */
522_GLOBAL(flush_icache_range)
523BEGIN_FTR_SECTION
524 blr /* for 601, do nothing */
525END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
526 li r5,L1_CACHE_BYTES-1
527 andc r3,r3,r5
528 subf r4,r3,r4
529 add r4,r4,r5
530 srwi. r4,r4,L1_CACHE_SHIFT
531 beqlr
532 mtctr r4
533 mr r6,r3
5341: dcbst 0,r3
535 addi r3,r3,L1_CACHE_BYTES
536 bdnz 1b
537 sync /* wait for dcbst's to get to ram */
538 mtctr r4
5392: icbi 0,r6
540 addi r6,r6,L1_CACHE_BYTES
541 bdnz 2b
542 sync /* additional sync needed on g4 */
543 isync
544 blr
545/*
546 * Write any modified data cache blocks out to memory.
547 * Does not invalidate the corresponding cache lines (especially for
548 * any corresponding instruction cache).
549 *
550 * clean_dcache_range(unsigned long start, unsigned long stop)
551 */
552_GLOBAL(clean_dcache_range)
553 li r5,L1_CACHE_BYTES-1
554 andc r3,r3,r5
555 subf r4,r3,r4
556 add r4,r4,r5
557 srwi. r4,r4,L1_CACHE_SHIFT
558 beqlr
559 mtctr r4
560
5611: dcbst 0,r3
562 addi r3,r3,L1_CACHE_BYTES
563 bdnz 1b
564 sync /* wait for dcbst's to get to ram */
565 blr
566
567/*
568 * Write any modified data cache blocks out to memory and invalidate them.
569 * Does not invalidate the corresponding instruction cache blocks.
570 *
571 * flush_dcache_range(unsigned long start, unsigned long stop)
572 */
573_GLOBAL(flush_dcache_range)
574 li r5,L1_CACHE_BYTES-1
575 andc r3,r3,r5
576 subf r4,r3,r4
577 add r4,r4,r5
578 srwi. r4,r4,L1_CACHE_SHIFT
579 beqlr
580 mtctr r4
581
5821: dcbf 0,r3
583 addi r3,r3,L1_CACHE_BYTES
584 bdnz 1b
585     sync                            /* wait for dcbf's to get to ram */
586 blr
587
588/*
589 * Like above, but invalidate the D-cache. This is used by the 8xx
590 * to invalidate the cache so the PPC core doesn't get stale data
591 * from the CPM (no cache snooping here :-).
592 *
593 * invalidate_dcache_range(unsigned long start, unsigned long stop)
594 */
595_GLOBAL(invalidate_dcache_range)
596 li r5,L1_CACHE_BYTES-1
597 andc r3,r3,r5
598 subf r4,r3,r4
599 add r4,r4,r5
600 srwi. r4,r4,L1_CACHE_SHIFT
601 beqlr
602 mtctr r4
603
6041: dcbi 0,r3
605 addi r3,r3,L1_CACHE_BYTES
606 bdnz 1b
607 sync /* wait for dcbi's to get to ram */
608 blr
609
610#ifdef CONFIG_NOT_COHERENT_CACHE
611/*
612 * 40x cores have 8K or 16K dcache and 32 byte line size.
613 * 44x has a 32K dcache and 32 byte line size.
614 * 8xx has 1, 2, 4, 8K variants.
615 * For now, cover the worst case of the 44x.
616 * Must be called with external interrupts disabled.
617 */
618#define CACHE_NWAYS 64
619#define CACHE_NLINES 16
620
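/*
 * flush_dcache_all relies on displacement: loading one word from every
 * line of a region twice the data cache size forces each line currently
 * in the (write-back) cache to be evicted, writing back any dirty data
 * on the way out.
 */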
621_GLOBAL(flush_dcache_all)
622 li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
623 mtctr r4
624 lis r5, KERNELBASE@h
6251: lwz r3, 0(r5) /* Load one word from every line */
626 addi r5, r5, L1_CACHE_BYTES
627 bdnz 1b
628 blr
629#endif /* CONFIG_NOT_COHERENT_CACHE */
630
631/*
632 * Flush a particular page from the data cache to RAM.
633 * Note: this is necessary because the instruction cache does *not*
634 * snoop from the data cache.
635 * This is a no-op on the 601 which has a unified cache.
636 *
637 * void __flush_dcache_icache(void *page)
638 */
639_GLOBAL(__flush_dcache_icache)
640BEGIN_FTR_SECTION
641 blr /* for 601, do nothing */
642END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
643 rlwinm r3,r3,0,0,19 /* Get page base address */
644 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
645 mtctr r4
646 mr r6,r3
6470: dcbst 0,r3 /* Write line to ram */
648 addi r3,r3,L1_CACHE_BYTES
649 bdnz 0b
650 sync
651 mtctr r4
6521: icbi 0,r6
653 addi r6,r6,L1_CACHE_BYTES
654 bdnz 1b
655 sync
656 isync
657 blr
658
659/*
660 * Flush a particular page from the data cache to RAM, identified
661 * by its physical address. We turn off the MMU so we can just use
662 * the physical address (this may be a highmem page without a kernel
663 * mapping).
664 *
665 * void __flush_dcache_icache_phys(unsigned long physaddr)
666 */
667_GLOBAL(__flush_dcache_icache_phys)
668BEGIN_FTR_SECTION
669 blr /* for 601, do nothing */
670END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
671 mfmsr r10
672 rlwinm r0,r10,0,28,26 /* clear DR */
673 mtmsr r0
674 isync
675 rlwinm r3,r3,0,0,19 /* Get page base address */
676 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
677 mtctr r4
678 mr r6,r3
6790: dcbst 0,r3 /* Write line to ram */
680 addi r3,r3,L1_CACHE_BYTES
681 bdnz 0b
682 sync
683 mtctr r4
6841: icbi 0,r6
685 addi r6,r6,L1_CACHE_BYTES
686 bdnz 1b
687 sync
688 mtmsr r10 /* restore DR */
689 isync
690 blr
691
692/*
693 * Clear pages using the dcbz instruction, which doesn't cause any
694 * memory traffic (except to write out any cache lines which get
695 * displaced). This only works on cacheable memory.
696 *
697 * void clear_pages(void *page, int order) ;
698 */
699_GLOBAL(clear_pages)
700 li r0,4096/L1_CACHE_BYTES
701 slw r0,r0,r4
702 mtctr r0
703#ifdef CONFIG_8xx
704 li r4, 0
7051: stw r4, 0(r3)
706 stw r4, 4(r3)
707 stw r4, 8(r3)
708 stw r4, 12(r3)
709#else
7101: dcbz 0,r3
711#endif
712 addi r3,r3,L1_CACHE_BYTES
713 bdnz 1b
714 blr
715
716/*
717 * Copy a whole page. We use the dcbz instruction on the destination
718 * to reduce memory traffic (it eliminates the unnecessary reads of
719 * the destination into cache). This requires that the destination
720 * is cacheable.
721 */
722#define COPY_16_BYTES \
723 lwz r6,4(r4); \
724 lwz r7,8(r4); \
725 lwz r8,12(r4); \
726 lwzu r9,16(r4); \
727 stw r6,4(r3); \
728 stw r7,8(r3); \
729 stw r8,12(r3); \
730 stwu r9,16(r3)
731
732_GLOBAL(copy_page)
733 addi r3,r3,-4
734 addi r4,r4,-4
735
736#ifdef CONFIG_8xx
737 /* don't use prefetch on 8xx */
738 li r0,4096/L1_CACHE_BYTES
739 mtctr r0
7401: COPY_16_BYTES
741 bdnz 1b
742 blr
743
744#else /* not 8xx, we can prefetch */
745 li r5,4
746
747#if MAX_COPY_PREFETCH > 1
748 li r0,MAX_COPY_PREFETCH
749 li r11,4
750 mtctr r0
75111: dcbt r11,r4
752 addi r11,r11,L1_CACHE_BYTES
753 bdnz 11b
754#else /* MAX_COPY_PREFETCH == 1 */
755 dcbt r5,r4
756 li r11,L1_CACHE_BYTES+4
757#endif /* MAX_COPY_PREFETCH */
758 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
759 crclr 4*cr0+eq
7602:
761 mtctr r0
7621:
763 dcbt r11,r4
764 dcbz r5,r3
765 COPY_16_BYTES
766#if L1_CACHE_BYTES >= 32
767 COPY_16_BYTES
768#if L1_CACHE_BYTES >= 64
769 COPY_16_BYTES
770 COPY_16_BYTES
771#if L1_CACHE_BYTES >= 128
772 COPY_16_BYTES
773 COPY_16_BYTES
774 COPY_16_BYTES
775 COPY_16_BYTES
776#endif
777#endif
778#endif
779 bdnz 1b
780 beqlr
781 crnot 4*cr0+eq,4*cr0+eq
782 li r0,MAX_COPY_PREFETCH
783 li r11,4
784 b 2b
785#endif /* CONFIG_8xx */
786
787/*
788 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
789 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
790 */
791_GLOBAL(atomic_clear_mask)
79210: lwarx r5,0,r4
793 andc r5,r5,r3
794 PPC405_ERR77(0,r4)
795 stwcx. r5,0,r4
796 bne- 10b
797 blr
798_GLOBAL(atomic_set_mask)
79910: lwarx r5,0,r4
800 or r5,r5,r3
801 PPC405_ERR77(0,r4)
802 stwcx. r5,0,r4
803 bne- 10b
804 blr
805
806/*
807 * I/O string operations
808 *
809 * insb(port, buf, len)
810 * outsb(port, buf, len)
811 * insw(port, buf, len)
812 * outsw(port, buf, len)
813 * insl(port, buf, len)
814 * outsl(port, buf, len)
815 * insw_ns(port, buf, len)
816 * outsw_ns(port, buf, len)
817 * insl_ns(port, buf, len)
818 * outsl_ns(port, buf, len)
819 *
820 * The *_ns versions don't do byte-swapping.
821 */
822_GLOBAL(_insb)
823 cmpwi 0,r5,0
824 mtctr r5
825 subi r4,r4,1
826 blelr-
82700: lbz r5,0(r3)
828 eieio
829 stbu r5,1(r4)
830 bdnz 00b
831 blr
832
833_GLOBAL(_outsb)
834 cmpwi 0,r5,0
835 mtctr r5
836 subi r4,r4,1
837 blelr-
83800: lbzu r5,1(r4)
839 stb r5,0(r3)
840 eieio
841 bdnz 00b
842 blr
843
844_GLOBAL(_insw)
845 cmpwi 0,r5,0
846 mtctr r5
847 subi r4,r4,2
848 blelr-
84900: lhbrx r5,0,r3
850 eieio
851 sthu r5,2(r4)
852 bdnz 00b
853 blr
854
855_GLOBAL(_outsw)
856 cmpwi 0,r5,0
857 mtctr r5
858 subi r4,r4,2
859 blelr-
86000: lhzu r5,2(r4)
861 eieio
862 sthbrx r5,0,r3
863 bdnz 00b
864 blr
865
866_GLOBAL(_insl)
867 cmpwi 0,r5,0
868 mtctr r5
869 subi r4,r4,4
870 blelr-
87100: lwbrx r5,0,r3
872 eieio
873 stwu r5,4(r4)
874 bdnz 00b
875 blr
876
877_GLOBAL(_outsl)
878 cmpwi 0,r5,0
879 mtctr r5
880 subi r4,r4,4
881 blelr-
88200: lwzu r5,4(r4)
883 stwbrx r5,0,r3
884 eieio
885 bdnz 00b
886 blr
887
888_GLOBAL(__ide_mm_insw)
889_GLOBAL(_insw_ns)
890 cmpwi 0,r5,0
891 mtctr r5
892 subi r4,r4,2
893 blelr-
89400: lhz r5,0(r3)
895 eieio
896 sthu r5,2(r4)
897 bdnz 00b
898 blr
899
900_GLOBAL(__ide_mm_outsw)
901_GLOBAL(_outsw_ns)
902 cmpwi 0,r5,0
903 mtctr r5
904 subi r4,r4,2
905 blelr-
90600: lhzu r5,2(r4)
907 sth r5,0(r3)
908 eieio
909 bdnz 00b
910 blr
911
912_GLOBAL(__ide_mm_insl)
913_GLOBAL(_insl_ns)
914 cmpwi 0,r5,0
915 mtctr r5
916 subi r4,r4,4
917 blelr-
91800: lwz r5,0(r3)
919 eieio
920 stwu r5,4(r4)
921 bdnz 00b
922 blr
923
924_GLOBAL(__ide_mm_outsl)
925_GLOBAL(_outsl_ns)
926 cmpwi 0,r5,0
927 mtctr r5
928 subi r4,r4,4
929 blelr-
93000: lwzu r5,4(r4)
931 stw r5,0(r3)
932 eieio
933 bdnz 00b
934 blr
935
936/*
937 * Extended precision shifts.
938 *
939 * Updated to be valid for shift counts from 0 to 63 inclusive.
940 * -- Gabriel
941 *
942 * R3/R4 has 64 bit value
943 * R5 has shift count
944 * result in R3/R4
945 *
946 * ashrdi3: arithmetic right shift (sign propagation)
947 * lshrdi3: logical right shift
948 * ashldi3: left shift
949 */
950_GLOBAL(__ashrdi3)
951 subfic r6,r5,32
952 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
953 addi r7,r5,32 # could be xori, or addi with -32
954 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
955 rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
956 sraw r7,r3,r7 # t2 = MSW >> (count-32)
957 or r4,r4,r6 # LSW |= t1
958 slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
959 sraw r3,r3,r5 # MSW = MSW >> count
960 or r4,r4,r7 # LSW |= t2
961 blr
962
963_GLOBAL(__ashldi3)
964 subfic r6,r5,32
965 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
966 addi r7,r5,32 # could be xori, or addi with -32
967 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
968 slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
969 or r3,r3,r6 # MSW |= t1
970 slw r4,r4,r5 # LSW = LSW << count
971 or r3,r3,r7 # MSW |= t2
972 blr
973
974_GLOBAL(__lshrdi3)
975 subfic r6,r5,32
976 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
977 addi r7,r5,32 # could be xori, or addi with -32
978 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
979 srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
980 or r4,r4,r6 # LSW |= t1
981 srw r3,r3,r5 # MSW = MSW >> count
982 or r4,r4,r7 # LSW |= t2
983 blr
984
985_GLOBAL(abs)
986 srawi r4,r3,31
987 xor r3,r3,r4
988 sub r3,r3,r4
989 blr
990
991_GLOBAL(_get_SP)
992 mr r3,r1 /* Close enough */
993 blr
994
995/*
996 * Create a kernel thread
997 * kernel_thread(fn, arg, flags)
998 */
999_GLOBAL(kernel_thread)
1000 stwu r1,-16(r1)
1001 stw r30,8(r1)
1002 stw r31,12(r1)
1003 mr r30,r3 /* function */
1004 mr r31,r4 /* argument */
1005 ori r3,r5,CLONE_VM /* flags */
1006 oris r3,r3,CLONE_UNTRACED>>16
1007 li r4,0 /* new sp (unused) */
1008 li r0,__NR_clone
1009 sc
1010 cmpwi 0,r3,0 /* parent or child? */
1011 bne 1f /* return if parent */
1012 li r0,0 /* make top-level stack frame */
1013 stwu r0,-16(r1)
1014 mtlr r30 /* fn addr in lr */
1015 mr r3,r31 /* load arg and call fn */
1016 PPC440EP_ERR42
1017 blrl
1018 li r0,__NR_exit /* exit if function returns */
1019 li r3,0
1020 sc
10211: lwz r30,8(r1)
1022 lwz r31,12(r1)
1023 addi r1,r1,16
1024 blr
1025
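In C-level terms the kernel_thread routine above amounts to the sketch below; syscall_clone() and syscall_exit() are invented stand-ins for the raw sc instructions, so treat this as an illustration of the control flow rather than working kernel code:

extern long syscall_clone(unsigned long flags, unsigned long newsp); /* hypothetical */
extern void syscall_exit(long code);                                 /* hypothetical */

static long kernel_thread_sketch(int (*fn)(void *), void *arg,
                                 unsigned long flags)
{
        long pid = syscall_clone(flags | CLONE_VM | CLONE_UNTRACED, 0);

        if (pid != 0)
                return pid;     /* parent: child pid, or a negative errno */
        fn(arg);                /* child: run the payload on a fresh frame */
        syscall_exit(0);        /* child exits once fn returns */
        return 0;               /* not reached */
}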
1026_GLOBAL(execve)
1027 li r0,__NR_execve
1028 sc
1029 bnslr
1030 neg r3,r3
1031 blr
1032
1033/*
1034 * This routine is just here to keep GCC happy - sigh...
1035 */
1036_GLOBAL(__main)
1037 blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
new file mode 100644
index 000000000000..b3e95ff0dba0
--- /dev/null
+++ b/arch/powerpc/kernel/misc_64.S
@@ -0,0 +1,880 @@
1/*
 2 * arch/powerpc/kernel/misc_64.S
3 *
4 * This file contains miscellaneous low-level functions.
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
8 * and Paul Mackerras.
9 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
10 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/sys.h>
21#include <asm/unistd.h>
22#include <asm/errno.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/cache.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30
31 .text
32
33/*
34 * Returns (address we are running at) - (address we were linked at)
35 * for use before the text and data are mapped to KERNELBASE.
36 */
37
38_GLOBAL(reloc_offset)
39 mflr r0
40 bl 1f
411: mflr r3
42 LOADADDR(r4,1b)
43 subf r3,r4,r3
44 mtlr r0
45 blr
46
47/*
48 * add_reloc_offset(x) returns x + reloc_offset().
49 */
50_GLOBAL(add_reloc_offset)
51 mflr r0
52 bl 1f
531: mflr r5
54 LOADADDR(r4,1b)
55 subf r5,r4,r5
56 add r3,r3,r5
57 mtlr r0
58 blr
59
60_GLOBAL(get_msr)
61 mfmsr r3
62 blr
63
64_GLOBAL(get_dar)
65 mfdar r3
66 blr
67
68_GLOBAL(get_srr0)
69 mfsrr0 r3
70 blr
71
72_GLOBAL(get_srr1)
73 mfsrr1 r3
74 blr
75
76_GLOBAL(get_sp)
77 mr r3,r1
78 blr
79
80#ifdef CONFIG_IRQSTACKS
81_GLOBAL(call_do_softirq)
82 mflr r0
83 std r0,16(r1)
84 stdu r1,THREAD_SIZE-112(r3)
85 mr r1,r3
86 bl .__do_softirq
87 ld r1,0(r1)
88 ld r0,16(r1)
89 mtlr r0
90 blr
91
92_GLOBAL(call_handle_IRQ_event)
93 mflr r0
94 std r0,16(r1)
95 stdu r1,THREAD_SIZE-112(r6)
96 mr r1,r6
97 bl .handle_IRQ_event
98 ld r1,0(r1)
99 ld r0,16(r1)
100 mtlr r0
101 blr
102#endif /* CONFIG_IRQSTACKS */
103
104 /*
105 * To be called by C code which needs to do some operations with MMU
106 * disabled. Note that interrupts have to be disabled by the caller
107 * prior to calling us. The code called _MUST_ be in the RMO of course
108 * and part of the linear mapping as we don't attempt to translate the
109 * stack pointer at all. The function is called with the stack switched
110 * to this CPU's emergency stack.
111 *
112 * prototype is void *call_with_mmu_off(void *func, void *data);
113 *
114 * the called function is expected to be of the form
115 *
116 * void *called(void *data);
117 */
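Given that prototype, a caller would look roughly like the sketch below; my_rmo_func is an invented example, and the constraints spelled out above (callee in the RMO/linear mapping, interrupts already off) still apply:

extern void *call_with_mmu_off(void *func, void *data);

static void *my_rmo_func(void *data)    /* hypothetical real-mode callee */
{
        /* ... touch only real-mode-safe memory here ... */
        return data;
}

static void *example_caller(void *data)
{
        void *ret;

        local_irq_disable();            /* caller must mask interrupts */
        ret = call_with_mmu_off((void *)my_rmo_func, data);
        local_irq_enable();
        return ret;
}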
118_GLOBAL(call_with_mmu_off)
119 mflr r0 /* get link, save it on stackframe */
120 std r0,16(r1)
121 mr r1,r5 /* save old stack ptr */
122 ld r1,PACAEMERGSP(r13) /* get emerg. stack */
123 subi r1,r1,STACK_FRAME_OVERHEAD
124 std r0,16(r1) /* save link on emerg. stack */
125 std r5,0(r1) /* save old stack ptr in backchain */
126 ld r3,0(r3) /* get to real function ptr (assume same TOC) */
127 bl 2f /* we need LR to return, continue at label 2 */
128
129 ld r0,16(r1) /* we return here from the call, get LR and */
130 ld r1,0(r1) /* .. old stack ptr */
131 mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
132 mfmsr r4
133 ori r4,r4,MSR_IR|MSR_DR
134 mtspr SPRN_SRR1,r4
135 rfid
136
1372: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
138 mr r3,r4 /* get parameter */
139 mfmsr r0
140 ori r0,r0,MSR_IR|MSR_DR
141 xori r0,r0,MSR_IR|MSR_DR
142 mtspr SPRN_SRR1,r0
143 rfid
144
145
146 .section ".toc","aw"
147PPC64_CACHES:
148 .tc ppc64_caches[TC],ppc64_caches
149 .section ".text"
150
151/*
152 * Write any modified data cache blocks out to memory
153 * and invalidate the corresponding instruction cache blocks.
154 *
155 * flush_icache_range(unsigned long start, unsigned long stop)
156 *
157 * flush all bytes from start through stop-1 inclusive
158 */
159
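The two loops that follow read more easily as C; in the editorial sketch below dcbst()/icbi()/sync()/isync() stand in for the cache instructions, and the line sizes come from the ppc64_caches structure loaded through the TOC:

static void flush_icache_range_sketch(unsigned long start, unsigned long stop,
                                      unsigned long dline, unsigned long iline)
{
        unsigned long p;

        for (p = start & ~(dline - 1); p < stop; p += dline)
                dcbst(p);       /* write one dirty data-cache line back */
        sync();                 /* wait for the writebacks to complete */

        for (p = start & ~(iline - 1); p < stop; p += iline)
                icbi(p);        /* invalidate the matching icache line */
        isync();                /* drop any already-fetched instructions */
}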
160_KPROBE(__flush_icache_range)
161
162/*
163 * Flush the data cache to memory
164 *
165 * Different systems have different cache line sizes
166 * and in some cases i-cache and d-cache line sizes differ from
167 * each other.
168 */
169 ld r10,PPC64_CACHES@toc(r2)
170 lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
171 addi r5,r7,-1
172 andc r6,r3,r5 /* round low to line bdy */
173 subf r8,r6,r4 /* compute length */
174 add r8,r8,r5 /* ensure we get enough */
175 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
176 srw. r8,r8,r9 /* compute line count */
177 beqlr /* nothing to do? */
178 mtctr r8
1791: dcbst 0,r6
180 add r6,r6,r7
181 bdnz 1b
182 sync
183
184/* Now invalidate the instruction cache */
185
186 lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
187 addi r5,r7,-1
188 andc r6,r3,r5 /* round low to line bdy */
189 subf r8,r6,r4 /* compute length */
190 add r8,r8,r5
191 lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
192 srw. r8,r8,r9 /* compute line count */
193 beqlr /* nothing to do? */
194 mtctr r8
1952: icbi 0,r6
196 add r6,r6,r7
197 bdnz 2b
198 isync
199 blr
200 .previous .text
201/*
202 * Like above, but only do the D-cache.
203 *
204 * flush_dcache_range(unsigned long start, unsigned long stop)
205 *
206 * flush all bytes from start to stop-1 inclusive
207 */
208_GLOBAL(flush_dcache_range)
209
210/*
211 * Flush the data cache to memory
212 *
213 * Different systems have different cache line sizes
214 */
215 ld r10,PPC64_CACHES@toc(r2)
216 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
217 addi r5,r7,-1
218 andc r6,r3,r5 /* round low to line bdy */
219 subf r8,r6,r4 /* compute length */
220 add r8,r8,r5 /* ensure we get enough */
221 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
222 srw. r8,r8,r9 /* compute line count */
223 beqlr /* nothing to do? */
224 mtctr r8
2250: dcbst 0,r6
226 add r6,r6,r7
227 bdnz 0b
228 sync
229 blr
230
231/*
232 * Like above, but works on non-mapped physical addresses.
233 * Use only for non-LPAR setups ! It also assumes real mode
234 * is cacheable. Used for flushing out the DART before using
235 * it as uncacheable memory
236 *
237 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
238 *
239 * flush all bytes from start to stop-1 inclusive
240 */
241_GLOBAL(flush_dcache_phys_range)
242 ld r10,PPC64_CACHES@toc(r2)
243 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
244 addi r5,r7,-1
245 andc r6,r3,r5 /* round low to line bdy */
246 subf r8,r6,r4 /* compute length */
247 add r8,r8,r5 /* ensure we get enough */
248 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
249 srw. r8,r8,r9 /* compute line count */
250 beqlr /* nothing to do? */
251 mfmsr r5 /* Disable MMU Data Relocation */
252 ori r0,r5,MSR_DR
253 xori r0,r0,MSR_DR
254 sync
255 mtmsr r0
256 sync
257 isync
258 mtctr r8
2590: dcbst 0,r6
260 add r6,r6,r7
261 bdnz 0b
262 sync
263 isync
264 mtmsr r5 /* Re-enable MMU Data Relocation */
265 sync
266 isync
267 blr
268
269_GLOBAL(flush_inval_dcache_range)
270 ld r10,PPC64_CACHES@toc(r2)
271 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
272 addi r5,r7,-1
273 andc r6,r3,r5 /* round low to line bdy */
274 subf r8,r6,r4 /* compute length */
275 add r8,r8,r5 /* ensure we get enough */
276 lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
277 srw. r8,r8,r9 /* compute line count */
278 beqlr /* nothing to do? */
279 sync
280 isync
281 mtctr r8
2820: dcbf 0,r6
283 add r6,r6,r7
284 bdnz 0b
285 sync
286 isync
287 blr
288
289
290/*
291 * Flush a particular page from the data cache to RAM.
292 * Note: this is necessary because the instruction cache does *not*
293 * snoop from the data cache.
294 *
295 * void __flush_dcache_icache(void *page)
296 */
297_GLOBAL(__flush_dcache_icache)
298/*
299 * Flush the data cache to memory
300 *
301 * Different systems have different cache line sizes
302 */
303
304/* Flush the dcache */
305 ld r7,PPC64_CACHES@toc(r2)
306 clrrdi r3,r3,PAGE_SHIFT /* Page align */
307 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
308 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
309 mr r6,r3
310 mtctr r4
3110: dcbst 0,r6
312 add r6,r6,r5
313 bdnz 0b
314 sync
315
316/* Now invalidate the icache */
317
318 lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
319 lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
320 mtctr r4
3211: icbi 0,r3
322 add r3,r3,r5
323 bdnz 1b
324 isync
325 blr
326
327/*
328 * I/O string operations
329 *
330 * insb(port, buf, len)
331 * outsb(port, buf, len)
332 * insw(port, buf, len)
333 * outsw(port, buf, len)
334 * insl(port, buf, len)
335 * outsl(port, buf, len)
336 * insw_ns(port, buf, len)
337 * outsw_ns(port, buf, len)
338 * insl_ns(port, buf, len)
339 * outsl_ns(port, buf, len)
340 *
341 * The *_ns versions don't do byte-swapping.
342 */
343_GLOBAL(_insb)
344 cmpwi 0,r5,0
345 mtctr r5
346 subi r4,r4,1
347 blelr-
34800: lbz r5,0(r3)
349 eieio
350 stbu r5,1(r4)
351 bdnz 00b
352 twi 0,r5,0
353 isync
354 blr
355
356_GLOBAL(_outsb)
357 cmpwi 0,r5,0
358 mtctr r5
359 subi r4,r4,1
360 blelr-
36100: lbzu r5,1(r4)
362 stb r5,0(r3)
363 bdnz 00b
364 sync
365 blr
366
367_GLOBAL(_insw)
368 cmpwi 0,r5,0
369 mtctr r5
370 subi r4,r4,2
371 blelr-
37200: lhbrx r5,0,r3
373 eieio
374 sthu r5,2(r4)
375 bdnz 00b
376 twi 0,r5,0
377 isync
378 blr
379
380_GLOBAL(_outsw)
381 cmpwi 0,r5,0
382 mtctr r5
383 subi r4,r4,2
384 blelr-
38500: lhzu r5,2(r4)
386 sthbrx r5,0,r3
387 bdnz 00b
388 sync
389 blr
390
391_GLOBAL(_insl)
392 cmpwi 0,r5,0
393 mtctr r5
394 subi r4,r4,4
395 blelr-
39600: lwbrx r5,0,r3
397 eieio
398 stwu r5,4(r4)
399 bdnz 00b
400 twi 0,r5,0
401 isync
402 blr
403
404_GLOBAL(_outsl)
405 cmpwi 0,r5,0
406 mtctr r5
407 subi r4,r4,4
408 blelr-
40900: lwzu r5,4(r4)
410 stwbrx r5,0,r3
411 bdnz 00b
412 sync
413 blr
414
415/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
416_GLOBAL(_insw_ns)
417 cmpwi 0,r5,0
418 mtctr r5
419 subi r4,r4,2
420 blelr-
42100: lhz r5,0(r3)
422 eieio
423 sthu r5,2(r4)
424 bdnz 00b
425 twi 0,r5,0
426 isync
427 blr
428
429/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
430_GLOBAL(_outsw_ns)
431 cmpwi 0,r5,0
432 mtctr r5
433 subi r4,r4,2
434 blelr-
43500: lhzu r5,2(r4)
436 sth r5,0(r3)
437 bdnz 00b
438 sync
439 blr
440
441_GLOBAL(_insl_ns)
442 cmpwi 0,r5,0
443 mtctr r5
444 subi r4,r4,4
445 blelr-
44600: lwz r5,0(r3)
447 eieio
448 stwu r5,4(r4)
449 bdnz 00b
450 twi 0,r5,0
451 isync
452 blr
453
454_GLOBAL(_outsl_ns)
455 cmpwi 0,r5,0
456 mtctr r5
457 subi r4,r4,4
458 blelr-
45900: lwzu r5,4(r4)
460 stw r5,0(r3)
461 bdnz 00b
462 sync
463 blr
464
465/*
466 * identify_cpu: identifies the CPU and calls setup_cpu
467 * In: r3 = base of the cpu_specs array
468 * r4 = address of cur_cpu_spec
469 * r5 = relocation offset
470 */
471_GLOBAL(identify_cpu)
472 mfpvr r7
4731:
474 lwz r8,CPU_SPEC_PVR_MASK(r3)
475 and r8,r8,r7
476 lwz r9,CPU_SPEC_PVR_VALUE(r3)
477 cmplw 0,r9,r8
478 beq 1f
479 addi r3,r3,CPU_SPEC_ENTRY_SIZE
480 b 1b
4811:
482 sub r0,r3,r5
483 std r0,0(r4)
484 ld r4,CPU_SPEC_SETUP(r3)
485 add r4,r4,r5
486 ld r4,0(r4)
487 add r4,r4,r5
488 mtctr r4
489 /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
490 mr r4,r3
491 mr r3,r5
492 bctr
493
494/*
495 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
496 * and writes nops over sections of code that don't apply for this cpu.
497 * r3 = data offset (not changed)
498 */
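The 32-byte stride and the -32/-24/-16/-8 loads below imply a fixup record of four doublewords; in C the walk is roughly the sketch that follows (the struct name and layout are inferred, and the relocation-offset and cache-flush handling are omitted):

struct ftr_fixup_sketch {
        unsigned long mask;     /* which CPU_FTR_* bits to test  */
        unsigned long value;    /* required value of those bits  */
        unsigned long start;    /* first instruction to patch    */
        unsigned long end;      /* one past the last instruction */
};

static void do_cpu_ftr_fixups_sketch(unsigned long features,
                                     struct ftr_fixup_sketch *p,
                                     struct ftr_fixup_sketch *stop)
{
        for (; p < stop; p++) {
                unsigned int *insn;

                if ((features & p->mask) == p->value)
                        continue;               /* feature matches: keep the code */
                for (insn = (unsigned int *)p->start;
                     insn < (unsigned int *)p->end; insn++)
                        *insn = 0x60000000;     /* overwrite with a ppc nop */
        }
}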
499_GLOBAL(do_cpu_ftr_fixups)
500 /* Get CPU 0 features */
501 LOADADDR(r6,cur_cpu_spec)
502 sub r6,r6,r3
503 ld r4,0(r6)
504 sub r4,r4,r3
505 ld r4,CPU_SPEC_FEATURES(r4)
506 /* Get the fixup table */
507 LOADADDR(r6,__start___ftr_fixup)
508 sub r6,r6,r3
509 LOADADDR(r7,__stop___ftr_fixup)
510 sub r7,r7,r3
511 /* Do the fixup */
5121: cmpld r6,r7
513 bgelr
514 addi r6,r6,32
515 ld r8,-32(r6) /* mask */
516 and r8,r8,r4
517 ld r9,-24(r6) /* value */
518 cmpld r8,r9
519 beq 1b
520 ld r8,-16(r6) /* section begin */
521 ld r9,-8(r6) /* section end */
522 subf. r9,r8,r9
523 beq 1b
524 /* write nops over the section of code */
525 /* todo: if large section, add a branch at the start of it */
526 srwi r9,r9,2
527 mtctr r9
528 sub r8,r8,r3
529 lis r0,0x60000000@h /* nop */
5303: stw r0,0(r8)
531 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
532 beq 2f
533 dcbst 0,r8 /* suboptimal, but simpler */
534 sync
535 icbi 0,r8
5362: addi r8,r8,4
537 bdnz 3b
538 sync /* additional sync needed on g4 */
539 isync
540 b 1b
541
542#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
543/*
544 * Do an IO access in real mode
545 */
546_GLOBAL(real_readb)
547 mfmsr r7
548 ori r0,r7,MSR_DR
549 xori r0,r0,MSR_DR
550 sync
551 mtmsrd r0
552 sync
553 isync
554 mfspr r6,SPRN_HID4
555 rldicl r5,r6,32,0
556 ori r5,r5,0x100
557 rldicl r5,r5,32,0
558 sync
559 mtspr SPRN_HID4,r5
560 isync
561 slbia
562 isync
563 lbz r3,0(r3)
564 sync
565 mtspr SPRN_HID4,r6
566 isync
567 slbia
568 isync
569 mtmsrd r7
570 sync
571 isync
572 blr
573
574 /*
575 * Do an IO access in real mode
576 */
577_GLOBAL(real_writeb)
578 mfmsr r7
579 ori r0,r7,MSR_DR
580 xori r0,r0,MSR_DR
581 sync
582 mtmsrd r0
583 sync
584 isync
585 mfspr r6,SPRN_HID4
586 rldicl r5,r6,32,0
587 ori r5,r5,0x100
588 rldicl r5,r5,32,0
589 sync
590 mtspr SPRN_HID4,r5
591 isync
592 slbia
593 isync
594 stb r3,0(r4)
595 sync
596 mtspr SPRN_HID4,r6
597 isync
598 slbia
599 isync
600 mtmsrd r7
601 sync
602 isync
603 blr
604#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
605
606/*
607 * Create a kernel thread
608 * kernel_thread(fn, arg, flags)
609 */
610_GLOBAL(kernel_thread)
611 std r29,-24(r1)
612 std r30,-16(r1)
613 stdu r1,-STACK_FRAME_OVERHEAD(r1)
614 mr r29,r3
615 mr r30,r4
616 ori r3,r5,CLONE_VM /* flags */
617 oris r3,r3,(CLONE_UNTRACED>>16)
618 li r4,0 /* new sp (unused) */
619 li r0,__NR_clone
620 sc
621 cmpdi 0,r3,0 /* parent or child? */
622 bne 1f /* return if parent */
623 li r0,0
624 stdu r0,-STACK_FRAME_OVERHEAD(r1)
625 ld r2,8(r29)
626 ld r29,0(r29)
627 mtlr r29 /* fn addr in lr */
628 mr r3,r30 /* load arg and call fn */
629 blrl
630 li r0,__NR_exit /* exit after child exits */
631 li r3,0
632 sc
6331: addi r1,r1,STACK_FRAME_OVERHEAD
634 ld r29,-24(r1)
635 ld r30,-16(r1)
636 blr
637
638/*
639 * disable_kernel_fp()
640 * Disable the FPU.
641 */
642_GLOBAL(disable_kernel_fp)
643 mfmsr r3
644 rldicl r0,r3,(63-MSR_FP_LG),1
645 rldicl r3,r0,(MSR_FP_LG+1),0
646 mtmsrd r3 /* disable use of fpu now */
647 isync
648 blr
649
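The rldicl pair above is just a mask-out of the FP-enable bit done without a scratch constant; written as C it is the editorial sketch below, with mfmsr()/mtmsrd()/isync() taken to be the usual MSR accessor macros:

static inline void disable_kernel_fp_sketch(void)
{
        unsigned long msr = mfmsr();    /* read the Machine State Register */

        mtmsrd(msr & ~MSR_FP);          /* clear the floating-point enable bit */
        isync();                        /* context-synchronize the MSR change */
}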
650#ifdef CONFIG_ALTIVEC
651
652#if 0 /* this has no callers for now */
653/*
654 * disable_kernel_altivec()
655 * Disable the VMX.
656 */
657_GLOBAL(disable_kernel_altivec)
658 mfmsr r3
659 rldicl r0,r3,(63-MSR_VEC_LG),1
660 rldicl r3,r0,(MSR_VEC_LG+1),0
661 mtmsrd r3 /* disable use of VMX now */
662 isync
663 blr
664#endif /* 0 */
665
666/*
667 * giveup_altivec(tsk)
668 * Disable VMX for the task given as the argument,
669 * and save the vector registers in its thread_struct.
670 * Enables the VMX for use in the kernel on return.
671 */
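Reworded as C, the routine below does roughly the following (editorial sketch; enable_msr_vec() and save_vrs() are invented helpers, the remaining names are the ones the assembly really uses):

static void giveup_altivec_sketch(struct task_struct *tsk)
{
        enable_msr_vec();                       /* turn VMX on in the kernel first */
        if (!tsk)
                return;                         /* no previous owner: nothing to save */
        save_vrs(&tsk->thread);                 /* dump vr0-vr31 and the VSCR */
        if (tsk->thread.regs)
                tsk->thread.regs->msr &= ~MSR_VEC;  /* make the task re-trap later */
#ifndef CONFIG_SMP
        last_task_used_altivec = NULL;
#endif
}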
672_GLOBAL(giveup_altivec)
673 mfmsr r5
674 oris r5,r5,MSR_VEC@h
675 mtmsrd r5 /* enable use of VMX now */
676 isync
677 cmpdi 0,r3,0
678 beqlr- /* if no previous owner, done */
679 addi r3,r3,THREAD /* want THREAD of task */
680 ld r5,PT_REGS(r3)
681 cmpdi 0,r5,0
682 SAVE_32VRS(0,r4,r3)
683 mfvscr vr0
684 li r4,THREAD_VSCR
685 stvx vr0,r4,r3
686 beq 1f
687 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
688 lis r3,MSR_VEC@h
689 andc r4,r4,r3 /* disable VEC for previous task */
690 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
6911:
692#ifndef CONFIG_SMP
693 li r5,0
694 ld r4,last_task_used_altivec@got(r2)
695 std r5,0(r4)
696#endif /* CONFIG_SMP */
697 blr
698
699#endif /* CONFIG_ALTIVEC */
700
701_GLOBAL(__setup_cpu_power3)
702 blr
703
704_GLOBAL(execve)
705 li r0,__NR_execve
706 sc
707 bnslr
708 neg r3,r3
709 blr
710
711/* kexec_wait(phys_cpu)
712 *
713 * wait for the flag to change, indicating this kernel is going away and
714 * that the slave code for the next one is at addresses 0 to 0x100.
715 *
716 * This is used by all slaves.
717 *
718 * Physical (hardware) cpu id should be in r3.
719 */
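Conceptually the routine that follows does this (editorial C sketch; kexec_flag is the word defined just after it, and hmt_low() stands in for the HMT_LOW priority hint):

extern volatile int kexec_flag;         /* the .long defined below */

static void kexec_wait_sketch(unsigned long phys_cpu)
{
        void (*slave_entry)(unsigned long) = (void (*)(unsigned long))0x60;

        while (!kexec_flag)
                hmt_low();              /* drop SMT thread priority while spinning */
        slave_entry(phys_cpu);          /* branch absolute to 0x60, never returns */
}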
720_GLOBAL(kexec_wait)
721 bl 1f
7221: mflr r5
723 addi r5,r5,kexec_flag-1b
724
72599: HMT_LOW
726#ifdef CONFIG_KEXEC /* use no memory without kexec */
727 lwz r4,0(r5)
728 cmpwi 0,r4,0
729 bnea 0x60
730#endif
731 b 99b
732
733/* this can be in text because we won't change it until we are
734 * running in real mode anyway
735 */
736kexec_flag:
737 .long 0
738
739
740#ifdef CONFIG_KEXEC
741
742/* kexec_smp_wait(void)
743 *
744 * call with interrupts off
745 * note: this is a terminal routine, it does not save lr
746 *
747 * get phys id from paca
748 * set paca id to -1 to say we got here
749 * switch to real mode
750 * join other cpus in kexec_wait(phys_id)
751 */
752_GLOBAL(kexec_smp_wait)
753 lhz r3,PACAHWCPUID(r13)
754 li r4,-1
755 sth r4,PACAHWCPUID(r13) /* let others know we left */
756 bl real_mode
757 b .kexec_wait
758
759/*
760 * switch to real mode (turn mmu off)
761 * we use the early kernel trick that the hardware ignores bits
762 * 0 and 1 (big endian) of the effective address in real mode
763 *
764 * don't overwrite r3 here, it is live for kexec_wait above.
765 */
766real_mode: /* assume normal blr return */
7671: li r9,MSR_RI
768 li r10,MSR_DR|MSR_IR
769 mflr r11 /* return address to SRR0 */
770 mfmsr r12
771 andc r9,r12,r9
772 andc r10,r12,r10
773
774 mtmsrd r9,1
775 mtspr SPRN_SRR1,r10
776 mtspr SPRN_SRR0,r11
777 rfid
778
779
780/*
781 * kexec_sequence(newstack, start, image, control, clear_all())
782 *
783 * does the grungy work with stack switching and real mode switches
784 * also does simple calls to other code
785 */
786
787_GLOBAL(kexec_sequence)
788 mflr r0
789 std r0,16(r1)
790
791 /* switch stacks to newstack -- &kexec_stack.stack */
792 stdu r1,THREAD_SIZE-112(r3)
793 mr r1,r3
794
795 li r0,0
796 std r0,16(r1)
797
798 /* save regs for local vars on new stack.
799 * yes, we won't go back, but ...
800 */
801 std r31,-8(r1)
802 std r30,-16(r1)
803 std r29,-24(r1)
804 std r28,-32(r1)
805 std r27,-40(r1)
806 std r26,-48(r1)
807 std r25,-56(r1)
808
809 stdu r1,-112-64(r1)
810
811 /* save args into preserved regs */
812 mr r31,r3 /* newstack (both) */
813 mr r30,r4 /* start (real) */
814 mr r29,r5 /* image (virt) */
815 mr r28,r6 /* control, unused */
816 mr r27,r7 /* clear_all() fn desc */
817 mr r26,r8 /* spare */
818 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
819
820 /* disable interrupts, we are overwriting kernel data next */
821 mfmsr r3
822 rlwinm r3,r3,0,17,15
823 mtmsrd r3,1
824
825 /* copy dest pages, flush whole dest image */
826 mr r3,r29
827 bl .kexec_copy_flush /* (image) */
828
829 /* turn off mmu */
830 bl real_mode
831
832 /* clear out hardware hash page table and tlb */
833 ld r5,0(r27) /* deref function descriptor */
834 mtctr r5
835 bctrl /* ppc_md.hash_clear_all(void); */
836
837/*
838 * kexec image calling is:
839 * the first 0x100 bytes of the entry point are copied to 0
840 *
841 * all slaves branch to slave = 0x60 (absolute)
842 * slave(phys_cpu_id);
843 *
844 * master goes to start = entry point
845 * start(phys_cpu_id, start, 0);
846 *
847 *
848 * a wrapper is needed to call existing kernels, here is an approximate
849 * description of one method:
850 *
851 * v2: (2.6.10)
852 * start will be near the boot_block (maybe 0x100 bytes before it?)
853 * it will have a 0x60, which will b to boot_block, where it will wait
854 * and 0 will store phys into struct boot-block and load r3 from there,
855 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
856 *
857 * v1: (2.6.9)
858 * boot block will have all cpus scanning device tree to see if they
859 * are the boot cpu ?????
860 * other device tree differences (prop sizes, va vs pa, etc)...
861 */
862
863 /* copy 0x100 bytes starting at start to 0 */
864 li r3,0
865 mr r4,r30
866 li r5,0x100
867 li r6,0
868 bl .copy_and_flush /* (dest, src, copy limit, start offset) */
8691: /* assume normal blr return */
870
871 /* release other cpus to the new kernel secondary start at 0x60 */
872 mflr r5
873 li r6,1
874 stw r6,kexec_flag-1b(5)
875 mr r3,r25 # my phys cpu
876 mr r4,r30 # start, aka phys mem offset
877 mtlr 4
878 li r5,0
879 blr /* image->start(physid, image->start, 0); */
880#endif /* CONFIG_KEXEC */
diff --git a/arch/ppc64/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 9f200f0f2ad5..7065e40e2f42 100644
--- a/arch/ppc64/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -4,6 +4,8 @@
4#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
7#include <linux/slab.h>
8
7#include <asm/errno.h> 9#include <asm/errno.h>
8#include <asm/of_device.h> 10#include <asm/of_device.h>
9 11
@@ -184,6 +186,7 @@ void of_release_dev(struct device *dev)
184 struct of_device *ofdev; 186 struct of_device *ofdev;
185 187
186 ofdev = to_of_device(dev); 188 ofdev = to_of_device(dev);
189 of_node_put(ofdev->node);
187 kfree(ofdev); 190 kfree(ofdev);
188} 191}
189 192
@@ -244,7 +247,7 @@ struct of_device* of_platform_device_create(struct device_node *np,
244 return NULL; 247 return NULL;
245 memset(dev, 0, sizeof(*dev)); 248 memset(dev, 0, sizeof(*dev));
246 249
247 dev->node = np; 250 dev->node = of_node_get(np);
248 dev->dma_mask = 0xffffffffUL; 251 dev->dma_mask = 0xffffffffUL;
249 dev->dev.dma_mask = &dev->dma_mask; 252 dev->dev.dma_mask = &dev->dma_mask;
250 dev->dev.parent = parent; 253 dev->dev.parent = parent;
@@ -261,7 +264,6 @@ struct of_device* of_platform_device_create(struct device_node *np,
261 return dev; 264 return dev;
262} 265}
263 266
264
265EXPORT_SYMBOL(of_match_device); 267EXPORT_SYMBOL(of_match_device);
266EXPORT_SYMBOL(of_platform_bus_type); 268EXPORT_SYMBOL(of_platform_bus_type);
267EXPORT_SYMBOL(of_register_driver); 269EXPORT_SYMBOL(of_register_driver);
diff --git a/arch/ppc64/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index 63d9481c3ec2..2d333cc84082 100644
--- a/arch/ppc64/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -1,7 +1,10 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/pmc.c 2 * arch/powerpc/kernel/pmc.c
3 * 3 *
4 * Copyright (C) 2004 David Gibson, IBM Corporation. 4 * Copyright (C) 2004 David Gibson, IBM Corporation.
5 * Includes code formerly from arch/ppc/kernel/perfmon.c:
6 * Author: Andy Fleming
7 * Copyright (c) 2004 Freescale Semiconductor, Inc
5 * 8 *
6 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -17,6 +20,20 @@
17#include <asm/processor.h> 20#include <asm/processor.h>
18#include <asm/pmc.h> 21#include <asm/pmc.h>
19 22
23#if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
24static void dummy_perf(struct pt_regs *regs)
25{
26 unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
27
28 pmgc0 &= ~PMGC0_PMIE;
29 mtpmr(PMRN_PMGC0, pmgc0);
30}
31#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
32
33#ifndef MMCR0_PMAO
34#define MMCR0_PMAO 0
35#endif
36
20/* Ensure exceptions are disabled */ 37/* Ensure exceptions are disabled */
21static void dummy_perf(struct pt_regs *regs) 38static void dummy_perf(struct pt_regs *regs)
22{ 39{
@@ -25,6 +42,11 @@ static void dummy_perf(struct pt_regs *regs)
25 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO); 42 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
26 mtspr(SPRN_MMCR0, mmcr0); 43 mtspr(SPRN_MMCR0, mmcr0);
27} 44}
45#else
46static void dummy_perf(struct pt_regs *regs)
47{
48}
49#endif
28 50
29static DEFINE_SPINLOCK(pmc_owner_lock); 51static DEFINE_SPINLOCK(pmc_owner_lock);
30static void *pmc_owner_caller; /* mostly for debugging */ 52static void *pmc_owner_caller; /* mostly for debugging */
@@ -66,11 +88,12 @@ void release_pmc_hardware(void)
66} 88}
67EXPORT_SYMBOL_GPL(release_pmc_hardware); 89EXPORT_SYMBOL_GPL(release_pmc_hardware);
68 90
91#ifdef CONFIG_PPC64
69void power4_enable_pmcs(void) 92void power4_enable_pmcs(void)
70{ 93{
71 unsigned long hid0; 94 unsigned long hid0;
72 95
73 hid0 = mfspr(HID0); 96 hid0 = mfspr(SPRN_HID0);
74 hid0 |= 1UL << (63 - 20); 97 hid0 |= 1UL << (63 - 20);
75 98
76 /* POWER4 requires the following sequence */ 99 /* POWER4 requires the following sequence */
@@ -83,6 +106,7 @@ void power4_enable_pmcs(void)
83 "mfspr %0, %1\n" 106 "mfspr %0, %1\n"
84 "mfspr %0, %1\n" 107 "mfspr %0, %1\n"
85 "mfspr %0, %1\n" 108 "mfspr %0, %1\n"
86 "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0): 109 "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
87 "memory"); 110 "memory");
88} 111}
112#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
new file mode 100644
index 000000000000..8bc540337ba0
--- /dev/null
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -0,0 +1,273 @@
1#include <linux/config.h>
2#include <linux/module.h>
3#include <linux/threads.h>
4#include <linux/smp.h>
5#include <linux/sched.h>
6#include <linux/elfcore.h>
7#include <linux/string.h>
8#include <linux/interrupt.h>
9#include <linux/tty.h>
10#include <linux/vt_kern.h>
11#include <linux/nvram.h>
12#include <linux/console.h>
13#include <linux/irq.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/ide.h>
17#include <linux/bitops.h>
18
19#include <asm/page.h>
20#include <asm/semaphore.h>
21#include <asm/processor.h>
22#include <asm/uaccess.h>
23#include <asm/io.h>
24#include <asm/ide.h>
25#include <asm/atomic.h>
26#include <asm/checksum.h>
27#include <asm/pgtable.h>
28#include <asm/tlbflush.h>
29#include <linux/adb.h>
30#include <linux/cuda.h>
31#include <linux/pmu.h>
32#include <asm/prom.h>
33#include <asm/system.h>
34#include <asm/pci-bridge.h>
35#include <asm/irq.h>
36#include <asm/pmac_feature.h>
37#include <asm/dma.h>
38#include <asm/machdep.h>
39#include <asm/hw_irq.h>
40#include <asm/nvram.h>
41#include <asm/mmu_context.h>
42#include <asm/backlight.h>
43#include <asm/time.h>
44#include <asm/cputable.h>
45#include <asm/btext.h>
46#include <asm/div64.h>
47
48#ifdef CONFIG_8xx
49#include <asm/commproc.h>
50#endif
51
52#ifdef CONFIG_PPC32
53extern void transfer_to_handler(void);
54extern void do_IRQ(struct pt_regs *regs);
55extern void machine_check_exception(struct pt_regs *regs);
56extern void alignment_exception(struct pt_regs *regs);
57extern void program_check_exception(struct pt_regs *regs);
58extern void single_step_exception(struct pt_regs *regs);
59extern int do_signal(sigset_t *, struct pt_regs *);
60extern int pmac_newworld;
61extern int sys_sigreturn(struct pt_regs *regs);
62
63EXPORT_SYMBOL(clear_pages);
64EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
65EXPORT_SYMBOL(DMA_MODE_READ);
66EXPORT_SYMBOL(DMA_MODE_WRITE);
67EXPORT_SYMBOL(__div64_32);
68
69EXPORT_SYMBOL(do_signal);
70EXPORT_SYMBOL(transfer_to_handler);
71EXPORT_SYMBOL(do_IRQ);
72EXPORT_SYMBOL(machine_check_exception);
73EXPORT_SYMBOL(alignment_exception);
74EXPORT_SYMBOL(program_check_exception);
75EXPORT_SYMBOL(single_step_exception);
76EXPORT_SYMBOL(sys_sigreturn);
77#endif
78
79#if defined(CONFIG_PPC_PREP)
80EXPORT_SYMBOL(_prep_type);
81EXPORT_SYMBOL(ucSystemType);
82#endif
83
84#if !defined(__INLINE_BITOPS)
85EXPORT_SYMBOL(set_bit);
86EXPORT_SYMBOL(clear_bit);
87EXPORT_SYMBOL(change_bit);
88EXPORT_SYMBOL(test_and_set_bit);
89EXPORT_SYMBOL(test_and_clear_bit);
90EXPORT_SYMBOL(test_and_change_bit);
91#endif /* __INLINE_BITOPS */
92
93EXPORT_SYMBOL(strcpy);
94EXPORT_SYMBOL(strncpy);
95EXPORT_SYMBOL(strcat);
96EXPORT_SYMBOL(strncat);
97EXPORT_SYMBOL(strchr);
98EXPORT_SYMBOL(strrchr);
99EXPORT_SYMBOL(strpbrk);
100EXPORT_SYMBOL(strstr);
101EXPORT_SYMBOL(strlen);
102EXPORT_SYMBOL(strnlen);
103EXPORT_SYMBOL(strcmp);
104EXPORT_SYMBOL(strncmp);
105EXPORT_SYMBOL(strcasecmp);
106
107EXPORT_SYMBOL(csum_partial);
108EXPORT_SYMBOL(csum_partial_copy_generic);
109EXPORT_SYMBOL(ip_fast_csum);
110EXPORT_SYMBOL(csum_tcpudp_magic);
111
112EXPORT_SYMBOL(__copy_tofrom_user);
113EXPORT_SYMBOL(__clear_user);
114EXPORT_SYMBOL(__strncpy_from_user);
115EXPORT_SYMBOL(__strnlen_user);
116
117EXPORT_SYMBOL(_insb);
118EXPORT_SYMBOL(_outsb);
119EXPORT_SYMBOL(_insw);
120EXPORT_SYMBOL(_outsw);
121EXPORT_SYMBOL(_insl);
122EXPORT_SYMBOL(_outsl);
123EXPORT_SYMBOL(_insw_ns);
124EXPORT_SYMBOL(_outsw_ns);
125EXPORT_SYMBOL(_insl_ns);
126EXPORT_SYMBOL(_outsl_ns);
127EXPORT_SYMBOL(ioremap);
128#ifdef CONFIG_44x
129EXPORT_SYMBOL(ioremap64);
130#endif
131EXPORT_SYMBOL(__ioremap);
132EXPORT_SYMBOL(iounmap);
133#ifdef CONFIG_PPC32
134EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
135#endif
136
137#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE))
138EXPORT_SYMBOL(ppc_ide_md);
139#endif
140
141#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
142EXPORT_SYMBOL(isa_io_base);
143EXPORT_SYMBOL(isa_mem_base);
144EXPORT_SYMBOL(pci_dram_offset);
145EXPORT_SYMBOL(pci_alloc_consistent);
146EXPORT_SYMBOL(pci_free_consistent);
147EXPORT_SYMBOL(pci_bus_io_base);
148EXPORT_SYMBOL(pci_bus_io_base_phys);
149EXPORT_SYMBOL(pci_bus_mem_base_phys);
150EXPORT_SYMBOL(pci_bus_to_hose);
151EXPORT_SYMBOL(pci_resource_to_bus);
152EXPORT_SYMBOL(pci_phys_to_bus);
153EXPORT_SYMBOL(pci_bus_to_phys);
154#endif /* CONFIG_PCI */
155
156#ifdef CONFIG_NOT_COHERENT_CACHE
157EXPORT_SYMBOL(flush_dcache_all);
158#endif
159
160EXPORT_SYMBOL(start_thread);
161EXPORT_SYMBOL(kernel_thread);
162
163EXPORT_SYMBOL(giveup_fpu);
164#ifdef CONFIG_ALTIVEC
165EXPORT_SYMBOL(giveup_altivec);
166#endif /* CONFIG_ALTIVEC */
167#ifdef CONFIG_SPE
168EXPORT_SYMBOL(giveup_spe);
169#endif /* CONFIG_SPE */
170
171#ifdef CONFIG_PPC64
172EXPORT_SYMBOL(__flush_icache_range);
173#else
174EXPORT_SYMBOL(flush_instruction_cache);
175EXPORT_SYMBOL(flush_icache_range);
176EXPORT_SYMBOL(flush_tlb_kernel_range);
177EXPORT_SYMBOL(flush_tlb_page);
178EXPORT_SYMBOL(_tlbie);
179#endif
180EXPORT_SYMBOL(flush_dcache_range);
181
182#ifdef CONFIG_SMP
183EXPORT_SYMBOL(smp_call_function);
184#ifdef CONFIG_PPC32
185EXPORT_SYMBOL(smp_hw_index);
186#endif
187#endif
188
189#ifdef CONFIG_ADB
190EXPORT_SYMBOL(adb_request);
191EXPORT_SYMBOL(adb_register);
192EXPORT_SYMBOL(adb_unregister);
193EXPORT_SYMBOL(adb_poll);
194EXPORT_SYMBOL(adb_try_handler_change);
195#endif /* CONFIG_ADB */
196#ifdef CONFIG_ADB_CUDA
197EXPORT_SYMBOL(cuda_request);
198EXPORT_SYMBOL(cuda_poll);
199#endif /* CONFIG_ADB_CUDA */
200#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
201EXPORT_SYMBOL(_machine);
202#endif
203#ifdef CONFIG_PPC_PMAC
204EXPORT_SYMBOL(sys_ctrler);
205#endif
206#ifdef CONFIG_VT
207EXPORT_SYMBOL(kd_mksound);
208#endif
209EXPORT_SYMBOL(to_tm);
210
211#ifdef CONFIG_PPC32
212long long __ashrdi3(long long, int);
213long long __ashldi3(long long, int);
214long long __lshrdi3(long long, int);
215EXPORT_SYMBOL(__ashrdi3);
216EXPORT_SYMBOL(__ashldi3);
217EXPORT_SYMBOL(__lshrdi3);
218#endif
219
220EXPORT_SYMBOL(memcpy);
221EXPORT_SYMBOL(memset);
222EXPORT_SYMBOL(memmove);
223EXPORT_SYMBOL(memscan);
224EXPORT_SYMBOL(memcmp);
225EXPORT_SYMBOL(memchr);
226
227#if defined(CONFIG_FB_VGA16_MODULE)
228EXPORT_SYMBOL(screen_info);
229#endif
230
231#ifdef CONFIG_PPC32
232EXPORT_SYMBOL(__delay);
233EXPORT_SYMBOL(timer_interrupt);
234EXPORT_SYMBOL(irq_desc);
235EXPORT_SYMBOL(tb_ticks_per_jiffy);
236EXPORT_SYMBOL(console_drivers);
237EXPORT_SYMBOL(cacheable_memcpy);
238#endif
239
240EXPORT_SYMBOL(__up);
241EXPORT_SYMBOL(__down);
242EXPORT_SYMBOL(__down_interruptible);
243
244#ifdef CONFIG_8xx
245EXPORT_SYMBOL(cpm_install_handler);
246EXPORT_SYMBOL(cpm_free_handler);
247#endif /* CONFIG_8xx */
248#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
249 defined(CONFIG_83xx)
250EXPORT_SYMBOL(__res);
251#endif
252
253#ifdef CONFIG_PPC32
254EXPORT_SYMBOL(next_mmu_context);
255EXPORT_SYMBOL(set_context);
256#endif
257
258#ifdef CONFIG_PPC_STD_MMU_32
259extern long mol_trampoline;
260EXPORT_SYMBOL(mol_trampoline); /* For MOL */
261EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
262EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
263#ifdef CONFIG_SMP
264extern int mmu_hash_lock;
265EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
266#endif /* CONFIG_SMP */
267extern long *intercept_table;
268EXPORT_SYMBOL(intercept_table);
269#endif /* CONFIG_PPC_STD_MMU_32 */
270#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
271EXPORT_SYMBOL(__mtdcr);
272EXPORT_SYMBOL(__mfdcr);
273#endif
diff --git a/arch/ppc64/kernel/process.c b/arch/powerpc/kernel/process.c
index 887005358eb1..8f85dabe4df3 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/process.c 2 * arch/ppc/kernel/process.c
3 * 3 *
4 * Derived from "arch/i386/kernel/process.c" 4 * Derived from "arch/i386/kernel/process.c"
5 * Copyright (C) 1995 Linus Torvalds 5 * Copyright (C) 1995 Linus Torvalds
@@ -7,7 +7,7 @@
7 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and 7 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
8 * Paul Mackerras (paulus@cs.anu.edu.au) 8 * Paul Mackerras (paulus@cs.anu.edu.au)
9 * 9 *
10 * PowerPC version 10 * PowerPC version
11 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 11 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
12 * 12 *
13 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
@@ -17,7 +17,6 @@
17 */ 17 */
18 18
19#include <linux/config.h> 19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/errno.h> 20#include <linux/errno.h>
22#include <linux/sched.h> 21#include <linux/sched.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -26,15 +25,17 @@
26#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
27#include <linux/stddef.h> 26#include <linux/stddef.h>
28#include <linux/unistd.h> 27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/user.h> 30#include <linux/user.h>
31#include <linux/elf.h> 31#include <linux/elf.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/init_task.h>
34#include <linux/prctl.h> 33#include <linux/prctl.h>
35#include <linux/ptrace.h> 34#include <linux/init_task.h>
35#include <linux/module.h>
36#include <linux/kallsyms.h> 36#include <linux/kallsyms.h>
37#include <linux/interrupt.h> 37#include <linux/mqueue.h>
38#include <linux/hardirq.h>
38#include <linux/utsname.h> 39#include <linux/utsname.h>
39#include <linux/kprobes.h> 40#include <linux/kprobes.h>
40 41
@@ -44,21 +45,19 @@
44#include <asm/io.h> 45#include <asm/io.h>
45#include <asm/processor.h> 46#include <asm/processor.h>
46#include <asm/mmu.h> 47#include <asm/mmu.h>
47#include <asm/mmu_context.h>
48#include <asm/prom.h> 48#include <asm/prom.h>
49#include <asm/ppcdebug.h> 49#ifdef CONFIG_PPC64
50#include <asm/machdep.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/cputable.h>
53#include <asm/firmware.h> 50#include <asm/firmware.h>
54#include <asm/sections.h>
55#include <asm/tlbflush.h>
56#include <asm/time.h>
57#include <asm/plpar_wrappers.h> 51#include <asm/plpar_wrappers.h>
52#include <asm/time.h>
53#endif
54
55extern unsigned long _get_SP(void);
58 56
59#ifndef CONFIG_SMP 57#ifndef CONFIG_SMP
60struct task_struct *last_task_used_math = NULL; 58struct task_struct *last_task_used_math = NULL;
61struct task_struct *last_task_used_altivec = NULL; 59struct task_struct *last_task_used_altivec = NULL;
60struct task_struct *last_task_used_spe = NULL;
62#endif 61#endif
63 62
64/* 63/*
@@ -121,7 +120,6 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
121} 120}
122 121
123#ifdef CONFIG_ALTIVEC 122#ifdef CONFIG_ALTIVEC
124
125void enable_kernel_altivec(void) 123void enable_kernel_altivec(void)
126{ 124{
127 WARN_ON(preemptible()); 125 WARN_ON(preemptible());
@@ -130,7 +128,7 @@ void enable_kernel_altivec(void)
130 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) 128 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
131 giveup_altivec(current); 129 giveup_altivec(current);
132 else 130 else
133 giveup_altivec(NULL); /* just enables FP for kernel */ 131 giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
134#else 132#else
135 giveup_altivec(last_task_used_altivec); 133 giveup_altivec(last_task_used_altivec);
136#endif /* CONFIG_SMP */ 134#endif /* CONFIG_SMP */
@@ -161,9 +159,48 @@ int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
161 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs)); 159 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
162 return 1; 160 return 1;
163} 161}
164
165#endif /* CONFIG_ALTIVEC */ 162#endif /* CONFIG_ALTIVEC */
166 163
164#ifdef CONFIG_SPE
165
166void enable_kernel_spe(void)
167{
168 WARN_ON(preemptible());
169
170#ifdef CONFIG_SMP
171 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
172 giveup_spe(current);
173 else
174 giveup_spe(NULL); /* just enable SPE for kernel - force */
175#else
176 giveup_spe(last_task_used_spe);
177#endif /* CONFIG_SMP */
178}
179EXPORT_SYMBOL(enable_kernel_spe);
180
181void flush_spe_to_thread(struct task_struct *tsk)
182{
183 if (tsk->thread.regs) {
184 preempt_disable();
185 if (tsk->thread.regs->msr & MSR_SPE) {
186#ifdef CONFIG_SMP
187 BUG_ON(tsk != current);
188#endif
189 giveup_spe(current);
190 }
191 preempt_enable();
192 }
193}
194
195int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
196{
197 flush_spe_to_thread(current);
198 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
199 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
200 return 1;
201}
202#endif /* CONFIG_SPE */
203
167static void set_dabr_spr(unsigned long val) 204static void set_dabr_spr(unsigned long val)
168{ 205{
169 mtspr(SPRN_DABR, val); 206 mtspr(SPRN_DABR, val);
@@ -173,24 +210,27 @@ int set_dabr(unsigned long dabr)
173{ 210{
174 int ret = 0; 211 int ret = 0;
175 212
213#ifdef CONFIG_PPC64
176 if (firmware_has_feature(FW_FEATURE_XDABR)) { 214 if (firmware_has_feature(FW_FEATURE_XDABR)) {
177 /* We want to catch accesses from kernel and userspace */ 215 /* We want to catch accesses from kernel and userspace */
178 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER; 216 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
179 ret = plpar_set_xdabr(dabr, flags); 217 ret = plpar_set_xdabr(dabr, flags);
180 } else if (firmware_has_feature(FW_FEATURE_DABR)) { 218 } else if (firmware_has_feature(FW_FEATURE_DABR)) {
181 ret = plpar_set_dabr(dabr); 219 ret = plpar_set_dabr(dabr);
182 } else { 220 } else
221#endif
183 set_dabr_spr(dabr); 222 set_dabr_spr(dabr);
184 }
185 223
186 return ret; 224 return ret;
187} 225}
188 226
227#ifdef CONFIG_PPC64
189DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); 228DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
190static DEFINE_PER_CPU(unsigned long, current_dabr); 229static DEFINE_PER_CPU(unsigned long, current_dabr);
230#endif
191 231
192struct task_struct *__switch_to(struct task_struct *prev, 232struct task_struct *__switch_to(struct task_struct *prev,
193 struct task_struct *new) 233 struct task_struct *new)
194{ 234{
195 struct thread_struct *new_thread, *old_thread; 235 struct thread_struct *new_thread, *old_thread;
196 unsigned long flags; 236 unsigned long flags;
@@ -200,7 +240,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
200 /* avoid complexity of lazy save/restore of fpu 240 /* avoid complexity of lazy save/restore of fpu
201 * by just saving it every time we switch out if 241 * by just saving it every time we switch out if
202 * this task used the fpu during the last quantum. 242 * this task used the fpu during the last quantum.
203 * 243 *
204 * If it tries to use the fpu again, it'll trap and 244 * If it tries to use the fpu again, it'll trap and
205 * reload its fp regs. So we don't have to do a restore 245 * reload its fp regs. So we don't have to do a restore
206 * every switch, just a save. 246 * every switch, just a save.
@@ -209,31 +249,65 @@ struct task_struct *__switch_to(struct task_struct *prev,
209 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) 249 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
210 giveup_fpu(prev); 250 giveup_fpu(prev);
211#ifdef CONFIG_ALTIVEC 251#ifdef CONFIG_ALTIVEC
252 /*
253 * If the previous thread used altivec in the last quantum
254 * (thus changing altivec regs) then save them.
255 * We used to check the VRSAVE register but not all apps
256 * set it, so we don't rely on it now (and in fact we need
257 * to save & restore VSCR even if VRSAVE == 0). -- paulus
258 *
259 * On SMP we always save/restore altivec regs just to avoid the
260 * complexity of changing processors.
261 * -- Cort
262 */
212 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) 263 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
213 giveup_altivec(prev); 264 giveup_altivec(prev);
214#endif /* CONFIG_ALTIVEC */ 265#endif /* CONFIG_ALTIVEC */
215#endif /* CONFIG_SMP */ 266#ifdef CONFIG_SPE
267 /*
268 * If the previous thread used spe in the last quantum
269 * (thus changing spe regs) then save them.
270 *
271 * On SMP we always save/restore spe regs just to avoid the
272 * complexity of changing processors.
273 */
274 if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
275 giveup_spe(prev);
276#endif /* CONFIG_SPE */
216 277
217#if defined(CONFIG_ALTIVEC) && !defined(CONFIG_SMP) 278#else /* CONFIG_SMP */
279#ifdef CONFIG_ALTIVEC
218 /* Avoid the trap. On smp this never happens since 280 /* Avoid the trap. On smp this never happens since
219 * we don't set last_task_used_altivec -- Cort 281 * we don't set last_task_used_altivec -- Cort
220 */ 282 */
221 if (new->thread.regs && last_task_used_altivec == new) 283 if (new->thread.regs && last_task_used_altivec == new)
222 new->thread.regs->msr |= MSR_VEC; 284 new->thread.regs->msr |= MSR_VEC;
223#endif /* CONFIG_ALTIVEC */ 285#endif /* CONFIG_ALTIVEC */
286#ifdef CONFIG_SPE
287 /* Avoid the trap. On smp this never happens since
288 * we don't set last_task_used_spe
289 */
290 if (new->thread.regs && last_task_used_spe == new)
291 new->thread.regs->msr |= MSR_SPE;
292#endif /* CONFIG_SPE */
224 293
294#endif /* CONFIG_SMP */
295
296#ifdef CONFIG_PPC64 /* for now */
225 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) { 297 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
226 set_dabr(new->thread.dabr); 298 set_dabr(new->thread.dabr);
227 __get_cpu_var(current_dabr) = new->thread.dabr; 299 __get_cpu_var(current_dabr) = new->thread.dabr;
228 } 300 }
229 301
230 flush_tlb_pending(); 302 flush_tlb_pending();
303#endif
231 304
232 new_thread = &new->thread; 305 new_thread = &new->thread;
233 old_thread = &current->thread; 306 old_thread = &current->thread;
234 307
235 /* Collect purr utilization data per process and per processor 308#ifdef CONFIG_PPC64
236 * wise purr is nothing but processor time base 309 /*
310 * Collect processor utilization data per process
237 */ 311 */
238 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 312 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
239 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 313 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
@@ -243,6 +317,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
243 old_thread->accum_tb += (current_tb - start_tb); 317 old_thread->accum_tb += (current_tb - start_tb);
244 new_thread->start_tb = current_tb; 318 new_thread->start_tb = current_tb;
245 } 319 }
320#endif
246 321
247 local_irq_save(flags); 322 local_irq_save(flags);
248 last = _switch(old_thread, new_thread); 323 last = _switch(old_thread, new_thread);
@@ -254,6 +329,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
254 329
255static int instructions_to_print = 16; 330static int instructions_to_print = 16;
256 331
332#ifdef CONFIG_PPC64
333#define BAD_PC(pc) ((REGION_ID(pc) != KERNEL_REGION_ID) && \
334 (REGION_ID(pc) != VMALLOC_REGION_ID))
335#else
336#define BAD_PC(pc) ((pc) < KERNELBASE)
337#endif
338
257static void show_instructions(struct pt_regs *regs) 339static void show_instructions(struct pt_regs *regs)
258{ 340{
259 int i; 341 int i;
@@ -268,9 +350,7 @@ static void show_instructions(struct pt_regs *regs)
268 if (!(i % 8)) 350 if (!(i % 8))
269 printk("\n"); 351 printk("\n");
270 352
271 if (((REGION_ID(pc) != KERNEL_REGION_ID) && 353 if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) {
272 (REGION_ID(pc) != VMALLOC_REGION_ID)) ||
273 __get_user(instr, (unsigned int *)pc)) {
274 printk("XXXXXXXX "); 354 printk("XXXXXXXX ");
275 } else { 355 } else {
276 if (regs->nip == pc) 356 if (regs->nip == pc)
@@ -285,50 +365,82 @@ static void show_instructions(struct pt_regs *regs)
285 printk("\n"); 365 printk("\n");
286} 366}
287 367
368static struct regbit {
369 unsigned long bit;
370 const char *name;
371} msr_bits[] = {
372 {MSR_EE, "EE"},
373 {MSR_PR, "PR"},
374 {MSR_FP, "FP"},
375 {MSR_ME, "ME"},
376 {MSR_IR, "IR"},
377 {MSR_DR, "DR"},
378 {0, NULL}
379};
380
381static void printbits(unsigned long val, struct regbit *bits)
382{
383 const char *sep = "";
384
385 printk("<");
386 for (; bits->bit; ++bits)
387 if (val & bits->bit) {
388 printk("%s%s", sep, bits->name);
389 sep = ",";
390 }
391 printk(">");
392}
393
394#ifdef CONFIG_PPC64
395#define REG "%016lX"
396#define REGS_PER_LINE 4
397#define LAST_VOLATILE 13
398#else
399#define REG "%08lX"
400#define REGS_PER_LINE 8
401#define LAST_VOLATILE 12
402#endif
403
288void show_regs(struct pt_regs * regs) 404void show_regs(struct pt_regs * regs)
289{ 405{
290 int i; 406 int i, trap;
291 unsigned long trap;
292 407
293 printk("NIP: %016lX XER: %08X LR: %016lX CTR: %016lX\n", 408 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
294 regs->nip, (unsigned int)regs->xer, regs->link, regs->ctr); 409 regs->nip, regs->link, regs->ctr);
295 printk("REGS: %p TRAP: %04lx %s (%s)\n", 410 printk("REGS: %p TRAP: %04lx %s (%s)\n",
296 regs, regs->trap, print_tainted(), system_utsname.release); 411 regs, regs->trap, print_tainted(), system_utsname.release);
297 printk("MSR: %016lx EE: %01x PR: %01x FP: %01x ME: %01x " 412 printk("MSR: "REG" ", regs->msr);
298 "IR/DR: %01x%01x CR: %08X\n", 413 printbits(regs->msr, msr_bits);
299 regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0, 414 printk(" CR: %08lX XER: %08lX\n", regs->ccr, regs->xer);
300 regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
301 regs->msr&MSR_IR ? 1 : 0,
302 regs->msr&MSR_DR ? 1 : 0,
303 (unsigned int)regs->ccr);
304 trap = TRAP(regs); 415 trap = TRAP(regs);
305 printk("DAR: %016lx DSISR: %016lx\n", regs->dar, regs->dsisr); 416 if (trap == 0x300 || trap == 0x600)
306 printk("TASK: %p[%d] '%s' THREAD: %p", 417 printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
418 printk("TASK = %p[%d] '%s' THREAD: %p",
307 current, current->pid, current->comm, current->thread_info); 419 current, current->pid, current->comm, current->thread_info);
308 420
309#ifdef CONFIG_SMP 421#ifdef CONFIG_SMP
310 printk(" CPU: %d", smp_processor_id()); 422 printk(" CPU: %d", smp_processor_id());
311#endif /* CONFIG_SMP */ 423#endif /* CONFIG_SMP */
312 424
313 for (i = 0; i < 32; i++) { 425 for (i = 0; i < 32; i++) {
314 if ((i % 4) == 0) { 426 if ((i % REGS_PER_LINE) == 0)
315 printk("\n" KERN_INFO "GPR%02d: ", i); 427 printk("\n" KERN_INFO "GPR%02d: ", i);
316 } 428 printk(REG " ", regs->gpr[i]);
317 429 if (i == LAST_VOLATILE && !FULL_REGS(regs))
318 printk("%016lX ", regs->gpr[i]);
319 if (i == 13 && !FULL_REGS(regs))
320 break; 430 break;
321 } 431 }
322 printk("\n"); 432 printk("\n");
433#ifdef CONFIG_KALLSYMS
323 /* 434 /*
324 * Lookup NIP late so we have the best chance of getting the 435 * Lookup NIP late so we have the best chance of getting the
325 * above info out without failing 436 * above info out without failing
326 */ 437 */
327 printk("NIP [%016lx] ", regs->nip); 438 printk("NIP ["REG"] ", regs->nip);
328 print_symbol("%s\n", regs->nip); 439 print_symbol("%s\n", regs->nip);
329 printk("LR [%016lx] ", regs->link); 440 printk("LR ["REG"] ", regs->link);
330 print_symbol("%s\n", regs->link); 441 print_symbol("%s\n", regs->link);
331 show_stack(current, (unsigned long *)regs->gpr[1]); 442#endif
443 show_stack(current, (unsigned long *) regs->gpr[1]);
332 if (!user_mode(regs)) 444 if (!user_mode(regs))
333 show_instructions(regs); 445 show_instructions(regs);
334} 446}
@@ -344,16 +456,22 @@ void exit_thread(void)
344 if (last_task_used_altivec == current) 456 if (last_task_used_altivec == current)
345 last_task_used_altivec = NULL; 457 last_task_used_altivec = NULL;
346#endif /* CONFIG_ALTIVEC */ 458#endif /* CONFIG_ALTIVEC */
459#ifdef CONFIG_SPE
460 if (last_task_used_spe == current)
461 last_task_used_spe = NULL;
462#endif
347#endif /* CONFIG_SMP */ 463#endif /* CONFIG_SMP */
348} 464}
349 465
350void flush_thread(void) 466void flush_thread(void)
351{ 467{
468#ifdef CONFIG_PPC64
352 struct thread_info *t = current_thread_info(); 469 struct thread_info *t = current_thread_info();
353 470
354 kprobe_flush_task(current);
355 if (t->flags & _TIF_ABI_PENDING) 471 if (t->flags & _TIF_ABI_PENDING)
356 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); 472 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
473#endif
474 kprobe_flush_task(current);
357 475
358#ifndef CONFIG_SMP 476#ifndef CONFIG_SMP
359 if (last_task_used_math == current) 477 if (last_task_used_math == current)
@@ -362,12 +480,18 @@ void flush_thread(void)
362 if (last_task_used_altivec == current) 480 if (last_task_used_altivec == current)
363 last_task_used_altivec = NULL; 481 last_task_used_altivec = NULL;
364#endif /* CONFIG_ALTIVEC */ 482#endif /* CONFIG_ALTIVEC */
483#ifdef CONFIG_SPE
484 if (last_task_used_spe == current)
485 last_task_used_spe = NULL;
486#endif
365#endif /* CONFIG_SMP */ 487#endif /* CONFIG_SMP */
366 488
489#ifdef CONFIG_PPC64 /* for now */
367 if (current->thread.dabr) { 490 if (current->thread.dabr) {
368 current->thread.dabr = 0; 491 current->thread.dabr = 0;
369 set_dabr(0); 492 set_dabr(0);
370 } 493 }
494#endif
371} 495}
372 496
373void 497void
@@ -375,7 +499,6 @@ release_thread(struct task_struct *t)
375{ 499{
376} 500}
377 501
378
379/* 502/*
380 * This gets called before we allocate a new thread and copy 503 * This gets called before we allocate a new thread and copy
381 * the current task into it. 504 * the current task into it.
@@ -384,36 +507,44 @@ void prepare_to_copy(struct task_struct *tsk)
384{ 507{
385 flush_fp_to_thread(current); 508 flush_fp_to_thread(current);
386 flush_altivec_to_thread(current); 509 flush_altivec_to_thread(current);
510 flush_spe_to_thread(current);
387} 511}
388 512
389/* 513/*
390 * Copy a thread.. 514 * Copy a thread..
391 */ 515 */
392int 516int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
393copy_thread(int nr, unsigned long clone_flags, unsigned long usp, 517 unsigned long unused, struct task_struct *p,
394 unsigned long unused, struct task_struct *p, struct pt_regs *regs) 518 struct pt_regs *regs)
395{ 519{
396 struct pt_regs *childregs, *kregs; 520 struct pt_regs *childregs, *kregs;
397 extern void ret_from_fork(void); 521 extern void ret_from_fork(void);
398 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE; 522 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
399 523
524 CHECK_FULL_REGS(regs);
400 /* Copy registers */ 525 /* Copy registers */
401 sp -= sizeof(struct pt_regs); 526 sp -= sizeof(struct pt_regs);
402 childregs = (struct pt_regs *) sp; 527 childregs = (struct pt_regs *) sp;
403 *childregs = *regs; 528 *childregs = *regs;
404 if ((childregs->msr & MSR_PR) == 0) { 529 if ((childregs->msr & MSR_PR) == 0) {
405 /* for kernel thread, set stackptr in new task */ 530 /* for kernel thread, set `current' and stackptr in new task */
406 childregs->gpr[1] = sp + sizeof(struct pt_regs); 531 childregs->gpr[1] = sp + sizeof(struct pt_regs);
407 p->thread.regs = NULL; /* no user register state */ 532#ifdef CONFIG_PPC32
533 childregs->gpr[2] = (unsigned long) p;
534#else
408 clear_ti_thread_flag(p->thread_info, TIF_32BIT); 535 clear_ti_thread_flag(p->thread_info, TIF_32BIT);
536#endif
537 p->thread.regs = NULL; /* no user register state */
409 } else { 538 } else {
410 childregs->gpr[1] = usp; 539 childregs->gpr[1] = usp;
411 p->thread.regs = childregs; 540 p->thread.regs = childregs;
412 if (clone_flags & CLONE_SETTLS) { 541 if (clone_flags & CLONE_SETTLS) {
413 if (test_thread_flag(TIF_32BIT)) 542#ifdef CONFIG_PPC64
414 childregs->gpr[2] = childregs->gpr[6]; 543 if (!test_thread_flag(TIF_32BIT))
415 else
416 childregs->gpr[13] = childregs->gpr[6]; 544 childregs->gpr[13] = childregs->gpr[6];
545 else
546#endif
547 childregs->gpr[2] = childregs->gpr[6];
417 } 548 }
418 } 549 }
419 childregs->gpr[3] = 0; /* Result from fork() */ 550 childregs->gpr[3] = 0; /* Result from fork() */
@@ -431,6 +562,8 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
431 kregs = (struct pt_regs *) sp; 562 kregs = (struct pt_regs *) sp;
432 sp -= STACK_FRAME_OVERHEAD; 563 sp -= STACK_FRAME_OVERHEAD;
433 p->thread.ksp = sp; 564 p->thread.ksp = sp;
565
566#ifdef CONFIG_PPC64
434 if (cpu_has_feature(CPU_FTR_SLB)) { 567 if (cpu_has_feature(CPU_FTR_SLB)) {
435 unsigned long sp_vsid = get_kernel_vsid(sp); 568 unsigned long sp_vsid = get_kernel_vsid(sp);
436 569
@@ -449,6 +582,10 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
449 * function. 582 * function.
450 */ 583 */
451 kregs->nip = *((unsigned long *)ret_from_fork); 584 kregs->nip = *((unsigned long *)ret_from_fork);
585#else
586 kregs->nip = (unsigned long)ret_from_fork;
587 p->thread.last_syscall = -1;
588#endif
452 589
453 return 0; 590 return 0;
454} 591}
@@ -456,30 +593,17 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
456/* 593/*
457 * Set up a thread for executing a new program 594 * Set up a thread for executing a new program
458 */ 595 */
459void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp) 596void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
460{ 597{
461 unsigned long entry, toc, load_addr = regs->gpr[2]; 598#ifdef CONFIG_PPC64
599 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
600#endif
462 601
463 /* fdptr is a relocated pointer to the function descriptor for
464 * the elf _start routine. The first entry in the function
465 * descriptor is the entry address of _start and the second
466 * entry is the TOC value we need to use.
467 */
468 set_fs(USER_DS); 602 set_fs(USER_DS);
469 __get_user(entry, (unsigned long __user *)fdptr);
470 __get_user(toc, (unsigned long __user *)fdptr+1);
471
472 /* Check whether the e_entry function descriptor entries
473 * need to be relocated before we can use them.
474 */
475 if (load_addr != 0) {
476 entry += load_addr;
477 toc += load_addr;
478 }
479 603
480 /* 604 /*
481 * If we exec out of a kernel thread then thread.regs will not be 605 * If we exec out of a kernel thread then thread.regs will not be
482 * set. Do it now. 606 * set. Do it now.
483 */ 607 */
484 if (!current->thread.regs) { 608 if (!current->thread.regs) {
485 unsigned long childregs = (unsigned long)current->thread_info + 609 unsigned long childregs = (unsigned long)current->thread_info +
@@ -488,36 +612,101 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
488 current->thread.regs = (struct pt_regs *)childregs; 612 current->thread.regs = (struct pt_regs *)childregs;
489 } 613 }
490 614
491 regs->nip = entry; 615 memset(regs->gpr, 0, sizeof(regs->gpr));
616 regs->ctr = 0;
617 regs->link = 0;
618 regs->xer = 0;
619 regs->ccr = 0;
492 regs->gpr[1] = sp; 620 regs->gpr[1] = sp;
493 regs->gpr[2] = toc; 621
494 regs->msr = MSR_USER64; 622#ifdef CONFIG_PPC32
623 regs->mq = 0;
624 regs->nip = start;
625 regs->msr = MSR_USER;
626#else
627 if (!test_thread_flag(TIF_32BIT)) {
628 unsigned long entry, toc;
629
630 /* start is a relocated pointer to the function descriptor for
631 * the elf _start routine. The first entry in the function
632 * descriptor is the entry address of _start and the second
633 * entry is the TOC value we need to use.
634 */
635 __get_user(entry, (unsigned long __user *)start);
636 __get_user(toc, (unsigned long __user *)start+1);
637
638 /* Check whether the e_entry function descriptor entries
639 * need to be relocated before we can use them.
640 */
641 if (load_addr != 0) {
642 entry += load_addr;
643 toc += load_addr;
644 }
645 regs->nip = entry;
646 regs->gpr[2] = toc;
647 regs->msr = MSR_USER64;
648 } else {
649 regs->nip = start;
650 regs->gpr[2] = 0;
651 regs->msr = MSR_USER32;
652 }
653#endif
654
495#ifndef CONFIG_SMP 655#ifndef CONFIG_SMP
496 if (last_task_used_math == current) 656 if (last_task_used_math == current)
497 last_task_used_math = 0; 657 last_task_used_math = NULL;
498#endif /* CONFIG_SMP */
499 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
500 current->thread.fpscr = 0;
501#ifdef CONFIG_ALTIVEC 658#ifdef CONFIG_ALTIVEC
502#ifndef CONFIG_SMP
503 if (last_task_used_altivec == current) 659 if (last_task_used_altivec == current)
504 last_task_used_altivec = 0; 660 last_task_used_altivec = NULL;
661#endif
662#ifdef CONFIG_SPE
663 if (last_task_used_spe == current)
664 last_task_used_spe = NULL;
665#endif
505#endif /* CONFIG_SMP */ 666#endif /* CONFIG_SMP */
667 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
668 current->thread.fpscr.val = 0;
669#ifdef CONFIG_ALTIVEC
506 memset(current->thread.vr, 0, sizeof(current->thread.vr)); 670 memset(current->thread.vr, 0, sizeof(current->thread.vr));
507 current->thread.vscr.u[0] = 0; 671 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
508 current->thread.vscr.u[1] = 0;
509 current->thread.vscr.u[2] = 0;
510 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */ 672 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
511 current->thread.vrsave = 0; 673 current->thread.vrsave = 0;
512 current->thread.used_vr = 0; 674 current->thread.used_vr = 0;
513#endif /* CONFIG_ALTIVEC */ 675#endif /* CONFIG_ALTIVEC */
676#ifdef CONFIG_SPE
677 memset(current->thread.evr, 0, sizeof(current->thread.evr));
678 current->thread.acc = 0;
679 current->thread.spefscr = 0;
680 current->thread.used_spe = 0;
681#endif /* CONFIG_SPE */
514} 682}
515EXPORT_SYMBOL(start_thread); 683
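The 64-bit branch of start_thread() above dereferences an ELF function descriptor to find the entry point and TOC. As a rough illustration of the layout being read (the struct and field names here are illustrative, not kernel definitions), a 64-bit ELF (v1) descriptor is three doublewords:

struct elf64_func_desc_example {
	unsigned long entry;	/* address of the function's code (_start) */
	unsigned long toc;	/* TOC base the callee expects in r2 */
	unsigned long env;	/* environment pointer, unused here */
};

The two __get_user() calls fetch the first two doublewords of such a descriptor from user space and, if load_addr is non-zero, relocate them before they land in regs->nip and regs->gpr[2].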
684#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
685 | PR_FP_EXC_RES | PR_FP_EXC_INV)
516 686
517int set_fpexc_mode(struct task_struct *tsk, unsigned int val) 687int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
518{ 688{
519 struct pt_regs *regs = tsk->thread.regs; 689 struct pt_regs *regs = tsk->thread.regs;
520 690
691 /* This is a bit hairy. If we are an SPE enabled processor
692 * (have embedded fp) we store the IEEE exception enable flags in
693 * fpexc_mode. fpexc_mode is also used for setting FP exception
694 * mode (async, precise, disabled) for 'Classic' FP. */
695 if (val & PR_FP_EXC_SW_ENABLE) {
696#ifdef CONFIG_SPE
697 tsk->thread.fpexc_mode = val &
698 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
699 return 0;
700#else
701 return -EINVAL;
702#endif
703 }
704
705 /* On a CONFIG_SPE implementation this does not hurt us. The bits that
706 * __pack_fe01 use do not overlap with bits used for
707 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
708 * on CONFIG_SPE implementations are reserved so writing to
709 * them does not change anything */
521 if (val > PR_FP_EXC_PRECISE) 710 if (val > PR_FP_EXC_PRECISE)
522 return -EINVAL; 711 return -EINVAL;
523 tsk->thread.fpexc_mode = __pack_fe01(val); 712 tsk->thread.fpexc_mode = __pack_fe01(val);
@@ -531,38 +720,41 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
531{ 720{
532 unsigned int val; 721 unsigned int val;
533 722
534 val = __unpack_fe01(tsk->thread.fpexc_mode); 723 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
724#ifdef CONFIG_SPE
725 val = tsk->thread.fpexc_mode;
726#else
727 return -EINVAL;
728#endif
729 else
730 val = __unpack_fe01(tsk->thread.fpexc_mode);
535 return put_user(val, (unsigned int __user *) adr); 731 return put_user(val, (unsigned int __user *) adr);
536} 732}
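These two routines back the PR_SET_FPEXC / PR_GET_FPEXC prctl(2) options. A minimal userspace sketch (not part of this patch), assuming only the PR_* constants from <linux/prctl.h>:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	unsigned int mode;

	/* request precise (synchronous) FP exception reporting */
	if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) != 0)
		perror("PR_SET_FPEXC");

	/* read the mode back; the kernel unpacks it from fpexc_mode */
	if (prctl(PR_GET_FPEXC, &mode) == 0)
		printf("fpexc mode: %#x\n", mode);
	return 0;
}

On an SPE processor the same interface instead carries the software IEEE exception enables via PR_FP_EXC_SW_ENABLE, as the comment in set_fpexc_mode() explains.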
537 733
538int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3, 734#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
539 unsigned long p4, unsigned long p5, unsigned long p6, 735
736int sys_clone(unsigned long clone_flags, unsigned long usp,
737 int __user *parent_tidp, void __user *child_threadptr,
738 int __user *child_tidp, int p6,
540 struct pt_regs *regs) 739 struct pt_regs *regs)
541{ 740{
542 unsigned long parent_tidptr = 0; 741 CHECK_FULL_REGS(regs);
543 unsigned long child_tidptr = 0; 742 if (usp == 0)
544 743 usp = regs->gpr[1]; /* stack pointer for child */
545 if (p2 == 0) 744#ifdef CONFIG_PPC64
546 p2 = regs->gpr[1]; /* stack pointer for child */ 745 if (test_thread_flag(TIF_32BIT)) {
547 746 parent_tidp = TRUNC_PTR(parent_tidp);
548 if (clone_flags & (CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | 747 child_tidp = TRUNC_PTR(child_tidp);
549 CLONE_CHILD_CLEARTID)) {
550 parent_tidptr = p3;
551 child_tidptr = p5;
552 if (test_thread_flag(TIF_32BIT)) {
553 parent_tidptr &= 0xffffffff;
554 child_tidptr &= 0xffffffff;
555 }
556 } 748 }
557 749#endif
558 return do_fork(clone_flags, p2, regs, 0, 750 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
559 (int __user *)parent_tidptr, (int __user *)child_tidptr);
560} 751}
561 752
562int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, 753int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
563 unsigned long p4, unsigned long p5, unsigned long p6, 754 unsigned long p4, unsigned long p5, unsigned long p6,
564 struct pt_regs *regs) 755 struct pt_regs *regs)
565{ 756{
757 CHECK_FULL_REGS(regs);
566 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); 758 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
567} 759}
568 760
@@ -570,8 +762,9 @@ int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
570 unsigned long p4, unsigned long p5, unsigned long p6, 762 unsigned long p4, unsigned long p5, unsigned long p6,
571 struct pt_regs *regs) 763 struct pt_regs *regs)
572{ 764{
573 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0, 765 CHECK_FULL_REGS(regs);
574 NULL, NULL); 766 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
767 regs, 0, NULL, NULL);
575} 768}
576 769
577int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, 770int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -579,30 +772,27 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
579 struct pt_regs *regs) 772 struct pt_regs *regs)
580{ 773{
581 int error; 774 int error;
582 char * filename; 775 char *filename;
583 776
584 filename = getname((char __user *) a0); 777 filename = getname((char __user *) a0);
585 error = PTR_ERR(filename); 778 error = PTR_ERR(filename);
586 if (IS_ERR(filename)) 779 if (IS_ERR(filename))
587 goto out; 780 goto out;
588 flush_fp_to_thread(current); 781 flush_fp_to_thread(current);
589 flush_altivec_to_thread(current); 782 flush_altivec_to_thread(current);
783 flush_spe_to_thread(current);
590 error = do_execve(filename, (char __user * __user *) a1, 784 error = do_execve(filename, (char __user * __user *) a1,
591 (char __user * __user *) a2, regs); 785 (char __user * __user *) a2, regs);
592
593 if (error == 0) { 786 if (error == 0) {
594 task_lock(current); 787 task_lock(current);
595 current->ptrace &= ~PT_DTRACE; 788 current->ptrace &= ~PT_DTRACE;
596 task_unlock(current); 789 task_unlock(current);
597 } 790 }
598 putname(filename); 791 putname(filename);
599
600out: 792out:
601 return error; 793 return error;
602} 794}
603 795
604static int kstack_depth_to_print = 64;
605
606static int validate_sp(unsigned long sp, struct task_struct *p, 796static int validate_sp(unsigned long sp, struct task_struct *p,
607 unsigned long nbytes) 797 unsigned long nbytes)
608{ 798{
@@ -627,6 +817,20 @@ static int validate_sp(unsigned long sp, struct task_struct *p,
627 return 0; 817 return 0;
628} 818}
629 819
820#ifdef CONFIG_PPC64
821#define MIN_STACK_FRAME 112 /* same as STACK_FRAME_OVERHEAD, in fact */
822#define FRAME_LR_SAVE 2
823#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD + 288)
824#define REGS_MARKER 0x7265677368657265ul
825#define FRAME_MARKER 12
826#else
827#define MIN_STACK_FRAME 16
828#define FRAME_LR_SAVE 1
829#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
830#define REGS_MARKER 0x72656773ul
831#define FRAME_MARKER 2
832#endif
833
630unsigned long get_wchan(struct task_struct *p) 834unsigned long get_wchan(struct task_struct *p)
631{ 835{
632 unsigned long ip, sp; 836 unsigned long ip, sp;
@@ -636,15 +840,15 @@ unsigned long get_wchan(struct task_struct *p)
636 return 0; 840 return 0;
637 841
638 sp = p->thread.ksp; 842 sp = p->thread.ksp;
639 if (!validate_sp(sp, p, 112)) 843 if (!validate_sp(sp, p, MIN_STACK_FRAME))
640 return 0; 844 return 0;
641 845
642 do { 846 do {
643 sp = *(unsigned long *)sp; 847 sp = *(unsigned long *)sp;
644 if (!validate_sp(sp, p, 112)) 848 if (!validate_sp(sp, p, MIN_STACK_FRAME))
645 return 0; 849 return 0;
646 if (count > 0) { 850 if (count > 0) {
647 ip = *(unsigned long *)(sp + 16); 851 ip = ((unsigned long *)sp)[FRAME_LR_SAVE];
648 if (!in_sched_functions(ip)) 852 if (!in_sched_functions(ip))
649 return ip; 853 return ip;
650 } 854 }
@@ -653,33 +857,35 @@ unsigned long get_wchan(struct task_struct *p)
653} 857}
654EXPORT_SYMBOL(get_wchan); 858EXPORT_SYMBOL(get_wchan);
655 859
656void show_stack(struct task_struct *p, unsigned long *_sp) 860static int kstack_depth_to_print = 64;
861
862void show_stack(struct task_struct *tsk, unsigned long *stack)
657{ 863{
658 unsigned long ip, newsp, lr; 864 unsigned long sp, ip, lr, newsp;
659 int count = 0; 865 int count = 0;
660 unsigned long sp = (unsigned long)_sp;
661 int firstframe = 1; 866 int firstframe = 1;
662 867
868 sp = (unsigned long) stack;
869 if (tsk == NULL)
870 tsk = current;
663 if (sp == 0) { 871 if (sp == 0) {
664 if (p) { 872 if (tsk == current)
665 sp = p->thread.ksp; 873 asm("mr %0,1" : "=r" (sp));
666 } else { 874 else
667 sp = __get_SP(); 875 sp = tsk->thread.ksp;
668 p = current;
669 }
670 } 876 }
671 877
672 lr = 0; 878 lr = 0;
673 printk("Call Trace:\n"); 879 printk("Call Trace:\n");
674 do { 880 do {
675 if (!validate_sp(sp, p, 112)) 881 if (!validate_sp(sp, tsk, MIN_STACK_FRAME))
676 return; 882 return;
677 883
678 _sp = (unsigned long *) sp; 884 stack = (unsigned long *) sp;
679 newsp = _sp[0]; 885 newsp = stack[0];
680 ip = _sp[2]; 886 ip = stack[FRAME_LR_SAVE];
681 if (!firstframe || ip != lr) { 887 if (!firstframe || ip != lr) {
682 printk("[%016lx] [%016lx] ", sp, ip); 888 printk("["REG"] ["REG"] ", sp, ip);
683 print_symbol("%s", ip); 889 print_symbol("%s", ip);
684 if (firstframe) 890 if (firstframe)
685 printk(" (unreliable)"); 891 printk(" (unreliable)");
@@ -691,8 +897,8 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
691 * See if this is an exception frame. 897 * See if this is an exception frame.
692 * We look for the "regshere" marker in the current frame. 898 * We look for the "regshere" marker in the current frame.
693 */ 899 */
694 if (validate_sp(sp, p, sizeof(struct pt_regs) + 400) 900 if (validate_sp(sp, tsk, INT_FRAME_SIZE)
695 && _sp[12] == 0x7265677368657265ul) { 901 && stack[FRAME_MARKER] == REGS_MARKER) {
696 struct pt_regs *regs = (struct pt_regs *) 902 struct pt_regs *regs = (struct pt_regs *)
697 (sp + STACK_FRAME_OVERHEAD); 903 (sp + STACK_FRAME_OVERHEAD);
698 printk("--- Exception: %lx", regs->trap); 904 printk("--- Exception: %lx", regs->trap);
@@ -708,6 +914,6 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
708 914
709void dump_stack(void) 915void dump_stack(void)
710{ 916{
711 show_stack(current, (unsigned long *)__get_SP()); 917 show_stack(current, NULL);
712} 918}
713EXPORT_SYMBOL(dump_stack); 919EXPORT_SYMBOL(dump_stack);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
new file mode 100644
index 000000000000..2eccd0e159e3
--- /dev/null
+++ b/arch/powerpc/kernel/prom.c
@@ -0,0 +1,2170 @@
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
32
33#include <asm/prom.h>
34#include <asm/rtas.h>
35#include <asm/lmb.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/system.h>
42#include <asm/mmu.h>
43#include <asm/pgtable.h>
44#include <asm/pci.h>
45#include <asm/iommu.h>
46#include <asm/btext.h>
47#include <asm/sections.h>
48#include <asm/machdep.h>
49#include <asm/pSeries_reconfig.h>
50#include <asm/pci-bridge.h>
51#ifdef CONFIG_PPC64
52#include <asm/systemcfg.h>
53#endif
54
55#ifdef DEBUG
56#define DBG(fmt...) printk(KERN_ERR fmt)
57#else
58#define DBG(fmt...)
59#endif
60
61struct pci_reg_property {
62 struct pci_address addr;
63 u32 size_hi;
64 u32 size_lo;
65};
66
67struct isa_reg_property {
68 u32 space;
69 u32 address;
70 u32 size;
71};
72
73
74typedef int interpret_func(struct device_node *, unsigned long *,
75 int, int, int);
76
77extern struct rtas_t rtas;
78extern struct lmb lmb;
79extern unsigned long klimit;
80
81static int __initdata dt_root_addr_cells;
82static int __initdata dt_root_size_cells;
83
84#ifdef CONFIG_PPC64
85static int __initdata iommu_is_off;
86int __initdata iommu_force_on;
87unsigned long tce_alloc_start, tce_alloc_end;
88#endif
89
90typedef u32 cell_t;
91
92#if 0
93static struct boot_param_header *initial_boot_params __initdata;
94#else
95struct boot_param_header *initial_boot_params;
96#endif
97
98static struct device_node *allnodes = NULL;
99
100/* use when traversing tree through the allnext, child, sibling,
101 * or parent members of struct device_node.
102 */
103static DEFINE_RWLOCK(devtree_lock);
104
105/* export that to outside world */
106struct device_node *of_chosen;
107
108struct device_node *dflt_interrupt_controller;
109int num_interrupt_controllers;
110
111/*
112 * Wrapper for allocating memory for various data that needs to be
113 * attached to device nodes as they are processed at boot or when
114 * added to the device tree later (e.g. DLPAR). At boot there is
115 * already a region reserved so we just increment *mem_start by size;
116 * otherwise we call kmalloc.
117 */
118static void * prom_alloc(unsigned long size, unsigned long *mem_start)
119{
120 unsigned long tmp;
121
122 if (!mem_start)
123 return kmalloc(size, GFP_KERNEL);
124
125 tmp = *mem_start;
126 *mem_start += size;
127 return (void *)tmp;
128}
129
130/*
131 * Find the device_node with a given phandle.
132 */
133static struct device_node * find_phandle(phandle ph)
134{
135 struct device_node *np;
136
137 for (np = allnodes; np != 0; np = np->allnext)
138 if (np->linux_phandle == ph)
139 return np;
140 return NULL;
141}
142
143/*
144 * Find the interrupt parent of a node.
145 */
146static struct device_node * __devinit intr_parent(struct device_node *p)
147{
148 phandle *parp;
149
150 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
151 if (parp == NULL)
152 return p->parent;
153 p = find_phandle(*parp);
154 if (p != NULL)
155 return p;
156 /*
157 * On a powermac booted with BootX, we don't get to know the
158 * phandles for any nodes, so find_phandle will return NULL.
159 * Fortunately these machines only have one interrupt controller
160 * so there isn't in fact any ambiguity. -- paulus
161 */
162 if (num_interrupt_controllers == 1)
163 p = dflt_interrupt_controller;
164 return p;
165}
166
167/*
168 * Find out the size of each entry of the interrupts property
169 * for a node.
170 */
171int __devinit prom_n_intr_cells(struct device_node *np)
172{
173 struct device_node *p;
174 unsigned int *icp;
175
176 for (p = np; (p = intr_parent(p)) != NULL; ) {
177 icp = (unsigned int *)
178 get_property(p, "#interrupt-cells", NULL);
179 if (icp != NULL)
180 return *icp;
181 if (get_property(p, "interrupt-controller", NULL) != NULL
182 || get_property(p, "interrupt-map", NULL) != NULL) {
183 printk("oops, node %s doesn't have #interrupt-cells\n",
184 p->full_name);
185 return 1;
186 }
187 }
188#ifdef DEBUG_IRQ
189 printk("prom_n_intr_cells failed for %s\n", np->full_name);
190#endif
191 return 1;
192}
193
194/*
195 * Map an interrupt from a device up to the platform interrupt
196 * descriptor.
197 */
198static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
199 struct device_node *np, unsigned int *ints,
200 int nintrc)
201{
202 struct device_node *p, *ipar;
203 unsigned int *imap, *imask, *ip;
204 int i, imaplen, match;
205 int newintrc = 0, newaddrc = 0;
206 unsigned int *reg;
207 int naddrc;
208
209 reg = (unsigned int *) get_property(np, "reg", NULL);
210 naddrc = prom_n_addr_cells(np);
211 p = intr_parent(np);
212 while (p != NULL) {
213 if (get_property(p, "interrupt-controller", NULL) != NULL)
214 /* this node is an interrupt controller, stop here */
215 break;
216 imap = (unsigned int *)
217 get_property(p, "interrupt-map", &imaplen);
218 if (imap == NULL) {
219 p = intr_parent(p);
220 continue;
221 }
222 imask = (unsigned int *)
223 get_property(p, "interrupt-map-mask", NULL);
224 if (imask == NULL) {
225 printk("oops, %s has interrupt-map but no mask\n",
226 p->full_name);
227 return 0;
228 }
229 imaplen /= sizeof(unsigned int);
230 match = 0;
231 ipar = NULL;
232 while (imaplen > 0 && !match) {
233 /* check the child-interrupt field */
234 match = 1;
235 for (i = 0; i < naddrc && match; ++i)
236 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
237 for (; i < naddrc + nintrc && match; ++i)
238 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
239 imap += naddrc + nintrc;
240 imaplen -= naddrc + nintrc;
241 /* grab the interrupt parent */
242 ipar = find_phandle((phandle) *imap++);
243 --imaplen;
244 if (ipar == NULL && num_interrupt_controllers == 1)
245 /* cope with BootX not giving us phandles */
246 ipar = dflt_interrupt_controller;
247 if (ipar == NULL) {
248 printk("oops, no int parent %x in map of %s\n",
249 imap[-1], p->full_name);
250 return 0;
251 }
252 /* find the parent's # addr and intr cells */
253 ip = (unsigned int *)
254 get_property(ipar, "#interrupt-cells", NULL);
255 if (ip == NULL) {
256 printk("oops, no #interrupt-cells on %s\n",
257 ipar->full_name);
258 return 0;
259 }
260 newintrc = *ip;
261 ip = (unsigned int *)
262 get_property(ipar, "#address-cells", NULL);
263 newaddrc = (ip == NULL)? 0: *ip;
264 imap += newaddrc + newintrc;
265 imaplen -= newaddrc + newintrc;
266 }
267 if (imaplen < 0) {
268 printk("oops, error decoding int-map on %s, len=%d\n",
269 p->full_name, imaplen);
270 return 0;
271 }
272 if (!match) {
273#ifdef DEBUG_IRQ
274 printk("oops, no match in %s int-map for %s\n",
275 p->full_name, np->full_name);
276#endif
277 return 0;
278 }
279 p = ipar;
280 naddrc = newaddrc;
281 nintrc = newintrc;
282 ints = imap - nintrc;
283 reg = ints - naddrc;
284 }
285 if (p == NULL) {
286#ifdef DEBUG_IRQ
287 printk("hmmm, int tree for %s doesn't have ctrler\n",
288 np->full_name);
289#endif
290 return 0;
291 }
292 *irq = ints;
293 *ictrler = p;
294 return nintrc;
295}
296
297static unsigned char map_isa_senses[4] = {
298 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
299 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
300 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
301 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
302};
303
304static unsigned char map_mpic_senses[4] = {
305 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
306 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
307 /* 2 seems to be used for the 8259 cascade... */
308 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
309 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
310};
311
312static int __devinit finish_node_interrupts(struct device_node *np,
313 unsigned long *mem_start,
314 int measure_only)
315{
316 unsigned int *ints;
317 int intlen, intrcells, intrcount;
318 int i, j, n, sense;
319 unsigned int *irq, virq;
320 struct device_node *ic;
321
322 if (num_interrupt_controllers == 0) {
323 /*
324 * Old machines just have a list of interrupt numbers
325 * and no interrupt-controller nodes.
326 */
327 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
328 &intlen);
329 /* XXX old interpret_pci_props looked in parent too */
330 /* XXX old interpret_macio_props looked for interrupts
331 before AAPL,interrupts */
332 if (ints == NULL)
333 ints = (unsigned int *) get_property(np, "interrupts",
334 &intlen);
335 if (ints == NULL)
336 return 0;
337
338 np->n_intrs = intlen / sizeof(unsigned int);
339 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
340 mem_start);
341 if (!np->intrs)
342 return -ENOMEM;
343 if (measure_only)
344 return 0;
345
346 for (i = 0; i < np->n_intrs; ++i) {
347 np->intrs[i].line = *ints++;
348 np->intrs[i].sense = IRQ_SENSE_LEVEL
349 | IRQ_POLARITY_NEGATIVE;
350 }
351 return 0;
352 }
353
354 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
355 if (ints == NULL)
356 return 0;
357 intrcells = prom_n_intr_cells(np);
358 intlen /= intrcells * sizeof(unsigned int);
359
360 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
361 if (!np->intrs)
362 return -ENOMEM;
363
364 if (measure_only)
365 return 0;
366
367 intrcount = 0;
368 for (i = 0; i < intlen; ++i, ints += intrcells) {
369 n = map_interrupt(&irq, &ic, np, ints, intrcells);
370 if (n <= 0)
371 continue;
372
373 /* don't map IRQ numbers under a cascaded 8259 controller */
374 if (ic && device_is_compatible(ic, "chrp,iic")) {
375 np->intrs[intrcount].line = irq[0];
376 sense = (n > 1)? (irq[1] & 3): 3;
377 np->intrs[intrcount].sense = map_isa_senses[sense];
378 } else {
379 virq = virt_irq_create_mapping(irq[0]);
380#ifdef CONFIG_PPC64
381 if (virq == NO_IRQ) {
382 printk(KERN_CRIT "Could not allocate interrupt"
383 " number for %s\n", np->full_name);
384 continue;
385 }
386#endif
387 np->intrs[intrcount].line = irq_offset_up(virq);
388 sense = (n > 1)? (irq[1] & 3): 1;
389 np->intrs[intrcount].sense = map_mpic_senses[sense];
390 }
391
392#ifdef CONFIG_PPC64
393 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
394 if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
395 char *name = get_property(ic->parent, "name", NULL);
396 if (name && !strcmp(name, "u3"))
397 np->intrs[intrcount].line += 128;
398 else if (!(name && !strcmp(name, "mac-io")))
399 /* ignore other cascaded controllers, such as
400 the k2-sata-root */
401 break;
402 }
403#endif
404 if (n > 2) {
405 printk("hmmm, got %d intr cells for %s:", n,
406 np->full_name);
407 for (j = 0; j < n; ++j)
408 printk(" %d", irq[j]);
409 printk("\n");
410 }
411 ++intrcount;
412 }
413 np->n_intrs = intrcount;
414
415 return 0;
416}
417
418static int __devinit interpret_pci_props(struct device_node *np,
419 unsigned long *mem_start,
420 int naddrc, int nsizec,
421 int measure_only)
422{
423 struct address_range *adr;
424 struct pci_reg_property *pci_addrs;
425 int i, l, n_addrs;
426
427 pci_addrs = (struct pci_reg_property *)
428 get_property(np, "assigned-addresses", &l);
429 if (!pci_addrs)
430 return 0;
431
432 n_addrs = l / sizeof(*pci_addrs);
433
434 adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
435 if (!adr)
436 return -ENOMEM;
437
438 if (measure_only)
439 return 0;
440
441 np->addrs = adr;
442 np->n_addrs = n_addrs;
443
444 for (i = 0; i < n_addrs; i++) {
445 adr[i].space = pci_addrs[i].addr.a_hi;
446 adr[i].address = pci_addrs[i].addr.a_lo |
447 ((u64)pci_addrs[i].addr.a_mid << 32);
448 adr[i].size = pci_addrs[i].size_lo;
449 }
450
451 return 0;
452}
453
454static int __init interpret_dbdma_props(struct device_node *np,
455 unsigned long *mem_start,
456 int naddrc, int nsizec,
457 int measure_only)
458{
459 struct reg_property32 *rp;
460 struct address_range *adr;
461 unsigned long base_address;
462 int i, l;
463 struct device_node *db;
464
465 base_address = 0;
466 if (!measure_only) {
467 for (db = np->parent; db != NULL; db = db->parent) {
468 if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
469 base_address = db->addrs[0].address;
470 break;
471 }
472 }
473 }
474
475 rp = (struct reg_property32 *) get_property(np, "reg", &l);
476 if (rp != 0 && l >= sizeof(struct reg_property32)) {
477 i = 0;
478 adr = (struct address_range *) (*mem_start);
479 while ((l -= sizeof(struct reg_property32)) >= 0) {
480 if (!measure_only) {
481 adr[i].space = 2;
482 adr[i].address = rp[i].address + base_address;
483 adr[i].size = rp[i].size;
484 }
485 ++i;
486 }
487 np->addrs = adr;
488 np->n_addrs = i;
489 (*mem_start) += i * sizeof(struct address_range);
490 }
491
492 return 0;
493}
494
495static int __init interpret_macio_props(struct device_node *np,
496 unsigned long *mem_start,
497 int naddrc, int nsizec,
498 int measure_only)
499{
500 struct reg_property32 *rp;
501 struct address_range *adr;
502 unsigned long base_address;
503 int i, l;
504 struct device_node *db;
505
506 base_address = 0;
507 if (!measure_only) {
508 for (db = np->parent; db != NULL; db = db->parent) {
509 if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
510 base_address = db->addrs[0].address;
511 break;
512 }
513 }
514 }
515
516 rp = (struct reg_property32 *) get_property(np, "reg", &l);
517 if (rp != 0 && l >= sizeof(struct reg_property32)) {
518 i = 0;
519 adr = (struct address_range *) (*mem_start);
520 while ((l -= sizeof(struct reg_property32)) >= 0) {
521 if (!measure_only) {
522 adr[i].space = 2;
523 adr[i].address = rp[i].address + base_address;
524 adr[i].size = rp[i].size;
525 }
526 ++i;
527 }
528 np->addrs = adr;
529 np->n_addrs = i;
530 (*mem_start) += i * sizeof(struct address_range);
531 }
532
533 return 0;
534}
535
536static int __init interpret_isa_props(struct device_node *np,
537 unsigned long *mem_start,
538 int naddrc, int nsizec,
539 int measure_only)
540{
541 struct isa_reg_property *rp;
542 struct address_range *adr;
543 int i, l;
544
545 rp = (struct isa_reg_property *) get_property(np, "reg", &l);
546 if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
547 i = 0;
548 adr = (struct address_range *) (*mem_start);
549 while ((l -= sizeof(struct isa_reg_property)) >= 0) {
550 if (!measure_only) {
551 adr[i].space = rp[i].space;
552 adr[i].address = rp[i].address;
553 adr[i].size = rp[i].size;
554 }
555 ++i;
556 }
557 np->addrs = adr;
558 np->n_addrs = i;
559 (*mem_start) += i * sizeof(struct address_range);
560 }
561
562 return 0;
563}
564
565static int __init interpret_root_props(struct device_node *np,
566 unsigned long *mem_start,
567 int naddrc, int nsizec,
568 int measure_only)
569{
570 struct address_range *adr;
571 int i, l;
572 unsigned int *rp;
573 int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
574
575 rp = (unsigned int *) get_property(np, "reg", &l);
576 if (rp != 0 && l >= rpsize) {
577 i = 0;
578 adr = (struct address_range *) (*mem_start);
579 while ((l -= rpsize) >= 0) {
580 if (!measure_only) {
581 adr[i].space = 0;
582 adr[i].address = rp[naddrc - 1];
583 adr[i].size = rp[naddrc + nsizec - 1];
584 }
585 ++i;
586 rp += naddrc + nsizec;
587 }
588 np->addrs = adr;
589 np->n_addrs = i;
590 (*mem_start) += i * sizeof(struct address_range);
591 }
592
593 return 0;
594}
595
596static int __devinit finish_node(struct device_node *np,
597 unsigned long *mem_start,
598 interpret_func *ifunc,
599 int naddrc, int nsizec,
600 int measure_only)
601{
602 struct device_node *child;
603 int *ip, rc = 0;
604
605 /* get the device addresses and interrupts */
606 if (ifunc != NULL)
607 rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
608 if (rc)
609 goto out;
610
611 rc = finish_node_interrupts(np, mem_start, measure_only);
612 if (rc)
613 goto out;
614
615 /* Look for #address-cells and #size-cells properties. */
616 ip = (int *) get_property(np, "#address-cells", NULL);
617 if (ip != NULL)
618 naddrc = *ip;
619 ip = (int *) get_property(np, "#size-cells", NULL);
620 if (ip != NULL)
621 nsizec = *ip;
622
623 if (!strcmp(np->name, "device-tree") || np->parent == NULL)
624 ifunc = interpret_root_props;
625 else if (np->type == 0)
626 ifunc = NULL;
627 else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
628 ifunc = interpret_pci_props;
629 else if (!strcmp(np->type, "dbdma"))
630 ifunc = interpret_dbdma_props;
631 else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
632 ifunc = interpret_macio_props;
633 else if (!strcmp(np->type, "isa"))
634 ifunc = interpret_isa_props;
635 else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
636 ifunc = interpret_root_props;
637 else if (!((ifunc == interpret_dbdma_props
638 || ifunc == interpret_macio_props)
639 && (!strcmp(np->type, "escc")
640 || !strcmp(np->type, "media-bay"))))
641 ifunc = NULL;
642
643 for (child = np->child; child != NULL; child = child->sibling) {
644 rc = finish_node(child, mem_start, ifunc,
645 naddrc, nsizec, measure_only);
646 if (rc)
647 goto out;
648 }
649out:
650 return rc;
651}
652
653static void __init scan_interrupt_controllers(void)
654{
655 struct device_node *np;
656 int n = 0;
657 char *name, *ic;
658 int iclen;
659
660 for (np = allnodes; np != NULL; np = np->allnext) {
661 ic = get_property(np, "interrupt-controller", &iclen);
662 name = get_property(np, "name", NULL);
663 /* checking iclen makes sure we don't get a false
664 match on /chosen.interrupt_controller */
665 if ((name != NULL
666 && strcmp(name, "interrupt-controller") == 0)
667 || (ic != NULL && iclen == 0
668 && strcmp(name, "AppleKiwi"))) {
669 if (n == 0)
670 dflt_interrupt_controller = np;
671 ++n;
672 }
673 }
674 num_interrupt_controllers = n;
675}
676
677/**
678 * finish_device_tree is called once things are running normally
679 * (i.e. with text and data mapped to the address they were linked at).
680 * It traverses the device tree and fills in some of the additional
681 * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
682 * interrupt mapping is also initialized at this point.
683 */
684void __init finish_device_tree(void)
685{
686 unsigned long start, end, size = 0;
687
688 DBG(" -> finish_device_tree\n");
689
690#ifdef CONFIG_PPC64
691 /* Initialize virtual IRQ map */
692 virt_irq_init();
693#endif
694 scan_interrupt_controllers();
695
696 /*
697 * Finish device-tree (pre-parsing some properties etc...)
698 * We do this in 2 passes. One with "measure_only" set, which
699 * will only measure the amount of memory needed, then we can
700 * allocate that memory, and call finish_node again. However,
701 * we must be careful as most routines will fail nowadays when
702 * prom_alloc() returns 0, so we must make sure our first pass
703 * doesn't start at 0. We pre-initialize size to 16 for that
704 * reason and then remove those additional 16 bytes
705 */
706 size = 16;
707 finish_node(allnodes, &size, NULL, 0, 0, 1);
708 size -= 16;
709 end = start = (unsigned long) __va(lmb_alloc(size, 128));
710 finish_node(allnodes, &end, NULL, 0, 0, 0);
711 BUG_ON(end != start + size);
712
713 DBG(" <- finish_device_tree\n");
714}
715
716static inline char *find_flat_dt_string(u32 offset)
717{
718 return ((char *)initial_boot_params) +
719 initial_boot_params->off_dt_strings + offset;
720}
721
722/**
723 * This function is used to scan the flattened device tree; it is
724 * used to extract the memory information at boot, before we can
725 * unflatten the tree
726 */
727static int __init scan_flat_dt(int (*it)(unsigned long node,
728 const char *uname, int depth,
729 void *data),
730 void *data)
731{
732 unsigned long p = ((unsigned long)initial_boot_params) +
733 initial_boot_params->off_dt_struct;
734 int rc = 0;
735 int depth = -1;
736
737 do {
738 u32 tag = *((u32 *)p);
739 char *pathp;
740
741 p += 4;
742 if (tag == OF_DT_END_NODE) {
743 depth --;
744 continue;
745 }
746 if (tag == OF_DT_NOP)
747 continue;
748 if (tag == OF_DT_END)
749 break;
750 if (tag == OF_DT_PROP) {
751 u32 sz = *((u32 *)p);
752 p += 8;
753 if (initial_boot_params->version < 0x10)
754 p = _ALIGN(p, sz >= 8 ? 8 : 4);
755 p += sz;
756 p = _ALIGN(p, 4);
757 continue;
758 }
759 if (tag != OF_DT_BEGIN_NODE) {
760 printk(KERN_WARNING "Invalid tag %x scanning flattened"
761 " device tree !\n", tag);
762 return -EINVAL;
763 }
764 depth++;
765 pathp = (char *)p;
766 p = _ALIGN(p + strlen(pathp) + 1, 4);
767 if ((*pathp) == '/') {
768 char *lp, *np;
769 for (lp = NULL, np = pathp; *np; np++)
770 if ((*np) == '/')
771 lp = np+1;
772 if (lp != NULL)
773 pathp = lp;
774 }
775 rc = it(p, pathp, depth, data);
776 if (rc != 0)
777 break;
778 } while(1);
779
780 return rc;
781}
782
783/**
784 * This function can be used within a scan_flat_dt() callback to get
785 * access to properties
786 */
787static void* __init get_flat_dt_prop(unsigned long node, const char *name,
788 unsigned long *size)
789{
790 unsigned long p = node;
791
792 do {
793 u32 tag = *((u32 *)p);
794 u32 sz, noff;
795 const char *nstr;
796
797 p += 4;
798 if (tag == OF_DT_NOP)
799 continue;
800 if (tag != OF_DT_PROP)
801 return NULL;
802
803 sz = *((u32 *)p);
804 noff = *((u32 *)(p + 4));
805 p += 8;
806 if (initial_boot_params->version < 0x10)
807 p = _ALIGN(p, sz >= 8 ? 8 : 4);
808
809 nstr = find_flat_dt_string(noff);
810 if (nstr == NULL) {
811 printk(KERN_WARNING "Can't find property index"
812 " name !\n");
813 return NULL;
814 }
815 if (strcmp(name, nstr) == 0) {
816 if (size)
817 *size = sz;
818 return (void *)p;
819 }
820 p += sz;
821 p = _ALIGN(p, 4);
822 } while(1);
823}
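A hedged sketch of how a caller pairs the two helpers above: a scan_flat_dt() callback that pulls one property out of the flat tree, in the same style as the early_init_dt_scan_*() routines further down (the property name used here is only an example):

static int __init example_scan_stdout(unsigned long node, const char *uname,
				      int depth, void *data)
{
	char *path;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	path = get_flat_dt_prop(node, "linux,stdout-path", NULL);
	if (path != NULL)
		DBG("stdout is %s\n", path);

	return 1;	/* a non-zero return stops the scan */
}

It would be driven very early, before the tree is unflattened:

	scan_flat_dt(example_scan_stdout, NULL);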
824
825static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
826 unsigned long align)
827{
828 void *res;
829
830 *mem = _ALIGN(*mem, align);
831 res = (void *)*mem;
832 *mem += size;
833
834 return res;
835}
836
837static unsigned long __init unflatten_dt_node(unsigned long mem,
838 unsigned long *p,
839 struct device_node *dad,
840 struct device_node ***allnextpp,
841 unsigned long fpsize)
842{
843 struct device_node *np;
844 struct property *pp, **prev_pp = NULL;
845 char *pathp;
846 u32 tag;
847 unsigned int l, allocl;
848 int has_name = 0;
849 int new_format = 0;
850
851 tag = *((u32 *)(*p));
852 if (tag != OF_DT_BEGIN_NODE) {
853 printk("Weird tag at start of node: %x\n", tag);
854 return mem;
855 }
856 *p += 4;
857 pathp = (char *)*p;
858 l = allocl = strlen(pathp) + 1;
859 *p = _ALIGN(*p + l, 4);
860
861 /* version 0x10 has a more compact unit name here instead of the full
862 * path. We accumulate the full path size using "fpsize"; we'll rebuild
863 * it later. We detect this because the first character of the name is
864 * not '/'.
865 */
866 if ((*pathp) != '/') {
867 new_format = 1;
868 if (fpsize == 0) {
869 /* root node: special case. fpsize accounts for path
870 * plus terminating zero. root node only has '/', so
871 * fpsize should be 2, but we want to avoid the first
872 * level nodes to have two '/' so we use fpsize 1 here
873 */
874 fpsize = 1;
875 allocl = 2;
876 } else {
877 /* account for '/' and path size minus terminal 0
878 * already in 'l'
879 */
880 fpsize += l;
881 allocl = fpsize;
882 }
883 }
884
885
886 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
887 __alignof__(struct device_node));
888 if (allnextpp) {
889 memset(np, 0, sizeof(*np));
890 np->full_name = ((char*)np) + sizeof(struct device_node);
891 if (new_format) {
892 char *p = np->full_name;
893 /* rebuild full path for new format */
894 if (dad && dad->parent) {
895 strcpy(p, dad->full_name);
896#ifdef DEBUG
897 if ((strlen(p) + l + 1) != allocl) {
898 DBG("%s: p: %d, l: %d, a: %d\n",
899 pathp, strlen(p), l, allocl);
900 }
901#endif
902 p += strlen(p);
903 }
904 *(p++) = '/';
905 memcpy(p, pathp, l);
906 } else
907 memcpy(np->full_name, pathp, l);
908 prev_pp = &np->properties;
909 **allnextpp = np;
910 *allnextpp = &np->allnext;
911 if (dad != NULL) {
912 np->parent = dad;
913 /* we temporarily use the next field as `last_child'*/
914 if (dad->next == 0)
915 dad->child = np;
916 else
917 dad->next->sibling = np;
918 dad->next = np;
919 }
920 kref_init(&np->kref);
921 }
922 while(1) {
923 u32 sz, noff;
924 char *pname;
925
926 tag = *((u32 *)(*p));
927 if (tag == OF_DT_NOP) {
928 *p += 4;
929 continue;
930 }
931 if (tag != OF_DT_PROP)
932 break;
933 *p += 4;
934 sz = *((u32 *)(*p));
935 noff = *((u32 *)((*p) + 4));
936 *p += 8;
937 if (initial_boot_params->version < 0x10)
938 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
939
940 pname = find_flat_dt_string(noff);
941 if (pname == NULL) {
942 printk("Can't find property name in list !\n");
943 break;
944 }
945 if (strcmp(pname, "name") == 0)
946 has_name = 1;
947 l = strlen(pname) + 1;
948 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
949 __alignof__(struct property));
950 if (allnextpp) {
951 if (strcmp(pname, "linux,phandle") == 0) {
952 np->node = *((u32 *)*p);
953 if (np->linux_phandle == 0)
954 np->linux_phandle = np->node;
955 }
956 if (strcmp(pname, "ibm,phandle") == 0)
957 np->linux_phandle = *((u32 *)*p);
958 pp->name = pname;
959 pp->length = sz;
960 pp->value = (void *)*p;
961 *prev_pp = pp;
962 prev_pp = &pp->next;
963 }
964 *p = _ALIGN((*p) + sz, 4);
965 }
966 /* with version 0x10 we may not have the name property; recreate
967 * it here from the unit name if absent
968 */
969 if (!has_name) {
970 char *p = pathp, *ps = pathp, *pa = NULL;
971 int sz;
972
973 while (*p) {
974 if ((*p) == '@')
975 pa = p;
976 if ((*p) == '/')
977 ps = p + 1;
978 p++;
979 }
980 if (pa < ps)
981 pa = p;
982 sz = (pa - ps) + 1;
983 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
984 __alignof__(struct property));
985 if (allnextpp) {
986 pp->name = "name";
987 pp->length = sz;
988 pp->value = (unsigned char *)(pp + 1);
989 *prev_pp = pp;
990 prev_pp = &pp->next;
991 memcpy(pp->value, ps, sz - 1);
992 ((char *)pp->value)[sz - 1] = 0;
993 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
994 }
995 }
996 if (allnextpp) {
997 *prev_pp = NULL;
998 np->name = get_property(np, "name", NULL);
999 np->type = get_property(np, "device_type", NULL);
1000
1001 if (!np->name)
1002 np->name = "<NULL>";
1003 if (!np->type)
1004 np->type = "<NULL>";
1005 }
1006 while (tag == OF_DT_BEGIN_NODE) {
1007 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
1008 tag = *((u32 *)(*p));
1009 }
1010 if (tag != OF_DT_END_NODE) {
1011 printk("Weird tag at end of node: %x\n", tag);
1012 return mem;
1013 }
1014 *p += 4;
1015 return mem;
1016}
1017
1018
1019/**
1020 * unflattens the device-tree passed by the firmware, creating the
1021 * tree of struct device_node. It also fills the "name" and "type"
1022 * pointers of the nodes so the normal device-tree walking functions
1023 * can be used (this used to be done by finish_device_tree)
1024 */
1025void __init unflatten_device_tree(void)
1026{
1027 unsigned long start, mem, size;
1028 struct device_node **allnextp = &allnodes;
1029 char *p = NULL;
1030 int l = 0;
1031
1032 DBG(" -> unflatten_device_tree()\n");
1033
1034 /* First pass, scan for size */
1035 start = ((unsigned long)initial_boot_params) +
1036 initial_boot_params->off_dt_struct;
1037 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1038 size = (size | 3) + 1;
1039
1040 DBG(" size is %lx, allocating...\n", size);
1041
1042 /* Allocate memory for the expanded device tree */
1043 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1044 if (!mem) {
1045 DBG("Couldn't allocate memory with lmb_alloc()!\n");
1046 panic("Couldn't allocate memory with lmb_alloc()!\n");
1047 }
1048 mem = (unsigned long) __va(mem);
1049
1050 ((u32 *)mem)[size / 4] = 0xdeadbeef;
1051
1052 DBG(" unflattening %lx...\n", mem);
1053
1054 /* Second pass, do actual unflattening */
1055 start = ((unsigned long)initial_boot_params) +
1056 initial_boot_params->off_dt_struct;
1057 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1058 if (*((u32 *)start) != OF_DT_END)
1059 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1060 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1061 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1062 ((u32 *)mem)[size / 4] );
1063 *allnextp = NULL;
1064
1065 /* Get pointer to OF "/chosen" node for use everywhere */
1066 of_chosen = of_find_node_by_path("/chosen");
1067 if (of_chosen == NULL)
1068 of_chosen = of_find_node_by_path("/chosen@0");
1069
1070 /* Retrieve command line */
1071 if (of_chosen != NULL) {
1072 p = (char *)get_property(of_chosen, "bootargs", &l);
1073 if (p != NULL && l > 0)
1074 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1075 }
1076#ifdef CONFIG_CMDLINE
1077 if (l == 0 || (l == 1 && (*p) == 0))
1078 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1079#endif /* CONFIG_CMDLINE */
1080
1081 DBG("Command line is: %s\n", cmd_line);
1082
1083 DBG(" <- unflatten_device_tree()\n");
1084}
1085
1086
1087static int __init early_init_dt_scan_cpus(unsigned long node,
1088 const char *uname, int depth, void *data)
1089{
1090 char *type = get_flat_dt_prop(node, "device_type", NULL);
1091 u32 *prop;
1092 unsigned long size = 0;
1093
1094 /* We are scanning "cpu" nodes only */
1095 if (type == NULL || strcmp(type, "cpu") != 0)
1096 return 0;
1097
1098#ifdef CONFIG_PPC_PSERIES
1099 /* On LPAR, look for the first ibm,pft-size property for the hash table size
1100 */
1101 if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1102 u32 *pft_size;
1103 pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1104 if (pft_size != NULL) {
1105 /* pft_size[0] is the NUMA CEC cookie */
1106 ppc64_pft_size = pft_size[1];
1107 }
1108 }
1109#endif
1110
1111 boot_cpuid = 0;
1112 boot_cpuid_phys = 0;
1113 if (initial_boot_params && initial_boot_params->version >= 2) {
1114 /* version 2 of the kexec param format adds the phys cpuid
1115 * of booted proc.
1116 */
1117 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1118 } else {
1119 /* Check if it's the boot-cpu, set its hw index now */
1120 if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
1121 prop = get_flat_dt_prop(node, "reg", NULL);
1122 if (prop != NULL)
1123 boot_cpuid_phys = *prop;
1124 }
1125 }
1126 set_hard_smp_processor_id(0, boot_cpuid_phys);
1127
1128#ifdef CONFIG_ALTIVEC
1129 /* Check if we have a VMX and, if so, update CPU features */
1130 prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1131 if (prop && (*prop) > 0) {
1132 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1133 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1134 }
1135
1136 /* Same goes for Apple's "altivec" property */
1137 prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1138 if (prop) {
1139 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1140 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1141 }
1142#endif /* CONFIG_ALTIVEC */
1143
1144#ifdef CONFIG_PPC_PSERIES
1145 /*
1146 * Check for an SMT capable CPU and set the CPU feature. We do
1147 * this by looking at the size of the ibm,ppc-interrupt-server#s
1148 * property
1149 */
1150 prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1151 &size);
1152 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1153 if (prop && ((size / sizeof(u32)) > 1))
1154 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1155#endif
1156
1157 return 0;
1158}
1159
1160static int __init early_init_dt_scan_chosen(unsigned long node,
1161 const char *uname, int depth, void *data)
1162{
1163 u32 *prop;
1164 unsigned long *lprop;
1165
1166 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1167
1168 if (depth != 1 ||
1169 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1170 return 0;
1171
1172 /* get platform type */
1173 prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1174 if (prop == NULL)
1175 return 0;
1176#ifdef CONFIG_PPC64
1177 systemcfg->platform = *prop;
1178#else
1179#ifdef CONFIG_PPC_MULTIPLATFORM
1180 _machine = *prop;
1181#endif
1182#endif
1183
1184#ifdef CONFIG_PPC64
1185 /* check if iommu is forced on or off */
1186 if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1187 iommu_is_off = 1;
1188 if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1189 iommu_force_on = 1;
1190#endif
1191
1192 lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1193 if (lprop)
1194 memory_limit = *lprop;
1195
1196#ifdef CONFIG_PPC64
1197 lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1198 if (lprop)
1199 tce_alloc_start = *lprop;
1200 lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1201 if (lprop)
1202 tce_alloc_end = *lprop;
1203#endif
1204
1205#ifdef CONFIG_PPC_RTAS
1206 /* To help early debugging via the front panel, we retrieve a minimal
1207 * set of RTAS info now, if available
1208 */
1209 {
1210 u64 *basep, *entryp;
1211
1212 basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1213 entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1214 prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1215 if (basep && entryp && prop) {
1216 rtas.base = *basep;
1217 rtas.entry = *entryp;
1218 rtas.size = *prop;
1219 }
1220 }
1221#endif /* CONFIG_PPC_RTAS */
1222
1223 /* break now */
1224 return 1;
1225}
1226
1227static int __init early_init_dt_scan_root(unsigned long node,
1228 const char *uname, int depth, void *data)
1229{
1230 u32 *prop;
1231
1232 if (depth != 0)
1233 return 0;
1234
1235 prop = get_flat_dt_prop(node, "#size-cells", NULL);
1236 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1237 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1238
1239 prop = get_flat_dt_prop(node, "#address-cells", NULL);
1240 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1241 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1242
1243 /* break now */
1244 return 1;
1245}
1246
1247static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1248{
1249 cell_t *p = *cellp;
1250 unsigned long r;
1251
1252 /* Ignore more than 2 cells */
1253 while (s > sizeof(unsigned long) / 4) {
1254 p++;
1255 s--;
1256 }
1257 r = *p++;
1258#ifdef CONFIG_PPC64
1259 if (s > 1) {
1260 r <<= 32;
1261 r |= *(p++);
1262 s--;
1263 }
1264#endif
1265
1266 *cellp = p;
1267 return r;
1268}
1269
1270
1271static int __init early_init_dt_scan_memory(unsigned long node,
1272 const char *uname, int depth, void *data)
1273{
1274 char *type = get_flat_dt_prop(node, "device_type", NULL);
1275 cell_t *reg, *endp;
1276 unsigned long l;
1277
1278 /* We are scanning "memory" nodes only */
1279 if (type == NULL || strcmp(type, "memory") != 0)
1280 return 0;
1281
1282 reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1283 if (reg == NULL)
1284 return 0;
1285
1286 endp = reg + (l / sizeof(cell_t));
1287
1288 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1289 uname, l, reg[0], reg[1], reg[2], reg[3]);
1290
1291 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1292 unsigned long base, size;
1293
1294 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1295 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1296
1297 if (size == 0)
1298 continue;
1299 DBG(" - %lx , %lx\n", base, size);
1300#ifdef CONFIG_PPC64
1301 if (iommu_is_off) {
1302 if (base >= 0x80000000ul)
1303 continue;
1304 if ((base + size) > 0x80000000ul)
1305 size = 0x80000000ul - base;
1306 }
1307#endif
1308 lmb_add(base, size);
1309 }
1310 return 0;
1311}
1312
1313static void __init early_reserve_mem(void)
1314{
1315 unsigned long base, size;
1316 unsigned long *reserve_map;
1317
1318 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1319 initial_boot_params->off_mem_rsvmap);
1320 while (1) {
1321 base = *(reserve_map++);
1322 size = *(reserve_map++);
1323 if (size == 0)
1324 break;
1325 DBG("reserving: %lx -> %lx\n", base, size);
1326 lmb_reserve(base, size);
1327 }
1328
1329#if 0
1330 DBG("memory reserved, lmbs :\n");
1331 lmb_dump_all();
1332#endif
1333}
1334
1335void __init early_init_devtree(void *params)
1336{
1337 DBG(" -> early_init_devtree()\n");
1338
1339 /* Setup flat device-tree pointer */
1340 initial_boot_params = params;
1341
1342 /* Retrieve various information from the /chosen node of the
1343 * device-tree, including the platform type, initrd location and
1344 * size, TCE reserve, and more ...
1345 */
1346 scan_flat_dt(early_init_dt_scan_chosen, NULL);
1347
1348 /* Scan memory nodes and rebuild LMBs */
1349 lmb_init();
1350 scan_flat_dt(early_init_dt_scan_root, NULL);
1351 scan_flat_dt(early_init_dt_scan_memory, NULL);
1352 lmb_enforce_memory_limit(memory_limit);
1353 lmb_analyze();
1354#ifdef CONFIG_PPC64
1355 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1356#endif
1357 lmb_reserve(0, __pa(klimit));
1358
1359 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1360
1361 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1362 early_reserve_mem();
1363
1364 DBG("Scanning CPUs ...\n");
1365
1366 /* Retrieve hash table size from the flattened tree plus other
1367 * CPU-related information (altivec support, boot CPU ID, ...)
1368 */
1369 scan_flat_dt(early_init_dt_scan_cpus, NULL);
1370
1371 DBG(" <- early_init_devtree()\n");
1372}
1373
1374#undef printk
1375
1376int
1377prom_n_addr_cells(struct device_node* np)
1378{
1379 int* ip;
1380 do {
1381 if (np->parent)
1382 np = np->parent;
1383 ip = (int *) get_property(np, "#address-cells", NULL);
1384 if (ip != NULL)
1385 return *ip;
1386 } while (np->parent);
1387 /* No #address-cells property for the root node, default to 1 */
1388 return 1;
1389}
1390
1391int
1392prom_n_size_cells(struct device_node* np)
1393{
1394 int* ip;
1395 do {
1396 if (np->parent)
1397 np = np->parent;
1398 ip = (int *) get_property(np, "#size-cells", NULL);
1399 if (ip != NULL)
1400 return *ip;
1401 } while (np->parent);
1402 /* No #size-cells property for the root node, default to 1 */
1403 return 1;
1404}
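A hedged usage sketch (illustrative only): decoding the first "reg" entry of a node with the cell counts returned by the two helpers above, in the same spirit as interpret_root_props() earlier; like that code, only the low 32-bit cell of the address is taken:

static unsigned long example_first_reg_addr(struct device_node *np)
{
	int na = prom_n_addr_cells(np);
	int ns = prom_n_size_cells(np);
	int len;
	unsigned int *rp;

	rp = (unsigned int *) get_property(np, "reg", &len);
	if (rp == NULL || len < (na + ns) * (int)sizeof(unsigned int))
		return 0;
	/* the address is the last of the na address cells */
	return rp[na - 1];
}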
1405
1406/**
1407 * Work out the sense (active-low level / active-high edge)
1408 * of each interrupt from the device tree.
1409 */
1410void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1411{
1412 struct device_node *np;
1413 int i, j;
1414
1415 /* default to level-triggered */
1416 memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1417
1418 for (np = allnodes; np != 0; np = np->allnext) {
1419 for (j = 0; j < np->n_intrs; j++) {
1420 i = np->intrs[j].line;
1421 if (i >= off && i < max)
1422 senses[i-off] = np->intrs[j].sense;
1423 }
1424 }
1425}
1426
1427/**
1428 * Construct and return a list of the device_nodes with a given name.
1429 */
1430struct device_node *find_devices(const char *name)
1431{
1432 struct device_node *head, **prevp, *np;
1433
1434 prevp = &head;
1435 for (np = allnodes; np != 0; np = np->allnext) {
1436 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1437 *prevp = np;
1438 prevp = &np->next;
1439 }
1440 }
1441 *prevp = NULL;
1442 return head;
1443}
1444EXPORT_SYMBOL(find_devices);
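The list is chained through the nodes' ->next pointers, so a caller walks it directly (a hedged sketch; the node name is only an example):

	struct device_node *np;

	for (np = find_devices("serial"); np != NULL; np = np->next)
		printk("found %s\n", np->full_name);

Unlike the of_find_* routines further down, this older interface takes no references on the nodes it returns.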
1445
1446/**
1447 * Construct and return a list of the device_nodes with a given type.
1448 */
1449struct device_node *find_type_devices(const char *type)
1450{
1451 struct device_node *head, **prevp, *np;
1452
1453 prevp = &head;
1454 for (np = allnodes; np != 0; np = np->allnext) {
1455 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1456 *prevp = np;
1457 prevp = &np->next;
1458 }
1459 }
1460 *prevp = NULL;
1461 return head;
1462}
1463EXPORT_SYMBOL(find_type_devices);
1464
1465/**
1466 * Returns all nodes linked together
1467 */
1468struct device_node *find_all_nodes(void)
1469{
1470 struct device_node *head, **prevp, *np;
1471
1472 prevp = &head;
1473 for (np = allnodes; np != 0; np = np->allnext) {
1474 *prevp = np;
1475 prevp = &np->next;
1476 }
1477 *prevp = NULL;
1478 return head;
1479}
1480EXPORT_SYMBOL(find_all_nodes);
1481
1482/** Checks if the given "compat" string matches one of the strings in
1483 * the device's "compatible" property
1484 */
1485int device_is_compatible(struct device_node *device, const char *compat)
1486{
1487 const char* cp;
1488 int cplen, l;
1489
1490 cp = (char *) get_property(device, "compatible", &cplen);
1491 if (cp == NULL)
1492 return 0;
1493 while (cplen > 0) {
1494 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1495 return 1;
1496 l = strlen(cp) + 1;
1497 cp += l;
1498 cplen -= l;
1499 }
1500
1501 return 0;
1502}
1503EXPORT_SYMBOL(device_is_compatible);
1504
1505
1506/**
1507 * Indicates whether the root node has a given value in its
1508 * compatible property.
1509 */
1510int machine_is_compatible(const char *compat)
1511{
1512 struct device_node *root;
1513 int rc = 0;
1514
1515 root = of_find_node_by_path("/");
1516 if (root) {
1517 rc = device_is_compatible(root, compat);
1518 of_node_put(root);
1519 }
1520 return rc;
1521}
1522EXPORT_SYMBOL(machine_is_compatible);
1523
1524/**
1525 * Construct and return a list of the device_nodes with a given type
1526 * and compatible property.
1527 */
1528struct device_node *find_compatible_devices(const char *type,
1529 const char *compat)
1530{
1531 struct device_node *head, **prevp, *np;
1532
1533 prevp = &head;
1534 for (np = allnodes; np != 0; np = np->allnext) {
1535 if (type != NULL
1536 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1537 continue;
1538 if (device_is_compatible(np, compat)) {
1539 *prevp = np;
1540 prevp = &np->next;
1541 }
1542 }
1543 *prevp = NULL;
1544 return head;
1545}
1546EXPORT_SYMBOL(find_compatible_devices);
1547
1548/**
1549 * Find the device_node with a given full_name.
1550 */
1551struct device_node *find_path_device(const char *path)
1552{
1553 struct device_node *np;
1554
1555 for (np = allnodes; np != 0; np = np->allnext)
1556 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1557 return np;
1558 return NULL;
1559}
1560EXPORT_SYMBOL(find_path_device);
1561
1562/*******
1563 *
1564 * New implementation of the OF "find" APIs, return a refcounted
1565 * object, call of_node_put() when done. The device tree and list
1566 * are protected by a rw_lock.
1567 *
1568 * Note that property management will need some locking as well,
1569 * this isn't dealt with yet.
1570 *
1571 *******/
1572
1573/**
1574 * of_find_node_by_name - Find a node by its "name" property
1575 * @from: The node to start searching from or NULL, the node
1576 * you pass will not be searched, only the next one
1577 * will; typically, you pass what the previous call
1578 * returned. of_node_put() will be called on it
1579 * @name: The name string to match against
1580 *
1581 * Returns a node pointer with refcount incremented, use
1582 * of_node_put() on it when done.
1583 */
1584struct device_node *of_find_node_by_name(struct device_node *from,
1585 const char *name)
1586{
1587 struct device_node *np;
1588
1589 read_lock(&devtree_lock);
1590 np = from ? from->allnext : allnodes;
1591 for (; np != 0; np = np->allnext)
1592 if (np->name != 0 && strcasecmp(np->name, name) == 0
1593 && of_node_get(np))
1594 break;
1595 if (from)
1596 of_node_put(from);
1597 read_unlock(&devtree_lock);
1598 return np;
1599}
1600EXPORT_SYMBOL(of_find_node_by_name);
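The @from convention above gives a natural iteration idiom: each call drops the reference on the node passed in and returns the next match with its refcount raised. A hedged sketch ("mac-io" is only an example name):

	struct device_node *np = NULL;

	while ((np = of_find_node_by_name(np, "mac-io")) != NULL) {
		/* use np here; a reference is held for us across the body */
	}
	/* the loop ends with np == NULL, so there is nothing left to put;
	 * a caller that breaks out early must call of_node_put(np) itself */

The same pattern applies to of_find_node_by_type() and of_find_compatible_node() below.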
1601
1602/**
1603 * of_find_node_by_type - Find a node by its "device_type" property
1604 * @from: The node to start searching from or NULL, the node
1605 * you pass will not be searched, only the next one
1606 * will; typically, you pass what the previous call
1607 * returned. of_node_put() will be called on it
1608 * @type: The type string to match against
1609 *
1610 * Returns a node pointer with refcount incremented, use
1611 * of_node_put() on it when done.
1612 */
1613struct device_node *of_find_node_by_type(struct device_node *from,
1614 const char *type)
1615{
1616 struct device_node *np;
1617
1618 read_lock(&devtree_lock);
1619 np = from ? from->allnext : allnodes;
1620 for (; np != 0; np = np->allnext)
1621 if (np->type != 0 && strcasecmp(np->type, type) == 0
1622 && of_node_get(np))
1623 break;
1624 if (from)
1625 of_node_put(from);
1626 read_unlock(&devtree_lock);
1627 return np;
1628}
1629EXPORT_SYMBOL(of_find_node_by_type);
1630
1631/**
1632 * of_find_compatible_node - Find a node based on type and one of the
1633 * tokens in its "compatible" property
1634 * @from: The node to start searching from or NULL, the node
1635 * you pass will not be searched, only the next one
1636 * will; typically, you pass what the previous call
1637 * returned. of_node_put() will be called on it
1638 * @type: The type string to match "device_type" or NULL to ignore
1639 * @compatible: The string to match to one of the tokens in the device
1640 * "compatible" list.
1641 *
1642 * Returns a node pointer with refcount incremented, use
1643 * of_node_put() on it when done.
1644 */
1645struct device_node *of_find_compatible_node(struct device_node *from,
1646 const char *type, const char *compatible)
1647{
1648 struct device_node *np;
1649
1650 read_lock(&devtree_lock);
1651 np = from ? from->allnext : allnodes;
1652 for (; np != 0; np = np->allnext) {
1653 if (type != NULL
1654 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1655 continue;
1656 if (device_is_compatible(np, compatible) && of_node_get(np))
1657 break;
1658 }
1659 if (from)
1660 of_node_put(from);
1661 read_unlock(&devtree_lock);
1662 return np;
1663}
1664EXPORT_SYMBOL(of_find_compatible_node);
1665
1666/**
1667 * of_find_node_by_path - Find a node matching a full OF path
1668 * @path: The full path to match
1669 *
1670 * Returns a node pointer with refcount incremented, use
1671 * of_node_put() on it when done.
1672 */
1673struct device_node *of_find_node_by_path(const char *path)
1674{
1675 struct device_node *np = allnodes;
1676
1677 read_lock(&devtree_lock);
1678 for (; np != 0; np = np->allnext) {
1679 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1680 && of_node_get(np))
1681 break;
1682 }
1683 read_unlock(&devtree_lock);
1684 return np;
1685}
1686EXPORT_SYMBOL(of_find_node_by_path);
1687
1688/**
1689 * of_find_node_by_phandle - Find a node given a phandle
1690 * @handle: phandle of the node to find
1691 *
1692 * Returns a node pointer with refcount incremented, use
1693 * of_node_put() on it when done.
1694 */
1695struct device_node *of_find_node_by_phandle(phandle handle)
1696{
1697 struct device_node *np;
1698
1699 read_lock(&devtree_lock);
1700 for (np = allnodes; np != 0; np = np->allnext)
1701 if (np->linux_phandle == handle)
1702 break;
1703 if (np)
1704 of_node_get(np);
1705 read_unlock(&devtree_lock);
1706 return np;
1707}
1708EXPORT_SYMBOL(of_find_node_by_phandle);
1709
1710/**
1711 * of_find_all_nodes - Get next node in global list
1712 * @prev: Previous node or NULL to start iteration
1713 * of_node_put() will be called on it
1714 *
1715 * Returns a node pointer with refcount incremented, use
1716 * of_node_put() on it when done.
1717 */
1718struct device_node *of_find_all_nodes(struct device_node *prev)
1719{
1720 struct device_node *np;
1721
1722 read_lock(&devtree_lock);
1723 np = prev ? prev->allnext : allnodes;
1724 for (; np != 0; np = np->allnext)
1725 if (of_node_get(np))
1726 break;
1727 if (prev)
1728 of_node_put(prev);
1729 read_unlock(&devtree_lock);
1730 return np;
1731}
1732EXPORT_SYMBOL(of_find_all_nodes);
1733
1734/**
1735 * of_get_parent - Get a node's parent if any
1736 * @node: Node to get the parent of
1737 *
1738 * Returns a node pointer with refcount incremented, use
1739 * of_node_put() on it when done.
1740 */
1741struct device_node *of_get_parent(const struct device_node *node)
1742{
1743 struct device_node *np;
1744
1745 if (!node)
1746 return NULL;
1747
1748 read_lock(&devtree_lock);
1749 np = of_node_get(node->parent);
1750 read_unlock(&devtree_lock);
1751 return np;
1752}
1753EXPORT_SYMBOL(of_get_parent);
1754
1755/**
1756 * of_get_next_child - Iterate over a node's children
1757 * @node: parent node
1758 * @prev: previous child of the parent node, or NULL to get first
1759 *
1760 * Returns a node pointer with refcount incremented, use
1761 * of_node_put() on it when done.
1762 */
1763struct device_node *of_get_next_child(const struct device_node *node,
1764 struct device_node *prev)
1765{
1766 struct device_node *next;
1767
1768 read_lock(&devtree_lock);
1769 next = prev ? prev->sibling : node->child;
1770 for (; next != 0; next = next->sibling)
1771 if (of_node_get(next))
1772 break;
1773 if (prev)
1774 of_node_put(prev);
1775 read_unlock(&devtree_lock);
1776 return next;
1777}
1778EXPORT_SYMBOL(of_get_next_child);
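A sketch of the canonical child-iteration pattern this helper enables (hypothetical function, illustration only):

/* Hypothetical: count the direct children of a node. */
static int count_of_children(struct device_node *parent)
{
	struct device_node *child = NULL;
	int n = 0;

	while ((child = of_get_next_child(parent, child)) != NULL)
		n++;
	/* child is NULL here, so no reference is left to drop */
	return n;
}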
1779
1780/**
1781 * of_node_get - Increment refcount of a node
1782 * @node: Node to inc refcount, NULL is supported to
1783 * simplify writing of callers
1784 *
1785 * Returns node.
1786 */
1787struct device_node *of_node_get(struct device_node *node)
1788{
1789 if (node)
1790 kref_get(&node->kref);
1791 return node;
1792}
1793EXPORT_SYMBOL(of_node_get);
1794
1795static inline struct device_node * kref_to_device_node(struct kref *kref)
1796{
1797 return container_of(kref, struct device_node, kref);
1798}
1799
1800/**
1801 * of_node_release - release a dynamically allocated node
1802 * @kref: kref element of the node to be released
1803 *
1804 * In of_node_put() this function is passed to kref_put()
1805 * as the destructor.
1806 */
1807static void of_node_release(struct kref *kref)
1808{
1809 struct device_node *node = kref_to_device_node(kref);
1810 struct property *prop = node->properties;
1811
1812 if (!OF_IS_DYNAMIC(node))
1813 return;
1814 while (prop) {
1815 struct property *next = prop->next;
1816 kfree(prop->name);
1817 kfree(prop->value);
1818 kfree(prop);
1819 prop = next;
1820 }
1821 kfree(node->intrs);
1822 kfree(node->addrs);
1823 kfree(node->full_name);
1824 kfree(node->data);
1825 kfree(node);
1826}
1827
1828/**
1829 * of_node_put - Decrement refcount of a node
1830 * @node: Node to dec refcount, NULL is supported to
1831 * simplify writing of callers
1832 *
1833 */
1834void of_node_put(struct device_node *node)
1835{
1836 if (node)
1837 kref_put(&node->kref, of_node_release);
1838}
1839EXPORT_SYMBOL(of_node_put);
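A sketch of the get/put discipline for code that caches a node pointer beyond the scope of a find call (hypothetical structure and helpers, for illustration):

struct my_cached_node {
	struct device_node *np;
};

static void cache_node(struct my_cached_node *c, struct device_node *np)
{
	c->np = of_node_get(np);	/* take our own reference; NULL-safe */
}

static void uncache_node(struct my_cached_node *c)
{
	of_node_put(c->np);		/* drop it; NULL-safe as well */
	c->np = NULL;
}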
1840
1841/*
1842 * Plug a device node into the tree and global list.
1843 */
1844void of_attach_node(struct device_node *np)
1845{
1846 write_lock(&devtree_lock);
1847 np->sibling = np->parent->child;
1848 np->allnext = allnodes;
1849 np->parent->child = np;
1850 allnodes = np;
1851 write_unlock(&devtree_lock);
1852}
1853
1854/*
1855 * "Unplug" a node from the device tree. The caller must hold
1856 * a reference to the node. The memory associated with the node
1857 * is not freed until its refcount goes to zero.
1858 */
1859void of_detach_node(const struct device_node *np)
1860{
1861 struct device_node *parent;
1862
1863 write_lock(&devtree_lock);
1864
1865 parent = np->parent;
1866
1867 if (allnodes == np)
1868 allnodes = np->allnext;
1869 else {
1870 struct device_node *prev;
1871 for (prev = allnodes;
1872 prev->allnext != np;
1873 prev = prev->allnext)
1874 ;
1875 prev->allnext = np->allnext;
1876 }
1877
1878 if (parent->child == np)
1879 parent->child = np->sibling;
1880 else {
1881 struct device_node *prevsib;
1882 for (prevsib = np->parent->child;
1883 prevsib->sibling != np;
1884 prevsib = prevsib->sibling)
1885 ;
1886 prevsib->sibling = np->sibling;
1887 }
1888
1889 write_unlock(&devtree_lock);
1890}
1891
1892#ifdef CONFIG_PPC_PSERIES
1893/*
1894 * Fix up the uninitialized fields in a new device node:
1895 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1896 *
1897 * A lot of boot-time code is duplicated here, because functions such
1898 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1899 * slab allocator.
1900 *
1901 * This should probably be split up into smaller chunks.
1902 */
1903
1904static int of_finish_dynamic_node(struct device_node *node,
1905 unsigned long *unused1, int unused2,
1906 int unused3, int unused4)
1907{
1908 struct device_node *parent = of_get_parent(node);
1909 int err = 0;
1910 phandle *ibm_phandle;
1911
1912 node->name = get_property(node, "name", NULL);
1913 node->type = get_property(node, "device_type", NULL);
1914
1915 if (!parent) {
1916 err = -ENODEV;
1917 goto out;
1918 }
1919
1920 /* We don't support this function on PowerMac yet */
1921 if (systemcfg->platform == PLATFORM_POWERMAC) {
1922 err = -ENODEV;
1923 goto out;
1924 }
1925
1926 /* fix up new node's linux_phandle field */
1927 if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1928 node->linux_phandle = *ibm_phandle;
1929
1930out:
1931 of_node_put(parent);
1932 return err;
1933}
1934
1935static int prom_reconfig_notifier(struct notifier_block *nb,
1936 unsigned long action, void *node)
1937{
1938 int err;
1939
1940 switch (action) {
1941 case PSERIES_RECONFIG_ADD:
1942 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1943 if (err < 0) {
1944 printk(KERN_ERR "finish_node returned %d\n", err);
1945 err = NOTIFY_BAD;
1946 }
1947 break;
1948 default:
1949 err = NOTIFY_DONE;
1950 break;
1951 }
1952 return err;
1953}
1954
1955static struct notifier_block prom_reconfig_nb = {
1956 .notifier_call = prom_reconfig_notifier,
1957 .priority = 10, /* This one needs to run first */
1958};
1959
1960static int __init prom_reconfig_setup(void)
1961{
1962 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1963}
1964__initcall(prom_reconfig_setup);
1965#endif
1966
1967/*
1968 * Find a property with a given name for a given node
1969 * and return the value.
1970 */
1971unsigned char *get_property(struct device_node *np, const char *name,
1972 int *lenp)
1973{
1974 struct property *pp;
1975
1976 for (pp = np->properties; pp != 0; pp = pp->next)
1977 if (strcmp(pp->name, name) == 0) {
1978 if (lenp != 0)
1979 *lenp = pp->length;
1980 return pp->value;
1981 }
1982 return NULL;
1983}
1984EXPORT_SYMBOL(get_property);
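A sketch of a typical get_property() call (hypothetical caller; "clock-frequency" is a conventional OF property name used here only as an example):

static void print_clock_frequency(struct device_node *np)
{
	unsigned int *fp;
	int len;

	fp = (unsigned int *)get_property(np, "clock-frequency", &len);
	if (fp != NULL && len >= 4)
		printk(KERN_INFO "%s: clock-frequency = %u\n",
		       np->full_name, *fp);
}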
1985
1986/*
1987 * Add a property to a node
1988 */
1989void prom_add_property(struct device_node* np, struct property* prop)
1990{
1991 struct property **next = &np->properties;
1992
1993 prop->next = NULL;
1994 while (*next)
1995 next = &(*next)->next;
1996 *next = prop;
1997}
1998
1999/* I quickly hacked that one, check against spec ! */
2000static inline unsigned long
2001bus_space_to_resource_flags(unsigned int bus_space)
2002{
2003 u8 space = (bus_space >> 24) & 0xf;
2004 if (space == 0)
2005 space = 0x02;
2006 if (space == 0x02)
2007 return IORESOURCE_MEM;
2008 else if (space == 0x01)
2009 return IORESOURCE_IO;
2010 else {
2011 printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2012 bus_space);
2013 return 0;
2014 }
2015}
2016
2017#ifdef CONFIG_PCI
2018static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2019 struct address_range *range)
2020{
2021 unsigned long mask;
2022 int i;
2023
2024 /* Check this one */
2025 mask = bus_space_to_resource_flags(range->space);
2026 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2027 if ((pdev->resource[i].flags & mask) == mask &&
2028 pdev->resource[i].start <= range->address &&
2029 pdev->resource[i].end > range->address) {
2030 if ((range->address + range->size - 1) > pdev->resource[i].end) {
2031 /* Add better message */
2032 printk(KERN_WARNING "PCI/OF resource overlap !\n");
2033 return NULL;
2034 }
2035 break;
2036 }
2037 }
2038 if (i == DEVICE_COUNT_RESOURCE)
2039 return NULL;
2040 return &pdev->resource[i];
2041}
2042
2043/*
2044 * Request an OF device resource. Currently handles child of PCI devices,
2045 * or other nodes attached to the root node. Ultimately, put some
2046 * link to resources in the OF node.
2047 */
2048struct resource *request_OF_resource(struct device_node* node, int index,
2049 const char* name_postfix)
2050{
2051 struct pci_dev* pcidev;
2052 u8 pci_bus, pci_devfn;
2053 unsigned long iomask;
2054 struct device_node* nd;
2055 struct resource* parent;
2056 struct resource *res = NULL;
2057 int nlen, plen;
2058
2059 if (index >= node->n_addrs)
2060 goto fail;
2061
2062 /* Sanity check on bus space */
2063 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2064 if (iomask & IORESOURCE_MEM)
2065 parent = &iomem_resource;
2066 else if (iomask & IORESOURCE_IO)
2067 parent = &ioport_resource;
2068 else
2069 goto fail;
2070
2071 /* Find a PCI parent if any */
2072 nd = node;
2073 pcidev = NULL;
2074 while (nd) {
2075 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2076 pcidev = pci_find_slot(pci_bus, pci_devfn);
2077 if (pcidev) break;
2078 nd = nd->parent;
2079 }
2080 if (pcidev)
2081 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2082 if (!parent) {
2083 printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2084 node->name);
2085 goto fail;
2086 }
2087
2088 res = __request_region(parent, node->addrs[index].address,
2089 node->addrs[index].size, NULL);
2090 if (!res)
2091 goto fail;
2092 nlen = strlen(node->name);
2093 plen = name_postfix ? strlen(name_postfix) : 0;
2094 res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2095 if (res->name) {
2096 strcpy((char *)res->name, node->name);
2097 if (plen)
2098 strcpy((char *)res->name+nlen, name_postfix);
2099 }
2100 return res;
2101fail:
2102 return NULL;
2103}
2104EXPORT_SYMBOL(request_OF_resource);
2105
2106int release_OF_resource(struct device_node *node, int index)
2107{
2108 struct pci_dev* pcidev;
2109 u8 pci_bus, pci_devfn;
2110 unsigned long iomask, start, end;
2111 struct device_node* nd;
2112 struct resource* parent;
2113 struct resource *res = NULL;
2114
2115 if (index >= node->n_addrs)
2116 return -EINVAL;
2117
2118 /* Sanity check on bus space */
2119 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2120 if (iomask & IORESOURCE_MEM)
2121 parent = &iomem_resource;
2122 else if (iomask & IORESOURCE_IO)
2123 parent = &ioport_resource;
2124 else
2125 return -EINVAL;
2126
2127 /* Find a PCI parent if any */
2128 nd = node;
2129 pcidev = NULL;
2130 while(nd) {
2131 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2132 pcidev = pci_find_slot(pci_bus, pci_devfn);
2133 if (pcidev) break;
2134 nd = nd->parent;
2135 }
2136 if (pcidev)
2137 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2138 if (!parent) {
2139 printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2140 node->name);
2141 return -ENODEV;
2142 }
2143
2144 /* Find us in the parent and its children */
2145 res = parent->child;
2146 start = node->addrs[index].address;
2147 end = start + node->addrs[index].size - 1;
2148 while (res) {
2149 if (res->start == start && res->end == end &&
2150 (res->flags & IORESOURCE_BUSY))
2151 break;
2152 if (res->start <= start && res->end >= end)
2153 res = res->child;
2154 else
2155 res = res->sibling;
2156 }
2157 if (!res)
2158 return -ENODEV;
2159
2160 if (res->name) {
2161 kfree(res->name);
2162 res->name = NULL;
2163 }
2164 release_resource(res);
2165 kfree(res);
2166
2167 return 0;
2168}
2169EXPORT_SYMBOL(release_OF_resource);
2170#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
new file mode 100644
index 000000000000..9750b3cd8ecd
--- /dev/null
+++ b/arch/powerpc/kernel/prom_init.c
@@ -0,0 +1,2109 @@
1/*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG_PROM
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/proc_fs.h>
28#include <linux/stringify.h>
29#include <linux/delay.h>
30#include <linux/initrd.h>
31#include <linux/bitops.h>
32#include <asm/prom.h>
33#include <asm/rtas.h>
34#include <asm/page.h>
35#include <asm/processor.h>
36#include <asm/irq.h>
37#include <asm/io.h>
38#include <asm/smp.h>
39#include <asm/system.h>
40#include <asm/mmu.h>
41#include <asm/pgtable.h>
42#include <asm/pci.h>
43#include <asm/iommu.h>
44#include <asm/btext.h>
45#include <asm/sections.h>
46#include <asm/machdep.h>
47
48#ifdef CONFIG_LOGO_LINUX_CLUT224
49#include <linux/linux_logo.h>
50extern const struct linux_logo logo_linux_clut224;
51#endif
52
53/*
54 * Properties whose value is longer than this get excluded from our
55 * copy of the device tree. This value does need to be big enough to
56 * ensure that we don't lose things like the interrupt-map property
57 * on a PCI-PCI bridge.
58 */
59#define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
60
61/*
62 * Eventually bump that one up
63 */
64#define DEVTREE_CHUNK_SIZE 0x100000
65
66/*
67 * This is the size of the local memory reserve map that gets copied
68 * into the boot params passed to the kernel. That size is totally
69 * flexible as the kernel just reads the list until it encounters an
70 * entry with size 0, so it can be changed without breaking binary
71 * compatibility
72 */
73#define MEM_RESERVE_MAP_SIZE 8
74
75/*
76 * prom_init() is called very early on, before the kernel text
77 * and data have been mapped to KERNELBASE. At this point the code
78 * is running at whatever address it has been loaded at.
79 * On ppc32 we compile with -mrelocatable, which means that references
80 * to extern and static variables get relocated automatically.
81 * On ppc64 we have to relocate the references explicitly with
82 * RELOC. (Note that strings count as static variables.)
83 *
84 * Because OF may have mapped I/O devices into the area starting at
85 * KERNELBASE, particularly on CHRP machines, we can't safely call
86 * OF once the kernel has been mapped to KERNELBASE. Therefore all
87 * OF calls must be done within prom_init().
88 *
89 * ADDR is used in calls to call_prom. The 4th and following
90 * arguments to call_prom should be 32-bit values.
91 * On ppc64, 64 bit values are truncated to 32 bits (and
92 * fortunately don't get interpreted as two arguments).
93 */
94#ifdef CONFIG_PPC64
95#define RELOC(x) (*PTRRELOC(&(x)))
96#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
97#else
98#define RELOC(x) (x)
99#define ADDR(x) (u32) (x)
100#endif
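A small sketch of the convention the two macros enforce (names are made up for illustration; the real users follow throughout the rest of this file): static data is accessed through RELOC(), and any address handed to Open Firmware goes through ADDR() so it is a 32-bit value valid at the code's current, unrelocated location.

#if 0	/* illustrative sketch only */
static int example_flag;

static void __init reloc_example(void)
{
	u32 msg;

	RELOC(example_flag) = 1;	/* static data access */
	msg = ADDR("hello");		/* 32-bit address of a string constant */
	(void)msg;
}
#endif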
101
102#define PROM_BUG() do { \
103 prom_printf("kernel BUG at %s line 0x%x!\n", \
104 RELOC(__FILE__), __LINE__); \
105 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
106} while (0)
107
108#ifdef DEBUG_PROM
109#define prom_debug(x...) prom_printf(x)
110#else
111#define prom_debug(x...)
112#endif
113
114#ifdef CONFIG_PPC32
115#define PLATFORM_POWERMAC _MACH_Pmac
116#define PLATFORM_CHRP _MACH_chrp
117#endif
118
119
120typedef u32 prom_arg_t;
121
122struct prom_args {
123 u32 service;
124 u32 nargs;
125 u32 nret;
126 prom_arg_t args[10];
127};
128
129struct prom_t {
130 ihandle root;
131 ihandle chosen;
132 int cpu;
133 ihandle stdout;
134 ihandle mmumap;
135};
136
137struct mem_map_entry {
138 unsigned long base;
139 unsigned long size;
140};
141
142typedef u32 cell_t;
143
144extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
145
146#ifdef CONFIG_PPC64
147extern int enter_prom(struct prom_args *args, unsigned long entry);
148#else
149static inline int enter_prom(struct prom_args *args, unsigned long entry)
150{
151 return ((int (*)(struct prom_args *))entry)(args);
152}
153#endif
154
155extern void copy_and_flush(unsigned long dest, unsigned long src,
156 unsigned long size, unsigned long offset);
157
158/* prom structure */
159static struct prom_t __initdata prom;
160
161static unsigned long prom_entry __initdata;
162
163#define PROM_SCRATCH_SIZE 256
164
165static char __initdata of_stdout_device[256];
166static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
167
168static unsigned long __initdata dt_header_start;
169static unsigned long __initdata dt_struct_start, dt_struct_end;
170static unsigned long __initdata dt_string_start, dt_string_end;
171
172static unsigned long __initdata prom_initrd_start, prom_initrd_end;
173
174#ifdef CONFIG_PPC64
175static int __initdata iommu_force_on;
176static int __initdata ppc64_iommu_off;
177static unsigned long __initdata prom_tce_alloc_start;
178static unsigned long __initdata prom_tce_alloc_end;
179#endif
180
181static int __initdata of_platform;
182
183static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
184
185static unsigned long __initdata prom_memory_limit;
186
187static unsigned long __initdata alloc_top;
188static unsigned long __initdata alloc_top_high;
189static unsigned long __initdata alloc_bottom;
190static unsigned long __initdata rmo_top;
191static unsigned long __initdata ram_top;
192
193static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
194static int __initdata mem_reserve_cnt;
195
196static cell_t __initdata regbuf[1024];
197
198
199#define MAX_CPU_THREADS 2
200
201/* TO GO */
202#ifdef CONFIG_HMT
203struct {
204 unsigned int pir;
205 unsigned int threadid;
206} hmt_thread_data[NR_CPUS];
207#endif /* CONFIG_HMT */
208
209/*
210 * Error results ... some OF calls will return "-1" on error, some
211 * will return 0, some will return either. To simplify, here are
212 * macros to use with any ihandle or phandle return value to check if
213 * it is valid
214 */
215
216#define PROM_ERROR (-1u)
217#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
218#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
219
220
221/* This is the one and *ONLY* place where we actually call open
222 * firmware.
223 */
224
225static int __init call_prom(const char *service, int nargs, int nret, ...)
226{
227 int i;
228 struct prom_args args;
229 va_list list;
230
231 args.service = ADDR(service);
232 args.nargs = nargs;
233 args.nret = nret;
234
235 va_start(list, nret);
236 for (i = 0; i < nargs; i++)
237 args.args[i] = va_arg(list, prom_arg_t);
238 va_end(list);
239
240 for (i = 0; i < nret; i++)
241 args.args[nargs+i] = 0;
242
243 if (enter_prom(&args, RELOC(prom_entry)) < 0)
244 return PROM_ERROR;
245
246 return (nret > 0) ? args.args[nargs] : 0;
247}
248
249static int __init call_prom_ret(const char *service, int nargs, int nret,
250 prom_arg_t *rets, ...)
251{
252 int i;
253 struct prom_args args;
254 va_list list;
255
256 args.service = ADDR(service);
257 args.nargs = nargs;
258 args.nret = nret;
259
260 va_start(list, rets);
261 for (i = 0; i < nargs; i++)
262 args.args[i] = va_arg(list, prom_arg_t);
263 va_end(list);
264
265 for (i = 0; i < nret; i++)
266 args.args[nargs+i] = 0;
267
268 if (enter_prom(&args, RELOC(prom_entry)) < 0)
269 return PROM_ERROR;
270
271 if (rets != NULL)
272 for (i = 1; i < nret; ++i)
273 rets[i-1] = args.args[nargs+i];
274
275 return (nret > 0) ? args.args[nargs] : 0;
276}
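A sketch of how a caller retrieves a second return value: the first OF return value comes back as the function result, the remaining nret-1 values are copied into rets[] starting at index 0. This mirrors the "instantiate-rtas" call made later in this file; the variables here are hypothetical.

#if 0	/* illustrative sketch only */
static void __init two_return_values(ihandle rtas_inst, u32 base)
{
	prom_arg_t entry = 0;

	if (call_prom_ret("call-method", 3, 2, &entry,
			  ADDR("instantiate-rtas"), rtas_inst, base)
	    == PROM_ERROR || entry == 0)
		prom_printf("instantiate-rtas failed\n");
}
#endif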
277
278
279static void __init prom_print(const char *msg)
280{
281 const char *p, *q;
282 struct prom_t *_prom = &RELOC(prom);
283
284 if (_prom->stdout == 0)
285 return;
286
287 for (p = msg; *p != 0; p = q) {
288 for (q = p; *q != 0 && *q != '\n'; ++q)
289 ;
290 if (q > p)
291 call_prom("write", 3, 1, _prom->stdout, p, q - p);
292 if (*q == 0)
293 break;
294 ++q;
295 call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
296 }
297}
298
299
300static void __init prom_print_hex(unsigned long val)
301{
302 int i, nibbles = sizeof(val)*2;
303 char buf[sizeof(val)*2+1];
304 struct prom_t *_prom = &RELOC(prom);
305
306 for (i = nibbles-1; i >= 0; i--) {
307 buf[i] = (val & 0xf) + '0';
308 if (buf[i] > '9')
309 buf[i] += ('a'-'0'-10);
310 val >>= 4;
311 }
312 buf[nibbles] = '\0';
313 call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
314}
315
316
317static void __init prom_printf(const char *format, ...)
318{
319 const char *p, *q, *s;
320 va_list args;
321 unsigned long v;
322 struct prom_t *_prom = &RELOC(prom);
323
324 va_start(args, format);
325#ifdef CONFIG_PPC64
326 format = PTRRELOC(format);
327#endif
328 for (p = format; *p != 0; p = q) {
329 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
330 ;
331 if (q > p)
332 call_prom("write", 3, 1, _prom->stdout, p, q - p);
333 if (*q == 0)
334 break;
335 if (*q == '\n') {
336 ++q;
337 call_prom("write", 3, 1, _prom->stdout,
338 ADDR("\r\n"), 2);
339 continue;
340 }
341 ++q;
342 if (*q == 0)
343 break;
344 switch (*q) {
345 case 's':
346 ++q;
347 s = va_arg(args, const char *);
348 prom_print(s);
349 break;
350 case 'x':
351 ++q;
352 v = va_arg(args, unsigned long);
353 prom_print_hex(v);
354 break;
355 }
356 }
357}
358
359
360static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
361 unsigned long align)
362{
363 int ret;
364 struct prom_t *_prom = &RELOC(prom);
365
366 ret = call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
367 (prom_arg_t)align);
368 if (ret != -1 && _prom->mmumap != 0)
369 /* old pmacs need us to map as well */
370 call_prom("call-method", 6, 1,
371 ADDR("map"), _prom->mmumap, 0, size, virt, virt);
372 return ret;
373}
374
375static void __init __attribute__((noreturn)) prom_panic(const char *reason)
376{
377#ifdef CONFIG_PPC64
378 reason = PTRRELOC(reason);
379#endif
380 prom_print(reason);
381 /* ToDo: should put up an SRC here on p/iSeries */
382 call_prom("exit", 0, 0);
383
384 for (;;) /* should never get here */
385 ;
386}
387
388
389static int __init prom_next_node(phandle *nodep)
390{
391 phandle node;
392
393 if ((node = *nodep) != 0
394 && (*nodep = call_prom("child", 1, 1, node)) != 0)
395 return 1;
396 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
397 return 1;
398 for (;;) {
399 if ((node = call_prom("parent", 1, 1, node)) == 0)
400 return 0;
401 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
402 return 1;
403 }
404}
405
406static int __init prom_getprop(phandle node, const char *pname,
407 void *value, size_t valuelen)
408{
409 return call_prom("getprop", 4, 1, node, ADDR(pname),
410 (u32)(unsigned long) value, (u32) valuelen);
411}
412
413static int __init prom_getproplen(phandle node, const char *pname)
414{
415 return call_prom("getproplen", 2, 1, node, ADDR(pname));
416}
417
418static int __init prom_setprop(phandle node, const char *pname,
419 void *value, size_t valuelen)
420{
421 return call_prom("setprop", 4, 1, node, ADDR(pname),
422 (u32)(unsigned long) value, (u32) valuelen);
423}
424
425/* We can't use the standard versions because of RELOC headaches. */
426#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
427 || ('a' <= (c) && (c) <= 'f') \
428 || ('A' <= (c) && (c) <= 'F'))
429
430#define isdigit(c) ('0' <= (c) && (c) <= '9')
431#define islower(c) ('a' <= (c) && (c) <= 'z')
432#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
433
434unsigned long prom_strtoul(const char *cp, const char **endp)
435{
436 unsigned long result = 0, base = 10, value;
437
438 if (*cp == '0') {
439 base = 8;
440 cp++;
441 if (toupper(*cp) == 'X') {
442 cp++;
443 base = 16;
444 }
445 }
446
447 while (isxdigit(*cp) &&
448 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
449 result = result * base + value;
450 cp++;
451 }
452
453 if (endp)
454 *endp = cp;
455
456 return result;
457}
458
459unsigned long prom_memparse(const char *ptr, const char **retptr)
460{
461 unsigned long ret = prom_strtoul(ptr, retptr);
462 int shift = 0;
463
464 /*
465 * We can't use a switch here because GCC *may* generate a
466 * jump table which won't work, because we're not running at
467 * the address we're linked at.
468 */
469 if ('G' == **retptr || 'g' == **retptr)
470 shift = 30;
471
472 if ('M' == **retptr || 'm' == **retptr)
473 shift = 20;
474
475 if ('K' == **retptr || 'k' == **retptr)
476 shift = 10;
477
478 if (shift) {
479 ret <<= shift;
480 (*retptr)++;
481 }
482
483 return ret;
484}
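A sketch of what the two parsers above produce for a typical "mem=" argument (hypothetical helper, illustration only):

#if 0	/* illustrative sketch only */
static unsigned long __init parse_example(void)
{
	const char *p = "512M";

	/* prom_strtoul() parses the digits, the trailing 'M' shifts by 20,
	 * so this returns 0x20000000 and leaves p pointing past the 'M'. */
	return prom_memparse(p, &p);
}
#endif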
485
486/*
487 * Early parsing of the command line passed to the kernel, used for
488 * "mem=x" and the options that affect the iommu
489 */
490static void __init early_cmdline_parse(void)
491{
492 struct prom_t *_prom = &RELOC(prom);
493 char *opt, *p;
494 int l = 0;
495
496 RELOC(prom_cmd_line[0]) = 0;
497 p = RELOC(prom_cmd_line);
498 if ((long)_prom->chosen > 0)
499 l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
500#ifdef CONFIG_CMDLINE
501 if (l == 0) /* dbl check */
502 strlcpy(RELOC(prom_cmd_line),
503 RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
504#endif /* CONFIG_CMDLINE */
505 prom_printf("command line: %s\n", RELOC(prom_cmd_line));
506
507#ifdef CONFIG_PPC64
508 opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
509 if (opt) {
510 prom_printf("iommu opt is: %s\n", opt);
511 opt += 6;
512 while (*opt && *opt == ' ')
513 opt++;
514 if (!strncmp(opt, RELOC("off"), 3))
515 RELOC(ppc64_iommu_off) = 1;
516 else if (!strncmp(opt, RELOC("force"), 5))
517 RELOC(iommu_force_on) = 1;
518 }
519#endif
520
521 opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
522 if (opt) {
523 opt += 4;
524 RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
525#ifdef CONFIG_PPC64
526 /* Align to 16 MB == size of ppc64 large page */
527 RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
528#endif
529 }
530}
531
532#ifdef CONFIG_PPC_PSERIES
533/*
534 * To tell the firmware what our capabilities are, we have to pass
535 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
536 * that contain structures that contain the actual values.
537 */
538static struct fake_elf {
539 Elf32_Ehdr elfhdr;
540 Elf32_Phdr phdr[2];
541 struct chrpnote {
542 u32 namesz;
543 u32 descsz;
544 u32 type;
545 char name[8]; /* "PowerPC" */
546 struct chrpdesc {
547 u32 real_mode;
548 u32 real_base;
549 u32 real_size;
550 u32 virt_base;
551 u32 virt_size;
552 u32 load_base;
553 } chrpdesc;
554 } chrpnote;
555 struct rpanote {
556 u32 namesz;
557 u32 descsz;
558 u32 type;
559 char name[24]; /* "IBM,RPA-Client-Config" */
560 struct rpadesc {
561 u32 lpar_affinity;
562 u32 min_rmo_size;
563 u32 min_rmo_percent;
564 u32 max_pft_size;
565 u32 splpar;
566 u32 min_load;
567 u32 new_mem_def;
568 u32 ignore_me;
569 } rpadesc;
570 } rpanote;
571} fake_elf = {
572 .elfhdr = {
573 .e_ident = { 0x7f, 'E', 'L', 'F',
574 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
575 .e_type = ET_EXEC, /* yeah right */
576 .e_machine = EM_PPC,
577 .e_version = EV_CURRENT,
578 .e_phoff = offsetof(struct fake_elf, phdr),
579 .e_phentsize = sizeof(Elf32_Phdr),
580 .e_phnum = 2
581 },
582 .phdr = {
583 [0] = {
584 .p_type = PT_NOTE,
585 .p_offset = offsetof(struct fake_elf, chrpnote),
586 .p_filesz = sizeof(struct chrpnote)
587 }, [1] = {
588 .p_type = PT_NOTE,
589 .p_offset = offsetof(struct fake_elf, rpanote),
590 .p_filesz = sizeof(struct rpanote)
591 }
592 },
593 .chrpnote = {
594 .namesz = sizeof("PowerPC"),
595 .descsz = sizeof(struct chrpdesc),
596 .type = 0x1275,
597 .name = "PowerPC",
598 .chrpdesc = {
599 .real_mode = ~0U, /* ~0 means "don't care" */
600 .real_base = ~0U,
601 .real_size = ~0U,
602 .virt_base = ~0U,
603 .virt_size = ~0U,
604 .load_base = ~0U
605 },
606 },
607 .rpanote = {
608 .namesz = sizeof("IBM,RPA-Client-Config"),
609 .descsz = sizeof(struct rpadesc),
610 .type = 0x12759999,
611 .name = "IBM,RPA-Client-Config",
612 .rpadesc = {
613 .lpar_affinity = 0,
614 .min_rmo_size = 64, /* in megabytes */
615 .min_rmo_percent = 0,
616 .max_pft_size = 48, /* 2^48 bytes max PFT size */
617 .splpar = 1,
618 .min_load = ~0U,
619 .new_mem_def = 0
620 }
621 }
622};
623
624static void __init prom_send_capabilities(void)
625{
626 ihandle elfloader;
627
628 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
629 if (elfloader == 0) {
630 prom_printf("couldn't open /packages/elf-loader\n");
631 return;
632 }
633 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
634 elfloader, ADDR(&fake_elf));
635 call_prom("close", 1, 0, elfloader);
636}
637#endif
638
639/*
640 * Memory allocation strategy... our layout is normally:
641 *
642 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
643 * rare cases, initrd might end up being before the kernel though.
644 * We assume this won't override the final kernel at 0, we have no
645 * provision to handle that in this version, but it should hopefully
646 * never happen.
647 *
648 * alloc_top is set to the top of RMO, eventually shrink down if the
649 * TCEs overlap
650 *
651 * alloc_bottom is set to the top of kernel/initrd
652 *
653 * from there, allocations are done this way : rtas is allocated
654 * topmost, and the device-tree is allocated from the bottom. We try
655 * to grow the device-tree allocation as we progress. If we can't,
656 * then we fail; we don't currently have a facility to restart
657 * elsewhere, but that shouldn't be necessary.
658 *
659 * Note that calls to reserve_mem have to be done explicitly, memory
660 * allocated with either alloc_up or alloc_down isn't automatically
661 * reserved.
662 */
663
664
665/*
666 * Allocates memory in the RMO upward from the kernel/initrd
667 *
668 * When align is 0 this is a special case: it means allocate in place
669 * at the current location of alloc_bottom, or fail (that is, basically
670 * extend the previous allocation). Used for the device-tree flattening.
671 */
672static unsigned long __init alloc_up(unsigned long size, unsigned long align)
673{
674 unsigned long base = RELOC(alloc_bottom);
675 unsigned long addr = 0;
676
677 if (align)
678 base = _ALIGN_UP(base, align);
679 prom_debug("alloc_up(%x, %x)\n", size, align);
680 if (RELOC(ram_top) == 0)
681 prom_panic("alloc_up() called with mem not initialized\n");
682
683 if (align)
684 base = _ALIGN_UP(RELOC(alloc_bottom), align);
685 else
686 base = RELOC(alloc_bottom);
687
688 for(; (base + size) <= RELOC(alloc_top);
689 base = _ALIGN_UP(base + 0x100000, align)) {
690 prom_debug(" trying: 0x%x\n\r", base);
691 addr = (unsigned long)prom_claim(base, size, 0);
692 if (addr != PROM_ERROR && addr != 0)
693 break;
694 addr = 0;
695 if (align == 0)
696 break;
697 }
698 if (addr == 0)
699 return 0;
700 RELOC(alloc_bottom) = addr;
701
702 prom_debug(" -> %x\n", addr);
703 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
704 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
705 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
706 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
707 prom_debug(" ram_top : %x\n", RELOC(ram_top));
708
709 return addr;
710}
711
712/*
713 * Allocates memory downward, either from top of RMO, or if highmem
714 * is set, from the top of RAM. Note that this one doesn't handle
715 * failures. It does claim memory if highmem is not set.
716 */
717static unsigned long __init alloc_down(unsigned long size, unsigned long align,
718 int highmem)
719{
720 unsigned long base, addr = 0;
721
722 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
723 highmem ? RELOC("(high)") : RELOC("(low)"));
724 if (RELOC(ram_top) == 0)
725 prom_panic("alloc_down() called with mem not initialized\n");
726
727 if (highmem) {
728 /* Carve out storage for the TCE table. */
729 addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
730 if (addr <= RELOC(alloc_bottom))
731 return 0;
732 /* Will we bump into the RMO ? If yes, check that we don't
733 * overlap existing allocations there; if we do, we are dead:
734 * we must be the first in town !
735 */
736 if (addr < RELOC(rmo_top)) {
737 /* Good, we are first */
738 if (RELOC(alloc_top) == RELOC(rmo_top))
739 RELOC(alloc_top) = RELOC(rmo_top) = addr;
740 else
741 return 0;
742 }
743 RELOC(alloc_top_high) = addr;
744 goto bail;
745 }
746
747 base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
748 for (; base > RELOC(alloc_bottom);
749 base = _ALIGN_DOWN(base - 0x100000, align)) {
750 prom_debug(" trying: 0x%x\n\r", base);
751 addr = (unsigned long)prom_claim(base, size, 0);
752 if (addr != PROM_ERROR && addr != 0)
753 break;
754 addr = 0;
755 }
756 if (addr == 0)
757 return 0;
758 RELOC(alloc_top) = addr;
759
760 bail:
761 prom_debug(" -> %x\n", addr);
762 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
763 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
764 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
765 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
766 prom_debug(" ram_top : %x\n", RELOC(ram_top));
767
768 return addr;
769}
770
771/*
772 * Parse a "reg" cell
773 */
774static unsigned long __init prom_next_cell(int s, cell_t **cellp)
775{
776 cell_t *p = *cellp;
777 unsigned long r = 0;
778
779 /* Ignore more than 2 cells */
780 while (s > sizeof(unsigned long) / 4) {
781 p++;
782 s--;
783 }
784 r = *p++;
785#ifdef CONFIG_PPC64
786 if (s > 1) {
787 r <<= 32;
788 r |= *(p++);
789 }
790#endif
791 *cellp = p;
792 return r;
793}
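A sketch of how prom_next_cell() consumes one (address, size) pair from a "reg" property, given the root's #address-cells and #size-cells values (hypothetical helper; prom_init_mem() below does the real work):

#if 0	/* illustrative sketch only */
static void __init parse_one_reg_entry(cell_t *reg, u32 rac, u32 rsc)
{
	unsigned long base, size;

	base = prom_next_cell(rac, &reg);	/* consumes rac cells */
	size = prom_next_cell(rsc, &reg);	/* consumes rsc cells */
	prom_debug("reg entry: base=%x size=%x\n", base, size);
}
#endif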
794
795/*
796 * Very dumb function for adding to the memory reserve list, but
797 * we don't need anything smarter at this point
798 *
799 * XXX Eventually check for collisions. They should NEVER happen.
800 * If problems seem to show up, it would be a good start to track
801 * them down.
802 */
803static void reserve_mem(unsigned long base, unsigned long size)
804{
805 unsigned long top = base + size;
806 unsigned long cnt = RELOC(mem_reserve_cnt);
807
808 if (size == 0)
809 return;
810
811 /* We need to always keep one empty entry so that we
812 * have our terminator with "size" set to 0 since we are
813 * dumb and just copy this entire array to the boot params
814 */
815 base = _ALIGN_DOWN(base, PAGE_SIZE);
816 top = _ALIGN_UP(top, PAGE_SIZE);
817 size = top - base;
818
819 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
820 prom_panic("Memory reserve map exhausted !\n");
821 RELOC(mem_reserve_map)[cnt].base = base;
822 RELOC(mem_reserve_map)[cnt].size = size;
823 RELOC(mem_reserve_cnt) = cnt + 1;
824}
825
826/*
827 * Initialize the memory allocation mechanism: parse "memory" nodes and
828 * obtain from them the top of memory and of the RMO, to set up our local allocator
829 */
830static void __init prom_init_mem(void)
831{
832 phandle node;
833 char *path, type[64];
834 unsigned int plen;
835 cell_t *p, *endp;
836 struct prom_t *_prom = &RELOC(prom);
837 u32 rac, rsc;
838
839 /*
840 * We iterate the memory nodes to find
841 * 1) top of RMO (first node)
842 * 2) top of memory
843 */
844 rac = 2;
845 prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
846 rsc = 1;
847 prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
848 prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
849 prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
850
851 prom_debug("scanning memory:\n");
852 path = RELOC(prom_scratch);
853
854 for (node = 0; prom_next_node(&node); ) {
855 type[0] = 0;
856 prom_getprop(node, "device_type", type, sizeof(type));
857
858 if (type[0] == 0) {
859 /*
860 * CHRP Longtrail machines have no device_type
861 * on the memory node, so check the name instead...
862 */
863 prom_getprop(node, "name", type, sizeof(type));
864 }
865 if (strcmp(type, RELOC("memory")))
866 continue;
867
868 plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
869 if (plen > sizeof(regbuf)) {
870 prom_printf("memory node too large for buffer !\n");
871 plen = sizeof(regbuf);
872 }
873 p = RELOC(regbuf);
874 endp = p + (plen / sizeof(cell_t));
875
876#ifdef DEBUG_PROM
877 memset(path, 0, PROM_SCRATCH_SIZE);
878 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
879 prom_debug(" node %s :\n", path);
880#endif /* DEBUG_PROM */
881
882 while ((endp - p) >= (rac + rsc)) {
883 unsigned long base, size;
884
885 base = prom_next_cell(rac, &p);
886 size = prom_next_cell(rsc, &p);
887
888 if (size == 0)
889 continue;
890 prom_debug(" %x %x\n", base, size);
891 if (base == 0)
892 RELOC(rmo_top) = size;
893 if ((base + size) > RELOC(ram_top))
894 RELOC(ram_top) = base + size;
895 }
896 }
897
898 RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
899
900 /* Check if we have an initrd after the kernel; if we do, move our bottom
901 * point to after it
902 */
903 if (RELOC(prom_initrd_start)) {
904 if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
905 RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
906 }
907
908 /*
909 * If prom_memory_limit is set we reduce the upper limits *except* for
910 * alloc_top_high. This must be the real top of RAM so we can put
911 * TCE's up there.
912 */
913
914 RELOC(alloc_top_high) = RELOC(ram_top);
915
916 if (RELOC(prom_memory_limit)) {
917 if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
918 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
919 RELOC(prom_memory_limit));
920 RELOC(prom_memory_limit) = 0;
921 } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
922 prom_printf("Ignoring mem=%x >= ram_top.\n",
923 RELOC(prom_memory_limit));
924 RELOC(prom_memory_limit) = 0;
925 } else {
926 RELOC(ram_top) = RELOC(prom_memory_limit);
927 RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
928 }
929 }
930
931 /*
932 * Setup our top alloc point, that is top of RMO or top of
933 * segment 0 when running non-LPAR.
934 * Some RS64 machines have buggy firmware where claims up at
935 * 1GB fail. Cap at 768MB as a workaround.
936 * Since 768MB is plenty of room, and we need to cap to something
937 * reasonable on 32-bit, cap at 768MB on all machines.
938 */
939 if (!RELOC(rmo_top))
940 RELOC(rmo_top) = RELOC(ram_top);
941 RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
942 RELOC(alloc_top) = RELOC(rmo_top);
943
944 prom_printf("memory layout at init:\n");
945 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
946 prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
947 prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
948 prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
949 prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
950 prom_printf(" ram_top : %x\n", RELOC(ram_top));
951}
952
953
954/*
955 * Allocate room for and instantiate RTAS
956 */
957static void __init prom_instantiate_rtas(void)
958{
959 phandle rtas_node;
960 ihandle rtas_inst;
961 u32 base, entry = 0;
962 u32 size = 0;
963
964 prom_debug("prom_instantiate_rtas: start...\n");
965
966 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
967 prom_debug("rtas_node: %x\n", rtas_node);
968 if (!PHANDLE_VALID(rtas_node))
969 return;
970
971 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
972 if (size == 0)
973 return;
974
975 base = alloc_down(size, PAGE_SIZE, 0);
976 if (base == 0) {
977 prom_printf("RTAS allocation failed !\n");
978 return;
979 }
980
981 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
982 if (!IHANDLE_VALID(rtas_inst)) {
983 prom_printf("opening rtas package failed");
984 return;
985 }
986
987 prom_printf("instantiating rtas at 0x%x ...", base);
988
989 if (call_prom_ret("call-method", 3, 2, &entry,
990 ADDR("instantiate-rtas"),
991 rtas_inst, base) == PROM_ERROR
992 || entry == 0) {
993 prom_printf(" failed\n");
994 return;
995 }
996 prom_printf(" done\n");
997
998 reserve_mem(base, size);
999
1000 prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
1001 prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
1002
1003 prom_debug("rtas base = 0x%x\n", base);
1004 prom_debug("rtas entry = 0x%x\n", entry);
1005 prom_debug("rtas size = 0x%x\n", (long)size);
1006
1007 prom_debug("prom_instantiate_rtas: end...\n");
1008}
1009
1010#ifdef CONFIG_PPC64
1011/*
1012 * Allocate room for and initialize TCE tables
1013 */
1014static void __init prom_initialize_tce_table(void)
1015{
1016 phandle node;
1017 ihandle phb_node;
1018 char compatible[64], type[64], model[64];
1019 char *path = RELOC(prom_scratch);
1020 u64 base, align;
1021 u32 minalign, minsize;
1022 u64 tce_entry, *tce_entryp;
1023 u64 local_alloc_top, local_alloc_bottom;
1024 u64 i;
1025
1026 if (RELOC(ppc64_iommu_off))
1027 return;
1028
1029 prom_debug("starting prom_initialize_tce_table\n");
1030
1031 /* Cache current top of allocs so we reserve a single block */
1032 local_alloc_top = RELOC(alloc_top_high);
1033 local_alloc_bottom = local_alloc_top;
1034
1035 /* Search all nodes looking for PHBs. */
1036 for (node = 0; prom_next_node(&node); ) {
1037 compatible[0] = 0;
1038 type[0] = 0;
1039 model[0] = 0;
1040 prom_getprop(node, "compatible",
1041 compatible, sizeof(compatible));
1042 prom_getprop(node, "device_type", type, sizeof(type));
1043 prom_getprop(node, "model", model, sizeof(model));
1044
1045 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1046 continue;
1047
1048 /* Keep the old logic intact to avoid regressions. */
1049 if (compatible[0] != 0) {
1050 if ((strstr(compatible, RELOC("python")) == NULL) &&
1051 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
1052 (strstr(compatible, RELOC("Winnipeg")) == NULL))
1053 continue;
1054 } else if (model[0] != 0) {
1055 if ((strstr(model, RELOC("ython")) == NULL) &&
1056 (strstr(model, RELOC("peedwagon")) == NULL) &&
1057 (strstr(model, RELOC("innipeg")) == NULL))
1058 continue;
1059 }
1060
1061 if (prom_getprop(node, "tce-table-minalign", &minalign,
1062 sizeof(minalign)) == PROM_ERROR)
1063 minalign = 0;
1064 if (prom_getprop(node, "tce-table-minsize", &minsize,
1065 sizeof(minsize)) == PROM_ERROR)
1066 minsize = 4UL << 20;
1067
1068 /*
1069 * Even though we read what OF wants, we just set the table
1070 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1071 * By doing this, we avoid the pitfalls of trying to DMA to
1072 * MMIO space and the DMA alias hole.
1073 *
1074 * On POWER4, firmware sets the TCE region by assuming
1075 * each TCE table is 8MB. Using this memory for anything
1076 * else will impact performance, so we always allocate 8MB.
1077 * Anton
1078 */
1079 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
1080 minsize = 8UL << 20;
1081 else
1082 minsize = 4UL << 20;
1083
1084 /* Align to the greater of the align or size */
1085 align = max(minalign, minsize);
1086 base = alloc_down(minsize, align, 1);
1087 if (base == 0)
1088 prom_panic("ERROR, cannot find space for TCE table.\n");
1089 if (base < local_alloc_bottom)
1090 local_alloc_bottom = base;
1091
1092 /* Save away the TCE table attributes for later use. */
1093 prom_setprop(node, "linux,tce-base", &base, sizeof(base));
1094 prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
1095
1096 /* It seems OF doesn't null-terminate the path :-( */
1097 memset(path, 0, PROM_SCRATCH_SIZE);
1098 /* Call OF to setup the TCE hardware */
1099 if (call_prom("package-to-path", 3, 1, node,
1100 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1101 prom_printf("package-to-path failed\n");
1102 }
1103
1104 prom_debug("TCE table: %s\n", path);
1105 prom_debug("\tnode = 0x%x\n", node);
1106 prom_debug("\tbase = 0x%x\n", base);
1107 prom_debug("\tsize = 0x%x\n", minsize);
1108
1109 /* Initialize the table to have a one-to-one mapping
1110 * over the allocated size.
1111 */
1112 tce_entryp = (unsigned long *)base;
1113 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1114 tce_entry = (i << PAGE_SHIFT);
1115 tce_entry |= 0x3;
1116 *tce_entryp = tce_entry;
1117 }
1118
1119 prom_printf("opening PHB %s", path);
1120 phb_node = call_prom("open", 1, 1, path);
1121 if (phb_node == 0)
1122 prom_printf("... failed\n");
1123 else
1124 prom_printf("... done\n");
1125
1126 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1127 phb_node, -1, minsize,
1128 (u32) base, (u32) (base >> 32));
1129 call_prom("close", 1, 0, phb_node);
1130 }
1131
1132 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1133
1134 if (RELOC(prom_memory_limit)) {
1135 /*
1136 * We align the start to a 16MB boundary so we can map
1137 * the TCE area using large pages if possible.
1138 * The end should be the top of RAM so no need to align it.
1139 */
1140 RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
1141 0x1000000);
1142 RELOC(prom_tce_alloc_end) = local_alloc_top;
1143 }
1144
1145 /* Flag the first invalid entry */
1146 prom_debug("ending prom_initialize_tce_table\n");
1147}
1148#endif
1149
1150/*
1151 * With CHRP SMP we need to use the OF to start the other processors.
1152 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1153 * so we have to put the processors into a holding pattern controlled
1154 * by the kernel (not OF) before we destroy the OF.
1155 *
1156 * This uses a chunk of low memory, puts some holding pattern
1157 * code there and sends the other processors off to there until
1158 * smp_boot_cpus tells them to do something. The holding pattern
1159 * checks that address until its cpu # is there, when it is that
1160 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1161 * of setting those values.
1162 *
1163 * We also use physical address 0x4 here to tell when a cpu
1164 * is in its holding pattern code.
1165 *
1166 * -- Cort
1167 */
1168extern void __secondary_hold(void);
1169extern unsigned long __secondary_hold_spinloop;
1170extern unsigned long __secondary_hold_acknowledge;
1171
1172/*
1173 * We want to reference the copy of __secondary_hold_* in the
1174 * 0 - 0x100 address range
1175 */
1176#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1177
1178static void __init prom_hold_cpus(void)
1179{
1180 unsigned long i;
1181 unsigned int reg;
1182 phandle node;
1183 char type[64];
1184 int cpuid = 0;
1185 unsigned int interrupt_server[MAX_CPU_THREADS];
1186 unsigned int cpu_threads, hw_cpu_num;
1187 int propsize;
1188 struct prom_t *_prom = &RELOC(prom);
1189 unsigned long *spinloop
1190 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1191 unsigned long *acknowledge
1192 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1193#ifdef CONFIG_PPC64
1194 /* __secondary_hold is actually a descriptor, not the text address */
1195 unsigned long secondary_hold
1196 = __pa(*PTRRELOC((unsigned long *)__secondary_hold));
1197#else
1198 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1199#endif
1200
1201 prom_debug("prom_hold_cpus: start...\n");
1202 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1203 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1204 prom_debug(" 1) acknowledge = 0x%x\n",
1205 (unsigned long)acknowledge);
1206 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1207 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1208
1209 /* Set the common spinloop variable, so all of the secondary cpus
1210 * will block when they are awakened from their OF spinloop.
1211 * This must occur for both SMP and non SMP kernels, since OF will
1212 * be trashed when we move the kernel.
1213 */
1214 *spinloop = 0;
1215
1216#ifdef CONFIG_HMT
1217 for (i = 0; i < NR_CPUS; i++)
1218 RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
1219#endif
1220 /* look for cpus */
1221 for (node = 0; prom_next_node(&node); ) {
1222 type[0] = 0;
1223 prom_getprop(node, "device_type", type, sizeof(type));
1224 if (strcmp(type, RELOC("cpu")) != 0)
1225 continue;
1226
1227 /* Skip non-configured cpus. */
1228 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1229 if (strcmp(type, RELOC("okay")) != 0)
1230 continue;
1231
1232 reg = -1;
1233 prom_getprop(node, "reg", &reg, sizeof(reg));
1234
1235 prom_debug("\ncpuid = 0x%x\n", cpuid);
1236 prom_debug("cpu hw idx = 0x%x\n", reg);
1237
1238 /* Init the acknowledge var which will be reset by
1239 * the secondary cpu when it awakens from its OF
1240 * spinloop.
1241 */
1242 *acknowledge = (unsigned long)-1;
1243
1244 propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
1245 &interrupt_server,
1246 sizeof(interrupt_server));
1247 if (propsize < 0) {
1248 /* no property. old hardware has no SMT */
1249 cpu_threads = 1;
1250 interrupt_server[0] = reg; /* fake it with phys id */
1251 } else {
1252 /* We have a threaded processor */
1253 cpu_threads = propsize / sizeof(u32);
1254 if (cpu_threads > MAX_CPU_THREADS) {
1255 prom_printf("SMT: too many threads!\n"
1256 "SMT: found %x, max is %x\n",
1257 cpu_threads, MAX_CPU_THREADS);
1258 cpu_threads = 1; /* ToDo: panic? */
1259 }
1260 }
1261
1262 hw_cpu_num = interrupt_server[0];
1263 if (hw_cpu_num != _prom->cpu) {
1264 /* Primary Thread of non-boot cpu */
1265 prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
1266 call_prom("start-cpu", 3, 0, node,
1267 secondary_hold, reg);
1268
1269 for (i = 0; (i < 100000000) &&
1270 (*acknowledge == ((unsigned long)-1)); i++ )
1271 mb();
1272
1273 if (*acknowledge == reg)
1274 prom_printf("done\n");
1275 else
1276 prom_printf("failed: %x\n", *acknowledge);
1277 }
1278#ifdef CONFIG_SMP
1279 else
1280 prom_printf("%x : boot cpu %x\n", cpuid, reg);
1281#endif /* CONFIG_SMP */
1282
1283 /* Reserve cpu #s for secondary threads. They start later. */
1284 cpuid += cpu_threads;
1285 }
1286#ifdef CONFIG_HMT
1287 /* Only enable HMT on processors that provide support. */
1288 if (__is_processor(PV_PULSAR) ||
1289 __is_processor(PV_ICESTAR) ||
1290 __is_processor(PV_SSTAR)) {
1291 prom_printf(" starting secondary threads\n");
1292
1293 for (i = 0; i < NR_CPUS; i += 2) {
1294 if (!cpu_online(i))
1295 continue;
1296
1297 if (i == 0) {
1298 unsigned long pir = mfspr(SPRN_PIR);
1299 if (__is_processor(PV_PULSAR)) {
1300 RELOC(hmt_thread_data)[i].pir =
1301 pir & 0x1f;
1302 } else {
1303 RELOC(hmt_thread_data)[i].pir =
1304 pir & 0x3ff;
1305 }
1306 }
1307 }
1308 } else {
1309 prom_printf("Processor is not HMT capable\n");
1310 }
1311#endif
1312
1313 if (cpuid > NR_CPUS)
1314 prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
1315 ") exceeded: ignoring extras\n");
1316
1317 prom_debug("prom_hold_cpus: end...\n");
1318}
1319
1320
1321static void __init prom_init_client_services(unsigned long pp)
1322{
1323 struct prom_t *_prom = &RELOC(prom);
1324
1325 /* Get a handle to the prom entry point before anything else */
1326 RELOC(prom_entry) = pp;
1327
1328 /* get a handle for the stdout device */
1329 _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1330 if (!PHANDLE_VALID(_prom->chosen))
1331 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1332
1333 /* get device tree root */
1334 _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
1335 if (!PHANDLE_VALID(_prom->root))
1336 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1337
1338 _prom->mmumap = 0;
1339}
1340
1341#ifdef CONFIG_PPC32
1342/*
1343 * For really old powermacs, we need to map things we claim.
1344 * For that, we need the ihandle of the mmu.
1345 */
1346static void __init prom_find_mmu(void)
1347{
1348 struct prom_t *_prom = &RELOC(prom);
1349 phandle oprom;
1350 char version[64];
1351
1352 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1353 if (!PHANDLE_VALID(oprom))
1354 return;
1355 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1356 return;
1357 version[sizeof(version) - 1] = 0;
1358 prom_printf("OF version is '%s'\n", version);
1359 /* XXX might need to add other versions here */
1360 if (strcmp(version, "Open Firmware, 1.0.5") != 0)
1361 return;
1362 prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
1363 sizeof(_prom->mmumap));
1364}
1365#else
1366#define prom_find_mmu()
1367#endif
1368
1369static void __init prom_init_stdout(void)
1370{
1371 struct prom_t *_prom = &RELOC(prom);
1372 char *path = RELOC(of_stdout_device);
1373 char type[16];
1374 u32 val;
1375
1376 if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
1377 prom_panic("cannot find stdout");
1378
1379 _prom->stdout = val;
1380
1381 /* Get the full OF pathname of the stdout device */
1382 memset(path, 0, 256);
1383 call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
1384 val = call_prom("instance-to-package", 1, 1, _prom->stdout);
1385 prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
1386 prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
1387 prom_setprop(_prom->chosen, "linux,stdout-path",
1388 RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
1389
1390 /* If it's a display, note it */
1391 memset(type, 0, sizeof(type));
1392 prom_getprop(val, "device_type", type, sizeof(type));
1393 if (strcmp(type, RELOC("display")) == 0)
1394 prom_setprop(val, "linux,boot-display", NULL, 0);
1395}
1396
1397static void __init prom_close_stdin(void)
1398{
1399 struct prom_t *_prom = &RELOC(prom);
1400 ihandle val;
1401
1402 if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
1403 call_prom("close", 1, 0, val);
1404}
1405
1406static int __init prom_find_machine_type(void)
1407{
1408 struct prom_t *_prom = &RELOC(prom);
1409 char compat[256];
1410 int len, i = 0;
1411 phandle rtas;
1412
1413 len = prom_getprop(_prom->root, "compatible",
1414 compat, sizeof(compat)-1);
1415 if (len > 0) {
1416 compat[len] = 0;
1417 while (i < len) {
1418 char *p = &compat[i];
1419 int sl = strlen(p);
1420 if (sl == 0)
1421 break;
1422 if (strstr(p, RELOC("Power Macintosh")) ||
1423 strstr(p, RELOC("MacRISC")))
1424 return PLATFORM_POWERMAC;
1425#ifdef CONFIG_PPC64
1426 if (strstr(p, RELOC("Momentum,Maple")))
1427 return PLATFORM_MAPLE;
1428#endif
1429 i += sl + 1;
1430 }
1431 }
1432#ifdef CONFIG_PPC64
1433 /* Default to pSeries. We need to know if we are running LPAR */
1434 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1435 if (PHANDLE_VALID(rtas)) {
1436 int x = prom_getproplen(rtas, "ibm,hypertas-functions");
1437 if (x != PROM_ERROR) {
1438 prom_printf("Hypertas detected, assuming LPAR !\n");
1439 return PLATFORM_PSERIES_LPAR;
1440 }
1441 }
1442 return PLATFORM_PSERIES;
1443#else
1444 return PLATFORM_CHRP;
1445#endif
1446}
1447
1448static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1449{
1450 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1451}
1452
1453/*
1454 * If we have a display that we don't know how to drive,
1455 * we will want to try to execute OF's open method for it
1456 * later. However, OF will probably fall over if we do that
1457 * after we've taken over the MMU.
1458 * So we check whether we will need to open the display,
1459 * and if so, open it now.
1460 */
1461static void __init prom_check_displays(void)
1462{
1463 char type[16], *path;
1464 phandle node;
1465 ihandle ih;
1466 int i;
1467
1468 static unsigned char default_colors[] = {
1469 0x00, 0x00, 0x00,
1470 0x00, 0x00, 0xaa,
1471 0x00, 0xaa, 0x00,
1472 0x00, 0xaa, 0xaa,
1473 0xaa, 0x00, 0x00,
1474 0xaa, 0x00, 0xaa,
1475 0xaa, 0xaa, 0x00,
1476 0xaa, 0xaa, 0xaa,
1477 0x55, 0x55, 0x55,
1478 0x55, 0x55, 0xff,
1479 0x55, 0xff, 0x55,
1480 0x55, 0xff, 0xff,
1481 0xff, 0x55, 0x55,
1482 0xff, 0x55, 0xff,
1483 0xff, 0xff, 0x55,
1484 0xff, 0xff, 0xff
1485 };
1486 const unsigned char *clut;
1487
1488 prom_printf("Looking for displays\n");
1489 for (node = 0; prom_next_node(&node); ) {
1490 memset(type, 0, sizeof(type));
1491 prom_getprop(node, "device_type", type, sizeof(type));
1492 if (strcmp(type, RELOC("display")) != 0)
1493 continue;
1494
1495 /* It seems OF doesn't null-terminate the path :-( */
1496 path = RELOC(prom_scratch);
1497 memset(path, 0, PROM_SCRATCH_SIZE);
1498
1499 /*
1500 * leave some room at the end of the path for appending extra
1501 * arguments
1502 */
1503 if (call_prom("package-to-path", 3, 1, node, path,
1504 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1505 continue;
1506 prom_printf("found display : %s, opening ... ", path);
1507
1508 ih = call_prom("open", 1, 1, path);
1509 if (ih == 0) {
1510 prom_printf("failed\n");
1511 continue;
1512 }
1513
1514 /* Success */
1515 prom_printf("done\n");
1516 prom_setprop(node, "linux,opened", NULL, 0);
1517
1518 /* Setup a usable color table when the appropriate
1519 * method is available. Should update this to set-colors */
1520 clut = RELOC(default_colors);
1521 for (i = 0; i < 32; i++, clut += 3)
1522 if (prom_set_color(ih, i, clut[0], clut[1],
1523 clut[2]) != 0)
1524 break;
1525
1526#ifdef CONFIG_LOGO_LINUX_CLUT224
1527 clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
1528 for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
1529 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1530 clut[2]) != 0)
1531 break;
1532#endif /* CONFIG_LOGO_LINUX_CLUT224 */
1533 }
1534}
1535
1536
1537/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
1538static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
1539 unsigned long needed, unsigned long align)
1540{
1541 void *ret;
1542
1543 *mem_start = _ALIGN(*mem_start, align);
1544 while ((*mem_start + needed) > *mem_end) {
1545 unsigned long room, chunk;
1546
1547 prom_debug("Chunk exhausted, claiming more at %x...\n",
1548 RELOC(alloc_bottom));
1549 room = RELOC(alloc_top) - RELOC(alloc_bottom);
1550 if (room > DEVTREE_CHUNK_SIZE)
1551 room = DEVTREE_CHUNK_SIZE;
1552 if (room < PAGE_SIZE)
1553 prom_panic("No memory for flatten_device_tree (no room)");
1554 chunk = alloc_up(room, 0);
1555 if (chunk == 0)
1556 prom_panic("No memory for flatten_device_tree (claim failed)");
1557 *mem_end = RELOC(alloc_top);
1558 }
1559
1560 ret = (void *)*mem_start;
1561 *mem_start += needed;
1562
1563 return ret;
1564}
1565
1566#define dt_push_token(token, mem_start, mem_end) \
1567 do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
1568
1569static unsigned long __init dt_find_string(char *str)
1570{
1571 char *s, *os;
1572
1573 s = os = (char *)RELOC(dt_string_start);
1574 s += 4;
1575 while (s < (char *)RELOC(dt_string_end)) {
1576 if (strcmp(s, str) == 0)
1577 return s - os;
1578 s += strlen(s) + 1;
1579 }
1580 return 0;
1581}
1582
1583/*
1584 * The Open Firmware 1275 specification states properties must be 31 bytes or
1585 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
1586 */
1587#define MAX_PROPERTY_NAME 64
1588
1589static void __init scan_dt_build_strings(phandle node,
1590 unsigned long *mem_start,
1591 unsigned long *mem_end)
1592{
1593 char *prev_name, *namep, *sstart;
1594 unsigned long soff;
1595 phandle child;
1596
1597 sstart = (char *)RELOC(dt_string_start);
1598
1599 /* get and store all property names */
1600 prev_name = RELOC("");
1601 for (;;) {
1602 /* 64 is max len of name including nul. */
1603 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
1604 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
1605			/* No more properties: unwind alloc */
1606 *mem_start = (unsigned long)namep;
1607 break;
1608 }
1609
1610 /* skip "name" */
1611 if (strcmp(namep, RELOC("name")) == 0) {
1612 *mem_start = (unsigned long)namep;
1613 prev_name = RELOC("name");
1614 continue;
1615 }
1616 /* get/create string entry */
1617 soff = dt_find_string(namep);
1618 if (soff != 0) {
1619 *mem_start = (unsigned long)namep;
1620 namep = sstart + soff;
1621 } else {
1622 /* Trim off some if we can */
1623 *mem_start = (unsigned long)namep + strlen(namep) + 1;
1624 RELOC(dt_string_end) = *mem_start;
1625 }
1626 prev_name = namep;
1627 }
1628
1629 /* do all our children */
1630 child = call_prom("child", 1, 1, node);
1631 while (child != 0) {
1632 scan_dt_build_strings(child, mem_start, mem_end);
1633 child = call_prom("peer", 1, 1, child);
1634 }
1635}
1636
1637static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1638 unsigned long *mem_end)
1639{
1640 phandle child;
1641 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
1642 unsigned long soff;
1643 unsigned char *valp;
1644 static char pname[MAX_PROPERTY_NAME];
1645 int l, room;
1646
1647 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
1648
1649 /* get the node's full name */
1650 namep = (char *)*mem_start;
1651 room = *mem_end - *mem_start;
1652 if (room > 255)
1653 room = 255;
1654 l = call_prom("package-to-path", 3, 1, node, namep, room);
1655 if (l >= 0) {
1656 /* Didn't fit? Get more room. */
1657 if (l >= room) {
1658 if (l >= *mem_end - *mem_start)
1659 namep = make_room(mem_start, mem_end, l+1, 1);
1660 call_prom("package-to-path", 3, 1, node, namep, l);
1661 }
1662 namep[l] = '\0';
1663
1664 /* Fixup an Apple bug where they have bogus \0 chars in the
1665 * middle of the path in some properties, and extract
1666 * the unit name (everything after the last '/').
1667 */
1668 for (lp = p = namep, ep = namep + l; p < ep; p++) {
1669 if (*p == '/')
1670 lp = namep;
1671 else if (*p != 0)
1672 *lp++ = *p;
1673 }
1674 *lp = 0;
1675 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
1676 }
1677
1678 /* get it again for debugging */
1679 path = RELOC(prom_scratch);
1680 memset(path, 0, PROM_SCRATCH_SIZE);
1681 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1682
1683 /* get and store all properties */
1684 prev_name = RELOC("");
1685 sstart = (char *)RELOC(dt_string_start);
1686 for (;;) {
1687 if (call_prom("nextprop", 3, 1, node, prev_name,
1688 RELOC(pname)) != 1)
1689 break;
1690
1691 /* skip "name" */
1692 if (strcmp(RELOC(pname), RELOC("name")) == 0) {
1693 prev_name = RELOC("name");
1694 continue;
1695 }
1696
1697 /* find string offset */
1698 soff = dt_find_string(RELOC(pname));
1699 if (soff == 0) {
1700 prom_printf("WARNING: Can't find string index for"
1701 " <%s>, node %s\n", RELOC(pname), path);
1702 break;
1703 }
1704 prev_name = sstart + soff;
1705
1706 /* get length */
1707 l = call_prom("getproplen", 2, 1, node, RELOC(pname));
1708
1709 /* sanity checks */
1710 if (l == PROM_ERROR)
1711 continue;
1712 if (l > MAX_PROPERTY_LENGTH) {
1713 prom_printf("WARNING: ignoring large property ");
1714 /* It seems OF doesn't null-terminate the path :-( */
1715 prom_printf("[%s] ", path);
1716 prom_printf("%s length 0x%x\n", RELOC(pname), l);
1717 continue;
1718 }
1719
1720 /* push property head */
1721 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1722 dt_push_token(l, mem_start, mem_end);
1723 dt_push_token(soff, mem_start, mem_end);
1724
1725 /* push property content */
1726 valp = make_room(mem_start, mem_end, l, 4);
1727 call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
1728 *mem_start = _ALIGN(*mem_start, 4);
1729 }
1730
1731 /* Add a "linux,phandle" property. */
1732 soff = dt_find_string(RELOC("linux,phandle"));
1733 if (soff == 0)
1734 prom_printf("WARNING: Can't find string index for"
1735 " <linux-phandle> node %s\n", path);
1736 else {
1737 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1738 dt_push_token(4, mem_start, mem_end);
1739 dt_push_token(soff, mem_start, mem_end);
1740 valp = make_room(mem_start, mem_end, 4, 4);
1741 *(u32 *)valp = node;
1742 }
1743
1744 /* do all our children */
1745 child = call_prom("child", 1, 1, node);
1746 while (child != 0) {
1747 scan_dt_build_struct(child, mem_start, mem_end);
1748 child = call_prom("peer", 1, 1, child);
1749 }
1750
1751 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
1752}
1753
1754static void __init flatten_device_tree(void)
1755{
1756 phandle root;
1757 unsigned long mem_start, mem_end, room;
1758 struct boot_param_header *hdr;
1759 struct prom_t *_prom = &RELOC(prom);
1760 char *namep;
1761 u64 *rsvmap;
1762
1763 /*
1764 * Check how much room we have between alloc top & bottom (+/- a
1765	 * few pages), crop to 4Mb, as this is our "chunk" size
1766 */
1767 room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
1768 if (room > DEVTREE_CHUNK_SIZE)
1769 room = DEVTREE_CHUNK_SIZE;
1770 prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
1771
1772 /* Now try to claim that */
1773 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
1774 if (mem_start == 0)
1775 prom_panic("Can't allocate initial device-tree chunk\n");
1776 mem_end = RELOC(alloc_top);
1777
1778 /* Get root of tree */
1779 root = call_prom("peer", 1, 1, (phandle)0);
1780 if (root == (phandle)0)
1781 prom_panic ("couldn't get device tree root\n");
1782
1783 /* Build header and make room for mem rsv map */
1784 mem_start = _ALIGN(mem_start, 4);
1785 hdr = make_room(&mem_start, &mem_end,
1786 sizeof(struct boot_param_header), 4);
1787 RELOC(dt_header_start) = (unsigned long)hdr;
1788 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
1789
1790 /* Start of strings */
1791 mem_start = PAGE_ALIGN(mem_start);
1792 RELOC(dt_string_start) = mem_start;
1793 mem_start += 4; /* hole */
1794
1795 /* Add "linux,phandle" in there, we'll need it */
1796 namep = make_room(&mem_start, &mem_end, 16, 1);
1797 strcpy(namep, RELOC("linux,phandle"));
1798 mem_start = (unsigned long)namep + strlen(namep) + 1;
1799
1800 /* Build string array */
1801 prom_printf("Building dt strings...\n");
1802 scan_dt_build_strings(root, &mem_start, &mem_end);
1803 RELOC(dt_string_end) = mem_start;
1804
1805 /* Build structure */
1806 mem_start = PAGE_ALIGN(mem_start);
1807 RELOC(dt_struct_start) = mem_start;
1808 prom_printf("Building dt structure...\n");
1809 scan_dt_build_struct(root, &mem_start, &mem_end);
1810 dt_push_token(OF_DT_END, &mem_start, &mem_end);
1811 RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
1812
1813 /* Finish header */
1814 hdr->boot_cpuid_phys = _prom->cpu;
1815 hdr->magic = OF_DT_HEADER;
1816 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
1817 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
1818 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
1819 hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
1820 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
1821 hdr->version = OF_DT_VERSION;
1822 /* Version 16 is not backward compatible */
1823 hdr->last_comp_version = 0x10;
1824
1825	/* Reserve the whole thing and copy the reserve map in; we
1826 * also bump mem_reserve_cnt to cause further reservations to
1827 * fail since it's too late.
1828 */
1829 reserve_mem(RELOC(dt_header_start), hdr->totalsize);
1830 memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
1831
1832#ifdef DEBUG_PROM
1833 {
1834 int i;
1835 prom_printf("reserved memory map:\n");
1836 for (i = 0; i < RELOC(mem_reserve_cnt); i++)
1837 prom_printf(" %x - %x\n",
1838 RELOC(mem_reserve_map)[i].base,
1839 RELOC(mem_reserve_map)[i].size);
1840 }
1841#endif
1842 RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
1843
1844 prom_printf("Device tree strings 0x%x -> 0x%x\n",
1845 RELOC(dt_string_start), RELOC(dt_string_end));
1846 prom_printf("Device tree struct 0x%x -> 0x%x\n",
1847 RELOC(dt_struct_start), RELOC(dt_struct_end));
1848
1849}
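
The structure block assembled above is a flat stream of 32-bit tokens: OF_DT_BEGIN_NODE followed by the node's name, OF_DT_PROP followed by the value length and an offset into the strings block, OF_DT_END_NODE, and a closing OF_DT_END. The sketch below walks such a block to illustrate the layout; it is not part of this merge, and the token values are assumptions mirroring the asm/prom.h definitions that prom_init.c relies on.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OF_DT_BEGIN_NODE 0x1   /* assumed to match asm/prom.h */
#define OF_DT_END_NODE   0x2   /* assumed */
#define OF_DT_PROP       0x3   /* assumed */
#define OF_DT_END        0x9   /* assumed */

/* Round a pointer up to the next 4-byte boundary, as make_room()/_ALIGN do. */
#define ALIGN4(p) ((const char *)(((uintptr_t)(p) + 3) & ~(uintptr_t)3))

/* Walk a structure block laid out by scan_dt_build_struct(); 'strings'
 * points at the strings block built by scan_dt_build_strings(). */
static void walk_dt_struct(const char *p, const char *strings)
{
	uint32_t tok;
	int depth = 0;

	while ((tok = *(const uint32_t *)p) != OF_DT_END) {
		p += 4;
		if (tok == OF_DT_BEGIN_NODE) {
			printf("%*snode %s\n", 2 * depth, "", p);
			p = ALIGN4(p + strlen(p) + 1);
			depth++;
		} else if (tok == OF_DT_PROP) {
			uint32_t len  = ((const uint32_t *)p)[0];
			uint32_t soff = ((const uint32_t *)p)[1];
			printf("%*s  prop %s, %u bytes\n", 2 * depth, "",
			       strings + soff, len);
			p = ALIGN4(p + 8 + len);
		} else if (tok == OF_DT_END_NODE) {
			depth--;
		}
	}
}

Since the block is produced on the boot CPU, the tokens are read here in native (big-endian) byte order; a consumer on another host would need to byte-swap.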
1850
1851
1852static void __init fixup_device_tree(void)
1853{
1854#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
1855 phandle u3, i2c, mpic;
1856 u32 u3_rev;
1857 u32 interrupts[2];
1858 u32 parent;
1859
1860 /* Some G5s have a missing interrupt definition, fix it up here */
1861 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
1862 if (!PHANDLE_VALID(u3))
1863 return;
1864 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
1865 if (!PHANDLE_VALID(i2c))
1866 return;
1867 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
1868 if (!PHANDLE_VALID(mpic))
1869 return;
1870
1871 /* check if proper rev of u3 */
1872 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
1873 == PROM_ERROR)
1874 return;
1875 if (u3_rev != 0x35 && u3_rev != 0x37)
1876 return;
1877 /* does it need fixup ? */
1878 if (prom_getproplen(i2c, "interrupts") > 0)
1879 return;
1880
1881 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
1882
1883 /* interrupt on this revision of u3 is number 0 and level */
1884 interrupts[0] = 0;
1885 interrupts[1] = 1;
1886 prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
1887 parent = (u32)mpic;
1888 prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
1889#endif
1890}
1891
1892
1893static void __init prom_find_boot_cpu(void)
1894{
1895 struct prom_t *_prom = &RELOC(prom);
1896 u32 getprop_rval;
1897 ihandle prom_cpu;
1898 phandle cpu_pkg;
1899
1900 _prom->cpu = 0;
1901 if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
1902 return;
1903
1904 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
1905
1906 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
1907 _prom->cpu = getprop_rval;
1908
1909 prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
1910}
1911
1912static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
1913{
1914#ifdef CONFIG_BLK_DEV_INITRD
1915 struct prom_t *_prom = &RELOC(prom);
1916
1917 if (r3 && r4 && r4 != 0xdeadbeef) {
1918 unsigned long val;
1919
1920 RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
1921 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
1922
1923 val = RELOC(prom_initrd_start);
1924 prom_setprop(_prom->chosen, "linux,initrd-start", &val,
1925 sizeof(val));
1926 val = RELOC(prom_initrd_end);
1927 prom_setprop(_prom->chosen, "linux,initrd-end", &val,
1928 sizeof(val));
1929
1930 reserve_mem(RELOC(prom_initrd_start),
1931 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
1932
1933 prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
1934 prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
1935 }
1936#endif /* CONFIG_BLK_DEV_INITRD */
1937}
1938
1939/*
1940 * We enter here early on, when the Open Firmware prom is still
1941 * handling exceptions and managing the MMU hash table for us.
1942 */
1943
1944unsigned long __init prom_init(unsigned long r3, unsigned long r4,
1945 unsigned long pp,
1946 unsigned long r6, unsigned long r7)
1947{
1948 struct prom_t *_prom;
1949 unsigned long hdr;
1950 u32 getprop_rval;
1951 unsigned long offset = reloc_offset();
1952
1953#ifdef CONFIG_PPC32
1954 reloc_got2(offset);
1955#endif
1956
1957 _prom = &RELOC(prom);
1958
1959 /*
1960 * First zero the BSS
1961 */
1962 memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);
1963
1964 /*
1965 * Init interface to Open Firmware, get some node references,
1966 * like /chosen
1967 */
1968 prom_init_client_services(pp);
1969
1970 /*
1971 * Init prom stdout device
1972 */
1973 prom_init_stdout();
1974
1975 /*
1976 * See if this OF is old enough that we need to do explicit maps
1977 */
1978 prom_find_mmu();
1979
1980 /*
1981 * Check for an initrd
1982 */
1983 prom_check_initrd(r3, r4);
1984
1985 /*
1986 * Get default machine type. At this point, we do not differentiate
1987 * between pSeries SMP and pSeries LPAR
1988 */
1989 RELOC(of_platform) = prom_find_machine_type();
1990 getprop_rval = RELOC(of_platform);
1991 prom_setprop(_prom->chosen, "linux,platform",
1992 &getprop_rval, sizeof(getprop_rval));
1993
1994#ifdef CONFIG_PPC_PSERIES
1995 /*
1996 * On pSeries, inform the firmware about our capabilities
1997 */
1998 if (RELOC(of_platform) & PLATFORM_PSERIES)
1999 prom_send_capabilities();
2000#endif
2001
2002 /*
2003 * On pSeries and BPA, copy the CPU hold code
2004 */
2005 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2006 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
2007
2008 /*
2009 * Do early parsing of command line
2010 */
2011 early_cmdline_parse();
2012
2013 /*
2014 * Initialize memory management within prom_init
2015 */
2016 prom_init_mem();
2017
2018 /*
2019 * Determine which cpu is actually running right _now_
2020 */
2021 prom_find_boot_cpu();
2022
2023 /*
2024 * Initialize display devices
2025 */
2026 prom_check_displays();
2027
2028#ifdef CONFIG_PPC64
2029 /*
2030	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2031	 * that uses the allocator, since we need to make sure we get the top of
2032	 * memory available for us here...
2033 */
2034 if (RELOC(of_platform) == PLATFORM_PSERIES)
2035 prom_initialize_tce_table();
2036#endif
2037
2038 /*
2039	 * On non-powermacs, try to instantiate RTAS and put all CPUs
2040 * in spin-loops. PowerMacs don't have a working RTAS and use
2041 * a different way to spin CPUs
2042 */
2043 if (RELOC(of_platform) != PLATFORM_POWERMAC) {
2044 prom_instantiate_rtas();
2045 prom_hold_cpus();
2046 }
2047
2048 /*
2049	 * Fill in some info for use by the kernel later on
2050 */
2051 if (RELOC(prom_memory_limit))
2052 prom_setprop(_prom->chosen, "linux,memory-limit",
2053 &RELOC(prom_memory_limit),
2054 sizeof(prom_memory_limit));
2055#ifdef CONFIG_PPC64
2056 if (RELOC(ppc64_iommu_off))
2057 prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
2058
2059 if (RELOC(iommu_force_on))
2060 prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
2061
2062 if (RELOC(prom_tce_alloc_start)) {
2063 prom_setprop(_prom->chosen, "linux,tce-alloc-start",
2064 &RELOC(prom_tce_alloc_start),
2065 sizeof(prom_tce_alloc_start));
2066 prom_setprop(_prom->chosen, "linux,tce-alloc-end",
2067 &RELOC(prom_tce_alloc_end),
2068 sizeof(prom_tce_alloc_end));
2069 }
2070#endif
2071
2072 /*
2073 * Fixup any known bugs in the device-tree
2074 */
2075 fixup_device_tree();
2076
2077 /*
2078 * Now finally create the flattened device-tree
2079 */
2080 prom_printf("copying OF device tree ...\n");
2081 flatten_device_tree();
2082
2083 /* in case stdin is USB and still active on IBM machines... */
2084 prom_close_stdin();
2085
2086 /*
2087 * Call OF "quiesce" method to shut down pending DMA's from
2088 * devices etc...
2089 */
2090 prom_printf("Calling quiesce ...\n");
2091 call_prom("quiesce", 0, 0);
2092
2093 /*
2094 * And finally, call the kernel passing it the flattened device
2095 * tree and NULL as r5, thus triggering the new entry point which
2096 * is common to us and kexec
2097 */
2098 hdr = RELOC(dt_header_start);
2099 prom_printf("returning from prom_init\n");
2100 prom_debug("->dt_header_start=0x%x\n", hdr);
2101
2102#ifdef CONFIG_PPC32
2103 reloc_got2(-offset);
2104#endif
2105
2106 __start(hdr, KERNELBASE + offset, 0);
2107
2108 return 0;
2109}
diff --git a/arch/ppc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e7aee4108dea..568ea335d616 100644
--- a/arch/ppc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * arch/ppc/kernel/ptrace.c
3 *
4 * PowerPC version 2 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 4 *
@@ -10,13 +8,14 @@
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 * 9 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com) 10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au). 11 * and Paul Mackerras (paulus@samba.org).
14 * 12 *
15 * This file is subject to the terms and conditions of the GNU General 13 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of 14 * Public License. See the file README.legal in the main directory of
17 * this archive for more details. 15 * this archive for more details.
18 */ 16 */
19 17
18#include <linux/config.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/sched.h> 20#include <linux/sched.h>
22#include <linux/mm.h> 21#include <linux/mm.h>
@@ -29,13 +28,19 @@
29#include <linux/signal.h> 28#include <linux/signal.h>
30#include <linux/seccomp.h> 29#include <linux/seccomp.h>
31#include <linux/audit.h> 30#include <linux/audit.h>
31#ifdef CONFIG_PPC32
32#include <linux/module.h> 32#include <linux/module.h>
33#endif
33 34
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/page.h> 36#include <asm/page.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
37#include <asm/system.h> 38#include <asm/system.h>
39#ifdef CONFIG_PPC64
40#include <asm/ptrace-common.h>
41#endif
38 42
43#ifdef CONFIG_PPC32
39/* 44/*
40 * Set of msr bits that gdb can change on behalf of a process. 45 * Set of msr bits that gdb can change on behalf of a process.
41 */ 46 */
@@ -44,12 +49,14 @@
44#else 49#else
45#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) 50#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
46#endif 51#endif
52#endif /* CONFIG_PPC32 */
47 53
48/* 54/*
49 * does not yet catch signals sent when the child dies. 55 * does not yet catch signals sent when the child dies.
50 * in exit.c or in signal.c. 56 * in exit.c or in signal.c.
51 */ 57 */
52 58
59#ifdef CONFIG_PPC32
53/* 60/*
54 * Get contents of register REGNO in task TASK. 61 * Get contents of register REGNO in task TASK.
55 */ 62 */
@@ -228,6 +235,7 @@ clear_single_step(struct task_struct *task)
228#endif 235#endif
229 } 236 }
230} 237}
238#endif /* CONFIG_PPC32 */
231 239
232/* 240/*
233 * Called by kernel/ptrace.c when detaching.. 241 * Called by kernel/ptrace.c when detaching..
@@ -240,7 +248,7 @@ void ptrace_disable(struct task_struct *child)
240 clear_single_step(child); 248 clear_single_step(child);
241} 249}
242 250
243int sys_ptrace(long request, long pid, long addr, long data) 251long sys_ptrace(long request, long pid, long addr, long data)
244{ 252{
245 struct task_struct *child; 253 struct task_struct *child;
246 int ret = -EPERM; 254 int ret = -EPERM;
@@ -296,25 +304,28 @@ int sys_ptrace(long request, long pid, long addr, long data)
296 } 304 }
297 305
298 /* read the word at location addr in the USER area. */ 306 /* read the word at location addr in the USER area. */
299 /* XXX this will need fixing for 64-bit */
300 case PTRACE_PEEKUSR: { 307 case PTRACE_PEEKUSR: {
301 unsigned long index, tmp; 308 unsigned long index, tmp;
302 309
303 ret = -EIO; 310 ret = -EIO;
304 /* convert to index and check */ 311 /* convert to index and check */
312#ifdef CONFIG_PPC32
305 index = (unsigned long) addr >> 2; 313 index = (unsigned long) addr >> 2;
306 if ((addr & 3) || index > PT_FPSCR 314 if ((addr & 3) || (index > PT_FPSCR)
307 || child->thread.regs == NULL) 315 || (child->thread.regs == NULL))
316#else
317 index = (unsigned long) addr >> 3;
318 if ((addr & 7) || (index > PT_FPSCR))
319#endif
308 break; 320 break;
309 321
322#ifdef CONFIG_PPC32
310 CHECK_FULL_REGS(child->thread.regs); 323 CHECK_FULL_REGS(child->thread.regs);
324#endif
311 if (index < PT_FPR0) { 325 if (index < PT_FPR0) {
312 tmp = get_reg(child, (int) index); 326 tmp = get_reg(child, (int) index);
313 } else { 327 } else {
314 preempt_disable(); 328 flush_fp_to_thread(child);
315 if (child->thread.regs->msr & MSR_FP)
316 giveup_fpu(child);
317 preempt_enable();
318 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0]; 329 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
319 } 330 }
320 ret = put_user(tmp,(unsigned long __user *) data); 331 ret = put_user(tmp,(unsigned long __user *) data);
@@ -325,7 +336,8 @@ int sys_ptrace(long request, long pid, long addr, long data)
325 case PTRACE_POKETEXT: /* write the word at location addr. */ 336 case PTRACE_POKETEXT: /* write the word at location addr. */
326 case PTRACE_POKEDATA: 337 case PTRACE_POKEDATA:
327 ret = 0; 338 ret = 0;
328 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) 339 if (access_process_vm(child, addr, &data, sizeof(data), 1)
340 == sizeof(data))
329 break; 341 break;
330 ret = -EIO; 342 ret = -EIO;
331 break; 343 break;
@@ -336,21 +348,25 @@ int sys_ptrace(long request, long pid, long addr, long data)
336 348
337 ret = -EIO; 349 ret = -EIO;
338 /* convert to index and check */ 350 /* convert to index and check */
351#ifdef CONFIG_PPC32
339 index = (unsigned long) addr >> 2; 352 index = (unsigned long) addr >> 2;
340 if ((addr & 3) || index > PT_FPSCR 353 if ((addr & 3) || (index > PT_FPSCR)
341 || child->thread.regs == NULL) 354 || (child->thread.regs == NULL))
355#else
356 index = (unsigned long) addr >> 3;
357 if ((addr & 7) || (index > PT_FPSCR))
358#endif
342 break; 359 break;
343 360
361#ifdef CONFIG_PPC32
344 CHECK_FULL_REGS(child->thread.regs); 362 CHECK_FULL_REGS(child->thread.regs);
363#endif
345 if (index == PT_ORIG_R3) 364 if (index == PT_ORIG_R3)
346 break; 365 break;
347 if (index < PT_FPR0) { 366 if (index < PT_FPR0) {
348 ret = put_reg(child, index, data); 367 ret = put_reg(child, index, data);
349 } else { 368 } else {
350 preempt_disable(); 369 flush_fp_to_thread(child);
351 if (child->thread.regs->msr & MSR_FP)
352 giveup_fpu(child);
353 preempt_enable();
354 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data; 370 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
355 ret = 0; 371 ret = 0;
356 } 372 }
@@ -362,11 +378,10 @@ int sys_ptrace(long request, long pid, long addr, long data)
362 ret = -EIO; 378 ret = -EIO;
363 if (!valid_signal(data)) 379 if (!valid_signal(data))
364 break; 380 break;
365 if (request == PTRACE_SYSCALL) { 381 if (request == PTRACE_SYSCALL)
366 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 382 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
367 } else { 383 else
368 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 384 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
369 }
370 child->exit_code = data; 385 child->exit_code = data;
371 /* make sure the single step bit is not set. */ 386 /* make sure the single step bit is not set. */
372 clear_single_step(child); 387 clear_single_step(child);
@@ -404,28 +419,102 @@ int sys_ptrace(long request, long pid, long addr, long data)
404 break; 419 break;
405 } 420 }
406 421
422#ifdef CONFIG_PPC64
423 case PTRACE_GET_DEBUGREG: {
424 ret = -EINVAL;
425 /* We only support one DABR and no IABRS at the moment */
426 if (addr > 0)
427 break;
428 ret = put_user(child->thread.dabr,
429 (unsigned long __user *)data);
430 break;
431 }
432
433 case PTRACE_SET_DEBUGREG:
434 ret = ptrace_set_debugreg(child, addr, data);
435 break;
436#endif
437
407 case PTRACE_DETACH: 438 case PTRACE_DETACH:
408 ret = ptrace_detach(child, data); 439 ret = ptrace_detach(child, data);
409 break; 440 break;
410 441
442#ifdef CONFIG_PPC64
443 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
444 int i;
445 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
446 unsigned long __user *tmp = (unsigned long __user *)addr;
447
448 for (i = 0; i < 32; i++) {
449 ret = put_user(*reg, tmp);
450 if (ret)
451 break;
452 reg++;
453 tmp++;
454 }
455 break;
456 }
457
458 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
459 int i;
460 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
461 unsigned long __user *tmp = (unsigned long __user *)addr;
462
463 for (i = 0; i < 32; i++) {
464 ret = get_user(*reg, tmp);
465 if (ret)
466 break;
467 reg++;
468 tmp++;
469 }
470 break;
471 }
472
473 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
474 int i;
475 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
476 unsigned long __user *tmp = (unsigned long __user *)addr;
477
478 flush_fp_to_thread(child);
479
480 for (i = 0; i < 32; i++) {
481 ret = put_user(*reg, tmp);
482 if (ret)
483 break;
484 reg++;
485 tmp++;
486 }
487 break;
488 }
489
490	case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
491 int i;
492 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
493 unsigned long __user *tmp = (unsigned long __user *)addr;
494
495 flush_fp_to_thread(child);
496
497 for (i = 0; i < 32; i++) {
498 ret = get_user(*reg, tmp);
499 if (ret)
500 break;
501 reg++;
502 tmp++;
503 }
504 break;
505 }
506#endif /* CONFIG_PPC64 */
507
411#ifdef CONFIG_ALTIVEC 508#ifdef CONFIG_ALTIVEC
412 case PTRACE_GETVRREGS: 509 case PTRACE_GETVRREGS:
413 /* Get the child altivec register state. */ 510 /* Get the child altivec register state. */
414 preempt_disable(); 511 flush_altivec_to_thread(child);
415 if (child->thread.regs->msr & MSR_VEC)
416 giveup_altivec(child);
417 preempt_enable();
418 ret = get_vrregs((unsigned long __user *)data, child); 512 ret = get_vrregs((unsigned long __user *)data, child);
419 break; 513 break;
420 514
421 case PTRACE_SETVRREGS: 515 case PTRACE_SETVRREGS:
422 /* Set the child altivec register state. */ 516 /* Set the child altivec register state. */
423 /* this is to clear the MSR_VEC bit to force a reload 517 flush_altivec_to_thread(child);
424 * of register state from memory */
425 preempt_disable();
426 if (child->thread.regs->msr & MSR_VEC)
427 giveup_altivec(child);
428 preempt_enable();
429 ret = set_vrregs(child, (unsigned long __user *)data); 518 ret = set_vrregs(child, (unsigned long __user *)data);
430 break; 519 break;
431#endif 520#endif
@@ -478,12 +567,21 @@ static void do_syscall_trace(void)
478 567
479void do_syscall_trace_enter(struct pt_regs *regs) 568void do_syscall_trace_enter(struct pt_regs *regs)
480{ 569{
570#ifdef CONFIG_PPC64
571 secure_computing(regs->gpr[0]);
572#endif
573
481 if (test_thread_flag(TIF_SYSCALL_TRACE) 574 if (test_thread_flag(TIF_SYSCALL_TRACE)
482 && (current->ptrace & PT_PTRACED)) 575 && (current->ptrace & PT_PTRACED))
483 do_syscall_trace(); 576 do_syscall_trace();
484 577
485 if (unlikely(current->audit_context)) 578 if (unlikely(current->audit_context))
486 audit_syscall_entry(current, AUDIT_ARCH_PPC, 579 audit_syscall_entry(current,
580#ifdef CONFIG_PPC32
581 AUDIT_ARCH_PPC,
582#else
583 test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
584#endif
487 regs->gpr[0], 585 regs->gpr[0],
488 regs->gpr[3], regs->gpr[4], 586 regs->gpr[3], regs->gpr[4],
489 regs->gpr[5], regs->gpr[6]); 587 regs->gpr[5], regs->gpr[6]);
@@ -491,17 +589,25 @@ void do_syscall_trace_enter(struct pt_regs *regs)
491 589
492void do_syscall_trace_leave(struct pt_regs *regs) 590void do_syscall_trace_leave(struct pt_regs *regs)
493{ 591{
592#ifdef CONFIG_PPC32
494 secure_computing(regs->gpr[0]); 593 secure_computing(regs->gpr[0]);
594#endif
495 595
496 if (unlikely(current->audit_context)) 596 if (unlikely(current->audit_context))
497 audit_syscall_exit(current, 597 audit_syscall_exit(current,
498 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 598 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
499 regs->result); 599 regs->result);
500 600
501 if ((test_thread_flag(TIF_SYSCALL_TRACE)) 601 if ((test_thread_flag(TIF_SYSCALL_TRACE)
602#ifdef CONFIG_PPC64
603 || test_thread_flag(TIF_SINGLESTEP)
604#endif
605 )
502 && (current->ptrace & PT_PTRACED)) 606 && (current->ptrace & PT_PTRACED))
503 do_syscall_trace(); 607 do_syscall_trace();
504} 608}
505 609
610#ifdef CONFIG_PPC32
506EXPORT_SYMBOL(do_syscall_trace_enter); 611EXPORT_SYMBOL(do_syscall_trace_enter);
507EXPORT_SYMBOL(do_syscall_trace_leave); 612EXPORT_SYMBOL(do_syscall_trace_leave);
613#endif
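
The PPC_PTRACE_GETREGS/SETREGS and GETFPREGS/SETFPREGS requests added above pass the user buffer in the addr argument and leave data unused. The sketch below shows how a tracer might read a stopped child's GPRs; it is illustrative only, and PPC_PTRACE_GETREGS is assumed to come from the exported kernel headers (asm/ptrace.h), not from this diff.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>         /* assumed to define PPC_PTRACE_GETREGS */

/* Dump GPRs 0-31 of an already-attached, stopped child.  The buffer is
 * passed as 'addr', matching the kernel code above. */
static int dump_child_gprs(pid_t pid)
{
	unsigned long gprs[32];
	int i;

	if (ptrace((enum __ptrace_request)PPC_PTRACE_GETREGS,
		   pid, gprs, 0) != 0) {
		perror("PPC_PTRACE_GETREGS");
		return -1;
	}
	for (i = 0; i < 32; i++)
		printf("r%d = 0x%lx\n", i, gprs[i]);
	return 0;
}

The cast on the request is only needed with libcs that declare the first ptrace() argument as an enum.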
diff --git a/arch/ppc64/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index fb8c22d6084a..91eb952e0293 100644
--- a/arch/ppc64/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/ptrace32.c 2 * ptrace for 32-bit processes running on a 64-bit kernel.
3 * 3 *
4 * PowerPC version 4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -10,10 +10,10 @@
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 * 11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com) 12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au). 13 * and Paul Mackerras (paulus@samba.org).
14 * 14 *
15 * This file is subject to the terms and conditions of the GNU General 15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of 16 * Public License. See the file COPYING in the main directory of
17 * this archive for more details. 17 * this archive for more details.
18 */ 18 */
19 19
@@ -40,7 +40,8 @@
40 * in exit.c or in signal.c. 40 * in exit.c or in signal.c.
41 */ 41 */
42 42
43int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data) 43long compat_sys_ptrace(int request, int pid, unsigned long addr,
44 unsigned long data)
44{ 45{
45 struct task_struct *child; 46 struct task_struct *child;
46 int ret = -EPERM; 47 int ret = -EPERM;
diff --git a/arch/ppc64/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 5e8eb33b8e54..4d22eeeeb91d 100644
--- a/arch/ppc64/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -25,28 +25,29 @@
25#include <asm/page.h> 25#include <asm/page.h>
26#include <asm/param.h> 26#include <asm/param.h>
27#include <asm/system.h> 27#include <asm/system.h>
28#include <asm/abs_addr.h>
29#include <asm/udbg.h>
30#include <asm/delay.h> 28#include <asm/delay.h>
31#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/lmb.h>
31#ifdef CONFIG_PPC64
32#include <asm/systemcfg.h> 32#include <asm/systemcfg.h>
33#endif
33 34
34struct flash_block_list_header rtas_firmware_flash_list = {0, NULL}; 35struct rtas_t rtas = {
35
36struct rtas_t rtas = {
37 .lock = SPIN_LOCK_UNLOCKED 36 .lock = SPIN_LOCK_UNLOCKED
38}; 37};
39 38
40EXPORT_SYMBOL(rtas); 39EXPORT_SYMBOL(rtas);
41 40
42char rtas_err_buf[RTAS_ERROR_LOG_MAX];
43
44DEFINE_SPINLOCK(rtas_data_buf_lock); 41DEFINE_SPINLOCK(rtas_data_buf_lock);
45char rtas_data_buf[RTAS_DATA_BUF_SIZE]__page_aligned; 42char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
46unsigned long rtas_rmo_buf; 43unsigned long rtas_rmo_buf;
47 44
48void 45/*
49call_rtas_display_status(unsigned char c) 46 * call_rtas_display_status and call_rtas_display_status_delay
47 * are designed only for very early low-level debugging, which
48 * is why the token is hard-coded to 10.
49 */
50void call_rtas_display_status(unsigned char c)
50{ 51{
51 struct rtas_args *args = &rtas.args; 52 struct rtas_args *args = &rtas.args;
52 unsigned long s; 53 unsigned long s;
@@ -66,8 +67,7 @@ call_rtas_display_status(unsigned char c)
66 spin_unlock_irqrestore(&rtas.lock, s); 67 spin_unlock_irqrestore(&rtas.lock, s);
67} 68}
68 69
69void 70void call_rtas_display_status_delay(unsigned char c)
70call_rtas_display_status_delay(unsigned char c)
71{ 71{
72 static int pending_newline = 0; /* did last write end with unprinted newline? */ 72 static int pending_newline = 0; /* did last write end with unprinted newline? */
73 static int width = 16; 73 static int width = 16;
@@ -91,8 +91,7 @@ call_rtas_display_status_delay(unsigned char c)
91 } 91 }
92} 92}
93 93
94void 94void rtas_progress(char *s, unsigned short hex)
95rtas_progress(char *s, unsigned short hex)
96{ 95{
97 struct device_node *root; 96 struct device_node *root;
98 int width, *p; 97 int width, *p;
@@ -208,18 +207,16 @@ rtas_progress(char *s, unsigned short hex)
208 spin_unlock(&progress_lock); 207 spin_unlock(&progress_lock);
209} 208}
210 209
211int 210int rtas_token(const char *service)
212rtas_token(const char *service)
213{ 211{
214 int *tokp; 212 int *tokp;
215 if (rtas.dev == NULL) { 213 if (rtas.dev == NULL)
216 PPCDBG(PPCDBG_RTAS,"\tNo rtas device in device-tree...\n");
217 return RTAS_UNKNOWN_SERVICE; 214 return RTAS_UNKNOWN_SERVICE;
218 }
219 tokp = (int *) get_property(rtas.dev, service, NULL); 215 tokp = (int *) get_property(rtas.dev, service, NULL);
220 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE; 216 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE;
221} 217}
222 218
219#ifdef CONFIG_RTAS_ERROR_LOGGING
223/* 220/*
224 * Return the firmware-specified size of the error log buffer 221 * Return the firmware-specified size of the error log buffer
225 * for all rtas calls that require an error buffer argument. 222 * for all rtas calls that require an error buffer argument.
@@ -234,31 +231,38 @@ int rtas_get_error_log_max(void)
234 rtas_error_log_max = rtas_token ("rtas-error-log-max"); 231 rtas_error_log_max = rtas_token ("rtas-error-log-max");
235 if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) || 232 if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
236 (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) { 233 (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
237 printk (KERN_WARNING "RTAS: bad log buffer size %d\n", rtas_error_log_max); 234 printk (KERN_WARNING "RTAS: bad log buffer size %d\n",
235 rtas_error_log_max);
238 rtas_error_log_max = RTAS_ERROR_LOG_MAX; 236 rtas_error_log_max = RTAS_ERROR_LOG_MAX;
239 } 237 }
240 return rtas_error_log_max; 238 return rtas_error_log_max;
241} 239}
240EXPORT_SYMBOL(rtas_get_error_log_max);
242 241
243 242
243char rtas_err_buf[RTAS_ERROR_LOG_MAX];
244int rtas_last_error_token;
245
244/** Return a copy of the detailed error text associated with the 246/** Return a copy of the detailed error text associated with the
245 * most recent failed call to rtas. Because the error text 247 * most recent failed call to rtas. Because the error text
246 * might go stale if there are any other intervening rtas calls, 248 * might go stale if there are any other intervening rtas calls,
247 * this routine must be called atomically with whatever produced 249 * this routine must be called atomically with whatever produced
248 * the error (i.e. with rtas.lock still held from the previous call). 250 * the error (i.e. with rtas.lock still held from the previous call).
249 */ 251 */
250static int 252static char *__fetch_rtas_last_error(char *altbuf)
251__fetch_rtas_last_error(void)
252{ 253{
253 struct rtas_args err_args, save_args; 254 struct rtas_args err_args, save_args;
254 u32 bufsz; 255 u32 bufsz;
256 char *buf = NULL;
257
258 if (rtas_last_error_token == -1)
259 return NULL;
255 260
256 bufsz = rtas_get_error_log_max(); 261 bufsz = rtas_get_error_log_max();
257 262
258 err_args.token = rtas_token("rtas-last-error"); 263 err_args.token = rtas_last_error_token;
259 err_args.nargs = 2; 264 err_args.nargs = 2;
260 err_args.nret = 1; 265 err_args.nret = 1;
261
262 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf); 266 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf);
263 err_args.args[1] = bufsz; 267 err_args.args[1] = bufsz;
264 err_args.args[2] = 0; 268 err_args.args[2] = 0;
@@ -271,23 +275,38 @@ __fetch_rtas_last_error(void)
271 err_args = rtas.args; 275 err_args = rtas.args;
272 rtas.args = save_args; 276 rtas.args = save_args;
273 277
274 return err_args.args[2]; 278 /* Log the error in the unlikely case that there was one. */
279 if (unlikely(err_args.args[2] == 0)) {
280 if (altbuf) {
281 buf = altbuf;
282 } else {
283 buf = rtas_err_buf;
284 if (mem_init_done)
285 buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
286 }
287 if (buf)
288 memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
289 }
290
291 return buf;
275} 292}
276 293
294#define get_errorlog_buffer() kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)
295
296#else /* CONFIG_RTAS_ERROR_LOGGING */
297#define __fetch_rtas_last_error(x) NULL
298#define get_errorlog_buffer() NULL
299#endif
300
277int rtas_call(int token, int nargs, int nret, int *outputs, ...) 301int rtas_call(int token, int nargs, int nret, int *outputs, ...)
278{ 302{
279 va_list list; 303 va_list list;
280 int i, logit = 0; 304 int i;
281 unsigned long s; 305 unsigned long s;
282 struct rtas_args *rtas_args; 306 struct rtas_args *rtas_args;
283 char * buff_copy = NULL; 307 char *buff_copy = NULL;
284 int ret; 308 int ret;
285 309
286 PPCDBG(PPCDBG_RTAS, "Entering rtas_call\n");
287 PPCDBG(PPCDBG_RTAS, "\ttoken = 0x%x\n", token);
288 PPCDBG(PPCDBG_RTAS, "\tnargs = %d\n", nargs);
289 PPCDBG(PPCDBG_RTAS, "\tnret = %d\n", nret);
290 PPCDBG(PPCDBG_RTAS, "\t&outputs = 0x%lx\n", outputs);
291 if (token == RTAS_UNKNOWN_SERVICE) 310 if (token == RTAS_UNKNOWN_SERVICE)
292 return -1; 311 return -1;
293 312
@@ -300,46 +319,25 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
300 rtas_args->nret = nret; 319 rtas_args->nret = nret;
301 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]); 320 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]);
302 va_start(list, outputs); 321 va_start(list, outputs);
303 for (i = 0; i < nargs; ++i) { 322 for (i = 0; i < nargs; ++i)
304 rtas_args->args[i] = va_arg(list, rtas_arg_t); 323 rtas_args->args[i] = va_arg(list, rtas_arg_t);
305 PPCDBG(PPCDBG_RTAS, "\tnarg[%d] = 0x%x\n", i, rtas_args->args[i]);
306 }
307 va_end(list); 324 va_end(list);
308 325
309 for (i = 0; i < nret; ++i) 326 for (i = 0; i < nret; ++i)
310 rtas_args->rets[i] = 0; 327 rtas_args->rets[i] = 0;
311 328
312 PPCDBG(PPCDBG_RTAS, "\tentering rtas with 0x%lx\n",
313 __pa(rtas_args));
314 enter_rtas(__pa(rtas_args)); 329 enter_rtas(__pa(rtas_args));
315 PPCDBG(PPCDBG_RTAS, "\treturned from rtas ...\n");
316 330
317 /* A -1 return code indicates that the last command couldn't 331 /* A -1 return code indicates that the last command couldn't
318 be completed due to a hardware error. */ 332 be completed due to a hardware error. */
319 if (rtas_args->rets[0] == -1) 333 if (rtas_args->rets[0] == -1)
320 logit = (__fetch_rtas_last_error() == 0); 334 buff_copy = __fetch_rtas_last_error(NULL);
321
322 ifppcdebug(PPCDBG_RTAS) {
323 for(i=0; i < nret ;i++)
324 udbg_printf("\tnret[%d] = 0x%lx\n", i, (ulong)rtas_args->rets[i]);
325 }
326 335
327 if (nret > 1 && outputs != NULL) 336 if (nret > 1 && outputs != NULL)
328 for (i = 0; i < nret-1; ++i) 337 for (i = 0; i < nret-1; ++i)
329 outputs[i] = rtas_args->rets[i+1]; 338 outputs[i] = rtas_args->rets[i+1];
330 ret = (nret > 0)? rtas_args->rets[0]: 0; 339 ret = (nret > 0)? rtas_args->rets[0]: 0;
331 340
332 /* Log the error in the unlikely case that there was one. */
333 if (unlikely(logit)) {
334 buff_copy = rtas_err_buf;
335 if (mem_init_done) {
336 buff_copy = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
337 if (buff_copy)
338 memcpy(buff_copy, rtas_err_buf,
339 RTAS_ERROR_LOG_MAX);
340 }
341 }
342
343 /* Gotta do something different here, use global lock for now... */ 341 /* Gotta do something different here, use global lock for now... */
344 spin_unlock_irqrestore(&rtas.lock, s); 342 spin_unlock_irqrestore(&rtas.lock, s);
345 343
@@ -354,8 +352,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
354/* Given an RTAS status code of 990n compute the hinted delay of 10^n 352/* Given an RTAS status code of 990n compute the hinted delay of 10^n
355 * (last digit) milliseconds. For now we bound at n=5 (100 sec). 353 * (last digit) milliseconds. For now we bound at n=5 (100 sec).
356 */ 354 */
357unsigned int 355unsigned int rtas_extended_busy_delay_time(int status)
358rtas_extended_busy_delay_time(int status)
359{ 356{
360 int order = status - 9900; 357 int order = status - 9900;
361 unsigned long ms; 358 unsigned long ms;
@@ -366,7 +363,7 @@ rtas_extended_busy_delay_time(int status)
366 order = 5; /* bound */ 363 order = 5; /* bound */
367 364
368 /* Use microseconds for reasonable accuracy */ 365 /* Use microseconds for reasonable accuracy */
369 for (ms=1; order > 0; order--) 366 for (ms = 1; order > 0; order--)
370 ms *= 10; 367 ms *= 10;
371 368
372 return ms; 369 return ms;
@@ -493,112 +490,23 @@ int rtas_set_indicator(int indicator, int index, int new_value)
493 return rc; 490 return rc;
494} 491}
495 492
496#define FLASH_BLOCK_LIST_VERSION (1UL) 493void rtas_restart(char *cmd)
497static void
498rtas_flash_firmware(void)
499{
500 unsigned long image_size;
501 struct flash_block_list *f, *next, *flist;
502 unsigned long rtas_block_list;
503 int i, status, update_token;
504
505 update_token = rtas_token("ibm,update-flash-64-and-reboot");
506 if (update_token == RTAS_UNKNOWN_SERVICE) {
507 printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot is not available -- not a service partition?\n");
508 printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
509 return;
510 }
511
512 /* NOTE: the "first" block list is a global var with no data
513 * blocks in the kernel data segment. We do this because
514 * we want to ensure this block_list addr is under 4GB.
515 */
516 rtas_firmware_flash_list.num_blocks = 0;
517 flist = (struct flash_block_list *)&rtas_firmware_flash_list;
518 rtas_block_list = virt_to_abs(flist);
519 if (rtas_block_list >= 4UL*1024*1024*1024) {
520 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
521 return;
522 }
523
524 printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
525 /* Update the block_list in place. */
526 image_size = 0;
527 for (f = flist; f; f = next) {
528 /* Translate data addrs to absolute */
529 for (i = 0; i < f->num_blocks; i++) {
530 f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
531 image_size += f->blocks[i].length;
532 }
533 next = f->next;
534 /* Don't translate NULL pointer for last entry */
535 if (f->next)
536 f->next = (struct flash_block_list *)virt_to_abs(f->next);
537 else
538 f->next = NULL;
539 /* make num_blocks into the version/length field */
540 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
541 }
542
543 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
544 printk(KERN_ALERT "FLASH: performing flash and reboot\n");
545 rtas_progress("Flashing \n", 0x0);
546 rtas_progress("Please Wait... ", 0x0);
547 printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
548 status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
549 switch (status) { /* should only get "bad" status */
550 case 0:
551 printk(KERN_ALERT "FLASH: success\n");
552 break;
553 case -1:
554 printk(KERN_ALERT "FLASH: hardware error. Firmware may not be not flashed\n");
555 break;
556 case -3:
557 printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n");
558 break;
559 case -4:
560 printk(KERN_ALERT "FLASH: flash failed when partially complete. System may not reboot\n");
561 break;
562 default:
563 printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
564 break;
565 }
566}
567
568void rtas_flash_bypass_warning(void)
569{
570 printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
571 printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
572}
573
574
575void
576rtas_restart(char *cmd)
577{ 494{
578 if (rtas_firmware_flash_list.next)
579 rtas_flash_firmware();
580
581 printk("RTAS system-reboot returned %d\n", 495 printk("RTAS system-reboot returned %d\n",
582 rtas_call(rtas_token("system-reboot"), 0, 1, NULL)); 496 rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
583 for (;;); 497 for (;;);
584} 498}
585 499
586void 500void rtas_power_off(void)
587rtas_power_off(void)
588{ 501{
589 if (rtas_firmware_flash_list.next)
590 rtas_flash_bypass_warning();
591 /* allow power on only with power button press */ 502 /* allow power on only with power button press */
592 printk("RTAS power-off returned %d\n", 503 printk("RTAS power-off returned %d\n",
593 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); 504 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
594 for (;;); 505 for (;;);
595} 506}
596 507
597void 508void rtas_halt(void)
598rtas_halt(void)
599{ 509{
600 if (rtas_firmware_flash_list.next)
601 rtas_flash_bypass_warning();
602 rtas_power_off(); 510 rtas_power_off();
603} 511}
604 512
@@ -631,9 +539,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
631{ 539{
632 struct rtas_args args; 540 struct rtas_args args;
633 unsigned long flags; 541 unsigned long flags;
634 char * buff_copy; 542 char *buff_copy, *errbuf = NULL;
635 int nargs; 543 int nargs;
636 int err_rc = 0;
637 544
638 if (!capable(CAP_SYS_ADMIN)) 545 if (!capable(CAP_SYS_ADMIN))
639 return -EPERM; 546 return -EPERM;
@@ -652,7 +559,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
652 nargs * sizeof(rtas_arg_t)) != 0) 559 nargs * sizeof(rtas_arg_t)) != 0)
653 return -EFAULT; 560 return -EFAULT;
654 561
655 buff_copy = kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL); 562 buff_copy = get_errorlog_buffer();
656 563
657 spin_lock_irqsave(&rtas.lock, flags); 564 spin_lock_irqsave(&rtas.lock, flags);
658 565
@@ -664,19 +571,14 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
664 571
665 /* A -1 return code indicates that the last command couldn't 572 /* A -1 return code indicates that the last command couldn't
666 be completed due to a hardware error. */ 573 be completed due to a hardware error. */
667 if (args.rets[0] == -1) { 574 if (args.rets[0] == -1)
668 err_rc = __fetch_rtas_last_error(); 575 errbuf = __fetch_rtas_last_error(buff_copy);
669 if ((err_rc == 0) && buff_copy) {
670 memcpy(buff_copy, rtas_err_buf, RTAS_ERROR_LOG_MAX);
671 }
672 }
673 576
674 spin_unlock_irqrestore(&rtas.lock, flags); 577 spin_unlock_irqrestore(&rtas.lock, flags);
675 578
676 if (buff_copy) { 579 if (buff_copy) {
677 if ((args.rets[0] == -1) && (err_rc == 0)) { 580 if (errbuf)
678 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); 581 log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
679 }
680 kfree(buff_copy); 582 kfree(buff_copy);
681 } 583 }
682 584
@@ -689,6 +591,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
689 return 0; 591 return 0;
690} 592}
691 593
594#ifdef CONFIG_SMP
692/* This version can't take the spinlock, because it never returns */ 595/* This version can't take the spinlock, because it never returns */
693 596
694struct rtas_args rtas_stop_self_args = { 597struct rtas_args rtas_stop_self_args = {
@@ -713,6 +616,7 @@ void rtas_stop_self(void)
713 616
714 panic("Alas, I survived.\n"); 617 panic("Alas, I survived.\n");
715} 618}
619#endif
716 620
717/* 621/*
718 * Call early during boot, before mem init or bootmem, to retrieve the RTAS 622
@@ -721,6 +625,8 @@ void rtas_stop_self(void)
721 */ 625 */
722void __init rtas_initialize(void) 626void __init rtas_initialize(void)
723{ 627{
628 unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
629
724 /* Get RTAS dev node and fill up our "rtas" structure with infos 630 /* Get RTAS dev node and fill up our "rtas" structure with infos
725 * about it. 631 * about it.
726 */ 632 */
@@ -742,26 +648,27 @@ void __init rtas_initialize(void)
742 } else 648 } else
743 rtas.dev = NULL; 649 rtas.dev = NULL;
744 } 650 }
651 if (!rtas.dev)
652 return;
653
745 /* If RTAS was found, allocate the RMO buffer for it and look for 654 /* If RTAS was found, allocate the RMO buffer for it and look for
746 * the stop-self token if any 655 * the stop-self token if any
747 */ 656 */
748 if (rtas.dev) { 657#ifdef CONFIG_PPC64
749 unsigned long rtas_region = RTAS_INSTANTIATE_MAX; 658 if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
750 if (systemcfg->platform == PLATFORM_PSERIES_LPAR) 659 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
751 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX); 660#endif
752 661 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
753 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE,
754 rtas_region);
755 662
756#ifdef CONFIG_HOTPLUG_CPU 663#ifdef CONFIG_HOTPLUG_CPU
757 rtas_stop_self_args.token = rtas_token("stop-self"); 664 rtas_stop_self_args.token = rtas_token("stop-self");
758#endif /* CONFIG_HOTPLUG_CPU */ 665#endif /* CONFIG_HOTPLUG_CPU */
759 } 666#ifdef CONFIG_RTAS_ERROR_LOGGING
760 667 rtas_last_error_token = rtas_token("rtas-last-error");
668#endif
761} 669}
762 670
763 671
764EXPORT_SYMBOL(rtas_firmware_flash_list);
765EXPORT_SYMBOL(rtas_token); 672EXPORT_SYMBOL(rtas_token);
766EXPORT_SYMBOL(rtas_call); 673EXPORT_SYMBOL(rtas_call);
767EXPORT_SYMBOL(rtas_data_buf); 674EXPORT_SYMBOL(rtas_data_buf);
@@ -771,4 +678,3 @@ EXPORT_SYMBOL(rtas_get_sensor);
771EXPORT_SYMBOL(rtas_get_power_level); 678EXPORT_SYMBOL(rtas_get_power_level);
772EXPORT_SYMBOL(rtas_set_power_level); 679EXPORT_SYMBOL(rtas_set_power_level);
773EXPORT_SYMBOL(rtas_set_indicator); 680EXPORT_SYMBOL(rtas_set_indicator);
774EXPORT_SYMBOL(rtas_get_error_log_max);
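
With the error-log plumbing reworked, in-kernel callers keep the same pattern: resolve a token once with rtas_token() and pass it to rtas_call() along with the argument and return counts. A hedged example using the standard "get-time-of-day" RTAS service (eight return values per the RTAS binding; the service itself is not something this patch adds):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/rtas.h>

/* Read the RTAS real-time clock.  rtas_call() returns rets[0] (the
 * status word) and copies rets[1..nret-1] into 'out'. */
static int example_read_rtas_clock(void)
{
	int out[7];     /* year, month, day, hour, minute, second, nsec */
	int token = rtas_token("get-time-of-day");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	if (rtas_call(token, 0, 8, out) != 0)   /* nargs = 0, nret = 8 */
		return -EIO;

	printk(KERN_INFO "RTAS time: %04d-%02d-%02d %02d:%02d:%02d\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}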
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
new file mode 100644
index 000000000000..2f8c3c951394
--- /dev/null
+++ b/arch/powerpc/kernel/semaphore.c
@@ -0,0 +1,135 @@
1/*
2 * PowerPC-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
12 * to eliminate the SMP races in the old version between the updates
13 * of `count' and `waking'. Now we use negative `count' values to
14 * indicate that some process(es) are waiting for the semaphore.
15 */
16
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <linux/module.h>
20
21#include <asm/atomic.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * Atomically update sem->count.
27 * This does the equivalent of the following:
28 *
29 * old_count = sem->count;
30 * tmp = MAX(old_count, 0) + incr;
31 * sem->count = tmp;
32 * return old_count;
33 */
34static inline int __sem_update_count(struct semaphore *sem, int incr)
35{
36 int old_count, tmp;
37
38 __asm__ __volatile__("\n"
39"1: lwarx %0,0,%3\n"
40" srawi %1,%0,31\n"
41" andc %1,%0,%1\n"
42" add %1,%1,%4\n"
43 PPC405_ERR77(0,%3)
44" stwcx. %1,0,%3\n"
45" bne 1b"
46 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
47 : "r" (&sem->count), "r" (incr), "m" (sem->count)
48 : "cc");
49
50 return old_count;
51}
52
53void __up(struct semaphore *sem)
54{
55 /*
56 * Note that we incremented count in up() before we came here,
57 * but that was ineffective since the result was <= 0, and
58 * any negative value of count is equivalent to 0.
59 * This ends up setting count to 1, unless count is now > 0
60 * (i.e. because some other cpu has called up() in the meantime),
61 * in which case we just increment count.
62 */
63 __sem_update_count(sem, 1);
64 wake_up(&sem->wait);
65}
66EXPORT_SYMBOL(__up);
67
68/*
69 * Note that when we come in to __down or __down_interruptible,
70 * we have already decremented count, but that decrement was
71 * ineffective since the result was < 0, and any negative value
72 * of count is equivalent to 0.
73 * Thus it is only when we decrement count from some value > 0
74 * that we have actually got the semaphore.
75 */
76void __sched __down(struct semaphore *sem)
77{
78 struct task_struct *tsk = current;
79 DECLARE_WAITQUEUE(wait, tsk);
80
81 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
82 add_wait_queue_exclusive(&sem->wait, &wait);
83
84 /*
85 * Try to get the semaphore. If the count is > 0, then we've
86 * got the semaphore; we decrement count and exit the loop.
87 * If the count is 0 or negative, we set it to -1, indicating
88 * that we are asleep, and then sleep.
89 */
90 while (__sem_update_count(sem, -1) <= 0) {
91 schedule();
92 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
93 }
94 remove_wait_queue(&sem->wait, &wait);
95 __set_task_state(tsk, TASK_RUNNING);
96
97 /*
98 * If there are any more sleepers, wake one of them up so
99 * that it can either get the semaphore, or set count to -1
100 * indicating that there are still processes sleeping.
101 */
102 wake_up(&sem->wait);
103}
104EXPORT_SYMBOL(__down);
105
106int __sched __down_interruptible(struct semaphore * sem)
107{
108 int retval = 0;
109 struct task_struct *tsk = current;
110 DECLARE_WAITQUEUE(wait, tsk);
111
112 __set_task_state(tsk, TASK_INTERRUPTIBLE);
113 add_wait_queue_exclusive(&sem->wait, &wait);
114
115 while (__sem_update_count(sem, -1) <= 0) {
116 if (signal_pending(current)) {
117 /*
118 * A signal is pending - give up trying.
119 * Set sem->count to 0 if it is negative,
120 * since we are no longer sleeping.
121 */
122 __sem_update_count(sem, 0);
123 retval = -EINTR;
124 break;
125 }
126 schedule();
127 set_task_state(tsk, TASK_INTERRUPTIBLE);
128 }
129 remove_wait_queue(&sem->wait, &wait);
130 __set_task_state(tsk, TASK_RUNNING);
131
132 wake_up(&sem->wait);
133 return retval;
134}
135EXPORT_SYMBOL(__down_interruptible);
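
The update performed by __sem_update_count() above is easiest to read as plain arithmetic: take the old count, clamp it at zero, add the increment, and hand the old value back to the caller. A minimal, non-atomic C sketch of that arithmetic follows; the struct and function names are invented for illustration, and the real routine does the same read-modify-write atomically with lwarx/stwcx.:

/* Illustrative only: shows the arithmetic of __sem_update_count(),
 * not its atomicity.  demo_sem/demo_sem_update_count are made-up names. */
struct demo_sem {
	int count;	/* > 0: free slots, <= 0: contended, waiters present */
};

static int demo_sem_update_count(struct demo_sem *sem, int incr)
{
	int old_count = sem->count;
	int base = old_count > 0 ? old_count : 0;	/* MAX(old_count, 0) */

	sem->count = base + incr;			/* new count */
	return old_count;				/* caller tests the old value */
}

With incr = -1, a caller that sees a positive old value has acquired the semaphore; a waiter that loses the race instead drives the stored count to -1, which is exactly the state __down() and __down_interruptible() loop on.
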
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
new file mode 100644
index 000000000000..1292460fcde2
--- /dev/null
+++ b/arch/powerpc/kernel/setup-common.c
@@ -0,0 +1,410 @@
1/*
2 * Common boot and setup code for both 32-bit and 64-bit.
3 * Extracted from arch/powerpc/kernel/setup_64.c.
4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/string.h>
15#include <linux/sched.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/reboot.h>
19#include <linux/delay.h>
20#include <linux/initrd.h>
21#include <linux/ide.h>
22#include <linux/seq_file.h>
23#include <linux/ioport.h>
24#include <linux/console.h>
25#include <linux/utsname.h>
26#include <linux/tty.h>
27#include <linux/root_dev.h>
28#include <linux/notifier.h>
29#include <linux/cpu.h>
30#include <linux/unistd.h>
31#include <linux/serial.h>
32#include <linux/serial_8250.h>
33#include <asm/io.h>
34#include <asm/prom.h>
35#include <asm/processor.h>
36#include <asm/pgtable.h>
37#include <asm/smp.h>
38#include <asm/elf.h>
39#include <asm/machdep.h>
40#include <asm/time.h>
41#include <asm/cputable.h>
42#include <asm/sections.h>
43#include <asm/btext.h>
44#include <asm/nvram.h>
45#include <asm/setup.h>
46#include <asm/system.h>
47#include <asm/rtas.h>
48#include <asm/iommu.h>
49#include <asm/serial.h>
50#include <asm/cache.h>
51#include <asm/page.h>
52#include <asm/mmu.h>
53#include <asm/lmb.h>
54
55#undef DEBUG
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63/*
64 * This still seems to be needed... -- paulus
65 */
66struct screen_info screen_info = {
67 .orig_x = 0,
68 .orig_y = 25,
69 .orig_video_cols = 80,
70 .orig_video_lines = 25,
71 .orig_video_isVGA = 1,
72 .orig_video_points = 16
73};
74
75#ifdef __DO_IRQ_CANON
76/* XXX should go elsewhere eventually */
77int ppc_do_canonicalize_irqs;
78EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
79#endif
80
81/* also used by kexec */
82void machine_shutdown(void)
83{
84 if (ppc_md.nvram_sync)
85 ppc_md.nvram_sync();
86}
87
88void machine_restart(char *cmd)
89{
90 machine_shutdown();
91 ppc_md.restart(cmd);
92#ifdef CONFIG_SMP
93 smp_send_stop();
94#endif
95 printk(KERN_EMERG "System Halted, OK to turn off power\n");
96 local_irq_disable();
97 while (1) ;
98}
99
100void machine_power_off(void)
101{
102 machine_shutdown();
103 ppc_md.power_off();
104#ifdef CONFIG_SMP
105 smp_send_stop();
106#endif
107 printk(KERN_EMERG "System Halted, OK to turn off power\n");
108 local_irq_disable();
109 while (1) ;
110}
111/* Used by the G5 thermal driver */
112EXPORT_SYMBOL_GPL(machine_power_off);
113
114void (*pm_power_off)(void) = machine_power_off;
115EXPORT_SYMBOL_GPL(pm_power_off);
116
117void machine_halt(void)
118{
119 machine_shutdown();
120 ppc_md.halt();
121#ifdef CONFIG_SMP
122 smp_send_stop();
123#endif
124 printk(KERN_EMERG "System Halted, OK to turn off power\n");
125 local_irq_disable();
126 while (1) ;
127}
128
129
130#ifdef CONFIG_TAU
131extern u32 cpu_temp(unsigned long cpu);
132extern u32 cpu_temp_both(unsigned long cpu);
133#endif /* CONFIG_TAU */
134
135#ifdef CONFIG_SMP
136DEFINE_PER_CPU(unsigned int, pvr);
137#endif
138
139static int show_cpuinfo(struct seq_file *m, void *v)
140{
141 unsigned long cpu_id = (unsigned long)v - 1;
142 unsigned int pvr;
143 unsigned short maj;
144 unsigned short min;
145
146 if (cpu_id == NR_CPUS) {
147#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
148 unsigned long bogosum = 0;
149 int i;
150 for (i = 0; i < NR_CPUS; ++i)
151 if (cpu_online(i))
152 bogosum += loops_per_jiffy;
153 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
154 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
155#endif /* CONFIG_SMP && CONFIG_PPC32 */
156 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
157
158 if (ppc_md.show_cpuinfo != NULL)
159 ppc_md.show_cpuinfo(m);
160
161 return 0;
162 }
163
164 /* We only show online cpus: disable preempt (overzealous, I
165 * knew) to prevent cpu going down. */
166 preempt_disable();
167 if (!cpu_online(cpu_id)) {
168 preempt_enable();
169 return 0;
170 }
171
172#ifdef CONFIG_SMP
173#ifdef CONFIG_PPC64 /* XXX for now */
174 pvr = per_cpu(pvr, cpu_id);
175#else
176 pvr = cpu_data[cpu_id].pvr;
177#endif
178#else
179 pvr = mfspr(SPRN_PVR);
180#endif
181 maj = (pvr >> 8) & 0xFF;
182 min = pvr & 0xFF;
183
184 seq_printf(m, "processor\t: %lu\n", cpu_id);
185 seq_printf(m, "cpu\t\t: ");
186
187 if (cur_cpu_spec->pvr_mask)
188 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
189 else
190 seq_printf(m, "unknown (%08x)", pvr);
191
192#ifdef CONFIG_ALTIVEC
193 if (cpu_has_feature(CPU_FTR_ALTIVEC))
194 seq_printf(m, ", altivec supported");
195#endif /* CONFIG_ALTIVEC */
196
197 seq_printf(m, "\n");
198
199#ifdef CONFIG_TAU
200 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
201#ifdef CONFIG_TAU_AVERAGE
202 /* more straightforward, but potentially misleading */
203 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
204 cpu_temp(i));
205#else
206 /* show the actual temp sensor range */
207 u32 temp;
208 temp = cpu_temp_both(i);
209 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
210 temp & 0xff, temp >> 16);
211#endif
212 }
213#endif /* CONFIG_TAU */
214
215 /*
216 * Assume here that all clock rates are the same in a
217 * smp system. -- Cort
218 */
219 if (ppc_proc_freq)
220 seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
221 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
222
223 if (ppc_md.show_percpuinfo != NULL)
224 ppc_md.show_percpuinfo(m, cpu_id);
225
226 /* If we are a Freescale core do a simple check so
227 * we dont have to keep adding cases in the future */
228 if (PVR_VER(pvr) & 0x8000) {
229 maj = PVR_MAJ(pvr);
230 min = PVR_MIN(pvr);
231 } else {
232 switch (PVR_VER(pvr)) {
233 case 0x0020: /* 403 family */
234 maj = PVR_MAJ(pvr) + 1;
235 min = PVR_MIN(pvr);
236 break;
237 case 0x1008: /* 740P/750P ?? */
238 maj = ((pvr >> 8) & 0xFF) - 1;
239 min = pvr & 0xFF;
240 break;
241 default:
242 maj = (pvr >> 8) & 0xFF;
243 min = pvr & 0xFF;
244 break;
245 }
246 }
247
248 seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
249 maj, min, PVR_VER(pvr), PVR_REV(pvr));
250
251#ifdef CONFIG_PPC32
252 seq_printf(m, "bogomips\t: %lu.%02lu\n",
253 loops_per_jiffy / (500000/HZ),
254 (loops_per_jiffy / (5000/HZ)) % 100);
255#endif
256
257#ifdef CONFIG_SMP
258 seq_printf(m, "\n");
259#endif
260
261 preempt_enable();
262 return 0;
263}
264
265static void *c_start(struct seq_file *m, loff_t *pos)
266{
267 unsigned long i = *pos;
268
269 return i <= NR_CPUS ? (void *)(i + 1) : NULL;
270}
271
272static void *c_next(struct seq_file *m, void *v, loff_t *pos)
273{
274 ++*pos;
275 return c_start(m, pos);
276}
277
278static void c_stop(struct seq_file *m, void *v)
279{
280}
281
282struct seq_operations cpuinfo_op = {
283 .start =c_start,
284 .next = c_next,
285 .stop = c_stop,
286 .show = show_cpuinfo,
287};
288
289#ifdef CONFIG_PPC_MULTIPLATFORM
290static int __init set_preferred_console(void)
291{
292 struct device_node *prom_stdout = NULL;
293 char *name;
294 u32 *spd;
295 int offset = 0;
296
297 DBG(" -> set_preferred_console()\n");
298
299 /* The user has requested a console so this is already set up. */
300 if (strstr(saved_command_line, "console=")) {
301 DBG(" console was specified !\n");
302 return -EBUSY;
303 }
304
305 if (!of_chosen) {
306 DBG(" of_chosen is NULL !\n");
307 return -ENODEV;
308 }
309 /* We are getting a weird phandle from OF ... */
310 /* ... So use the full path instead */
311 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
312 if (name == NULL) {
313 DBG(" no linux,stdout-path !\n");
314 return -ENODEV;
315 }
316 prom_stdout = of_find_node_by_path(name);
317 if (!prom_stdout) {
318 DBG(" can't find stdout package %s !\n", name);
319 return -ENODEV;
320 }
321 DBG("stdout is %s\n", prom_stdout->full_name);
322
323 name = (char *)get_property(prom_stdout, "name", NULL);
324 if (!name) {
325 DBG(" stdout package has no name !\n");
326 goto not_found;
327 }
328 spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
329
330 if (0)
331 ;
332#ifdef CONFIG_SERIAL_8250_CONSOLE
333 else if (strcmp(name, "serial") == 0) {
334 int i;
335 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
336 if (i > 8) {
337 switch (reg[1]) {
338 case 0x3f8:
339 offset = 0;
340 break;
341 case 0x2f8:
342 offset = 1;
343 break;
344 case 0x898:
345 offset = 2;
346 break;
347 case 0x890:
348 offset = 3;
349 break;
350 default:
351 /* We dont recognise the serial port */
352 goto not_found;
353 }
354 }
355 }
356#endif /* CONFIG_SERIAL_8250_CONSOLE */
357#ifdef CONFIG_PPC_PSERIES
358 else if (strcmp(name, "vty") == 0) {
359 u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
360 char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
361
362 if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
363 /* Host Virtual Serial Interface */
364 switch (reg[0]) {
365 case 0x30000000:
366 offset = 0;
367 break;
368 case 0x30000001:
369 offset = 1;
370 break;
371 default:
372 goto not_found;
373 }
374 of_node_put(prom_stdout);
375 DBG("Found hvsi console at offset %d\n", offset);
376 return add_preferred_console("hvsi", offset, NULL);
377 } else {
378 /* pSeries LPAR virtual console */
379 of_node_put(prom_stdout);
380 DBG("Found hvc console\n");
381 return add_preferred_console("hvc", 0, NULL);
382 }
383 }
384#endif /* CONFIG_PPC_PSERIES */
385#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
386 else if (strcmp(name, "ch-a") == 0)
387 offset = 0;
388 else if (strcmp(name, "ch-b") == 0)
389 offset = 1;
390#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
391 else
392 goto not_found;
393 of_node_put(prom_stdout);
394
395 DBG("Found serial console at ttyS%d\n", offset);
396
397 if (spd) {
398 static char __initdata opt[16];
399 sprintf(opt, "%d", *spd);
400 return add_preferred_console("ttyS", offset, opt);
401 } else
402 return add_preferred_console("ttyS", offset, NULL);
403
404 not_found:
405 DBG("No preferred console found !\n");
406 of_node_put(prom_stdout);
407 return -ENODEV;
408}
409console_initcall(set_preferred_console);
410#endif /* CONFIG_PPC_MULTIPLATFORM */
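
The revision decoding in show_cpuinfo() above treats the PVR as a 16-bit version in the upper halfword plus major/minor fields in the low bytes, with Freescale parts (version bit 0x8000 set) using the dedicated PVR_MAJ/PVR_MIN macros instead. A small standalone sketch of the default, non-Freescale decode, using a made-up PVR value:

/* Default PVR decode as used in show_cpuinfo(); the sample value is
 * invented and the layout shown is the classic version/major/minor split. */
#include <stdio.h>

int main(void)
{
	unsigned int pvr = 0x00080202;		/* hypothetical 750-class PVR */
	unsigned int ver = pvr >> 16;		/* upper halfword, as PVR_VER() yields */
	unsigned short maj = (pvr >> 8) & 0xff;
	unsigned short min = pvr & 0xff;

	printf("revision\t: %hu.%hu (pvr %04x %04x)\n",
	       maj, min, ver, pvr & 0xffff);
	return 0;
}

For this sample value the output is "revision : 2.2 (pvr 0008 0202)", matching the format string used in the file above.
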
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
new file mode 100644
index 000000000000..9680ae99b084
--- /dev/null
+++ b/arch/powerpc/kernel/setup_32.c
@@ -0,0 +1,372 @@
1/*
2 * Common prep/pmac/chrp boot and setup code.
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <linux/string.h>
8#include <linux/sched.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/reboot.h>
12#include <linux/delay.h>
13#include <linux/initrd.h>
14#include <linux/ide.h>
15#include <linux/tty.h>
16#include <linux/bootmem.h>
17#include <linux/seq_file.h>
18#include <linux/root_dev.h>
19#include <linux/cpu.h>
20#include <linux/console.h>
21
22#include <asm/residual.h>
23#include <asm/io.h>
24#include <asm/prom.h>
25#include <asm/processor.h>
26#include <asm/pgtable.h>
27#include <asm/setup.h>
28#include <asm/amigappc.h>
29#include <asm/smp.h>
30#include <asm/elf.h>
31#include <asm/cputable.h>
32#include <asm/bootx.h>
33#include <asm/btext.h>
34#include <asm/machdep.h>
35#include <asm/uaccess.h>
36#include <asm/system.h>
37#include <asm/pmac_feature.h>
38#include <asm/sections.h>
39#include <asm/nvram.h>
40#include <asm/xmon.h>
41#include <asm/time.h>
42
43#define DBG(fmt...)
44
45#if defined CONFIG_KGDB
46#include <asm/kgdb.h>
47#endif
48
49extern void platform_init(void);
50extern void bootx_init(unsigned long r4, unsigned long phys);
51
52extern void ppc6xx_idle(void);
53extern void power4_idle(void);
54
55boot_infos_t *boot_infos;
56struct ide_machdep_calls ppc_ide_md;
57
58/* XXX should go elsewhere */
59int __irq_offset_value;
60EXPORT_SYMBOL(__irq_offset_value);
61
62int boot_cpuid;
63EXPORT_SYMBOL_GPL(boot_cpuid);
64int boot_cpuid_phys;
65
66unsigned long ISA_DMA_THRESHOLD;
67unsigned int DMA_MODE_READ;
68unsigned int DMA_MODE_WRITE;
69
70int have_of = 1;
71
72#ifdef CONFIG_PPC_MULTIPLATFORM
73int _machine = 0;
74
75extern void prep_init(void);
76extern void pmac_init(void);
77extern void chrp_init(void);
78
79dev_t boot_dev;
80#endif /* CONFIG_PPC_MULTIPLATFORM */
81
82#ifdef CONFIG_MAGIC_SYSRQ
83unsigned long SYSRQ_KEY = 0x54;
84#endif /* CONFIG_MAGIC_SYSRQ */
85
86#ifdef CONFIG_VGA_CONSOLE
87unsigned long vgacon_remap_base;
88#endif
89
90struct machdep_calls ppc_md;
91EXPORT_SYMBOL(ppc_md);
92
93/*
94 * These are used in binfmt_elf.c to put aux entries on the stack
95 * for each elf executable being started.
96 */
97int dcache_bsize;
98int icache_bsize;
99int ucache_bsize;
100
101/*
102 * We're called here very early in the boot. We determine the machine
103 * type and call the appropriate low-level setup functions.
104 * -- Cort <cort@fsmlabs.com>
105 *
106 * Note that the kernel may be running at an address which is different
107 * from the address that it was linked at, so we must use RELOC/PTRRELOC
108 * to access static data (including strings). -- paulus
109 */
110unsigned long __init early_init(unsigned long dt_ptr)
111{
112 unsigned long offset = reloc_offset();
113
114 /* First zero the BSS -- use memset_io, some platforms don't have
115 * caches on yet */
116 memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);
117
118 /*
119 * Identify the CPU type and fix up code sections
120 * that depend on which cpu we have.
121 */
122 identify_cpu(offset, 0);
123 do_cpu_ftr_fixups(offset);
124
125 return KERNELBASE + offset;
126}
127
128#ifdef CONFIG_PPC_MULTIPLATFORM
129/*
130 * The PPC_MULTIPLATFORM version of platform_init...
131 */
132void __init platform_init(void)
133{
134 /* if we didn't get any bootinfo telling us what we are... */
135 if (_machine == 0) {
136 /* prep boot loader tells us if we're prep or not */
137 if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
138 _machine = _MACH_prep;
139 }
140
141#ifdef CONFIG_PPC_PREP
142 /* not much more to do here, if prep */
143 if (_machine == _MACH_prep) {
144 prep_init();
145 return;
146 }
147#endif
148
149#ifdef CONFIG_ADB
150 if (strstr(cmd_line, "adb_sync")) {
151 extern int __adb_probe_sync;
152 __adb_probe_sync = 1;
153 }
154#endif /* CONFIG_ADB */
155
156 switch (_machine) {
157#ifdef CONFIG_PPC_PMAC
158 case _MACH_Pmac:
159 pmac_init();
160 break;
161#endif
162#ifdef CONFIG_PPC_CHRP
163 case _MACH_chrp:
164 chrp_init();
165 break;
166#endif
167 }
168}
169#endif
170
171/*
172 * Find out what kind of machine we're on and save any data we need
173 * from the early boot process (devtree is copied on pmac by prom_init()).
174 * This is called very early on the boot process, after a minimal
175 * MMU environment has been set up but before MMU_init is called.
176 */
177void __init machine_init(unsigned long dt_ptr, unsigned long phys)
178{
179 early_init_devtree(__va(dt_ptr));
180
181#ifdef CONFIG_CMDLINE
182 strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
183#endif /* CONFIG_CMDLINE */
184
185 platform_init();
186
187#ifdef CONFIG_6xx
188 ppc_md.power_save = ppc6xx_idle;
189#endif
190
191 if (ppc_md.progress)
192 ppc_md.progress("id mach(): done", 0x200);
193}
194
195#ifdef CONFIG_BOOKE_WDT
196/* Checks wdt=x and wdt_period=xx command-line option */
197int __init early_parse_wdt(char *p)
198{
199 if (p && strncmp(p, "0", 1) != 0)
200 booke_wdt_enabled = 1;
201
202 return 0;
203}
204early_param("wdt", early_parse_wdt);
205
206int __init early_parse_wdt_period (char *p)
207{
208 if (p)
209 booke_wdt_period = simple_strtoul(p, NULL, 0);
210
211 return 0;
212}
213early_param("wdt_period", early_parse_wdt_period);
214#endif /* CONFIG_BOOKE_WDT */
215
216/* Checks "l2cr=xxxx" command-line option */
217int __init ppc_setup_l2cr(char *str)
218{
219 if (cpu_has_feature(CPU_FTR_L2CR)) {
220 unsigned long val = simple_strtoul(str, NULL, 0);
221 printk(KERN_INFO "l2cr set to %lx\n", val);
222 _set_L2CR(0); /* force invalidate by disable cache */
223 _set_L2CR(val); /* and enable it */
224 }
225 return 1;
226}
227__setup("l2cr=", ppc_setup_l2cr);
228
229#ifdef CONFIG_GENERIC_NVRAM
230
231/* Generic nvram hooks used by drivers/char/gen_nvram.c */
232unsigned char nvram_read_byte(int addr)
233{
234 if (ppc_md.nvram_read_val)
235 return ppc_md.nvram_read_val(addr);
236 return 0xff;
237}
238EXPORT_SYMBOL(nvram_read_byte);
239
240void nvram_write_byte(unsigned char val, int addr)
241{
242 if (ppc_md.nvram_write_val)
243 ppc_md.nvram_write_val(addr, val);
244}
245EXPORT_SYMBOL(nvram_write_byte);
246
247void nvram_sync(void)
248{
249 if (ppc_md.nvram_sync)
250 ppc_md.nvram_sync();
251}
252EXPORT_SYMBOL(nvram_sync);
253
 254#endif /* CONFIG_GENERIC_NVRAM */
255
256static struct cpu cpu_devices[NR_CPUS];
257
258int __init ppc_init(void)
259{
260 int i;
261
262 /* clear the progress line */
263 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
264
265 /* register CPU devices */
266 for (i = 0; i < NR_CPUS; i++)
267 if (cpu_possible(i))
268 register_cpu(&cpu_devices[i], i, NULL);
269
270 /* call platform init */
271 if (ppc_md.init != NULL) {
272 ppc_md.init();
273 }
274 return 0;
275}
276
277arch_initcall(ppc_init);
278
279/* Warning, IO base is not yet inited */
280void __init setup_arch(char **cmdline_p)
281{
282 extern char *klimit;
283 extern void do_init_bootmem(void);
284
285 /* so udelay does something sensible, assume <= 1000 bogomips */
286 loops_per_jiffy = 500000000 / HZ;
287
288 unflatten_device_tree();
289 finish_device_tree();
290
291#ifdef CONFIG_BOOTX_TEXT
292 init_boot_display();
293#endif
294
295#ifdef CONFIG_PPC_PMAC
296 /* This could be called "early setup arch", it must be done
297 * now because xmon need it
298 */
299 if (_machine == _MACH_Pmac)
300 pmac_feature_init(); /* New cool way */
301#endif
302
303#ifdef CONFIG_XMON
304 xmon_map_scc();
305 if (strstr(cmd_line, "xmon")) {
306 xmon_init(1);
307 debugger(NULL);
308 }
309#endif /* CONFIG_XMON */
310 if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
311
312#if defined(CONFIG_KGDB)
313 if (ppc_md.kgdb_map_scc)
314 ppc_md.kgdb_map_scc();
315 set_debug_traps();
316 if (strstr(cmd_line, "gdb")) {
317 if (ppc_md.progress)
318 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
319 printk("kgdb breakpoint activated\n");
320 breakpoint();
321 }
322#endif
323
324 /*
325 * Set cache line size based on type of cpu as a default.
326 * Systems with OF can look in the properties on the cpu node(s)
327 * for a possibly more accurate value.
328 */
329 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
330 dcache_bsize = cur_cpu_spec->dcache_bsize;
331 icache_bsize = cur_cpu_spec->icache_bsize;
332 ucache_bsize = 0;
333 } else
334 ucache_bsize = dcache_bsize = icache_bsize
335 = cur_cpu_spec->dcache_bsize;
336
337 /* reboot on panic */
338 panic_timeout = 180;
339
340 init_mm.start_code = PAGE_OFFSET;
341 init_mm.end_code = (unsigned long) _etext;
342 init_mm.end_data = (unsigned long) _edata;
343 init_mm.brk = (unsigned long) klimit;
344
345 /* Save unparsed command line copy for /proc/cmdline */
346 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
347 *cmdline_p = cmd_line;
348
349 parse_early_param();
350
351 /* set up the bootmem stuff with available memory */
352 do_init_bootmem();
353 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
354
355#ifdef CONFIG_PPC_OCP
356 /* Initialize OCP device list */
357 ocp_early_init();
358 if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
359#endif
360
361#ifdef CONFIG_DUMMY_CONSOLE
362 conswitchp = &dummy_con;
363#endif
364
365 ppc_md.setup_arch();
366 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
367
368 paging_init();
369
370 /* this is for modules since _machine can be a define -- Cort */
371 ppc_md.ppc_machine = _machine;
372}
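
setup_arch() above seeds loops_per_jiffy with 500000000 / HZ before calibration so that udelay() behaves sensibly, and the comment notes this corresponds to assuming at most 1000 bogomips. The arithmetic that turns loops_per_jiffy back into the bogomips figure printed by show_cpuinfo() can be checked in isolation; the HZ value below is just a sample tick rate, not taken from the patch:

/* Round-trips the default loops_per_jiffy through the bogomips formula
 * used in show_cpuinfo(); HZ here is an arbitrary example. */
#include <stdio.h>

#define HZ 250

int main(void)
{
	unsigned long loops_per_jiffy = 500000000 / HZ;	/* default from setup_arch() */
	unsigned long whole = loops_per_jiffy / (500000 / HZ);
	unsigned long frac  = (loops_per_jiffy / (5000 / HZ)) % 100;

	printf("bogomips\t: %lu.%02lu\n", whole, frac);	/* prints 1000.00 */
	return 0;
}
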
diff --git a/arch/ppc64/kernel/setup.c b/arch/powerpc/kernel/setup_64.c
index 5ac48bd64891..40c48100bf1b 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -37,7 +37,6 @@
37#include <asm/prom.h> 37#include <asm/prom.h>
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40#include <asm/bootinfo.h>
41#include <asm/smp.h> 40#include <asm/smp.h>
42#include <asm/elf.h> 41#include <asm/elf.h>
43#include <asm/machdep.h> 42#include <asm/machdep.h>
@@ -58,6 +57,9 @@
58#include <asm/mmu.h> 57#include <asm/mmu.h>
59#include <asm/lmb.h> 58#include <asm/lmb.h>
60#include <asm/iSeries/ItLpNaca.h> 59#include <asm/iSeries/ItLpNaca.h>
60#include <asm/firmware.h>
61#include <asm/systemcfg.h>
62#include <asm/xmon.h>
61 63
62#ifdef DEBUG 64#ifdef DEBUG
63#define DBG(fmt...) udbg_printf(fmt) 65#define DBG(fmt...) udbg_printf(fmt)
@@ -136,24 +138,7 @@ static struct notifier_block ppc64_panic_block = {
136 .priority = INT_MIN /* may not return; must be done last */ 138 .priority = INT_MIN /* may not return; must be done last */
137}; 139};
138 140
139/* 141#ifdef CONFIG_SMP
140 * Perhaps we can put the pmac screen_info[] here
141 * on pmac as well so we don't need the ifdef's.
142 * Until we get multiple-console support in here
143 * that is. -- Cort
144 * Maybe tie it to serial consoles, since this is really what
145 * these processors use on existing boards. -- Dan
146 */
147struct screen_info screen_info = {
148 .orig_x = 0,
149 .orig_y = 25,
150 .orig_video_cols = 80,
151 .orig_video_lines = 25,
152 .orig_video_isVGA = 1,
153 .orig_video_points = 16
154};
155
156#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP)
157 142
158static int smt_enabled_cmdline; 143static int smt_enabled_cmdline;
159 144
@@ -306,15 +291,13 @@ static void __init setup_cpu_maps(void)
306 291
307 systemcfg->processorCount = num_present_cpus(); 292 systemcfg->processorCount = num_present_cpus();
308} 293}
309#endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */ 294#endif /* CONFIG_SMP */
310
311
312#ifdef CONFIG_PPC_MULTIPLATFORM
313 295
314extern struct machdep_calls pSeries_md; 296extern struct machdep_calls pSeries_md;
315extern struct machdep_calls pmac_md; 297extern struct machdep_calls pmac_md;
316extern struct machdep_calls maple_md; 298extern struct machdep_calls maple_md;
317extern struct machdep_calls bpa_md; 299extern struct machdep_calls bpa_md;
300extern struct machdep_calls iseries_md;
318 301
319/* Ultimately, stuff them in an elf section like initcalls... */ 302/* Ultimately, stuff them in an elf section like initcalls... */
320static struct machdep_calls __initdata *machines[] = { 303static struct machdep_calls __initdata *machines[] = {
@@ -330,6 +313,9 @@ static struct machdep_calls __initdata *machines[] = {
330#ifdef CONFIG_PPC_BPA 313#ifdef CONFIG_PPC_BPA
331 &bpa_md, 314 &bpa_md,
332#endif 315#endif
316#ifdef CONFIG_PPC_ISERIES
317 &iseries_md,
318#endif
333 NULL 319 NULL
334}; 320};
335 321
@@ -401,7 +387,8 @@ void __init early_setup(unsigned long dt_ptr)
401 /* 387 /*
402 * Initialize stab / SLB management 388 * Initialize stab / SLB management
403 */ 389 */
404 stab_initialize(lpaca->stab_real); 390 if (!firmware_has_feature(FW_FEATURE_ISERIES))
391 stab_initialize(lpaca->stab_real);
405 392
406 /* 393 /*
407 * Initialize the MMU Hash table and create the linear mapping 394 * Initialize the MMU Hash table and create the linear mapping
@@ -532,8 +519,6 @@ static void __init check_for_initrd(void)
532#endif /* CONFIG_BLK_DEV_INITRD */ 519#endif /* CONFIG_BLK_DEV_INITRD */
533} 520}
534 521
535#endif /* CONFIG_PPC_MULTIPLATFORM */
536
537/* 522/*
538 * Do some initial setup of the system. The parameters are those which 523 * Do some initial setup of the system. The parameters are those which
539 * were passed in from the bootloader. 524 * were passed in from the bootloader.
@@ -542,14 +527,6 @@ void __init setup_system(void)
542{ 527{
543 DBG(" -> setup_system()\n"); 528 DBG(" -> setup_system()\n");
544 529
545#ifdef CONFIG_PPC_ISERIES
546 /* pSeries systems are identified in prom.c via OF. */
547 if (itLpNaca.xLparInstalled == 1)
548 systemcfg->platform = PLATFORM_ISERIES_LPAR;
549
550 ppc_md.init_early();
551#else /* CONFIG_PPC_ISERIES */
552
553 /* 530 /*
554 * Unflatten the device-tree passed by prom_init or kexec 531 * Unflatten the device-tree passed by prom_init or kexec
555 */ 532 */
@@ -592,6 +569,10 @@ void __init setup_system(void)
592 */ 569 */
593 finish_device_tree(); 570 finish_device_tree();
594 571
572#ifdef CONFIG_BOOTX_TEXT
573 init_boot_display();
574#endif
575
595 /* 576 /*
596 * Initialize xmon 577 * Initialize xmon
597 */ 578 */
@@ -607,9 +588,8 @@ void __init setup_system(void)
607 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); 588 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
608 589
609 parse_early_param(); 590 parse_early_param();
610#endif /* !CONFIG_PPC_ISERIES */
611 591
612#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) 592#ifdef CONFIG_SMP
613 /* 593 /*
614 * iSeries has already initialized the cpu maps at this point. 594 * iSeries has already initialized the cpu maps at this point.
615 */ 595 */
@@ -619,7 +599,7 @@ void __init setup_system(void)
619 * we can map physical -> logical CPU ids 599 * we can map physical -> logical CPU ids
620 */ 600 */
621 smp_release_cpus(); 601 smp_release_cpus();
622#endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */ 602#endif
623 603
624 printk("Starting Linux PPC64 %s\n", system_utsname.version); 604 printk("Starting Linux PPC64 %s\n", system_utsname.version);
625 605
@@ -644,51 +624,6 @@ void __init setup_system(void)
644 DBG(" <- setup_system()\n"); 624 DBG(" <- setup_system()\n");
645} 625}
646 626
647/* also used by kexec */
648void machine_shutdown(void)
649{
650 if (ppc_md.nvram_sync)
651 ppc_md.nvram_sync();
652}
653
654void machine_restart(char *cmd)
655{
656 machine_shutdown();
657 ppc_md.restart(cmd);
658#ifdef CONFIG_SMP
659 smp_send_stop();
660#endif
661 printk(KERN_EMERG "System Halted, OK to turn off power\n");
662 local_irq_disable();
663 while (1) ;
664}
665
666void machine_power_off(void)
667{
668 machine_shutdown();
669 ppc_md.power_off();
670#ifdef CONFIG_SMP
671 smp_send_stop();
672#endif
673 printk(KERN_EMERG "System Halted, OK to turn off power\n");
674 local_irq_disable();
675 while (1) ;
676}
677/* Used by the G5 thermal driver */
678EXPORT_SYMBOL_GPL(machine_power_off);
679
680void machine_halt(void)
681{
682 machine_shutdown();
683 ppc_md.halt();
684#ifdef CONFIG_SMP
685 smp_send_stop();
686#endif
687 printk(KERN_EMERG "System Halted, OK to turn off power\n");
688 local_irq_disable();
689 while (1) ;
690}
691
692static int ppc64_panic_event(struct notifier_block *this, 627static int ppc64_panic_event(struct notifier_block *this,
693 unsigned long event, void *ptr) 628 unsigned long event, void *ptr)
694{ 629{
@@ -696,99 +631,6 @@ static int ppc64_panic_event(struct notifier_block *this,
696 return NOTIFY_DONE; 631 return NOTIFY_DONE;
697} 632}
698 633
699
700#ifdef CONFIG_SMP
701DEFINE_PER_CPU(unsigned int, pvr);
702#endif
703
704static int show_cpuinfo(struct seq_file *m, void *v)
705{
706 unsigned long cpu_id = (unsigned long)v - 1;
707 unsigned int pvr;
708 unsigned short maj;
709 unsigned short min;
710
711 if (cpu_id == NR_CPUS) {
712 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
713
714 if (ppc_md.get_cpuinfo != NULL)
715 ppc_md.get_cpuinfo(m);
716
717 return 0;
718 }
719
720 /* We only show online cpus: disable preempt (overzealous, I
721 * knew) to prevent cpu going down. */
722 preempt_disable();
723 if (!cpu_online(cpu_id)) {
724 preempt_enable();
725 return 0;
726 }
727
728#ifdef CONFIG_SMP
729 pvr = per_cpu(pvr, cpu_id);
730#else
731 pvr = mfspr(SPRN_PVR);
732#endif
733 maj = (pvr >> 8) & 0xFF;
734 min = pvr & 0xFF;
735
736 seq_printf(m, "processor\t: %lu\n", cpu_id);
737 seq_printf(m, "cpu\t\t: ");
738
739 if (cur_cpu_spec->pvr_mask)
740 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
741 else
742 seq_printf(m, "unknown (%08x)", pvr);
743
744#ifdef CONFIG_ALTIVEC
745 if (cpu_has_feature(CPU_FTR_ALTIVEC))
746 seq_printf(m, ", altivec supported");
747#endif /* CONFIG_ALTIVEC */
748
749 seq_printf(m, "\n");
750
751 /*
752 * Assume here that all clock rates are the same in a
753 * smp system. -- Cort
754 */
755 seq_printf(m, "clock\t\t: %lu.%06luMHz\n", ppc_proc_freq / 1000000,
756 ppc_proc_freq % 1000000);
757
758 seq_printf(m, "revision\t: %hd.%hd\n\n", maj, min);
759
760 preempt_enable();
761 return 0;
762}
763
764static void *c_start(struct seq_file *m, loff_t *pos)
765{
766 return *pos <= NR_CPUS ? (void *)((*pos)+1) : NULL;
767}
768static void *c_next(struct seq_file *m, void *v, loff_t *pos)
769{
770 ++*pos;
771 return c_start(m, pos);
772}
773static void c_stop(struct seq_file *m, void *v)
774{
775}
776struct seq_operations cpuinfo_op = {
777 .start =c_start,
778 .next = c_next,
779 .stop = c_stop,
780 .show = show_cpuinfo,
781};
782
783/*
784 * These three variables are used to save values passed to us by prom_init()
785 * via the device tree. The TCE variables are needed because with a memory_limit
786 * in force we may need to explicitly map the TCE are at the top of RAM.
787 */
788unsigned long memory_limit;
789unsigned long tce_alloc_start;
790unsigned long tce_alloc_end;
791
792#ifdef CONFIG_PPC_ISERIES 634#ifdef CONFIG_PPC_ISERIES
793/* 635/*
794 * On iSeries we just parse the mem=X option from the command line. 636 * On iSeries we just parse the mem=X option from the command line.
@@ -806,130 +648,6 @@ static int __init early_parsemem(char *p)
806early_param("mem", early_parsemem); 648early_param("mem", early_parsemem);
807#endif /* CONFIG_PPC_ISERIES */ 649#endif /* CONFIG_PPC_ISERIES */
808 650
809#ifdef CONFIG_PPC_MULTIPLATFORM
810static int __init set_preferred_console(void)
811{
812 struct device_node *prom_stdout = NULL;
813 char *name;
814 u32 *spd;
815 int offset = 0;
816
817 DBG(" -> set_preferred_console()\n");
818
819 /* The user has requested a console so this is already set up. */
820 if (strstr(saved_command_line, "console=")) {
821 DBG(" console was specified !\n");
822 return -EBUSY;
823 }
824
825 if (!of_chosen) {
826 DBG(" of_chosen is NULL !\n");
827 return -ENODEV;
828 }
829 /* We are getting a weird phandle from OF ... */
830 /* ... So use the full path instead */
831 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
832 if (name == NULL) {
833 DBG(" no linux,stdout-path !\n");
834 return -ENODEV;
835 }
836 prom_stdout = of_find_node_by_path(name);
837 if (!prom_stdout) {
838 DBG(" can't find stdout package %s !\n", name);
839 return -ENODEV;
840 }
841 DBG("stdout is %s\n", prom_stdout->full_name);
842
843 name = (char *)get_property(prom_stdout, "name", NULL);
844 if (!name) {
845 DBG(" stdout package has no name !\n");
846 goto not_found;
847 }
848 spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
849
850 if (0)
851 ;
852#ifdef CONFIG_SERIAL_8250_CONSOLE
853 else if (strcmp(name, "serial") == 0) {
854 int i;
855 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
856 if (i > 8) {
857 switch (reg[1]) {
858 case 0x3f8:
859 offset = 0;
860 break;
861 case 0x2f8:
862 offset = 1;
863 break;
864 case 0x898:
865 offset = 2;
866 break;
867 case 0x890:
868 offset = 3;
869 break;
870 default:
871 /* We dont recognise the serial port */
872 goto not_found;
873 }
874 }
875 }
876#endif /* CONFIG_SERIAL_8250_CONSOLE */
877#ifdef CONFIG_PPC_PSERIES
878 else if (strcmp(name, "vty") == 0) {
879 u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
880 char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
881
882 if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
883 /* Host Virtual Serial Interface */
884 int offset;
885 switch (reg[0]) {
886 case 0x30000000:
887 offset = 0;
888 break;
889 case 0x30000001:
890 offset = 1;
891 break;
892 default:
893 goto not_found;
894 }
895 of_node_put(prom_stdout);
896 DBG("Found hvsi console at offset %d\n", offset);
897 return add_preferred_console("hvsi", offset, NULL);
898 } else {
899 /* pSeries LPAR virtual console */
900 of_node_put(prom_stdout);
901 DBG("Found hvc console\n");
902 return add_preferred_console("hvc", 0, NULL);
903 }
904 }
905#endif /* CONFIG_PPC_PSERIES */
906#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
907 else if (strcmp(name, "ch-a") == 0)
908 offset = 0;
909 else if (strcmp(name, "ch-b") == 0)
910 offset = 1;
911#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
912 else
913 goto not_found;
914 of_node_put(prom_stdout);
915
916 DBG("Found serial console at ttyS%d\n", offset);
917
918 if (spd) {
919 static char __initdata opt[16];
920 sprintf(opt, "%d", *spd);
921 return add_preferred_console("ttyS", offset, opt);
922 } else
923 return add_preferred_console("ttyS", offset, NULL);
924
925 not_found:
926 DBG("No preferred console found !\n");
927 of_node_put(prom_stdout);
928 return -ENODEV;
929}
930console_initcall(set_preferred_console);
931#endif /* CONFIG_PPC_MULTIPLATFORM */
932
933#ifdef CONFIG_IRQSTACKS 651#ifdef CONFIG_IRQSTACKS
934static void __init irqstack_early_init(void) 652static void __init irqstack_early_init(void)
935{ 653{
@@ -983,23 +701,22 @@ void __init setup_syscall_map(void)
983{ 701{
984 unsigned int i, count64 = 0, count32 = 0; 702 unsigned int i, count64 = 0, count32 = 0;
985 extern unsigned long *sys_call_table; 703 extern unsigned long *sys_call_table;
986 extern unsigned long *sys_call_table32;
987 extern unsigned long sys_ni_syscall; 704 extern unsigned long sys_ni_syscall;
988 705
989 706
990 for (i = 0; i < __NR_syscalls; i++) { 707 for (i = 0; i < __NR_syscalls; i++) {
991 if (sys_call_table[i] == sys_ni_syscall) 708 if (sys_call_table[i*2] != sys_ni_syscall) {
992 continue; 709 count64++;
993 count64++; 710 systemcfg->syscall_map_64[i >> 5] |=
994 systemcfg->syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f); 711 0x80000000UL >> (i & 0x1f);
995 } 712 }
996 for (i = 0; i < __NR_syscalls; i++) { 713 if (sys_call_table[i*2+1] != sys_ni_syscall) {
997 if (sys_call_table32[i] == sys_ni_syscall) 714 count32++;
998 continue; 715 systemcfg->syscall_map_32[i >> 5] |=
999 count32++; 716 0x80000000UL >> (i & 0x1f);
1000 systemcfg->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f); 717 }
1001 } 718 }
1002 printk(KERN_INFO "Syscall map setup, %d 32 bits and %d 64 bits syscalls\n", 719 printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
1003 count32, count64); 720 count32, count64);
1004} 721}
1005 722
@@ -1047,6 +764,10 @@ void __init setup_arch(char **cmdline_p)
1047 /* initialize the syscall map in systemcfg */ 764 /* initialize the syscall map in systemcfg */
1048 setup_syscall_map(); 765 setup_syscall_map();
1049 766
767#ifdef CONFIG_DUMMY_CONSOLE
768 conswitchp = &dummy_con;
769#endif
770
1050 ppc_md.setup_arch(); 771 ppc_md.setup_arch();
1051 772
1052 /* Use the default idle loop if the platform hasn't provided one. */ 773 /* Use the default idle loop if the platform hasn't provided one. */
@@ -1091,15 +812,6 @@ void ppc64_terminate_msg(unsigned int src, const char *msg)
1091 printk("[terminate]%04x %s\n", src, msg); 812 printk("[terminate]%04x %s\n", src, msg);
1092} 813}
1093 814
1094/* This should only be called on processor 0 during calibrate decr */
1095void __init setup_default_decr(void)
1096{
1097 struct paca_struct *lpaca = get_paca();
1098
1099 lpaca->default_decr = tb_ticks_per_jiffy;
1100 lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
1101}
1102
1103#ifndef CONFIG_PPC_ISERIES 815#ifndef CONFIG_PPC_ISERIES
1104/* 816/*
1105 * This function can be used by platforms to "find" legacy serial ports. 817 * This function can be used by platforms to "find" legacy serial ports.
diff --git a/arch/ppc64/kernel/signal32.c b/arch/powerpc/kernel/signal_32.c
index a8b7a5a56bb4..444c3e81884c 100644
--- a/arch/ppc64/kernel/signal32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1,56 +1,353 @@
1/* 1/*
2 * signal32.c: Support 32bit signal syscalls. 2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 * 3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Copyright (C) 2001 IBM 6 * Copyright (C) 2001 IBM
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
7 * 9 *
8 * These routines maintain argument size conversion between 32bit and 64bit 10 * Derived from "arch/i386/kernel/signal.c"
9 * environment. 11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
10 * 13 *
11 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 15 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 16 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 17 * 2 of the License, or (at your option) any later version.
15 */ 18 */
16 19
17#include <linux/config.h> 20#include <linux/config.h>
18#include <linux/sched.h> 21#include <linux/sched.h>
19#include <linux/mm.h> 22#include <linux/mm.h>
20#include <linux/smp.h> 23#include <linux/smp.h>
21#include <linux/smp_lock.h> 24#include <linux/smp_lock.h>
22#include <linux/kernel.h> 25#include <linux/kernel.h>
23#include <linux/signal.h> 26#include <linux/signal.h>
24#include <linux/syscalls.h>
25#include <linux/errno.h> 27#include <linux/errno.h>
26#include <linux/elf.h> 28#include <linux/elf.h>
29#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
27#include <linux/compat.h> 31#include <linux/compat.h>
28#include <linux/ptrace.h> 32#include <linux/ptrace.h>
29#include <asm/ppc32.h> 33#else
34#include <linux/wait.h>
35#include <linux/ptrace.h>
36#include <linux/unistd.h>
37#include <linux/stddef.h>
38#include <linux/tty.h>
39#include <linux/binfmts.h>
40#include <linux/suspend.h>
41#endif
42
30#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/cacheflush.h>
45#ifdef CONFIG_PPC64
46#include <asm/ppc32.h>
31#include <asm/ppcdebug.h> 47#include <asm/ppcdebug.h>
32#include <asm/unistd.h> 48#include <asm/unistd.h>
33#include <asm/cacheflush.h>
34#include <asm/vdso.h> 49#include <asm/vdso.h>
50#else
51#include <asm/ucontext.h>
52#include <asm/pgtable.h>
53#endif
35 54
36#define DEBUG_SIG 0 55#undef DEBUG_SIG
37 56
38#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 57#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
39 58
40#define GP_REGS_SIZE32 min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32)) 59#ifdef CONFIG_PPC64
60#define do_signal do_signal32
61#define sys_sigsuspend compat_sys_sigsuspend
62#define sys_rt_sigsuspend compat_sys_rt_sigsuspend
63#define sys_rt_sigreturn compat_sys_rt_sigreturn
64#define sys_sigaction compat_sys_sigaction
65#define sys_swapcontext compat_sys_swapcontext
66#define sys_sigreturn compat_sys_sigreturn
67
68#define old_sigaction old_sigaction32
69#define sigcontext sigcontext32
70#define mcontext mcontext32
71#define ucontext ucontext32
72
73/*
74 * Returning 0 means we return to userspace via
75 * ret_from_except and thus restore all user
76 * registers from *regs. This is what we need
77 * to do when a signal has been delivered.
78 */
79#define sigreturn_exit(regs) return 0
80
81#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
82#undef __SIGNAL_FRAMESIZE
83#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
84#undef ELF_NVRREG
85#define ELF_NVRREG ELF_NVRREG32
86
87/*
88 * Functions for flipping sigsets (thanks to brain dead generic
89 * implementation that makes things simple for little endian only)
90 */
91static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
92{
93 compat_sigset_t cset;
94
95 switch (_NSIG_WORDS) {
96 case 4: cset.sig[5] = set->sig[3] & 0xffffffffull;
97 cset.sig[7] = set->sig[3] >> 32;
98 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
99 cset.sig[5] = set->sig[2] >> 32;
100 case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
101 cset.sig[3] = set->sig[1] >> 32;
102 case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
103 cset.sig[1] = set->sig[0] >> 32;
104 }
105 return copy_to_user(uset, &cset, sizeof(*uset));
106}
107
108static inline int get_sigset_t(sigset_t *set,
109 const compat_sigset_t __user *uset)
110{
111 compat_sigset_t s32;
112
113 if (copy_from_user(&s32, uset, sizeof(*uset)))
114 return -EFAULT;
115
116 /*
117 * Swap the 2 words of the 64-bit sigset_t (they are stored
118 * in the "wrong" endian in 32-bit user storage).
119 */
120 switch (_NSIG_WORDS) {
121 case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
122 case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
123 case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
124 case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
125 }
126 return 0;
127}
128
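
put_sigset_t() and get_sigset_t() above split each 64-bit sigset_t word into two 32-bit compat words, low half first, and reassemble them on the way back in. The round trip is just masking and shifting, as this self-contained sketch shows; the types and the single-word case are simplified for illustration:

/* Round-trips one 64-bit sigset word through the 32-bit compat layout
 * used by put_sigset_t()/get_sigset_t(); variable names are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned long long set = 0x0000000100000002ULL;	/* one sigset_t word */
	unsigned int sig32[2];

	sig32[0] = set & 0xffffffffull;		/* low half, as in put_sigset_t() */
	sig32[1] = set >> 32;			/* high half */

	/* and back again, as get_sigset_t() does */
	unsigned long long back =
		sig32[0] | ((unsigned long long)sig32[1] << 32);

	printf("%016llx -> %08x %08x -> %016llx\n", set, sig32[0], sig32[1], back);
	return 0;
}
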
129static inline int get_old_sigaction(struct k_sigaction *new_ka,
130 struct old_sigaction __user *act)
131{
132 compat_old_sigset_t mask;
133 compat_uptr_t handler, restorer;
134
135 if (get_user(handler, &act->sa_handler) ||
136 __get_user(restorer, &act->sa_restorer) ||
137 __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
138 __get_user(mask, &act->sa_mask))
139 return -EFAULT;
140 new_ka->sa.sa_handler = compat_ptr(handler);
141 new_ka->sa.sa_restorer = compat_ptr(restorer);
142 siginitset(&new_ka->sa.sa_mask, mask);
143 return 0;
144}
145
146static inline compat_uptr_t to_user_ptr(void *kp)
147{
148 return (compat_uptr_t)(u64)kp;
149}
150
151#define from_user_ptr(p) compat_ptr(p)
152
153static inline int save_general_regs(struct pt_regs *regs,
154 struct mcontext __user *frame)
155{
156 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
157 int i;
158
159 for (i = 0; i <= PT_RESULT; i ++)
160 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
161 return -EFAULT;
162 return 0;
163}
164
165static inline int restore_general_regs(struct pt_regs *regs,
166 struct mcontext __user *sr)
167{
168 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
169 int i;
170
171 for (i = 0; i <= PT_RESULT; i++) {
172 if ((i == PT_MSR) || (i == PT_SOFTE))
173 continue;
174 if (__get_user(gregs[i], &sr->mc_gregs[i]))
175 return -EFAULT;
176 }
177 return 0;
178}
179
180#else /* CONFIG_PPC64 */
181
182extern void sigreturn_exit(struct pt_regs *);
183
184#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
185
186static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
187{
188 return copy_to_user(uset, set, sizeof(*uset));
189}
190
191static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
192{
193 return copy_from_user(set, uset, sizeof(*uset));
194}
195
196static inline int get_old_sigaction(struct k_sigaction *new_ka,
197 struct old_sigaction __user *act)
198{
199 old_sigset_t mask;
200
201 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
202 __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
203 __get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
204 return -EFAULT;
205 __get_user(new_ka->sa.sa_flags, &act->sa_flags);
206 __get_user(mask, &act->sa_mask);
207 siginitset(&new_ka->sa.sa_mask, mask);
208 return 0;
209}
210
211#define to_user_ptr(p) (p)
212#define from_user_ptr(p) (p)
213
214static inline int save_general_regs(struct pt_regs *regs,
215 struct mcontext __user *frame)
216{
217 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
218}
219
220static inline int restore_general_regs(struct pt_regs *regs,
221 struct mcontext __user *sr)
222{
223 /* copy up to but not including MSR */
224 if (__copy_from_user(regs, &sr->mc_gregs,
225 PT_MSR * sizeof(elf_greg_t)))
226 return -EFAULT;
227 /* copy from orig_r3 (the word after the MSR) up to the end */
228 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
229 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
230 return -EFAULT;
231 return 0;
232}
233
234#endif /* CONFIG_PPC64 */
235
236int do_signal(sigset_t *oldset, struct pt_regs *regs);
237
238/*
239 * Atomically swap in the new signal mask, and wait for a signal.
240 */
241long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
242 struct pt_regs *regs)
243{
244 sigset_t saveset;
245
246 mask &= _BLOCKABLE;
247 spin_lock_irq(&current->sighand->siglock);
248 saveset = current->blocked;
249 siginitset(&current->blocked, mask);
250 recalc_sigpending();
251 spin_unlock_irq(&current->sighand->siglock);
252
253 regs->result = -EINTR;
254 regs->gpr[3] = EINTR;
255 regs->ccr |= 0x10000000;
256 while (1) {
257 current->state = TASK_INTERRUPTIBLE;
258 schedule();
259 if (do_signal(&saveset, regs))
260 sigreturn_exit(regs);
261 }
262}
263
264long sys_rt_sigsuspend(
265#ifdef CONFIG_PPC64
266 compat_sigset_t __user *unewset,
267#else
268 sigset_t __user *unewset,
269#endif
270 size_t sigsetsize, int p3, int p4,
271 int p6, int p7, struct pt_regs *regs)
272{
273 sigset_t saveset, newset;
274
275 /* XXX: Don't preclude handling different sized sigset_t's. */
276 if (sigsetsize != sizeof(sigset_t))
277 return -EINVAL;
278
279 if (get_sigset_t(&newset, unewset))
280 return -EFAULT;
281 sigdelsetmask(&newset, ~_BLOCKABLE);
282
283 spin_lock_irq(&current->sighand->siglock);
284 saveset = current->blocked;
285 current->blocked = newset;
286 recalc_sigpending();
287 spin_unlock_irq(&current->sighand->siglock);
288
289 regs->result = -EINTR;
290 regs->gpr[3] = EINTR;
291 regs->ccr |= 0x10000000;
292 while (1) {
293 current->state = TASK_INTERRUPTIBLE;
294 schedule();
295 if (do_signal(&saveset, regs))
296 sigreturn_exit(regs);
297 }
298}
299
300#ifdef CONFIG_PPC32
301long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
302 int r6, int r7, int r8, struct pt_regs *regs)
303{
304 return do_sigaltstack(uss, uoss, regs->gpr[1]);
305}
306#endif
307
308long sys_sigaction(int sig, struct old_sigaction __user *act,
309 struct old_sigaction __user *oact)
310{
311 struct k_sigaction new_ka, old_ka;
312 int ret;
313
314#ifdef CONFIG_PPC64
315 if (sig < 0)
316 sig = -sig;
317#endif
318
319 if (act) {
320 if (get_old_sigaction(&new_ka, act))
321 return -EFAULT;
322 }
323
324 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
325 if (!ret && oact) {
326 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
327 __put_user(to_user_ptr(old_ka.sa.sa_handler),
328 &oact->sa_handler) ||
329 __put_user(to_user_ptr(old_ka.sa.sa_restorer),
330 &oact->sa_restorer) ||
331 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
332 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
333 return -EFAULT;
334 }
335
336 return ret;
337}
41 338
42/* 339/*
43 * When we have signals to deliver, we set up on the 340 * When we have signals to deliver, we set up on the
44 * user stack, going down from the original stack pointer: 341 * user stack, going down from the original stack pointer:
45 * a sigregs32 struct 342 * a sigregs struct
46 * a sigcontext32 struct 343 * a sigcontext struct
47 * a gap of __SIGNAL_FRAMESIZE32 bytes 344 * a gap of __SIGNAL_FRAMESIZE bytes
48 * 345 *
49 * Each of these things must be a multiple of 16 bytes in size. 346 * Each of these things must be a multiple of 16 bytes in size.
50 * 347 *
51 */ 348 */
52struct sigregs32 { 349struct sigregs {
53 struct mcontext32 mctx; /* all the register values */ 350 struct mcontext mctx; /* all the register values */
54 /* 351 /*
55 * Programs using the rs6000/xcoff abi can save up to 19 gp 352 * Programs using the rs6000/xcoff abi can save up to 19 gp
56 * regs and 18 fp regs below sp before decrementing it. 353 * regs and 18 fp regs below sp before decrementing it.
@@ -64,17 +361,21 @@ struct sigregs32 {
64/* 361/*
65 * When we have rt signals to deliver, we set up on the 362 * When we have rt signals to deliver, we set up on the
66 * user stack, going down from the original stack pointer: 363 * user stack, going down from the original stack pointer:
67 * one rt_sigframe32 struct (siginfo + ucontext + ABI gap) 364 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
68 * a gap of __SIGNAL_FRAMESIZE32+16 bytes 365 * a gap of __SIGNAL_FRAMESIZE+16 bytes
69 * (the +16 is to get the siginfo and ucontext32 in the same 366 * (the +16 is to get the siginfo and ucontext in the same
70 * positions as in older kernels). 367 * positions as in older kernels).
71 * 368 *
72 * Each of these things must be a multiple of 16 bytes in size. 369 * Each of these things must be a multiple of 16 bytes in size.
73 * 370 *
74 */ 371 */
75struct rt_sigframe32 { 372struct rt_sigframe {
76 compat_siginfo_t info; 373#ifdef CONFIG_PPC64
77 struct ucontext32 uc; 374 compat_siginfo_t info;
375#else
376 struct siginfo info;
377#endif
378 struct ucontext uc;
78 /* 379 /*
79 * Programs using the rs6000/xcoff abi can save up to 19 gp 380 * Programs using the rs6000/xcoff abi can save up to 19 gp
80 * regs and 18 fp regs below sp before decrementing it. 381 * regs and 18 fp regs below sp before decrementing it.
@@ -82,76 +383,34 @@ struct rt_sigframe32 {
82 int abigap[56]; 383 int abigap[56];
83}; 384};
84 385
85
86/*
87 * Common utility functions used by signal and context support
88 *
89 */
90
91/*
92 * Restore the user process's signal mask
93 * (implemented in signal.c)
94 */
95extern void restore_sigmask(sigset_t *set);
96
97/*
98 * Functions for flipping sigsets (thanks to brain dead generic
99 * implementation that makes things simple for little endian only
100 */
101static inline void compat_from_sigset(compat_sigset_t *compat, sigset_t *set)
102{
103 switch (_NSIG_WORDS) {
104 case 4: compat->sig[5] = set->sig[3] & 0xffffffffull ;
105 compat->sig[7] = set->sig[3] >> 32;
106 case 3: compat->sig[4] = set->sig[2] & 0xffffffffull ;
107 compat->sig[5] = set->sig[2] >> 32;
108 case 2: compat->sig[2] = set->sig[1] & 0xffffffffull ;
109 compat->sig[3] = set->sig[1] >> 32;
110 case 1: compat->sig[0] = set->sig[0] & 0xffffffffull ;
111 compat->sig[1] = set->sig[0] >> 32;
112 }
113}
114
115static inline void sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
116{
117 switch (_NSIG_WORDS) {
118 case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
119 case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
120 case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
121 case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
122 }
123}
124
125
126/* 386/*
127 * Save the current user registers on the user stack. 387 * Save the current user registers on the user stack.
128 * We only save the altivec registers if the process has used 388 * We only save the altivec/spe registers if the process has used
129 * altivec instructions at some point. 389 * altivec/spe instructions at some point.
130 */ 390 */
131static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame, int sigret) 391static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
392 int sigret)
132{ 393{
133 elf_greg_t64 *gregs = (elf_greg_t64 *)regs; 394#ifdef CONFIG_PPC32
134 int i, err = 0; 395 CHECK_FULL_REGS(regs);
135 396#endif
136 /* Make sure floating point registers are stored in regs */ 397 /* Make sure floating point registers are stored in regs */
137 flush_fp_to_thread(current); 398 flush_fp_to_thread(current);
138 399
139 /* save general and floating-point registers */ 400 /* save general and floating-point registers */
140 for (i = 0; i <= PT_RESULT; i ++) 401 if (save_general_regs(regs, frame) ||
141 err |= __put_user((unsigned int)gregs[i], &frame->mc_gregs[i]); 402 __copy_to_user(&frame->mc_fregs, current->thread.fpr,
142 err |= __copy_to_user(&frame->mc_fregs, current->thread.fpr, 403 ELF_NFPREG * sizeof(double)))
143 ELF_NFPREG * sizeof(double));
144 if (err)
145 return 1; 404 return 1;
146 405
147 current->thread.fpscr = 0; /* turn off all fp exceptions */ 406 current->thread.fpscr.val = 0; /* turn off all fp exceptions */
148 407
149#ifdef CONFIG_ALTIVEC 408#ifdef CONFIG_ALTIVEC
150 /* save altivec registers */ 409 /* save altivec registers */
151 if (current->thread.used_vr) { 410 if (current->thread.used_vr) {
152 flush_altivec_to_thread(current); 411 flush_altivec_to_thread(current);
153 if (__copy_to_user(&frame->mc_vregs, current->thread.vr, 412 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
154 ELF_NVRREG32 * sizeof(vector128))) 413 ELF_NVRREG * sizeof(vector128)))
155 return 1; 414 return 1;
156 /* set MSR_VEC in the saved MSR value to indicate that 415 /* set MSR_VEC in the saved MSR value to indicate that
157 frame->mc_vregs contains valid data */ 416 frame->mc_vregs contains valid data */
@@ -169,6 +428,25 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
169 return 1; 428 return 1;
170#endif /* CONFIG_ALTIVEC */ 429#endif /* CONFIG_ALTIVEC */
171 430
431#ifdef CONFIG_SPE
432 /* save spe registers */
433 if (current->thread.used_spe) {
434 flush_spe_to_thread(current);
435 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
436 ELF_NEVRREG * sizeof(u32)))
437 return 1;
438 /* set MSR_SPE in the saved MSR value to indicate that
439 frame->mc_vregs contains valid data */
440 if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
441 return 1;
442 }
443 /* else assert((regs->msr & MSR_SPE) == 0) */
444
445 /* We always copy to/from spefscr */
446 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
447 return 1;
448#endif /* CONFIG_SPE */
449
172 if (sigret) { 450 if (sigret) {
173 /* Set up the sigreturn trampoline: li r0,sigret; sc */ 451 /* Set up the sigreturn trampoline: li r0,sigret; sc */
174 if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) 452 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -186,13 +464,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
186 * (except for MSR). 464 * (except for MSR).
187 */ 465 */
188static long restore_user_regs(struct pt_regs *regs, 466static long restore_user_regs(struct pt_regs *regs,
189 struct mcontext32 __user *sr, int sig) 467 struct mcontext __user *sr, int sig)
190{ 468{
191 elf_greg_t64 *gregs = (elf_greg_t64 *)regs; 469 long err;
192 int i;
193 long err = 0;
194 unsigned int save_r2 = 0; 470 unsigned int save_r2 = 0;
195#ifdef CONFIG_ALTIVEC 471#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
196 unsigned long msr; 472 unsigned long msr;
197#endif 473#endif
198 474
@@ -202,11 +478,7 @@ static long restore_user_regs(struct pt_regs *regs,
202 */ 478 */
203 if (!sig) 479 if (!sig)
204 save_r2 = (unsigned int)regs->gpr[2]; 480 save_r2 = (unsigned int)regs->gpr[2];
205 for (i = 0; i <= PT_RESULT; i++) { 481 err = restore_general_regs(regs, sr);
206 if ((i == PT_MSR) || (i == PT_SOFTE))
207 continue;
208 err |= __get_user(gregs[i], &sr->mc_gregs[i]);
209 }
210 if (!sig) 482 if (!sig)
211 regs->gpr[2] = (unsigned long) save_r2; 483 regs->gpr[2] = (unsigned long) save_r2;
212 if (err) 484 if (err)
@@ -229,135 +501,51 @@ static long restore_user_regs(struct pt_regs *regs,
229 sizeof(sr->mc_vregs))) 501 sizeof(sr->mc_vregs)))
230 return 1; 502 return 1;
231 } else if (current->thread.used_vr) 503 } else if (current->thread.used_vr)
232 memset(current->thread.vr, 0, ELF_NVRREG32 * sizeof(vector128)); 504 memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
233 505
234 /* Always get VRSAVE back */ 506 /* Always get VRSAVE back */
235 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) 507 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
236 return 1; 508 return 1;
237#endif /* CONFIG_ALTIVEC */ 509#endif /* CONFIG_ALTIVEC */
238 510
511#ifdef CONFIG_SPE
512 /* force the process to reload the spe registers from
513 current->thread when it next does spe instructions */
514 regs->msr &= ~MSR_SPE;
515 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
516 /* restore spe registers from the stack */
517 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
518 ELF_NEVRREG * sizeof(u32)))
519 return 1;
520 } else if (current->thread.used_spe)
521 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
522
523 /* Always get SPEFSCR back */
524 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
525 return 1;
526#endif /* CONFIG_SPE */
527
239#ifndef CONFIG_SMP 528#ifndef CONFIG_SMP
240 preempt_disable(); 529 preempt_disable();
241 if (last_task_used_math == current) 530 if (last_task_used_math == current)
242 last_task_used_math = NULL; 531 last_task_used_math = NULL;
243 if (last_task_used_altivec == current) 532 if (last_task_used_altivec == current)
244 last_task_used_altivec = NULL; 533 last_task_used_altivec = NULL;
534#ifdef CONFIG_SPE
535 if (last_task_used_spe == current)
536 last_task_used_spe = NULL;
537#endif
245 preempt_enable(); 538 preempt_enable();
246#endif 539#endif
247 return 0; 540 return 0;
248} 541}
249 542
250 543#ifdef CONFIG_PPC64
251/* 544long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
252 * Start of nonRT signal support
253 *
254 * sigset_t is 32 bits for non-rt signals
255 *
256 * System Calls
257 * sigaction sys32_sigaction
258 * sigreturn sys32_sigreturn
259 *
260 * Note sigsuspend has no special 32 bit routine - uses the 64 bit routine
261 *
262 * Other routines
263 * setup_frame32
264 */
265
266/*
267 * Atomically swap in the new signal mask, and wait for a signal.
268 */
269long sys32_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
270 struct pt_regs *regs)
271{
272 sigset_t saveset;
273
274 mask &= _BLOCKABLE;
275 spin_lock_irq(&current->sighand->siglock);
276 saveset = current->blocked;
277 siginitset(&current->blocked, mask);
278 recalc_sigpending();
279 spin_unlock_irq(&current->sighand->siglock);
280
281 regs->result = -EINTR;
282 regs->gpr[3] = EINTR;
283 regs->ccr |= 0x10000000;
284 while (1) {
285 current->state = TASK_INTERRUPTIBLE;
286 schedule();
287 if (do_signal32(&saveset, regs))
288 /*
289 * Returning 0 means we return to userspace via
290 * ret_from_except and thus restore all user
291 * registers from *regs. This is what we need
292 * to do when a signal has been delivered.
293 */
294 return 0;
295 }
296}
297
298long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
299 struct old_sigaction32 __user *oact)
300{
301 struct k_sigaction new_ka, old_ka;
302 int ret;
303
304 if (sig < 0)
305 sig = -sig;
306
307 if (act) {
308 compat_old_sigset_t mask;
309 compat_uptr_t handler, restorer;
310
311 if (get_user(handler, &act->sa_handler) ||
312 __get_user(restorer, &act->sa_restorer) ||
313 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
314 __get_user(mask, &act->sa_mask))
315 return -EFAULT;
316 new_ka.sa.sa_handler = compat_ptr(handler);
317 new_ka.sa.sa_restorer = compat_ptr(restorer);
318 siginitset(&new_ka.sa.sa_mask, mask);
319 }
320
321 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
322 if (!ret && oact) {
323 if (put_user((long)old_ka.sa.sa_handler, &oact->sa_handler) ||
324 __put_user((long)old_ka.sa.sa_restorer, &oact->sa_restorer) ||
325 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
326 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
327 return -EFAULT;
328 }
329
330 return ret;
331}
332
333
334
335/*
336 * Start of RT signal support
337 *
338 * sigset_t is 64 bits for rt signals
339 *
340 * System Calls
341 * sigaction sys32_rt_sigaction
342 * sigpending sys32_rt_sigpending
343 * sigprocmask sys32_rt_sigprocmask
344 * sigreturn sys32_rt_sigreturn
345 * sigqueueinfo sys32_rt_sigqueueinfo
346 * sigsuspend sys32_rt_sigsuspend
347 *
348 * Other routines
349 * setup_rt_frame32
350 * copy_siginfo_to_user32
351 * siginfo32to64
352 */
353
354
355long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
356 struct sigaction32 __user *oact, size_t sigsetsize) 545 struct sigaction32 __user *oact, size_t sigsetsize)
357{ 546{
358 struct k_sigaction new_ka, old_ka; 547 struct k_sigaction new_ka, old_ka;
359 int ret; 548 int ret;
360 compat_sigset_t set32;
361 549
362 /* XXX: Don't preclude handling different sized sigset_t's. */ 550 /* XXX: Don't preclude handling different sized sigset_t's. */
363 if (sigsetsize != sizeof(compat_sigset_t)) 551 if (sigsetsize != sizeof(compat_sigset_t))
@@ -368,9 +556,7 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
368 556
369 ret = get_user(handler, &act->sa_handler); 557 ret = get_user(handler, &act->sa_handler);
370 new_ka.sa.sa_handler = compat_ptr(handler); 558 new_ka.sa.sa_handler = compat_ptr(handler);
371 ret |= __copy_from_user(&set32, &act->sa_mask, 559 ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
372 sizeof(compat_sigset_t));
373 sigset_from_compat(&new_ka.sa.sa_mask, &set32);
374 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); 560 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
375 if (ret) 561 if (ret)
376 return -EFAULT; 562 return -EFAULT;
@@ -378,10 +564,8 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
378 564
379 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 565 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
380 if (!ret && oact) { 566 if (!ret && oact) {
381 compat_from_sigset(&set32, &old_ka.sa.sa_mask);
382 ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler); 567 ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
383 ret |= __copy_to_user(&oact->sa_mask, &set32, 568 ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
384 sizeof(compat_sigset_t));
385 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 569 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
386 } 570 }
387 return ret; 571 return ret;
@@ -394,41 +578,37 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
394 * of a signed int (msr in 32-bit mode) and the register representation 578 * of a signed int (msr in 32-bit mode) and the register representation
395 * of a signed int (msr in 64-bit mode) is performed. 579 * of a signed int (msr in 64-bit mode) is performed.
396 */ 580 */
397long sys32_rt_sigprocmask(u32 how, compat_sigset_t __user *set, 581long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
398 compat_sigset_t __user *oset, size_t sigsetsize) 582 compat_sigset_t __user *oset, size_t sigsetsize)
399{ 583{
400 sigset_t s; 584 sigset_t s;
401 sigset_t __user *up; 585 sigset_t __user *up;
402 compat_sigset_t s32;
403 int ret; 586 int ret;
404 mm_segment_t old_fs = get_fs(); 587 mm_segment_t old_fs = get_fs();
405 588
406 if (set) { 589 if (set) {
407 if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) 590 if (get_sigset_t(&s, set))
408 return -EFAULT; 591 return -EFAULT;
409 sigset_from_compat(&s, &s32);
410 } 592 }
411 593
412 set_fs(KERNEL_DS); 594 set_fs(KERNEL_DS);
413 /* This is valid because of the set_fs() */ 595 /* This is valid because of the set_fs() */
414 up = (sigset_t __user *) &s; 596 up = (sigset_t __user *) &s;
415 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL, 597 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
416 sigsetsize); 598 sigsetsize);
417 set_fs(old_fs); 599 set_fs(old_fs);
418 if (ret) 600 if (ret)
419 return ret; 601 return ret;
420 if (oset) { 602 if (oset) {
421 compat_from_sigset(&s32, &s); 603 if (put_sigset_t(oset, &s))
422 if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
423 return -EFAULT; 604 return -EFAULT;
424 } 605 }
425 return 0; 606 return 0;
426} 607}
427 608
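The new get_sigset_t()/put_sigset_t() calls above replace the open-coded compat conversions, but their bodies are not shown in this hunk. A minimal sketch of what they could look like, reusing the same sigset_from_compat()/compat_from_sigset() helpers the removed lines called (the bodies below are an illustration, not taken from the patch):

#include <linux/compat.h>
#include <linux/signal.h>
#include <asm/uaccess.h>

/* Illustrative sketch only -- not part of this patch. */
static inline int get_sigset_t(sigset_t *set, const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(s32)))
		return -EFAULT;
	/* reassemble each 64-bit word from its word-swapped 32-bit halves */
	sigset_from_compat(set, &s32);
	return 0;
}

static inline unsigned long put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t s32;

	compat_from_sigset(&s32, set);		/* split each 64-bit word back into halves */
	return copy_to_user(uset, &s32, sizeof(s32));	/* nonzero on failure */
}

Both variants return nonzero when the user access faults, which is the condition the call sites above test for.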
428long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize) 609long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
429{ 610{
430 sigset_t s; 611 sigset_t s;
431 compat_sigset_t s32;
432 int ret; 612 int ret;
433 mm_segment_t old_fs = get_fs(); 613 mm_segment_t old_fs = get_fs();
434 614
@@ -437,8 +617,7 @@ long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
437 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); 617 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
438 set_fs(old_fs); 618 set_fs(old_fs);
439 if (!ret) { 619 if (!ret) {
440 compat_from_sigset(&s32, &s); 620 if (put_sigset_t(set, &s))
441 if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
442 return -EFAULT; 621 return -EFAULT;
443 } 622 }
444 return ret; 623 return ret;
@@ -500,6 +679,8 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
500 return err; 679 return err;
501} 680}
502 681
682#define copy_siginfo_to_user copy_siginfo_to_user32
683
503/* 684/*
504 * Note: it is necessary to treat pid and sig as unsigned ints, with the 685 * Note: it is necessary to treat pid and sig as unsigned ints, with the
505 * corresponding cast to a signed int to insure that the proper conversion 686 * corresponding cast to a signed int to insure that the proper conversion
@@ -507,12 +688,12 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
507 * (msr in 32-bit mode) and the register representation of a signed int 688 * (msr in 32-bit mode) and the register representation of a signed int
508 * (msr in 64-bit mode) is performed. 689 * (msr in 64-bit mode) is performed.
509 */ 690 */
510long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo) 691long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
511{ 692{
512 siginfo_t info; 693 siginfo_t info;
513 int ret; 694 int ret;
514 mm_segment_t old_fs = get_fs(); 695 mm_segment_t old_fs = get_fs();
515 696
516 if (copy_from_user (&info, uinfo, 3*sizeof(int)) || 697 if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
517 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32)) 698 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32))
518 return -EFAULT; 699 return -EFAULT;
@@ -522,58 +703,14 @@ long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
522 set_fs (old_fs); 703 set_fs (old_fs);
523 return ret; 704 return ret;
524} 705}
525
526int sys32_rt_sigsuspend(compat_sigset_t __user * unewset, size_t sigsetsize, int p3,
527 int p4, int p6, int p7, struct pt_regs *regs)
528{
529 sigset_t saveset, newset;
530 compat_sigset_t s32;
531
532 /* XXX: Don't preclude handling different sized sigset_t's. */
533 if (sigsetsize != sizeof(sigset_t))
534 return -EINVAL;
535
536 if (copy_from_user(&s32, unewset, sizeof(s32)))
537 return -EFAULT;
538
539 /*
540 * Swap the 2 words of the 64-bit sigset_t (they are stored
541 * in the "wrong" endian in 32-bit user storage).
542 */
543 sigset_from_compat(&newset, &s32);
544
545 sigdelsetmask(&newset, ~_BLOCKABLE);
546 spin_lock_irq(&current->sighand->siglock);
547 saveset = current->blocked;
548 current->blocked = newset;
549 recalc_sigpending();
550 spin_unlock_irq(&current->sighand->siglock);
551
552 regs->result = -EINTR;
553 regs->gpr[3] = EINTR;
554 regs->ccr |= 0x10000000;
555 while (1) {
556 current->state = TASK_INTERRUPTIBLE;
557 schedule();
558 if (do_signal32(&saveset, regs))
559 /*
560 * Returning 0 means we return to userspace via
561 * ret_from_except and thus restore all user
562 * registers from *regs. This is what we need
563 * to do when a signal has been delivered.
564 */
565 return 0;
566 }
567}
568
569/* 706/*
570 * Start Alternate signal stack support 707 * Start Alternate signal stack support
571 * 708 *
572 * System Calls 709 * System Calls
573 * sigaltstack sys32_sigaltstack 710 * sigaltstack compat_sys_sigaltstack
574 */ 711 */
575 712
576int sys32_sigaltstack(u32 __new, u32 __old, int r5, 713int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
577 int r6, int r7, int r8, struct pt_regs *regs) 714 int r6, int r7, int r8, struct pt_regs *regs)
578{ 715{
579 stack_32_t __user * newstack = (stack_32_t __user *)(long) __new; 716 stack_32_t __user * newstack = (stack_32_t __user *)(long) __new;
@@ -615,76 +752,94 @@ int sys32_sigaltstack(u32 __new, u32 __old, int r5,
615 return -EFAULT; 752 return -EFAULT;
616 return ret; 753 return ret;
617} 754}
755#endif /* CONFIG_PPC64 */
756
618 757
758/*
759 * Restore the user process's signal mask
760 */
761#ifdef CONFIG_PPC64
762extern void restore_sigmask(sigset_t *set);
763#else /* CONFIG_PPC64 */
764static void restore_sigmask(sigset_t *set)
765{
766 sigdelsetmask(set, ~_BLOCKABLE);
767 spin_lock_irq(&current->sighand->siglock);
768 current->blocked = *set;
769 recalc_sigpending();
770 spin_unlock_irq(&current->sighand->siglock);
771}
772#endif
619 773
620/* 774/*
621 * Set up a signal frame for a "real-time" signal handler 775 * Set up a signal frame for a "real-time" signal handler
622 * (one which gets siginfo). 776 * (one which gets siginfo).
623 */ 777 */
624static int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, 778static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
625 siginfo_t *info, sigset_t *oldset, 779 siginfo_t *info, sigset_t *oldset,
626 struct pt_regs * regs, unsigned long newsp) 780 struct pt_regs *regs, unsigned long newsp)
627{ 781{
628 struct rt_sigframe32 __user *rt_sf; 782 struct rt_sigframe __user *rt_sf;
629 struct mcontext32 __user *frame; 783 struct mcontext __user *frame;
630 unsigned long origsp = newsp; 784 unsigned long origsp = newsp;
631 compat_sigset_t c_oldset;
632 785
633 /* Set up Signal Frame */ 786 /* Set up Signal Frame */
634 /* Put a Real Time Context onto stack */ 787 /* Put a Real Time Context onto stack */
635 newsp -= sizeof(*rt_sf); 788 newsp -= sizeof(*rt_sf);
636 rt_sf = (struct rt_sigframe32 __user *)newsp; 789 rt_sf = (struct rt_sigframe __user *)newsp;
637 790
638 /* create a stack frame for the caller of the handler */ 791 /* create a stack frame for the caller of the handler */
639 newsp -= __SIGNAL_FRAMESIZE32 + 16; 792 newsp -= __SIGNAL_FRAMESIZE + 16;
640 793
641 if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp)) 794 if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
642 goto badframe; 795 goto badframe;
643 796
644 compat_from_sigset(&c_oldset, oldset);
645
646 /* Put the siginfo & fill in most of the ucontext */ 797 /* Put the siginfo & fill in most of the ucontext */
647 if (copy_siginfo_to_user32(&rt_sf->info, info) 798 if (copy_siginfo_to_user(&rt_sf->info, info)
648 || __put_user(0, &rt_sf->uc.uc_flags) 799 || __put_user(0, &rt_sf->uc.uc_flags)
649 || __put_user(0, &rt_sf->uc.uc_link) 800 || __put_user(0, &rt_sf->uc.uc_link)
650 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp) 801 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
651 || __put_user(sas_ss_flags(regs->gpr[1]), 802 || __put_user(sas_ss_flags(regs->gpr[1]),
652 &rt_sf->uc.uc_stack.ss_flags) 803 &rt_sf->uc.uc_stack.ss_flags)
653 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size) 804 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
654 || __put_user((u32)(u64)&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs) 805 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
655 || __copy_to_user(&rt_sf->uc.uc_sigmask, &c_oldset, sizeof(c_oldset))) 806 &rt_sf->uc.uc_regs)
807 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
656 goto badframe; 808 goto badframe;
657 809
658 /* Save user registers on the stack */ 810 /* Save user registers on the stack */
659 frame = &rt_sf->uc.uc_mcontext; 811 frame = &rt_sf->uc.uc_mcontext;
660 if (put_user(regs->gpr[1], (u32 __user *)newsp)) 812#ifdef CONFIG_PPC64
661 goto badframe;
662
663 if (vdso32_rt_sigtramp && current->thread.vdso_base) { 813 if (vdso32_rt_sigtramp && current->thread.vdso_base) {
664 if (save_user_regs(regs, frame, 0)) 814 if (save_user_regs(regs, frame, 0))
665 goto badframe; 815 goto badframe;
666 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp; 816 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
667 } else { 817 } else
818#endif
819 {
668 if (save_user_regs(regs, frame, __NR_rt_sigreturn)) 820 if (save_user_regs(regs, frame, __NR_rt_sigreturn))
669 goto badframe; 821 goto badframe;
670 regs->link = (unsigned long) frame->tramp; 822 regs->link = (unsigned long) frame->tramp;
671 } 823 }
672 regs->gpr[1] = (unsigned long) newsp; 824 if (put_user(regs->gpr[1], (u32 __user *)newsp))
825 goto badframe;
826 regs->gpr[1] = newsp;
673 regs->gpr[3] = sig; 827 regs->gpr[3] = sig;
674 regs->gpr[4] = (unsigned long) &rt_sf->info; 828 regs->gpr[4] = (unsigned long) &rt_sf->info;
675 regs->gpr[5] = (unsigned long) &rt_sf->uc; 829 regs->gpr[5] = (unsigned long) &rt_sf->uc;
676 regs->gpr[6] = (unsigned long) rt_sf; 830 regs->gpr[6] = (unsigned long) rt_sf;
677 regs->nip = (unsigned long) ka->sa.sa_handler; 831 regs->nip = (unsigned long) ka->sa.sa_handler;
678 regs->trap = 0; 832 regs->trap = 0;
833#ifdef CONFIG_PPC64
679 regs->result = 0; 834 regs->result = 0;
680 835
681 if (test_thread_flag(TIF_SINGLESTEP)) 836 if (test_thread_flag(TIF_SINGLESTEP))
682 ptrace_notify(SIGTRAP); 837 ptrace_notify(SIGTRAP);
683 838#endif
684 return 1; 839 return 1;
685 840
686badframe: 841badframe:
687#if DEBUG_SIG 842#ifdef DEBUG_SIG
688 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", 843 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
689 regs, frame, newsp); 844 regs, frame, newsp);
690#endif 845#endif
@@ -692,46 +847,50 @@ badframe:
692 return 0; 847 return 0;
693} 848}
694 849
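For reference, the frame arithmetic in handle_rt_signal() above produces the following user-stack layout (an illustrative sketch only; __SIGNAL_FRAMESIZE and struct rt_sigframe are defined outside this hunk):

/*
 * Illustrative layout, growing downward from the old stack pointer:
 *
 *   old gpr[1] (origsp)
 *     [ struct rt_sigframe ]             <- rt_sf = origsp - sizeof(*rt_sf)
 *     [ __SIGNAL_FRAMESIZE + 16 bytes ]     dummy frame for the handler's caller
 *   new gpr[1] (newsp)                   <- back-chain word at *newsp is set to
 *                                            the old gpr[1] with put_user()
 */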
695static long do_setcontext32(struct ucontext32 __user *ucp, struct pt_regs *regs, int sig) 850static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
696{ 851{
697 compat_sigset_t c_set;
698 sigset_t set; 852 sigset_t set;
699 u32 mcp; 853 struct mcontext __user *mcp;
854
855 if (get_sigset_t(&set, &ucp->uc_sigmask))
856 return -EFAULT;
857#ifdef CONFIG_PPC64
858 {
859 u32 cmcp;
700 860
701 if (__copy_from_user(&c_set, &ucp->uc_sigmask, sizeof(c_set)) 861 if (__get_user(cmcp, &ucp->uc_regs))
702 || __get_user(mcp, &ucp->uc_regs)) 862 return -EFAULT;
863 mcp = (struct mcontext __user *)(u64)cmcp;
864 }
865#else
866 if (__get_user(mcp, &ucp->uc_regs))
703 return -EFAULT; 867 return -EFAULT;
704 sigset_from_compat(&set, &c_set); 868#endif
705 restore_sigmask(&set); 869 restore_sigmask(&set);
706 if (restore_user_regs(regs, (struct mcontext32 __user *)(u64)mcp, sig)) 870 if (restore_user_regs(regs, mcp, sig))
707 return -EFAULT; 871 return -EFAULT;
708 872
709 return 0; 873 return 0;
710} 874}
711 875
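do_setcontext() and the frame-setup paths above rely on to_user_ptr()/from_user_ptr() to move between real user pointers and the 32-bit values stored in the frame; their definitions sit outside this hunk. A plausible sketch, inferred from the (u32)(u64) casts they replace and from the compat_ptr() usage elsewhere in the patch (assumed, not quoted):

#ifdef CONFIG_PPC64
#define to_user_ptr(p)		ptr_to_compat(p)	/* user pointer -> u32 */
#define from_user_ptr(p)	compat_ptr(p)		/* u32 -> user pointer */
#else
#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))
#endif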
712/* 876long sys_swapcontext(struct ucontext __user *old_ctx,
713 * Handle {get,set,swap}_context operations for 32 bits processes 877 struct ucontext __user *new_ctx,
714 */
715
716long sys32_swapcontext(struct ucontext32 __user *old_ctx,
717 struct ucontext32 __user *new_ctx,
718 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) 878 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
719{ 879{
720 unsigned char tmp; 880 unsigned char tmp;
721 compat_sigset_t c_set;
722 881
723 /* Context size is for future use. Right now, we only make sure 882 /* Context size is for future use. Right now, we only make sure
724 * we are passed something we understand 883 * we are passed something we understand
725 */ 884 */
726 if (ctx_size < sizeof(struct ucontext32)) 885 if (ctx_size < sizeof(struct ucontext))
727 return -EINVAL; 886 return -EINVAL;
728 887
729 if (old_ctx != NULL) { 888 if (old_ctx != NULL) {
730 compat_from_sigset(&c_set, &current->blocked);
731 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx)) 889 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
732 || save_user_regs(regs, &old_ctx->uc_mcontext, 0) 890 || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
733 || __copy_to_user(&old_ctx->uc_sigmask, &c_set, sizeof(c_set)) 891 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
734 || __put_user((u32)(u64)&old_ctx->uc_mcontext, &old_ctx->uc_regs)) 892 || __put_user(to_user_ptr(&old_ctx->uc_mcontext),
893 &old_ctx->uc_regs))
735 return -EFAULT; 894 return -EFAULT;
736 } 895 }
737 if (new_ctx == NULL) 896 if (new_ctx == NULL)
@@ -752,27 +911,26 @@ long sys32_swapcontext(struct ucontext32 __user *old_ctx,
752 * or if another thread unmaps the region containing the context. 911 * or if another thread unmaps the region containing the context.
753 * We kill the task with a SIGSEGV in this situation. 912 * We kill the task with a SIGSEGV in this situation.
754 */ 913 */
755 if (do_setcontext32(new_ctx, regs, 0)) 914 if (do_setcontext(new_ctx, regs, 0))
756 do_exit(SIGSEGV); 915 do_exit(SIGSEGV);
757 916 sigreturn_exit(regs);
917 /* doesn't actually return back to here */
758 return 0; 918 return 0;
759} 919}
760 920
761long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, 921long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
762 struct pt_regs *regs) 922 struct pt_regs *regs)
763{ 923{
764 struct rt_sigframe32 __user *rt_sf; 924 struct rt_sigframe __user *rt_sf;
765 int ret;
766
767 925
768 /* Always make any pending restarted system calls return -EINTR */ 926 /* Always make any pending restarted system calls return -EINTR */
769 current_thread_info()->restart_block.fn = do_no_restart_syscall; 927 current_thread_info()->restart_block.fn = do_no_restart_syscall;
770 928
771 rt_sf = (struct rt_sigframe32 __user *) 929 rt_sf = (struct rt_sigframe __user *)
772 (regs->gpr[1] + __SIGNAL_FRAMESIZE32 + 16); 930 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
773 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) 931 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
774 goto bad; 932 goto bad;
775 if (do_setcontext32(&rt_sf->uc, regs, 1)) 933 if (do_setcontext(&rt_sf->uc, regs, 1))
776 goto bad; 934 goto bad;
777 935
778 /* 936 /*
@@ -781,62 +939,165 @@ long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
781 * signal return. But other architectures do this and we have 939 * signal return. But other architectures do this and we have
782 * always done it up until now so it is probably better not to 940 * always done it up until now so it is probably better not to
783 * change it. -- paulus 941 * change it. -- paulus
784 * We use the sys32_ version that does the 32/64 bits conversion 942 */
943#ifdef CONFIG_PPC64
944 /*
945 * We use the compat_sys_ version that does the 32/64 bits conversion
785 * and takes userland pointer directly. What about error checking ? 946 * and takes userland pointer directly. What about error checking ?
786 * nobody does any... 947 * nobody does any...
787 */ 948 */
788 sys32_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs); 949 compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
789 950 return (int)regs->result;
790 ret = regs->result; 951#else
791 952 do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
792 return ret; 953 sigreturn_exit(regs); /* doesn't return here */
954 return 0;
955#endif
793 956
794 bad: 957 bad:
795 force_sig(SIGSEGV, current); 958 force_sig(SIGSEGV, current);
796 return 0; 959 return 0;
797} 960}
798 961
962#ifdef CONFIG_PPC32
963int sys_debug_setcontext(struct ucontext __user *ctx,
964 int ndbg, struct sig_dbg_op __user *dbg,
965 int r6, int r7, int r8,
966 struct pt_regs *regs)
967{
968 struct sig_dbg_op op;
969 int i;
970 unsigned long new_msr = regs->msr;
971#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
972 unsigned long new_dbcr0 = current->thread.dbcr0;
973#endif
974
975 for (i=0; i<ndbg; i++) {
976 if (__copy_from_user(&op, dbg, sizeof(op)))
977 return -EFAULT;
978 switch (op.dbg_type) {
979 case SIG_DBG_SINGLE_STEPPING:
980#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
981 if (op.dbg_value) {
982 new_msr |= MSR_DE;
983 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
984 } else {
985 new_msr &= ~MSR_DE;
986 new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
987 }
988#else
989 if (op.dbg_value)
990 new_msr |= MSR_SE;
991 else
992 new_msr &= ~MSR_SE;
993#endif
994 break;
995 case SIG_DBG_BRANCH_TRACING:
996#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
997 return -EINVAL;
998#else
999 if (op.dbg_value)
1000 new_msr |= MSR_BE;
1001 else
1002 new_msr &= ~MSR_BE;
1003#endif
1004 break;
1005
1006 default:
1007 return -EINVAL;
1008 }
1009 }
1010
1011 /* We wait until here to actually install the values in the
1012 registers so if we fail in the above loop, it will not
1013 affect the contents of these registers. After this point,
1014 failure is a problem, anyway, and it's very unlikely unless
1015 the user is really doing something wrong. */
1016 regs->msr = new_msr;
1017#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1018 current->thread.dbcr0 = new_dbcr0;
1019#endif
1020
1021 /*
1022 * If we get a fault copying the context into the kernel's
1023 * image of the user's registers, we can't just return -EFAULT
1024 * because the user's registers will be corrupted. For instance
1025 * the NIP value may have been updated but not some of the
1026 * other registers. Given that we have done the access_ok
1027 * and successfully read the first and last bytes of the region
1028 * above, this should only happen in an out-of-memory situation
1029 * or if another thread unmaps the region containing the context.
1030 * We kill the task with a SIGSEGV in this situation.
1031 */
1032 if (do_setcontext(ctx, regs, 1)) {
1033 force_sig(SIGSEGV, current);
1034 goto out;
1035 }
1036
1037 /*
1038 * It's not clear whether or why it is desirable to save the
1039 * sigaltstack setting on signal delivery and restore it on
1040 * signal return. But other architectures do this and we have
1041 * always done it up until now so it is probably better not to
1042 * change it. -- paulus
1043 */
1044 do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1045
1046 sigreturn_exit(regs);
1047 /* doesn't actually return back to here */
1048
1049 out:
1050 return 0;
1051}
1052#endif
799 1053
800/* 1054/*
801 * OK, we're invoking a handler 1055 * OK, we're invoking a handler
802 */ 1056 */
803static int handle_signal32(unsigned long sig, struct k_sigaction *ka, 1057static int handle_signal(unsigned long sig, struct k_sigaction *ka,
804 siginfo_t *info, sigset_t *oldset, 1058 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
805 struct pt_regs * regs, unsigned long newsp) 1059 unsigned long newsp)
806{ 1060{
807 struct sigcontext32 __user *sc; 1061 struct sigcontext __user *sc;
808 struct sigregs32 __user *frame; 1062 struct sigregs __user *frame;
809 unsigned long origsp = newsp; 1063 unsigned long origsp = newsp;
810 1064
811 /* Set up Signal Frame */ 1065 /* Set up Signal Frame */
812 newsp -= sizeof(struct sigregs32); 1066 newsp -= sizeof(struct sigregs);
813 frame = (struct sigregs32 __user *) newsp; 1067 frame = (struct sigregs __user *) newsp;
814 1068
815 /* Put a sigcontext on the stack */ 1069 /* Put a sigcontext on the stack */
816 newsp -= sizeof(*sc); 1070 newsp -= sizeof(*sc);
817 sc = (struct sigcontext32 __user *) newsp; 1071 sc = (struct sigcontext __user *) newsp;
818 1072
819 /* create a stack frame for the caller of the handler */ 1073 /* create a stack frame for the caller of the handler */
820 newsp -= __SIGNAL_FRAMESIZE32; 1074 newsp -= __SIGNAL_FRAMESIZE;
821 1075
822 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp)) 1076 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
823 goto badframe; 1077 goto badframe;
824 1078
825#if _NSIG != 64 1079#if _NSIG != 64
826#error "Please adjust handle_signal32()" 1080#error "Please adjust handle_signal()"
827#endif 1081#endif
828 if (__put_user((u32)(u64)ka->sa.sa_handler, &sc->handler) 1082 if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
829 || __put_user(oldset->sig[0], &sc->oldmask) 1083 || __put_user(oldset->sig[0], &sc->oldmask)
1084#ifdef CONFIG_PPC64
830 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3]) 1085 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
831 || __put_user((u32)(u64)frame, &sc->regs) 1086#else
1087 || __put_user(oldset->sig[1], &sc->_unused[3])
1088#endif
1089 || __put_user(to_user_ptr(frame), &sc->regs)
832 || __put_user(sig, &sc->signal)) 1090 || __put_user(sig, &sc->signal))
833 goto badframe; 1091 goto badframe;
834 1092
1093#ifdef CONFIG_PPC64
835 if (vdso32_sigtramp && current->thread.vdso_base) { 1094 if (vdso32_sigtramp && current->thread.vdso_base) {
836 if (save_user_regs(regs, &frame->mctx, 0)) 1095 if (save_user_regs(regs, &frame->mctx, 0))
837 goto badframe; 1096 goto badframe;
838 regs->link = current->thread.vdso_base + vdso32_sigtramp; 1097 regs->link = current->thread.vdso_base + vdso32_sigtramp;
839 } else { 1098 } else
1099#endif
1100 {
840 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn)) 1101 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
841 goto badframe; 1102 goto badframe;
842 regs->link = (unsigned long) frame->mctx.tramp; 1103 regs->link = (unsigned long) frame->mctx.tramp;
@@ -844,22 +1105,24 @@ static int handle_signal32(unsigned long sig, struct k_sigaction *ka,
844 1105
845 if (put_user(regs->gpr[1], (u32 __user *)newsp)) 1106 if (put_user(regs->gpr[1], (u32 __user *)newsp))
846 goto badframe; 1107 goto badframe;
847 regs->gpr[1] = (unsigned long) newsp; 1108 regs->gpr[1] = newsp;
848 regs->gpr[3] = sig; 1109 regs->gpr[3] = sig;
849 regs->gpr[4] = (unsigned long) sc; 1110 regs->gpr[4] = (unsigned long) sc;
850 regs->nip = (unsigned long) ka->sa.sa_handler; 1111 regs->nip = (unsigned long) ka->sa.sa_handler;
851 regs->trap = 0; 1112 regs->trap = 0;
1113#ifdef CONFIG_PPC64
852 regs->result = 0; 1114 regs->result = 0;
853 1115
854 if (test_thread_flag(TIF_SINGLESTEP)) 1116 if (test_thread_flag(TIF_SINGLESTEP))
855 ptrace_notify(SIGTRAP); 1117 ptrace_notify(SIGTRAP);
1118#endif
856 1119
857 return 1; 1120 return 1;
858 1121
859badframe: 1122badframe:
860#if DEBUG_SIG 1123#ifdef DEBUG_SIG
861 printk("badframe in handle_signal, regs=%p frame=%x newsp=%x\n", 1124 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
862 regs, frame, *newspp); 1125 regs, frame, newsp);
863#endif 1126#endif
864 force_sigsegv(sig, current); 1127 force_sigsegv(sig, current);
865 return 0; 1128 return 0;
@@ -868,65 +1131,69 @@ badframe:
868/* 1131/*
869 * Do a signal return; undo the signal stack. 1132 * Do a signal return; undo the signal stack.
870 */ 1133 */
871long sys32_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, 1134long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
872 struct pt_regs *regs) 1135 struct pt_regs *regs)
873{ 1136{
874 struct sigcontext32 __user *sc; 1137 struct sigcontext __user *sc;
875 struct sigcontext32 sigctx; 1138 struct sigcontext sigctx;
876 struct mcontext32 __user *sr; 1139 struct mcontext __user *sr;
877 sigset_t set; 1140 sigset_t set;
878 int ret;
879 1141
880 /* Always make any pending restarted system calls return -EINTR */ 1142 /* Always make any pending restarted system calls return -EINTR */
881 current_thread_info()->restart_block.fn = do_no_restart_syscall; 1143 current_thread_info()->restart_block.fn = do_no_restart_syscall;
882 1144
883 sc = (struct sigcontext32 __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32); 1145 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
884 if (copy_from_user(&sigctx, sc, sizeof(sigctx))) 1146 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
885 goto badframe; 1147 goto badframe;
886 1148
1149#ifdef CONFIG_PPC64
887 /* 1150 /*
888 * Note that PPC32 puts the upper 32 bits of the sigmask in the 1151 * Note that PPC32 puts the upper 32 bits of the sigmask in the
889 * unused part of the signal stackframe 1152 * unused part of the signal stackframe
890 */ 1153 */
891 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32); 1154 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1155#else
1156 set.sig[0] = sigctx.oldmask;
1157 set.sig[1] = sigctx._unused[3];
1158#endif
892 restore_sigmask(&set); 1159 restore_sigmask(&set);
893 1160
894 sr = (struct mcontext32 __user *)(u64)sigctx.regs; 1161 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
895 if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) 1162 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
896 || restore_user_regs(regs, sr, 1)) 1163 || restore_user_regs(regs, sr, 1))
897 goto badframe; 1164 goto badframe;
898 1165
899 ret = regs->result; 1166#ifdef CONFIG_PPC64
900 return ret; 1167 return (int)regs->result;
1168#else
1169 sigreturn_exit(regs); /* doesn't return */
1170 return 0;
1171#endif
901 1172
902badframe: 1173badframe:
903 force_sig(SIGSEGV, current); 1174 force_sig(SIGSEGV, current);
904 return 0; 1175 return 0;
905} 1176}
906 1177
907
908
909/*
910 * Start of do_signal32 routine
911 *
912 * This routine gets control when a pending signal needs to be processed
913 * in the 32 bit target thread -
914 *
915 * It handles both rt and non-rt signals
916 */
917
918/* 1178/*
919 * Note that 'init' is a special process: it doesn't get signals it doesn't 1179 * Note that 'init' is a special process: it doesn't get signals it doesn't
920 * want to handle. Thus you cannot kill init even with a SIGKILL even by 1180 * want to handle. Thus you cannot kill init even with a SIGKILL even by
921 * mistake. 1181 * mistake.
922 */ 1182 */
923 1183int do_signal(sigset_t *oldset, struct pt_regs *regs)
924int do_signal32(sigset_t *oldset, struct pt_regs *regs)
925{ 1184{
926 siginfo_t info; 1185 siginfo_t info;
1186 struct k_sigaction ka;
927 unsigned int frame, newsp; 1187 unsigned int frame, newsp;
928 int signr, ret; 1188 int signr, ret;
929 struct k_sigaction ka; 1189
1190#ifdef CONFIG_PPC32
1191 if (try_to_freeze()) {
1192 signr = 0;
1193 if (!signal_pending(current))
1194 goto no_signal;
1195 }
1196#endif
930 1197
931 if (!oldset) 1198 if (!oldset)
932 oldset = &current->blocked; 1199 oldset = &current->blocked;
@@ -934,7 +1201,9 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
934 newsp = frame = 0; 1201 newsp = frame = 0;
935 1202
936 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 1203 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
937 1204#ifdef CONFIG_PPC32
1205no_signal:
1206#endif
938 if (TRAP(regs) == 0x0C00 /* System Call! */ 1207 if (TRAP(regs) == 0x0C00 /* System Call! */
939 && regs->ccr & 0x10000000 /* error signalled */ 1208 && regs->ccr & 0x10000000 /* error signalled */
940 && ((ret = regs->gpr[3]) == ERESTARTSYS 1209 && ((ret = regs->gpr[3]) == ERESTARTSYS
@@ -964,12 +1233,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
964 return 0; /* no signals delivered */ 1233 return 0; /* no signals delivered */
965 1234
966 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size 1235 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
967 && (!on_sig_stack(regs->gpr[1]))) 1236 && !on_sig_stack(regs->gpr[1]))
968 newsp = (current->sas_ss_sp + current->sas_ss_size); 1237 newsp = current->sas_ss_sp + current->sas_ss_size;
969 else 1238 else
970 newsp = regs->gpr[1]; 1239 newsp = regs->gpr[1];
971 newsp &= ~0xfUL; 1240 newsp &= ~0xfUL;
972 1241
1242#ifdef CONFIG_PPC64
973 /* 1243 /*
974 * Reenable the DABR before delivering the signal to 1244 * Reenable the DABR before delivering the signal to
975 * user space. The DABR will have been cleared if it 1245 * user space. The DABR will have been cleared if it
@@ -977,12 +1247,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
977 */ 1247 */
978 if (current->thread.dabr) 1248 if (current->thread.dabr)
979 set_dabr(current->thread.dabr); 1249 set_dabr(current->thread.dabr);
1250#endif
980 1251
981 /* Whee! Actually deliver the signal. */ 1252 /* Whee! Actually deliver the signal. */
982 if (ka.sa.sa_flags & SA_SIGINFO) 1253 if (ka.sa.sa_flags & SA_SIGINFO)
983 ret = handle_rt_signal32(signr, &ka, &info, oldset, regs, newsp); 1254 ret = handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
984 else 1255 else
985 ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp); 1256 ret = handle_signal(signr, &ka, &info, oldset, regs, newsp);
986 1257
987 if (ret) { 1258 if (ret) {
988 spin_lock_irq(&current->sighand->siglock); 1259 spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index e93c13458910..a8210ed5c686 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -53,8 +53,7 @@
53#include <asm/time.h> 53#include <asm/time.h>
54#include <asm/mmu_context.h> 54#include <asm/mmu_context.h>
55#include <asm/systemcfg.h> 55#include <asm/systemcfg.h>
56 56#include <asm/ppc-pci.h>
57#include "pci.h"
58 57
59/* readdir & getdents */ 58/* readdir & getdents */
60#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de))) 59#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
@@ -114,96 +113,6 @@ out:
114 return error; 113 return error;
115} 114}
116 115
117struct linux_dirent32 {
118 u32 d_ino;
119 u32 d_off;
120 unsigned short d_reclen;
121 char d_name[1];
122};
123
124struct getdents_callback32 {
125 struct linux_dirent32 __user * current_dir;
126 struct linux_dirent32 __user * previous;
127 int count;
128 int error;
129};
130
131static int filldir(void * __buf, const char * name, int namlen, off_t offset,
132 ino_t ino, unsigned int d_type)
133{
134 struct linux_dirent32 __user * dirent;
135 struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
136 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
137
138 buf->error = -EINVAL; /* only used if we fail.. */
139 if (reclen > buf->count)
140 return -EINVAL;
141 dirent = buf->previous;
142 if (dirent) {
143 if (__put_user(offset, &dirent->d_off))
144 goto efault;
145 }
146 dirent = buf->current_dir;
147 if (__put_user(ino, &dirent->d_ino))
148 goto efault;
149 if (__put_user(reclen, &dirent->d_reclen))
150 goto efault;
151 if (copy_to_user(dirent->d_name, name, namlen))
152 goto efault;
153 if (__put_user(0, dirent->d_name + namlen))
154 goto efault;
155 if (__put_user(d_type, (char __user *) dirent + reclen - 1))
156 goto efault;
157 buf->previous = dirent;
158 dirent = (void __user *)dirent + reclen;
159 buf->current_dir = dirent;
160 buf->count -= reclen;
161 return 0;
162efault:
163 buf->error = -EFAULT;
164 return -EFAULT;
165}
166
167asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 __user *dirent,
168 unsigned int count)
169{
170 struct file * file;
171 struct linux_dirent32 __user * lastdirent;
172 struct getdents_callback32 buf;
173 int error;
174
175 error = -EFAULT;
176 if (!access_ok(VERIFY_WRITE, dirent, count))
177 goto out;
178
179 error = -EBADF;
180 file = fget(fd);
181 if (!file)
182 goto out;
183
184 buf.current_dir = dirent;
185 buf.previous = NULL;
186 buf.count = count;
187 buf.error = 0;
188
189 error = vfs_readdir(file, (filldir_t)filldir, &buf);
190 if (error < 0)
191 goto out_putf;
192 error = buf.error;
193 lastdirent = buf.previous;
194 if (lastdirent) {
195 if (put_user(file->f_pos, &lastdirent->d_off))
196 error = -EFAULT;
197 else
198 error = count - buf.count;
199 }
200
201out_putf:
202 fput(file);
203out:
204 return error;
205}
206
207asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp, 116asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
208 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 117 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
209 compat_uptr_t tvp_x) 118 compat_uptr_t tvp_x)
@@ -248,7 +157,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
248 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 157 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
249 * and the register representation of a signed int (msr in 64-bit mode) is performed. 158 * and the register representation of a signed int (msr in 64-bit mode) is performed.
250 */ 159 */
251asmlinkage long sys32_sysfs(u32 option, u32 arg1, u32 arg2) 160asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
252{ 161{
253 return sys_sysfs((int)option, arg1, arg2); 162 return sys_sysfs((int)option, arg1, arg2);
254} 163}
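The recurring "treat the argument as an unsigned int, then cast to a signed int" note is about sign extension: a 32-bit task passes -1 as 0xffffffff in the low half of a 64-bit register, and only the intermediate (int) cast recovers the negative value when it is widened again. A standalone illustration in ordinary userspace C (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int arg = 0xffffffffU;     /* what the 64-bit kernel sees in the register */
	long wrong = (long)arg;             /* 4294967295: zero-extended, wrong */
	long right = (long)(int)arg;        /* -1: sign-extended via the (int) cast */

	printf("%ld %ld\n", wrong, right);  /* prints "4294967295 -1" on LP64 */
	return 0;
}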
@@ -270,7 +179,7 @@ struct timex32 {
270extern int do_adjtimex(struct timex *); 179extern int do_adjtimex(struct timex *);
271extern void ppc_adjtimex(void); 180extern void ppc_adjtimex(void);
272 181
273asmlinkage long sys32_adjtimex(struct timex32 __user *utp) 182asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
274{ 183{
275 struct timex txc; 184 struct timex txc;
276 int ret; 185 int ret;
@@ -329,7 +238,7 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
329 return ret; 238 return ret;
330} 239}
331 240
332asmlinkage long sys32_pause(void) 241asmlinkage long compat_sys_pause(void)
333{ 242{
334 current->state = TASK_INTERRUPTIBLE; 243 current->state = TASK_INTERRUPTIBLE;
335 schedule(); 244 schedule();
@@ -375,7 +284,7 @@ struct sysinfo32 {
375 char _f[20-2*sizeof(int)-sizeof(int)]; 284 char _f[20-2*sizeof(int)-sizeof(int)];
376}; 285};
377 286
378asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info) 287asmlinkage long compat_sys_sysinfo(struct sysinfo32 __user *info)
379{ 288{
380 struct sysinfo s; 289 struct sysinfo s;
381 int ret, err; 290 int ret, err;
@@ -432,7 +341,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
432 sorts of things, like timeval and itimerval. */ 341 sorts of things, like timeval and itimerval. */
433extern struct timezone sys_tz; 342extern struct timezone sys_tz;
434 343
435asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) 344asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
436{ 345{
437 if (tv) { 346 if (tv) {
438 struct timeval ktv; 347 struct timeval ktv;
@@ -450,7 +359,7 @@ asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct time
450 359
451 360
452 361
453asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) 362asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
454{ 363{
455 struct timespec kts; 364 struct timespec kts;
456 struct timezone ktz; 365 struct timezone ktz;
@@ -468,7 +377,7 @@ asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct time
468} 377}
469 378
470#ifdef CONFIG_SYSVIPC 379#ifdef CONFIG_SYSVIPC
471long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, 380long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
472 u32 fifth) 381 u32 fifth)
473{ 382{
474 int version; 383 int version;
@@ -539,7 +448,7 @@ long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
539 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 448 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
540 * and the register representation of a signed int (msr in 64-bit mode) is performed. 449 * and the register representation of a signed int (msr in 64-bit mode) is performed.
541 */ 450 */
542asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count) 451asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
543{ 452{
544 mm_segment_t old_fs = get_fs(); 453 mm_segment_t old_fs = get_fs();
545 int ret; 454 int ret;
@@ -561,7 +470,7 @@ asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offs
561 return ret; 470 return ret;
562} 471}
563 472
564asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count) 473asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
565{ 474{
566 mm_segment_t old_fs = get_fs(); 475 mm_segment_t old_fs = get_fs();
567 int ret; 476 int ret;
@@ -583,7 +492,7 @@ asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *off
583 return ret; 492 return ret;
584} 493}
585 494
586long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2, 495long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
587 unsigned long a3, unsigned long a4, unsigned long a5, 496 unsigned long a3, unsigned long a4, unsigned long a5,
588 struct pt_regs *regs) 497 struct pt_regs *regs)
589{ 498{
@@ -610,58 +519,12 @@ out:
610 return error; 519 return error;
611} 520}
612 521
613/* Set up a thread for executing a new program. */
614void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
615{
616 set_fs(USER_DS);
617
618 /*
619 * If we exec out of a kernel thread then thread.regs will not be
620 * set. Do it now.
621 */
622 if (!current->thread.regs) {
623 unsigned long childregs = (unsigned long)current->thread_info +
624 THREAD_SIZE;
625 childregs -= sizeof(struct pt_regs);
626 current->thread.regs = (struct pt_regs *)childregs;
627 }
628
629 /*
630 * ELF_PLAT_INIT already clears all registers but it also sets r2.
631 * So just clear r2 here.
632 */
633 regs->gpr[2] = 0;
634
635 regs->nip = nip;
636 regs->gpr[1] = sp;
637 regs->msr = MSR_USER32;
638#ifndef CONFIG_SMP
639 if (last_task_used_math == current)
640 last_task_used_math = 0;
641#endif /* CONFIG_SMP */
642 current->thread.fpscr = 0;
643 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
644#ifdef CONFIG_ALTIVEC
645#ifndef CONFIG_SMP
646 if (last_task_used_altivec == current)
647 last_task_used_altivec = 0;
648#endif /* CONFIG_SMP */
649 memset(current->thread.vr, 0, sizeof(current->thread.vr));
650 current->thread.vscr.u[0] = 0;
651 current->thread.vscr.u[1] = 0;
652 current->thread.vscr.u[2] = 0;
653 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
654 current->thread.vrsave = 0;
655 current->thread.used_vr = 0;
656#endif /* CONFIG_ALTIVEC */
657}
658
659/* Note: it is necessary to treat option as an unsigned int, 522/* Note: it is necessary to treat option as an unsigned int,
660 * with the corresponding cast to a signed int to insure that the 523 * with the corresponding cast to a signed int to insure that the
661 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 524 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
662 * and the register representation of a signed int (msr in 64-bit mode) is performed. 525 * and the register representation of a signed int (msr in 64-bit mode) is performed.
663 */ 526 */
664asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5) 527asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
665{ 528{
666 return sys_prctl((int)option, 529 return sys_prctl((int)option,
667 (unsigned long) arg2, 530 (unsigned long) arg2,
@@ -675,7 +538,7 @@ asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
675 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 538 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
676 * and the register representation of a signed int (msr in 64-bit mode) is performed. 539 * and the register representation of a signed int (msr in 64-bit mode) is performed.
677 */ 540 */
678asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval) 541asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
679{ 542{
680 struct timespec t; 543 struct timespec t;
681 int ret; 544 int ret;
@@ -690,7 +553,7 @@ asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __us
690 return ret; 553 return ret;
691} 554}
692 555
693asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf) 556asmlinkage int compat_sys_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
694{ 557{
695 return sys_pciconfig_read((unsigned long) bus, 558 return sys_pciconfig_read((unsigned long) bus,
696 (unsigned long) dfn, 559 (unsigned long) dfn,
@@ -699,7 +562,7 @@ asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf
699 compat_ptr(ubuf)); 562 compat_ptr(ubuf));
700} 563}
701 564
702asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf) 565asmlinkage int compat_sys_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
703{ 566{
704 return sys_pciconfig_write((unsigned long) bus, 567 return sys_pciconfig_write((unsigned long) bus,
705 (unsigned long) dfn, 568 (unsigned long) dfn,
@@ -708,7 +571,7 @@ asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubu
708 compat_ptr(ubuf)); 571 compat_ptr(ubuf));
709} 572}
710 573
711asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn) 574asmlinkage int compat_sys_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
712{ 575{
713 return sys_pciconfig_iobase(which, in_bus, in_devfn); 576 return sys_pciconfig_iobase(which, in_bus, in_devfn);
714} 577}
@@ -719,7 +582,7 @@ asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
719 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 582 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
720 * and the register representation of a signed int (msr in 64-bit mode) is performed. 583 * and the register representation of a signed int (msr in 64-bit mode) is performed.
721 */ 584 */
722asmlinkage long sys32_access(const char __user * filename, u32 mode) 585asmlinkage long compat_sys_access(const char __user * filename, u32 mode)
723{ 586{
724 return sys_access(filename, (int)mode); 587 return sys_access(filename, (int)mode);
725} 588}
@@ -730,7 +593,7 @@ asmlinkage long sys32_access(const char __user * filename, u32 mode)
730 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 593 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
731 * and the register representation of a signed int (msr in 64-bit mode) is performed. 594 * and the register representation of a signed int (msr in 64-bit mode) is performed.
732 */ 595 */
733asmlinkage long sys32_creat(const char __user * pathname, u32 mode) 596asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode)
734{ 597{
735 return sys_creat(pathname, (int)mode); 598 return sys_creat(pathname, (int)mode);
736} 599}
@@ -741,7 +604,7 @@ asmlinkage long sys32_creat(const char __user * pathname, u32 mode)
741 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 604 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
742 * and the register representation of a signed int (msr in 64-bit mode) is performed. 605 * and the register representation of a signed int (msr in 64-bit mode) is performed.
743 */ 606 */
744asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options) 607asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
745{ 608{
746 return sys_waitpid((int)pid, stat_addr, (int)options); 609 return sys_waitpid((int)pid, stat_addr, (int)options);
747} 610}
@@ -752,7 +615,7 @@ asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 opti
752 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 615 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
753 * and the register representation of a signed int (msr in 64-bit mode) is performed. 616 * and the register representation of a signed int (msr in 64-bit mode) is performed.
754 */ 617 */
755asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist) 618asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist)
756{ 619{
757 return sys_getgroups((int)gidsetsize, grouplist); 620 return sys_getgroups((int)gidsetsize, grouplist);
758} 621}
@@ -763,7 +626,7 @@ asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist)
763 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 626 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
764 * and the register representation of a signed int (msr in 64-bit mode) is performed. 627 * and the register representation of a signed int (msr in 64-bit mode) is performed.
765 */ 628 */
766asmlinkage long sys32_getpgid(u32 pid) 629asmlinkage long compat_sys_getpgid(u32 pid)
767{ 630{
768 return sys_getpgid((int)pid); 631 return sys_getpgid((int)pid);
769} 632}
@@ -775,7 +638,7 @@ asmlinkage long sys32_getpgid(u32 pid)
775 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 638 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
776 * and the register representation of a signed int (msr in 64-bit mode) is performed. 639 * and the register representation of a signed int (msr in 64-bit mode) is performed.
777 */ 640 */
778asmlinkage long sys32_getsid(u32 pid) 641asmlinkage long compat_sys_getsid(u32 pid)
779{ 642{
780 return sys_getsid((int)pid); 643 return sys_getsid((int)pid);
781} 644}
@@ -786,7 +649,7 @@ asmlinkage long sys32_getsid(u32 pid)
786 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 649 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
787 * and the register representation of a signed int (msr in 64-bit mode) is performed. 650 * and the register representation of a signed int (msr in 64-bit mode) is performed.
788 */ 651 */
789asmlinkage long sys32_kill(u32 pid, u32 sig) 652asmlinkage long compat_sys_kill(u32 pid, u32 sig)
790{ 653{
791 return sys_kill((int)pid, (int)sig); 654 return sys_kill((int)pid, (int)sig);
792} 655}
@@ -797,12 +660,12 @@ asmlinkage long sys32_kill(u32 pid, u32 sig)
797 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 660 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
798 * and the register representation of a signed int (msr in 64-bit mode) is performed. 661 * and the register representation of a signed int (msr in 64-bit mode) is performed.
799 */ 662 */
800asmlinkage long sys32_mkdir(const char __user * pathname, u32 mode) 663asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode)
801{ 664{
802 return sys_mkdir(pathname, (int)mode); 665 return sys_mkdir(pathname, (int)mode);
803} 666}
804 667
805long sys32_nice(u32 increment) 668long compat_sys_nice(u32 increment)
806{ 669{
807 /* sign extend increment */ 670 /* sign extend increment */
808 return sys_nice((int)increment); 671 return sys_nice((int)increment);
@@ -819,7 +682,7 @@ off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
819 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 682 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
820 * and the register representation of a signed int (msr in 64-bit mode) is performed. 683 * and the register representation of a signed int (msr in 64-bit mode) is performed.
821 */ 684 */
822asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32 bufsiz) 685asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz)
823{ 686{
824 return sys_readlink(path, buf, (int)bufsiz); 687 return sys_readlink(path, buf, (int)bufsiz);
825} 688}
@@ -829,7 +692,7 @@ asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32
829 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 692 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
830 * and the register representation of a signed int (msr in 64-bit mode) is performed. 693 * and the register representation of a signed int (msr in 64-bit mode) is performed.
831 */ 694 */
832asmlinkage long sys32_sched_get_priority_max(u32 policy) 695asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
833{ 696{
834 return sys_sched_get_priority_max((int)policy); 697 return sys_sched_get_priority_max((int)policy);
835} 698}
@@ -840,7 +703,7 @@ asmlinkage long sys32_sched_get_priority_max(u32 policy)
840 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 703 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
841 * and the register representation of a signed int (msr in 64-bit mode) is performed. 704 * and the register representation of a signed int (msr in 64-bit mode) is performed.
842 */ 705 */
843asmlinkage long sys32_sched_get_priority_min(u32 policy) 706asmlinkage long compat_sys_sched_get_priority_min(u32 policy)
844{ 707{
845 return sys_sched_get_priority_min((int)policy); 708 return sys_sched_get_priority_min((int)policy);
846} 709}
@@ -851,7 +714,7 @@ asmlinkage long sys32_sched_get_priority_min(u32 policy)
851 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 714 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
852 * and the register representation of a signed int (msr in 64-bit mode) is performed. 715 * and the register representation of a signed int (msr in 64-bit mode) is performed.
853 */ 716 */
854asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param) 717asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param)
855{ 718{
856 return sys_sched_getparam((int)pid, param); 719 return sys_sched_getparam((int)pid, param);
857} 720}
@@ -862,7 +725,7 @@ asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param)
862 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 725 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
863 * and the register representation of a signed int (msr in 64-bit mode) is performed. 726 * and the register representation of a signed int (msr in 64-bit mode) is performed.
864 */ 727 */
865asmlinkage long sys32_sched_getscheduler(u32 pid) 728asmlinkage long compat_sys_sched_getscheduler(u32 pid)
866{ 729{
867 return sys_sched_getscheduler((int)pid); 730 return sys_sched_getscheduler((int)pid);
868} 731}
@@ -873,7 +736,7 @@ asmlinkage long sys32_sched_getscheduler(u32 pid)
873 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 736 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
874 * and the register representation of a signed int (msr in 64-bit mode) is performed. 737 * and the register representation of a signed int (msr in 64-bit mode) is performed.
875 */ 738 */
876asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param) 739asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param)
877{ 740{
878 return sys_sched_setparam((int)pid, param); 741 return sys_sched_setparam((int)pid, param);
879} 742}
@@ -884,7 +747,7 @@ asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param)
884 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 747 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
885 * and the register representation of a signed int (msr in 64-bit mode) is performed. 748 * and the register representation of a signed int (msr in 64-bit mode) is performed.
886 */ 749 */
887asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param) 750asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
888{ 751{
889 return sys_sched_setscheduler((int)pid, (int)policy, param); 752 return sys_sched_setscheduler((int)pid, (int)policy, param);
890} 753}
@@ -895,7 +758,7 @@ asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param
895 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 758 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
896 * and the register representation of a signed int (msr in 64-bit mode) is performed. 759 * and the register representation of a signed int (msr in 64-bit mode) is performed.
897 */ 760 */
898asmlinkage long sys32_setdomainname(char __user *name, u32 len) 761asmlinkage long compat_sys_setdomainname(char __user *name, u32 len)
899{ 762{
900 return sys_setdomainname(name, (int)len); 763 return sys_setdomainname(name, (int)len);
901} 764}
@@ -906,13 +769,13 @@ asmlinkage long sys32_setdomainname(char __user *name, u32 len)
906 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 769 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
907 * and the register representation of a signed int (msr in 64-bit mode) is performed. 770 * and the register representation of a signed int (msr in 64-bit mode) is performed.
908 */ 771 */
909asmlinkage long sys32_setgroups(u32 gidsetsize, gid_t __user *grouplist) 772asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist)
910{ 773{
911 return sys_setgroups((int)gidsetsize, grouplist); 774 return sys_setgroups((int)gidsetsize, grouplist);
912} 775}
913 776
914 777
915asmlinkage long sys32_sethostname(char __user *name, u32 len) 778asmlinkage long compat_sys_sethostname(char __user *name, u32 len)
916{ 779{
917 /* sign extend len */ 780 /* sign extend len */
918 return sys_sethostname(name, (int)len); 781 return sys_sethostname(name, (int)len);
@@ -924,30 +787,30 @@ asmlinkage long sys32_sethostname(char __user *name, u32 len)
924 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 787 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
925 * and the register representation of a signed int (msr in 64-bit mode) is performed. 788 * and the register representation of a signed int (msr in 64-bit mode) is performed.
926 */ 789 */
927asmlinkage long sys32_setpgid(u32 pid, u32 pgid) 790asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid)
928{ 791{
929 return sys_setpgid((int)pid, (int)pgid); 792 return sys_setpgid((int)pid, (int)pgid);
930} 793}
931 794
932long sys32_getpriority(u32 which, u32 who) 795long compat_sys_getpriority(u32 which, u32 who)
933{ 796{
934 /* sign extend which and who */ 797 /* sign extend which and who */
935 return sys_getpriority((int)which, (int)who); 798 return sys_getpriority((int)which, (int)who);
936} 799}
937 800
938long sys32_setpriority(u32 which, u32 who, u32 niceval) 801long compat_sys_setpriority(u32 which, u32 who, u32 niceval)
939{ 802{
940 /* sign extend which, who and niceval */ 803 /* sign extend which, who and niceval */
941 return sys_setpriority((int)which, (int)who, (int)niceval); 804 return sys_setpriority((int)which, (int)who, (int)niceval);
942} 805}
943 806
944long sys32_ioprio_get(u32 which, u32 who) 807long compat_sys_ioprio_get(u32 which, u32 who)
945{ 808{
946 /* sign extend which and who */ 809 /* sign extend which and who */
947 return sys_ioprio_get((int)which, (int)who); 810 return sys_ioprio_get((int)which, (int)who);
948} 811}
949 812
950long sys32_ioprio_set(u32 which, u32 who, u32 ioprio) 813long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio)
951{ 814{
952 /* sign extend which, who and ioprio */ 815 /* sign extend which, who and ioprio */
953 return sys_ioprio_set((int)which, (int)who, (int)ioprio); 816 return sys_ioprio_set((int)which, (int)who, (int)ioprio);
@@ -958,12 +821,12 @@ long sys32_ioprio_set(u32 which, u32 who, u32 ioprio)
958 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 821 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
959 * and the register representation of a signed int (msr in 64-bit mode) is performed. 822 * and the register representation of a signed int (msr in 64-bit mode) is performed.
960 */ 823 */
961asmlinkage long sys32_ssetmask(u32 newmask) 824asmlinkage long compat_sys_ssetmask(u32 newmask)
962{ 825{
963 return sys_ssetmask((int) newmask); 826 return sys_ssetmask((int) newmask);
964} 827}
965 828
966asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len) 829asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len)
967{ 830{
968 /* sign extend len */ 831 /* sign extend len */
969 return sys_syslog(type, buf, (int)len); 832 return sys_syslog(type, buf, (int)len);
@@ -975,7 +838,7 @@ asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len)
975 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 838 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
976 * and the register representation of a signed int (msr in 64-bit mode) is performed. 839 * and the register representation of a signed int (msr in 64-bit mode) is performed.
977 */ 840 */
978asmlinkage long sys32_umask(u32 mask) 841asmlinkage long compat_sys_umask(u32 mask)
979{ 842{
980 return sys_umask((int)mask); 843 return sys_umask((int)mask);
981} 844}
@@ -991,7 +854,7 @@ struct __sysctl_args32 {
991 u32 __unused[4]; 854 u32 __unused[4];
992}; 855};
993 856
994asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) 857asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
995{ 858{
996 struct __sysctl_args32 tmp; 859 struct __sysctl_args32 tmp;
997 int error; 860 int error;
@@ -1032,55 +895,7 @@ asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
1032} 895}
1033#endif 896#endif
1034 897
1035asmlinkage int sys32_uname(struct old_utsname __user * name) 898unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
1036{
1037 int err = 0;
1038
1039 down_read(&uts_sem);
1040 if (copy_to_user(name, &system_utsname, sizeof(*name)))
1041 err = -EFAULT;
1042 up_read(&uts_sem);
1043 if (!err && personality(current->personality) == PER_LINUX32) {
1044 /* change "ppc64" to "ppc" */
1045 if (__put_user(0, name->machine + 3)
1046 || __put_user(0, name->machine + 4))
1047 err = -EFAULT;
1048 }
1049 return err;
1050}
1051
1052asmlinkage int sys32_olduname(struct oldold_utsname __user * name)
1053{
1054 int error;
1055
1056 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
1057 return -EFAULT;
1058
1059 down_read(&uts_sem);
1060 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
1061 error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
1062 error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
1063 error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
1064 error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
1065 error |= __put_user(0,name->release+__OLD_UTS_LEN);
1066 error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
1067 error |= __put_user(0,name->version+__OLD_UTS_LEN);
1068 error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
1069 error |= __put_user(0,name->machine+__OLD_UTS_LEN);
1070 if (personality(current->personality) == PER_LINUX32) {
1071 /* change "ppc64" to "ppc" */
1072 error |= __put_user(0, name->machine + 3);
1073 error |= __put_user(0, name->machine + 4);
1074 }
1075
1076 up_read(&uts_sem);
1077
1078 error = error ? -EFAULT : 0;
1079
1080 return error;
1081}
1082
1083unsigned long sys32_mmap2(unsigned long addr, size_t len,
1084 unsigned long prot, unsigned long flags, 899 unsigned long prot, unsigned long flags,
1085 unsigned long fd, unsigned long pgoff) 900 unsigned long fd, unsigned long pgoff)
1086{ 901{
@@ -1088,29 +903,7 @@ unsigned long sys32_mmap2(unsigned long addr, size_t len,
1088 return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); 903 return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
1089} 904}
1090 905
1091int get_compat_timeval(struct timeval *tv, struct compat_timeval __user *ctv) 906long compat_sys_tgkill(u32 tgid, u32 pid, int sig)
1092{
1093 return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
1094 __get_user(tv->tv_sec, &ctv->tv_sec) ||
1095 __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
1096}
1097
1098asmlinkage long sys32_utimes(char __user *filename, struct compat_timeval __user *tvs)
1099{
1100 struct timeval ktvs[2], *ptr;
1101
1102 ptr = NULL;
1103 if (tvs) {
1104 if (get_compat_timeval(&ktvs[0], &tvs[0]) ||
1105 get_compat_timeval(&ktvs[1], &tvs[1]))
1106 return -EFAULT;
1107 ptr = ktvs;
1108 }
1109
1110 return do_utimes(filename, ptr);
1111}
1112
1113long sys32_tgkill(u32 tgid, u32 pid, int sig)
1114{ 907{
1115 /* sign extend tgid, pid */ 908 /* sign extend tgid, pid */
1116 return sys_tgkill((int)tgid, (int)pid, sig); 909 return sys_tgkill((int)tgid, (int)pid, sig);
@@ -1121,30 +914,30 @@ long sys32_tgkill(u32 tgid, u32 pid, int sig)
1121 * The 32 bit ABI passes long longs in an odd even register pair. 914 * The 32 bit ABI passes long longs in an odd even register pair.
1122 */ 915 */
1123 916
1124compat_ssize_t sys32_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, 917compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
1125 u32 reg6, u32 poshi, u32 poslo) 918 u32 reg6, u32 poshi, u32 poslo)
1126{ 919{
1127 return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); 920 return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
1128} 921}
1129 922
1130compat_ssize_t sys32_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count, 923compat_ssize_t compat_sys_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count,
1131 u32 reg6, u32 poshi, u32 poslo) 924 u32 reg6, u32 poshi, u32 poslo)
1132{ 925{
1133 return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); 926 return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
1134} 927}
1135 928
1136compat_ssize_t sys32_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count) 929compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
1137{ 930{
1138 return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count); 931 return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
1139} 932}
1140 933
1141asmlinkage int sys32_truncate64(const char __user * path, u32 reg4, 934asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
1142 unsigned long high, unsigned long low) 935 unsigned long high, unsigned long low)
1143{ 936{
1144 return sys_truncate(path, (high << 32) | low); 937 return sys_truncate(path, (high << 32) | low);
1145} 938}
1146 939
1147asmlinkage int sys32_ftruncate64(unsigned int fd, u32 reg4, unsigned long high, 940asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
1148 unsigned long low) 941 unsigned long low)
1149{ 942{
1150 return sys_ftruncate(fd, (high << 32) | low); 943 return sys_ftruncate(fd, (high << 32) | low);
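
The wrappers above rebuild a 64-bit offset from the two 32-bit registers the old ABI uses for a long long, as in ((loff_t)poshi << 32) | poslo. A small illustrative sketch of that reconstruction (user-space only; the helper name is invented):

#include <stdio.h>
#include <stdint.h>

/* Rebuild a 64-bit value from its high and low 32-bit halves, the same
 * shape as ((loff_t)poshi << 32) | poslo in the wrappers above. */
static int64_t join64(uint32_t hi, uint32_t lo)
{
        return ((int64_t)hi << 32) | lo;
}

int main(void)
{
        /* 0x00000001_00000200 == 4294967808 */
        printf("%lld\n", (long long)join64(0x1, 0x200));
        return 0;
}
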
@@ -1164,13 +957,6 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
1164 advice); 957 advice);
1165} 958}
1166 959
1167long ppc32_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
1168 u32 len_high, u32 len_low)
1169{
1170 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
1171 (u64)len_high << 32 | len_low, advice);
1172}
1173
1174long ppc32_timer_create(clockid_t clock, 960long ppc32_timer_create(clockid_t clock,
1175 struct compat_sigevent __user *ev32, 961 struct compat_sigevent __user *ev32,
1176 timer_t __user *timer_id) 962 timer_t __user *timer_id)
@@ -1203,7 +989,7 @@ long ppc32_timer_create(clockid_t clock,
1203 return err; 989 return err;
1204} 990}
1205 991
1206asmlinkage long sys32_add_key(const char __user *_type, 992asmlinkage long compat_sys_add_key(const char __user *_type,
1207 const char __user *_description, 993 const char __user *_description,
1208 const void __user *_payload, 994 const void __user *_payload,
1209 u32 plen, 995 u32 plen,
@@ -1212,7 +998,7 @@ asmlinkage long sys32_add_key(const char __user *_type,
1212 return sys_add_key(_type, _description, _payload, plen, ringid); 998 return sys_add_key(_type, _description, _payload, plen, ringid);
1213} 999}
1214 1000
1215asmlinkage long sys32_request_key(const char __user *_type, 1001asmlinkage long compat_sys_request_key(const char __user *_type,
1216 const char __user *_description, 1002 const char __user *_description,
1217 const char __user *_callout_info, 1003 const char __user *_callout_info,
1218 u32 destringid) 1004 u32 destringid)
diff --git a/arch/ppc64/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 05f16633bd2c..f72ced11212d 100644
--- a/arch/ppc64/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/sys_ppc.c 2 * Implementation of various system calls for Linux/PowerPC
3 * 3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 5 *
7 * Derived from "arch/i386/kernel/sys_i386.c" 6 * Derived from "arch/i386/kernel/sys_i386.c"
@@ -52,9 +51,8 @@ extern unsigned long wall_jiffies;
52 * 51 *
53 * This is really horribly ugly. 52 * This is really horribly ugly.
54 */ 53 */
55asmlinkage int 54int sys_ipc(uint call, int first, unsigned long second, long third,
56sys_ipc (uint call, int first, unsigned long second, long third, 55 void __user *ptr, long fifth)
57 void __user *ptr, long fifth)
58{ 56{
59 int version, ret; 57 int version, ret;
60 58
@@ -88,7 +86,7 @@ sys_ipc (uint call, int first, unsigned long second, long third,
88 } 86 }
89 case MSGSND: 87 case MSGSND:
90 ret = sys_msgsnd(first, (struct msgbuf __user *)ptr, 88 ret = sys_msgsnd(first, (struct msgbuf __user *)ptr,
91 (size_t)second, third); 89 (size_t)second, third);
92 break; 90 break;
93 case MSGRCV: 91 case MSGRCV:
94 switch (version) { 92 switch (version) {
@@ -113,41 +111,29 @@ sys_ipc (uint call, int first, unsigned long second, long third,
113 } 111 }
114 break; 112 break;
115 case MSGGET: 113 case MSGGET:
116 ret = sys_msgget ((key_t)first, (int)second); 114 ret = sys_msgget((key_t)first, (int)second);
117 break; 115 break;
118 case MSGCTL: 116 case MSGCTL:
119 ret = sys_msgctl(first, (int)second, 117 ret = sys_msgctl(first, (int)second,
120 (struct msqid_ds __user *)ptr); 118 (struct msqid_ds __user *)ptr);
121 break; 119 break;
122 case SHMAT: 120 case SHMAT: {
123 switch (version) { 121 ulong raddr;
124 default: { 122 ret = do_shmat(first, (char __user *)ptr, (int)second, &raddr);
125 ulong raddr; 123 if (ret)
126 ret = do_shmat(first, (char __user *) ptr,
127 (int)second, &raddr);
128 if (ret)
129 break;
130 ret = put_user (raddr, (ulong __user *) third);
131 break;
132 }
133 case 1: /* iBCS2 emulator entry point */
134 ret = -EINVAL;
135 if (!segment_eq(get_fs(), get_ds()))
136 break;
137 ret = do_shmat(first, (char __user *)ptr,
138 (int)second, (ulong *)third);
139 break; 124 break;
140 } 125 ret = put_user(raddr, (ulong __user *) third);
141 break; 126 break;
142 case SHMDT: 127 }
143 ret = sys_shmdt ((char __user *)ptr); 128 case SHMDT:
129 ret = sys_shmdt((char __user *)ptr);
144 break; 130 break;
145 case SHMGET: 131 case SHMGET:
146 ret = sys_shmget (first, (size_t)second, third); 132 ret = sys_shmget(first, (size_t)second, third);
147 break; 133 break;
148 case SHMCTL: 134 case SHMCTL:
149 ret = sys_shmctl(first, (int)second, 135 ret = sys_shmctl(first, (int)second,
150 (struct shmid_ds __user *)ptr); 136 (struct shmid_ds __user *)ptr);
151 break; 137 break;
152 } 138 }
153 139
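
sys_ipc() above is a single multiplexed entry point: the call argument selects the real operation and the remaining arguments are reinterpreted per case, with SHMAT handing its result back through a user pointer. A rough, hedged user-space sketch of the same dispatch shape (all names and call numbers here are illustrative, not the kernel ABI):

#include <stdio.h>

enum { DEMO_SEMOP = 1, DEMO_SHMAT = 21 };   /* illustrative numbers only */

static int demo_semop(int id)                       { return id; }
static int demo_shmat(int id, unsigned long *raddr) { *raddr = 0x1000; return 0; }

/* One multiplexed entry point: "call" selects the operation and the
 * remaining arguments are reinterpreted per case, as in sys_ipc(). */
static int demo_ipc(unsigned int call, int first, unsigned long *third)
{
        int ret = -1;

        switch (call) {
        case DEMO_SEMOP:
                ret = demo_semop(first);
                break;
        case DEMO_SHMAT: {
                unsigned long raddr;

                ret = demo_shmat(first, &raddr);
                if (ret)
                        break;
                *third = raddr;         /* hand the mapped address back */
                break;
        }
        }
        return ret;
}

int main(void)
{
        unsigned long addr = 0;

        printf("shmat ret=%d addr=%#lx\n", demo_ipc(DEMO_SHMAT, 3, &addr), addr);
        return 0;
}
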
@@ -158,43 +144,89 @@ sys_ipc (uint call, int first, unsigned long second, long third,
158 * sys_pipe() is the normal C calling standard for creating 144 * sys_pipe() is the normal C calling standard for creating
159 * a pipe. It's not the way unix traditionally does this, though. 145 * a pipe. It's not the way unix traditionally does this, though.
160 */ 146 */
161asmlinkage int sys_pipe(int __user *fildes) 147int sys_pipe(int __user *fildes)
162{ 148{
163 int fd[2]; 149 int fd[2];
164 int error; 150 int error;
165 151
166 error = do_pipe(fd); 152 error = do_pipe(fd);
167 if (!error) { 153 if (!error) {
168 if (copy_to_user(fildes, fd, 2*sizeof(int))) 154 if (copy_to_user(fildes, fd, 2*sizeof(int)))
169 error = -EFAULT; 155 error = -EFAULT;
170 } 156 }
171
172 return error; 157 return error;
173} 158}
174 159
175unsigned long sys_mmap(unsigned long addr, size_t len, 160static inline unsigned long do_mmap2(unsigned long addr, size_t len,
176 unsigned long prot, unsigned long flags, 161 unsigned long prot, unsigned long flags,
177 unsigned long fd, off_t offset) 162 unsigned long fd, unsigned long off, int shift)
178{ 163{
179 struct file * file = NULL; 164 struct file * file = NULL;
180 unsigned long ret = -EBADF; 165 unsigned long ret = -EINVAL;
181 166
167 if (shift) {
168 if (off & ((1 << shift) - 1))
169 goto out;
170 off >>= shift;
171 }
172
173 ret = -EBADF;
182 if (!(flags & MAP_ANONYMOUS)) { 174 if (!(flags & MAP_ANONYMOUS)) {
183 if (!(file = fget(fd))) 175 if (!(file = fget(fd)))
184 goto out; 176 goto out;
185 } 177 }
186 178
187 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 179 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
180
188 down_write(&current->mm->mmap_sem); 181 down_write(&current->mm->mmap_sem);
189 ret = do_mmap(file, addr, len, prot, flags, offset); 182 ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
190 up_write(&current->mm->mmap_sem); 183 up_write(&current->mm->mmap_sem);
191 if (file) 184 if (file)
192 fput(file); 185 fput(file);
193
194out: 186out:
195 return ret; 187 return ret;
196} 188}
197 189
190unsigned long sys_mmap2(unsigned long addr, size_t len,
191 unsigned long prot, unsigned long flags,
192 unsigned long fd, unsigned long pgoff)
193{
194 return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
195}
196
197unsigned long sys_mmap(unsigned long addr, size_t len,
198 unsigned long prot, unsigned long flags,
199 unsigned long fd, off_t offset)
200{
201 return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
202}
203
204#ifdef CONFIG_PPC32
205/*
206 * Due to some executables calling the wrong select we sometimes
207 * get wrong args. This determines how the args are being passed
208 * (a single ptr to them all args passed) then calls
209 * sys_select() with the appropriate args. -- Cort
210 */
211int
212ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
213{
214 if ( (unsigned long)n >= 4096 )
215 {
216 unsigned long __user *buffer = (unsigned long __user *)n;
217 if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
218 || __get_user(n, buffer)
219 || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
220 || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
221 || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
222 || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
223 return -EFAULT;
224 }
225 return sys_select(n, inp, outp, exp, tvp);
226}
227#endif
228
229#ifdef CONFIG_PPC64
198long ppc64_personality(unsigned long personality) 230long ppc64_personality(unsigned long personality)
199{ 231{
200 long ret; 232 long ret;
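
do_mmap2() above folds both mmap entry points into one helper: the caller supplies the offset in its ABI's unit plus a shift that converts it to page units, and unaligned offsets are rejected before the shift. A hedged arithmetic sketch of that conversion, assuming 4 KiB pages and using invented names:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12      /* assume 4 KiB pages for the illustration */

/* Mirror of the shift logic in do_mmap2(): refuse offsets that are not a
 * multiple of the caller's unit, then reduce the offset to page units.
 * Returns -1 on misalignment (the kernel helper returns -EINVAL). */
static long to_pgoff(unsigned long off, int shift)
{
        if (shift) {
                if (off & ((1UL << shift) - 1))
                        return -1;
                off >>= shift;
        }
        return (long)off;
}

int main(void)
{
        /* sys_mmap: byte offset, shifted by PAGE_SHIFT */
        printf("%ld\n", to_pgoff(8192, DEMO_PAGE_SHIFT));        /* 2 */
        /* sys_mmap2: offset already in 4 KiB units, shift is PAGE_SHIFT - 12 */
        printf("%ld\n", to_pgoff(2, DEMO_PAGE_SHIFT - 12));      /* 2 */
        /* a misaligned byte offset is refused */
        printf("%ld\n", to_pgoff(8191, DEMO_PAGE_SHIFT));        /* -1 */
        return 0;
}
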
@@ -207,8 +239,25 @@ long ppc64_personality(unsigned long personality)
207 ret = PER_LINUX; 239 ret = PER_LINUX;
208 return ret; 240 return ret;
209} 241}
242#endif
243
244#ifdef CONFIG_PPC64
245#define OVERRIDE_MACHINE (personality(current->personality) == PER_LINUX32)
246#else
247#define OVERRIDE_MACHINE 0
248#endif
249
250static inline int override_machine(char *mach)
251{
252 if (OVERRIDE_MACHINE) {
253 /* change ppc64 to ppc */
254 if (__put_user(0, mach+3) || __put_user(0, mach+4))
255 return -EFAULT;
256 }
257 return 0;
258}
210 259
211long ppc64_newuname(struct new_utsname __user * name) 260long ppc_newuname(struct new_utsname __user * name)
212{ 261{
213 int err = 0; 262 int err = 0;
214 263
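
override_machine() above makes a PER_LINUX32 task see "ppc" instead of "ppc64" by storing a NUL over the trailing characters of the machine string; the kernel uses __put_user() because the buffer lives in user memory. A tiny user-space illustration of the truncation:

#include <stdio.h>

int main(void)
{
        char machine[65] = "ppc64";

        /* Same effect as __put_user(0, mach + 3) / __put_user(0, mach + 4):
         * "ppc64" becomes "ppc" for PER_LINUX32 tasks. */
        machine[3] = '\0';
        machine[4] = '\0';

        printf("%s\n", machine);        /* prints "ppc" */
        return 0;
}
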
@@ -216,16 +265,54 @@ long ppc64_newuname(struct new_utsname __user * name)
216 if (copy_to_user(name, &system_utsname, sizeof(*name))) 265 if (copy_to_user(name, &system_utsname, sizeof(*name)))
217 err = -EFAULT; 266 err = -EFAULT;
218 up_read(&uts_sem); 267 up_read(&uts_sem);
219 if (!err && personality(current->personality) == PER_LINUX32) { 268 if (!err)
220 /* change ppc64 to ppc */ 269 err = override_machine(name->machine);
221 if (__put_user(0, name->machine + 3)
222 || __put_user(0, name->machine + 4))
223 err = -EFAULT;
224 }
225 return err; 270 return err;
226} 271}
227 272
228asmlinkage time_t sys64_time(time_t __user * tloc) 273int sys_uname(struct old_utsname __user *name)
274{
275 int err = 0;
276
277 down_read(&uts_sem);
278 if (copy_to_user(name, &system_utsname, sizeof(*name)))
279 err = -EFAULT;
280 up_read(&uts_sem);
281 if (!err)
282 err = override_machine(name->machine);
283 return err;
284}
285
286int sys_olduname(struct oldold_utsname __user *name)
287{
288 int error;
289
290 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
291 return -EFAULT;
292
293 down_read(&uts_sem);
294 error = __copy_to_user(&name->sysname, &system_utsname.sysname,
295 __OLD_UTS_LEN);
296 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
297 error |= __copy_to_user(&name->nodename, &system_utsname.nodename,
298 __OLD_UTS_LEN);
299 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
300 error |= __copy_to_user(&name->release, &system_utsname.release,
301 __OLD_UTS_LEN);
302 error |= __put_user(0, name->release + __OLD_UTS_LEN);
303 error |= __copy_to_user(&name->version, &system_utsname.version,
304 __OLD_UTS_LEN);
305 error |= __put_user(0, name->version + __OLD_UTS_LEN);
306 error |= __copy_to_user(&name->machine, &system_utsname.machine,
307 __OLD_UTS_LEN);
308 error |= override_machine(name->machine);
309 up_read(&uts_sem);
310
311 return error? -EFAULT: 0;
312}
313
314#ifdef CONFIG_PPC64
315time_t sys64_time(time_t __user * tloc)
229{ 316{
230 time_t secs; 317 time_t secs;
231 time_t usecs; 318 time_t usecs;
@@ -247,6 +334,14 @@ asmlinkage time_t sys64_time(time_t __user * tloc)
247 334
248 return secs; 335 return secs;
249} 336}
337#endif
338
339long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
340 u32 len_high, u32 len_low)
341{
342 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
343 (u64)len_high << 32 | len_low, advice);
344}
250 345
251void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5, 346void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
252 unsigned long r6, unsigned long r7, unsigned long r8, 347 unsigned long r6, unsigned long r7, unsigned long r8,
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
new file mode 100644
index 000000000000..65eaea91b499
--- /dev/null
+++ b/arch/powerpc/kernel/systbl.S
@@ -0,0 +1,321 @@
1/*
2 * This file contains the table of syscall-handling functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <asm/ppc_asm.h>
19
20#ifdef CONFIG_PPC64
21#define SYSCALL(func) .llong .sys_##func,.sys_##func
22#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func
23#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func
24#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall
25#define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func
26#define SYSX(f, f3264, f32) .llong .f,.f3264
27#else
28#define SYSCALL(func) .long sys_##func
29#define COMPAT_SYS(func) .long sys_##func
30#define PPC_SYS(func) .long ppc_##func
31#define OLDSYS(func) .long sys_##func
32#define SYS32ONLY(func) .long sys_##func
33#define SYSX(f, f3264, f32) .long f32
34#endif
35
36#ifdef CONFIG_PPC64
37#define sys_sigpending sys_ni_syscall
38#define sys_old_getrlimit sys_ni_syscall
39#else
40#define ppc_rtas sys_ni_syscall
41#endif
42
43_GLOBAL(sys_call_table)
44SYSCALL(restart_syscall)
45SYSCALL(exit)
46PPC_SYS(fork)
47SYSCALL(read)
48SYSCALL(write)
49COMPAT_SYS(open)
50SYSCALL(close)
51COMPAT_SYS(waitpid)
52COMPAT_SYS(creat)
53SYSCALL(link)
54SYSCALL(unlink)
55COMPAT_SYS(execve)
56SYSCALL(chdir)
57SYSX(sys64_time,compat_sys_time,sys_time)
58SYSCALL(mknod)
59SYSCALL(chmod)
60SYSCALL(lchown)
61SYSCALL(ni_syscall)
62OLDSYS(stat)
63SYSX(sys_lseek,ppc32_lseek,sys_lseek)
64SYSCALL(getpid)
65COMPAT_SYS(mount)
66SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
67SYSCALL(setuid)
68SYSCALL(getuid)
69COMPAT_SYS(stime)
70COMPAT_SYS(ptrace)
71SYSCALL(alarm)
72OLDSYS(fstat)
73COMPAT_SYS(pause)
74COMPAT_SYS(utime)
75SYSCALL(ni_syscall)
76SYSCALL(ni_syscall)
77COMPAT_SYS(access)
78COMPAT_SYS(nice)
79SYSCALL(ni_syscall)
80SYSCALL(sync)
81COMPAT_SYS(kill)
82SYSCALL(rename)
83COMPAT_SYS(mkdir)
84SYSCALL(rmdir)
85SYSCALL(dup)
86SYSCALL(pipe)
87COMPAT_SYS(times)
88SYSCALL(ni_syscall)
89SYSCALL(brk)
90SYSCALL(setgid)
91SYSCALL(getgid)
92SYSCALL(signal)
93SYSCALL(geteuid)
94SYSCALL(getegid)
95SYSCALL(acct)
96SYSCALL(umount)
97SYSCALL(ni_syscall)
98COMPAT_SYS(ioctl)
99COMPAT_SYS(fcntl)
100SYSCALL(ni_syscall)
101COMPAT_SYS(setpgid)
102SYSCALL(ni_syscall)
103SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
104COMPAT_SYS(umask)
105SYSCALL(chroot)
106SYSCALL(ustat)
107SYSCALL(dup2)
108SYSCALL(getppid)
109SYSCALL(getpgrp)
110SYSCALL(setsid)
111SYS32ONLY(sigaction)
112SYSCALL(sgetmask)
113COMPAT_SYS(ssetmask)
114SYSCALL(setreuid)
115SYSCALL(setregid)
116SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
117COMPAT_SYS(sigpending)
118COMPAT_SYS(sethostname)
119COMPAT_SYS(setrlimit)
120COMPAT_SYS(old_getrlimit)
121COMPAT_SYS(getrusage)
122COMPAT_SYS(gettimeofday)
123COMPAT_SYS(settimeofday)
124COMPAT_SYS(getgroups)
125COMPAT_SYS(setgroups)
126SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
127SYSCALL(symlink)
128OLDSYS(lstat)
129COMPAT_SYS(readlink)
130SYSCALL(uselib)
131SYSCALL(swapon)
132SYSCALL(reboot)
133SYSX(sys_ni_syscall,old32_readdir,old_readdir)
134SYSCALL(mmap)
135SYSCALL(munmap)
136SYSCALL(truncate)
137SYSCALL(ftruncate)
138SYSCALL(fchmod)
139SYSCALL(fchown)
140COMPAT_SYS(getpriority)
141COMPAT_SYS(setpriority)
142SYSCALL(ni_syscall)
143COMPAT_SYS(statfs)
144COMPAT_SYS(fstatfs)
145SYSCALL(ni_syscall)
146COMPAT_SYS(socketcall)
147COMPAT_SYS(syslog)
148COMPAT_SYS(setitimer)
149COMPAT_SYS(getitimer)
150COMPAT_SYS(newstat)
151COMPAT_SYS(newlstat)
152COMPAT_SYS(newfstat)
153SYSX(sys_ni_syscall,sys_uname,sys_uname)
154SYSCALL(ni_syscall)
155SYSCALL(vhangup)
156SYSCALL(ni_syscall)
157SYSCALL(ni_syscall)
158COMPAT_SYS(wait4)
159SYSCALL(swapoff)
160COMPAT_SYS(sysinfo)
161COMPAT_SYS(ipc)
162SYSCALL(fsync)
163SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
164PPC_SYS(clone)
165COMPAT_SYS(setdomainname)
166PPC_SYS(newuname)
167SYSCALL(ni_syscall)
168COMPAT_SYS(adjtimex)
169SYSCALL(mprotect)
170SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
171SYSCALL(ni_syscall)
172SYSCALL(init_module)
173SYSCALL(delete_module)
174SYSCALL(ni_syscall)
175SYSCALL(quotactl)
176COMPAT_SYS(getpgid)
177SYSCALL(fchdir)
178SYSCALL(bdflush)
179COMPAT_SYS(sysfs)
180SYSX(ppc64_personality,ppc64_personality,sys_personality)
181SYSCALL(ni_syscall)
182SYSCALL(setfsuid)
183SYSCALL(setfsgid)
184SYSCALL(llseek)
185COMPAT_SYS(getdents)
186SYSX(sys_select,ppc32_select,ppc_select)
187SYSCALL(flock)
188SYSCALL(msync)
189COMPAT_SYS(readv)
190COMPAT_SYS(writev)
191COMPAT_SYS(getsid)
192SYSCALL(fdatasync)
193COMPAT_SYS(sysctl)
194SYSCALL(mlock)
195SYSCALL(munlock)
196SYSCALL(mlockall)
197SYSCALL(munlockall)
198COMPAT_SYS(sched_setparam)
199COMPAT_SYS(sched_getparam)
200COMPAT_SYS(sched_setscheduler)
201COMPAT_SYS(sched_getscheduler)
202SYSCALL(sched_yield)
203COMPAT_SYS(sched_get_priority_max)
204COMPAT_SYS(sched_get_priority_min)
205COMPAT_SYS(sched_rr_get_interval)
206COMPAT_SYS(nanosleep)
207SYSCALL(mremap)
208SYSCALL(setresuid)
209SYSCALL(getresuid)
210SYSCALL(ni_syscall)
211SYSCALL(poll)
212COMPAT_SYS(nfsservctl)
213SYSCALL(setresgid)
214SYSCALL(getresgid)
215COMPAT_SYS(prctl)
216SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
217COMPAT_SYS(rt_sigaction)
218COMPAT_SYS(rt_sigprocmask)
219COMPAT_SYS(rt_sigpending)
220COMPAT_SYS(rt_sigtimedwait)
221COMPAT_SYS(rt_sigqueueinfo)
222SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
223COMPAT_SYS(pread64)
224COMPAT_SYS(pwrite64)
225SYSCALL(chown)
226SYSCALL(getcwd)
227SYSCALL(capget)
228SYSCALL(capset)
229COMPAT_SYS(sigaltstack)
230SYSX(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
231SYSCALL(ni_syscall)
232SYSCALL(ni_syscall)
233PPC_SYS(vfork)
234COMPAT_SYS(getrlimit)
235COMPAT_SYS(readahead)
236SYS32ONLY(mmap2)
237SYS32ONLY(truncate64)
238SYS32ONLY(ftruncate64)
239SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
240SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
241SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
242COMPAT_SYS(pciconfig_read)
243COMPAT_SYS(pciconfig_write)
244COMPAT_SYS(pciconfig_iobase)
245SYSCALL(ni_syscall)
246SYSCALL(getdents64)
247SYSCALL(pivot_root)
248SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
249SYSCALL(madvise)
250SYSCALL(mincore)
251SYSCALL(gettid)
252SYSCALL(tkill)
253SYSCALL(setxattr)
254SYSCALL(lsetxattr)
255SYSCALL(fsetxattr)
256SYSCALL(getxattr)
257SYSCALL(lgetxattr)
258SYSCALL(fgetxattr)
259SYSCALL(listxattr)
260SYSCALL(llistxattr)
261SYSCALL(flistxattr)
262SYSCALL(removexattr)
263SYSCALL(lremovexattr)
264SYSCALL(fremovexattr)
265COMPAT_SYS(futex)
266COMPAT_SYS(sched_setaffinity)
267COMPAT_SYS(sched_getaffinity)
268SYSCALL(ni_syscall)
269SYSCALL(ni_syscall)
270SYS32ONLY(sendfile64)
271COMPAT_SYS(io_setup)
272SYSCALL(io_destroy)
273COMPAT_SYS(io_getevents)
274COMPAT_SYS(io_submit)
275SYSCALL(io_cancel)
276SYSCALL(set_tid_address)
277SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
278SYSCALL(exit_group)
279SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
280SYSCALL(epoll_create)
281SYSCALL(epoll_ctl)
282SYSCALL(epoll_wait)
283SYSCALL(remap_file_pages)
284SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
285COMPAT_SYS(timer_settime)
286COMPAT_SYS(timer_gettime)
287SYSCALL(timer_getoverrun)
288SYSCALL(timer_delete)
289COMPAT_SYS(clock_settime)
290COMPAT_SYS(clock_gettime)
291COMPAT_SYS(clock_getres)
292COMPAT_SYS(clock_nanosleep)
293SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
294COMPAT_SYS(tgkill)
295COMPAT_SYS(utimes)
296COMPAT_SYS(statfs64)
297COMPAT_SYS(fstatfs64)
298SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
299PPC_SYS(rtas)
300OLDSYS(debug_setcontext)
301SYSCALL(ni_syscall)
302SYSCALL(ni_syscall)
303COMPAT_SYS(mbind)
304COMPAT_SYS(get_mempolicy)
305COMPAT_SYS(set_mempolicy)
306COMPAT_SYS(mq_open)
307SYSCALL(mq_unlink)
308COMPAT_SYS(mq_timedsend)
309COMPAT_SYS(mq_timedreceive)
310COMPAT_SYS(mq_notify)
311COMPAT_SYS(mq_getsetattr)
312COMPAT_SYS(kexec_load)
313COMPAT_SYS(add_key)
314COMPAT_SYS(request_key)
315COMPAT_SYS(keyctl)
316COMPAT_SYS(waitid)
317COMPAT_SYS(ioprio_set)
318COMPAT_SYS(ioprio_get)
319SYSCALL(inotify_init)
320SYSCALL(inotify_add_watch)
321SYSCALL(inotify_rm_watch)
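
The macros at the top of systbl.S emit, on ppc64, two entries per syscall (the native handler and the 32-bit compat handler), while a ppc32 build emits a single .long entry. Purely as a conceptual illustration, and not how the real table is built, the same idea can be modelled in C as an array of handler pairs:

#include <stdio.h>

typedef long (*handler_t)(long);

static long sys_demo(long x)        { return x; }
static long compat_sys_demo(long x) { return x + 1; }
static long sys_ni(long x)          { (void)x; return -38; /* -ENOSYS */ }

/* Conceptual equivalent of SYSCALL()/COMPAT_SYS()/SYS32ONLY(): one column
 * for 64-bit callers, one for 32-bit callers. */
struct entry { handler_t native; handler_t compat; };

static const struct entry table[] = {
        { sys_demo, compat_sys_demo },  /* COMPAT_SYS(demo)  */
        { sys_demo, sys_demo },         /* SYSCALL(demo)     */
        { sys_ni,   compat_sys_demo },  /* SYS32ONLY(demo)   */
};

int main(void)
{
        int is_32bit_task = 1;
        handler_t h = is_32bit_task ? table[0].compat : table[0].native;

        printf("%ld\n", h(41));         /* 42, via the compat column */
        return 0;
}

The kernel picks the column at syscall entry depending on whether the caller is a 32-bit task, which is why a single unified table can serve both ABIs.
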
diff --git a/arch/ppc64/kernel/time.c b/arch/powerpc/kernel/time.c
index b56c6a324e17..23436b6c1881 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1,5 +1,4 @@
1/* 1/*
2 *
3 * Common time routines among all ppc machines. 2 * Common time routines among all ppc machines.
4 * 3 *
5 * Written by Cort Dougan (cort@cs.nmt.edu) to merge 4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
@@ -44,33 +43,32 @@
44#include <linux/interrupt.h> 43#include <linux/interrupt.h>
45#include <linux/timex.h> 44#include <linux/timex.h>
46#include <linux/kernel_stat.h> 45#include <linux/kernel_stat.h>
47#include <linux/mc146818rtc.h>
48#include <linux/time.h> 46#include <linux/time.h>
49#include <linux/init.h> 47#include <linux/init.h>
50#include <linux/profile.h> 48#include <linux/profile.h>
51#include <linux/cpu.h> 49#include <linux/cpu.h>
52#include <linux/security.h> 50#include <linux/security.h>
51#include <linux/percpu.h>
52#include <linux/rtc.h>
53 53
54#include <asm/io.h> 54#include <asm/io.h>
55#include <asm/processor.h> 55#include <asm/processor.h>
56#include <asm/nvram.h> 56#include <asm/nvram.h>
57#include <asm/cache.h> 57#include <asm/cache.h>
58#include <asm/machdep.h> 58#include <asm/machdep.h>
59#ifdef CONFIG_PPC_ISERIES
60#include <asm/iSeries/ItLpQueue.h>
61#include <asm/iSeries/HvCallXm.h>
62#endif
63#include <asm/uaccess.h> 59#include <asm/uaccess.h>
64#include <asm/time.h> 60#include <asm/time.h>
65#include <asm/ppcdebug.h>
66#include <asm/prom.h> 61#include <asm/prom.h>
67#include <asm/sections.h> 62#include <asm/irq.h>
63#include <asm/div64.h>
64#ifdef CONFIG_PPC64
68#include <asm/systemcfg.h> 65#include <asm/systemcfg.h>
69#include <asm/firmware.h> 66#include <asm/firmware.h>
70 67#endif
71u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 68#ifdef CONFIG_PPC_ISERIES
72 69#include <asm/iSeries/ItLpQueue.h>
73EXPORT_SYMBOL(jiffies_64); 70#include <asm/iSeries/HvCallXm.h>
71#endif
74 72
75/* keep track of when we need to update the rtc */ 73/* keep track of when we need to update the rtc */
76time_t last_rtc_update; 74time_t last_rtc_update;
@@ -81,27 +79,37 @@ unsigned long iSeries_recal_tb = 0;
81static unsigned long first_settimeofday = 1; 79static unsigned long first_settimeofday = 1;
82#endif 80#endif
83 81
82/* The decrementer counts down by 128 every 128ns on a 601. */
83#define DECREMENTER_COUNT_601 (1000000000 / HZ)
84
84#define XSEC_PER_SEC (1024*1024) 85#define XSEC_PER_SEC (1024*1024)
85 86
87#ifdef CONFIG_PPC64
88#define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
89#else
90/* compute ((xsec << 12) * max) >> 32 */
91#define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
92#endif
93
86unsigned long tb_ticks_per_jiffy; 94unsigned long tb_ticks_per_jiffy;
87unsigned long tb_ticks_per_usec = 100; /* sane default */ 95unsigned long tb_ticks_per_usec = 100; /* sane default */
88EXPORT_SYMBOL(tb_ticks_per_usec); 96EXPORT_SYMBOL(tb_ticks_per_usec);
89unsigned long tb_ticks_per_sec; 97unsigned long tb_ticks_per_sec;
90unsigned long tb_to_xs; 98u64 tb_to_xs;
91unsigned tb_to_us; 99unsigned tb_to_us;
92unsigned long processor_freq; 100unsigned long processor_freq;
93DEFINE_SPINLOCK(rtc_lock); 101DEFINE_SPINLOCK(rtc_lock);
94EXPORT_SYMBOL_GPL(rtc_lock); 102EXPORT_SYMBOL_GPL(rtc_lock);
95 103
96unsigned long tb_to_ns_scale; 104u64 tb_to_ns_scale;
97unsigned long tb_to_ns_shift; 105unsigned tb_to_ns_shift;
98 106
99struct gettimeofday_struct do_gtod; 107struct gettimeofday_struct do_gtod;
100 108
101extern unsigned long wall_jiffies; 109extern unsigned long wall_jiffies;
102extern int smp_tb_synchronized;
103 110
104extern struct timezone sys_tz; 111extern struct timezone sys_tz;
112static long timezone_offset;
105 113
106void ppc_adjtimex(void); 114void ppc_adjtimex(void);
107 115
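
An xsec, per XSEC_PER_SEC above, is 1/2^20 of a second, and SCALE_XSEC(xsec, max) computes xsec * max / XSEC_PER_SEC: directly on ppc64, and on ppc32 as ((xsec << 12) * max) >> 32 via mulhwu(). A small user-space check (illustrative only) that the two forms agree when converting a fractional second to microseconds, as __do_gettimeofday() does:

#include <stdio.h>
#include <stdint.h>

#define XSEC_PER_SEC (1024*1024)        /* one xsec is 1/2^20 of a second */

/* 64-bit form of SCALE_XSEC: plain multiply then divide. */
static uint32_t scale_xsec64(uint32_t xsec, uint32_t max)
{
        return (uint32_t)(((uint64_t)xsec * max) / XSEC_PER_SEC);
}

/* 32-bit form: ((xsec << 12) * max) >> 32, the high word that mulhwu()
 * returns for a 32x32 -> 64 multiply. */
static uint32_t scale_xsec32(uint32_t xsec, uint32_t max)
{
        return (uint32_t)(((uint64_t)(xsec << 12) * max) >> 32);
}

int main(void)
{
        uint32_t half_sec = XSEC_PER_SEC / 2;   /* 524288 xsec */

        printf("%u us\n", scale_xsec64(half_sec, 1000000));    /* 500000 */
        printf("%u us\n", scale_xsec32(half_sec, 1000000));    /* 500000 */
        return 0;
}
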
@@ -110,6 +118,20 @@ static unsigned adjusting_time = 0;
110unsigned long ppc_proc_freq; 118unsigned long ppc_proc_freq;
111unsigned long ppc_tb_freq; 119unsigned long ppc_tb_freq;
112 120
121#ifdef CONFIG_PPC32 /* XXX for now */
122#define boot_cpuid 0
123#endif
124
125u64 tb_last_jiffy __cacheline_aligned_in_smp;
126unsigned long tb_last_stamp;
127
128/*
129 * Note that on ppc32 this only stores the bottom 32 bits of
130 * the timebase value, but that's enough to tell when a jiffy
131 * has passed.
132 */
133DEFINE_PER_CPU(unsigned long, last_jiffy);
134
113static __inline__ void timer_check_rtc(void) 135static __inline__ void timer_check_rtc(void)
114{ 136{
115 /* 137 /*
@@ -128,31 +150,31 @@ static __inline__ void timer_check_rtc(void)
128 * We should have an rtc call that only sets the minutes and 150 * We should have an rtc call that only sets the minutes and
129 * seconds like on Intel to avoid problems with non UTC clocks. 151 * seconds like on Intel to avoid problems with non UTC clocks.
130 */ 152 */
131 if (ntp_synced() && 153 if (ppc_md.set_rtc_time && ntp_synced() &&
132 xtime.tv_sec - last_rtc_update >= 659 && 154 xtime.tv_sec - last_rtc_update >= 659 &&
133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && 155 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
134 jiffies - wall_jiffies == 1) { 156 jiffies - wall_jiffies == 1) {
135 struct rtc_time tm; 157 struct rtc_time tm;
136 to_tm(xtime.tv_sec+1, &tm); 158 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
137 tm.tm_year -= 1900; 159 tm.tm_year -= 1900;
138 tm.tm_mon -= 1; 160 tm.tm_mon -= 1;
139 if (ppc_md.set_rtc_time(&tm) == 0) 161 if (ppc_md.set_rtc_time(&tm) == 0)
140 last_rtc_update = xtime.tv_sec+1; 162 last_rtc_update = xtime.tv_sec + 1;
141 else 163 else
142 /* Try again one minute later */ 164 /* Try again one minute later */
143 last_rtc_update += 60; 165 last_rtc_update += 60;
144 } 166 }
145} 167}
146 168
147/* 169/*
148 * This version of gettimeofday has microsecond resolution. 170 * This version of gettimeofday has microsecond resolution.
149 */ 171 */
150static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val) 172static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
151{ 173{
152 unsigned long sec, usec, tb_ticks; 174 unsigned long sec, usec;
153 unsigned long xsec, tb_xsec; 175 u64 tb_ticks, xsec;
154 struct gettimeofday_vars * temp_varp; 176 struct gettimeofday_vars *temp_varp;
155 unsigned long temp_tb_to_xs, temp_stamp_xsec; 177 u64 temp_tb_to_xs, temp_stamp_xsec;
156 178
157 /* 179 /*
158 * These calculations are faster (gets rid of divides) 180 * These calculations are faster (gets rid of divides)
@@ -164,11 +186,10 @@ static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
164 tb_ticks = tb_val - temp_varp->tb_orig_stamp; 186 tb_ticks = tb_val - temp_varp->tb_orig_stamp;
165 temp_tb_to_xs = temp_varp->tb_to_xs; 187 temp_tb_to_xs = temp_varp->tb_to_xs;
166 temp_stamp_xsec = temp_varp->stamp_xsec; 188 temp_stamp_xsec = temp_varp->stamp_xsec;
167 tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs ); 189 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
168 xsec = temp_stamp_xsec + tb_xsec;
169 sec = xsec / XSEC_PER_SEC; 190 sec = xsec / XSEC_PER_SEC;
170 xsec -= sec * XSEC_PER_SEC; 191 usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
171 usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC; 192 usec = SCALE_XSEC(usec, 1000000);
172 193
173 tv->tv_sec = sec; 194 tv->tv_sec = sec;
174 tv->tv_usec = usec; 195 tv->tv_usec = usec;
@@ -176,6 +197,26 @@ static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
176 197
177void do_gettimeofday(struct timeval *tv) 198void do_gettimeofday(struct timeval *tv)
178{ 199{
200 if (__USE_RTC()) {
201 /* do this the old way */
202 unsigned long flags, seq;
203 unsigned int sec, nsec, usec, lost;
204
205 do {
206 seq = read_seqbegin_irqsave(&xtime_lock, flags);
207 sec = xtime.tv_sec;
208 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
209 lost = jiffies - wall_jiffies;
210 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
211 usec = nsec / 1000 + lost * (1000000 / HZ);
212 while (usec >= 1000000) {
213 usec -= 1000000;
214 ++sec;
215 }
216 tv->tv_sec = sec;
217 tv->tv_usec = usec;
218 return;
219 }
179 __do_gettimeofday(tv, get_tb()); 220 __do_gettimeofday(tv, get_tb());
180} 221}
181 222
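
The __USE_RTC() path above derives the microsecond field from xtime plus the ticks since the last stamp, then folds any whole seconds out of it. A minimal sketch of that normalisation step, assuming HZ is 100 for the example:

#include <stdio.h>

#define DEMO_HZ 100     /* assumed tick rate for the example */

int main(void)
{
        unsigned int sec = 10, nsec = 990000000, lost = 2;
        unsigned int usec = nsec / 1000 + lost * (1000000 / DEMO_HZ);

        /* Fold whole seconds out of the microsecond field, as the
         * __USE_RTC() path does after adding the lost-tick correction. */
        while (usec >= 1000000) {
                usec -= 1000000;
                ++sec;
        }
        printf("%u.%06u\n", sec, usec);         /* 11.010000 */
        return 0;
}
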
@@ -185,6 +226,8 @@ EXPORT_SYMBOL(do_gettimeofday);
185 226
186static inline void timer_sync_xtime(unsigned long cur_tb) 227static inline void timer_sync_xtime(unsigned long cur_tb)
187{ 228{
229#ifdef CONFIG_PPC64
230 /* why do we do this? */
188 struct timeval my_tv; 231 struct timeval my_tv;
189 232
190 __do_gettimeofday(&my_tv, cur_tb); 233 __do_gettimeofday(&my_tv, cur_tb);
@@ -193,47 +236,76 @@ static inline void timer_sync_xtime(unsigned long cur_tb)
193 xtime.tv_sec = my_tv.tv_sec; 236 xtime.tv_sec = my_tv.tv_sec;
194 xtime.tv_nsec = my_tv.tv_usec * 1000; 237 xtime.tv_nsec = my_tv.tv_usec * 1000;
195 } 238 }
239#endif
196} 240}
197 241
198/* 242/*
199 * When the timebase - tb_orig_stamp gets too big, we do a manipulation 243 * There are two copies of tb_to_xs and stamp_xsec so that no
200 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the 244 * lock is needed to access and use these values in
201 * difference tb - tb_orig_stamp small enough to always fit inside a 245 * do_gettimeofday. We alternate the copies and as long as a
202 * 32 bits number. This is a requirement of our fast 32 bits userland 246 * reasonable time elapses between changes, there will never
203 * implementation in the vdso. If we "miss" a call to this function 247 * be inconsistent values. ntpd has a minimum of one minute
204 * (interrupt latency, CPU locked in a spinlock, ...) and we end up 248 * between updates.
205 * with a too big difference, then the vdso will fallback to calling
206 * the syscall
207 */ 249 */
208static __inline__ void timer_recalc_offset(unsigned long cur_tb) 250static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
251 u64 new_tb_to_xs)
209{ 252{
210 struct gettimeofday_vars * temp_varp;
211 unsigned temp_idx; 253 unsigned temp_idx;
212 unsigned long offset, new_stamp_xsec, new_tb_orig_stamp; 254 struct gettimeofday_vars *temp_varp;
213
214 if (((cur_tb - do_gtod.varp->tb_orig_stamp) & 0x80000000u) == 0)
215 return;
216 255
217 temp_idx = (do_gtod.var_idx == 0); 256 temp_idx = (do_gtod.var_idx == 0);
218 temp_varp = &do_gtod.vars[temp_idx]; 257 temp_varp = &do_gtod.vars[temp_idx];
219 258
220 new_tb_orig_stamp = cur_tb; 259 temp_varp->tb_to_xs = new_tb_to_xs;
221 offset = new_tb_orig_stamp - do_gtod.varp->tb_orig_stamp; 260 temp_varp->tb_orig_stamp = new_tb_stamp;
222 new_stamp_xsec = do_gtod.varp->stamp_xsec + mulhdu(offset, do_gtod.varp->tb_to_xs);
223
224 temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs;
225 temp_varp->tb_orig_stamp = new_tb_orig_stamp;
226 temp_varp->stamp_xsec = new_stamp_xsec; 261 temp_varp->stamp_xsec = new_stamp_xsec;
227 smp_mb(); 262 smp_mb();
228 do_gtod.varp = temp_varp; 263 do_gtod.varp = temp_varp;
229 do_gtod.var_idx = temp_idx; 264 do_gtod.var_idx = temp_idx;
230 265
266#ifdef CONFIG_PPC64
267 /*
268 * tb_update_count is used to allow the userspace gettimeofday code
269 * to assure itself that it sees a consistent view of the tb_to_xs and
270 * stamp_xsec variables. It reads the tb_update_count, then reads
271 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
272 * the two values of tb_update_count match and are even then the
273 * tb_to_xs and stamp_xsec values are consistent. If not, then it
274 * loops back and reads them again until this criteria is met.
275 */
231 ++(systemcfg->tb_update_count); 276 ++(systemcfg->tb_update_count);
232 smp_wmb(); 277 smp_wmb();
233 systemcfg->tb_orig_stamp = new_tb_orig_stamp; 278 systemcfg->tb_orig_stamp = new_tb_stamp;
234 systemcfg->stamp_xsec = new_stamp_xsec; 279 systemcfg->stamp_xsec = new_stamp_xsec;
280 systemcfg->tb_to_xs = new_tb_to_xs;
235 smp_wmb(); 281 smp_wmb();
236 ++(systemcfg->tb_update_count); 282 ++(systemcfg->tb_update_count);
283#endif
284}
285
286/*
287 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
288 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
289 * difference tb - tb_orig_stamp small enough to always fit inside a
290 * 32 bits number. This is a requirement of our fast 32 bits userland
291 * implementation in the vdso. If we "miss" a call to this function
292 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
293 * with a too big difference, then the vdso will fallback to calling
294 * the syscall
295 */
296static __inline__ void timer_recalc_offset(u64 cur_tb)
297{
298 unsigned long offset;
299 u64 new_stamp_xsec;
300
301 if (__USE_RTC())
302 return;
303 offset = cur_tb - do_gtod.varp->tb_orig_stamp;
304 if ((offset & 0x80000000u) == 0)
305 return;
306 new_stamp_xsec = do_gtod.varp->stamp_xsec
307 + mulhdu(offset, do_gtod.varp->tb_to_xs);
308 update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
237} 309}
238 310
239#ifdef CONFIG_SMP 311#ifdef CONFIG_SMP
@@ -313,26 +385,37 @@ static void iSeries_tb_recal(void)
313 * call will not be needed) 385 * call will not be needed)
314 */ 386 */
315 387
316unsigned long tb_last_stamp __cacheline_aligned_in_smp;
317
318/* 388/*
319 * timer_interrupt - gets called when the decrementer overflows, 389 * timer_interrupt - gets called when the decrementer overflows,
320 * with interrupts disabled. 390 * with interrupts disabled.
321 */ 391 */
322int timer_interrupt(struct pt_regs * regs) 392void timer_interrupt(struct pt_regs * regs)
323{ 393{
324 int next_dec; 394 int next_dec;
325 unsigned long cur_tb; 395 int cpu = smp_processor_id();
326 struct paca_struct *lpaca = get_paca(); 396 unsigned long ticks;
327 unsigned long cpu = smp_processor_id(); 397
398#ifdef CONFIG_PPC32
399 if (atomic_read(&ppc_n_lost_interrupts) != 0)
400 do_IRQ(regs);
401#endif
328 402
329 irq_enter(); 403 irq_enter();
330 404
331 profile_tick(CPU_PROFILING, regs); 405 profile_tick(CPU_PROFILING, regs);
332 406
333 lpaca->lppaca.int_dword.fields.decr_int = 0; 407#ifdef CONFIG_PPC_ISERIES
408 get_paca()->lppaca.int_dword.fields.decr_int = 0;
409#endif
410
411 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
412 >= tb_ticks_per_jiffy) {
413 /* Update last_jiffy */
414 per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
415 /* Handle RTCL overflow on 601 */
416 if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
417 per_cpu(last_jiffy, cpu) -= 1000000000;
334 418
335 while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
336 /* 419 /*
337 * We cannot disable the decrementer, so in the period 420 * We cannot disable the decrementer, so in the period
338 * between this cpu's being marked offline in cpu_online_map 421 * between this cpu's being marked offline in cpu_online_map
@@ -342,27 +425,27 @@ int timer_interrupt(struct pt_regs * regs)
342 */ 425 */
343 if (!cpu_is_offline(cpu)) 426 if (!cpu_is_offline(cpu))
344 update_process_times(user_mode(regs)); 427 update_process_times(user_mode(regs));
428
345 /* 429 /*
346 * No need to check whether cpu is offline here; boot_cpuid 430 * No need to check whether cpu is offline here; boot_cpuid
347 * should have been fixed up by now. 431 * should have been fixed up by now.
348 */ 432 */
349 if (cpu == boot_cpuid) { 433 if (cpu != boot_cpuid)
350 write_seqlock(&xtime_lock); 434 continue;
351 tb_last_stamp = lpaca->next_jiffy_update_tb; 435
352 timer_recalc_offset(lpaca->next_jiffy_update_tb); 436 write_seqlock(&xtime_lock);
353 do_timer(regs); 437 tb_last_jiffy += tb_ticks_per_jiffy;
354 timer_sync_xtime(lpaca->next_jiffy_update_tb); 438 tb_last_stamp = per_cpu(last_jiffy, cpu);
355 timer_check_rtc(); 439 timer_recalc_offset(tb_last_jiffy);
356 write_sequnlock(&xtime_lock); 440 do_timer(regs);
357 if ( adjusting_time && (time_adjust == 0) ) 441 timer_sync_xtime(tb_last_jiffy);
358 ppc_adjtimex(); 442 timer_check_rtc();
359 } 443 write_sequnlock(&xtime_lock);
360 lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy; 444 if (adjusting_time && (time_adjust == 0))
445 ppc_adjtimex();
361 } 446 }
362 447
363 next_dec = lpaca->next_jiffy_update_tb - cur_tb; 448 next_dec = tb_ticks_per_jiffy - ticks;
364 if (next_dec > lpaca->default_decr)
365 next_dec = lpaca->default_decr;
366 set_dec(next_dec); 449 set_dec(next_dec);
367 450
368#ifdef CONFIG_PPC_ISERIES 451#ifdef CONFIG_PPC_ISERIES
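
The reworked timer_interrupt() above no longer relies on next_jiffy_update_tb; it repeatedly advances the per-cpu last_jiffy stamp by tb_ticks_per_jiffy until it is within one jiffy of the current timebase, handling one tick per iteration. A hedged user-space sketch of that catch-up loop (values are illustrative):

#include <stdio.h>

#define TICKS_PER_JIFFY 1000ULL         /* illustrative value */

/* Advance the stamp one jiffy's worth of ticks at a time until it is
 * within a jiffy of "now", counting how many jiffies elapsed, the same
 * shape as the per-cpu last_jiffy loop above. */
static unsigned long catch_up(unsigned long long *last, unsigned long long now)
{
        unsigned long jiffies_passed = 0;

        while (now - *last >= TICKS_PER_JIFFY) {
                *last += TICKS_PER_JIFFY;
                jiffies_passed++;
        }
        return jiffies_passed;
}

int main(void)
{
        unsigned long long last = 0;

        printf("%lu jiffies\n", catch_up(&last, 3500));  /* 3 */
        printf("stamp now %llu\n", last);                /* 3000 */
        return 0;
}
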
@@ -370,17 +453,47 @@ int timer_interrupt(struct pt_regs * regs)
370 process_hvlpevents(regs); 453 process_hvlpevents(regs);
371#endif 454#endif
372 455
456#ifdef CONFIG_PPC64
373 /* collect purr register values often, for accurate calculations */ 457 /* collect purr register values often, for accurate calculations */
374 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 458 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
375 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 459 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
376 cu->current_tb = mfspr(SPRN_PURR); 460 cu->current_tb = mfspr(SPRN_PURR);
377 } 461 }
462#endif
378 463
379 irq_exit(); 464 irq_exit();
465}
466
467void wakeup_decrementer(void)
468{
469 int i;
380 470
381 return 1; 471 set_dec(tb_ticks_per_jiffy);
472 /*
473 * We don't expect this to be called on a machine with a 601,
474 * so using get_tbl is fine.
475 */
476 tb_last_stamp = tb_last_jiffy = get_tb();
477 for_each_cpu(i)
478 per_cpu(last_jiffy, i) = tb_last_stamp;
382} 479}
383 480
481#ifdef CONFIG_SMP
482void __init smp_space_timers(unsigned int max_cpus)
483{
484 int i;
485 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
486 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
487
488 for_each_cpu(i) {
489 if (i != boot_cpuid) {
490 previous_tb += offset;
491 per_cpu(last_jiffy, i) = previous_tb;
492 }
493 }
494}
495#endif
496
384/* 497/*
385 * Scheduler clock - returns current time in nanosec units. 498 * Scheduler clock - returns current time in nanosec units.
386 * 499 *
@@ -390,6 +503,8 @@ int timer_interrupt(struct pt_regs * regs)
390 */ 503 */
391unsigned long long sched_clock(void) 504unsigned long long sched_clock(void)
392{ 505{
506 if (__USE_RTC())
507 return get_rtc();
393 return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift; 508 return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
394} 509}
395 510
@@ -398,31 +513,31 @@ int do_settimeofday(struct timespec *tv)
398 time_t wtm_sec, new_sec = tv->tv_sec; 513 time_t wtm_sec, new_sec = tv->tv_sec;
399 long wtm_nsec, new_nsec = tv->tv_nsec; 514 long wtm_nsec, new_nsec = tv->tv_nsec;
400 unsigned long flags; 515 unsigned long flags;
401 unsigned long delta_xsec;
402 long int tb_delta; 516 long int tb_delta;
403 unsigned long new_xsec; 517 u64 new_xsec, tb_delta_xs;
404 518
405 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 519 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
406 return -EINVAL; 520 return -EINVAL;
407 521
408 write_seqlock_irqsave(&xtime_lock, flags); 522 write_seqlock_irqsave(&xtime_lock, flags);
409 /* Updating the RTC is not the job of this code. If the time is 523
410 * stepped under NTP, the RTC will be update after STA_UNSYNC 524 /*
411 * is cleared. Tool like clock/hwclock either copy the RTC 525 * Updating the RTC is not the job of this code. If the time is
526 * stepped under NTP, the RTC will be updated after STA_UNSYNC
527 * is cleared. Tools like clock/hwclock either copy the RTC
412 * to the system time, in which case there is no point in writing 528 * to the system time, in which case there is no point in writing
413 * to the RTC again, or write to the RTC but then they don't call 529 * to the RTC again, or write to the RTC but then they don't call
414 * settimeofday to perform this operation. 530 * settimeofday to perform this operation.
415 */ 531 */
416#ifdef CONFIG_PPC_ISERIES 532#ifdef CONFIG_PPC_ISERIES
417 if ( first_settimeofday ) { 533 if (first_settimeofday) {
418 iSeries_tb_recal(); 534 iSeries_tb_recal();
419 first_settimeofday = 0; 535 first_settimeofday = 0;
420 } 536 }
421#endif 537#endif
422 tb_delta = tb_ticks_since(tb_last_stamp); 538 tb_delta = tb_ticks_since(tb_last_stamp);
423 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; 539 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
424 540 tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
425 new_nsec -= tb_delta / tb_ticks_per_usec / 1000;
426 541
427 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); 542 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
428 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); 543 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
@@ -437,28 +552,18 @@ int do_settimeofday(struct timespec *tv)
437 552
438 ntp_clear(); 553 ntp_clear();
439 554
440 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp), 555 new_xsec = 0;
441 do_gtod.varp->tb_to_xs ); 556 if (new_nsec != 0) {
442 557 new_xsec = (u64)new_nsec * XSEC_PER_SEC;
443 new_xsec = (new_nsec * XSEC_PER_SEC) / NSEC_PER_SEC; 558 do_div(new_xsec, NSEC_PER_SEC);
444 new_xsec += new_sec * XSEC_PER_SEC;
445 if ( new_xsec > delta_xsec ) {
446 do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
447 systemcfg->stamp_xsec = new_xsec - delta_xsec;
448 }
449 else {
450 /* This is only for the case where the user is setting the time
451 * way back to a time such that the boot time would have been
452 * before 1970 ... eg. we booted ten days ago, and we are setting
453 * the time to Jan 5, 1970 */
454 do_gtod.varp->stamp_xsec = new_xsec;
455 do_gtod.varp->tb_orig_stamp = tb_last_stamp;
456 systemcfg->stamp_xsec = new_xsec;
457 systemcfg->tb_orig_stamp = tb_last_stamp;
458 } 559 }
560 new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
561 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
459 562
563#ifdef CONFIG_PPC64
460 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest; 564 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
461 systemcfg->tz_dsttime = sys_tz.tz_dsttime; 565 systemcfg->tz_dsttime = sys_tz.tz_dsttime;
566#endif
462 567
463 write_sequnlock_irqrestore(&xtime_lock, flags); 568 write_sequnlock_irqrestore(&xtime_lock, flags);
464 clock_was_set(); 569 clock_was_set();
@@ -467,11 +572,9 @@ int do_settimeofday(struct timespec *tv)
467 572
468EXPORT_SYMBOL(do_settimeofday); 573EXPORT_SYMBOL(do_settimeofday);
469 574
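The settimeofday math above works in "xsec" units. Assuming XSEC_PER_SEC is 2^20 (one xsec being 2^-20 of a second, an assumption about the kernel constant rather than something shown in this hunk), the nanosecond-to-xsec step can be checked in isolation:

#include <stdint.h>
#include <stdio.h>

#define XSEC_PER_SEC	(1ULL << 20)		/* assumed value of the kernel constant */
#define NSEC_PER_SEC	1000000000ULL

int main(void)
{
	uint64_t new_sec = 12345, new_nsec = 500000000;			/* 12345.5 s */
	uint64_t new_xsec = new_nsec * XSEC_PER_SEC / NSEC_PER_SEC;	/* 524288, half of XSEC_PER_SEC */

	new_xsec += new_sec * XSEC_PER_SEC;
	printf("%llu xsec\n", (unsigned long long)new_xsec);
	return 0;
}

The kernel path additionally subtracts tb_delta_xs so that the resulting stamp_xsec refers to tb_last_jiffy rather than to the instant the call was made.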
470#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA)
471void __init generic_calibrate_decr(void) 575void __init generic_calibrate_decr(void)
472{ 576{
473 struct device_node *cpu; 577 struct device_node *cpu;
474 struct div_result divres;
475 unsigned int *fp; 578 unsigned int *fp;
476 int node_found; 579 int node_found;
477 580
@@ -505,37 +608,74 @@ void __init generic_calibrate_decr(void)
505 ppc_proc_freq = *fp; 608 ppc_proc_freq = *fp;
506 } 609 }
507 } 610 }
611#ifdef CONFIG_BOOKE
612 /* Set the time base to zero */
613 mtspr(SPRN_TBWL, 0);
614 mtspr(SPRN_TBWU, 0);
615
616 /* Clear any pending timer interrupts */
617 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
618
619 /* Enable decrementer interrupt */
620 mtspr(SPRN_TCR, TCR_DIE);
621#endif
508 if (!node_found) 622 if (!node_found)
509 printk(KERN_ERR "WARNING: Estimating processor frequency " 623 printk(KERN_ERR "WARNING: Estimating processor frequency "
510 "(not found)\n"); 624 "(not found)\n");
511 625
512 of_node_put(cpu); 626 of_node_put(cpu);
627}
513 628
514 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n", 629unsigned long get_boot_time(void)
515 ppc_tb_freq/1000000, ppc_tb_freq%1000000); 630{
516 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n", 631 struct rtc_time tm;
517 ppc_proc_freq/1000000, ppc_proc_freq%1000000);
518
519 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
520 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
521 tb_ticks_per_usec = ppc_tb_freq / 1000000;
522 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
523 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
524 tb_to_xs = divres.result_low;
525 632
526 setup_default_decr(); 633 if (ppc_md.get_boot_time)
634 return ppc_md.get_boot_time();
635 if (!ppc_md.get_rtc_time)
636 return 0;
637 ppc_md.get_rtc_time(&tm);
638 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
639 tm.tm_hour, tm.tm_min, tm.tm_sec);
527} 640}
528#endif
529 641
642/* This function is only called on the boot processor */
530void __init time_init(void) 643void __init time_init(void)
531{ 644{
532 /* This function is only called on the boot processor */
533 unsigned long flags; 645 unsigned long flags;
534 struct rtc_time tm; 646 unsigned long tm = 0;
535 struct div_result res; 647 struct div_result res;
536 unsigned long scale, shift; 648 u64 scale;
649 unsigned shift;
650
651 if (ppc_md.time_init != NULL)
652 timezone_offset = ppc_md.time_init();
653
654 if (__USE_RTC()) {
655 /* 601 processor: dec counts down by 128 every 128ns */
656 ppc_tb_freq = 1000000000;
657 tb_last_stamp = get_rtcl();
658 tb_last_jiffy = tb_last_stamp;
659 } else {
660 /* Normal PowerPC with timebase register */
661 ppc_md.calibrate_decr();
662 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
663 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
664 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
665 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
666 tb_last_stamp = tb_last_jiffy = get_tb();
667 }
668
669 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
670 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
671 tb_ticks_per_usec = ppc_tb_freq / 1000000;
672 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
673 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
674 tb_to_xs = res.result_low;
537 675
538 ppc_md.calibrate_decr(); 676#ifdef CONFIG_PPC64
677 get_paca()->default_decr = tb_ticks_per_jiffy;
678#endif
539 679
540 /* 680 /*
541 * Compute scale factor for sched_clock. 681 * Compute scale factor for sched_clock.
@@ -559,29 +699,36 @@ void __init time_init(void)
559#ifdef CONFIG_PPC_ISERIES 699#ifdef CONFIG_PPC_ISERIES
560 if (!piranha_simulator) 700 if (!piranha_simulator)
561#endif 701#endif
562 ppc_md.get_boot_time(&tm); 702 tm = get_boot_time();
563 703
564 write_seqlock_irqsave(&xtime_lock, flags); 704 write_seqlock_irqsave(&xtime_lock, flags);
565 xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 705 xtime.tv_sec = tm;
566 tm.tm_hour, tm.tm_min, tm.tm_sec); 706 xtime.tv_nsec = 0;
567 tb_last_stamp = get_tb();
568 do_gtod.varp = &do_gtod.vars[0]; 707 do_gtod.varp = &do_gtod.vars[0];
569 do_gtod.var_idx = 0; 708 do_gtod.var_idx = 0;
570 do_gtod.varp->tb_orig_stamp = tb_last_stamp; 709 do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
571 get_paca()->next_jiffy_update_tb = tb_last_stamp + tb_ticks_per_jiffy; 710 __get_cpu_var(last_jiffy) = tb_last_stamp;
572 do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 711 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
573 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 712 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
574 do_gtod.varp->tb_to_xs = tb_to_xs; 713 do_gtod.varp->tb_to_xs = tb_to_xs;
575 do_gtod.tb_to_us = tb_to_us; 714 do_gtod.tb_to_us = tb_to_us;
576 systemcfg->tb_orig_stamp = tb_last_stamp; 715#ifdef CONFIG_PPC64
716 systemcfg->tb_orig_stamp = tb_last_jiffy;
577 systemcfg->tb_update_count = 0; 717 systemcfg->tb_update_count = 0;
578 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; 718 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
579 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 719 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
580 systemcfg->tb_to_xs = tb_to_xs; 720 systemcfg->tb_to_xs = tb_to_xs;
721#endif
581 722
582 time_freq = 0; 723 time_freq = 0;
583 724
584 xtime.tv_nsec = 0; 725 /* If platform provided a timezone (pmac), we correct the time */
726 if (timezone_offset) {
727 sys_tz.tz_minuteswest = -timezone_offset / 60;
728 sys_tz.tz_dsttime = 0;
729 xtime.tv_sec -= timezone_offset;
730 }
731
585 last_rtc_update = xtime.tv_sec; 732 last_rtc_update = xtime.tv_sec;
586 set_normalized_timespec(&wall_to_monotonic, 733 set_normalized_timespec(&wall_to_monotonic,
587 -xtime.tv_sec, -xtime.tv_nsec); 734 -xtime.tv_sec, -xtime.tv_nsec);
@@ -604,25 +751,28 @@ void __init time_init(void)
604 751
605void ppc_adjtimex(void) 752void ppc_adjtimex(void)
606{ 753{
607 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, new_tb_to_xs, new_xsec, new_stamp_xsec; 754#ifdef CONFIG_PPC64
755 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
756 new_tb_to_xs, new_xsec, new_stamp_xsec;
608 unsigned long tb_ticks_per_sec_delta; 757 unsigned long tb_ticks_per_sec_delta;
609 long delta_freq, ltemp; 758 long delta_freq, ltemp;
610 struct div_result divres; 759 struct div_result divres;
611 unsigned long flags; 760 unsigned long flags;
612 struct gettimeofday_vars * temp_varp;
613 unsigned temp_idx;
614 long singleshot_ppm = 0; 761 long singleshot_ppm = 0;
615 762
616 /* Compute parts per million frequency adjustment to accomplish the time adjustment 763 /*
617 implied by time_offset to be applied over the elapsed time indicated by time_constant. 764 * Compute parts per million frequency adjustment to
618 Use SHIFT_USEC to get it into the same units as time_freq. */ 765 * accomplish the time adjustment implied by time_offset to be
766 * applied over the elapsed time indicated by time_constant.
767 * Use SHIFT_USEC to get it into the same units as
768 * time_freq.
769 */
619 if ( time_offset < 0 ) { 770 if ( time_offset < 0 ) {
620 ltemp = -time_offset; 771 ltemp = -time_offset;
621 ltemp <<= SHIFT_USEC - SHIFT_UPDATE; 772 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
622 ltemp >>= SHIFT_KG + time_constant; 773 ltemp >>= SHIFT_KG + time_constant;
623 ltemp = -ltemp; 774 ltemp = -ltemp;
624 } 775 } else {
625 else {
626 ltemp = time_offset; 776 ltemp = time_offset;
627 ltemp <<= SHIFT_USEC - SHIFT_UPDATE; 777 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
628 ltemp >>= SHIFT_KG + time_constant; 778 ltemp >>= SHIFT_KG + time_constant;
@@ -639,7 +789,10 @@ void ppc_adjtimex(void)
639 789
640 adjusting_time = 1; 790 adjusting_time = 1;
641 791
642 /* Compute parts per million frequency adjustment to match time_adjust */ 792 /*
793 * Compute parts per million frequency adjustment
794 * to match time_adjust
795 */
643 singleshot_ppm = tickadj * HZ; 796 singleshot_ppm = tickadj * HZ;
644 /* 797 /*
645 * The adjustment should be tickadj*HZ to match the code in 798 * The adjustment should be tickadj*HZ to match the code in
@@ -647,7 +800,7 @@ void ppc_adjtimex(void)
647 * large. 3/4 of tickadj*HZ seems about right 800 * large. 3/4 of tickadj*HZ seems about right
648 */ 801 */
649 singleshot_ppm -= singleshot_ppm / 4; 802 singleshot_ppm -= singleshot_ppm / 4;
650 /* Use SHIFT_USEC to get it into the same units as time_freq */ 803 /* Use SHIFT_USEC to get it into the same units as time_freq */
651 singleshot_ppm <<= SHIFT_USEC; 804 singleshot_ppm <<= SHIFT_USEC;
652 if ( time_adjust < 0 ) 805 if ( time_adjust < 0 )
653 singleshot_ppm = -singleshot_ppm; 806 singleshot_ppm = -singleshot_ppm;
@@ -663,7 +816,10 @@ void ppc_adjtimex(void)
663 /* Add up all of the frequency adjustments */ 816 /* Add up all of the frequency adjustments */
664 delta_freq = time_freq + ltemp + singleshot_ppm; 817 delta_freq = time_freq + ltemp + singleshot_ppm;
665 818
666 /* Compute a new value for tb_ticks_per_sec based on the frequency adjustment */ 819 /*
820 * Compute a new value for tb_ticks_per_sec based on
821 * the frequency adjustment
822 */
667 den = 1000000 * (1 << (SHIFT_USEC - 8)); 823 den = 1000000 * (1 << (SHIFT_USEC - 8));
668 if ( delta_freq < 0 ) { 824 if ( delta_freq < 0 ) {
669 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; 825 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
@@ -678,61 +834,37 @@ void ppc_adjtimex(void)
678 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); 834 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
679 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); 835 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
680#endif 836#endif
681 837
682 /* Compute a new value of tb_to_xs (used to convert tb to microseconds and a new value of 838 /*
683 stamp_xsec which is the time (in 1/2^20 second units) corresponding to tb_orig_stamp. This 839 * Compute a new value of tb_to_xs (used to convert tb to
684 new value of stamp_xsec compensates for the change in frequency (implied by the new tb_to_xs) 840 * microseconds) and a new value of stamp_xsec which is the
685 which guarantees that the current time remains the same */ 841 * time (in 1/2^20 second units) corresponding to
842 * tb_orig_stamp. This new value of stamp_xsec compensates
843 * for the change in frequency (implied by the new tb_to_xs)
844 * which guarantees that the current time remains the same.
845 */
686 write_seqlock_irqsave( &xtime_lock, flags ); 846 write_seqlock_irqsave( &xtime_lock, flags );
687 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; 847 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
688 div128_by_32( 1024*1024, 0, new_tb_ticks_per_sec, &divres ); 848 div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
689 new_tb_to_xs = divres.result_low; 849 new_tb_to_xs = divres.result_low;
690 new_xsec = mulhdu( tb_ticks, new_tb_to_xs ); 850 new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
691 851
692 old_xsec = mulhdu( tb_ticks, do_gtod.varp->tb_to_xs ); 852 old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
693 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; 853 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
694 854
695 /* There are two copies of tb_to_xs and stamp_xsec so that no lock is needed to access and use these 855 update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
696 values in do_gettimeofday. We alternate the copies and as long as a reasonable time elapses between
697 changes, there will never be inconsistent values. ntpd has a minimum of one minute between updates */
698
699 temp_idx = (do_gtod.var_idx == 0);
700 temp_varp = &do_gtod.vars[temp_idx];
701
702 temp_varp->tb_to_xs = new_tb_to_xs;
703 temp_varp->stamp_xsec = new_stamp_xsec;
704 temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp;
705 smp_mb();
706 do_gtod.varp = temp_varp;
707 do_gtod.var_idx = temp_idx;
708
709 /*
710 * tb_update_count is used to allow the problem state gettimeofday code
711 * to assure itself that it sees a consistent view of the tb_to_xs and
712 * stamp_xsec variables. It reads the tb_update_count, then reads
713 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
714 * the two values of tb_update_count match and are even then the
715 * tb_to_xs and stamp_xsec values are consistent. If not, then it
716 * loops back and reads them again until this criteria is met.
717 */
718 ++(systemcfg->tb_update_count);
719 smp_wmb();
720 systemcfg->tb_to_xs = new_tb_to_xs;
721 systemcfg->stamp_xsec = new_stamp_xsec;
722 smp_wmb();
723 ++(systemcfg->tb_update_count);
724 856
725 write_sequnlock_irqrestore( &xtime_lock, flags ); 857 write_sequnlock_irqrestore( &xtime_lock, flags );
726 858#endif /* CONFIG_PPC64 */
727} 859}
728 860
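The block removed above open-coded two things: a double-buffered pair of gettimeofday variable sets and an even/odd tb_update_count so that lock-free readers can detect an update in progress. That logic now presumably lives in update_gtod(), which is not part of this hunk. A generic, illustrative sketch of the even/odd counter protocol (not the kernel's actual implementation):

#include <stdint.h>

struct gtod_vars {
	volatile uint64_t update_count;
	uint64_t stamp_xsec;
	uint64_t tb_to_xs;
};

static void writer_update(struct gtod_vars *v, uint64_t stamp, uint64_t to_xs)
{
	v->update_count++;		/* becomes odd: update in progress */
	__sync_synchronize();
	v->stamp_xsec = stamp;
	v->tb_to_xs = to_xs;
	__sync_synchronize();
	v->update_count++;		/* even again: values are consistent */
}

static void reader_snapshot(const struct gtod_vars *v, uint64_t *stamp, uint64_t *to_xs)
{
	uint64_t c;

	do {
		c = v->update_count;
		__sync_synchronize();
		*stamp = v->stamp_xsec;
		*to_xs = v->tb_to_xs;
		__sync_synchronize();
		/* retry if a writer was active or finished an update meanwhile */
	} while (c != v->update_count || (c & 1));
}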
729 861
730#define TICK_SIZE tick
731#define FEBRUARY 2 862#define FEBRUARY 2
732#define STARTOFTIME 1970 863#define STARTOFTIME 1970
733#define SECDAY 86400L 864#define SECDAY 86400L
734#define SECYR (SECDAY * 365) 865#define SECYR (SECDAY * 365)
735#define leapyear(year) ((year) % 4 == 0) 866#define leapyear(year) ((year) % 4 == 0 && \
867 ((year) % 100 != 0 || (year) % 400 == 0))
736#define days_in_year(a) (leapyear(a) ? 366 : 365) 868#define days_in_year(a) (leapyear(a) ? 366 : 365)
737#define days_in_month(a) (month_days[(a) - 1]) 869#define days_in_month(a) (month_days[(a) - 1])
738 870
@@ -750,37 +882,25 @@ void GregorianDay(struct rtc_time * tm)
750 int day; 882 int day;
751 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; 883 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
752 884
753 lastYear=tm->tm_year-1; 885 lastYear = tm->tm_year - 1;
754 886
755 /* 887 /*
756 * Number of leap corrections to apply up to end of last year 888 * Number of leap corrections to apply up to end of last year
757 */ 889 */
758 leapsToDate = lastYear/4 - lastYear/100 + lastYear/400; 890 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
759 891
760 /* 892 /*
761 * This year is a leap year if it is divisible by 4 except when it is 893 * This year is a leap year if it is divisible by 4 except when it is
762 * divisible by 100 unless it is divisible by 400 894 * divisible by 100 unless it is divisible by 400
763 * 895 *
764 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be 896 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
765 */ 897 */
766 if((tm->tm_year%4==0) && 898 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
767 ((tm->tm_year%100!=0) || (tm->tm_year%400==0)) &&
768 (tm->tm_mon>2))
769 {
770 /*
771 * We are past Feb. 29 in a leap year
772 */
773 day=1;
774 }
775 else
776 {
777 day=0;
778 }
779 899
780 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + 900 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
781 tm->tm_mday; 901 tm->tm_mday;
782 902
783 tm->tm_wday=day%7; 903 tm->tm_wday = day % 7;
784} 904}
785 905
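A quick check of the leap handling in GregorianDay() above, treating tm_year as a full Gregorian year: for 1 Jan 2000, lastYear = 1999, leapsToDate = 499 - 19 + 4 = 484, day = 0 + 1999*365 + 484 + 0 + 1 = 730120, and 730120 % 7 = 6, i.e. Saturday, which matches the calendar.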
786void to_tm(int tim, struct rtc_time * tm) 906void to_tm(int tim, struct rtc_time * tm)
@@ -826,14 +946,16 @@ void to_tm(int tim, struct rtc_time * tm)
826 * oscillators and the precision with which the timebase frequency 946 * oscillators and the precision with which the timebase frequency
827 * is measured but does not harm. 947 * is measured but does not harm.
828 */ 948 */
829unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) { 949unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
950{
830 unsigned mlt=0, tmp, err; 951 unsigned mlt=0, tmp, err;
831 /* No concern for performance, it's done once: use a stupid 952 /* No concern for performance, it's done once: use a stupid
832 * but safe and compact method to find the multiplier. 953 * but safe and compact method to find the multiplier.
833 */ 954 */
834 955
835 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) { 956 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
836 if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp; 957 if (mulhwu(inscale, mlt|tmp) < outscale)
958 mlt |= tmp;
837 } 959 }
838 960
839 /* We might still be off by 1 for the best approximation. 961 /* We might still be off by 1 for the best approximation.
@@ -843,39 +965,41 @@ unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
843 * some might have been forgotten in the test however. 965 * some might have been forgotten in the test however.
844 */ 966 */
845 967
846 err = inscale*(mlt+1); 968 err = inscale * (mlt+1);
847 if (err <= inscale/2) mlt++; 969 if (err <= inscale/2)
970 mlt++;
848 return mlt; 971 return mlt;
849 } 972}
850 973
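mulhwu_scale_factor() above searches bit by bit for the largest mlt with mulhwu(inscale, mlt) < outscale, i.e. mlt roughly equal to outscale * 2^32 / inscale. A user-space model of the search (the final off-by-one correction is left out), with mulhwu expressed through 64-bit arithmetic:

#include <stdint.h>

/* High 32 bits of a 32x32-bit unsigned multiply (what the mulhwu instruction returns). */
static uint32_t mulhwu(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);
}

static uint32_t scale_factor(uint32_t inscale, uint32_t outscale)
{
	uint32_t mlt = 0, tmp;

	/* set each bit of mlt, high to low, whenever it keeps the product below outscale */
	for (tmp = 1U << 31; tmp != 0; tmp >>= 1)
		if (mulhwu(inscale, mlt | tmp) < outscale)
			mlt |= tmp;
	return mlt;
}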
851/* 974/*
852 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit 975 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
853 * result. 976 * result.
854 */ 977 */
855 978void div128_by_32(u64 dividend_high, u64 dividend_low,
856void div128_by_32( unsigned long dividend_high, unsigned long dividend_low, 979 unsigned divisor, struct div_result *dr)
857 unsigned divisor, struct div_result *dr )
858{ 980{
859 unsigned long a,b,c,d, w,x,y,z, ra,rb,rc; 981 unsigned long a, b, c, d;
982 unsigned long w, x, y, z;
983 u64 ra, rb, rc;
860 984
861 a = dividend_high >> 32; 985 a = dividend_high >> 32;
862 b = dividend_high & 0xffffffff; 986 b = dividend_high & 0xffffffff;
863 c = dividend_low >> 32; 987 c = dividend_low >> 32;
864 d = dividend_low & 0xffffffff; 988 d = dividend_low & 0xffffffff;
865 989
866 w = a/divisor; 990 w = a / divisor;
867 ra = (a - (w * divisor)) << 32; 991 ra = ((u64)(a - (w * divisor)) << 32) + b;
868 992
869 x = (ra + b)/divisor; 993 rb = ((u64) do_div(ra, divisor) << 32) + c;
870 rb = ((ra + b) - (x * divisor)) << 32; 994 x = ra;
871 995
872 y = (rb + c)/divisor; 996 rc = ((u64) do_div(rb, divisor) << 32) + d;
873 rc = ((rb + c) - (y * divisor)) << 32; 997 y = rb;
874 998
875 z = (rc + d)/divisor; 999 do_div(rc, divisor);
1000 z = rc;
876 1001
877 dr->result_high = (w << 32) + x; 1002 dr->result_high = ((u64)w << 32) + x;
878 dr->result_low = (y << 32) + z; 1003 dr->result_low = ((u64)y << 32) + z;
879 1004
880} 1005}
881
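The rewritten div128_by_32() above replaces the open-coded remainder arithmetic with do_div() on 64-bit partial remainders. A small user-space reference, using unsigned __int128 as the oracle, is a convenient way to sanity-check it; the 512 MHz timebase below is an assumed example value:

#include <stdint.h>
#include <assert.h>

struct div_result { uint64_t result_high, result_low; };

/* Reference model: divide the 128-bit value (hi:lo) by a 32-bit divisor. */
static void div128_by_32_ref(uint64_t hi, uint64_t lo, uint32_t divisor, struct div_result *dr)
{
	unsigned __int128 n = ((unsigned __int128)hi << 64) | lo;
	unsigned __int128 q = n / divisor;

	dr->result_high = (uint64_t)(q >> 64);
	dr->result_low = (uint64_t)q;
}

int main(void)
{
	struct div_result dr;

	/* the time code divides 2^84 (dividend_high = 1024*1024, low = 0) by tb_ticks_per_sec */
	div128_by_32_ref(1024 * 1024, 0, 512000000, &dr);
	assert(dr.result_high == 0);	/* 2^84 / 512e6 fits in 64 bits */
	return 0;
}

The time code calls it with dividend_high = 1024*1024 and dividend_low = 0, i.e. it computes 2^84 / tb_ticks_per_sec, whose low 64 bits become tb_to_xs.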
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
new file mode 100644
index 000000000000..5d638ecddbd0
--- /dev/null
+++ b/arch/powerpc/kernel/traps.c
@@ -0,0 +1,1101 @@
1/*
2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@samba.org)
11 */
12
13/*
14 * This file handles the architecture-dependent parts of hardware exceptions
15 */
16
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/user.h>
27#include <linux/a.out.h>
28#include <linux/interrupt.h>
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/prctl.h>
32#include <linux/delay.h>
33#include <linux/kprobes.h>
34
35#include <asm/kdebug.h>
36#include <asm/pgtable.h>
37#include <asm/uaccess.h>
38#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/machdep.h>
41#include <asm/rtas.h>
42#include <asm/xmon.h>
43#include <asm/pmc.h>
44#ifdef CONFIG_PPC32
45#include <asm/reg.h>
46#endif
47#ifdef CONFIG_PMAC_BACKLIGHT
48#include <asm/backlight.h>
49#endif
50#ifdef CONFIG_PPC64
51#include <asm/firmware.h>
52#include <asm/processor.h>
53#include <asm/systemcfg.h>
54#endif
55
56#ifdef CONFIG_PPC64 /* XXX */
57#define _IO_BASE pci_io_base
58#endif
59
60#ifdef CONFIG_DEBUGGER
61int (*__debugger)(struct pt_regs *regs);
62int (*__debugger_ipi)(struct pt_regs *regs);
63int (*__debugger_bpt)(struct pt_regs *regs);
64int (*__debugger_sstep)(struct pt_regs *regs);
65int (*__debugger_iabr_match)(struct pt_regs *regs);
66int (*__debugger_dabr_match)(struct pt_regs *regs);
67int (*__debugger_fault_handler)(struct pt_regs *regs);
68
69EXPORT_SYMBOL(__debugger);
70EXPORT_SYMBOL(__debugger_ipi);
71EXPORT_SYMBOL(__debugger_bpt);
72EXPORT_SYMBOL(__debugger_sstep);
73EXPORT_SYMBOL(__debugger_iabr_match);
74EXPORT_SYMBOL(__debugger_dabr_match);
75EXPORT_SYMBOL(__debugger_fault_handler);
76#endif
77
78struct notifier_block *powerpc_die_chain;
79static DEFINE_SPINLOCK(die_notifier_lock);
80
81int register_die_notifier(struct notifier_block *nb)
82{
83 int err = 0;
84 unsigned long flags;
85
86 spin_lock_irqsave(&die_notifier_lock, flags);
87 err = notifier_chain_register(&powerpc_die_chain, nb);
88 spin_unlock_irqrestore(&die_notifier_lock, flags);
89 return err;
90}
91
92/*
93 * Trap & Exception support
94 */
95
96static DEFINE_SPINLOCK(die_lock);
97
98int die(const char *str, struct pt_regs *regs, long err)
99{
100 static int die_counter;
101 int nl = 0;
102
103 if (debugger(regs))
104 return 1;
105
106 console_verbose();
107 spin_lock_irq(&die_lock);
108 bust_spinlocks(1);
109#ifdef CONFIG_PMAC_BACKLIGHT
110 if (_machine == _MACH_Pmac) {
111 set_backlight_enable(1);
112 set_backlight_level(BACKLIGHT_MAX);
113 }
114#endif
115 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
116#ifdef CONFIG_PREEMPT
117 printk("PREEMPT ");
118 nl = 1;
119#endif
120#ifdef CONFIG_SMP
121 printk("SMP NR_CPUS=%d ", NR_CPUS);
122 nl = 1;
123#endif
124#ifdef CONFIG_DEBUG_PAGEALLOC
125 printk("DEBUG_PAGEALLOC ");
126 nl = 1;
127#endif
128#ifdef CONFIG_NUMA
129 printk("NUMA ");
130 nl = 1;
131#endif
132#ifdef CONFIG_PPC64
133 switch (systemcfg->platform) {
134 case PLATFORM_PSERIES:
135 printk("PSERIES ");
136 nl = 1;
137 break;
138 case PLATFORM_PSERIES_LPAR:
139 printk("PSERIES LPAR ");
140 nl = 1;
141 break;
142 case PLATFORM_ISERIES_LPAR:
143 printk("ISERIES LPAR ");
144 nl = 1;
145 break;
146 case PLATFORM_POWERMAC:
147 printk("POWERMAC ");
148 nl = 1;
149 break;
150 case PLATFORM_BPA:
151 printk("BPA ");
152 nl = 1;
153 break;
154 }
155#endif
156 if (nl)
157 printk("\n");
158 print_modules();
159 show_regs(regs);
160 bust_spinlocks(0);
161 spin_unlock_irq(&die_lock);
162
163 if (in_interrupt())
164 panic("Fatal exception in interrupt");
165
166 if (panic_on_oops) {
167#ifdef CONFIG_PPC64
168 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
169 ssleep(5);
170#endif
171 panic("Fatal exception");
172 }
173 do_exit(err);
174
175 return 0;
176}
177
178void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
179{
180 siginfo_t info;
181
182 if (!user_mode(regs)) {
183 if (die("Exception in kernel mode", regs, signr))
184 return;
185 }
186
187 memset(&info, 0, sizeof(info));
188 info.si_signo = signr;
189 info.si_code = code;
190 info.si_addr = (void __user *) addr;
191 force_sig_info(signr, &info, current);
192
193 /*
194 * Init gets no signals that it doesn't have a handler for.
195 * That's all very well, but if it has caused a synchronous
196 * exception and we ignore the resulting signal, it will just
197 * generate the same exception over and over again and we get
198 * nowhere. Better to kill it and let the kernel panic.
199 */
200 if (current->pid == 1) {
201 __sighandler_t handler;
202
203 spin_lock_irq(&current->sighand->siglock);
204 handler = current->sighand->action[signr-1].sa.sa_handler;
205 spin_unlock_irq(&current->sighand->siglock);
206 if (handler == SIG_DFL) {
207 /* init has generated a synchronous exception
208 and it doesn't have a handler for the signal */
209 printk(KERN_CRIT "init has generated signal %d "
210 "but has no handler for it\n", signr);
211 do_exit(signr);
212 }
213 }
214}
215
216#ifdef CONFIG_PPC64
217void system_reset_exception(struct pt_regs *regs)
218{
219 /* See if any machine dependent calls */
220 if (ppc_md.system_reset_exception)
221 ppc_md.system_reset_exception(regs);
222
223 die("System Reset", regs, SIGABRT);
224
225 /* Must die if the interrupt is not recoverable */
226 if (!(regs->msr & MSR_RI))
227 panic("Unrecoverable System Reset");
228
229 /* What should we do here? We could issue a shutdown or hard reset. */
230}
231#endif
232
233/*
234 * I/O accesses can cause machine checks on powermacs.
235 * Check if the NIP corresponds to the address of a sync
236 * instruction for which there is an entry in the exception
237 * table.
238 * Note that the 601 only takes a machine check on TEA
239 * (transfer error ack) signal assertion, and does not
240 * set any of the top 16 bits of SRR1.
241 * -- paulus.
242 */
243static inline int check_io_access(struct pt_regs *regs)
244{
245#ifdef CONFIG_PPC_PMAC
246 unsigned long msr = regs->msr;
247 const struct exception_table_entry *entry;
248 unsigned int *nip = (unsigned int *)regs->nip;
249
250 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
251 && (entry = search_exception_tables(regs->nip)) != NULL) {
252 /*
253 * Check that it's a sync instruction, or somewhere
254 * in the twi; isync; nop sequence that inb/inw/inl uses.
255 * As the address is in the exception table
256 * we should be able to read the instr there.
257 * For the debug message, we look at the preceding
258 * load or store.
259 */
260 if (*nip == 0x60000000) /* nop */
261 nip -= 2;
262 else if (*nip == 0x4c00012c) /* isync */
263 --nip;
264 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
265 /* sync or twi */
266 unsigned int rb;
267
268 --nip;
269 rb = (*nip >> 11) & 0x1f;
270 printk(KERN_DEBUG "%s bad port %lx at %p\n",
271 (*nip & 0x100)? "OUT to": "IN from",
272 regs->gpr[rb] - _IO_BASE, nip);
273 regs->msr |= MSR_RI;
274 regs->nip = entry->fixup;
275 return 1;
276 }
277 }
278#endif /* CONFIG_PPC_PMAC */
279 return 0;
280}
281
282#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
283/* On 4xx, the reason for the machine check or program exception
284 is in the ESR. */
285#define get_reason(regs) ((regs)->dsisr)
286#ifndef CONFIG_FSL_BOOKE
287#define get_mc_reason(regs) ((regs)->dsisr)
288#else
289#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
290#endif
291#define REASON_FP ESR_FP
292#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
293#define REASON_PRIVILEGED ESR_PPR
294#define REASON_TRAP ESR_PTR
295
296/* single-step stuff */
297#define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC)
298#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)
299
300#else
301/* On non-4xx, the reason for the machine check or program
302 exception is in the MSR. */
303#define get_reason(regs) ((regs)->msr)
304#define get_mc_reason(regs) ((regs)->msr)
305#define REASON_FP 0x100000
306#define REASON_ILLEGAL 0x80000
307#define REASON_PRIVILEGED 0x40000
308#define REASON_TRAP 0x20000
309
310#define single_stepping(regs) ((regs)->msr & MSR_SE)
311#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
312#endif
313
314/*
 315 * This is a "fall-back" implementation for configurations
316 * which don't provide platform-specific machine check info
317 */
318void __attribute__ ((weak))
319platform_machine_check(struct pt_regs *regs)
320{
321}
322
323void machine_check_exception(struct pt_regs *regs)
324{
325#ifdef CONFIG_PPC64
326 int recover = 0;
327
328 /* See if any machine dependent calls */
329 if (ppc_md.machine_check_exception)
330 recover = ppc_md.machine_check_exception(regs);
331
332 if (recover)
333 return;
334#else
335 unsigned long reason = get_mc_reason(regs);
336
337 if (user_mode(regs)) {
338 regs->msr |= MSR_RI;
339 _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
340 return;
341 }
342
343#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
344 /* the qspan pci read routines can cause machine checks -- Cort */
345 bad_page_fault(regs, regs->dar, SIGBUS);
346 return;
347#endif
348
349 if (debugger_fault_handler(regs)) {
350 regs->msr |= MSR_RI;
351 return;
352 }
353
354 if (check_io_access(regs))
355 return;
356
357#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
358 if (reason & ESR_IMCP) {
359 printk("Instruction");
360 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
361 } else
362 printk("Data");
363 printk(" machine check in kernel mode.\n");
364#elif defined(CONFIG_440A)
365 printk("Machine check in kernel mode.\n");
366 if (reason & ESR_IMCP){
367 printk("Instruction Synchronous Machine Check exception\n");
368 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
369 }
370 else {
371 u32 mcsr = mfspr(SPRN_MCSR);
372 if (mcsr & MCSR_IB)
373 printk("Instruction Read PLB Error\n");
374 if (mcsr & MCSR_DRB)
375 printk("Data Read PLB Error\n");
376 if (mcsr & MCSR_DWB)
377 printk("Data Write PLB Error\n");
378 if (mcsr & MCSR_TLBP)
379 printk("TLB Parity Error\n");
380 if (mcsr & MCSR_ICP){
381 flush_instruction_cache();
382 printk("I-Cache Parity Error\n");
383 }
384 if (mcsr & MCSR_DCSP)
385 printk("D-Cache Search Parity Error\n");
386 if (mcsr & MCSR_DCFP)
387 printk("D-Cache Flush Parity Error\n");
388 if (mcsr & MCSR_IMPE)
389 printk("Machine Check exception is imprecise\n");
390
391 /* Clear MCSR */
392 mtspr(SPRN_MCSR, mcsr);
393 }
394#elif defined (CONFIG_E500)
395 printk("Machine check in kernel mode.\n");
396 printk("Caused by (from MCSR=%lx): ", reason);
397
398 if (reason & MCSR_MCP)
399 printk("Machine Check Signal\n");
400 if (reason & MCSR_ICPERR)
401 printk("Instruction Cache Parity Error\n");
402 if (reason & MCSR_DCP_PERR)
403 printk("Data Cache Push Parity Error\n");
404 if (reason & MCSR_DCPERR)
405 printk("Data Cache Parity Error\n");
406 if (reason & MCSR_GL_CI)
407 printk("Guarded Load or Cache-Inhibited stwcx.\n");
408 if (reason & MCSR_BUS_IAERR)
409 printk("Bus - Instruction Address Error\n");
410 if (reason & MCSR_BUS_RAERR)
411 printk("Bus - Read Address Error\n");
412 if (reason & MCSR_BUS_WAERR)
413 printk("Bus - Write Address Error\n");
414 if (reason & MCSR_BUS_IBERR)
415 printk("Bus - Instruction Data Error\n");
416 if (reason & MCSR_BUS_RBERR)
417 printk("Bus - Read Data Bus Error\n");
418 if (reason & MCSR_BUS_WBERR)
 419 printk("Bus - Write Data Bus Error\n");
420 if (reason & MCSR_BUS_IPERR)
421 printk("Bus - Instruction Parity Error\n");
422 if (reason & MCSR_BUS_RPERR)
423 printk("Bus - Read Parity Error\n");
424#elif defined (CONFIG_E200)
425 printk("Machine check in kernel mode.\n");
426 printk("Caused by (from MCSR=%lx): ", reason);
427
428 if (reason & MCSR_MCP)
429 printk("Machine Check Signal\n");
430 if (reason & MCSR_CP_PERR)
431 printk("Cache Push Parity Error\n");
432 if (reason & MCSR_CPERR)
433 printk("Cache Parity Error\n");
434 if (reason & MCSR_EXCP_ERR)
435 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
436 if (reason & MCSR_BUS_IRERR)
437 printk("Bus - Read Bus Error on instruction fetch\n");
438 if (reason & MCSR_BUS_DRERR)
439 printk("Bus - Read Bus Error on data load\n");
440 if (reason & MCSR_BUS_WRERR)
441 printk("Bus - Write Bus Error on buffered store or cache line push\n");
442#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
443 printk("Machine check in kernel mode.\n");
444 printk("Caused by (from SRR1=%lx): ", reason);
445 switch (reason & 0x601F0000) {
446 case 0x80000:
447 printk("Machine check signal\n");
448 break;
449 case 0: /* for 601 */
450 case 0x40000:
451 case 0x140000: /* 7450 MSS error and TEA */
452 printk("Transfer error ack signal\n");
453 break;
454 case 0x20000:
455 printk("Data parity error signal\n");
456 break;
457 case 0x10000:
458 printk("Address parity error signal\n");
459 break;
460 case 0x20000000:
461 printk("L1 Data Cache error\n");
462 break;
463 case 0x40000000:
464 printk("L1 Instruction Cache error\n");
465 break;
466 case 0x00100000:
467 printk("L2 data cache parity error\n");
468 break;
469 default:
470 printk("Unknown values in msr\n");
471 }
472#endif /* CONFIG_4xx */
473
474 /*
475 * Optional platform-provided routine to print out
476 * additional info, e.g. bus error registers.
477 */
478 platform_machine_check(regs);
479#endif /* CONFIG_PPC64 */
480
481 if (debugger_fault_handler(regs))
482 return;
483 die("Machine check", regs, SIGBUS);
484
485 /* Must die if the interrupt is not recoverable */
486 if (!(regs->msr & MSR_RI))
487 panic("Unrecoverable Machine check");
488}
489
490void SMIException(struct pt_regs *regs)
491{
492 die("System Management Interrupt", regs, SIGABRT);
493}
494
495void unknown_exception(struct pt_regs *regs)
496{
497 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
498 regs->nip, regs->msr, regs->trap);
499
500 _exception(SIGTRAP, regs, 0, 0);
501}
502
503void instruction_breakpoint_exception(struct pt_regs *regs)
504{
505 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
506 5, SIGTRAP) == NOTIFY_STOP)
507 return;
508 if (debugger_iabr_match(regs))
509 return;
510 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
511}
512
513void RunModeException(struct pt_regs *regs)
514{
515 _exception(SIGTRAP, regs, 0, 0);
516}
517
518void __kprobes single_step_exception(struct pt_regs *regs)
519{
520 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
521
522 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
523 5, SIGTRAP) == NOTIFY_STOP)
524 return;
525 if (debugger_sstep(regs))
526 return;
527
528 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
529}
530
531/*
532 * After we have successfully emulated an instruction, we have to
533 * check if the instruction was being single-stepped, and if so,
534 * pretend we got a single-step exception. This was pointed out
535 * by Kumar Gala. -- paulus
536 */
537static void emulate_single_step(struct pt_regs *regs)
538{
539 if (single_stepping(regs)) {
540 clear_single_step(regs);
541 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
542 }
543}
544
545static void parse_fpe(struct pt_regs *regs)
546{
547 int code = 0;
548 unsigned long fpscr;
549
550 flush_fp_to_thread(current);
551
552 fpscr = current->thread.fpscr.val;
553
554 /* Invalid operation */
555 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
556 code = FPE_FLTINV;
557
558 /* Overflow */
559 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
560 code = FPE_FLTOVF;
561
562 /* Underflow */
563 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
564 code = FPE_FLTUND;
565
566 /* Divide by zero */
567 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
568 code = FPE_FLTDIV;
569
570 /* Inexact result */
571 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
572 code = FPE_FLTRES;
573
574 _exception(SIGFPE, regs, code, regs->nip);
575}
576
577/*
578 * Illegal instruction emulation support. Originally written to
579 * provide the PVR to user applications using the mfspr rd, PVR.
580 * Return non-zero if we can't emulate, or -EFAULT if the associated
581 * memory access caused an access fault. Return zero on success.
582 *
583 * There are a couple of ways to do this, either "decode" the instruction
584 * or directly match lots of bits. In this case, matching lots of
585 * bits is faster and easier.
586 *
587 */
588#define INST_MFSPR_PVR 0x7c1f42a6
589#define INST_MFSPR_PVR_MASK 0xfc1fffff
590
591#define INST_DCBA 0x7c0005ec
592#define INST_DCBA_MASK 0x7c0007fe
593
594#define INST_MCRXR 0x7c000400
595#define INST_MCRXR_MASK 0x7c0007fe
596
597#define INST_STRING 0x7c00042a
598#define INST_STRING_MASK 0x7c0007fe
599#define INST_STRING_GEN_MASK 0x7c00067e
600#define INST_LSWI 0x7c0004aa
601#define INST_LSWX 0x7c00042a
602#define INST_STSWI 0x7c0005aa
603#define INST_STSWX 0x7c00052a
604
605static int emulate_string_inst(struct pt_regs *regs, u32 instword)
606{
607 u8 rT = (instword >> 21) & 0x1f;
608 u8 rA = (instword >> 16) & 0x1f;
609 u8 NB_RB = (instword >> 11) & 0x1f;
610 u32 num_bytes;
611 unsigned long EA;
612 int pos = 0;
613
614 /* Early out if we are an invalid form of lswx */
615 if ((instword & INST_STRING_MASK) == INST_LSWX)
616 if ((rT == rA) || (rT == NB_RB))
617 return -EINVAL;
618
619 EA = (rA == 0) ? 0 : regs->gpr[rA];
620
621 switch (instword & INST_STRING_MASK) {
622 case INST_LSWX:
623 case INST_STSWX:
624 EA += NB_RB;
625 num_bytes = regs->xer & 0x7f;
626 break;
627 case INST_LSWI:
628 case INST_STSWI:
629 num_bytes = (NB_RB == 0) ? 32 : NB_RB;
630 break;
631 default:
632 return -EINVAL;
633 }
634
635 while (num_bytes != 0)
636 {
637 u8 val;
638 u32 shift = 8 * (3 - (pos & 0x3));
639
640 switch ((instword & INST_STRING_MASK)) {
641 case INST_LSWX:
642 case INST_LSWI:
643 if (get_user(val, (u8 __user *)EA))
644 return -EFAULT;
645 /* first time updating this reg,
646 * zero it out */
647 if (pos == 0)
648 regs->gpr[rT] = 0;
649 regs->gpr[rT] |= val << shift;
650 break;
651 case INST_STSWI:
652 case INST_STSWX:
653 val = regs->gpr[rT] >> shift;
654 if (put_user(val, (u8 __user *)EA))
655 return -EFAULT;
656 break;
657 }
658 /* move EA to next address */
659 EA += 1;
660 num_bytes--;
661
662 /* manage our position within the register */
663 if (++pos == 4) {
664 pos = 0;
665 if (++rT == 32)
666 rT = 0;
667 }
668 }
669
670 return 0;
671}
672
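To see how the emulation loop above packs bytes: for a hypothetical lswi with NB = 6, bytes b0..b5 are fetched from EA; shift takes the values 24, 16, 8, 0 as pos runs 0..3, so rT ends up holding (b0<<24)|(b1<<16)|(b2<<8)|b3, and when pos wraps the next register is zeroed first and receives (b4<<24)|(b5<<16), matching the big-endian byte ordering of the string instructions.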
673static int emulate_instruction(struct pt_regs *regs)
674{
675 u32 instword;
676 u32 rd;
677
678 if (!user_mode(regs))
679 return -EINVAL;
680 CHECK_FULL_REGS(regs);
681
682 if (get_user(instword, (u32 __user *)(regs->nip)))
683 return -EFAULT;
684
685 /* Emulate the mfspr rD, PVR. */
686 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
687 rd = (instword >> 21) & 0x1f;
688 regs->gpr[rd] = mfspr(SPRN_PVR);
689 return 0;
690 }
691
692 /* Emulating the dcba insn is just a no-op. */
693 if ((instword & INST_DCBA_MASK) == INST_DCBA)
694 return 0;
695
696 /* Emulate the mcrxr insn. */
697 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
698 int shift = (instword >> 21) & 0x1c;
699 unsigned long msk = 0xf0000000UL >> shift;
700
701 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
702 regs->xer &= ~0xf0000000UL;
703 return 0;
704 }
705
706 /* Emulate load/store string insn. */
707 if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
708 return emulate_string_inst(regs, instword);
709
710 return -EINVAL;
711}
712
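A worked example for the mcrxr emulation above: for mcrxr cr2 the crfD field yields shift = 8, so msk = 0xf0000000 >> 8 = 0x00f00000; the top four bits of XER (SO, OV, CA and the reserved bit) are copied into CR field 2 and then cleared in XER, which is the architected behaviour of mcrxr.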
713/*
714 * Look through the list of trap instructions that are used for BUG(),
715 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
716 * that the exception was caused by a trap instruction of some kind.
717 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
718 * otherwise.
719 */
720extern struct bug_entry __start___bug_table[], __stop___bug_table[];
721
722#ifndef CONFIG_MODULES
723#define module_find_bug(x) NULL
724#endif
725
726struct bug_entry *find_bug(unsigned long bugaddr)
727{
728 struct bug_entry *bug;
729
730 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
731 if (bugaddr == bug->bug_addr)
732 return bug;
733 return module_find_bug(bugaddr);
734}
735
736static int check_bug_trap(struct pt_regs *regs)
737{
738 struct bug_entry *bug;
739 unsigned long addr;
740
741 if (regs->msr & MSR_PR)
742 return 0; /* not in kernel */
743 addr = regs->nip; /* address of trap instruction */
744 if (addr < PAGE_OFFSET)
745 return 0;
746 bug = find_bug(regs->nip);
747 if (bug == NULL)
748 return 0;
749 if (bug->line & BUG_WARNING_TRAP) {
750 /* this is a WARN_ON rather than BUG/BUG_ON */
751#ifdef CONFIG_XMON
752 xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
753 bug->function, bug->file,
754 bug->line & ~BUG_WARNING_TRAP);
755#endif /* CONFIG_XMON */
756 printk(KERN_ERR "Badness in %s at %s:%d\n",
757 bug->function, bug->file,
758 bug->line & ~BUG_WARNING_TRAP);
759 dump_stack();
760 return 1;
761 }
762#ifdef CONFIG_XMON
763 xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
764 bug->function, bug->file, bug->line);
765 xmon(regs);
766#endif /* CONFIG_XMON */
767 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
768 bug->function, bug->file, bug->line);
769
770 return 0;
771}
772
773void __kprobes program_check_exception(struct pt_regs *regs)
774{
775 unsigned int reason = get_reason(regs);
776 extern int do_mathemu(struct pt_regs *regs);
777
778#ifdef CONFIG_MATH_EMULATION
779 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
780 * but there seems to be a hardware bug on the 405GP (RevD)
781 * that means ESR is sometimes set incorrectly - either to
782 * ESR_DST (!?) or 0. In the process of chasing this with the
783 * hardware people - not sure if it can happen on any illegal
784 * instruction or only on FP instructions, whether there is a
 785 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
786 if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
787 emulate_single_step(regs);
788 return;
789 }
790#endif /* CONFIG_MATH_EMULATION */
791
792 if (reason & REASON_FP) {
793 /* IEEE FP exception */
794 parse_fpe(regs);
795 return;
796 }
797 if (reason & REASON_TRAP) {
798 /* trap exception */
799 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
800 == NOTIFY_STOP)
801 return;
802 if (debugger_bpt(regs))
803 return;
804 if (check_bug_trap(regs)) {
805 regs->nip += 4;
806 return;
807 }
808 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
809 return;
810 }
811
812 /* Try to emulate it if we should. */
813 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
814 switch (emulate_instruction(regs)) {
815 case 0:
816 regs->nip += 4;
817 emulate_single_step(regs);
818 return;
819 case -EFAULT:
820 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
821 return;
822 }
823 }
824
825 if (reason & REASON_PRIVILEGED)
826 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
827 else
828 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
829}
830
831void alignment_exception(struct pt_regs *regs)
832{
833 int fixed;
834
835 fixed = fix_alignment(regs);
836
837 if (fixed == 1) {
838 regs->nip += 4; /* skip over emulated instruction */
839 emulate_single_step(regs);
840 return;
841 }
842
843 /* Operand address was bad */
844 if (fixed == -EFAULT) {
845 if (user_mode(regs))
846 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
847 else
848 /* Search exception table */
849 bad_page_fault(regs, regs->dar, SIGSEGV);
850 return;
851 }
852 _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
853}
854
855void StackOverflow(struct pt_regs *regs)
856{
857 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
858 current, regs->gpr[1]);
859 debugger(regs);
860 show_regs(regs);
861 panic("kernel stack overflow");
862}
863
864void nonrecoverable_exception(struct pt_regs *regs)
865{
866 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
867 regs->nip, regs->msr);
868 debugger(regs);
869 die("nonrecoverable exception", regs, SIGKILL);
870}
871
872void trace_syscall(struct pt_regs *regs)
873{
874 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
875 current, current->pid, regs->nip, regs->link, regs->gpr[0],
876 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
877}
878
879void kernel_fp_unavailable_exception(struct pt_regs *regs)
880{
881 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
882 "%lx at %lx\n", regs->trap, regs->nip);
883 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
884}
885
886void altivec_unavailable_exception(struct pt_regs *regs)
887{
888#if !defined(CONFIG_ALTIVEC)
889 if (user_mode(regs)) {
890 /* A user program has executed an altivec instruction,
891 but this kernel doesn't support altivec. */
892 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
893 return;
894 }
895#endif
896 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
897 "%lx at %lx\n", regs->trap, regs->nip);
898 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
899}
900
901#ifdef CONFIG_PPC64
902extern perf_irq_t perf_irq;
903#endif
904
905#if defined(CONFIG_PPC64) || defined(CONFIG_E500)
906void performance_monitor_exception(struct pt_regs *regs)
907{
908 perf_irq(regs);
909}
910#endif
911
912#ifdef CONFIG_8xx
913void SoftwareEmulation(struct pt_regs *regs)
914{
915 extern int do_mathemu(struct pt_regs *);
916 extern int Soft_emulate_8xx(struct pt_regs *);
917 int errcode;
918
919 CHECK_FULL_REGS(regs);
920
921 if (!user_mode(regs)) {
922 debugger(regs);
923 die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
924 }
925
926#ifdef CONFIG_MATH_EMULATION
927 errcode = do_mathemu(regs);
928#else
929 errcode = Soft_emulate_8xx(regs);
930#endif
931 if (errcode) {
932 if (errcode > 0)
933 _exception(SIGFPE, regs, 0, 0);
934 else if (errcode == -EFAULT)
935 _exception(SIGSEGV, regs, 0, 0);
936 else
937 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
938 } else
939 emulate_single_step(regs);
940}
941#endif /* CONFIG_8xx */
942
943#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
944
945void DebugException(struct pt_regs *regs, unsigned long debug_status)
946{
947 if (debug_status & DBSR_IC) { /* instruction completion */
948 regs->msr &= ~MSR_DE;
949 if (user_mode(regs)) {
950 current->thread.dbcr0 &= ~DBCR0_IC;
951 } else {
952 /* Disable instruction completion */
953 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
954 /* Clear the instruction completion event */
955 mtspr(SPRN_DBSR, DBSR_IC);
956 if (debugger_sstep(regs))
957 return;
958 }
959 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
960 }
961}
 962#endif /* CONFIG_40x || CONFIG_BOOKE */
963
964#if !defined(CONFIG_TAU_INT)
965void TAUException(struct pt_regs *regs)
966{
967 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
968 regs->nip, regs->msr, regs->trap, print_tainted());
969}
 970#endif /* CONFIG_TAU_INT */
971
972#ifdef CONFIG_ALTIVEC
973void altivec_assist_exception(struct pt_regs *regs)
974{
975 int err;
976
977 if (!user_mode(regs)) {
978 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
979 " at %lx\n", regs->nip);
980 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
981 }
982
983 flush_altivec_to_thread(current);
984
985 err = emulate_altivec(regs);
986 if (err == 0) {
987 regs->nip += 4; /* skip emulated instruction */
988 emulate_single_step(regs);
989 return;
990 }
991
992 if (err == -EFAULT) {
993 /* got an error reading the instruction */
994 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
995 } else {
996 /* didn't recognize the instruction */
997 /* XXX quick hack for now: set the non-Java bit in the VSCR */
998 if (printk_ratelimit())
999 printk(KERN_ERR "Unrecognized altivec instruction "
1000 "in %s at %lx\n", current->comm, regs->nip);
1001 current->thread.vscr.u[3] |= 0x10000;
1002 }
1003}
1004#endif /* CONFIG_ALTIVEC */
1005
1006#ifdef CONFIG_FSL_BOOKE
1007void CacheLockingException(struct pt_regs *regs, unsigned long address,
1008 unsigned long error_code)
1009{
1010 /* We treat cache locking instructions from the user
 1011 * as priv ops; in the future we could try to do
1012 * something smarter
1013 */
1014 if (error_code & (ESR_DLK|ESR_ILK))
1015 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1016 return;
1017}
1018#endif /* CONFIG_FSL_BOOKE */
1019
1020#ifdef CONFIG_SPE
1021void SPEFloatingPointException(struct pt_regs *regs)
1022{
1023 unsigned long spefscr;
1024 int fpexc_mode;
1025 int code = 0;
1026
1027 spefscr = current->thread.spefscr;
1028 fpexc_mode = current->thread.fpexc_mode;
1029
 1030 /* Hardware does not necessarily set sticky
1031 * underflow/overflow/invalid flags */
1032 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1033 code = FPE_FLTOVF;
1034 spefscr |= SPEFSCR_FOVFS;
1035 }
1036 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1037 code = FPE_FLTUND;
1038 spefscr |= SPEFSCR_FUNFS;
1039 }
1040 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1041 code = FPE_FLTDIV;
1042 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1043 code = FPE_FLTINV;
1044 spefscr |= SPEFSCR_FINVS;
1045 }
1046 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1047 code = FPE_FLTRES;
1048
1049 current->thread.spefscr = spefscr;
1050
1051 _exception(SIGFPE, regs, code, regs->nip);
1052 return;
1053}
1054#endif
1055
1056/*
1057 * We enter here if we get an unrecoverable exception, that is, one
1058 * that happened at a point where the RI (recoverable interrupt) bit
1059 * in the MSR is 0. This indicates that SRR0/1 are live, and that
1060 * we therefore lost state by taking this exception.
1061 */
1062void unrecoverable_exception(struct pt_regs *regs)
1063{
1064 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1065 regs->trap, regs->nip);
1066 die("Unrecoverable exception", regs, SIGABRT);
1067}
1068
1069#ifdef CONFIG_BOOKE_WDT
1070/*
1071 * Default handler for a Watchdog exception,
1072 * spins until a reboot occurs
1073 */
1074void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1075{
1076 /* Generic WatchdogHandler, implement your own */
1077 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1078 return;
1079}
1080
1081void WatchdogException(struct pt_regs *regs)
1082{
1083 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1084 WatchdogHandler(regs);
1085}
1086#endif
1087
1088/*
1089 * We enter here if we discover during exception entry that we are
1090 * running in supervisor mode with a userspace value in the stack pointer.
1091 */
1092void kernel_bad_stack(struct pt_regs *regs)
1093{
1094 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1095 regs->gpr[1], regs->nip);
1096 die("Bad kernel stack pointer", regs, SIGABRT);
1097}
1098
1099void __init trap_init(void)
1100{
1101}
diff --git a/arch/ppc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 604d0947cb20..604d0947cb20 100644
--- a/arch/ppc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
diff --git a/arch/ppc64/kernel/vector.S b/arch/powerpc/kernel/vector.S
index b79d33e4001e..66b3d03c5fa5 100644
--- a/arch/ppc64/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,11 +1,26 @@
1#include <linux/config.h>
1#include <asm/ppc_asm.h> 2#include <asm/ppc_asm.h>
2#include <asm/processor.h> 3#include <asm/reg.h>
3 4
4/* 5/*
5 * The routines below are in assembler so we can closely control the 6 * The routines below are in assembler so we can closely control the
6 * usage of floating-point registers. These routines must be called 7 * usage of floating-point registers. These routines must be called
7 * with preempt disabled. 8 * with preempt disabled.
8 */ 9 */
10#ifdef CONFIG_PPC32
11 .data
12fpzero:
13 .long 0
14fpone:
15 .long 0x3f800000 /* 1.0 in single-precision FP */
16fphalf:
17 .long 0x3f000000 /* 0.5 in single-precision FP */
18
19#define LDCONST(fr, name) \
20 lis r11,name@ha; \
21 lfs fr,name@l(r11)
22#else
23
9 .section ".toc","aw" 24 .section ".toc","aw"
10fpzero: 25fpzero:
11 .tc FD_0_0[TC],0 26 .tc FD_0_0[TC],0
@@ -14,32 +29,42 @@ fpone:
14fphalf: 29fphalf:
15 .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */ 30 .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */
16 31
32#define LDCONST(fr, name) \
33 lfd fr,name@toc(r2)
34#endif
35
17 .text 36 .text
18/* 37/*
19 * Internal routine to enable floating point and set FPSCR to 0. 38 * Internal routine to enable floating point and set FPSCR to 0.
20 * Don't call it from C; it doesn't use the normal calling convention. 39 * Don't call it from C; it doesn't use the normal calling convention.
21 */ 40 */
22fpenable: 41fpenable:
42#ifdef CONFIG_PPC32
43 stwu r1,-64(r1)
44#else
45 stdu r1,-64(r1)
46#endif
23 mfmsr r10 47 mfmsr r10
24 ori r11,r10,MSR_FP 48 ori r11,r10,MSR_FP
25 mtmsr r11 49 mtmsr r11
26 isync 50 isync
27 stfd fr31,-8(r1) 51 stfd fr0,24(r1)
28 stfd fr0,-16(r1) 52 stfd fr1,16(r1)
29 stfd fr1,-24(r1) 53 stfd fr31,8(r1)
54 LDCONST(fr1, fpzero)
30 mffs fr31 55 mffs fr31
31 lfd fr1,fpzero@toc(r2)
32 mtfsf 0xff,fr1 56 mtfsf 0xff,fr1
33 blr 57 blr
34 58
35fpdisable: 59fpdisable:
36 mtlr r12 60 mtlr r12
37 mtfsf 0xff,fr31 61 mtfsf 0xff,fr31
38 lfd fr1,-24(r1) 62 lfd fr31,8(r1)
39 lfd fr0,-16(r1) 63 lfd fr1,16(r1)
40 lfd fr31,-8(r1) 64 lfd fr0,24(r1)
41 mtmsr r10 65 mtmsr r10
42 isync 66 isync
67 addi r1,r1,64
43 blr 68 blr
44 69
45/* 70/*
@@ -82,7 +107,7 @@ _GLOBAL(vsubfp)
82_GLOBAL(vmaddfp) 107_GLOBAL(vmaddfp)
83 mflr r12 108 mflr r12
84 bl fpenable 109 bl fpenable
85 stfd fr2,-32(r1) 110 stfd fr2,32(r1)
86 li r0,4 111 li r0,4
87 mtctr r0 112 mtctr r0
88 li r7,0 113 li r7,0
@@ -93,7 +118,7 @@ _GLOBAL(vmaddfp)
93 stfsx fr0,r3,r7 118 stfsx fr0,r3,r7
94 addi r7,r7,4 119 addi r7,r7,4
95 bdnz 1b 120 bdnz 1b
96 lfd fr2,-32(r1) 121 lfd fr2,32(r1)
97 b fpdisable 122 b fpdisable
98 123
99/* 124/*
@@ -102,7 +127,7 @@ _GLOBAL(vmaddfp)
102_GLOBAL(vnmsubfp) 127_GLOBAL(vnmsubfp)
103 mflr r12 128 mflr r12
104 bl fpenable 129 bl fpenable
105 stfd fr2,-32(r1) 130 stfd fr2,32(r1)
106 li r0,4 131 li r0,4
107 mtctr r0 132 mtctr r0
108 li r7,0 133 li r7,0
@@ -113,7 +138,7 @@ _GLOBAL(vnmsubfp)
113 stfsx fr0,r3,r7 138 stfsx fr0,r3,r7
114 addi r7,r7,4 139 addi r7,r7,4
115 bdnz 1b 140 bdnz 1b
116 lfd fr2,-32(r1) 141 lfd fr2,32(r1)
117 b fpdisable 142 b fpdisable
118 143
119/* 144/*
@@ -124,7 +149,7 @@ _GLOBAL(vrefp)
124 mflr r12 149 mflr r12
125 bl fpenable 150 bl fpenable
126 li r0,4 151 li r0,4
127 lfd fr1,fpone@toc(r2) 152 LDCONST(fr1, fpone)
128 mtctr r0 153 mtctr r0
129 li r6,0 154 li r6,0
1301: lfsx fr0,r4,r6 1551: lfsx fr0,r4,r6
@@ -143,13 +168,13 @@ _GLOBAL(vrefp)
143_GLOBAL(vrsqrtefp) 168_GLOBAL(vrsqrtefp)
144 mflr r12 169 mflr r12
145 bl fpenable 170 bl fpenable
146 stfd fr2,-32(r1) 171 stfd fr2,32(r1)
147 stfd fr3,-40(r1) 172 stfd fr3,40(r1)
148 stfd fr4,-48(r1) 173 stfd fr4,48(r1)
149 stfd fr5,-56(r1) 174 stfd fr5,56(r1)
150 li r0,4 175 li r0,4
151 lfd fr4,fpone@toc(r2) 176 LDCONST(fr4, fpone)
152 lfd fr5,fphalf@toc(r2) 177 LDCONST(fr5, fphalf)
153 mtctr r0 178 mtctr r0
154 li r6,0 179 li r6,0
1551: lfsx fr0,r4,r6 1801: lfsx fr0,r4,r6
@@ -165,8 +190,8 @@ _GLOBAL(vrsqrtefp)
165 stfsx fr1,r3,r6 190 stfsx fr1,r3,r6
166 addi r6,r6,4 191 addi r6,r6,4
167 bdnz 1b 192 bdnz 1b
168 lfd fr5,-56(r1) 193 lfd fr5,56(r1)
169 lfd fr4,-48(r1) 194 lfd fr4,48(r1)
170 lfd fr3,-40(r1) 195 lfd fr3,40(r1)
171 lfd fr2,-32(r1) 196 lfd fr2,32(r1)
172 b fpdisable 197 b fpdisable
diff --git a/arch/ppc64/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 0e555b7a6587..97082a4203ad 100644
--- a/arch/ppc64/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -69,6 +69,16 @@ static int vio_bus_remove(struct device *dev)
69 return 1; 69 return 1;
70} 70}
71 71
72/* convert from struct device to struct vio_dev and pass to driver. */
73static void vio_bus_shutdown(struct device *dev)
74{
75 struct vio_dev *viodev = to_vio_dev(dev);
76 struct vio_driver *viodrv = to_vio_driver(dev->driver);
77
78 if (viodrv->shutdown)
79 viodrv->shutdown(viodev);
80}
81
72/** 82/**
73 * vio_register_driver: - Register a new vio driver 83 * vio_register_driver: - Register a new vio driver
74 * @drv: The vio_driver structure to be registered. 84 * @drv: The vio_driver structure to be registered.
@@ -76,13 +86,13 @@ static int vio_bus_remove(struct device *dev)
76int vio_register_driver(struct vio_driver *viodrv) 86int vio_register_driver(struct vio_driver *viodrv)
77{ 87{
78 printk(KERN_DEBUG "%s: driver %s registering\n", __FUNCTION__, 88 printk(KERN_DEBUG "%s: driver %s registering\n", __FUNCTION__,
79 viodrv->name); 89 viodrv->driver.name);
80 90
81 /* fill in 'struct driver' fields */ 91 /* fill in 'struct driver' fields */
82 viodrv->driver.name = viodrv->name;
83 viodrv->driver.bus = &vio_bus_type; 92 viodrv->driver.bus = &vio_bus_type;
84 viodrv->driver.probe = vio_bus_probe; 93 viodrv->driver.probe = vio_bus_probe;
85 viodrv->driver.remove = vio_bus_remove; 94 viodrv->driver.remove = vio_bus_remove;
95 viodrv->driver.shutdown = vio_bus_shutdown;
86 96
87 return driver_register(&viodrv->driver); 97 return driver_register(&viodrv->driver);
88} 98}
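
For reference, a minimal sketch (a hypothetical driver, not part of this patch, and assuming the matching vio_driver.shutdown field added elsewhere in this merge) of what a vio driver looks like after this change: it fills in driver.name itself and may supply the shutdown hook that vio_bus_shutdown() forwards to.

	static void example_shutdown(struct vio_dev *vdev)
	{
		/* quiesce the hypothetical device before reboot/kexec */
	}

	static struct vio_driver example_vio_driver = {
		.shutdown = example_shutdown,
		.driver = {
			.name = "example",	/* now read directly by vio_register_driver() */
		},
	};
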
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..d4dfcfbce272
--- /dev/null
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -0,0 +1,279 @@
1#include <linux/config.h>
2#ifdef CONFIG_PPC64
3#include <asm/page.h>
4#else
5#define PAGE_SIZE 4096
6#endif
7#include <asm-generic/vmlinux.lds.h>
8
9#ifdef CONFIG_PPC64
10OUTPUT_ARCH(powerpc:common64)
11jiffies = jiffies_64;
12#else
13OUTPUT_ARCH(powerpc:common)
14jiffies = jiffies_64 + 4;
15#endif
16SECTIONS
17{
18 /* Sections to be discarded. */
19 /DISCARD/ : {
20 *(.exitcall.exit)
21 *(.exit.data)
22 }
23
24
25 /* Read-only sections, merged into text segment: */
26#ifdef CONFIG_PPC32
27 . = + SIZEOF_HEADERS;
28 .interp : { *(.interp) }
29 .hash : { *(.hash) }
30 .dynsym : { *(.dynsym) }
31 .dynstr : { *(.dynstr) }
32 .rel.text : { *(.rel.text) }
33 .rela.text : { *(.rela.text) }
34 .rel.data : { *(.rel.data) }
35 .rela.data : { *(.rela.data) }
36 .rel.rodata : { *(.rel.rodata) }
37 .rela.rodata : { *(.rela.rodata) }
38 .rel.got : { *(.rel.got) }
39 .rela.got : { *(.rela.got) }
40 .rel.ctors : { *(.rel.ctors) }
41 .rela.ctors : { *(.rela.ctors) }
42 .rel.dtors : { *(.rel.dtors) }
43 .rela.dtors : { *(.rela.dtors) }
44 .rel.bss : { *(.rel.bss) }
45 .rela.bss : { *(.rela.bss) }
46 .rel.plt : { *(.rel.plt) }
47 .rela.plt : { *(.rela.plt) }
48/* .init : { *(.init) } =0*/
49 .plt : { *(.plt) }
50#endif
51 .text : {
52 *(.text .text.*)
53 SCHED_TEXT
54 LOCK_TEXT
55 KPROBES_TEXT
56 *(.fixup)
57#ifdef CONFIG_PPC32
58 *(.got1)
59 __got2_start = .;
60 *(.got2)
61 __got2_end = .;
62#else
63 . = ALIGN(PAGE_SIZE);
64 _etext = .;
65#endif
66 }
67#ifdef CONFIG_PPC32
68 _etext = .;
69 PROVIDE (etext = .);
70
71 RODATA
72 .fini : { *(.fini) } =0
73 .ctors : { *(.ctors) }
74 .dtors : { *(.dtors) }
75
76 .fixup : { *(.fixup) }
77#endif
78
79 __ex_table : {
80 __start___ex_table = .;
81 *(__ex_table)
82 __stop___ex_table = .;
83 }
84
85 __bug_table : {
86 __start___bug_table = .;
87 *(__bug_table)
88 __stop___bug_table = .;
89 }
90
91#ifdef CONFIG_PPC64
92 __ftr_fixup : {
93 __start___ftr_fixup = .;
94 *(__ftr_fixup)
95 __stop___ftr_fixup = .;
96 }
97
98 RODATA
99#endif
100
101#ifdef CONFIG_PPC32
102 /* Read-write section, merged into data segment: */
103 . = ALIGN(PAGE_SIZE);
104 _sdata = .;
105 .data :
106 {
107 *(.data)
108 *(.data1)
109 *(.sdata)
110 *(.sdata2)
111 *(.got.plt) *(.got)
112 *(.dynamic)
113 CONSTRUCTORS
114 }
115
116 . = ALIGN(PAGE_SIZE);
117 __nosave_begin = .;
118 .data_nosave : { *(.data.nosave) }
119 . = ALIGN(PAGE_SIZE);
120 __nosave_end = .;
121
122 . = ALIGN(32);
123 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
124
125 _edata = .;
126 PROVIDE (edata = .);
127
128 . = ALIGN(8192);
129 .data.init_task : { *(.data.init_task) }
130#endif
131
132 /* will be freed after init */
133 . = ALIGN(PAGE_SIZE);
134 __init_begin = .;
135 .init.text : {
136 _sinittext = .;
137 *(.init.text)
138 _einittext = .;
139 }
140#ifdef CONFIG_PPC32
141 /* .exit.text is discarded at runtime, not link time,
142 to deal with references from __bug_table */
143 .exit.text : { *(.exit.text) }
144#endif
145 .init.data : {
146 *(.init.data);
147 __vtop_table_begin = .;
148 *(.vtop_fixup);
149 __vtop_table_end = .;
150 __ptov_table_begin = .;
151 *(.ptov_fixup);
152 __ptov_table_end = .;
153 }
154
155 . = ALIGN(16);
156 .init.setup : {
157 __setup_start = .;
158 *(.init.setup)
159 __setup_end = .;
160 }
161
162 .initcall.init : {
163 __initcall_start = .;
164 *(.initcall1.init)
165 *(.initcall2.init)
166 *(.initcall3.init)
167 *(.initcall4.init)
168 *(.initcall5.init)
169 *(.initcall6.init)
170 *(.initcall7.init)
171 __initcall_end = .;
172 }
173
174 .con_initcall.init : {
175 __con_initcall_start = .;
176 *(.con_initcall.init)
177 __con_initcall_end = .;
178 }
179
180 SECURITY_INIT
181
182#ifdef CONFIG_PPC32
183 __start___ftr_fixup = .;
184 __ftr_fixup : { *(__ftr_fixup) }
185 __stop___ftr_fixup = .;
186#else
187 . = ALIGN(PAGE_SIZE);
188 .init.ramfs : {
189 __initramfs_start = .;
190 *(.init.ramfs)
191 __initramfs_end = .;
192 }
193#endif
194
195#ifdef CONFIG_PPC32
196 . = ALIGN(32);
197#endif
198 .data.percpu : {
199 __per_cpu_start = .;
200 *(.data.percpu)
201 __per_cpu_end = .;
202 }
203
204 . = ALIGN(PAGE_SIZE);
205#ifdef CONFIG_PPC64
206 . = ALIGN(16384);
207 __init_end = .;
208 /* freed after init ends here */
209
210 /* Read/write sections */
211 . = ALIGN(PAGE_SIZE);
212 . = ALIGN(16384);
213 _sdata = .;
214 /* The initial task and kernel stack */
215 .data.init_task : {
216 *(.data.init_task)
217 }
218
219 . = ALIGN(PAGE_SIZE);
220 .data.page_aligned : {
221 *(.data.page_aligned)
222 }
223
224 .data.cacheline_aligned : {
225 *(.data.cacheline_aligned)
226 }
227
228 .data : {
229 *(.data .data.rel* .toc1)
230 *(.branch_lt)
231 }
232
233 .opd : {
234 *(.opd)
235 }
236
237 .got : {
238 __toc_start = .;
239 *(.got)
240 *(.toc)
241 . = ALIGN(PAGE_SIZE);
242 _edata = .;
243 }
244
245 . = ALIGN(PAGE_SIZE);
246#else
247 __initramfs_start = .;
248 .init.ramfs : {
249 *(.init.ramfs)
250 }
251 __initramfs_end = .;
252
253 . = ALIGN(4096);
254 __init_end = .;
255
256 . = ALIGN(4096);
257 _sextratext = .;
258 _eextratext = .;
259
260 __bss_start = .;
261#endif
262
263 .bss : {
264 __bss_start = .;
265 *(.sbss) *(.scommon)
266 *(.dynbss)
267 *(.bss)
268 *(COMMON)
269 __bss_stop = .;
270 }
271
272#ifdef CONFIG_PPC64
273 . = ALIGN(PAGE_SIZE);
274#endif
275 _end = . ;
276#ifdef CONFIG_PPC32
277 PROVIDE (end = .);
278#endif
279}
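
As a side note on the __initcall_start/__initcall_end markers laid out above, here is a simplified sketch (patterned on the generic kernel boot code, not part of this patch) of how such section bounds are consumed at boot:

	extern initcall_t __initcall_start[], __initcall_end[];

	static void __init run_initcalls(void)
	{
		initcall_t *call;

		for (call = __initcall_start; call < __initcall_end; call++)
			(*call)();	/* each entry was placed here by module_init()/__initcall() */
	}
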
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
new file mode 100644
index 000000000000..e6b2be3bcec1
--- /dev/null
+++ b/arch/powerpc/lib/Makefile
@@ -0,0 +1,19 @@
1#
2# Makefile for ppc-specific library files..
3#
4
5ifeq ($(CONFIG_PPC_MERGE),y)
6obj-y := string.o
7endif
8
9obj-y += strcase.o
10obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
11obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \
12 memcpy_64.o usercopy_64.o mem_64.o
13obj-$(CONFIG_PPC_ISERIES) += e2a.o
14obj-$(CONFIG_XMON) += sstep.o
15
16ifeq ($(CONFIG_PPC64),y)
17obj-$(CONFIG_SMP) += locks.o
18obj-$(CONFIG_DEBUG_KERNEL) += sstep.o
19endif
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
new file mode 100644
index 000000000000..7874e8a80455
--- /dev/null
+++ b/arch/powerpc/lib/checksum_32.S
@@ -0,0 +1,225 @@
1/*
2 * This file contains assembly-language implementations
3 * of IP-style 1's complement checksum routines.
4 *
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
13 */
14
15#include <linux/sys.h>
16#include <asm/processor.h>
17#include <asm/errno.h>
18#include <asm/ppc_asm.h>
19
20 .text
21
22/*
23 * ip_fast_csum(buf, len) -- Optimized for IP header
24 * len is in words and is always >= 5.
25 */
26_GLOBAL(ip_fast_csum)
27 lwz r0,0(r3)
28 lwzu r5,4(r3)
29 addic. r4,r4,-2
30 addc r0,r0,r5
31 mtctr r4
32 blelr-
331: lwzu r4,4(r3)
34 adde r0,r0,r4
35 bdnz 1b
36 addze r0,r0 /* add in final carry */
37 rlwinm r3,r0,16,0,31 /* fold two halves together */
38 add r3,r0,r3
39 not r3,r3
40 srwi r3,r3,16
41 blr
42
43/*
44 * Compute checksum of TCP or UDP pseudo-header:
45 * csum_tcpudp_magic(saddr, daddr, len, proto, sum)
46 */
47_GLOBAL(csum_tcpudp_magic)
48 rlwimi r5,r6,16,0,15 /* put proto in upper half of len */
49 addc r0,r3,r4 /* add 4 32-bit words together */
50 adde r0,r0,r5
51 adde r0,r0,r7
52 addze r0,r0 /* add in final carry */
53 rlwinm r3,r0,16,0,31 /* fold two halves together */
54 add r3,r0,r3
55 not r3,r3
56 srwi r3,r3,16
57 blr
58
59/*
60 * computes the checksum of a memory block at buff, length len,
61 * and adds in "sum" (32-bit)
62 *
63 * csum_partial(buff, len, sum)
64 */
65_GLOBAL(csum_partial)
66 addic r0,r5,0
67 subi r3,r3,4
68 srwi. r6,r4,2
69 beq 3f /* if we're doing < 4 bytes */
70 andi. r5,r3,2 /* Align buffer to longword boundary */
71 beq+ 1f
72 lhz r5,4(r3) /* do 2 bytes to get aligned */
73 addi r3,r3,2
74 subi r4,r4,2
75 addc r0,r0,r5
76 srwi. r6,r4,2 /* # words to do */
77 beq 3f
781: mtctr r6
792: lwzu r5,4(r3) /* the bdnz has zero overhead, so it should */
80 adde r0,r0,r5 /* be unnecessary to unroll this loop */
81 bdnz 2b
82 andi. r4,r4,3
833: cmpwi 0,r4,2
84 blt+ 4f
85 lhz r5,4(r3)
86 addi r3,r3,2
87 subi r4,r4,2
88 adde r0,r0,r5
894: cmpwi 0,r4,1
90 bne+ 5f
91 lbz r5,4(r3)
92 slwi r5,r5,8 /* Upper byte of word */
93 adde r0,r0,r5
945: addze r3,r0 /* add in final carry */
95 blr
96
97/*
98 * Computes the checksum of a memory block at src, length len,
99 * and adds in "sum" (32-bit), while copying the block to dst.
100 * If an access exception occurs on src or dst, it stores -EFAULT
101 * to *src_err or *dst_err respectively, and (for an error on
102 * src) zeroes the rest of dst.
103 *
104 * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
105 */
106_GLOBAL(csum_partial_copy_generic)
107 addic r0,r6,0
108 subi r3,r3,4
109 subi r4,r4,4
110 srwi. r6,r5,2
111 beq 3f /* if we're doing < 4 bytes */
112 andi. r9,r4,2 /* Align dst to longword boundary */
113 beq+ 1f
11481: lhz r6,4(r3) /* do 2 bytes to get aligned */
115 addi r3,r3,2
116 subi r5,r5,2
11791: sth r6,4(r4)
118 addi r4,r4,2
119 addc r0,r0,r6
120 srwi. r6,r5,2 /* # words to do */
121 beq 3f
1221: srwi. r6,r5,4 /* # groups of 4 words to do */
123 beq 10f
124 mtctr r6
12571: lwz r6,4(r3)
12672: lwz r9,8(r3)
12773: lwz r10,12(r3)
12874: lwzu r11,16(r3)
129 adde r0,r0,r6
13075: stw r6,4(r4)
131 adde r0,r0,r9
13276: stw r9,8(r4)
133 adde r0,r0,r10
13477: stw r10,12(r4)
135 adde r0,r0,r11
13678: stwu r11,16(r4)
137 bdnz 71b
13810: rlwinm. r6,r5,30,30,31 /* # words left to do */
139 beq 13f
140 mtctr r6
14182: lwzu r9,4(r3)
14292: stwu r9,4(r4)
143 adde r0,r0,r9
144 bdnz 82b
14513: andi. r5,r5,3
1463: cmpwi 0,r5,2
147 blt+ 4f
14883: lhz r6,4(r3)
149 addi r3,r3,2
150 subi r5,r5,2
15193: sth r6,4(r4)
152 addi r4,r4,2
153 adde r0,r0,r6
1544: cmpwi 0,r5,1
155 bne+ 5f
15684: lbz r6,4(r3)
15794: stb r6,4(r4)
158 slwi r6,r6,8 /* Upper byte of word */
159 adde r0,r0,r6
1605: addze r3,r0 /* add in final carry */
161 blr
162
163/* These shouldn't go in the fixup section, since that would
164 cause the ex_table addresses to get out of order. */
165
166src_error_4:
167 mfctr r6 /* update # bytes remaining from ctr */
168 rlwimi r5,r6,4,0,27
169 b 79f
170src_error_1:
171 li r6,0
172 subi r5,r5,2
17395: sth r6,4(r4)
174 addi r4,r4,2
17579: srwi. r6,r5,2
176 beq 3f
177 mtctr r6
178src_error_2:
179 li r6,0
18096: stwu r6,4(r4)
181 bdnz 96b
1823: andi. r5,r5,3
183 beq src_error
184src_error_3:
185 li r6,0
186 mtctr r5
187 addi r4,r4,3
18897: stbu r6,1(r4)
189 bdnz 97b
190src_error:
191 cmpwi 0,r7,0
192 beq 1f
193 li r6,-EFAULT
194 stw r6,0(r7)
1951: addze r3,r0
196 blr
197
198dst_error:
199 cmpwi 0,r8,0
200 beq 1f
201 li r6,-EFAULT
202 stw r6,0(r8)
2031: addze r3,r0
204 blr
205
206.section __ex_table,"a"
207 .long 81b,src_error_1
208 .long 91b,dst_error
209 .long 71b,src_error_4
210 .long 72b,src_error_4
211 .long 73b,src_error_4
212 .long 74b,src_error_4
213 .long 75b,dst_error
214 .long 76b,dst_error
215 .long 77b,dst_error
216 .long 78b,dst_error
217 .long 82b,src_error_2
218 .long 92b,dst_error
219 .long 83b,src_error_3
220 .long 93b,dst_error
221 .long 84b,src_error_3
222 .long 94b,dst_error
223 .long 95b,dst_error
224 .long 96b,dst_error
225 .long 97b,dst_error
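
The 16-bit fold at the end of ip_fast_csum and csum_tcpudp_magic (the rlwinm/add/not/srwi sequence) can be modelled in C as below; this is an illustrative sketch, not code from the patch:

	static unsigned short csum_fold_model(unsigned int sum)
	{
		unsigned int rot = (sum << 16) | (sum >> 16);	/* rlwinm r3,r0,16,0,31 */

		sum += rot;		/* add r3,r0,r3: high half now holds sum of halves plus carry */
		return (~sum) >> 16;	/* not; srwi 16: final 16-bit one's-complement checksum */
	}
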
diff --git a/arch/ppc64/lib/checksum.S b/arch/powerpc/lib/checksum_64.S
index ef96c6c58efc..ef96c6c58efc 100644
--- a/arch/ppc64/lib/checksum.S
+++ b/arch/powerpc/lib/checksum_64.S
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
new file mode 100644
index 000000000000..bee51414812e
--- /dev/null
+++ b/arch/powerpc/lib/copy_32.S
@@ -0,0 +1,543 @@
1/*
2 * Memory copy functions for 32-bit PowerPC.
3 *
4 * Copyright (C) 1996-2005 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <asm/processor.h>
13#include <asm/cache.h>
14#include <asm/errno.h>
15#include <asm/ppc_asm.h>
16
17#define COPY_16_BYTES \
18 lwz r7,4(r4); \
19 lwz r8,8(r4); \
20 lwz r9,12(r4); \
21 lwzu r10,16(r4); \
22 stw r7,4(r6); \
23 stw r8,8(r6); \
24 stw r9,12(r6); \
25 stwu r10,16(r6)
26
27#define COPY_16_BYTES_WITHEX(n) \
288 ## n ## 0: \
29 lwz r7,4(r4); \
308 ## n ## 1: \
31 lwz r8,8(r4); \
328 ## n ## 2: \
33 lwz r9,12(r4); \
348 ## n ## 3: \
35 lwzu r10,16(r4); \
368 ## n ## 4: \
37 stw r7,4(r6); \
388 ## n ## 5: \
39 stw r8,8(r6); \
408 ## n ## 6: \
41 stw r9,12(r6); \
428 ## n ## 7: \
43 stwu r10,16(r6)
44
45#define COPY_16_BYTES_EXCODE(n) \
469 ## n ## 0: \
47 addi r5,r5,-(16 * n); \
48 b 104f; \
499 ## n ## 1: \
50 addi r5,r5,-(16 * n); \
51 b 105f; \
52.section __ex_table,"a"; \
53 .align 2; \
54 .long 8 ## n ## 0b,9 ## n ## 0b; \
55 .long 8 ## n ## 1b,9 ## n ## 0b; \
56 .long 8 ## n ## 2b,9 ## n ## 0b; \
57 .long 8 ## n ## 3b,9 ## n ## 0b; \
58 .long 8 ## n ## 4b,9 ## n ## 1b; \
59 .long 8 ## n ## 5b,9 ## n ## 1b; \
60 .long 8 ## n ## 6b,9 ## n ## 1b; \
61 .long 8 ## n ## 7b,9 ## n ## 1b; \
62 .text
63
64 .text
65 .stabs "arch/powerpc/lib/",N_SO,0,0,0f
 66	.stabs	"copy_32.S",N_SO,0,0,0f
670:
68
69CACHELINE_BYTES = L1_CACHE_BYTES
70LG_CACHELINE_BYTES = L1_CACHE_SHIFT
71CACHELINE_MASK = (L1_CACHE_BYTES-1)
72
73/*
74 * Use dcbz on the complete cache lines in the destination
75 * to set them to zero. This requires that the destination
76 * area is cacheable. -- paulus
77 */
78_GLOBAL(cacheable_memzero)
79 mr r5,r4
80 li r4,0
81 addi r6,r3,-4
82 cmplwi 0,r5,4
83 blt 7f
84 stwu r4,4(r6)
85 beqlr
86 andi. r0,r6,3
87 add r5,r0,r5
88 subf r6,r0,r6
89 clrlwi r7,r6,32-LG_CACHELINE_BYTES
90 add r8,r7,r5
91 srwi r9,r8,LG_CACHELINE_BYTES
92 addic. r9,r9,-1 /* total number of complete cachelines */
93 ble 2f
94 xori r0,r7,CACHELINE_MASK & ~3
95 srwi. r0,r0,2
96 beq 3f
97 mtctr r0
984: stwu r4,4(r6)
99 bdnz 4b
1003: mtctr r9
101 li r7,4
102#if !defined(CONFIG_8xx)
10310: dcbz r7,r6
104#else
10510: stw r4, 4(r6)
106 stw r4, 8(r6)
107 stw r4, 12(r6)
108 stw r4, 16(r6)
109#if CACHE_LINE_SIZE >= 32
110 stw r4, 20(r6)
111 stw r4, 24(r6)
112 stw r4, 28(r6)
113 stw r4, 32(r6)
114#endif /* CACHE_LINE_SIZE */
115#endif
116 addi r6,r6,CACHELINE_BYTES
117 bdnz 10b
118 clrlwi r5,r8,32-LG_CACHELINE_BYTES
119 addi r5,r5,4
1202: srwi r0,r5,2
121 mtctr r0
122 bdz 6f
1231: stwu r4,4(r6)
124 bdnz 1b
1256: andi. r5,r5,3
1267: cmpwi 0,r5,0
127 beqlr
128 mtctr r5
129 addi r6,r6,3
1308: stbu r4,1(r6)
131 bdnz 8b
132 blr
133
134_GLOBAL(memset)
135 rlwimi r4,r4,8,16,23
136 rlwimi r4,r4,16,0,15
137 addi r6,r3,-4
138 cmplwi 0,r5,4
139 blt 7f
140 stwu r4,4(r6)
141 beqlr
142 andi. r0,r6,3
143 add r5,r0,r5
144 subf r6,r0,r6
145 srwi r0,r5,2
146 mtctr r0
147 bdz 6f
1481: stwu r4,4(r6)
149 bdnz 1b
1506: andi. r5,r5,3
1517: cmpwi 0,r5,0
152 beqlr
153 mtctr r5
154 addi r6,r6,3
1558: stbu r4,1(r6)
156 bdnz 8b
157 blr
158
159/*
160 * This version uses dcbz on the complete cache lines in the
161 * destination area to reduce memory traffic. This requires that
162 * the destination area is cacheable.
163 * We only use this version if the source and dest don't overlap.
164 * -- paulus.
165 */
166_GLOBAL(cacheable_memcpy)
167 add r7,r3,r5 /* test if the src & dst overlap */
168 add r8,r4,r5
169 cmplw 0,r4,r7
170 cmplw 1,r3,r8
171 crand 0,0,4 /* cr0.lt &= cr1.lt */
172 blt memcpy /* if regions overlap */
173
174 addi r4,r4,-4
175 addi r6,r3,-4
176 neg r0,r3
177 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
178 beq 58f
179
180 cmplw 0,r5,r0 /* is this more than total to do? */
181 blt 63f /* if not much to do */
182 andi. r8,r0,3 /* get it word-aligned first */
183 subf r5,r0,r5
184 mtctr r8
185 beq+ 61f
18670: lbz r9,4(r4) /* do some bytes */
187 stb r9,4(r6)
188 addi r4,r4,1
189 addi r6,r6,1
190 bdnz 70b
19161: srwi. r0,r0,2
192 mtctr r0
193 beq 58f
19472: lwzu r9,4(r4) /* do some words */
195 stwu r9,4(r6)
196 bdnz 72b
197
19858: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
199 clrlwi r5,r5,32-LG_CACHELINE_BYTES
200 li r11,4
201 mtctr r0
202 beq 63f
20353:
204#if !defined(CONFIG_8xx)
205 dcbz r11,r6
206#endif
207 COPY_16_BYTES
208#if L1_CACHE_BYTES >= 32
209 COPY_16_BYTES
210#if L1_CACHE_BYTES >= 64
211 COPY_16_BYTES
212 COPY_16_BYTES
213#if L1_CACHE_BYTES >= 128
214 COPY_16_BYTES
215 COPY_16_BYTES
216 COPY_16_BYTES
217 COPY_16_BYTES
218#endif
219#endif
220#endif
221 bdnz 53b
222
22363: srwi. r0,r5,2
224 mtctr r0
225 beq 64f
22630: lwzu r0,4(r4)
227 stwu r0,4(r6)
228 bdnz 30b
229
23064: andi. r0,r5,3
231 mtctr r0
232 beq+ 65f
23340: lbz r0,4(r4)
234 stb r0,4(r6)
235 addi r4,r4,1
236 addi r6,r6,1
237 bdnz 40b
23865: blr
239
240_GLOBAL(memmove)
241 cmplw 0,r3,r4
242 bgt backwards_memcpy
243 /* fall through */
244
245_GLOBAL(memcpy)
246 srwi. r7,r5,3
247 addi r6,r3,-4
248 addi r4,r4,-4
249 beq 2f /* if less than 8 bytes to do */
250 andi. r0,r6,3 /* get dest word aligned */
251 mtctr r7
252 bne 5f
2531: lwz r7,4(r4)
254 lwzu r8,8(r4)
255 stw r7,4(r6)
256 stwu r8,8(r6)
257 bdnz 1b
258 andi. r5,r5,7
2592: cmplwi 0,r5,4
260 blt 3f
261 lwzu r0,4(r4)
262 addi r5,r5,-4
263 stwu r0,4(r6)
2643: cmpwi 0,r5,0
265 beqlr
266 mtctr r5
267 addi r4,r4,3
268 addi r6,r6,3
2694: lbzu r0,1(r4)
270 stbu r0,1(r6)
271 bdnz 4b
272 blr
2735: subfic r0,r0,4
274 mtctr r0
2756: lbz r7,4(r4)
276 addi r4,r4,1
277 stb r7,4(r6)
278 addi r6,r6,1
279 bdnz 6b
280 subf r5,r0,r5
281 rlwinm. r7,r5,32-3,3,31
282 beq 2b
283 mtctr r7
284 b 1b
285
286_GLOBAL(backwards_memcpy)
287 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
288 add r6,r3,r5
289 add r4,r4,r5
290 beq 2f
291 andi. r0,r6,3
292 mtctr r7
293 bne 5f
2941: lwz r7,-4(r4)
295 lwzu r8,-8(r4)
296 stw r7,-4(r6)
297 stwu r8,-8(r6)
298 bdnz 1b
299 andi. r5,r5,7
3002: cmplwi 0,r5,4
301 blt 3f
302 lwzu r0,-4(r4)
303 subi r5,r5,4
304 stwu r0,-4(r6)
3053: cmpwi 0,r5,0
306 beqlr
307 mtctr r5
3084: lbzu r0,-1(r4)
309 stbu r0,-1(r6)
310 bdnz 4b
311 blr
3125: mtctr r0
3136: lbzu r7,-1(r4)
314 stbu r7,-1(r6)
315 bdnz 6b
316 subf r5,r0,r5
317 rlwinm. r7,r5,32-3,3,31
318 beq 2b
319 mtctr r7
320 b 1b
321
322_GLOBAL(__copy_tofrom_user)
323 addi r4,r4,-4
324 addi r6,r3,-4
325 neg r0,r3
326 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
327 beq 58f
328
329 cmplw 0,r5,r0 /* is this more than total to do? */
330 blt 63f /* if not much to do */
331 andi. r8,r0,3 /* get it word-aligned first */
332 mtctr r8
333 beq+ 61f
33470: lbz r9,4(r4) /* do some bytes */
33571: stb r9,4(r6)
336 addi r4,r4,1
337 addi r6,r6,1
338 bdnz 70b
33961: subf r5,r0,r5
340 srwi. r0,r0,2
341 mtctr r0
342 beq 58f
34372: lwzu r9,4(r4) /* do some words */
34473: stwu r9,4(r6)
345 bdnz 72b
346
347 .section __ex_table,"a"
348 .align 2
349 .long 70b,100f
350 .long 71b,101f
351 .long 72b,102f
352 .long 73b,103f
353 .text
354
35558: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
356 clrlwi r5,r5,32-LG_CACHELINE_BYTES
357 li r11,4
358 beq 63f
359
360#ifdef CONFIG_8xx
361 /* Don't use prefetch on 8xx */
362 mtctr r0
363 li r0,0
36453: COPY_16_BYTES_WITHEX(0)
365 bdnz 53b
366
367#else /* not CONFIG_8xx */
368 /* Here we decide how far ahead to prefetch the source */
369 li r3,4
370 cmpwi r0,1
371 li r7,0
372 ble 114f
373 li r7,1
374#if MAX_COPY_PREFETCH > 1
375 /* Heuristically, for large transfers we prefetch
376 MAX_COPY_PREFETCH cachelines ahead. For small transfers
377 we prefetch 1 cacheline ahead. */
378 cmpwi r0,MAX_COPY_PREFETCH
379 ble 112f
380 li r7,MAX_COPY_PREFETCH
381112: mtctr r7
382111: dcbt r3,r4
383 addi r3,r3,CACHELINE_BYTES
384 bdnz 111b
385#else
386 dcbt r3,r4
387 addi r3,r3,CACHELINE_BYTES
388#endif /* MAX_COPY_PREFETCH > 1 */
389
390114: subf r8,r7,r0
391 mr r0,r7
392 mtctr r8
393
39453: dcbt r3,r4
39554: dcbz r11,r6
396 .section __ex_table,"a"
397 .align 2
398 .long 54b,105f
399 .text
400/* the main body of the cacheline loop */
401 COPY_16_BYTES_WITHEX(0)
402#if L1_CACHE_BYTES >= 32
403 COPY_16_BYTES_WITHEX(1)
404#if L1_CACHE_BYTES >= 64
405 COPY_16_BYTES_WITHEX(2)
406 COPY_16_BYTES_WITHEX(3)
407#if L1_CACHE_BYTES >= 128
408 COPY_16_BYTES_WITHEX(4)
409 COPY_16_BYTES_WITHEX(5)
410 COPY_16_BYTES_WITHEX(6)
411 COPY_16_BYTES_WITHEX(7)
412#endif
413#endif
414#endif
415 bdnz 53b
416 cmpwi r0,0
417 li r3,4
418 li r7,0
419 bne 114b
420#endif /* CONFIG_8xx */
421
42263: srwi. r0,r5,2
423 mtctr r0
424 beq 64f
42530: lwzu r0,4(r4)
42631: stwu r0,4(r6)
427 bdnz 30b
428
42964: andi. r0,r5,3
430 mtctr r0
431 beq+ 65f
43240: lbz r0,4(r4)
43341: stb r0,4(r6)
434 addi r4,r4,1
435 addi r6,r6,1
436 bdnz 40b
43765: li r3,0
438 blr
439
440/* read fault, initial single-byte copy */
441100: li r9,0
442 b 90f
443/* write fault, initial single-byte copy */
444101: li r9,1
44590: subf r5,r8,r5
446 li r3,0
447 b 99f
448/* read fault, initial word copy */
449102: li r9,0
450 b 91f
451/* write fault, initial word copy */
452103: li r9,1
45391: li r3,2
454 b 99f
455
456/*
457 * this stuff handles faults in the cacheline loop and branches to either
458 * 104f (if in read part) or 105f (if in write part), after updating r5
459 */
460 COPY_16_BYTES_EXCODE(0)
461#if L1_CACHE_BYTES >= 32
462 COPY_16_BYTES_EXCODE(1)
463#if L1_CACHE_BYTES >= 64
464 COPY_16_BYTES_EXCODE(2)
465 COPY_16_BYTES_EXCODE(3)
466#if L1_CACHE_BYTES >= 128
467 COPY_16_BYTES_EXCODE(4)
468 COPY_16_BYTES_EXCODE(5)
469 COPY_16_BYTES_EXCODE(6)
470 COPY_16_BYTES_EXCODE(7)
471#endif
472#endif
473#endif
474
475/* read fault in cacheline loop */
476104: li r9,0
477 b 92f
478/* fault on dcbz (effectively a write fault) */
479/* or write fault in cacheline loop */
480105: li r9,1
48192: li r3,LG_CACHELINE_BYTES
482 mfctr r8
483 add r0,r0,r8
484 b 106f
485/* read fault in final word loop */
486108: li r9,0
487 b 93f
488/* write fault in final word loop */
489109: li r9,1
49093: andi. r5,r5,3
491 li r3,2
492 b 99f
493/* read fault in final byte loop */
494110: li r9,0
495 b 94f
496/* write fault in final byte loop */
497111: li r9,1
49894: li r5,0
499 li r3,0
500/*
501 * At this stage the number of bytes not copied is
502 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
503 */
50499: mfctr r0
505106: slw r3,r0,r3
506 add. r3,r3,r5
507 beq 120f /* shouldn't happen */
508 cmpwi 0,r9,0
509 bne 120f
510/* for a read fault, first try to continue the copy one byte at a time */
511 mtctr r3
512130: lbz r0,4(r4)
513131: stb r0,4(r6)
514 addi r4,r4,1
515 addi r6,r6,1
516 bdnz 130b
517/* then clear out the destination: r3 bytes starting at 4(r6) */
518132: mfctr r3
519 srwi. r0,r3,2
520 li r9,0
521 mtctr r0
522 beq 113f
523112: stwu r9,4(r6)
524 bdnz 112b
525113: andi. r0,r3,3
526 mtctr r0
527 beq 120f
528114: stb r9,4(r6)
529 addi r6,r6,1
530 bdnz 114b
531120: blr
532
533 .section __ex_table,"a"
534 .align 2
535 .long 30b,108b
536 .long 31b,109b
537 .long 40b,110b
538 .long 41b,111b
539 .long 130b,132b
540 .long 131b,120b
541 .long 112b,120b
542 .long 114b,120b
543 .text
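
cacheable_memcpy's opening cmplw/cmplw/crand sequence is simply an overlap test; a C rendering (a sketch only, not from the patch) of the condition that sends it to the plain memcpy fallback:

	static int regions_overlap(const char *dst, const char *src, unsigned long n)
	{
		/* overlap iff src starts before dst ends and dst starts before src ends */
		return src < dst + n && dst < src + n;
	}
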
diff --git a/arch/ppc64/lib/copypage.S b/arch/powerpc/lib/copypage_64.S
index 733d61618bbf..733d61618bbf 100644
--- a/arch/ppc64/lib/copypage.S
+++ b/arch/powerpc/lib/copypage_64.S
diff --git a/arch/ppc64/lib/copyuser.S b/arch/powerpc/lib/copyuser_64.S
index a0b3fbbd6fb1..a0b3fbbd6fb1 100644
--- a/arch/ppc64/lib/copyuser.S
+++ b/arch/powerpc/lib/copyuser_64.S
diff --git a/arch/powerpc/lib/div64.S b/arch/powerpc/lib/div64.S
new file mode 100644
index 000000000000..83d9832fd919
--- /dev/null
+++ b/arch/powerpc/lib/div64.S
@@ -0,0 +1,59 @@
1/*
2 * Divide a 64-bit unsigned number by a 32-bit unsigned number.
3 * This routine assumes that the top 32 bits of the dividend are
4 * non-zero to start with.
 5 * On entry, r3 points to the dividend, which gets overwritten with
6 * the 64-bit quotient, and r4 contains the divisor.
7 * On exit, r3 contains the remainder.
8 *
9 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16#include <asm/ppc_asm.h>
17#include <asm/processor.h>
18
19_GLOBAL(__div64_32)
20 lwz r5,0(r3) # get the dividend into r5/r6
21 lwz r6,4(r3)
22 cmplw r5,r4
23 li r7,0
24 li r8,0
25 blt 1f
26 divwu r7,r5,r4 # if dividend.hi >= divisor,
27 mullw r0,r7,r4 # quotient.hi = dividend.hi / divisor
28 subf. r5,r0,r5 # dividend.hi %= divisor
29 beq 3f
301: mr r11,r5 # here dividend.hi != 0
31 andis. r0,r5,0xc000
32 bne 2f
33 cntlzw r0,r5 # we are shifting the dividend right
34 li r10,-1 # to make it < 2^32, and shifting
35 srw r10,r10,r0 # the divisor right the same amount,
36 addc r9,r4,r10 # rounding up (so the estimate cannot
37 andc r11,r6,r10 # ever be too large, only too small)
38 andc r9,r9,r10
39 addze r9,r9
40 or r11,r5,r11
41 rotlw r9,r9,r0
42 rotlw r11,r11,r0
43 divwu r11,r11,r9 # then we divide the shifted quantities
442: mullw r10,r11,r4 # to get an estimate of the quotient,
45 mulhwu r9,r11,r4 # multiply the estimate by the divisor,
46 subfc r6,r10,r6 # take the product from the divisor,
47 add r8,r8,r11 # and add the estimate to the accumulated
48 subfe. r5,r9,r5 # quotient
49 bne 1b
503: cmplw r6,r4
51 blt 4f
52 divwu r0,r6,r4 # perform the remaining 32-bit division
53 mullw r10,r0,r4 # and get the remainder
54 add r8,r8,r0
55 subf r6,r10,r6
564: stw r7,0(r3) # return the quotient in *r3
57 stw r8,4(r3)
58 mr r3,r6 # return the remainder in r3
59 blr
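
A C reference model (a sketch, assuming the usual do_div() calling convention) of what __div64_32 computes: the 64-bit value at *n is replaced by the quotient and the 32-bit remainder is returned.

	static unsigned int div64_32_model(unsigned long long *n, unsigned int base)
	{
		unsigned int rem = (unsigned int)(*n % base);

		*n /= base;	/* quotient written back, as the asm does through r3 */
		return rem;
	}
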
diff --git a/arch/ppc64/lib/e2a.c b/arch/powerpc/lib/e2a.c
index d2b834887920..d2b834887920 100644
--- a/arch/ppc64/lib/e2a.c
+++ b/arch/powerpc/lib/e2a.c
diff --git a/arch/ppc64/lib/locks.c b/arch/powerpc/lib/locks.c
index 033643ab69e0..3794715b2972 100644
--- a/arch/ppc64/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -17,11 +17,12 @@
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/stringify.h> 19#include <linux/stringify.h>
20#include <asm/hvcall.h> 20#include <linux/smp.h>
21#include <asm/iSeries/HvCall.h>
22 21
23/* waiting for a spinlock... */ 22/* waiting for a spinlock... */
24#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 23#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
24#include <asm/hvcall.h>
25#include <asm/iSeries/HvCall.h>
25 26
26void __spin_yield(raw_spinlock_t *lock) 27void __spin_yield(raw_spinlock_t *lock)
27{ 28{
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
new file mode 100644
index 000000000000..68df20283ff5
--- /dev/null
+++ b/arch/powerpc/lib/mem_64.S
@@ -0,0 +1,119 @@
1/*
2 * String handling functions for PowerPC.
3 *
4 * Copyright (C) 1996 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/errno.h>
13#include <asm/ppc_asm.h>
14
15_GLOBAL(memset)
16 neg r0,r3
17 rlwimi r4,r4,8,16,23
18 andi. r0,r0,7 /* # bytes to be 8-byte aligned */
19 rlwimi r4,r4,16,0,15
20 cmplw cr1,r5,r0 /* do we get that far? */
21 rldimi r4,r4,32,0
22 mtcrf 1,r0
23 mr r6,r3
24 blt cr1,8f
25 beq+ 3f /* if already 8-byte aligned */
26 subf r5,r0,r5
27 bf 31,1f
28 stb r4,0(r6)
29 addi r6,r6,1
301: bf 30,2f
31 sth r4,0(r6)
32 addi r6,r6,2
332: bf 29,3f
34 stw r4,0(r6)
35 addi r6,r6,4
363: srdi. r0,r5,6
37 clrldi r5,r5,58
38 mtctr r0
39 beq 5f
404: std r4,0(r6)
41 std r4,8(r6)
42 std r4,16(r6)
43 std r4,24(r6)
44 std r4,32(r6)
45 std r4,40(r6)
46 std r4,48(r6)
47 std r4,56(r6)
48 addi r6,r6,64
49 bdnz 4b
505: srwi. r0,r5,3
51 clrlwi r5,r5,29
52 mtcrf 1,r0
53 beq 8f
54 bf 29,6f
55 std r4,0(r6)
56 std r4,8(r6)
57 std r4,16(r6)
58 std r4,24(r6)
59 addi r6,r6,32
606: bf 30,7f
61 std r4,0(r6)
62 std r4,8(r6)
63 addi r6,r6,16
647: bf 31,8f
65 std r4,0(r6)
66 addi r6,r6,8
678: cmpwi r5,0
68 mtcrf 1,r5
69 beqlr+
70 bf 29,9f
71 stw r4,0(r6)
72 addi r6,r6,4
739: bf 30,10f
74 sth r4,0(r6)
75 addi r6,r6,2
7610: bflr 31
77 stb r4,0(r6)
78 blr
79
80_GLOBAL(memmove)
81 cmplw 0,r3,r4
82 bgt .backwards_memcpy
83 b .memcpy
84
85_GLOBAL(backwards_memcpy)
86 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
87 add r6,r3,r5
88 add r4,r4,r5
89 beq 2f
90 andi. r0,r6,3
91 mtctr r7
92 bne 5f
931: lwz r7,-4(r4)
94 lwzu r8,-8(r4)
95 stw r7,-4(r6)
96 stwu r8,-8(r6)
97 bdnz 1b
98 andi. r5,r5,7
992: cmplwi 0,r5,4
100 blt 3f
101 lwzu r0,-4(r4)
102 subi r5,r5,4
103 stwu r0,-4(r6)
1043: cmpwi 0,r5,0
105 beqlr
106 mtctr r5
1074: lbzu r0,-1(r4)
108 stbu r0,-1(r6)
109 bdnz 4b
110 blr
1115: mtctr r0
1126: lbzu r7,-1(r4)
113 stbu r7,-1(r6)
114 bdnz 6b
115 subf r5,r0,r5
116 rlwinm. r7,r5,32-3,3,31
117 beq 2b
118 mtctr r7
119 b 1b
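
The mtcrf/bf dance at the top of the 64-bit memset peels the unaligned head one power of two at a time; sketched in C (illustrative only, with 'head' standing for the byte count needed to reach 8-byte alignment and 'pat' already byte-replicated):

	static void *memset_align_head(void *p, unsigned long long pat, unsigned int head)
	{
		char *q = p;

		if (head & 1) { *q = (char)pat; q += 1; }		/* bf 31: one byte  */
		if (head & 2) { *(short *)q = (short)pat; q += 2; }	/* bf 30: halfword  */
		if (head & 4) { *(int *)q = (int)pat; q += 4; }		/* bf 29: word      */
		return q;						/* now 8-byte aligned */
	}
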
diff --git a/arch/ppc64/lib/memcpy.S b/arch/powerpc/lib/memcpy_64.S
index 9ccacdf5bcb9..9ccacdf5bcb9 100644
--- a/arch/ppc64/lib/memcpy.S
+++ b/arch/powerpc/lib/memcpy_64.S
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
new file mode 100644
index 000000000000..42c5de2c898f
--- /dev/null
+++ b/arch/powerpc/lib/rheap.c
@@ -0,0 +1,693 @@
1/*
2 * arch/ppc/syslib/rheap.c
3 *
4 * A Remote Heap. Remote means that we don't touch the memory that the
5 * heap points to. Normal heap implementations use the memory they manage
6 * to place their list. We cannot do that because the memory we manage may
 7 * have special properties, for example it is uncacheable or of a different
 8 * endianness.
9 *
10 * Author: Pantelis Antoniou <panto@intracom.gr>
11 *
12 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
13 * the terms of the GNU General Public License version 2. This program
14 * is licensed "as is" without any warranty of any kind, whether express
15 * or implied.
16 */
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/mm.h>
20#include <linux/slab.h>
21
22#include <asm/rheap.h>
23
24/*
25 * Fixup a list_head, needed when copying lists. If the pointers fall
26 * between s and e, apply the delta. This assumes that
27 * sizeof(struct list_head *) == sizeof(unsigned long *).
28 */
29static inline void fixup(unsigned long s, unsigned long e, int d,
30 struct list_head *l)
31{
32 unsigned long *pp;
33
34 pp = (unsigned long *)&l->next;
35 if (*pp >= s && *pp < e)
36 *pp += d;
37
38 pp = (unsigned long *)&l->prev;
39 if (*pp >= s && *pp < e)
40 *pp += d;
41}
42
43/* Grow the allocated blocks */
44static int grow(rh_info_t * info, int max_blocks)
45{
46 rh_block_t *block, *blk;
47 int i, new_blocks;
48 int delta;
49 unsigned long blks, blke;
50
51 if (max_blocks <= info->max_blocks)
52 return -EINVAL;
53
54 new_blocks = max_blocks - info->max_blocks;
55
56 block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
57 if (block == NULL)
58 return -ENOMEM;
59
60 if (info->max_blocks > 0) {
61
62 /* copy old block area */
63 memcpy(block, info->block,
64 sizeof(rh_block_t) * info->max_blocks);
65
66 delta = (char *)block - (char *)info->block;
67
68 /* and fixup list pointers */
69 blks = (unsigned long)info->block;
70 blke = (unsigned long)(info->block + info->max_blocks);
71
72 for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
73 fixup(blks, blke, delta, &blk->list);
74
75 fixup(blks, blke, delta, &info->empty_list);
76 fixup(blks, blke, delta, &info->free_list);
77 fixup(blks, blke, delta, &info->taken_list);
78
79 /* free the old allocated memory */
80 if ((info->flags & RHIF_STATIC_BLOCK) == 0)
81 kfree(info->block);
82 }
83
84 info->block = block;
85 info->empty_slots += new_blocks;
86 info->max_blocks = max_blocks;
87 info->flags &= ~RHIF_STATIC_BLOCK;
88
89 /* add all new blocks to the free list */
90 for (i = 0, blk = block + info->max_blocks; i < new_blocks; i++, blk++)
91 list_add(&blk->list, &info->empty_list);
92
93 return 0;
94}
95
96/*
 97 * Ensure at least the required number of empty slots.  If this function
 98 * causes the block area to grow, then all pointers kept into the block
 99 * area become invalid!
100 */
101static int assure_empty(rh_info_t * info, int slots)
102{
103 int max_blocks;
104
105 /* This function is not meant to be used to grow uncontrollably */
106 if (slots >= 4)
107 return -EINVAL;
108
109 /* Enough space */
110 if (info->empty_slots >= slots)
111 return 0;
112
113 /* Next 16 sized block */
114 max_blocks = ((info->max_blocks + slots) + 15) & ~15;
115
116 return grow(info, max_blocks);
117}
118
119static rh_block_t *get_slot(rh_info_t * info)
120{
121 rh_block_t *blk;
122
123 /* If no more free slots, and failure to extend. */
124 /* XXX: You should have called assure_empty before */
125 if (info->empty_slots == 0) {
126 printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
127 return NULL;
128 }
129
130 /* Get empty slot to use */
131 blk = list_entry(info->empty_list.next, rh_block_t, list);
132 list_del_init(&blk->list);
133 info->empty_slots--;
134
135 /* Initialize */
136 blk->start = NULL;
137 blk->size = 0;
138 blk->owner = NULL;
139
140 return blk;
141}
142
143static inline void release_slot(rh_info_t * info, rh_block_t * blk)
144{
145 list_add(&blk->list, &info->empty_list);
146 info->empty_slots++;
147}
148
149static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
150{
151 rh_block_t *blk;
152 rh_block_t *before;
153 rh_block_t *after;
154 rh_block_t *next;
155 int size;
156 unsigned long s, e, bs, be;
157 struct list_head *l;
158
159 /* We assume that they are aligned properly */
160 size = blkn->size;
161 s = (unsigned long)blkn->start;
162 e = s + size;
163
164 /* Find the blocks immediately before and after the given one
165 * (if any) */
166 before = NULL;
167 after = NULL;
168 next = NULL;
169
170 list_for_each(l, &info->free_list) {
171 blk = list_entry(l, rh_block_t, list);
172
173 bs = (unsigned long)blk->start;
174 be = bs + blk->size;
175
176 if (next == NULL && s >= bs)
177 next = blk;
178
179 if (be == s)
180 before = blk;
181
182 if (e == bs)
183 after = blk;
184
185 /* If both are not null, break now */
186 if (before != NULL && after != NULL)
187 break;
188 }
189
190 /* Now check if they are really adjacent */
191 if (before != NULL && s != (unsigned long)before->start + before->size)
192 before = NULL;
193
194 if (after != NULL && e != (unsigned long)after->start)
195 after = NULL;
196
197 /* No coalescing; list insert and return */
198 if (before == NULL && after == NULL) {
199
200 if (next != NULL)
201 list_add(&blkn->list, &next->list);
202 else
203 list_add(&blkn->list, &info->free_list);
204
205 return;
206 }
207
208 /* We don't need it anymore */
209 release_slot(info, blkn);
210
211 /* Grow the before block */
212 if (before != NULL && after == NULL) {
213 before->size += size;
214 return;
215 }
216
217 /* Grow the after block backwards */
218 if (before == NULL && after != NULL) {
219 after->start = (int8_t *)after->start - size;
220 after->size += size;
221 return;
222 }
223
224 /* Grow the before block, and release the after block */
225 before->size += size + after->size;
226 list_del(&after->list);
227 release_slot(info, after);
228}
229
230static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
231{
232 rh_block_t *blk;
233 struct list_head *l;
234
235 /* Find the block immediately before the given one (if any) */
236 list_for_each(l, &info->taken_list) {
237 blk = list_entry(l, rh_block_t, list);
238 if (blk->start > blkn->start) {
239 list_add_tail(&blkn->list, &blk->list);
240 return;
241 }
242 }
243
244 list_add_tail(&blkn->list, &info->taken_list);
245}
246
247/*
248 * Create a remote heap dynamically.  Note that no memory for the blocks
249 * is allocated here; it will be allocated upon the first allocation.
250 */
251rh_info_t *rh_create(unsigned int alignment)
252{
253 rh_info_t *info;
254
255 /* Alignment must be a power of two */
256 if ((alignment & (alignment - 1)) != 0)
257 return ERR_PTR(-EINVAL);
258
259 info = kmalloc(sizeof(*info), GFP_KERNEL);
260 if (info == NULL)
261 return ERR_PTR(-ENOMEM);
262
263 info->alignment = alignment;
264
265 /* Initially everything as empty */
266 info->block = NULL;
267 info->max_blocks = 0;
268 info->empty_slots = 0;
269 info->flags = 0;
270
271 INIT_LIST_HEAD(&info->empty_list);
272 INIT_LIST_HEAD(&info->free_list);
273 INIT_LIST_HEAD(&info->taken_list);
274
275 return info;
276}
277
278/*
279 * Destroy a dynamically created remote heap. Deallocate only if the areas
280 * are not static
281 */
282void rh_destroy(rh_info_t * info)
283{
284 if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
285 kfree(info->block);
286
287 if ((info->flags & RHIF_STATIC_INFO) == 0)
288 kfree(info);
289}
290
291/*
292 * Initialize in place a remote heap info block. This is needed to support
293 * operation very early in the startup of the kernel, when it is not yet safe
294 * to call kmalloc.
295 */
296void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
297 rh_block_t * block)
298{
299 int i;
300 rh_block_t *blk;
301
302 /* Alignment must be a power of two */
303 if ((alignment & (alignment - 1)) != 0)
304 return;
305
306 info->alignment = alignment;
307
308 /* Initially everything as empty */
309 info->block = block;
310 info->max_blocks = max_blocks;
311 info->empty_slots = max_blocks;
312 info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
313
314 INIT_LIST_HEAD(&info->empty_list);
315 INIT_LIST_HEAD(&info->free_list);
316 INIT_LIST_HEAD(&info->taken_list);
317
318 /* Add all new blocks to the free list */
319 for (i = 0, blk = block; i < max_blocks; i++, blk++)
320 list_add(&blk->list, &info->empty_list);
321}
322
323/* Attach a free memory region, coalescing adjacent free regions */
324int rh_attach_region(rh_info_t * info, void *start, int size)
325{
326 rh_block_t *blk;
327 unsigned long s, e, m;
328 int r;
329
330 /* The region must be aligned */
331 s = (unsigned long)start;
332 e = s + size;
333 m = info->alignment - 1;
334
335 /* Round start up */
336 s = (s + m) & ~m;
337
338 /* Round end down */
339 e = e & ~m;
340
341 /* Take final values */
342 start = (void *)s;
343 size = (int)(e - s);
344
345 /* Grow the blocks, if needed */
346 r = assure_empty(info, 1);
347 if (r < 0)
348 return r;
349
350 blk = get_slot(info);
351 blk->start = start;
352 blk->size = size;
353 blk->owner = NULL;
354
355 attach_free_block(info, blk);
356
357 return 0;
358}
359
360/* Detach the given address range, splitting a free block if needed. */
361void *rh_detach_region(rh_info_t * info, void *start, int size)
362{
363 struct list_head *l;
364 rh_block_t *blk, *newblk;
365 unsigned long s, e, m, bs, be;
366
367 /* Validate size */
368 if (size <= 0)
369 return ERR_PTR(-EINVAL);
370
371 /* The region must be aligned */
372 s = (unsigned long)start;
373 e = s + size;
374 m = info->alignment - 1;
375
376 /* Round start up */
377 s = (s + m) & ~m;
378
379 /* Round end down */
380 e = e & ~m;
381
382 if (assure_empty(info, 1) < 0)
383 return ERR_PTR(-ENOMEM);
384
385 blk = NULL;
386 list_for_each(l, &info->free_list) {
387 blk = list_entry(l, rh_block_t, list);
388 /* The range must lie entirely inside one free block */
389 bs = (unsigned long)blk->start;
390 be = (unsigned long)blk->start + blk->size;
391 if (s >= bs && e <= be)
392 break;
393 blk = NULL;
394 }
395
396 if (blk == NULL)
397 return ERR_PTR(-ENOMEM);
398
399 /* Perfect fit */
400 if (bs == s && be == e) {
401 /* Delete from free list, release slot */
402 list_del(&blk->list);
403 release_slot(info, blk);
404 return (void *)s;
405 }
406
407 /* blk still in free list, with updated start and/or size */
408 if (bs == s || be == e) {
409 if (bs == s)
410 blk->start = (int8_t *)blk->start + size;
411 blk->size -= size;
412
413 } else {
414 /* The front free fragment */
415 blk->size = s - bs;
416
417 /* the back free fragment */
418 newblk = get_slot(info);
419 newblk->start = (void *)e;
420 newblk->size = be - e;
421
422 list_add(&newblk->list, &blk->list);
423 }
424
425 return (void *)s;
426}
427
428void *rh_alloc(rh_info_t * info, int size, const char *owner)
429{
430 struct list_head *l;
431 rh_block_t *blk;
432 rh_block_t *newblk;
433 void *start;
434
435 /* Validate size */
436 if (size <= 0)
437 return ERR_PTR(-EINVAL);
438
439 /* Align to configured alignment */
440 size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
441
442 if (assure_empty(info, 1) < 0)
443 return ERR_PTR(-ENOMEM);
444
445 blk = NULL;
446 list_for_each(l, &info->free_list) {
447 blk = list_entry(l, rh_block_t, list);
448 if (size <= blk->size)
449 break;
450 blk = NULL;
451 }
452
453 if (blk == NULL)
454 return ERR_PTR(-ENOMEM);
455
456 /* Just fits */
457 if (blk->size == size) {
458 /* Move from free list to taken list */
459 list_del(&blk->list);
460 blk->owner = owner;
461 start = blk->start;
462
463 attach_taken_block(info, blk);
464
465 return start;
466 }
467
468 newblk = get_slot(info);
469 newblk->start = blk->start;
470 newblk->size = size;
471 newblk->owner = owner;
472
473 /* blk still in free list, with updated start, size */
474 blk->start = (int8_t *)blk->start + size;
475 blk->size -= size;
476
477 start = newblk->start;
478
479 attach_taken_block(info, newblk);
480
481 return start;
482}
483
484/* allocate at precisely the given address */
485void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
486{
487 struct list_head *l;
488 rh_block_t *blk, *newblk1, *newblk2;
489 unsigned long s, e, m, bs, be;
490
491 /* Validate size */
492 if (size <= 0)
493 return ERR_PTR(-EINVAL);
494
495 /* The region must be aligned */
496 s = (unsigned long)start;
497 e = s + size;
498 m = info->alignment - 1;
499
500 /* Round start up */
501 s = (s + m) & ~m;
502
503 /* Round end down */
504 e = e & ~m;
505
506 if (assure_empty(info, 2) < 0)
507 return ERR_PTR(-ENOMEM);
508
509 blk = NULL;
510 list_for_each(l, &info->free_list) {
511 blk = list_entry(l, rh_block_t, list);
512 /* The range must lie entirely inside one free block */
513 bs = (unsigned long)blk->start;
514 be = (unsigned long)blk->start + blk->size;
515 if (s >= bs && e <= be)
516 break;
517 }
518
519 if (blk == NULL)
520 return ERR_PTR(-ENOMEM);
521
522 /* Perfect fit */
523 if (bs == s && be == e) {
524 /* Move from free list to taken list */
525 list_del(&blk->list);
526 blk->owner = owner;
527
528 start = blk->start;
529 attach_taken_block(info, blk);
530
531 return start;
532
533 }
534
535 /* blk still in free list, with updated start and/or size */
536 if (bs == s || be == e) {
537 if (bs == s)
538 blk->start = (int8_t *)blk->start + size;
539 blk->size -= size;
540
541 } else {
542 /* The front free fragment */
543 blk->size = s - bs;
544
545 /* The back free fragment */
546 newblk2 = get_slot(info);
547 newblk2->start = (void *)e;
548 newblk2->size = be - e;
549
550 list_add(&newblk2->list, &blk->list);
551 }
552
553 newblk1 = get_slot(info);
554 newblk1->start = (void *)s;
555 newblk1->size = e - s;
556 newblk1->owner = owner;
557
558 start = newblk1->start;
559 attach_taken_block(info, newblk1);
560
561 return start;
562}
563
564int rh_free(rh_info_t * info, void *start)
565{
566 rh_block_t *blk, *blk2;
567 struct list_head *l;
568 int size;
569
570 /* Linear search for block */
571 blk = NULL;
572 list_for_each(l, &info->taken_list) {
573 blk2 = list_entry(l, rh_block_t, list);
574 if (start < blk2->start)
575 break;
576 blk = blk2;
577 }
578
579 if (blk == NULL || start > (blk->start + blk->size))
580 return -EINVAL;
581
582 /* Remove from taken list */
583 list_del(&blk->list);
584
585 /* Get size of freed block */
586 size = blk->size;
587 attach_free_block(info, blk);
588
589 return size;
590}
591
592int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
593{
594 rh_block_t *blk;
595 struct list_head *l;
596 struct list_head *h;
597 int nr;
598
599 switch (what) {
600
601 case RHGS_FREE:
602 h = &info->free_list;
603 break;
604
605 case RHGS_TAKEN:
606 h = &info->taken_list;
607 break;
608
609 default:
610 return -EINVAL;
611 }
612
613 /* Linear search for block */
614 nr = 0;
615 list_for_each(l, h) {
616 blk = list_entry(l, rh_block_t, list);
617 if (stats != NULL && nr < max_stats) {
618 stats->start = blk->start;
619 stats->size = blk->size;
620 stats->owner = blk->owner;
621 stats++;
622 }
623 nr++;
624 }
625
626 return nr;
627}
628
629int rh_set_owner(rh_info_t * info, void *start, const char *owner)
630{
631 rh_block_t *blk, *blk2;
632 struct list_head *l;
633 int size;
634
635 /* Linear search for block */
636 blk = NULL;
637 list_for_each(l, &info->taken_list) {
638 blk2 = list_entry(l, rh_block_t, list);
639 if (start < blk2->start)
640 break;
641 blk = blk2;
642 }
643
644 if (blk == NULL || start > (blk->start + blk->size))
645 return -EINVAL;
646
647 blk->owner = owner;
648 size = blk->size;
649
650 return size;
651}
652
653void rh_dump(rh_info_t * info)
654{
655 static rh_stats_t st[32]; /* XXX maximum 32 blocks */
656 int maxnr;
657 int i, nr;
658
659 maxnr = sizeof(st) / sizeof(st[0]);
660
661 printk(KERN_INFO
662 "info @0x%p (%d slots empty / %d max)\n",
663 info, info->empty_slots, info->max_blocks);
664
665 printk(KERN_INFO " Free:\n");
666 nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
667 if (nr > maxnr)
668 nr = maxnr;
669 for (i = 0; i < nr; i++)
670 printk(KERN_INFO
671 " 0x%p-0x%p (%u)\n",
672 st[i].start, (int8_t *) st[i].start + st[i].size,
673 st[i].size);
674 printk(KERN_INFO "\n");
675
676 printk(KERN_INFO " Taken:\n");
677 nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
678 if (nr > maxnr)
679 nr = maxnr;
680 for (i = 0; i < nr; i++)
681 printk(KERN_INFO
682 " 0x%p-0x%p (%u) %s\n",
683 st[i].start, (int8_t *) st[i].start + st[i].size,
684 st[i].size, st[i].owner != NULL ? st[i].owner : "");
685 printk(KERN_INFO "\n");
686}
687
688void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
689{
690 printk(KERN_INFO
691 "blk @0x%p: 0x%p-0x%p (%u)\n",
692 blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
693}
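
A hypothetical usage sketch of the remote-heap API above (error handling elided; the names here are examples, not from the patch): manage a chunk of device memory without ever dereferencing it.

	#include <asm/rheap.h>

	static void *carve_from_device_mem(void *dev_mem, int dev_mem_size)
	{
		rh_info_t *rh = rh_create(16);			/* 16-byte allocation alignment */

		rh_attach_region(rh, dev_mem, dev_mem_size);	/* hand the region to the heap  */
		return rh_alloc(rh, 256, "example");		/* 256 bytes, tagged for rh_dump */
	}
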
diff --git a/arch/ppc64/lib/sstep.c b/arch/powerpc/lib/sstep.c
index e79123d1485c..666c2aa55016 100644
--- a/arch/ppc64/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -10,13 +10,18 @@
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/ptrace.h> 12#include <linux/ptrace.h>
13#include <linux/config.h>
13#include <asm/sstep.h> 14#include <asm/sstep.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
15 16
16extern char system_call_common[]; 17extern char system_call_common[];
17 18
19#ifdef CONFIG_PPC64
18/* Bits in SRR1 that are copied from MSR */ 20/* Bits in SRR1 that are copied from MSR */
19#define MSR_MASK 0xffffffff87c0ffff 21#define MSR_MASK 0xffffffff87c0ffff
22#else
23#define MSR_MASK 0x87c0ffff
24#endif
20 25
21/* 26/*
22 * Determine whether a conditional branch instruction would branch. 27 * Determine whether a conditional branch instruction would branch.
@@ -66,6 +71,7 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
66 if (branch_taken(instr, regs)) 71 if (branch_taken(instr, regs))
67 regs->nip = imm; 72 regs->nip = imm;
68 return 1; 73 return 1;
74#ifdef CONFIG_PPC64
69 case 17: /* sc */ 75 case 17: /* sc */
70 /* 76 /*
71 * N.B. this uses knowledge about how the syscall 77 * N.B. this uses knowledge about how the syscall
@@ -79,6 +85,7 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
79 regs->nip = (unsigned long) &system_call_common; 85 regs->nip = (unsigned long) &system_call_common;
80 regs->msr = MSR_KERNEL; 86 regs->msr = MSR_KERNEL;
81 return 1; 87 return 1;
88#endif
82 case 18: /* b */ 89 case 18: /* b */
83 imm = instr & 0x03fffffc; 90 imm = instr & 0x03fffffc;
84 if (imm & 0x02000000) 91 if (imm & 0x02000000)
@@ -121,6 +128,15 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
121 if ((regs->msr & MSR_SF) == 0) 128 if ((regs->msr & MSR_SF) == 0)
122 regs->nip &= 0xffffffffUL; 129 regs->nip &= 0xffffffffUL;
123 return 1; 130 return 1;
131 case 0x124: /* mtmsr */
132 imm = regs->gpr[rd];
133 if ((imm & MSR_RI) == 0)
134 /* can't step mtmsr that would clear MSR_RI */
135 return -1;
136 regs->msr = imm;
137 regs->nip += 4;
138 return 1;
139#ifdef CONFIG_PPC64
124 case 0x164: /* mtmsrd */ 140 case 0x164: /* mtmsrd */
125 /* only MSR_EE and MSR_RI get changed if bit 15 set */ 141 /* only MSR_EE and MSR_RI get changed if bit 15 set */
126 /* mtmsrd doesn't change MSR_HV and MSR_ME */ 142 /* mtmsrd doesn't change MSR_HV and MSR_ME */
@@ -135,6 +151,7 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
135 if ((imm & MSR_SF) == 0) 151 if ((imm & MSR_SF) == 0)
136 regs->nip &= 0xffffffffUL; 152 regs->nip &= 0xffffffffUL;
137 return 1; 153 return 1;
154#endif
138 } 155 }
139 } 156 }
140 return 0; 157 return 0;
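
For context, a sketch (illustrative, not from the patch) of how a single-step client would interpret emulate_step()'s three outcomes after this change:

	static int try_single_step(struct pt_regs *regs, unsigned int instr)
	{
		int ret = emulate_step(regs, instr);

		if (ret > 0)
			return 0;	/* emulated; regs->nip already advanced          */
		if (ret < 0)
			return -1;	/* refused, e.g. mtmsr that would clear MSR_RI   */
		return 1;		/* not handled; fall back to hardware stepping   */
	}
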
diff --git a/arch/ppc64/lib/strcase.c b/arch/powerpc/lib/strcase.c
index e84f243368c0..36b521091bbc 100644
--- a/arch/ppc64/lib/strcase.c
+++ b/arch/powerpc/lib/strcase.c
@@ -1,11 +1,3 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/ctype.h> 1#include <linux/ctype.h>
10 2
11int strcasecmp(const char *s1, const char *s2) 3int strcasecmp(const char *s1, const char *s2)
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
new file mode 100644
index 000000000000..b9ca84ed8927
--- /dev/null
+++ b/arch/powerpc/lib/string.S
@@ -0,0 +1,198 @@
1/*
2 * String handling functions for PowerPC.
3 *
4 * Copyright (C) 1996 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <asm/processor.h>
13#include <asm/errno.h>
14#include <asm/ppc_asm.h>
15
16 .section __ex_table,"a"
17#ifdef CONFIG_PPC64
18 .align 3
19#define EXTBL .llong
20#else
21 .align 2
22#define EXTBL .long
23#endif
24 .text
25
26_GLOBAL(strcpy)
27 addi r5,r3,-1
28 addi r4,r4,-1
291: lbzu r0,1(r4)
30 cmpwi 0,r0,0
31 stbu r0,1(r5)
32 bne 1b
33 blr
34
35/* This clears out any unused part of the destination buffer,
36 just as the libc version does. -- paulus */
37_GLOBAL(strncpy)
38 cmpwi 0,r5,0
39 beqlr
40 mtctr r5
41 addi r6,r3,-1
42 addi r4,r4,-1
431: lbzu r0,1(r4)
44 cmpwi 0,r0,0
45 stbu r0,1(r6)
46 bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
47 bnelr /* if we didn't hit a null char, we're done */
48 mfctr r5
49 cmpwi 0,r5,0 /* any space left in destination buffer? */
50 beqlr /* we know r0 == 0 here */
512: stbu r0,1(r6) /* clear it out if so */
52 bdnz 2b
53 blr
54
55_GLOBAL(strcat)
56 addi r5,r3,-1
57 addi r4,r4,-1
581: lbzu r0,1(r5)
59 cmpwi 0,r0,0
60 bne 1b
61 addi r5,r5,-1
621: lbzu r0,1(r4)
63 cmpwi 0,r0,0
64 stbu r0,1(r5)
65 bne 1b
66 blr
67
68_GLOBAL(strcmp)
69 addi r5,r3,-1
70 addi r4,r4,-1
711: lbzu r3,1(r5)
72 cmpwi 1,r3,0
73 lbzu r0,1(r4)
74 subf. r3,r0,r3
75 beqlr 1
76 beq 1b
77 blr
78
79_GLOBAL(strlen)
80 addi r4,r3,-1
811: lbzu r0,1(r4)
82 cmpwi 0,r0,0
83 bne 1b
84 subf r3,r3,r4
85 blr
86
87_GLOBAL(memcmp)
88 cmpwi 0,r5,0
89 ble- 2f
90 mtctr r5
91 addi r6,r3,-1
92 addi r4,r4,-1
931: lbzu r3,1(r6)
94 lbzu r0,1(r4)
95 subf. r3,r0,r3
96 bdnzt 2,1b
97 blr
982: li r3,0
99 blr
100
101_GLOBAL(memchr)
102 cmpwi 0,r5,0
103 ble- 2f
104 mtctr r5
105 addi r3,r3,-1
1061: lbzu r0,1(r3)
107 cmpw 0,r0,r4
108 bdnzf 2,1b
109 beqlr
1102: li r3,0
111 blr
112
113_GLOBAL(__clear_user)
114 addi r6,r3,-4
115 li r3,0
116 li r5,0
117 cmplwi 0,r4,4
118 blt 7f
119 /* clear a single word */
12011: stwu r5,4(r6)
121 beqlr
122 /* clear word sized chunks */
123 andi. r0,r6,3
124 add r4,r0,r4
125 subf r6,r0,r6
126 srwi r0,r4,2
127 andi. r4,r4,3
128 mtctr r0
129 bdz 7f
1301: stwu r5,4(r6)
131 bdnz 1b
132 /* clear byte sized chunks */
1337: cmpwi 0,r4,0
134 beqlr
135 mtctr r4
136 addi r6,r6,3
1378: stbu r5,1(r6)
138 bdnz 8b
139 blr
14090: mr r3,r4
141 blr
14291: mfctr r3
143 slwi r3,r3,2
144 add r3,r3,r4
145 blr
14692: mfctr r3
147 blr
148
149 .section __ex_table,"a"
150 EXTBL 11b,90b
151 EXTBL 1b,91b
152 EXTBL 8b,92b
153 .text
154
155_GLOBAL(__strncpy_from_user)
156 addi r6,r3,-1
157 addi r4,r4,-1
158 cmpwi 0,r5,0
159 beq 2f
160 mtctr r5
1611: lbzu r0,1(r4)
162 cmpwi 0,r0,0
163 stbu r0,1(r6)
164 bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
165 beq 3f
1662: addi r6,r6,1
1673: subf r3,r3,r6
168 blr
16999: li r3,-EFAULT
170 blr
171
172 .section __ex_table,"a"
173 EXTBL 1b,99b
174 .text
175
176/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
177_GLOBAL(__strnlen_user)
178 addi r7,r3,-1
179 subf r6,r7,r5 /* top+1 - str */
180 cmplw 0,r4,r6
181 bge 0f
182 mr r6,r4
1830: mtctr r6 /* ctr = min(len, top - str) */
1841: lbzu r0,1(r7) /* get next byte */
185 cmpwi 0,r0,0
186 bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
187 addi r7,r7,1
188 subf r3,r3,r7 /* number of bytes we have looked at */
189 beqlr /* return if we found a 0 byte */
190 cmpw 0,r3,r4 /* did we look at all len bytes? */
191 blt 99f /* if not, must have hit top */
192 addi r3,r4,1 /* return len + 1 to indicate no null found */
193 blr
19499: li r3,0 /* bad address, return 0 */
195 blr
196
197 .section __ex_table,"a"
198 EXTBL 1b,99b
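
The __strnlen_user contract above, restated as a C model (a sketch only): return the string length including the NUL, len + 1 if no NUL occurs within len bytes, and 0 on a fault or when the scan runs off 'top'.

	static long strnlen_user_model(const char *str, long len, unsigned long top)
	{
		long room = (long)(top + 1 - (unsigned long)str);	/* bytes available up to top */
		long n = (len < room) ? len : room;
		long i;

		for (i = 0; i < n; i++)
			if (str[i] == '\0')
				return i + 1;			/* length including the NUL */
		return (n < len) ? 0 : len + 1;			/* ran into top, or no NUL in len bytes */
	}
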
diff --git a/arch/ppc64/lib/usercopy.c b/arch/powerpc/lib/usercopy_64.c
index 5eea6f3c1e03..5eea6f3c1e03 100644
--- a/arch/ppc64/lib/usercopy.c
+++ b/arch/powerpc/lib/usercopy_64.c
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
new file mode 100644
index 000000000000..3d79ce281b67
--- /dev/null
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -0,0 +1,120 @@
1/*
2 * Modifications by Matt Porter (mporter@mvista.com) to support
3 * PPC44x Book E processors.
4 *
5 * This file contains the routines for initializing the MMU
6 * on the 4xx series of chips.
7 * -- paulus
8 *
9 * Derived from arch/ppc/mm/init.c:
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 *
12 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
13 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
14 * Copyright (C) 1996 Paul Mackerras
15 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
16 *
17 * Derived from "arch/i386/mm/init.c"
18 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 *
25 */
26
27#include <linux/config.h>
28#include <linux/signal.h>
29#include <linux/sched.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/types.h>
34#include <linux/ptrace.h>
35#include <linux/mman.h>
36#include <linux/mm.h>
37#include <linux/swap.h>
38#include <linux/stddef.h>
39#include <linux/vmalloc.h>
40#include <linux/init.h>
41#include <linux/delay.h>
42#include <linux/highmem.h>
43
44#include <asm/pgalloc.h>
45#include <asm/prom.h>
46#include <asm/io.h>
47#include <asm/mmu_context.h>
48#include <asm/pgtable.h>
49#include <asm/mmu.h>
50#include <asm/uaccess.h>
51#include <asm/smp.h>
52#include <asm/bootx.h>
53#include <asm/machdep.h>
54#include <asm/setup.h>
55
56#include "mmu_decl.h"
57
58extern char etext[], _stext[];
59
60/* Used by the 44x TLB replacement exception handler.
61 * Just needed it declared someplace.
62 */
63unsigned int tlb_44x_index = 0;
64unsigned int tlb_44x_hwater = 62;
65
66/*
67 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
68 */
69static void __init
70ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
71{
72 unsigned long attrib = 0;
73
74 __asm__ __volatile__("\
75 clrrwi %2,%2,10\n\
76 ori %2,%2,%4\n\
77 clrrwi %1,%1,10\n\
78 li %0,0\n\
79 ori %0,%0,%5\n\
80 tlbwe %2,%3,%6\n\
81 tlbwe %1,%3,%7\n\
82 tlbwe %0,%3,%8"
83 :
84 : "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
85 "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
86 "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
87 "i" (PPC44x_TLB_PAGEID),
88 "i" (PPC44x_TLB_XLAT),
89 "i" (PPC44x_TLB_ATTRIB));
90}
91
92/*
93 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
94 */
95void __init MMU_init_hw(void)
96{
97 flush_instruction_cache();
98}
99
100unsigned long __init mmu_mapin_ram(void)
101{
102 unsigned int pinned_tlbs = 1;
103 int i;
104
105 /* Determine number of entries necessary to cover lowmem */
106 pinned_tlbs = (unsigned int)
107 (_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT);
108
109 /* Write upper watermark to save location */
110 tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
111
112 /* If necessary, set additional pinned TLBs */
113 if (pinned_tlbs > 1)
114 for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
115 unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
116 ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
117 }
118
119 return total_lowmem;
120}
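
As an illustration of the pinned-entry layout the loop above produces, here is a stand-alone sketch that prints the same slot and address arithmetic. PPC44x_PIN_SIZE = 256MB, PPC44x_LOW_SLOT = 63 and PAGE_OFFSET = 0xc0000000 are assumptions made for the example, not values taken from this patch; the entry in slot LOW_SLOT itself is presumably set up earlier in boot, since the loop only adds pinned_tlbs - 1 further entries:

/* Hypothetical illustration of the pinned-TLB layout set up above. */
#include <stdio.h>

#define PIN_SIZE	0x10000000UL	/* 256MB per pinned entry (assumed) */
#define LOW_SLOT	63U		/* highest pinned slot (assumed) */
#define PAGE_OFFSET	0xc0000000UL	/* kernel lowmem base (assumed) */

int main(void)
{
	unsigned long total_lowmem = 0x30000000UL;	/* say, 768MB */
	unsigned int pinned = (total_lowmem + PIN_SIZE - 1) / PIN_SIZE;
	unsigned int i;

	printf("hwater = %u\n", LOW_SLOT - pinned);
	for (i = LOW_SLOT - (pinned - 1); i < LOW_SLOT; i++) {
		unsigned long phys = (unsigned long)(LOW_SLOT - i) * PIN_SIZE;

		printf("slot %u: virt 0x%08lx -> phys 0x%08lx\n",
		       i, PAGE_OFFSET + phys, phys);
	}
	return 0;
}
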
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
new file mode 100644
index 000000000000..b7bcbc232f39
--- /dev/null
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -0,0 +1,141 @@
1/*
2 * This file contains the routines for initializing the MMU
3 * on the 4xx series of chips.
4 * -- paulus
5 *
6 * Derived from arch/ppc/mm/init.c:
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
10 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
11 * Copyright (C) 1996 Paul Mackerras
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * Derived from "arch/i386/mm/init.c"
15 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24#include <linux/config.h>
25#include <linux/signal.h>
26#include <linux/sched.h>
27#include <linux/kernel.h>
28#include <linux/errno.h>
29#include <linux/string.h>
30#include <linux/types.h>
31#include <linux/ptrace.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/swap.h>
35#include <linux/stddef.h>
36#include <linux/vmalloc.h>
37#include <linux/init.h>
38#include <linux/delay.h>
39#include <linux/highmem.h>
40
41#include <asm/pgalloc.h>
42#include <asm/prom.h>
43#include <asm/io.h>
44#include <asm/mmu_context.h>
45#include <asm/pgtable.h>
46#include <asm/mmu.h>
47#include <asm/uaccess.h>
48#include <asm/smp.h>
49#include <asm/bootx.h>
50#include <asm/machdep.h>
51#include <asm/setup.h>
52#include "mmu_decl.h"
53
54extern int __map_without_ltlbs;
55/*
56 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
57 */
58void __init MMU_init_hw(void)
59{
60 /*
61 * The Zone Protection Register (ZPR) defines how protection will
62 * be applied to every page which is a member of a given zone. At
63 * present, we utilize only two of the 4xx's zones.
64 * The zone index bits (of ZSEL) in the PTE are used for software
65 * indicators, except the LSB. For user access, zone 1 is used,
66 * for kernel access, zone 0 is used. We set all but zone 1
67 * to zero, allowing only kernel access as indicated in the PTE.
68 * For zone 1, we set a 01 binary (a value of 10 will not work)
69 * to allow user access as indicated in the PTE. This also allows
70 * kernel access as indicated in the PTE.
71 */
72
73 mtspr(SPRN_ZPR, 0x10000000);
74
75 flush_instruction_cache();
76
77 /*
78 * Set up the real-mode cache parameters for the exception vector
79 * handlers (which are run in real-mode).
80 */
81
82 mtspr(SPRN_DCWR, 0x00000000); /* All caching is write-back */
83
84 /*
85 * Cache instruction and data space where the exception
86 * vectors and the kernel live in real-mode.
87 */
88
89 mtspr(SPRN_DCCR, 0xF0000000); /* 512 MB of data space at 0x0. */
90 mtspr(SPRN_ICCR, 0xF0000000); /* 512 MB of instr. space at 0x0. */
91}
92
93#define LARGE_PAGE_SIZE_16M (1<<24)
94#define LARGE_PAGE_SIZE_4M (1<<22)
95
96unsigned long __init mmu_mapin_ram(void)
97{
98 unsigned long v, s;
99 phys_addr_t p;
100
101 v = KERNELBASE;
102 p = PPC_MEMSTART;
103 s = 0;
104
105 if (__map_without_ltlbs) {
106 return s;
107 }
108
109 while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
110 pmd_t *pmdp;
111 unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
112
113 spin_lock(&init_mm.page_table_lock);
114 pmdp = pmd_offset(pgd_offset_k(v), v);
115 pmd_val(*pmdp++) = val;
116 pmd_val(*pmdp++) = val;
117 pmd_val(*pmdp++) = val;
118 pmd_val(*pmdp++) = val;
119 spin_unlock(&init_mm.page_table_lock);
120
121 v += LARGE_PAGE_SIZE_16M;
122 p += LARGE_PAGE_SIZE_16M;
123 s += LARGE_PAGE_SIZE_16M;
124 }
125
126 while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
127 pmd_t *pmdp;
128 unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
129
130 spin_lock(&init_mm.page_table_lock);
131 pmdp = pmd_offset(pgd_offset_k(v), v);
132 pmd_val(*pmdp) = val;
133 spin_unlock(&init_mm.page_table_lock);
134
135 v += LARGE_PAGE_SIZE_4M;
136 p += LARGE_PAGE_SIZE_4M;
137 s += LARGE_PAGE_SIZE_4M;
138 }
139
140 return s;
141}
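
The two loops above cover as much of lowmem as possible with 16MB and then 4MB large-page mappings (each 16MB mapping fills four consecutive pmd slots, consistent with a 4MB span per pmd entry on 4xx); whatever remains below total_lowmem is left to ordinary 4KB page tables. A small sketch of the resulting split, using the same step sizes (the helper name is hypothetical):

/* Sketch: how many bytes the large-page loops above would map.
 * E.g. 94MB of lowmem -> five 16MB + three 4MB mappings, 2MB left
 * for normal 4KB PTEs. */
static unsigned long largepage_coverage_sketch(unsigned long total_lowmem,
					       unsigned int *n16m,
					       unsigned int *n4m)
{
	unsigned long s = 0;

	*n16m = *n4m = 0;
	while (s + (16UL << 20) <= total_lowmem) {
		s += 16UL << 20;
		(*n16m)++;
	}
	while (s + (4UL << 20) <= total_lowmem) {
		s += 4UL << 20;
		(*n4m)++;
	}
	return s;	/* bytes covered, as mmu_mapin_ram() reports */
}
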
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
new file mode 100644
index 000000000000..93441e7a2921
--- /dev/null
+++ b/arch/powerpc/mm/Makefile
@@ -0,0 +1,21 @@
1#
2# Makefile for the linux ppc-specific parts of the memory manager.
3#
4
5ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc
7endif
8
9obj-y := fault.o mem.o lmb.o
10obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
11hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
12obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
13 hash_utils_64.o hash_low_64.o tlb_64.o \
14 slb_low.o slb.o stab.o mmap.o imalloc.o \
15 $(hash-y)
16obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o tlb_32.o
17obj-$(CONFIG_40x) += 4xx_mmu.o
18obj-$(CONFIG_44x) += 44x_mmu.o
19obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
20obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
21obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/ppc64/mm/fault.c b/arch/powerpc/mm/fault.c
index be3f25cf3e9f..841d8b6323a8 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/ppc/mm/fault.c 2 * arch/ppc/mm/fault.c
3 * 3 *
4 * PowerPC version 4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 6 *
7 * Derived from "arch/i386/mm/fault.c" 7 * Derived from "arch/i386/mm/fault.c"
@@ -24,10 +24,11 @@
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/ptrace.h>
27#include <linux/mman.h> 28#include <linux/mman.h>
28#include <linux/mm.h> 29#include <linux/mm.h>
29#include <linux/interrupt.h> 30#include <linux/interrupt.h>
30#include <linux/smp_lock.h> 31#include <linux/highmem.h>
31#include <linux/module.h> 32#include <linux/module.h>
32#include <linux/kprobes.h> 33#include <linux/kprobes.h>
33 34
@@ -37,6 +38,7 @@
37#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
38#include <asm/system.h> 39#include <asm/system.h>
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <asm/tlbflush.h>
40#include <asm/kdebug.h> 42#include <asm/kdebug.h>
41#include <asm/siginfo.h> 43#include <asm/siginfo.h>
42 44
@@ -78,6 +80,7 @@ static int store_updates_sp(struct pt_regs *regs)
78 return 0; 80 return 0;
79} 81}
80 82
83#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
81static void do_dabr(struct pt_regs *regs, unsigned long error_code) 84static void do_dabr(struct pt_regs *regs, unsigned long error_code)
82{ 85{
83 siginfo_t info; 86 siginfo_t info;
@@ -99,12 +102,18 @@ static void do_dabr(struct pt_regs *regs, unsigned long error_code)
99 info.si_addr = (void __user *)regs->nip; 102 info.si_addr = (void __user *)regs->nip;
100 force_sig_info(SIGTRAP, &info, current); 103 force_sig_info(SIGTRAP, &info, current);
101} 104}
105#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
102 106
103/* 107/*
104 * The error_code parameter is 108 * For 600- and 800-family processors, the error_code parameter is DSISR
109 * for a data fault, SRR1 for an instruction fault. For 400-family processors
110 * the error_code parameter is ESR for a data fault, 0 for an instruction
111 * fault.
112 * For 64-bit processors, the error_code parameter is
105 * - DSISR for a non-SLB data access fault, 113 * - DSISR for a non-SLB data access fault,
106 * - SRR1 & 0x08000000 for a non-SLB instruction access fault 114 * - SRR1 & 0x08000000 for a non-SLB instruction access fault
107 * - 0 any SLB fault. 115 * - 0 any SLB fault.
116 *
108 * The return value is 0 if the fault was handled, or the signal 117 * The return value is 0 if the fault was handled, or the signal
109 * number if this is a kernel fault that can't be handled here. 118 * number if this is a kernel fault that can't be handled here.
110 */ 119 */
@@ -114,12 +123,25 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
114 struct vm_area_struct * vma; 123 struct vm_area_struct * vma;
115 struct mm_struct *mm = current->mm; 124 struct mm_struct *mm = current->mm;
116 siginfo_t info; 125 siginfo_t info;
117 unsigned long code = SEGV_MAPERR; 126 int code = SEGV_MAPERR;
118 unsigned long is_write = error_code & DSISR_ISSTORE; 127 int is_write = 0;
119 unsigned long trap = TRAP(regs); 128 int trap = TRAP(regs);
120 unsigned long is_exec = trap == 0x400; 129 int is_exec = trap == 0x400;
121 130
122 BUG_ON((trap == 0x380) || (trap == 0x480)); 131#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
132 /*
133 * Fortunately the bit assignments in SRR1 for an instruction
134 * fault and DSISR for a data fault are mostly the same for the
135 * bits we are interested in. But there are some bits which
136 * indicate errors in DSISR but can validly be set in SRR1.
137 */
138 if (trap == 0x400)
139 error_code &= 0x48200000;
140 else
141 is_write = error_code & DSISR_ISSTORE;
142#else
143 is_write = error_code & ESR_DST;
144#endif /* CONFIG_4xx || CONFIG_BOOKE */
123 145
124 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code, 146 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
125 11, SIGSEGV) == NOTIFY_STOP) 147 11, SIGSEGV) == NOTIFY_STOP)
@@ -134,10 +156,13 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
134 if (!user_mode(regs) && (address >= TASK_SIZE)) 156 if (!user_mode(regs) && (address >= TASK_SIZE))
135 return SIGSEGV; 157 return SIGSEGV;
136 158
159#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
137 if (error_code & DSISR_DABRMATCH) { 160 if (error_code & DSISR_DABRMATCH) {
161 /* DABR match */
138 do_dabr(regs, error_code); 162 do_dabr(regs, error_code);
139 return 0; 163 return 0;
140 } 164 }
165#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
141 166
142 if (in_atomic() || mm == NULL) { 167 if (in_atomic() || mm == NULL) {
143 if (!user_mode(regs)) 168 if (!user_mode(regs))
@@ -176,10 +201,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
176 vma = find_vma(mm, address); 201 vma = find_vma(mm, address);
177 if (!vma) 202 if (!vma)
178 goto bad_area; 203 goto bad_area;
179 204 if (vma->vm_start <= address)
180 if (vma->vm_start <= address) {
181 goto good_area; 205 goto good_area;
182 }
183 if (!(vma->vm_flags & VM_GROWSDOWN)) 206 if (!(vma->vm_flags & VM_GROWSDOWN))
184 goto bad_area; 207 goto bad_area;
185 208
@@ -214,35 +237,76 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
214 && (!user_mode(regs) || !store_updates_sp(regs))) 237 && (!user_mode(regs) || !store_updates_sp(regs)))
215 goto bad_area; 238 goto bad_area;
216 } 239 }
217
218 if (expand_stack(vma, address)) 240 if (expand_stack(vma, address))
219 goto bad_area; 241 goto bad_area;
220 242
221good_area: 243good_area:
222 code = SEGV_ACCERR; 244 code = SEGV_ACCERR;
245#if defined(CONFIG_6xx)
246 if (error_code & 0x95700000)
247 /* an error such as lwarx to I/O controller space,
248 address matching DABR, eciwx, etc. */
249 goto bad_area;
250#endif /* CONFIG_6xx */
251#if defined(CONFIG_8xx)
252 /* The MPC8xx seems to always set 0x80000000, which is
253 * "undefined". Of those that can be set, this is the only
254 * one which seems bad.
255 */
256 if (error_code & 0x10000000)
257 /* Guarded storage error. */
258 goto bad_area;
259#endif /* CONFIG_8xx */
223 260
224 if (is_exec) { 261 if (is_exec) {
262#ifdef CONFIG_PPC64
225 /* protection fault */ 263 /* protection fault */
226 if (error_code & DSISR_PROTFAULT) 264 if (error_code & DSISR_PROTFAULT)
227 goto bad_area; 265 goto bad_area;
228 if (!(vma->vm_flags & VM_EXEC)) 266 if (!(vma->vm_flags & VM_EXEC))
229 goto bad_area; 267 goto bad_area;
268#endif
269#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
270 pte_t *ptep;
271
272 /* Since 4xx/Book-E supports per-page execute permission,
273 * we lazily flush dcache to icache. */
274 ptep = NULL;
275 if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
276 struct page *page = pte_page(*ptep);
277
278 if (! test_bit(PG_arch_1, &page->flags)) {
279 flush_dcache_icache_page(page);
280 set_bit(PG_arch_1, &page->flags);
281 }
282 pte_update(ptep, 0, _PAGE_HWEXEC);
283 _tlbie(address);
284 pte_unmap(ptep);
285 up_read(&mm->mmap_sem);
286 return 0;
287 }
288 if (ptep != NULL)
289 pte_unmap(ptep);
290#endif
230 /* a write */ 291 /* a write */
231 } else if (is_write) { 292 } else if (is_write) {
232 if (!(vma->vm_flags & VM_WRITE)) 293 if (!(vma->vm_flags & VM_WRITE))
233 goto bad_area; 294 goto bad_area;
234 /* a read */ 295 /* a read */
235 } else { 296 } else {
236 if (!(vma->vm_flags & VM_READ)) 297 /* protection fault */
298 if (error_code & 0x08000000)
299 goto bad_area;
300 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
237 goto bad_area; 301 goto bad_area;
238 } 302 }
239 303
240 survive:
241 /* 304 /*
242 * If for any reason at all we couldn't handle the fault, 305 * If for any reason at all we couldn't handle the fault,
243 * make sure we exit gracefully rather than endlessly redo 306 * make sure we exit gracefully rather than endlessly redo
244 * the fault. 307 * the fault.
245 */ 308 */
309 survive:
246 switch (handle_mm_fault(mm, vma, address, is_write)) { 310 switch (handle_mm_fault(mm, vma, address, is_write)) {
247 311
248 case VM_FAULT_MINOR: 312 case VM_FAULT_MINOR:
@@ -268,15 +332,11 @@ bad_area:
268bad_area_nosemaphore: 332bad_area_nosemaphore:
269 /* User mode accesses cause a SIGSEGV */ 333 /* User mode accesses cause a SIGSEGV */
270 if (user_mode(regs)) { 334 if (user_mode(regs)) {
271 info.si_signo = SIGSEGV; 335 _exception(SIGSEGV, regs, code, address);
272 info.si_errno = 0;
273 info.si_code = code;
274 info.si_addr = (void __user *) address;
275 force_sig_info(SIGSEGV, &info, current);
276 return 0; 336 return 0;
277 } 337 }
278 338
279 if (trap == 0x400 && (error_code & DSISR_PROTFAULT) 339 if (is_exec && (error_code & DSISR_PROTFAULT)
280 && printk_ratelimit()) 340 && printk_ratelimit())
281 printk(KERN_CRIT "kernel tried to execute NX-protected" 341 printk(KERN_CRIT "kernel tried to execute NX-protected"
282 " page (%lx) - exploit attempt? (uid: %d)\n", 342 " page (%lx) - exploit attempt? (uid: %d)\n",
@@ -315,8 +375,8 @@ do_sigbus:
315 375
316/* 376/*
317 * bad_page_fault is called when we have a bad access from the kernel. 377 * bad_page_fault is called when we have a bad access from the kernel.
318 * It is called from do_page_fault above and from some of the procedures 378 * It is called from the DSI and ISI handlers in head.S and from some
319 * in traps.c. 379 * of the procedures in traps.c.
320 */ 380 */
321void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) 381void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
322{ 382{
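
One detail of the new 4xx/Book-E execute-fault branch above that is easy to miss is the PG_arch_1 test: the flag records that a page's instruction cache has already been made consistent with its data cache, so the flush happens at most once per page. A minimal sketch of that idea, using the same flag and flush helper the patch calls (the wrapper name is hypothetical):

/* Sketch of the lazy dcache-to-icache flush done on first execution. */
static void lazy_icache_flush_sketch(struct page *page)
{
	if (!test_bit(PG_arch_1, &page->flags)) {
		/* write back dirty data-cache lines, invalidate the icache */
		flush_dcache_icache_page(page);
		/* remember that this page is now icache-coherent */
		set_bit(PG_arch_1, &page->flags);
	}
}
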
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
new file mode 100644
index 000000000000..af9ca0eb6d55
--- /dev/null
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -0,0 +1,237 @@
1/*
2 * Modifications by Kumar Gala (kumar.gala@freescale.com) to support
3 * E500 Book E processors.
4 *
5 * Copyright 2004 Freescale Semiconductor, Inc
6 *
7 * This file contains the routines for initializing the MMU
8 * on the 4xx series of chips.
9 * -- paulus
10 *
11 * Derived from arch/ppc/mm/init.c:
12 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
13 *
14 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
15 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
16 * Copyright (C) 1996 Paul Mackerras
17 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
18 *
19 * Derived from "arch/i386/mm/init.c"
20 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 *
27 */
28
29#include <linux/config.h>
30#include <linux/signal.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/errno.h>
34#include <linux/string.h>
35#include <linux/types.h>
36#include <linux/ptrace.h>
37#include <linux/mman.h>
38#include <linux/mm.h>
39#include <linux/swap.h>
40#include <linux/stddef.h>
41#include <linux/vmalloc.h>
42#include <linux/init.h>
43#include <linux/delay.h>
44#include <linux/highmem.h>
45
46#include <asm/pgalloc.h>
47#include <asm/prom.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/bootx.h>
55#include <asm/machdep.h>
56#include <asm/setup.h>
57
58extern void loadcam_entry(unsigned int index);
59unsigned int tlbcam_index;
60unsigned int num_tlbcam_entries;
61static unsigned long __cam0, __cam1, __cam2;
62extern unsigned long total_lowmem;
63extern unsigned long __max_low_memory;
64#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
65
66#define NUM_TLBCAMS (16)
67
68struct tlbcam {
69 u32 MAS0;
70 u32 MAS1;
71 u32 MAS2;
72 u32 MAS3;
73 u32 MAS7;
74} TLBCAM[NUM_TLBCAMS];
75
76struct tlbcamrange {
77 unsigned long start;
78 unsigned long limit;
79 phys_addr_t phys;
80} tlbcam_addrs[NUM_TLBCAMS];
81
82extern unsigned int tlbcam_index;
83
84/*
85 * Return PA for this VA if it is mapped by a CAM, or 0
86 */
87unsigned long v_mapped_by_tlbcam(unsigned long va)
88{
89 int b;
90 for (b = 0; b < tlbcam_index; ++b)
91 if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
92 return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
93 return 0;
94}
95
96/*
97 * Return VA for a given PA or 0 if not mapped
98 */
99unsigned long p_mapped_by_tlbcam(unsigned long pa)
100{
101 int b;
102 for (b = 0; b < tlbcam_index; ++b)
103 if (pa >= tlbcam_addrs[b].phys
104 && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
105 +tlbcam_addrs[b].phys)
106 return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
107 return 0;
108}
109
110/*
111 * Set up one of the TLB CAM (TLBCAM) entries used to map kernel lowmem.
112 * The parameters are not checked; in particular size must be a power
113 * of 4 between 4k and 256M.
114 */
115void settlbcam(int index, unsigned long virt, phys_addr_t phys,
116 unsigned int size, int flags, unsigned int pid)
117{
118 unsigned int tsize, lz;
119
120 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
121 tsize = (21 - lz) / 2;
122
123#ifdef CONFIG_SMP
124 if ((flags & _PAGE_NO_CACHE) == 0)
125 flags |= _PAGE_COHERENT;
126#endif
127
128 TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
129 TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
130 TLBCAM[index].MAS2 = virt & PAGE_MASK;
131
132 TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
133 TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
134 TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
135 TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
136 TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
137
138 TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
139 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
140
141#ifndef CONFIG_KGDB /* want user access for breakpoints */
142 if (flags & _PAGE_USER) {
143 TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
144 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
145 }
146#else
147 TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
148 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
149#endif
150
151 tlbcam_addrs[index].start = virt;
152 tlbcam_addrs[index].limit = virt + size - 1;
153 tlbcam_addrs[index].phys = phys;
154
155 loadcam_entry(index);
156}
157
158void invalidate_tlbcam_entry(int index)
159{
160 TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
161 TLBCAM[index].MAS1 = ~MAS1_VALID;
162
163 loadcam_entry(index);
164}
165
166void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
167 unsigned long cam2)
168{
169 settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
170 tlbcam_index++;
171 if (cam1) {
172 tlbcam_index++;
173 settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
174 }
175 if (cam2) {
176 tlbcam_index++;
177 settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
178 }
179}
180
181/*
182 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
183 */
184void __init MMU_init_hw(void)
185{
186 flush_instruction_cache();
187}
188
189unsigned long __init mmu_mapin_ram(void)
190{
191 cam_mapin_ram(__cam0, __cam1, __cam2);
192
193 return __cam0 + __cam1 + __cam2;
194}
195
196
197void __init
198adjust_total_lowmem(void)
199{
200 unsigned long max_low_mem = MAX_LOW_MEM;
201 unsigned long cam_max = 0x10000000;
202 unsigned long ram;
203
204 /* adjust CAM size to max_low_mem */
205 if (max_low_mem < cam_max)
206 cam_max = max_low_mem;
207
208 /* adjust lowmem size to max_low_mem */
209 if (max_low_mem < total_lowmem)
210 ram = max_low_mem;
211 else
212 ram = total_lowmem;
213
214 /* Calculate CAM values */
215 __cam0 = 1UL << 2 * (__ilog2(ram) / 2);
216 if (__cam0 > cam_max)
217 __cam0 = cam_max;
218 ram -= __cam0;
219 if (ram) {
220 __cam1 = 1UL << 2 * (__ilog2(ram) / 2);
221 if (__cam1 > cam_max)
222 __cam1 = cam_max;
223 ram -= __cam1;
224 }
225 if (ram) {
226 __cam2 = 1UL << 2 * (__ilog2(ram) / 2);
227 if (__cam2 > cam_max)
228 __cam2 = cam_max;
229 ram -= __cam2;
230 }
231
232 printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
233 " CAM2=%ldMb residual: %ldMb\n",
234 __cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
235 (total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
236 __max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
237}
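
Two pieces of arithmetic above are worth unpacking. In settlbcam(), (21 - cntlzw(size)) / 2 turns a power-of-4 size into the MAS1 TSIZE field (4KB -> 1, 16KB -> 2, ..., 256MB -> 9). In adjust_total_lowmem(), 1UL << 2 * (__ilog2(ram) / 2) is the largest power of 4 that fits in the remaining lowmem, capped at 256MB per CAM. A sketch of both, with hypothetical helper names:

/* Sketch of the TSIZE encoding used by settlbcam() above. */
static unsigned int tsize_sketch(unsigned int size)
{
	unsigned int lz = __builtin_clz(size);	/* stand-in for cntlzw */

	return (21 - lz) / 2;	/* 4KB (lz=19) -> 1, 256MB (lz=3) -> 9 */
}

/* Sketch of one step of the CAM sizing in adjust_total_lowmem() above:
 * the largest power of 4 <= ram, capped at cam_max.
 * Example: 192MB of lowmem gives CAM0 = CAM1 = CAM2 = 64MB, residual 0. */
static unsigned long cam_step_sketch(unsigned long ram, unsigned long cam_max)
{
	unsigned long log2 = 0;
	unsigned long cam;

	while (ram >> (log2 + 1))
		log2++;			/* floor(log2(ram)), like __ilog2() */
	cam = 1UL << (2 * (log2 / 2));
	return cam > cam_max ? cam_max : cam;
}
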
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
new file mode 100644
index 000000000000..12ccd7155bac
--- /dev/null
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -0,0 +1,618 @@
1/*
2 * arch/ppc/kernel/hashtable.S
3 *
4 * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
9 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
10 * Adapted for Power Macintosh by Paul Mackerras.
11 * Low-level exception handlers and MMU support
12 * rewritten by Paul Mackerras.
13 * Copyright (C) 1996 Paul Mackerras.
14 *
15 * This file contains low-level assembler routines for managing
16 * the PowerPC MMU hash table. (PPC 8xx processors don't use a
17 * hash table, so this file is not used on them.)
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <asm/reg.h>
28#include <asm/page.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/ppc_asm.h>
32#include <asm/thread_info.h>
33#include <asm/asm-offsets.h>
34
35#ifdef CONFIG_SMP
36 .comm mmu_hash_lock,4
37#endif /* CONFIG_SMP */
38
39/*
40 * Sync CPUs with hash_page taking & releasing the hash
41 * table lock
42 */
43#ifdef CONFIG_SMP
44 .text
45_GLOBAL(hash_page_sync)
46 lis r8,mmu_hash_lock@h
47 ori r8,r8,mmu_hash_lock@l
48 lis r0,0x0fff
49 b 10f
5011: lwz r6,0(r8)
51 cmpwi 0,r6,0
52 bne 11b
5310: lwarx r6,0,r8
54 cmpwi 0,r6,0
55 bne- 11b
56 stwcx. r0,0,r8
57 bne- 10b
58 isync
59 eieio
60 li r0,0
61 stw r0,0(r8)
62 blr
63#endif
64
65/*
66 * Load a PTE into the hash table, if possible.
67 * The address is in r4, and r3 contains an access flag:
68 * _PAGE_RW (0x400) if a write.
69 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
70 * SPRG3 contains the physical address of the current task's thread.
71 *
72 * Returns to the caller if the access is illegal or there is no
73 * mapping for the address. Otherwise it places an appropriate PTE
74 * in the hash table and returns from the exception.
75 * Uses r0, r3 - r8, ctr, lr.
76 */
77 .text
78_GLOBAL(hash_page)
79#ifdef CONFIG_PPC64BRIDGE
80 mfmsr r0
81 clrldi r0,r0,1 /* make sure it's in 32-bit mode */
82 MTMSRD(r0)
83 isync
84#endif
85 tophys(r7,0) /* gets -KERNELBASE into r7 */
86#ifdef CONFIG_SMP
87 addis r8,r7,mmu_hash_lock@h
88 ori r8,r8,mmu_hash_lock@l
89 lis r0,0x0fff
90 b 10f
9111: lwz r6,0(r8)
92 cmpwi 0,r6,0
93 bne 11b
9410: lwarx r6,0,r8
95 cmpwi 0,r6,0
96 bne- 11b
97 stwcx. r0,0,r8
98 bne- 10b
99 isync
100#endif
101 /* Get PTE (linux-style) and check access */
102 lis r0,KERNELBASE@h /* check if kernel address */
103 cmplw 0,r4,r0
104 mfspr r8,SPRN_SPRG3 /* current task's THREAD (phys) */
105 ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
106 lwz r5,PGDIR(r8) /* virt page-table root */
107 blt+ 112f /* assume user more likely */
108 lis r5,swapper_pg_dir@ha /* if kernel address, use */
109 addi r5,r5,swapper_pg_dir@l /* kernel page table */
110 rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
111112: add r5,r5,r7 /* convert to phys addr */
112 rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
113 lwz r8,0(r5) /* get pmd entry */
114 rlwinm. r8,r8,0,0,19 /* extract address of pte page */
115#ifdef CONFIG_SMP
116 beq- hash_page_out /* return if no mapping */
117#else
118 /* XXX it seems like the 601 will give a machine fault on the
119 rfi if its alignment is wrong (bottom 4 bits of address are
120 8 or 0xc) and we have had a not-taken conditional branch
121 to the address following the rfi. */
122 beqlr-
123#endif
124 rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */
125 rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
126 ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
127
128 /*
129 * Update the linux PTE atomically. We do the lwarx up-front
130 * because almost always, there won't be a permission violation
131 * and there won't already be an HPTE, and thus we will have
132 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
133 */
134retry:
135 lwarx r6,0,r8 /* get linux-style pte */
136 andc. r5,r3,r6 /* check access & ~permission */
137#ifdef CONFIG_SMP
138 bne- hash_page_out /* return if access not permitted */
139#else
140 bnelr-
141#endif
142 or r5,r0,r6 /* set accessed/dirty bits */
143 stwcx. r5,0,r8 /* attempt to update PTE */
144 bne- retry /* retry if someone got there first */
145
146 mfsrin r3,r4 /* get segment reg for segment */
147 mfctr r0
148 stw r0,_CTR(r11)
149 bl create_hpte /* add the hash table entry */
150
151#ifdef CONFIG_SMP
152 eieio
153 addis r8,r7,mmu_hash_lock@ha
154 li r0,0
155 stw r0,mmu_hash_lock@l(r8)
156#endif
157
158 /* Return from the exception */
159 lwz r5,_CTR(r11)
160 mtctr r5
161 lwz r0,GPR0(r11)
162 lwz r7,GPR7(r11)
163 lwz r8,GPR8(r11)
164 b fast_exception_return
165
166#ifdef CONFIG_SMP
167hash_page_out:
168 eieio
169 addis r8,r7,mmu_hash_lock@ha
170 li r0,0
171 stw r0,mmu_hash_lock@l(r8)
172 blr
173#endif /* CONFIG_SMP */
174
175/*
176 * Add an entry for a particular page to the hash table.
177 *
178 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
179 *
180 * We assume any necessary modifications to the pte (e.g. setting
181 * the accessed bit) have already been done and that there is actually
182 * a hash table in use (i.e. we're not on a 603).
183 */
184_GLOBAL(add_hash_page)
185 mflr r0
186 stw r0,4(r1)
187
188 /* Convert context and va to VSID */
189 mulli r3,r3,897*16 /* multiply context by context skew */
190 rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
191 mulli r0,r0,0x111 /* multiply by ESID skew */
192 add r3,r3,r0 /* note create_hpte trims to 24 bits */
193
194#ifdef CONFIG_SMP
195 rlwinm r8,r1,0,0,18 /* use cpu number to make tag */
196 lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */
197 oris r8,r8,12
198#endif /* CONFIG_SMP */
199
200 /*
201 * We disable interrupts here, even on UP, because we don't
202 * want to race with hash_page, and because we want the
203 * _PAGE_HASHPTE bit to be a reliable indication of whether
204 * the HPTE exists (or at least whether one did once).
205 * We also turn off the MMU for data accesses so that
206 * we can't take a hash table miss (assuming the code is
207 * covered by a BAT). -- paulus
208 */
209 mfmsr r10
210 SYNC
211 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
212 rlwinm r0,r0,0,28,26 /* clear MSR_DR */
213 mtmsr r0
214 SYNC_601
215 isync
216
217 tophys(r7,0)
218
219#ifdef CONFIG_SMP
220 addis r9,r7,mmu_hash_lock@ha
221 addi r9,r9,mmu_hash_lock@l
22210: lwarx r0,0,r9 /* take the mmu_hash_lock */
223 cmpi 0,r0,0
224 bne- 11f
225 stwcx. r8,0,r9
226 beq+ 12f
22711: lwz r0,0(r9)
228 cmpi 0,r0,0
229 beq 10b
230 b 11b
23112: isync
232#endif
233
234 /*
235 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
236 * If _PAGE_HASHPTE was already set, we don't replace the existing
237 * HPTE, so we just unlock and return.
238 */
239 mr r8,r5
240 rlwimi r8,r4,22,20,29
2411: lwarx r6,0,r8
242 andi. r0,r6,_PAGE_HASHPTE
243 bne 9f /* if HASHPTE already set, done */
244 ori r5,r6,_PAGE_HASHPTE
245 stwcx. r5,0,r8
246 bne- 1b
247
248 bl create_hpte
249
2509:
251#ifdef CONFIG_SMP
252 eieio
253 li r0,0
254 stw r0,0(r9) /* clear mmu_hash_lock */
255#endif
256
257 /* reenable interrupts and DR */
258 mtmsr r10
259 SYNC_601
260 isync
261
262 lwz r0,4(r1)
263 mtlr r0
264 blr
265
266/*
267 * This routine adds a hardware PTE to the hash table.
268 * It is designed to be called with the MMU either on or off.
269 * r3 contains the VSID, r4 contains the virtual address,
270 * r5 contains the linux PTE, r6 contains the old value of the
271 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
272 * offset to be added to addresses (0 if the MMU is on,
273 * -KERNELBASE if it is off).
274 * On SMP, the caller should have the mmu_hash_lock held.
275 * We assume that the caller has (or will) set the _PAGE_HASHPTE
276 * bit in the linux PTE in memory. The value passed in r6 should
277 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
278 * this routine will skip the search for an existing HPTE.
279 * This procedure modifies r0, r3 - r6, r8, cr0.
280 * -- paulus.
281 *
282 * For speed, 4 of the instructions get patched once the size and
283 * physical address of the hash table are known. These definitions
284 * of Hash_base and Hash_bits below are just an example.
285 */
286Hash_base = 0xc0180000
287Hash_bits = 12 /* e.g. 256kB hash table */
288Hash_msk = (((1 << Hash_bits) - 1) * 64)
289
290#ifndef CONFIG_PPC64BRIDGE
291/* defines for the PTE format for 32-bit PPCs */
292#define PTE_SIZE 8
293#define PTEG_SIZE 64
294#define LG_PTEG_SIZE 6
295#define LDPTEu lwzu
296#define STPTE stw
297#define CMPPTE cmpw
298#define PTE_H 0x40
299#define PTE_V 0x80000000
300#define TST_V(r) rlwinm. r,r,0,0,0
301#define SET_V(r) oris r,r,PTE_V@h
302#define CLR_V(r,t) rlwinm r,r,0,1,31
303
304#else
305/* defines for the PTE format for 64-bit PPCs */
306#define PTE_SIZE 16
307#define PTEG_SIZE 128
308#define LG_PTEG_SIZE 7
309#define LDPTEu ldu
310#define STPTE std
311#define CMPPTE cmpd
312#define PTE_H 2
313#define PTE_V 1
314#define TST_V(r) andi. r,r,PTE_V
315#define SET_V(r) ori r,r,PTE_V
316#define CLR_V(r,t) li t,PTE_V; andc r,r,t
317#endif /* CONFIG_PPC64BRIDGE */
318
319#define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1)
320#define HASH_RIGHT 31-LG_PTEG_SIZE
321
322_GLOBAL(create_hpte)
323 /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
324 rlwinm r8,r5,32-10,31,31 /* _PAGE_RW -> PP lsb */
325 rlwinm r0,r5,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
326 and r8,r8,r0 /* writable if _RW & _DIRTY */
327 rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
328 rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
329 ori r8,r8,0xe14 /* clear out reserved bits and M */
330 andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
331BEGIN_FTR_SECTION
332 ori r8,r8,_PAGE_COHERENT /* set M (coherence required) */
333END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
334
335 /* Construct the high word of the PPC-style PTE (r5) */
336#ifndef CONFIG_PPC64BRIDGE
337 rlwinm r5,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
338 rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */
339#else /* CONFIG_PPC64BRIDGE */
340 clrlwi r3,r3,8 /* reduce vsid to 24 bits */
341 sldi r5,r3,12 /* shift vsid into position */
342 rlwimi r5,r4,16,20,24 /* put in API (abbrev page index) */
343#endif /* CONFIG_PPC64BRIDGE */
344 SET_V(r5) /* set V (valid) bit */
345
346 /* Get the address of the primary PTE group in the hash table (r3) */
347_GLOBAL(hash_page_patch_A)
348 addis r0,r7,Hash_base@h /* base address of hash table */
349 rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
350 rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
351 xor r3,r3,r0 /* make primary hash */
352 li r0,8 /* PTEs/group */
353
354 /*
355 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
356 * if it is clear, meaning that the HPTE isn't there already...
357 */
358 andi. r6,r6,_PAGE_HASHPTE
359 beq+ 10f /* no PTE: go look for an empty slot */
360 tlbie r4
361
362 addis r4,r7,htab_hash_searches@ha
363 lwz r6,htab_hash_searches@l(r4)
364 addi r6,r6,1 /* count how many searches we do */
365 stw r6,htab_hash_searches@l(r4)
366
367 /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
368 mtctr r0
369 addi r4,r3,-PTE_SIZE
3701: LDPTEu r6,PTE_SIZE(r4) /* get next PTE */
371 CMPPTE 0,r6,r5
372 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
373 beq+ found_slot
374
375 /* Search the secondary PTEG for a matching PTE */
376 ori r5,r5,PTE_H /* set H (secondary hash) bit */
377_GLOBAL(hash_page_patch_B)
378 xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
379 xori r4,r4,(-PTEG_SIZE & 0xffff)
380 addi r4,r4,-PTE_SIZE
381 mtctr r0
3822: LDPTEu r6,PTE_SIZE(r4)
383 CMPPTE 0,r6,r5
384 bdnzf 2,2b
385 beq+ found_slot
386 xori r5,r5,PTE_H /* clear H bit again */
387
388 /* Search the primary PTEG for an empty slot */
38910: mtctr r0
390 addi r4,r3,-PTE_SIZE /* search primary PTEG */
3911: LDPTEu r6,PTE_SIZE(r4) /* get next PTE */
392 TST_V(r6) /* test valid bit */
393 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
394 beq+ found_empty
395
396 /* update counter of times that the primary PTEG is full */
397 addis r4,r7,primary_pteg_full@ha
398 lwz r6,primary_pteg_full@l(r4)
399 addi r6,r6,1
400 stw r6,primary_pteg_full@l(r4)
401
402 /* Search the secondary PTEG for an empty slot */
403 ori r5,r5,PTE_H /* set H (secondary hash) bit */
404_GLOBAL(hash_page_patch_C)
405 xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
406 xori r4,r4,(-PTEG_SIZE & 0xffff)
407 addi r4,r4,-PTE_SIZE
408 mtctr r0
4092: LDPTEu r6,PTE_SIZE(r4)
410 TST_V(r6)
411 bdnzf 2,2b
412 beq+ found_empty
413 xori r5,r5,PTE_H /* clear H bit again */
414
415 /*
416 * Choose an arbitrary slot in the primary PTEG to overwrite.
417 * Since both the primary and secondary PTEGs are full, and we
418 * have no information that the PTEs in the primary PTEG are
419 * more important or useful than those in the secondary PTEG,
420 * and we know there is a definite (although small) speed
421 * advantage to putting the PTE in the primary PTEG, we always
422 * put the PTE in the primary PTEG.
423 */
424 addis r4,r7,next_slot@ha
425 lwz r6,next_slot@l(r4)
426 addi r6,r6,PTE_SIZE
427 andi. r6,r6,7*PTE_SIZE
428 stw r6,next_slot@l(r4)
429 add r4,r3,r6
430
431#ifndef CONFIG_SMP
432 /* Store PTE in PTEG */
433found_empty:
434 STPTE r5,0(r4)
435found_slot:
436 STPTE r8,PTE_SIZE/2(r4)
437
438#else /* CONFIG_SMP */
439/*
440 * Between the tlbie above and updating the hash table entry below,
441 * another CPU could read the hash table entry and put it in its TLB.
442 * There are 3 cases:
443 * 1. using an empty slot
444 * 2. updating an earlier entry to change permissions (i.e. enable write)
445 * 3. taking over the PTE for an unrelated address
446 *
447 * In each case it doesn't really matter if the other CPUs have the old
448 * PTE in their TLB. So we don't need to bother with another tlbie here,
449 * which is convenient as we've overwritten the register that had the
450 * address. :-) The tlbie above is mainly to make sure that this CPU comes
451 * and gets the new PTE from the hash table.
452 *
453 * We do however have to make sure that the PTE is never in an invalid
454 * state with the V bit set.
455 */
456found_empty:
457found_slot:
458 CLR_V(r5,r0) /* clear V (valid) bit in PTE */
459 STPTE r5,0(r4)
460 sync
461 TLBSYNC
462 STPTE r8,PTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
463 sync
464 SET_V(r5)
465 STPTE r5,0(r4) /* finally set V bit in PTE */
466#endif /* CONFIG_SMP */
467
468 sync /* make sure pte updates get to memory */
469 blr
470
471 .comm next_slot,4
472 .comm primary_pteg_full,4
473 .comm htab_hash_searches,4
474
475/*
476 * Flush the entry for a particular page from the hash table.
477 *
478 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
479 * int count)
480 *
481 * We assume that there is a hash table in use (Hash != 0).
482 */
483_GLOBAL(flush_hash_pages)
484 tophys(r7,0)
485
486 /*
487 * We disable interrupts here, even on UP, because we want
488 * the _PAGE_HASHPTE bit to be a reliable indication of
489 * whether the HPTE exists (or at least whether one did once).
490 * We also turn off the MMU for data accesses so that
491 * we can't take a hash table miss (assuming the code is
492 * covered by a BAT). -- paulus
493 */
494 mfmsr r10
495 SYNC
496 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
497 rlwinm r0,r0,0,28,26 /* clear MSR_DR */
498 mtmsr r0
499 SYNC_601
500 isync
501
502 /* First find a PTE in the range that has _PAGE_HASHPTE set */
503 rlwimi r5,r4,22,20,29
5041: lwz r0,0(r5)
505 cmpwi cr1,r6,1
506 andi. r0,r0,_PAGE_HASHPTE
507 bne 2f
508 ble cr1,19f
509 addi r4,r4,0x1000
510 addi r5,r5,4
511 addi r6,r6,-1
512 b 1b
513
514 /* Convert context and va to VSID */
5152: mulli r3,r3,897*16 /* multiply context by context skew */
516 rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
517 mulli r0,r0,0x111 /* multiply by ESID skew */
518 add r3,r3,r0 /* note code below trims to 24 bits */
519
520 /* Construct the high word of the PPC-style PTE (r11) */
521#ifndef CONFIG_PPC64BRIDGE
522 rlwinm r11,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
523 rlwimi r11,r4,10,26,31 /* put in API (abbrev page index) */
524#else /* CONFIG_PPC64BRIDGE */
525 clrlwi r3,r3,8 /* reduce vsid to 24 bits */
526 sldi r11,r3,12 /* shift vsid into position */
527 rlwimi r11,r4,16,20,24 /* put in API (abbrev page index) */
528#endif /* CONFIG_PPC64BRIDGE */
529 SET_V(r11) /* set V (valid) bit */
530
531#ifdef CONFIG_SMP
532 addis r9,r7,mmu_hash_lock@ha
533 addi r9,r9,mmu_hash_lock@l
534 rlwinm r8,r1,0,0,18
535 add r8,r8,r7
536 lwz r8,TI_CPU(r8)
537 oris r8,r8,9
53810: lwarx r0,0,r9
539 cmpi 0,r0,0
540 bne- 11f
541 stwcx. r8,0,r9
542 beq+ 12f
54311: lwz r0,0(r9)
544 cmpi 0,r0,0
545 beq 10b
546 b 11b
54712: isync
548#endif
549
550 /*
551 * Check the _PAGE_HASHPTE bit in the linux PTE. If it is
552 * already clear, we're done (for this pte). If not,
553 * clear it (atomically) and proceed. -- paulus.
554 */
55533: lwarx r8,0,r5 /* fetch the pte */
556 andi. r0,r8,_PAGE_HASHPTE
557 beq 8f /* done if HASHPTE is already clear */
558 rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
559 stwcx. r8,0,r5 /* update the pte */
560 bne- 33b
561
562 /* Get the address of the primary PTE group in the hash table (r3) */
563_GLOBAL(flush_hash_patch_A)
564 addis r8,r7,Hash_base@h /* base address of hash table */
565 rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
566 rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
567 xor r8,r0,r8 /* make primary hash */
568
569 /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
570 li r0,8 /* PTEs/group */
571 mtctr r0
572 addi r12,r8,-PTE_SIZE
5731: LDPTEu r0,PTE_SIZE(r12) /* get next PTE */
574 CMPPTE 0,r0,r11
575 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
576 beq+ 3f
577
578 /* Search the secondary PTEG for a matching PTE */
579 ori r11,r11,PTE_H /* set H (secondary hash) bit */
580 li r0,8 /* PTEs/group */
581_GLOBAL(flush_hash_patch_B)
582 xoris r12,r8,Hash_msk>>16 /* compute secondary hash */
583 xori r12,r12,(-PTEG_SIZE & 0xffff)
584 addi r12,r12,-PTE_SIZE
585 mtctr r0
5862: LDPTEu r0,PTE_SIZE(r12)
587 CMPPTE 0,r0,r11
588 bdnzf 2,2b
589 xori r11,r11,PTE_H /* clear H again */
590 bne- 4f /* should rarely fail to find it */
591
5923: li r0,0
593 STPTE r0,0(r12) /* invalidate entry */
5944: sync
595 tlbie r4 /* in hw tlb too */
596 sync
597
5988: ble cr1,9f /* if all ptes checked */
59981: addi r6,r6,-1
600 addi r5,r5,4 /* advance to next pte */
601 addi r4,r4,0x1000
602 lwz r0,0(r5) /* check next pte */
603 cmpwi cr1,r6,1
604 andi. r0,r0,_PAGE_HASHPTE
605 bne 33b
606 bgt cr1,81b
607
6089:
609#ifdef CONFIG_SMP
610 TLBSYNC
611 li r0,0
612 stw r0,0(r9) /* clear mmu_hash_lock */
613#endif
614
61519: mtmsr r10
616 SYNC_601
617 isync
618 blr
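
For readers following create_hpte and the flush path above: the VSID is derived by skewing the context and the top four address bits, the primary PTEG index is that VSID XORed with the low 16 bits of the page number, and the secondary PTEG uses the one's complement of the primary hash. Hash_base and Hash_bits are placeholders patched at boot, exactly as the comment above create_hpte says. A C sketch of the same calculation (the _sketch helpers are illustrative, not kernel functions):

/* Sketch of the VSID skewing done in add_hash_page/flush_hash_pages. */
static unsigned long vsid_sketch(unsigned long context, unsigned long va)
{
	unsigned long esid = (va >> 28) & 0xf;	/* top 4 bits of the address */

	/* create_hpte only keeps the low 24 bits of the result */
	return (context * 897 * 16 + esid * 0x111) & 0xffffff;
}

/* Sketch of the PTEG address computed by hash_page_patch_A/B above. */
static unsigned long pteg_addr_sketch(unsigned long hash_base,
				      unsigned long hash_bits,
				      unsigned long vsid, unsigned long va,
				      int secondary)
{
	unsigned long page_index = (va >> 12) & 0xffff;
	unsigned long hash = (vsid & 0x7ffff) ^ page_index;

	if (secondary)
		hash = ~hash;			/* one's complement hash */
	hash &= (1UL << hash_bits) - 1;		/* Hash_msk, in effect */
	return hash_base + (hash << 6);		/* 64-byte PTEGs */
}
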
diff --git a/arch/ppc64/mm/hash_low.S b/arch/powerpc/mm/hash_low_64.S
index ee5a5d36bfa8..d6ed9102eeea 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -10,7 +10,7 @@
10 * described in the kernel's COPYING file. 10 * described in the kernel's COPYING file.
11 */ 11 */
12 12
13#include <asm/processor.h> 13#include <asm/reg.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/page.h> 16#include <asm/page.h>
diff --git a/arch/ppc64/mm/hash_native.c b/arch/powerpc/mm/hash_native_64.c
index bfd385b7713c..174d14576c28 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -335,10 +335,9 @@ static void native_hpte_clear(void)
335 local_irq_restore(flags); 335 local_irq_restore(flags);
336} 336}
337 337
338static void native_flush_hash_range(unsigned long context, 338static void native_flush_hash_range(unsigned long number, int local)
339 unsigned long number, int local)
340{ 339{
341 unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn; 340 unsigned long va, vpn, hash, secondary, slot, flags, avpn;
342 int i, j; 341 int i, j;
343 hpte_t *hptep; 342 hpte_t *hptep;
344 unsigned long hpte_v; 343 unsigned long hpte_v;
@@ -349,13 +348,7 @@ static void native_flush_hash_range(unsigned long context,
349 348
350 j = 0; 349 j = 0;
351 for (i = 0; i < number; i++) { 350 for (i = 0; i < number; i++) {
352 if (batch->addr[i] < KERNELBASE) 351 va = batch->vaddr[j];
353 vsid = get_vsid(context, batch->addr[i]);
354 else
355 vsid = get_kernel_vsid(batch->addr[i]);
356
357 va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
358 batch->vaddr[j] = va;
359 if (large) 352 if (large)
360 vpn = va >> HPAGE_SHIFT; 353 vpn = va >> HPAGE_SHIFT;
361 else 354 else
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/powerpc/mm/hash_utils_64.c
index 09475c8edf7c..6e9e05cce02c 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -78,7 +78,7 @@ extern unsigned long dart_tablebase;
78hpte_t *htab_address; 78hpte_t *htab_address;
79unsigned long htab_hash_mask; 79unsigned long htab_hash_mask;
80 80
81extern unsigned long _SDR1; 81unsigned long _SDR1;
82 82
83#define KB (1024) 83#define KB (1024)
84#define MB (1024*KB) 84#define MB (1024*KB)
@@ -90,7 +90,6 @@ static inline void loop_forever(void)
90 ; 90 ;
91} 91}
92 92
93#ifdef CONFIG_PPC_MULTIPLATFORM
94static inline void create_pte_mapping(unsigned long start, unsigned long end, 93static inline void create_pte_mapping(unsigned long start, unsigned long end,
95 unsigned long mode, int large) 94 unsigned long mode, int large)
96{ 95{
@@ -111,7 +110,7 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
111 unsigned long vpn, hash, hpteg; 110 unsigned long vpn, hash, hpteg;
112 unsigned long vsid = get_kernel_vsid(addr); 111 unsigned long vsid = get_kernel_vsid(addr);
113 unsigned long va = (vsid << 28) | (addr & 0xfffffff); 112 unsigned long va = (vsid << 28) | (addr & 0xfffffff);
114 int ret; 113 int ret = -1;
115 114
116 if (large) 115 if (large)
117 vpn = va >> HPAGE_SHIFT; 116 vpn = va >> HPAGE_SHIFT;
@@ -129,16 +128,25 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
129 128
130 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 129 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
131 130
131#ifdef CONFIG_PPC_ISERIES
132 if (systemcfg->platform & PLATFORM_ISERIES_LPAR)
133 ret = iSeries_hpte_bolt_or_insert(hpteg, va,
134 virt_to_abs(addr) >> PAGE_SHIFT,
135 vflags, tmp_mode);
136 else
137#endif
132#ifdef CONFIG_PPC_PSERIES 138#ifdef CONFIG_PPC_PSERIES
133 if (systemcfg->platform & PLATFORM_LPAR) 139 if (systemcfg->platform & PLATFORM_LPAR)
134 ret = pSeries_lpar_hpte_insert(hpteg, va, 140 ret = pSeries_lpar_hpte_insert(hpteg, va,
135 virt_to_abs(addr) >> PAGE_SHIFT, 141 virt_to_abs(addr) >> PAGE_SHIFT,
136 vflags, tmp_mode); 142 vflags, tmp_mode);
137 else 143 else
138#endif /* CONFIG_PPC_PSERIES */ 144#endif
145#ifdef CONFIG_PPC_MULTIPLATFORM
139 ret = native_hpte_insert(hpteg, va, 146 ret = native_hpte_insert(hpteg, va,
140 virt_to_abs(addr) >> PAGE_SHIFT, 147 virt_to_abs(addr) >> PAGE_SHIFT,
141 vflags, tmp_mode); 148 vflags, tmp_mode);
149#endif
142 150
143 if (ret == -1) { 151 if (ret == -1) {
144 ppc64_terminate_msg(0x20, "create_pte_mapping"); 152 ppc64_terminate_msg(0x20, "create_pte_mapping");
@@ -147,6 +155,27 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
147 } 155 }
148} 156}
149 157
158static unsigned long get_hashtable_size(void)
159{
160 unsigned long rnd_mem_size, pteg_count;
161
162 /* If hash size wasn't obtained in prom.c, we calculate it now based on
163 * the total RAM size
164 */
165 if (ppc64_pft_size)
166 return 1UL << ppc64_pft_size;
167
168 /* round mem_size up to next power of 2 */
169 rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
170 if (rnd_mem_size < systemcfg->physicalMemorySize)
171 rnd_mem_size <<= 1;
172
173 /* # pages / 2 */
174 pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
175
176 return pteg_count << 7;
177}
178
150void __init htab_initialize(void) 179void __init htab_initialize(void)
151{ 180{
152 unsigned long table, htab_size_bytes; 181 unsigned long table, htab_size_bytes;
@@ -162,7 +191,7 @@ void __init htab_initialize(void)
162 * Calculate the required size of the htab. We want the number of 191 * Calculate the required size of the htab. We want the number of
163 * PTEGs to equal one half the number of real pages. 192 * PTEGs to equal one half the number of real pages.
164 */ 193 */
165 htab_size_bytes = 1UL << ppc64_pft_size; 194 htab_size_bytes = get_hashtable_size();
166 pteg_count = htab_size_bytes >> 7; 195 pteg_count = htab_size_bytes >> 7;
167 196
168 /* For debug, make the HTAB 1/8 as big as it normally would be. */ 197 /* For debug, make the HTAB 1/8 as big as it normally would be. */
@@ -261,7 +290,6 @@ void __init htab_initialize(void)
261} 290}
262#undef KB 291#undef KB
263#undef MB 292#undef MB
264#endif /* CONFIG_PPC_MULTIPLATFORM */
265 293
266/* 294/*
267 * Called by asm hashtable.S for doing lazy icache flush 295 * Called by asm hashtable.S for doing lazy icache flush
@@ -355,18 +383,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
355 return ret; 383 return ret;
356} 384}
357 385
358void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte, 386void flush_hash_page(unsigned long va, pte_t pte, int local)
359 int local)
360{ 387{
361 unsigned long vsid, vpn, va, hash, secondary, slot; 388 unsigned long vpn, hash, secondary, slot;
362 unsigned long huge = pte_huge(pte); 389 unsigned long huge = pte_huge(pte);
363 390
364 if (ea < KERNELBASE)
365 vsid = get_vsid(context, ea);
366 else
367 vsid = get_kernel_vsid(ea);
368
369 va = (vsid << 28) | (ea & 0x0fffffff);
370 if (huge) 391 if (huge)
371 vpn = va >> HPAGE_SHIFT; 392 vpn = va >> HPAGE_SHIFT;
372 else 393 else
@@ -381,17 +402,17 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
381 ppc_md.hpte_invalidate(slot, va, huge, local); 402 ppc_md.hpte_invalidate(slot, va, huge, local);
382} 403}
383 404
384void flush_hash_range(unsigned long context, unsigned long number, int local) 405void flush_hash_range(unsigned long number, int local)
385{ 406{
386 if (ppc_md.flush_hash_range) { 407 if (ppc_md.flush_hash_range) {
387 ppc_md.flush_hash_range(context, number, local); 408 ppc_md.flush_hash_range(number, local);
388 } else { 409 } else {
389 int i; 410 int i;
390 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 411 struct ppc64_tlb_batch *batch =
412 &__get_cpu_var(ppc64_tlb_batch);
391 413
392 for (i = 0; i < number; i++) 414 for (i = 0; i < number; i++)
393 flush_hash_page(context, batch->addr[i], batch->pte[i], 415 flush_hash_page(batch->vaddr[i], batch->pte[i], local);
394 local);
395 } 416 }
396} 417}
397 418
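
The newly added get_hashtable_size() encodes the usual sizing rule: one 128-byte PTEG for every two 4KB pages of RAM, with a floor of 2^11 PTEGs, and it is only consulted when the firmware did not provide ppc64_pft_size. A sketch with a worked example (the helper name is hypothetical):

/* Sketch of the fallback hash-table sizing added above.
 * Example: 2GB of RAM -> 2^31 >> 13 = 262144 PTEGs -> a 32MB table. */
static unsigned long hashtable_size_sketch(unsigned long mem_size)
{
	unsigned long rnd = 1;
	unsigned long pteg_count;

	while (rnd < mem_size)		/* round up to a power of two */
		rnd <<= 1;
	pteg_count = rnd >> (12 + 1);	/* (4KB pages of RAM) / 2 */
	if (pteg_count < (1UL << 11))
		pteg_count = 1UL << 11;	/* floor: 2048 PTEGs, a 256KB table */
	return pteg_count << 7;		/* 128 bytes per PTEG */
}
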
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0ea0994ed974..0ea0994ed974 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
diff --git a/arch/ppc64/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index f4ca29cf5364..f4ca29cf5364 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
new file mode 100644
index 000000000000..4612a79dfb6e
--- /dev/null
+++ b/arch/powerpc/mm/init_32.c
@@ -0,0 +1,254 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
10 *
11 * Derived from "arch/i386/mm/init.c"
12 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/stddef.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/initrd.h>
34#include <linux/pagemap.h>
35
36#include <asm/pgalloc.h>
37#include <asm/prom.h>
38#include <asm/io.h>
39#include <asm/mmu_context.h>
40#include <asm/pgtable.h>
41#include <asm/mmu.h>
42#include <asm/smp.h>
43#include <asm/machdep.h>
44#include <asm/btext.h>
45#include <asm/tlb.h>
46#include <asm/prom.h>
47#include <asm/lmb.h>
48#include <asm/sections.h>
49
50#include "mmu_decl.h"
51
52#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
53/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
54#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
55#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
56#endif
57#endif
58#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
59
60DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
61
62unsigned long total_memory;
63unsigned long total_lowmem;
64
65unsigned long ppc_memstart;
66unsigned long ppc_memoffset = PAGE_OFFSET;
67
68int boot_mapsize;
69#ifdef CONFIG_PPC_PMAC
70unsigned long agp_special_page;
71EXPORT_SYMBOL(agp_special_page);
72#endif
73
74#ifdef CONFIG_HIGHMEM
75pte_t *kmap_pte;
76pgprot_t kmap_prot;
77
78EXPORT_SYMBOL(kmap_prot);
79EXPORT_SYMBOL(kmap_pte);
80#endif
81
82void MMU_init(void);
83
84/* XXX should be in current.h -- paulus */
85extern struct task_struct *current_set[NR_CPUS];
86
87char *klimit = _end;
88struct device_node *memory_node;
89
90extern int init_bootmem_done;
91
92/*
93 * this tells the system to map all of ram with the segregs
94 * (i.e. page tables) instead of the bats.
95 * -- Cort
96 */
97int __map_without_bats;
98int __map_without_ltlbs;
99
100/* max amount of low RAM to map in */
101unsigned long __max_low_memory = MAX_LOW_MEM;
102
103/*
104 * limit of what is accessible with initial MMU setup -
105 * 256MB usually, but only 16MB on 601.
106 */
107unsigned long __initial_memory_limit = 0x10000000;
108
109/*
110 * Check for command-line options that affect what MMU_init will do.
111 */
112void MMU_setup(void)
113{
114 /* Check for nobats option (used in mapin_ram). */
115 if (strstr(cmd_line, "nobats")) {
116 __map_without_bats = 1;
117 }
118
119 if (strstr(cmd_line, "noltlbs")) {
120 __map_without_ltlbs = 1;
121 }
122}
123
124/*
125 * MMU_init sets up the basic memory mappings for the kernel,
126 * including both RAM and possibly some I/O regions,
127 * and sets up the page tables and the MMU hardware ready to go.
128 */
129void __init MMU_init(void)
130{
131 if (ppc_md.progress)
132 ppc_md.progress("MMU:enter", 0x111);
133
134 /* 601 can only access 16MB at the moment */
135 if (PVR_VER(mfspr(SPRN_PVR)) == 1)
136 __initial_memory_limit = 0x01000000;
137
138 /* parse args from command line */
139 MMU_setup();
140
141 if (lmb.memory.cnt > 1) {
142 lmb.memory.cnt = 1;
143 lmb_analyze();
144 printk(KERN_WARNING "Only using first contiguous memory region");
145 }
146
147 total_memory = lmb_end_of_DRAM();
148 total_lowmem = total_memory;
149
150#ifdef CONFIG_FSL_BOOKE
151 /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
152 * entries, so we need to adjust lowmem to match the amount we can map
153 * in the fixed entries */
154 adjust_total_lowmem();
155#endif /* CONFIG_FSL_BOOKE */
156
157 if (total_lowmem > __max_low_memory) {
158 total_lowmem = __max_low_memory;
159#ifndef CONFIG_HIGHMEM
160 total_memory = total_lowmem;
161 lmb_enforce_memory_limit(total_lowmem);
162 lmb_analyze();
163#endif /* CONFIG_HIGHMEM */
164 }
165
166 /* Initialize the MMU hardware */
167 if (ppc_md.progress)
168 ppc_md.progress("MMU:hw init", 0x300);
169 MMU_init_hw();
170
171 /* Map in all of RAM starting at KERNELBASE */
172 if (ppc_md.progress)
173 ppc_md.progress("MMU:mapin", 0x301);
174 mapin_ram();
175
176#ifdef CONFIG_HIGHMEM
177 ioremap_base = PKMAP_BASE;
178#else
179 ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
180#endif /* CONFIG_HIGHMEM */
181 ioremap_bot = ioremap_base;
182
183 /* Map in I/O resources */
184 if (ppc_md.progress)
185 ppc_md.progress("MMU:setio", 0x302);
186 if (ppc_md.setup_io_mappings)
187 ppc_md.setup_io_mappings();
188
189 /* Initialize the context management stuff */
190 mmu_context_init();
191
192 if (ppc_md.progress)
193 ppc_md.progress("MMU:exit", 0x211);
194}
195
196/* This is only called until mem_init is done. */
197void __init *early_get_page(void)
198{
199 void *p;
200
201 if (init_bootmem_done) {
202 p = alloc_bootmem_pages(PAGE_SIZE);
203 } else {
204 p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
205 __initial_memory_limit));
206 }
207 return p;
208}
209
210/* Free up now-unused memory */
211static void free_sec(unsigned long start, unsigned long end, const char *name)
212{
213 unsigned long cnt = 0;
214
215 while (start < end) {
216 ClearPageReserved(virt_to_page(start));
217 set_page_count(virt_to_page(start), 1);
218 free_page(start);
219 cnt++;
220 start += PAGE_SIZE;
221 }
222 if (cnt) {
223 printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
224 totalram_pages += cnt;
225 }
226}
227
228void free_initmem(void)
229{
230#define FREESEC(TYPE) \
231 free_sec((unsigned long)(&__ ## TYPE ## _begin), \
232 (unsigned long)(&__ ## TYPE ## _end), \
233 #TYPE);
234
235 printk ("Freeing unused kernel memory:");
236 FREESEC(init);
237 printk("\n");
238 ppc_md.progress = NULL;
239#undef FREESEC
240}
241
242#ifdef CONFIG_BLK_DEV_INITRD
243void free_initrd_mem(unsigned long start, unsigned long end)
244{
245 if (start < end)
246 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
247 for (; start < end; start += PAGE_SIZE) {
248 ClearPageReserved(virt_to_page(start));
249 set_page_count(virt_to_page(start), 1);
250 free_page(start);
251 totalram_pages++;
252 }
253}
254#endif
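
The reporting above converts a count of freed pages to kilobytes with cnt << (PAGE_SHIFT - 10). A minimal standalone sketch of that page-walk-and-report arithmetic, assuming 4 KB pages (PAGE_SHIFT = 12); the names are illustrative, not kernel API:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed 4 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Count the pages in [start, end) and return the size in KB,
 * mirroring the cnt << (PAGE_SHIFT - 10) conversion in free_sec(). */
static unsigned long pages_to_kb(unsigned long start, unsigned long end)
{
	unsigned long cnt = 0;

	for (; start < end; start += PAGE_SIZE)
		cnt++;
	return cnt << (PAGE_SHIFT - 10);
}

int main(void)
{
	/* 256 pages of 4 KB each is 1024 KB */
	printf("%luk\n", pages_to_kb(0, 256 * PAGE_SIZE));
	return 0;
}
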
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
new file mode 100644
index 000000000000..b0fc822ec29f
--- /dev/null
+++ b/arch/powerpc/mm/init_64.c
@@ -0,0 +1,223 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * Dave Engebretsen <engebret@us.ibm.com>
14 * Rework for PPC64 port.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/string.h>
29#include <linux/types.h>
30#include <linux/mman.h>
31#include <linux/mm.h>
32#include <linux/swap.h>
33#include <linux/stddef.h>
34#include <linux/vmalloc.h>
35#include <linux/init.h>
36#include <linux/delay.h>
37#include <linux/bootmem.h>
38#include <linux/highmem.h>
39#include <linux/idr.h>
40#include <linux/nodemask.h>
41#include <linux/module.h>
42
43#include <asm/pgalloc.h>
44#include <asm/page.h>
45#include <asm/prom.h>
46#include <asm/lmb.h>
47#include <asm/rtas.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/machdep.h>
55#include <asm/tlb.h>
56#include <asm/eeh.h>
57#include <asm/processor.h>
58#include <asm/mmzone.h>
59#include <asm/cputable.h>
60#include <asm/ppcdebug.h>
61#include <asm/sections.h>
62#include <asm/system.h>
63#include <asm/iommu.h>
64#include <asm/abs_addr.h>
65#include <asm/vdso.h>
66#include <asm/imalloc.h>
67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
76unsigned long klimit = (unsigned long)_end;
77
78/* max amount of RAM to use */
79unsigned long __max_memory;
80
81/* info on what we think the IO hole is */
82unsigned long io_hole_start;
83unsigned long io_hole_size;
84
85/*
86 * Do very early mm setup.
87 */
88void __init mm_init_ppc64(void)
89{
90#ifndef CONFIG_PPC_ISERIES
91 unsigned long i;
92#endif
93
94 ppc64_boot_msg(0x100, "MM Init");
95
96 /* This is the story of the IO hole... please, keep seated,
97 * unfortunately, we are out of oxygen masks at the moment.
98 * So we need some rough way to tell where your big IO hole
99 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
100 * that area as well, on POWER4 we don't have one, etc...
101 * We need that as a "hint" when sizing the TCE table on POWER3
 102 * So far, the simplest way that seems to work well enough for us is
103 * to just assume that the first discontinuity in our physical
104 * RAM layout is the IO hole. That may not be correct in the future
105 * (and isn't on iSeries but then we don't care ;)
106 */
107
108#ifndef CONFIG_PPC_ISERIES
109 for (i = 1; i < lmb.memory.cnt; i++) {
110 unsigned long base, prevbase, prevsize;
111
112 prevbase = lmb.memory.region[i-1].base;
113 prevsize = lmb.memory.region[i-1].size;
114 base = lmb.memory.region[i].base;
115 if (base > (prevbase + prevsize)) {
116 io_hole_start = prevbase + prevsize;
117 io_hole_size = base - (prevbase + prevsize);
118 break;
119 }
120 }
121#endif /* CONFIG_PPC_ISERIES */
122 if (io_hole_start)
123 printk("IO Hole assumed to be %lx -> %lx\n",
124 io_hole_start, io_hole_start + io_hole_size - 1);
125
126 ppc64_boot_msg(0x100, "MM Init Done");
127}
128
129void free_initmem(void)
130{
131 unsigned long addr;
132
133 addr = (unsigned long)__init_begin;
134 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
135 memset((void *)addr, 0xcc, PAGE_SIZE);
136 ClearPageReserved(virt_to_page(addr));
137 set_page_count(virt_to_page(addr), 1);
138 free_page(addr);
139 totalram_pages++;
140 }
141 printk ("Freeing unused kernel memory: %luk freed\n",
142 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
143}
144
145#ifdef CONFIG_BLK_DEV_INITRD
146void free_initrd_mem(unsigned long start, unsigned long end)
147{
148 if (start < end)
149 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
150 for (; start < end; start += PAGE_SIZE) {
151 ClearPageReserved(virt_to_page(start));
152 set_page_count(virt_to_page(start), 1);
153 free_page(start);
154 totalram_pages++;
155 }
156}
157#endif
158
159static struct kcore_list kcore_vmem;
160
161static int __init setup_kcore(void)
162{
163 int i;
164
165 for (i=0; i < lmb.memory.cnt; i++) {
166 unsigned long base, size;
167 struct kcore_list *kcore_mem;
168
169 base = lmb.memory.region[i].base;
170 size = lmb.memory.region[i].size;
171
172 /* GFP_ATOMIC to avoid might_sleep warnings during boot */
173 kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
174 if (!kcore_mem)
175 panic("mem_init: kmalloc failed\n");
176
177 kclist_add(kcore_mem, __va(base), size);
178 }
179
180 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
181
182 return 0;
183}
184module_init(setup_kcore);
185
186static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
187{
188 memset(addr, 0, kmem_cache_size(cache));
189}
190
191static const int pgtable_cache_size[2] = {
192 PTE_TABLE_SIZE, PMD_TABLE_SIZE
193};
194static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
195 "pgd_pte_cache", "pud_pmd_cache",
196};
197
198kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
199
200void pgtable_cache_init(void)
201{
202 int i;
203
204 BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
205 BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
206 BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
207 BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
208
209 for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
210 int size = pgtable_cache_size[i];
211 const char *name = pgtable_cache_name[i];
212
213 pgtable_cache[i] = kmem_cache_create(name,
214 size, size,
215 SLAB_HWCACHE_ALIGN
216 | SLAB_MUST_HWCACHE_ALIGN,
217 zero_ctor,
218 NULL);
219 if (! pgtable_cache[i])
220 panic("pgtable_cache_init(): could not create %s!\n",
221 name);
222 }
223}
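
The IO-hole heuristic in mm_init_ppc64() above simply takes the first gap between consecutive memory regions. A standalone sketch of that scan with a made-up two-region layout (the region table here is an assumption, not the kernel's lmb structures):

#include <stdio.h>

struct region { unsigned long base, size; };

static const struct region mem[] = {
	{ 0x00000000UL, 0x40000000UL },   /* 0 .. 1 GB */
	{ 0x80000000UL, 0x40000000UL },   /* 2 .. 3 GB */
};

int main(void)
{
	unsigned long io_hole_start = 0, io_hole_size = 0;
	int i;

	/* First discontinuity in the layout is assumed to be the IO hole. */
	for (i = 1; i < 2; i++) {
		unsigned long prevend = mem[i-1].base + mem[i-1].size;

		if (mem[i].base > prevend) {
			io_hole_start = prevend;
			io_hole_size = mem[i].base - prevend;
			break;
		}
	}
	if (io_hole_start)
		printf("IO Hole assumed to be %lx -> %lx\n",
		       io_hole_start, io_hole_start + io_hole_size - 1);
	return 0;
}
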
diff --git a/arch/ppc64/kernel/lmb.c b/arch/powerpc/mm/lmb.c
index 5adaca2ddc9d..9b5aa6808eb8 100644
--- a/arch/ppc64/kernel/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Procedures for interfacing to Open Firmware. 2 * Procedures for maintaining information about logical memory blocks.
3 * 3 *
4 * Peter Bergner, IBM Corp. June 2001. 4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner. 5 * Copyright (C) 2001 Peter Bergner.
@@ -18,7 +18,9 @@
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/prom.h> 19#include <asm/prom.h>
20#include <asm/lmb.h> 20#include <asm/lmb.h>
21#include <asm/abs_addr.h> 21#ifdef CONFIG_PPC32
22#include "mmu_decl.h" /* for __max_low_memory */
23#endif
22 24
23struct lmb lmb; 25struct lmb lmb;
24 26
@@ -54,16 +56,14 @@ void lmb_dump_all(void)
54#endif /* DEBUG */ 56#endif /* DEBUG */
55} 57}
56 58
57static unsigned long __init 59static unsigned long __init lmb_addrs_overlap(unsigned long base1,
58lmb_addrs_overlap(unsigned long base1, unsigned long size1, 60 unsigned long size1, unsigned long base2, unsigned long size2)
59 unsigned long base2, unsigned long size2)
60{ 61{
61 return ((base1 < (base2+size2)) && (base2 < (base1+size1))); 62 return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
62} 63}
63 64
64static long __init 65static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
65lmb_addrs_adjacent(unsigned long base1, unsigned long size1, 66 unsigned long base2, unsigned long size2)
66 unsigned long base2, unsigned long size2)
67{ 67{
68 if (base2 == base1 + size1) 68 if (base2 == base1 + size1)
69 return 1; 69 return 1;
@@ -73,8 +73,8 @@ lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
73 return 0; 73 return 0;
74} 74}
75 75
76static long __init 76static long __init lmb_regions_adjacent(struct lmb_region *rgn,
77lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2) 77 unsigned long r1, unsigned long r2)
78{ 78{
79 unsigned long base1 = rgn->region[r1].base; 79 unsigned long base1 = rgn->region[r1].base;
80 unsigned long size1 = rgn->region[r1].size; 80 unsigned long size1 = rgn->region[r1].size;
@@ -85,8 +85,8 @@ lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
85} 85}
86 86
87/* Assumption: base addr of region 1 < base addr of region 2 */ 87/* Assumption: base addr of region 1 < base addr of region 2 */
88static void __init 88static void __init lmb_coalesce_regions(struct lmb_region *rgn,
89lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2) 89 unsigned long r1, unsigned long r2)
90{ 90{
91 unsigned long i; 91 unsigned long i;
92 92
@@ -99,8 +99,7 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
99} 99}
100 100
101/* This routine called with relocation disabled. */ 101/* This routine called with relocation disabled. */
102void __init 102void __init lmb_init(void)
103lmb_init(void)
104{ 103{
105 /* Create a dummy zero size LMB which will get coalesced away later. 104 /* Create a dummy zero size LMB which will get coalesced away later.
106 * This simplifies the lmb_add() code below... 105 * This simplifies the lmb_add() code below...
@@ -115,9 +114,8 @@ lmb_init(void)
115 lmb.reserved.cnt = 1; 114 lmb.reserved.cnt = 1;
116} 115}
117 116
118/* This routine called with relocation disabled. */ 117/* This routine may be called with relocation disabled. */
119void __init 118void __init lmb_analyze(void)
120lmb_analyze(void)
121{ 119{
122 int i; 120 int i;
123 121
@@ -128,8 +126,8 @@ lmb_analyze(void)
128} 126}
129 127
130/* This routine called with relocation disabled. */ 128/* This routine called with relocation disabled. */
131static long __init 129static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
132lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size) 130 unsigned long size)
133{ 131{
134 unsigned long i, coalesced = 0; 132 unsigned long i, coalesced = 0;
135 long adjacent; 133 long adjacent;
@@ -158,18 +156,17 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
158 coalesced++; 156 coalesced++;
159 } 157 }
160 158
161 if ( coalesced ) { 159 if (coalesced)
162 return coalesced; 160 return coalesced;
163 } else if ( rgn->cnt >= MAX_LMB_REGIONS ) { 161 if (rgn->cnt >= MAX_LMB_REGIONS)
164 return -1; 162 return -1;
165 }
166 163
167 /* Couldn't coalesce the LMB, so add it to the sorted table. */ 164 /* Couldn't coalesce the LMB, so add it to the sorted table. */
168 for (i=rgn->cnt-1; i >= 0; i--) { 165 for (i = rgn->cnt-1; i >= 0; i--) {
169 if (base < rgn->region[i].base) { 166 if (base < rgn->region[i].base) {
170 rgn->region[i+1].base = rgn->region[i].base; 167 rgn->region[i+1].base = rgn->region[i].base;
171 rgn->region[i+1].size = rgn->region[i].size; 168 rgn->region[i+1].size = rgn->region[i].size;
172 } else { 169 } else {
173 rgn->region[i+1].base = base; 170 rgn->region[i+1].base = base;
174 rgn->region[i+1].size = size; 171 rgn->region[i+1].size = size;
175 break; 172 break;
@@ -180,30 +177,28 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
180 return 0; 177 return 0;
181} 178}
182 179
183/* This routine called with relocation disabled. */ 180/* This routine may be called with relocation disabled. */
184long __init 181long __init lmb_add(unsigned long base, unsigned long size)
185lmb_add(unsigned long base, unsigned long size)
186{ 182{
187 struct lmb_region *_rgn = &(lmb.memory); 183 struct lmb_region *_rgn = &(lmb.memory);
188 184
189 /* On pSeries LPAR systems, the first LMB is our RMO region. */ 185 /* On pSeries LPAR systems, the first LMB is our RMO region. */
190 if ( base == 0 ) 186 if (base == 0)
191 lmb.rmo_size = size; 187 lmb.rmo_size = size;
192 188
193 return lmb_add_region(_rgn, base, size); 189 return lmb_add_region(_rgn, base, size);
194 190
195} 191}
196 192
197long __init 193long __init lmb_reserve(unsigned long base, unsigned long size)
198lmb_reserve(unsigned long base, unsigned long size)
199{ 194{
200 struct lmb_region *_rgn = &(lmb.reserved); 195 struct lmb_region *_rgn = &(lmb.reserved);
201 196
202 return lmb_add_region(_rgn, base, size); 197 return lmb_add_region(_rgn, base, size);
203} 198}
204 199
205long __init 200long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
206lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size) 201 unsigned long size)
207{ 202{
208 unsigned long i; 203 unsigned long i;
209 204
@@ -218,39 +213,44 @@ lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long si
218 return (i < rgn->cnt) ? i : -1; 213 return (i < rgn->cnt) ? i : -1;
219} 214}
220 215
221unsigned long __init 216unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
222lmb_alloc(unsigned long size, unsigned long align)
223{ 217{
224 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); 218 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
225} 219}
226 220
227unsigned long __init 221unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
228lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr) 222 unsigned long max_addr)
229{ 223{
230 long i, j; 224 long i, j;
231 unsigned long base = 0; 225 unsigned long base = 0;
232 226
233 for (i=lmb.memory.cnt-1; i >= 0; i--) { 227#ifdef CONFIG_PPC32
228 /* On 32-bit, make sure we allocate lowmem */
229 if (max_addr == LMB_ALLOC_ANYWHERE)
230 max_addr = __max_low_memory;
231#endif
232 for (i = lmb.memory.cnt-1; i >= 0; i--) {
234 unsigned long lmbbase = lmb.memory.region[i].base; 233 unsigned long lmbbase = lmb.memory.region[i].base;
235 unsigned long lmbsize = lmb.memory.region[i].size; 234 unsigned long lmbsize = lmb.memory.region[i].size;
236 235
237 if ( max_addr == LMB_ALLOC_ANYWHERE ) 236 if (max_addr == LMB_ALLOC_ANYWHERE)
238 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align); 237 base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
239 else if ( lmbbase < max_addr ) 238 else if (lmbbase < max_addr) {
240 base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align); 239 base = min(lmbbase + lmbsize, max_addr);
241 else 240 base = _ALIGN_DOWN(base - size, align);
241 } else
242 continue; 242 continue;
243 243
244 while ( (lmbbase <= base) && 244 while ((lmbbase <= base) &&
245 ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) { 245 ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
246 base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align); 246 base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
247 } 247 align);
248 248
249 if ( (base != 0) && (lmbbase <= base) ) 249 if ((base != 0) && (lmbbase <= base))
250 break; 250 break;
251 } 251 }
252 252
253 if ( i < 0 ) 253 if (i < 0)
254 return 0; 254 return 0;
255 255
256 lmb_add_region(&lmb.reserved, base, size); 256 lmb_add_region(&lmb.reserved, base, size);
@@ -259,14 +259,12 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
259} 259}
260 260
261/* You must call lmb_analyze() before this. */ 261/* You must call lmb_analyze() before this. */
262unsigned long __init 262unsigned long __init lmb_phys_mem_size(void)
263lmb_phys_mem_size(void)
264{ 263{
265 return lmb.memory.size; 264 return lmb.memory.size;
266} 265}
267 266
268unsigned long __init 267unsigned long __init lmb_end_of_DRAM(void)
269lmb_end_of_DRAM(void)
270{ 268{
271 int idx = lmb.memory.cnt - 1; 269 int idx = lmb.memory.cnt - 1;
272 270
@@ -277,9 +275,8 @@ lmb_end_of_DRAM(void)
277 * Truncate the lmb list to memory_limit if it's set 275 * Truncate the lmb list to memory_limit if it's set
278 * You must call lmb_analyze() after this. 276 * You must call lmb_analyze() after this.
279 */ 277 */
280void __init lmb_enforce_memory_limit(void) 278void __init lmb_enforce_memory_limit(unsigned long memory_limit)
281{ 279{
282 extern unsigned long memory_limit;
283 unsigned long i, limit; 280 unsigned long i, limit;
284 281
285 if (! memory_limit) 282 if (! memory_limit)
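
lmb_alloc_base() above allocates top-down: it clips the region's end to max_addr, subtracts the request size, and rounds the result down to the alignment with _ALIGN_DOWN. A minimal sketch of that base calculation (align_down and pick_base are illustrative helpers, not kernel macros; align is assumed to be a power of two):

#include <stdio.h>

/* Round addr down to a power-of-two alignment. */
static unsigned long align_down(unsigned long addr, unsigned long align)
{
	return addr & ~(align - 1);
}

/* Highest aligned base for `size` bytes that fits in the region
 * [rgn_base, rgn_base + rgn_size) and stays below max_addr,
 * or 0 if the region cannot satisfy the request. */
static unsigned long pick_base(unsigned long rgn_base, unsigned long rgn_size,
			       unsigned long size, unsigned long align,
			       unsigned long max_addr)
{
	unsigned long top = rgn_base + rgn_size;
	unsigned long base;

	if (top > max_addr)
		top = max_addr;
	if (top < rgn_base + size)
		return 0;
	base = align_down(top - size, align);
	return base >= rgn_base ? base : 0;
}

int main(void)
{
	/* 16 MB region at 0, 1 MB request, 1 MB alignment, 12 MB limit:
	 * the highest usable base is 0xb00000. */
	printf("base = 0x%lx\n",
	       pick_base(0x0, 0x1000000, 0x100000, 0x100000, 0xc00000));
	return 0;
}
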
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
new file mode 100644
index 000000000000..117b00012e14
--- /dev/null
+++ b/arch/powerpc/mm/mem.c
@@ -0,0 +1,564 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
10 *
11 * Derived from "arch/i386/mm/init.c"
12 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/stddef.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/initrd.h>
34#include <linux/pagemap.h>
35
36#include <asm/pgalloc.h>
37#include <asm/prom.h>
38#include <asm/io.h>
39#include <asm/mmu_context.h>
40#include <asm/pgtable.h>
41#include <asm/mmu.h>
42#include <asm/smp.h>
43#include <asm/machdep.h>
44#include <asm/btext.h>
45#include <asm/tlb.h>
46#include <asm/prom.h>
47#include <asm/lmb.h>
48#include <asm/sections.h>
49#ifdef CONFIG_PPC64
50#include <asm/vdso.h>
51#endif
52
53#include "mmu_decl.h"
54
55#ifndef CPU_FTR_COHERENT_ICACHE
56#define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */
57#define CPU_FTR_NOEXECUTE 0
58#endif
59
60int init_bootmem_done;
61int mem_init_done;
62unsigned long memory_limit;
63
64/*
65 * This is called by /dev/mem to know if a given address has to
66 * be mapped non-cacheable or not
67 */
68int page_is_ram(unsigned long pfn)
69{
70 unsigned long paddr = (pfn << PAGE_SHIFT);
71
72#ifndef CONFIG_PPC64 /* XXX for now */
73 return paddr < __pa(high_memory);
74#else
75 int i;
76 for (i=0; i < lmb.memory.cnt; i++) {
77 unsigned long base;
78
79 base = lmb.memory.region[i].base;
80
81 if ((paddr >= base) &&
82 (paddr < (base + lmb.memory.region[i].size))) {
83 return 1;
84 }
85 }
86
87 return 0;
88#endif
89}
90EXPORT_SYMBOL(page_is_ram);
91
92pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
93 unsigned long size, pgprot_t vma_prot)
94{
95 if (ppc_md.phys_mem_access_prot)
96 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
97
98 if (!page_is_ram(pfn))
99 vma_prot = __pgprot(pgprot_val(vma_prot)
100 | _PAGE_GUARDED | _PAGE_NO_CACHE);
101 return vma_prot;
102}
103EXPORT_SYMBOL(phys_mem_access_prot);
104
105#ifdef CONFIG_MEMORY_HOTPLUG
106
107void online_page(struct page *page)
108{
109 ClearPageReserved(page);
110 free_cold_page(page);
111 totalram_pages++;
112 num_physpages++;
113}
114
115/*
116 * This works only for the non-NUMA case. Later, we'll need a lookup
 117 * to convert from real physical addresses to nid that doesn't use
118 * pfn_to_nid().
119 */
120int __devinit add_memory(u64 start, u64 size)
121{
122 struct pglist_data *pgdata = NODE_DATA(0);
123 struct zone *zone;
124 unsigned long start_pfn = start >> PAGE_SHIFT;
125 unsigned long nr_pages = size >> PAGE_SHIFT;
126
127 /* this should work for most non-highmem platforms */
128 zone = pgdata->node_zones;
129
130 return __add_pages(zone, start_pfn, nr_pages);
131
132 return 0;
133}
134
135/*
136 * First pass at this code will check to determine if the remove
137 * request is within the RMO. Do not allow removal within the RMO.
138 */
139int __devinit remove_memory(u64 start, u64 size)
140{
141 struct zone *zone;
142 unsigned long start_pfn, end_pfn, nr_pages;
143
144 start_pfn = start >> PAGE_SHIFT;
145 nr_pages = size >> PAGE_SHIFT;
146 end_pfn = start_pfn + nr_pages;
147
 148	printk("%s(): Attempting to remove memory in range "
149 "%lx to %lx\n", __func__, start, start+size);
150 /*
151 * check for range within RMO
152 */
153 zone = page_zone(pfn_to_page(start_pfn));
154
155 printk("%s(): memory will be removed from "
156 "the %s zone\n", __func__, zone->name);
157
158 /*
159 * not handling removing memory ranges that
160 * overlap multiple zones yet
161 */
162 if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
163 goto overlap;
164
165 /* make sure it is NOT in RMO */
166 if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
167 printk("%s(): range to be removed must NOT be in RMO!\n",
168 __func__);
169 goto in_rmo;
170 }
171
172 return __remove_pages(zone, start_pfn, nr_pages);
173
174overlap:
175 printk("%s(): memory range to be removed overlaps "
176 "multiple zones!!!\n", __func__);
177in_rmo:
178 return -1;
179}
180#endif /* CONFIG_MEMORY_HOTPLUG */
181
182void show_mem(void)
183{
184 unsigned long total = 0, reserved = 0;
185 unsigned long shared = 0, cached = 0;
186 unsigned long highmem = 0;
187 struct page *page;
188 pg_data_t *pgdat;
189 unsigned long i;
190
191 printk("Mem-info:\n");
192 show_free_areas();
193 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
194 for_each_pgdat(pgdat) {
195 unsigned long flags;
196 pgdat_resize_lock(pgdat, &flags);
197 for (i = 0; i < pgdat->node_spanned_pages; i++) {
198 page = pgdat_page_nr(pgdat, i);
199 total++;
200 if (PageHighMem(page))
201 highmem++;
202 if (PageReserved(page))
203 reserved++;
204 else if (PageSwapCache(page))
205 cached++;
206 else if (page_count(page))
207 shared += page_count(page) - 1;
208 }
209 pgdat_resize_unlock(pgdat, &flags);
210 }
211 printk("%ld pages of RAM\n", total);
212#ifdef CONFIG_HIGHMEM
213 printk("%ld pages of HIGHMEM\n", highmem);
214#endif
215 printk("%ld reserved pages\n", reserved);
216 printk("%ld pages shared\n", shared);
217 printk("%ld pages swap cached\n", cached);
218}
219
220/*
221 * Initialize the bootmem system and give it all the memory we
222 * have available. If we are using highmem, we only put the
223 * lowmem into the bootmem system.
224 */
225#ifndef CONFIG_NEED_MULTIPLE_NODES
226void __init do_init_bootmem(void)
227{
228 unsigned long i;
229 unsigned long start, bootmap_pages;
230 unsigned long total_pages;
231 int boot_mapsize;
232
233 max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
234#ifdef CONFIG_HIGHMEM
235 total_pages = total_lowmem >> PAGE_SHIFT;
236#endif
237
238 /*
239 * Find an area to use for the bootmem bitmap. Calculate the size of
240 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
241 * Add 1 additional page in case the address isn't page-aligned.
242 */
243 bootmap_pages = bootmem_bootmap_pages(total_pages);
244
245 start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
246 BUG_ON(!start);
247
248 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
249
250 /* Add all physical memory to the bootmem map, mark each area
251 * present.
252 */
253 for (i = 0; i < lmb.memory.cnt; i++) {
254 unsigned long base = lmb.memory.region[i].base;
255 unsigned long size = lmb_size_bytes(&lmb.memory, i);
256#ifdef CONFIG_HIGHMEM
257 if (base >= total_lowmem)
258 continue;
259 if (base + size > total_lowmem)
260 size = total_lowmem - base;
261#endif
262 free_bootmem(base, size);
263 }
264
265 /* reserve the sections we're already using */
266 for (i = 0; i < lmb.reserved.cnt; i++)
267 reserve_bootmem(lmb.reserved.region[i].base,
268 lmb_size_bytes(&lmb.reserved, i));
269
270 /* XXX need to clip this if using highmem? */
271 for (i = 0; i < lmb.memory.cnt; i++)
272 memory_present(0, lmb_start_pfn(&lmb.memory, i),
273 lmb_end_pfn(&lmb.memory, i));
274 init_bootmem_done = 1;
275}
276
277/*
278 * paging_init() sets up the page tables - in fact we've already done this.
279 */
280void __init paging_init(void)
281{
282 unsigned long zones_size[MAX_NR_ZONES];
283 unsigned long zholes_size[MAX_NR_ZONES];
284 unsigned long total_ram = lmb_phys_mem_size();
285 unsigned long top_of_ram = lmb_end_of_DRAM();
286
287#ifdef CONFIG_HIGHMEM
288 map_page(PKMAP_BASE, 0, 0); /* XXX gross */
289 pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
290 (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
291 map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
292 kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
293 (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
294 kmap_prot = PAGE_KERNEL;
295#endif /* CONFIG_HIGHMEM */
296
297 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
298 top_of_ram, total_ram);
299 printk(KERN_INFO "Memory hole size: %ldMB\n",
300 (top_of_ram - total_ram) >> 20);
301 /*
302 * All pages are DMA-able so we put them all in the DMA zone.
303 */
304 memset(zones_size, 0, sizeof(zones_size));
305 memset(zholes_size, 0, sizeof(zholes_size));
306
307 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
308 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
309
310#ifdef CONFIG_HIGHMEM
311 zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
312 zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
313 zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
314#else
315 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
316 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
317#endif /* CONFIG_HIGHMEM */
318
319 free_area_init_node(0, NODE_DATA(0), zones_size,
320 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
321}
322#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
323
324void __init mem_init(void)
325{
326#ifdef CONFIG_NEED_MULTIPLE_NODES
327 int nid;
328#endif
329 pg_data_t *pgdat;
330 unsigned long i;
331 struct page *page;
332 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
333
334 num_physpages = max_pfn; /* RAM is assumed contiguous */
335 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
336
337#ifdef CONFIG_NEED_MULTIPLE_NODES
338 for_each_online_node(nid) {
339 if (NODE_DATA(nid)->node_spanned_pages != 0) {
340 printk("freeing bootmem node %x\n", nid);
341 totalram_pages +=
342 free_all_bootmem_node(NODE_DATA(nid));
343 }
344 }
345#else
346 max_mapnr = num_physpages;
347 totalram_pages += free_all_bootmem();
348#endif
349 for_each_pgdat(pgdat) {
350 for (i = 0; i < pgdat->node_spanned_pages; i++) {
351 page = pgdat_page_nr(pgdat, i);
352 if (PageReserved(page))
353 reservedpages++;
354 }
355 }
356
357 codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
358 datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
359 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
360 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
361
362#ifdef CONFIG_HIGHMEM
363 {
364 unsigned long pfn, highmem_mapnr;
365
366 highmem_mapnr = total_lowmem >> PAGE_SHIFT;
367 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
368 struct page *page = pfn_to_page(pfn);
369
370 ClearPageReserved(page);
371 set_page_count(page, 1);
372 __free_page(page);
373 totalhigh_pages++;
374 }
375 totalram_pages += totalhigh_pages;
376 printk(KERN_INFO "High memory: %luk\n",
377 totalhigh_pages << (PAGE_SHIFT-10));
378 }
379#endif /* CONFIG_HIGHMEM */
380
381 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
382 "%luk reserved, %luk data, %luk bss, %luk init)\n",
383 (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
384 num_physpages << (PAGE_SHIFT-10),
385 codesize >> 10,
386 reservedpages << (PAGE_SHIFT-10),
387 datasize >> 10,
388 bsssize >> 10,
389 initsize >> 10);
390
391 mem_init_done = 1;
392
393#ifdef CONFIG_PPC64
394 /* Initialize the vDSO */
395 vdso_init();
396#endif
397}
398
399/*
400 * This is called when a page has been modified by the kernel.
401 * It just marks the page as not i-cache clean. We do the i-cache
402 * flush later when the page is given to a user process, if necessary.
403 */
404void flush_dcache_page(struct page *page)
405{
406 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
407 return;
408 /* avoid an atomic op if possible */
409 if (test_bit(PG_arch_1, &page->flags))
410 clear_bit(PG_arch_1, &page->flags);
411}
412EXPORT_SYMBOL(flush_dcache_page);
413
414void flush_dcache_icache_page(struct page *page)
415{
416#ifdef CONFIG_BOOKE
417 void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
418 __flush_dcache_icache(start);
419 kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
420#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 421	/* On 8xx and 64-bit there is no need to kmap since highmem is not supported */
422 __flush_dcache_icache(page_address(page));
423#else
424 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
425#endif
426
427}
428void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
429{
430 clear_page(page);
431
432 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
433 return;
434 /*
 435	 * We shouldn't have to do this, but some versions of glibc
436 * require it (ld.so assumes zero filled pages are icache clean)
437 * - Anton
438 */
439
440 /* avoid an atomic op if possible */
441 if (test_bit(PG_arch_1, &pg->flags))
442 clear_bit(PG_arch_1, &pg->flags);
443}
444EXPORT_SYMBOL(clear_user_page);
445
446void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
447 struct page *pg)
448{
449 copy_page(vto, vfrom);
450
451 /*
452 * We should be able to use the following optimisation, however
453 * there are two problems.
454 * Firstly a bug in some versions of binutils meant PLT sections
455 * were not marked executable.
456 * Secondly the first word in the GOT section is blrl, used
457 * to establish the GOT address. Until recently the GOT was
458 * not marked executable.
459 * - Anton
460 */
461#if 0
462 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
463 return;
464#endif
465
466 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
467 return;
468
469 /* avoid an atomic op if possible */
470 if (test_bit(PG_arch_1, &pg->flags))
471 clear_bit(PG_arch_1, &pg->flags);
472}
473
474void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
475 unsigned long addr, int len)
476{
477 unsigned long maddr;
478
479 maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
480 flush_icache_range(maddr, maddr + len);
481 kunmap(page);
482}
483EXPORT_SYMBOL(flush_icache_user_range);
484
485/*
486 * This is called at the end of handling a user page fault, when the
487 * fault has been handled by updating a PTE in the linux page tables.
488 * We use it to preload an HPTE into the hash table corresponding to
489 * the updated linux PTE.
490 *
491 * This must always be called with the mm->page_table_lock held
492 */
493void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
494 pte_t pte)
495{
496 /* handle i-cache coherency */
497 unsigned long pfn = pte_pfn(pte);
498#ifdef CONFIG_PPC32
499 pmd_t *pmd;
500#else
501 unsigned long vsid;
502 void *pgdir;
503 pte_t *ptep;
504 int local = 0;
505 cpumask_t tmp;
506 unsigned long flags;
507#endif
508
509 /* handle i-cache coherency */
510 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
511 !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
512 pfn_valid(pfn)) {
513 struct page *page = pfn_to_page(pfn);
514 if (!PageReserved(page)
515 && !test_bit(PG_arch_1, &page->flags)) {
516 if (vma->vm_mm == current->active_mm) {
517#ifdef CONFIG_8xx
518 /* On 8xx, cache control instructions (particularly
519 * "dcbst" from flush_dcache_icache) fault as write
520 * operation if there is an unpopulated TLB entry
521 * for the address in question. To workaround that,
522 * we invalidate the TLB here, thus avoiding dcbst
523 * misbehaviour.
524 */
525 _tlbie(address);
526#endif
527 __flush_dcache_icache((void *) address);
528 } else
529 flush_dcache_icache_page(page);
530 set_bit(PG_arch_1, &page->flags);
531 }
532 }
533
534#ifdef CONFIG_PPC_STD_MMU
535 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
536 if (!pte_young(pte) || address >= TASK_SIZE)
537 return;
538#ifdef CONFIG_PPC32
539 if (Hash == 0)
540 return;
541 pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
542 if (!pmd_none(*pmd))
543 add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
544#else
545 pgdir = vma->vm_mm->pgd;
546 if (pgdir == NULL)
547 return;
548
549 ptep = find_linux_pte(pgdir, address);
550 if (!ptep)
551 return;
552
553 vsid = get_vsid(vma->vm_mm->context.id, address);
554
555 local_irq_save(flags);
556 tmp = cpumask_of_cpu(smp_processor_id());
557 if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
558 local = 1;
559
560 __hash_page(address, 0, vsid, ptep, 0x300, local);
561 local_irq_restore(flags);
562#endif
563#endif
564}
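
do_init_bootmem() above sizes its bitmap at one bit per page: (Total Memory) / PAGE_SIZE / BITS_PER_BYTE bytes, rounded up to whole pages. A worked sketch of that calculation, assuming 4 KB pages (bootmap_pages() here is a hypothetical helper standing in for bootmem_bootmap_pages()):

#include <stdio.h>

#define PAGE_SHIFT    12                /* assumed 4 KB pages */
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define BITS_PER_BYTE 8

/* Pages needed for a bitmap with one bit per managed page. */
static unsigned long bootmap_pages(unsigned long total_pages)
{
	unsigned long bytes = (total_pages + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

	return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	unsigned long total_pages = (256UL << 20) >> PAGE_SHIFT; /* 256 MB RAM */

	/* 65536 pages -> 8192-byte bitmap -> 2 bitmap pages */
	printf("%lu bitmap pages\n", bootmap_pages(total_pages));
	return 0;
}
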
diff --git a/arch/ppc64/mm/mmap.c b/arch/powerpc/mm/mmap.c
index fe65f522aff3..fe65f522aff3 100644
--- a/arch/ppc64/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
new file mode 100644
index 000000000000..a8816e0f6a86
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -0,0 +1,86 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU substantially follows the
4 * architecture specification. This includes the 6xx, 7xx, 7xxx,
5 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/mm.h>
28#include <linux/init.h>
29
30#include <asm/mmu_context.h>
31#include <asm/tlbflush.h>
32
33mm_context_t next_mmu_context;
34unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
35#ifdef FEW_CONTEXTS
36atomic_t nr_free_contexts;
37struct mm_struct *context_mm[LAST_CONTEXT+1];
38void steal_context(void);
39#endif /* FEW_CONTEXTS */
40
41/*
42 * Initialize the context management stuff.
43 */
44void __init
45mmu_context_init(void)
46{
47 /*
48 * Some processors have too few contexts to reserve one for
49 * init_mm, and require using context 0 for a normal task.
50 * Other processors reserve the use of context zero for the kernel.
51 * This code assumes FIRST_CONTEXT < 32.
52 */
53 context_map[0] = (1 << FIRST_CONTEXT) - 1;
54 next_mmu_context = FIRST_CONTEXT;
55#ifdef FEW_CONTEXTS
56 atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
57#endif /* FEW_CONTEXTS */
58}
59
60#ifdef FEW_CONTEXTS
61/*
62 * Steal a context from a task that has one at the moment.
63 * This is only used on 8xx and 4xx and we presently assume that
64 * they don't do SMP. If they do then this will have to check
65 * whether the MM we steal is in use.
66 * We also assume that this is only used on systems that don't
67 * use an MMU hash table - this is true for 8xx and 4xx.
68 * This isn't an LRU system, it just frees up each context in
69 * turn (sort-of pseudo-random replacement :). This would be the
70 * place to implement an LRU scheme if anyone was motivated to do it.
71 * -- paulus
72 */
73void
74steal_context(void)
75{
76 struct mm_struct *mm;
77
78 /* free up context `next_mmu_context' */
79 /* if we shouldn't free context 0, don't... */
80 if (next_mmu_context < FIRST_CONTEXT)
81 next_mmu_context = FIRST_CONTEXT;
82 mm = context_mm[next_mmu_context];
83 flush_tlb_mm(mm);
84 destroy_context(mm);
85}
86#endif /* FEW_CONTEXTS */
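
mmu_context_init() above seeds the context bitmap so that everything below FIRST_CONTEXT is already marked busy: (1 << FIRST_CONTEXT) - 1 sets exactly that many low bits. A small standalone sketch of allocating from such a bitmap (FIRST_CONTEXT = 1 and a 16-context MMU are assumptions for the demo; the kernel uses its own bitmap helpers):

#include <stdio.h>

#define FIRST_CONTEXT 1                 /* assumed: context 0 reserved */
#define LAST_CONTEXT  15                /* assumed: 16 contexts total  */

static unsigned long context_map = (1UL << FIRST_CONTEXT) - 1;

/* Return the lowest free context and mark it busy, or -1 if none left. */
static int alloc_context(void)
{
	int ctx;

	for (ctx = FIRST_CONTEXT; ctx <= LAST_CONTEXT; ctx++) {
		if (!(context_map & (1UL << ctx))) {
			context_map |= 1UL << ctx;
			return ctx;
		}
	}
	return -1;
}

int main(void)
{
	int a = alloc_context();
	int b = alloc_context();

	/* Context 0 is reserved, so the first two callers get 1 and 2. */
	printf("ctx %d, ctx %d\n", a, b);
	return 0;
}
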
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
new file mode 100644
index 000000000000..714a84dd8d5d
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -0,0 +1,63 @@
1/*
2 * MMU context allocation for 64-bit kernels.
3 *
4 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/config.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/string.h>
18#include <linux/types.h>
19#include <linux/mm.h>
20#include <linux/spinlock.h>
21#include <linux/idr.h>
22
23#include <asm/mmu_context.h>
24
25static DEFINE_SPINLOCK(mmu_context_lock);
26static DEFINE_IDR(mmu_context_idr);
27
28int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
29{
30 int index;
31 int err;
32
33again:
34 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
35 return -ENOMEM;
36
37 spin_lock(&mmu_context_lock);
38 err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
39 spin_unlock(&mmu_context_lock);
40
41 if (err == -EAGAIN)
42 goto again;
43 else if (err)
44 return err;
45
46 if (index > MAX_CONTEXT) {
47 idr_remove(&mmu_context_idr, index);
48 return -ENOMEM;
49 }
50
51 mm->context.id = index;
52
53 return 0;
54}
55
56void destroy_context(struct mm_struct *mm)
57{
58 spin_lock(&mmu_context_lock);
59 idr_remove(&mmu_context_idr, mm->context.id);
60 spin_unlock(&mmu_context_lock);
61
62 mm->context.id = NO_CONTEXT;
63}
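
init_new_context() above allocates an id first and only afterwards checks it against MAX_CONTEXT, undoing the allocation if the limit was exceeded. A minimal sketch of that allocate/check/roll-back pattern, with a trivial counter standing in for the IDR (all names and the limit are illustrative):

#include <stdio.h>

#define MAX_CONTEXT 3                   /* assumed small limit for the demo */

static int next_id = 1;                 /* trivial stand-in for the IDR */

static int alloc_id(void)   { return next_id++; }
static void free_id(int id) { if (id == next_id - 1) next_id--; }

/* Allocate a context id, rolling back if it exceeds the limit. */
static int init_new_context_demo(int *ctx)
{
	int index = alloc_id();

	if (index > MAX_CONTEXT) {
		free_id(index);
		return -1;              /* -ENOMEM in the kernel version */
	}
	*ctx = index;
	return 0;
}

int main(void)
{
	int ctx;

	while (init_new_context_demo(&ctx) == 0)
		printf("got context %d\n", ctx);
	printf("out of contexts\n");
	return 0;
}
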
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
new file mode 100644
index 000000000000..a4d7a327c0e5
--- /dev/null
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -0,0 +1,87 @@
1/*
2 * Declarations of procedures and variables shared between files
3 * in arch/ppc/mm/.
4 *
5 * Derived from arch/ppc/mm/init.c:
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *
8 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
9 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
10 * Copyright (C) 1996 Paul Mackerras
11 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
12 *
13 * Derived from "arch/i386/mm/init.c"
14 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22#include <asm/tlbflush.h>
23#include <asm/mmu.h>
24
25#ifdef CONFIG_PPC32
26extern void mapin_ram(void);
27extern int map_page(unsigned long va, phys_addr_t pa, int flags);
28extern void setbat(int index, unsigned long virt, unsigned long phys,
29 unsigned int size, int flags);
30extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
31 unsigned int size, int flags, unsigned int pid);
32extern void invalidate_tlbcam_entry(int index);
33
34extern int __map_without_bats;
35extern unsigned long ioremap_base;
36extern unsigned long ioremap_bot;
37extern unsigned int rtas_data, rtas_size;
38
39extern PTE *Hash, *Hash_end;
40extern unsigned long Hash_size, Hash_mask;
41
42extern unsigned int num_tlbcam_entries;
43#endif
44
45extern unsigned long __max_low_memory;
46extern unsigned long __initial_memory_limit;
47extern unsigned long total_memory;
48extern unsigned long total_lowmem;
49
50/* ...and now those things that may be slightly different between processor
51 * architectures. -- Dan
52 */
53#if defined(CONFIG_8xx)
54#define flush_HPTE(X, va, pg) _tlbie(va)
55#define MMU_init_hw() do { } while(0)
56#define mmu_mapin_ram() (0UL)
57
58#elif defined(CONFIG_4xx)
59#define flush_HPTE(X, va, pg) _tlbie(va)
60extern void MMU_init_hw(void);
61extern unsigned long mmu_mapin_ram(void);
62
63#elif defined(CONFIG_FSL_BOOKE)
64#define flush_HPTE(X, va, pg) _tlbie(va)
65extern void MMU_init_hw(void);
66extern unsigned long mmu_mapin_ram(void);
67extern void adjust_total_lowmem(void);
68
69#elif defined(CONFIG_PPC32)
70/* anything 32-bit except 4xx or 8xx */
71extern void MMU_init_hw(void);
72extern unsigned long mmu_mapin_ram(void);
73
74/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
75 * which includes all new 82xx processors. We need tlbie/tlbsync here
76 * in that case (I think). -- Dan.
77 */
78static inline void flush_HPTE(unsigned context, unsigned long va,
79 unsigned long pdval)
80{
81 if ((Hash != 0) &&
82 cpu_has_feature(CPU_FTR_HPTE_TABLE))
83 flush_hash_pages(0, va, pdval, 1);
84 else
85 _tlbie(va);
86}
87#endif
diff --git a/arch/ppc64/mm/numa.c b/arch/powerpc/mm/numa.c
index cb864b8f2750..4035cad8d7f1 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -20,6 +20,7 @@
20#include <asm/lmb.h> 20#include <asm/lmb.h>
21#include <asm/machdep.h> 21#include <asm/machdep.h>
22#include <asm/abs_addr.h> 22#include <asm/abs_addr.h>
23#include <asm/system.h>
23 24
24static int numa_enabled = 1; 25static int numa_enabled = 1;
25 26
@@ -300,7 +301,6 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsig
300 * we've already adjusted it for the limit and it takes care of 301 * we've already adjusted it for the limit and it takes care of
301 * having memory holes below the limit. 302 * having memory holes below the limit.
302 */ 303 */
303 extern unsigned long memory_limit;
304 304
305 if (! memory_limit) 305 if (! memory_limit)
306 return size; 306 return size;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
new file mode 100644
index 000000000000..f4e5ac122615
--- /dev/null
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -0,0 +1,467 @@
1/*
2 * This file contains the routines setting up the linux page tables.
3 * -- paulus
4 *
5 * Derived from arch/ppc/mm/init.c:
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *
8 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
9 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
10 * Copyright (C) 1996 Paul Mackerras
11 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
12 *
13 * Derived from "arch/i386/mm/init.c"
14 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/mm.h>
28#include <linux/vmalloc.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31
32#include <asm/pgtable.h>
33#include <asm/pgalloc.h>
34#include <asm/io.h>
35
36#include "mmu_decl.h"
37
38unsigned long ioremap_base;
39unsigned long ioremap_bot;
40int io_bat_index;
41
42#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
43#define HAVE_BATS 1
44#endif
45
46#if defined(CONFIG_FSL_BOOKE)
47#define HAVE_TLBCAM 1
48#endif
49
50extern char etext[], _stext[];
51
52#ifdef CONFIG_SMP
53extern void hash_page_sync(void);
54#endif
55
56#ifdef HAVE_BATS
57extern unsigned long v_mapped_by_bats(unsigned long va);
58extern unsigned long p_mapped_by_bats(unsigned long pa);
59void setbat(int index, unsigned long virt, unsigned long phys,
60 unsigned int size, int flags);
61
62#else /* !HAVE_BATS */
63#define v_mapped_by_bats(x) (0UL)
64#define p_mapped_by_bats(x) (0UL)
65#endif /* HAVE_BATS */
66
67#ifdef HAVE_TLBCAM
68extern unsigned int tlbcam_index;
69extern unsigned long v_mapped_by_tlbcam(unsigned long va);
70extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
71#else /* !HAVE_TLBCAM */
72#define v_mapped_by_tlbcam(x) (0UL)
73#define p_mapped_by_tlbcam(x) (0UL)
74#endif /* HAVE_TLBCAM */
75
76#ifdef CONFIG_PTE_64BIT
77/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
78#define PGDIR_ORDER 1
79#else
80#define PGDIR_ORDER 0
81#endif
82
83pgd_t *pgd_alloc(struct mm_struct *mm)
84{
85 pgd_t *ret;
86
87 ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
88 return ret;
89}
90
91void pgd_free(pgd_t *pgd)
92{
93 free_pages((unsigned long)pgd, PGDIR_ORDER);
94}
95
96pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
97{
98 pte_t *pte;
99 extern int mem_init_done;
100 extern void *early_get_page(void);
101
102 if (mem_init_done) {
103 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
104 } else {
105 pte = (pte_t *)early_get_page();
106 if (pte)
107 clear_page(pte);
108 }
109 return pte;
110}
111
112struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
113{
114 struct page *ptepage;
115
116#ifdef CONFIG_HIGHPTE
117 gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
118#else
119 gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
120#endif
121
122 ptepage = alloc_pages(flags, 0);
123 if (ptepage)
124 clear_highpage(ptepage);
125 return ptepage;
126}
127
128void pte_free_kernel(pte_t *pte)
129{
130#ifdef CONFIG_SMP
131 hash_page_sync();
132#endif
133 free_page((unsigned long)pte);
134}
135
136void pte_free(struct page *ptepage)
137{
138#ifdef CONFIG_SMP
139 hash_page_sync();
140#endif
141 __free_page(ptepage);
142}
143
144#ifndef CONFIG_PHYS_64BIT
145void __iomem *
146ioremap(phys_addr_t addr, unsigned long size)
147{
148 return __ioremap(addr, size, _PAGE_NO_CACHE);
149}
150#else /* CONFIG_PHYS_64BIT */
151void __iomem *
152ioremap64(unsigned long long addr, unsigned long size)
153{
154 return __ioremap(addr, size, _PAGE_NO_CACHE);
155}
156
157void __iomem *
158ioremap(phys_addr_t addr, unsigned long size)
159{
160 phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
161
162 return ioremap64(addr64, size);
163}
164#endif /* CONFIG_PHYS_64BIT */
165
166void __iomem *
167__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
168{
169 unsigned long v, i;
170 phys_addr_t p;
171 int err;
172
173 /*
174 * Choose an address to map it to.
175 * Once the vmalloc system is running, we use it.
176 * Before then, we use space going down from ioremap_base
177 * (ioremap_bot records where we're up to).
178 */
179 p = addr & PAGE_MASK;
180 size = PAGE_ALIGN(addr + size) - p;
181
182 /*
183 * If the address lies within the first 16 MB, assume it's in ISA
184 * memory space
185 */
186 if (p < 16*1024*1024)
187 p += _ISA_MEM_BASE;
188
189 /*
190 * Don't allow anybody to remap normal RAM that we're using.
191 * mem_init() sets high_memory so only do the check after that.
192 */
193 if (mem_init_done && (p < virt_to_phys(high_memory))) {
194 printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
195 __builtin_return_address(0));
196 return NULL;
197 }
198
199 if (size == 0)
200 return NULL;
201
202 /*
203 * Is it already mapped? Perhaps overlapped by a previous
204 * BAT mapping. If the whole area is mapped then we're done,
205 * otherwise remap it since we want to keep the virt addrs for
206 * each request contiguous.
207 *
208 * We make the assumption here that if the bottom and top
209 * of the range we want are mapped then it's mapped to the
210 * same virt address (and this is contiguous).
211 * -- Cort
212 */
213 if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
214 goto out;
215
216 if ((v = p_mapped_by_tlbcam(p)))
217 goto out;
218
219 if (mem_init_done) {
220 struct vm_struct *area;
221 area = get_vm_area(size, VM_IOREMAP);
222 if (area == 0)
223 return NULL;
224 v = (unsigned long) area->addr;
225 } else {
226 v = (ioremap_bot -= size);
227 }
228
229 if ((flags & _PAGE_PRESENT) == 0)
230 flags |= _PAGE_KERNEL;
231 if (flags & _PAGE_NO_CACHE)
232 flags |= _PAGE_GUARDED;
233
234 /*
235 * Should check if it is a candidate for a BAT mapping
236 */
237
238 err = 0;
239 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
240 err = map_page(v+i, p+i, flags);
241 if (err) {
242 if (mem_init_done)
243 vunmap((void *)v);
244 return NULL;
245 }
246
247out:
248 return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
249}
250
251void iounmap(volatile void __iomem *addr)
252{
253 /*
254 * If mapped by BATs then there is nothing to do.
255 * Calling vfree() generates a benign warning.
256 */
257 if (v_mapped_by_bats((unsigned long)addr)) return;
258
259 if (addr > high_memory && (unsigned long) addr < ioremap_bot)
260 vunmap((void *) (PAGE_MASK & (unsigned long)addr));
261}
262
263void __iomem *ioport_map(unsigned long port, unsigned int len)
264{
265 return (void __iomem *) (port + _IO_BASE);
266}
267
268void ioport_unmap(void __iomem *addr)
269{
270 /* Nothing to do */
271}
272EXPORT_SYMBOL(ioport_map);
273EXPORT_SYMBOL(ioport_unmap);
274
275int
276map_page(unsigned long va, phys_addr_t pa, int flags)
277{
278 pmd_t *pd;
279 pte_t *pg;
280 int err = -ENOMEM;
281
282 /* Use upper 10 bits of VA to index the first level map */
283 pd = pmd_offset(pgd_offset_k(va), va);
284 /* Use middle 10 bits of VA to index the second-level map */
285 pg = pte_alloc_kernel(pd, va);
286 if (pg != 0) {
287 err = 0;
288 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
289 if (mem_init_done)
290 flush_HPTE(0, va, pmd_val(*pd));
291 }
292 return err;
293}
294
295/*
296 * Map in all of physical memory starting at KERNELBASE.
297 */
298void __init mapin_ram(void)
299{
300 unsigned long v, p, s, f;
301
302 s = mmu_mapin_ram();
303 v = KERNELBASE + s;
304 p = PPC_MEMSTART + s;
305 for (; s < total_lowmem; s += PAGE_SIZE) {
306 if ((char *) v >= _stext && (char *) v < etext)
307 f = _PAGE_RAM_TEXT;
308 else
309 f = _PAGE_RAM;
310 map_page(v, p, f);
311 v += PAGE_SIZE;
312 p += PAGE_SIZE;
313 }
314}
315
316/* is x a power of 2? */
317#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
318
319/* is x a power of 4? */
320#define is_power_of_4(x) ((x) != 0 && (((x) & (x-1)) == 0) && (ffs(x) & 1))
321
322/*
323 * Set up a mapping for a block of I/O.
324 * virt, phys, size must all be page-aligned.
325 * This should only be called before ioremap is called.
326 */
327void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
328 unsigned int size, int flags)
329{
330 int i;
331
332 if (virt > KERNELBASE && virt < ioremap_bot)
333 ioremap_bot = ioremap_base = virt;
334
335#ifdef HAVE_BATS
336 /*
337 * Use a BAT for this if possible...
338 */
339 if (io_bat_index < 2 && is_power_of_2(size)
340 && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
341 setbat(io_bat_index, virt, phys, size, flags);
342 ++io_bat_index;
343 return;
344 }
345#endif /* HAVE_BATS */
346
347#ifdef HAVE_TLBCAM
348 /*
349 * Use a CAM for this if possible...
350 */
351 if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
352 && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
353 settlbcam(tlbcam_index, virt, phys, size, flags, 0);
354 ++tlbcam_index;
355 return;
356 }
357#endif /* HAVE_TLBCAM */
358
359 /* No BATs available, put it in the page tables. */
360 for (i = 0; i < size; i += PAGE_SIZE)
361 map_page(virt + i, phys + i, flags);
362}
363
364/* Scan the real Linux page tables and return a PTE pointer for
365 * a virtual address in a context.
366 * Returns true (1) if PTE was found, zero otherwise. The pointer to
367 * the PTE pointer is unmodified if PTE is not found.
368 */
369int
370get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
371{
372 pgd_t *pgd;
373 pmd_t *pmd;
374 pte_t *pte;
375 int retval = 0;
376
377 pgd = pgd_offset(mm, addr & PAGE_MASK);
378 if (pgd) {
379 pmd = pmd_offset(pgd, addr & PAGE_MASK);
380 if (pmd_present(*pmd)) {
381 pte = pte_offset_map(pmd, addr & PAGE_MASK);
382 if (pte) {
383 retval = 1;
384 *ptep = pte;
385 /* XXX caller needs to do pte_unmap, yuck */
386 }
387 }
388 }
389 return(retval);
390}
391
392/* Find physical address for this virtual address. Normally used by
393 * I/O functions, but anyone can call it.
394 */
395unsigned long iopa(unsigned long addr)
396{
397 unsigned long pa;
398
399 /* I don't know why this won't work on PMacs or CHRP. It
400 * appears there is some bug, or there is some implicit
401 * mapping done not properly represented by BATs or in page
402 * tables.......I am actively working on resolving this, but
403 * can't hold up other stuff. -- Dan
404 */
405 pte_t *pte;
406 struct mm_struct *mm;
407
408 /* Check the BATs */
409 pa = v_mapped_by_bats(addr);
410 if (pa)
411 return pa;
412
413 /* Allow mapping of user addresses (within the thread)
414 * for DMA if necessary.
415 */
416 if (addr < TASK_SIZE)
417 mm = current->mm;
418 else
419 mm = &init_mm;
420
421 pa = 0;
422 if (get_pteptr(mm, addr, &pte)) {
423 pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
424 pte_unmap(pte);
425 }
426
427 return(pa);
428}
429
430/* This will find the virtual address for a physical one....
431 * Swiped from APUS, could be dangerous :-).
432 * This is only a placeholder until I really find a way to make this
433 * work. -- Dan
434 */
435unsigned long
436mm_ptov (unsigned long paddr)
437{
438 unsigned long ret;
439#if 0
440 if (paddr < 16*1024*1024)
441 ret = ZTWO_VADDR(paddr);
442 else {
443 int i;
444
445 for (i = 0; i < kmap_chunk_count;){
446 unsigned long phys = kmap_chunks[i++];
447 unsigned long size = kmap_chunks[i++];
448 unsigned long virt = kmap_chunks[i++];
449 if (paddr >= phys
450 && paddr < (phys + size)){
451 ret = virt + paddr - phys;
452 goto exit;
453 }
454 }
455
456 ret = (unsigned long) __va(paddr);
457 }
458exit:
459#ifdef DEBUGPV
460 printk ("PTOV(%lx)=%lx\n", paddr, ret);
461#endif
462#else
463 ret = (unsigned long)paddr + KERNELBASE;
464#endif
465 return ret;
466}
467
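
io_block_mapping() above only uses a TLB CAM entry when the size is a power of 4: the macro first checks power-of-two-ness, then that the set bit sits at an odd 1-based position as reported by ffs(). A quick standalone check of a few sizes under those assumptions:

#include <stdio.h>
#include <strings.h>                    /* ffs() */

#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
#define is_power_of_4(x) (is_power_of_2(x) && (ffs(x) & 1))

int main(void)
{
	unsigned int sizes[] = { 0x1000, 0x2000, 0x4000, 0x100000, 0x400000 };
	int i;

	/* 0x2000 (8 KB) is a power of 2 but not of 4; the rest are both. */
	for (i = 0; i < 5; i++)
		printf("0x%x: pow2=%d pow4=%d\n", sizes[i],
		       is_power_of_2(sizes[i]), is_power_of_4(sizes[i]));
	return 0;
}
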
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
new file mode 100644
index 000000000000..b79a78206135
--- /dev/null
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -0,0 +1,347 @@
1/*
2 * This file contains ioremap and related functions for 64-bit machines.
3 *
4 * Derived from arch/ppc64/mm/init.c
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
8 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
9 * Copyright (C) 1996 Paul Mackerras
10 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
11 *
12 * Derived from "arch/i386/mm/init.c"
13 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
14 *
15 * Dave Engebretsen <engebret@us.ibm.com>
16 * Rework for PPC64 port.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/config.h>
26#include <linux/signal.h>
27#include <linux/sched.h>
28#include <linux/kernel.h>
29#include <linux/errno.h>
30#include <linux/string.h>
31#include <linux/types.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/swap.h>
35#include <linux/stddef.h>
36#include <linux/vmalloc.h>
37#include <linux/init.h>
38#include <linux/delay.h>
39#include <linux/bootmem.h>
40#include <linux/highmem.h>
41#include <linux/idr.h>
42#include <linux/nodemask.h>
43#include <linux/module.h>
44
45#include <asm/pgalloc.h>
46#include <asm/page.h>
47#include <asm/prom.h>
48#include <asm/lmb.h>
49#include <asm/rtas.h>
50#include <asm/io.h>
51#include <asm/mmu_context.h>
52#include <asm/pgtable.h>
53#include <asm/mmu.h>
54#include <asm/uaccess.h>
55#include <asm/smp.h>
56#include <asm/machdep.h>
57#include <asm/tlb.h>
58#include <asm/eeh.h>
59#include <asm/processor.h>
60#include <asm/mmzone.h>
61#include <asm/cputable.h>
62#include <asm/ppcdebug.h>
63#include <asm/sections.h>
64#include <asm/system.h>
65#include <asm/iommu.h>
66#include <asm/abs_addr.h>
67#include <asm/vdso.h>
68#include <asm/imalloc.h>
69
70unsigned long ioremap_bot = IMALLOC_BASE;
71static unsigned long phbs_io_bot = PHBS_IO_BASE;
72
73#ifdef CONFIG_PPC_ISERIES
74
75void __iomem *ioremap(unsigned long addr, unsigned long size)
76{
77 return (void __iomem *)addr;
78}
79
80extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
81 unsigned long flags)
82{
83 return (void __iomem *)addr;
84}
85
86void iounmap(volatile void __iomem *addr)
87{
88 return;
89}
90
91#else
92
93/*
 94 * map_io_page is currently only called by __ioremap
95 * map_io_page adds an entry to the ioremap page table
96 * and adds an entry to the HPT, possibly bolting it
97 */
98static int map_io_page(unsigned long ea, unsigned long pa, int flags)
99{
100 pgd_t *pgdp;
101 pud_t *pudp;
102 pmd_t *pmdp;
103 pte_t *ptep;
104 unsigned long vsid;
105
106 if (mem_init_done) {
107 pgdp = pgd_offset_k(ea);
108 pudp = pud_alloc(&init_mm, pgdp, ea);
109 if (!pudp)
110 return -ENOMEM;
111 pmdp = pmd_alloc(&init_mm, pudp, ea);
112 if (!pmdp)
113 return -ENOMEM;
114 ptep = pte_alloc_kernel(pmdp, ea);
115 if (!ptep)
116 return -ENOMEM;
117 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
118 __pgprot(flags)));
119 } else {
120 unsigned long va, vpn, hash, hpteg;
121
122 /*
123 * If the mm subsystem is not fully up, we cannot create a
124 * linux page table entry for this mapping. Simply bolt an
125 * entry in the hardware page table.
126 */
127 vsid = get_kernel_vsid(ea);
128 va = (vsid << 28) | (ea & 0xFFFFFFF);
129 vpn = va >> PAGE_SHIFT;
130
131 hash = hpt_hash(vpn, 0);
132
133 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
134
 135 /* Panic if a pte group is full */
136 if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
137 HPTE_V_BOLTED,
138 _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
139 == -1) {
140 panic("map_io_page: could not insert mapping");
141 }
142 }
143 return 0;
144}
145
146
147static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
148 unsigned long ea, unsigned long size,
149 unsigned long flags)
150{
151 unsigned long i;
152
153 if ((flags & _PAGE_PRESENT) == 0)
154 flags |= pgprot_val(PAGE_KERNEL);
155
156 for (i = 0; i < size; i += PAGE_SIZE)
157 if (map_io_page(ea+i, pa+i, flags))
158 return NULL;
159
160 return (void __iomem *) (ea + (addr & ~PAGE_MASK));
161}
162
163
164void __iomem *
165ioremap(unsigned long addr, unsigned long size)
166{
167 return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
168}
169
170void __iomem * __ioremap(unsigned long addr, unsigned long size,
171 unsigned long flags)
172{
173 unsigned long pa, ea;
174 void __iomem *ret;
175
176 /*
177 * Choose an address to map it to.
178 * Once the imalloc system is running, we use it.
179 * Before that, we map using addresses going
180 * up from ioremap_bot. imalloc will use
181 * the addresses from ioremap_bot through
182 * IMALLOC_END
183 *
184 */
185 pa = addr & PAGE_MASK;
186 size = PAGE_ALIGN(addr + size) - pa;
187
188 if (size == 0)
189 return NULL;
190
191 if (mem_init_done) {
192 struct vm_struct *area;
193 area = im_get_free_area(size);
194 if (area == NULL)
195 return NULL;
196 ea = (unsigned long)(area->addr);
197 ret = __ioremap_com(addr, pa, ea, size, flags);
198 if (!ret)
199 im_free(area->addr);
200 } else {
201 ea = ioremap_bot;
202 ret = __ioremap_com(addr, pa, ea, size, flags);
203 if (ret)
204 ioremap_bot += size;
205 }
206 return ret;
207}
208
209#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
210
211int __ioremap_explicit(unsigned long pa, unsigned long ea,
212 unsigned long size, unsigned long flags)
213{
214 struct vm_struct *area;
215 void __iomem *ret;
216
217 /* For now, require page-aligned values for pa, ea, and size */
218 if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
219 !IS_PAGE_ALIGNED(size)) {
220 printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
221 return 1;
222 }
223
224 if (!mem_init_done) {
225 /* Two things to consider in this case:
226 * 1) No records will be kept (imalloc, etc) that the region
227 * has been remapped
228 * 2) It won't be easy to iounmap() the region later (because
229 * of 1)
230 */
231 ;
232 } else {
233 area = im_get_area(ea, size,
234 IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
235 if (area == NULL) {
236 /* Expected when PHB-dlpar is in play */
237 return 1;
238 }
239 if (ea != (unsigned long) area->addr) {
240 printk(KERN_ERR "unexpected addr return from "
241 "im_get_area\n");
242 return 1;
243 }
244 }
245
246 ret = __ioremap_com(pa, pa, ea, size, flags);
247 if (ret == NULL) {
248 printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
249 return 1;
250 }
251 if (ret != (void *) ea) {
252 printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
253 return 1;
254 }
255
256 return 0;
257}
258
259/*
260 * Unmap an IO region and remove it from imalloc'd list.
261 * Access to IO memory should be serialized by driver.
262 * This code is modeled after vmalloc code - unmap_vm_area()
263 *
264 * XXX what about calls before mem_init_done (ie python_countermeasures())
265 */
266void iounmap(volatile void __iomem *token)
267{
268 void *addr;
269
270 if (!mem_init_done)
271 return;
272
273 addr = (void *) ((unsigned long __force) token & PAGE_MASK);
274
275 im_free(addr);
276}
277
278static int iounmap_subset_regions(unsigned long addr, unsigned long size)
279{
280 struct vm_struct *area;
281
282 /* Check whether subsets of this region exist */
283 area = im_get_area(addr, size, IM_REGION_SUPERSET);
284 if (area == NULL)
285 return 1;
286
287 while (area) {
288 iounmap((void __iomem *) area->addr);
289 area = im_get_area(addr, size,
290 IM_REGION_SUPERSET);
291 }
292
293 return 0;
294}
295
296int iounmap_explicit(volatile void __iomem *start, unsigned long size)
297{
298 struct vm_struct *area;
299 unsigned long addr;
300 int rc;
301
302 addr = (unsigned long __force) start & PAGE_MASK;
303
304 /* Verify that the region either exists or is a subset of an existing
305 * region. In the latter case, split the parent region to create
306 * the exact region
307 */
308 area = im_get_area(addr, size,
309 IM_REGION_EXISTS | IM_REGION_SUBSET);
310 if (area == NULL) {
311 /* Determine whether subset regions exist. If so, unmap */
312 rc = iounmap_subset_regions(addr, size);
313 if (rc) {
314 printk(KERN_ERR
315 "%s() cannot unmap nonexistent range 0x%lx\n",
316 __FUNCTION__, addr);
317 return 1;
318 }
319 } else {
320 iounmap((void __iomem *) area->addr);
321 }
322 /*
323 * FIXME! This can't be right:
324 iounmap(area->addr);
325 * Maybe it should be "iounmap(area);"
326 */
327 return 0;
328}
329
330#endif
331
332EXPORT_SYMBOL(ioremap);
333EXPORT_SYMBOL(__ioremap);
334EXPORT_SYMBOL(iounmap);
335
336void __iomem * reserve_phb_iospace(unsigned long size)
337{
338 void __iomem *virt_addr;
339
340 if (phbs_io_bot >= IMALLOC_BASE)
341 panic("reserve_phb_iospace(): phb io space overflow\n");
342
343 virt_addr = (void __iomem *) phbs_io_bot;
344 phbs_io_bot += size;
345
346 return virt_addr;
347}
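__ioremap() above takes its mapping address from the imalloc allocator once mem_init_done is set, and before that simply bumps ioremap_bot; either way map_io_page() installs the translations, and plain ioremap() maps the range non-cacheable and guarded. A minimal driver-side sketch, not part of this patch, of the ioremap()/iounmap() pairing this code services (the physical base, size and register offset are made up):

#include <linux/errno.h>
#include <asm/io.h>

static int example_map_regs(unsigned long phys_base)
{
	/* map 4 kB of device registers, non-cacheable and guarded */
	void __iomem *regs = ioremap(phys_base, 0x1000);

	if (!regs)
		return -ENOMEM;
	out_be32(regs + 0x10, 0x1);	/* hypothetical register write */
	iounmap(regs);
	return 0;
}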
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
new file mode 100644
index 000000000000..cef9e83cc7e9
--- /dev/null
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -0,0 +1,285 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU substantially follows the
4 * architecture specification. This includes the 6xx, 7xx, 7xxx,
5 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/kernel.h>
28#include <linux/mm.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31
32#include <asm/prom.h>
33#include <asm/mmu.h>
34#include <asm/machdep.h>
35#include <asm/lmb.h>
36
37#include "mmu_decl.h"
38
39PTE *Hash, *Hash_end;
40unsigned long Hash_size, Hash_mask;
41unsigned long _SDR1;
42
43union ubat { /* BAT register values to be loaded */
44 BAT bat;
45#ifdef CONFIG_PPC64BRIDGE
46 u64 word[2];
47#else
48 u32 word[2];
49#endif
50} BATS[4][2]; /* 4 pairs of IBAT, DBAT */
51
52struct batrange { /* stores address ranges mapped by BATs */
53 unsigned long start;
54 unsigned long limit;
55 unsigned long phys;
56} bat_addrs[4];
57
58/*
59 * Return PA for this VA if it is mapped by a BAT, or 0
60 */
61unsigned long v_mapped_by_bats(unsigned long va)
62{
63 int b;
64 for (b = 0; b < 4; ++b)
65 if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
66 return bat_addrs[b].phys + (va - bat_addrs[b].start);
67 return 0;
68}
69
70/*
71 * Return VA for a given PA or 0 if not mapped
72 */
73unsigned long p_mapped_by_bats(unsigned long pa)
74{
75 int b;
76 for (b = 0; b < 4; ++b)
77 if (pa >= bat_addrs[b].phys
78 && pa < (bat_addrs[b].limit-bat_addrs[b].start)
79 +bat_addrs[b].phys)
80 return bat_addrs[b].start+(pa-bat_addrs[b].phys);
81 return 0;
82}
83
84unsigned long __init mmu_mapin_ram(void)
85{
86#ifdef CONFIG_POWER4
87 return 0;
88#else
89 unsigned long tot, bl, done;
90 unsigned long max_size = (256<<20);
91 unsigned long align;
92
93 if (__map_without_bats)
94 return 0;
95
96 /* Set up BAT2 and if necessary BAT3 to cover RAM. */
97
98 /* Make sure we don't map a block larger than the
99 smallest alignment of the physical address. */
100 /* alignment of PPC_MEMSTART */
101 align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
102 /* set BAT block size to MIN(max_size, align) */
103 if (align && align < max_size)
104 max_size = align;
105
106 tot = total_lowmem;
107 for (bl = 128<<10; bl < max_size; bl <<= 1) {
108 if (bl * 2 > tot)
109 break;
110 }
111
112 setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
113 done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
114 if ((done < tot) && !bat_addrs[3].limit) {
115 /* use BAT3 to cover a bit more */
116 tot -= done;
117 for (bl = 128<<10; bl < max_size; bl <<= 1)
118 if (bl * 2 > tot)
119 break;
120 setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
121 done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
122 }
123
124 return done;
125#endif
126}
127
128/*
129 * Set up one of the I/D BAT (block address translation) register pairs.
130 * The parameters are not checked; in particular size must be a power
131 * of 2 between 128k and 256M.
132 */
133void __init setbat(int index, unsigned long virt, unsigned long phys,
134 unsigned int size, int flags)
135{
136 unsigned int bl;
137 int wimgxpp;
138 union ubat *bat = BATS[index];
139
140 if (((flags & _PAGE_NO_CACHE) == 0) &&
141 cpu_has_feature(CPU_FTR_NEED_COHERENT))
142 flags |= _PAGE_COHERENT;
143
144 bl = (size >> 17) - 1;
145 if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
146 /* 603, 604, etc. */
147 /* Do DBAT first */
148 wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
149 | _PAGE_COHERENT | _PAGE_GUARDED);
150 wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
151 bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
152 bat[1].word[1] = phys | wimgxpp;
153#ifndef CONFIG_KGDB /* want user access for breakpoints */
154 if (flags & _PAGE_USER)
155#endif
156 bat[1].bat.batu.vp = 1;
157 if (flags & _PAGE_GUARDED) {
158 /* G bit must be zero in IBATs */
159 bat[0].word[0] = bat[0].word[1] = 0;
160 } else {
161 /* make IBAT same as DBAT */
162 bat[0] = bat[1];
163 }
164 } else {
165 /* 601 cpu */
166 if (bl > BL_8M)
167 bl = BL_8M;
168 wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
169 | _PAGE_COHERENT);
170 wimgxpp |= (flags & _PAGE_RW)?
171 ((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
172 bat->word[0] = virt | wimgxpp | 4; /* Ks=0, Ku=1 */
173 bat->word[1] = phys | bl | 0x40; /* V=1 */
174 }
175
176 bat_addrs[index].start = virt;
177 bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
178 bat_addrs[index].phys = phys;
179}
180
181/*
182 * Initialize the hash table and patch the instructions in hashtable.S.
183 */
184void __init MMU_init_hw(void)
185{
186 unsigned int hmask, mb, mb2;
187 unsigned int n_hpteg, lg_n_hpteg;
188
189 extern unsigned int hash_page_patch_A[];
190 extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
191 extern unsigned int hash_page[];
192 extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
193
194 if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
195 /*
196 * Put a blr (procedure return) instruction at the
197 * start of hash_page, since we can still get DSI
198 * exceptions on a 603.
199 */
200 hash_page[0] = 0x4e800020;
201 flush_icache_range((unsigned long) &hash_page[0],
202 (unsigned long) &hash_page[1]);
203 return;
204 }
205
206 if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
207
208#ifdef CONFIG_PPC64BRIDGE
209#define LG_HPTEG_SIZE 7 /* 128 bytes per HPTEG */
210#define SDR1_LOW_BITS (lg_n_hpteg - 11)
211#define MIN_N_HPTEG 2048 /* min 256kB hash table */
212#else
213#define LG_HPTEG_SIZE 6 /* 64 bytes per HPTEG */
214#define SDR1_LOW_BITS ((n_hpteg - 1) >> 10)
215#define MIN_N_HPTEG 1024 /* min 64kB hash table */
216#endif
217
218 /*
219 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
220 * This is less than the recommended amount, but then
221 * Linux ain't AIX.
222 */
223 n_hpteg = total_memory / (PAGE_SIZE * 8);
224 if (n_hpteg < MIN_N_HPTEG)
225 n_hpteg = MIN_N_HPTEG;
226 lg_n_hpteg = __ilog2(n_hpteg);
227 if (n_hpteg & (n_hpteg - 1)) {
228 ++lg_n_hpteg; /* round up if not power of 2 */
229 n_hpteg = 1 << lg_n_hpteg;
230 }
231 Hash_size = n_hpteg << LG_HPTEG_SIZE;
232
233 /*
234 * Find some memory for the hash table.
235 */
236 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
237 Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
238 __initial_memory_limit));
239 cacheable_memzero(Hash, Hash_size);
240 _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
241
242 Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
243
244 printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
245 total_memory >> 20, Hash_size >> 10, Hash);
246
247
248 /*
249 * Patch up the instructions in hashtable.S:create_hpte
250 */
251 if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
252 Hash_mask = n_hpteg - 1;
253 hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
254 mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
255 if (lg_n_hpteg > 16)
256 mb2 = 16 - LG_HPTEG_SIZE;
257
258 hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
259 | ((unsigned int)(Hash) >> 16);
260 hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
261 hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
262 hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
263 hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
264
265 /*
266 * Ensure that the locations we've patched have been written
267 * out from the data cache and invalidated in the instruction
268 * cache, on those machines with split caches.
269 */
270 flush_icache_range((unsigned long) &hash_page_patch_A[0],
271 (unsigned long) &hash_page_patch_C[1]);
272
273 /*
274 * Patch up the instructions in hashtable.S:flush_hash_page
275 */
276 flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
277 | ((unsigned int)(Hash) >> 16);
278 flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
279 flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
280 flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
281 flush_icache_range((unsigned long) &flush_hash_patch_A[0],
282 (unsigned long) &flush_hash_patch_B[1]);
283
284 if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
285}
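MMU_init_hw() above sizes the hash table at one HPTE per page of RAM (an eighth of an HPTEG), clamps it to MIN_N_HPTEG and rounds it up to a power of two before patching the constants into hashtable.S. A worked example of that arithmetic, not part of this patch, for the non-PPC64BRIDGE case (64-byte HPTEGs) assuming 256 MB of RAM and 4 kB pages, which comes out to 8192 groups and a 512 kB table:

#include <stdio.h>

int main(void)
{
	unsigned long total_memory = 256UL << 20;	/* assumed RAM size */
	unsigned long n_hpteg = total_memory / (4096 * 8);
	unsigned long lg = 0;

	if (n_hpteg < 1024)		/* MIN_N_HPTEG for 64-byte HPTEGs */
		n_hpteg = 1024;
	while ((1UL << lg) < n_hpteg)	/* round up to a power of two */
		++lg;
	n_hpteg = 1UL << lg;
	printf("n_hpteg=%lu Hash_size=%lu kB\n",
	       n_hpteg, (n_hpteg << 6) >> 10);	/* prints 8192, 512 kB */
	return 0;
}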
diff --git a/arch/ppc64/mm/slb.c b/arch/powerpc/mm/slb.c
index 0473953f6a37..0473953f6a37 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
diff --git a/arch/ppc64/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index a3a03da503bc..a3a03da503bc 100644
--- a/arch/ppc64/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
diff --git a/arch/ppc64/mm/stab.c b/arch/powerpc/mm/stab.c
index 1b83f002bf27..1b83f002bf27 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
new file mode 100644
index 000000000000..6c3dc3c44c86
--- /dev/null
+++ b/arch/powerpc/mm/tlb_32.c
@@ -0,0 +1,183 @@
1/*
2 * This file contains the routines for TLB flushing.
3 * On machines where the MMU uses a hash table to store virtual to
4 * physical translations, these routines flush entries from the
5 * hash table also.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/kernel.h>
28#include <linux/mm.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31#include <asm/tlbflush.h>
32#include <asm/tlb.h>
33
34#include "mmu_decl.h"
35
36/*
37 * Called when unmapping pages to flush entries from the TLB/hash table.
38 */
39void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
40{
41 unsigned long ptephys;
42
43 if (Hash != 0) {
44 ptephys = __pa(ptep) & PAGE_MASK;
45 flush_hash_pages(mm->context, addr, ptephys, 1);
46 }
47}
48
49/*
50 * Called by ptep_set_access_flags, must flush on CPUs for which the
51 * DSI handler can't just "fixup" the TLB on a write fault
52 */
53void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
54{
55 if (Hash != 0)
56 return;
57 _tlbie(addr);
58}
59
60/*
61 * Called at the end of a mmu_gather operation to make sure the
62 * TLB flush is completely done.
63 */
64void tlb_flush(struct mmu_gather *tlb)
65{
66 if (Hash == 0) {
67 /*
68 * 603 needs to flush the whole TLB here since
69 * it doesn't use a hash table.
70 */
71 _tlbia();
72 }
73}
74
75/*
76 * TLB flushing:
77 *
78 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
79 * - flush_tlb_page(vma, vmaddr) flushes one page
80 * - flush_tlb_range(vma, start, end) flushes a range of pages
81 * - flush_tlb_kernel_range(start, end) flushes kernel pages
82 *
83 * since the hardware hash table functions as an extension of the
84 * tlb as far as the linux tables are concerned, flush it too.
85 * -- Cort
86 */
87
88/*
89 * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
90 * the cache operations on the bus. Hence we need to use an IPI
91 * to get the other CPU(s) to invalidate their TLBs.
92 */
93#ifdef CONFIG_SMP_750
94#define FINISH_FLUSH smp_send_tlb_invalidate(0)
95#else
96#define FINISH_FLUSH do { } while (0)
97#endif
98
99static void flush_range(struct mm_struct *mm, unsigned long start,
100 unsigned long end)
101{
102 pmd_t *pmd;
103 unsigned long pmd_end;
104 int count;
105 unsigned int ctx = mm->context;
106
107 if (Hash == 0) {
108 _tlbia();
109 return;
110 }
111 start &= PAGE_MASK;
112 if (start >= end)
113 return;
114 end = (end - 1) | ~PAGE_MASK;
115 pmd = pmd_offset(pgd_offset(mm, start), start);
116 for (;;) {
117 pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
118 if (pmd_end > end)
119 pmd_end = end;
120 if (!pmd_none(*pmd)) {
121 count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
122 flush_hash_pages(ctx, start, pmd_val(*pmd), count);
123 }
124 if (pmd_end == end)
125 break;
126 start = pmd_end + 1;
127 ++pmd;
128 }
129}
130
131/*
132 * Flush kernel TLB entries in the given range
133 */
134void flush_tlb_kernel_range(unsigned long start, unsigned long end)
135{
136 flush_range(&init_mm, start, end);
137 FINISH_FLUSH;
138}
139
140/*
141 * Flush all the (user) entries for the address space described by mm.
142 */
143void flush_tlb_mm(struct mm_struct *mm)
144{
145 struct vm_area_struct *mp;
146
147 if (Hash == 0) {
148 _tlbia();
149 return;
150 }
151
152 for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
153 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
154 FINISH_FLUSH;
155}
156
157void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
158{
159 struct mm_struct *mm;
160 pmd_t *pmd;
161
162 if (Hash == 0) {
163 _tlbie(vmaddr);
164 return;
165 }
166 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
167 pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
168 if (!pmd_none(*pmd))
169 flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
170 FINISH_FLUSH;
171}
172
173/*
174 * For each address in the range, find the pte for the address
175 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
176 * the corresponding HPTE.
177 */
178void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
179 unsigned long end)
180{
181 flush_range(vma->vm_mm, start, end);
182 FINISH_FLUSH;
183}
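The block comment above documents the 32-bit flushing interface (flush_tlb_mm(), flush_tlb_page(), flush_tlb_range(), flush_tlb_kernel_range()) and notes that on hash MMUs the hash table is flushed along with the TLB. A minimal caller-side sketch, not part of this patch, of how generic code typically drives it after rewriting PTEs in a range (the wrapper function and its arguments are hypothetical):

#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_protect_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end)
{
	/* ... the Linux PTEs for [start, end) have just been changed ... */
	flush_tlb_range(vma, start, end);	/* drop stale TLB/hash entries */
}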
diff --git a/arch/ppc64/mm/tlb.c b/arch/powerpc/mm/tlb_64.c
index 21fbffb23a43..09ab81a10f4f 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -128,12 +128,10 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
128void hpte_update(struct mm_struct *mm, unsigned long addr, 128void hpte_update(struct mm_struct *mm, unsigned long addr,
129 unsigned long pte, int wrprot) 129 unsigned long pte, int wrprot)
130{ 130{
131 int i;
132 unsigned long context = 0;
133 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 131 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
132 unsigned long vsid;
133 int i;
134 134
135 if (REGION_ID(addr) == USER_REGION_ID)
136 context = mm->context.id;
137 i = batch->index; 135 i = batch->index;
138 136
139 /* 137 /*
@@ -143,19 +141,21 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
143 * up scanning and resetting referenced bits then our batch context 141 * up scanning and resetting referenced bits then our batch context
144 * will change mid stream. 142 * will change mid stream.
145 */ 143 */
146 if (i != 0 && (context != batch->context || 144 if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) {
147 batch->large != pte_huge(pte))) {
148 flush_tlb_pending(); 145 flush_tlb_pending();
149 i = 0; 146 i = 0;
150 } 147 }
151
152 if (i == 0) { 148 if (i == 0) {
153 batch->context = context;
154 batch->mm = mm; 149 batch->mm = mm;
155 batch->large = pte_huge(pte); 150 batch->large = pte_huge(pte);
156 } 151 }
152 if (addr < KERNELBASE) {
153 vsid = get_vsid(mm->context.id, addr);
154 WARN_ON(vsid == 0);
155 } else
156 vsid = get_kernel_vsid(addr);
157 batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
157 batch->pte[i] = __pte(pte); 158 batch->pte[i] = __pte(pte);
158 batch->addr[i] = addr;
159 batch->index = ++i; 159 batch->index = ++i;
160 if (i >= PPC64_TLB_BATCH_NR) 160 if (i >= PPC64_TLB_BATCH_NR)
161 flush_tlb_pending(); 161 flush_tlb_pending();
@@ -177,10 +177,9 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
177 local = 1; 177 local = 1;
178 178
179 if (i == 1) 179 if (i == 1)
180 flush_hash_page(batch->context, batch->addr[0], batch->pte[0], 180 flush_hash_page(batch->vaddr[0], batch->pte[0], local);
181 local);
182 else 181 else
183 flush_hash_range(batch->context, i, local); 182 flush_hash_range(i, local);
184 batch->index = 0; 183 batch->index = 0;
185 put_cpu(); 184 put_cpu();
186} 185}
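The hpte_update() hunk above drops the per-batch context id: each batched entry now records a full virtual address built from the segment VSID and the low 28 bits of the effective address. A sketch of that address construction, not part of this patch and wrapped in a hypothetical helper:

#include <linux/sched.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

static unsigned long example_batch_vaddr(struct mm_struct *mm,
					 unsigned long addr)
{
	unsigned long vsid;

	if (addr < KERNELBASE)
		vsid = get_vsid(mm->context.id, addr);	/* user segment */
	else
		vsid = get_kernel_vsid(addr);		/* kernel segment */
	return (vsid << 28) | (addr & 0x0fffffff);	/* 256 MB segment offset */
}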
diff --git a/arch/ppc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
index 19d37730b664..19d37730b664 100644
--- a/arch/ppc/oprofile/Kconfig
+++ b/arch/powerpc/oprofile/Kconfig
diff --git a/arch/ppc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index e2218d32a4eb..0782d0cca89c 100644
--- a/arch/ppc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -7,8 +7,5 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
7 timer_int.o ) 7 timer_int.o )
8 8
9oprofile-y := $(DRIVER_OBJS) common.o 9oprofile-y := $(DRIVER_OBJS) common.o
10 10oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
11ifeq ($(CONFIG_FSL_BOOKE),y) 11oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
12 oprofile-y += op_model_fsl_booke.o
13endif
14
diff --git a/arch/ppc64/oprofile/common.c b/arch/powerpc/oprofile/common.c
index e5f572710aa0..af2c05d20ba5 100644
--- a/arch/ppc64/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -1,5 +1,9 @@
1/* 1/*
2 * PPC 64 oprofile support:
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM 3 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
4 * PPC 32 oprofile support: (based on PPC 64 support)
5 * Copyright (C) Freescale Semiconductor, Inc 2004
6 * Author: Andy Fleming
3 * 7 *
4 * Based on alpha version. 8 * Based on alpha version.
5 * 9 *
@@ -10,6 +14,9 @@
10 */ 14 */
11 15
12#include <linux/oprofile.h> 16#include <linux/oprofile.h>
17#ifndef __powerpc64__
18#include <linux/slab.h>
19#endif /* ! __powerpc64__ */
13#include <linux/init.h> 20#include <linux/init.h>
14#include <linux/smp.h> 21#include <linux/smp.h>
15#include <linux/errno.h> 22#include <linux/errno.h>
@@ -19,17 +26,21 @@
19#include <asm/cputable.h> 26#include <asm/cputable.h>
20#include <asm/oprofile_impl.h> 27#include <asm/oprofile_impl.h>
21 28
22static struct op_ppc64_model *model; 29static struct op_powerpc_model *model;
23 30
24static struct op_counter_config ctr[OP_MAX_COUNTER]; 31static struct op_counter_config ctr[OP_MAX_COUNTER];
25static struct op_system_config sys; 32static struct op_system_config sys;
26 33
34#ifndef __powerpc64__
35static char *cpu_type;
36#endif /* ! __powerpc64__ */
37
27static void op_handle_interrupt(struct pt_regs *regs) 38static void op_handle_interrupt(struct pt_regs *regs)
28{ 39{
29 model->handle_interrupt(regs, ctr); 40 model->handle_interrupt(regs, ctr);
30} 41}
31 42
32static int op_ppc64_setup(void) 43static int op_powerpc_setup(void)
33{ 44{
34 int err; 45 int err;
35 46
@@ -42,41 +53,49 @@ static int op_ppc64_setup(void)
42 model->reg_setup(ctr, &sys, model->num_counters); 53 model->reg_setup(ctr, &sys, model->num_counters);
43 54
44 /* Configure the registers on all cpus. */ 55 /* Configure the registers on all cpus. */
56#ifdef __powerpc64__
45 on_each_cpu(model->cpu_setup, NULL, 0, 1); 57 on_each_cpu(model->cpu_setup, NULL, 0, 1);
58#else /* __powerpc64__ */
59#if 0
60 /* FIXME: Make multi-cpu work */
61 on_each_cpu(model->reg_setup, NULL, 0, 1);
62#endif
63#endif /* __powerpc64__ */
46 64
47 return 0; 65 return 0;
48} 66}
49 67
50static void op_ppc64_shutdown(void) 68static void op_powerpc_shutdown(void)
51{ 69{
52 release_pmc_hardware(); 70 release_pmc_hardware();
53} 71}
54 72
55static void op_ppc64_cpu_start(void *dummy) 73static void op_powerpc_cpu_start(void *dummy)
56{ 74{
57 model->start(ctr); 75 model->start(ctr);
58} 76}
59 77
60static int op_ppc64_start(void) 78static int op_powerpc_start(void)
61{ 79{
62 on_each_cpu(op_ppc64_cpu_start, NULL, 0, 1); 80 on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
63 return 0; 81 return 0;
64} 82}
65 83
66static inline void op_ppc64_cpu_stop(void *dummy) 84static inline void op_powerpc_cpu_stop(void *dummy)
67{ 85{
68 model->stop(); 86 model->stop();
69} 87}
70 88
71static void op_ppc64_stop(void) 89static void op_powerpc_stop(void)
72{ 90{
73 on_each_cpu(op_ppc64_cpu_stop, NULL, 0, 1); 91 on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
74} 92}
75 93
76static int op_ppc64_create_files(struct super_block *sb, struct dentry *root) 94static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
77{ 95{
78 int i; 96 int i;
79 97
98#ifdef __powerpc64__
80 /* 99 /*
81 * There is one mmcr0, mmcr1 and mmcra for setting the events for 100 * There is one mmcr0, mmcr1 and mmcra for setting the events for
82 * all of the counters. 101 * all of the counters.
@@ -84,6 +103,7 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
84 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); 103 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
85 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); 104 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
86 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); 105 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
106#endif /* __powerpc64__ */
87 107
88 for (i = 0; i < model->num_counters; ++i) { 108 for (i = 0; i < model->num_counters; ++i) {
89 struct dentry *dir; 109 struct dentry *dir;
@@ -95,44 +115,70 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
95 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); 115 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
96 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); 116 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
97 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); 117 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
118#ifdef __powerpc64__
98 /* 119 /*
99 * We dont support per counter user/kernel selection, but 120 * We dont support per counter user/kernel selection, but
100 * we leave the entries because userspace expects them 121 * we leave the entries because userspace expects them
101 */ 122 */
123#endif /* __powerpc64__ */
102 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); 124 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
103 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); 125 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
126
127#ifndef __powerpc64__
128 /* FIXME: Not sure if this is used */
129#endif /* ! __powerpc64__ */
104 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); 130 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
105 } 131 }
106 132
107 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel); 133 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
108 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user); 134 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
135#ifdef __powerpc64__
109 oprofilefs_create_ulong(sb, root, "backtrace_spinlocks", 136 oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
110 &sys.backtrace_spinlocks); 137 &sys.backtrace_spinlocks);
138#endif /* __powerpc64__ */
111 139
112 /* Default to tracing both kernel and user */ 140 /* Default to tracing both kernel and user */
113 sys.enable_kernel = 1; 141 sys.enable_kernel = 1;
114 sys.enable_user = 1; 142 sys.enable_user = 1;
115 143#ifdef __powerpc64__
116 /* Turn on backtracing through spinlocks by default */ 144 /* Turn on backtracing through spinlocks by default */
117 sys.backtrace_spinlocks = 1; 145 sys.backtrace_spinlocks = 1;
146#endif /* __powerpc64__ */
118 147
119 return 0; 148 return 0;
120} 149}
121 150
122int __init oprofile_arch_init(struct oprofile_operations *ops) 151int __init oprofile_arch_init(struct oprofile_operations *ops)
123{ 152{
153#ifndef __powerpc64__
154#ifdef CONFIG_FSL_BOOKE
155 model = &op_model_fsl_booke;
156#else
157 return -ENODEV;
158#endif
159
160 cpu_type = kmalloc(32, GFP_KERNEL);
161 if (NULL == cpu_type)
162 return -ENOMEM;
163
164 sprintf(cpu_type, "ppc/%s", cur_cpu_spec->cpu_name);
165
166 model->num_counters = cur_cpu_spec->num_pmcs;
167
168 ops->cpu_type = cpu_type;
169#else /* __powerpc64__ */
124 if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type) 170 if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
125 return -ENODEV; 171 return -ENODEV;
126
127 model = cur_cpu_spec->oprofile_model; 172 model = cur_cpu_spec->oprofile_model;
128 model->num_counters = cur_cpu_spec->num_pmcs; 173 model->num_counters = cur_cpu_spec->num_pmcs;
129 174
130 ops->cpu_type = cur_cpu_spec->oprofile_cpu_type; 175 ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
131 ops->create_files = op_ppc64_create_files; 176#endif /* __powerpc64__ */
132 ops->setup = op_ppc64_setup; 177 ops->create_files = op_powerpc_create_files;
133 ops->shutdown = op_ppc64_shutdown; 178 ops->setup = op_powerpc_setup;
134 ops->start = op_ppc64_start; 179 ops->shutdown = op_powerpc_shutdown;
135 ops->stop = op_ppc64_stop; 180 ops->start = op_powerpc_start;
181 ops->stop = op_powerpc_stop;
136 182
137 printk(KERN_INFO "oprofile: using %s performance monitoring.\n", 183 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
138 ops->cpu_type); 184 ops->cpu_type);
@@ -142,4 +188,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
142 188
143void oprofile_arch_exit(void) 189void oprofile_arch_exit(void)
144{ 190{
191#ifndef __powerpc64__
192 kfree(cpu_type);
193 cpu_type = NULL;
194#endif /* ! __powerpc64__ */
145} 195}
diff --git a/arch/ppc/oprofile/op_model_fsl_booke.c b/arch/powerpc/oprofile/op_model_fsl_booke.c
index fc9c859358c6..86124a94c9af 100644
--- a/arch/ppc/oprofile/op_model_fsl_booke.c
+++ b/arch/powerpc/oprofile/op_model_fsl_booke.c
@@ -24,9 +24,8 @@
24#include <asm/cputable.h> 24#include <asm/cputable.h>
25#include <asm/reg_booke.h> 25#include <asm/reg_booke.h>
26#include <asm/page.h> 26#include <asm/page.h>
27#include <asm/perfmon.h> 27#include <asm/pmc.h>
28 28#include <asm/oprofile_impl.h>
29#include "op_impl.h"
30 29
31static unsigned long reset_value[OP_MAX_COUNTER]; 30static unsigned long reset_value[OP_MAX_COUNTER];
32 31
@@ -176,7 +175,7 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs,
176 pmc_start_ctrs(1); 175 pmc_start_ctrs(1);
177} 176}
178 177
179struct op_ppc32_model op_model_fsl_booke = { 178struct op_powerpc_model op_model_fsl_booke = {
180 .reg_setup = fsl_booke_reg_setup, 179 .reg_setup = fsl_booke_reg_setup,
181 .start = fsl_booke_start, 180 .start = fsl_booke_start,
182 .stop = fsl_booke_stop, 181 .stop = fsl_booke_stop,
diff --git a/arch/ppc64/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 32b2bb5625fe..886449315847 100644
--- a/arch/ppc64/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -300,7 +300,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
300 mtspr(SPRN_MMCR0, mmcr0); 300 mtspr(SPRN_MMCR0, mmcr0);
301} 301}
302 302
303struct op_ppc64_model op_model_power4 = { 303struct op_powerpc_model op_model_power4 = {
304 .reg_setup = power4_reg_setup, 304 .reg_setup = power4_reg_setup,
305 .cpu_setup = power4_cpu_setup, 305 .cpu_setup = power4_cpu_setup,
306 .start = power4_start, 306 .start = power4_start,
diff --git a/arch/ppc64/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index 08c5b333f5c4..e010b85996e8 100644
--- a/arch/ppc64/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -209,7 +209,7 @@ static void rs64_handle_interrupt(struct pt_regs *regs,
209 mtspr(SPRN_MMCR0, mmcr0); 209 mtspr(SPRN_MMCR0, mmcr0);
210} 210}
211 211
212struct op_ppc64_model op_model_rs64 = { 212struct op_powerpc_model op_model_rs64 = {
213 .reg_setup = rs64_reg_setup, 213 .reg_setup = rs64_reg_setup,
214 .cpu_setup = rs64_cpu_setup, 214 .cpu_setup = rs64_cpu_setup,
215 .start = rs64_start, 215 .start = rs64_start,
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
new file mode 100644
index 000000000000..ed39d6a3d22a
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/Kconfig
@@ -0,0 +1,280 @@
1config 4xx
2 bool
3 depends on 40x || 44x
4 default y
5
6config WANT_EARLY_SERIAL
7 bool
8 select SERIAL_8250
9 default n
10
11menu "AMCC 4xx options"
12 depends on 4xx
13
14choice
15 prompt "Machine Type"
16 depends on 40x
17 default WALNUT
18
19config BUBINGA
20 bool "Bubinga"
21 select WANT_EARLY_SERIAL
22 help
23 This option enables support for the IBM 405EP evaluation board.
24
25config CPCI405
26 bool "CPCI405"
27 help
28 This option enables support for the CPCI405 board.
29
30config EP405
31 bool "EP405/EP405PC"
32 help
33 This option enables support for the EP405/EP405PC boards.
34
35config REDWOOD_5
36 bool "Redwood-5"
37 help
38 This option enables support for the IBM STB04 evaluation board.
39
40config REDWOOD_6
41 bool "Redwood-6"
42 help
43 This option enables support for the IBM STBx25xx evaluation board.
44
45config SYCAMORE
46 bool "Sycamore"
47 help
48 This option enables support for the IBM PPC405GPr evaluation board.
49
50config WALNUT
51 bool "Walnut"
52 help
53 This option enables support for the IBM PPC405GP evaluation board.
54
55config XILINX_ML300
56 bool "Xilinx-ML300"
57 help
58 This option enables support for the Xilinx ML300 evaluation board.
59
60endchoice
61
62choice
63 prompt "Machine Type"
64 depends on 44x
65 default EBONY
66
67config BAMBOO
68 bool "Bamboo"
69 select WANT_EARLY_SERIAL
70 help
71 This option enables support for the IBM PPC440EP evaluation board.
72
73config EBONY
74 bool "Ebony"
75 select WANT_EARLY_SERIAL
76 help
77 This option enables support for the IBM PPC440GP evaluation board.
78
79config LUAN
80 bool "Luan"
81 select WANT_EARLY_SERIAL
82 help
83 This option enables support for the IBM PPC440SP evaluation board.
84
85config OCOTEA
86 bool "Ocotea"
87 select WANT_EARLY_SERIAL
88 help
89 This option enables support for the IBM PPC440GX evaluation board.
90
91endchoice
92
93config EP405PC
94 bool "EP405PC Support"
95 depends on EP405
96
97
98# It's often necessary to know the specific 4xx processor type.
 99# Fortunately, it is implied (so far) from the board type, so we
100# don't need to ask more redundant questions.
101config NP405H
102 bool
103 depends on ASH
104 default y
105
106config 440EP
107 bool
108 depends on BAMBOO
109 select PPC_FPU
110 default y
111
112config 440GP
113 bool
114 depends on EBONY
115 default y
116
117config 440GX
118 bool
119 depends on OCOTEA
120 default y
121
122config 440SP
123 bool
124 depends on LUAN
125 default y
126
127config 440
128 bool
129 depends on 440GP || 440SP || 440EP
130 default y
131
132config 440A
133 bool
134 depends on 440GX
135 default y
136
137config IBM440EP_ERR42
138 bool
139 depends on 440EP
140 default y
141
 142# All 405-based cores up until the 405GPR and 405EP have this erratum.
143config IBM405_ERR77
144 bool
145 depends on 40x && !403GCX && !405GPR && !405EP
146 default y
147
 148# All 40x-based cores up until the 405GPR and 405EP have this erratum.
149config IBM405_ERR51
150 bool
151 depends on 40x && !405GPR && !405EP
152 default y
153
154config BOOKE
155 bool
156 depends on 44x
157 default y
158
159config IBM_OCP
160 bool
161 depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
162 default y
163
164config XILINX_OCP
165 bool
166 depends on XILINX_ML300
167 default y
168
169config IBM_EMAC4
170 bool
171 depends on 440GX || 440SP
172 default y
173
174config BIOS_FIXUP
175 bool
176 depends on BUBINGA || EP405 || SYCAMORE || WALNUT
177 default y
178
179# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
180config 403GCX
181 bool
 182 depends on OAK
183 default y
184
185config 405EP
186 bool
187 depends on BUBINGA
188 default y
189
190config 405GP
191 bool
192 depends on CPCI405 || EP405 || WALNUT
193 default y
194
195config 405GPR
196 bool
197 depends on SYCAMORE
198 default y
199
200config VIRTEX_II_PRO
201 bool
202 depends on XILINX_ML300
203 default y
204
205config STB03xxx
206 bool
207 depends on REDWOOD_5 || REDWOOD_6
208 default y
209
210config EMBEDDEDBOOT
211 bool
212 depends on EP405 || XILINX_ML300
213 default y
214
215config IBM_OPENBIOS
216 bool
217 depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
218 default y
219
220config PPC4xx_DMA
221 bool "PPC4xx DMA controller support"
222 depends on 4xx
223
224config PPC4xx_EDMA
225 bool
226 depends on !STB03xxx && PPC4xx_DMA
227 default y
228
229config PPC_GEN550
230 bool
231 depends on 4xx
232 default y
233
234choice
235 prompt "TTYS0 device and default console"
236 depends on 40x
237 default UART0_TTYS0
238
239config UART0_TTYS0
240 bool "UART0"
241
242config UART0_TTYS1
243 bool "UART1"
244
245endchoice
246
247config SERIAL_SICC
248 bool "SICC Serial port support"
249 depends on STB03xxx
250
251config UART1_DFLT_CONSOLE
252 bool
253 depends on SERIAL_SICC && UART0_TTYS1
254 default y
255
256config SERIAL_SICC_CONSOLE
257 bool
258 depends on SERIAL_SICC && UART0_TTYS1
259 default y
260endmenu
261
262
263menu "IBM 40x options"
264 depends on 40x
265
266config SERIAL_SICC
267 bool "SICC Serial port"
268 depends on STB03xxx
269
270config UART1_DFLT_CONSOLE
271 bool
272 depends on SERIAL_SICC && UART0_TTYS1
273 default y
274
275config SERIAL_SICC_CONSOLE
276 bool
277 depends on SERIAL_SICC && UART0_TTYS1
278 default y
279
280endmenu
diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile
new file mode 100644
index 000000000000..79ff6b1e887c
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/Makefile
@@ -0,0 +1 @@
# empty makefile so make clean works
\ No newline at end of file
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
new file mode 100644
index 000000000000..c5bc2821d991
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -0,0 +1,86 @@
1config 85xx
2 bool
3 depends on E500
4 default y
5
6config PPC_INDIRECT_PCI_BE
7 bool
8 depends on 85xx
9 default y
10
11menu "Freescale 85xx options"
12 depends on E500
13
14choice
15 prompt "Machine Type"
16 depends on 85xx
17 default MPC8540_ADS
18
19config MPC8540_ADS
20 bool "Freescale MPC8540 ADS"
21 help
22 This option enables support for the MPC 8540 ADS evaluation board.
23
24config MPC8548_CDS
25 bool "Freescale MPC8548 CDS"
26 help
 27 This option enables support for the MPC8548 CDS evaluation board.
28
29config MPC8555_CDS
30 bool "Freescale MPC8555 CDS"
31 help
32 This option enablese support for the MPC8555 CDS evaluation board.
33
34config MPC8560_ADS
35 bool "Freescale MPC8560 ADS"
36 help
37 This option enables support for the MPC 8560 ADS evaluation board.
38
39config SBC8560
40 bool "WindRiver PowerQUICC III SBC8560"
41 help
42 This option enables support for the WindRiver PowerQUICC III
43 SBC8560 board.
44
45config STX_GP3
46 bool "Silicon Turnkey Express GP3"
47 help
48 This option enables support for the Silicon Turnkey Express GP3
49 board.
50
51endchoice
52
53# It's often necessary to know the specific 85xx processor type.
54# Fortunately, it is implied (so far) from the board type, so we
55# don't need to ask more redundant questions.
56config MPC8540
57 bool
58 depends on MPC8540_ADS
59 default y
60
61config MPC8548
62 bool
63 depends on MPC8548_CDS
64 default y
65
66config MPC8555
67 bool
68 depends on MPC8555_CDS
69 default y
70
71config MPC8560
72 bool
73 depends on SBC8560 || MPC8560_ADS || STX_GP3
74 default y
75
76config 85xx_PCI2
 77 bool "Support for 2nd PCI host controller"
78 depends on MPC8555_CDS
79 default y
80
81config PPC_GEN550
82 bool
83 depends on MPC8540 || SBC8560 || MPC8555
84 default y
85
86endmenu
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
new file mode 100644
index 000000000000..6407197ffd89
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -0,0 +1 @@
# empty makefile so make clean works
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
new file mode 100644
index 000000000000..c8c0ba3cf8e8
--- /dev/null
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -0,0 +1,352 @@
1config FADS
2 bool
3
4choice
5 prompt "8xx Machine Type"
6 depends on 8xx
7 default RPXLITE
8
9config RPXLITE
10 bool "RPX-Lite"
11 ---help---
12 Single-board computers based around the PowerPC MPC8xx chips and
13 intended for embedded applications. The following types are
14 supported:
15
16 RPX-Lite:
17 Embedded Planet RPX Lite. PC104 form-factor SBC based on the MPC823.
18
19 RPX-Classic:
20 Embedded Planet RPX Classic Low-fat. Credit-card-size SBC based on
21 the MPC 860
22
23 BSE-IP:
24 Bright Star Engineering ip-Engine.
25
26 TQM823L:
27 TQM850L:
28 TQM855L:
29 TQM860L:
30 MPC8xx based family of mini modules, half credit card size,
31 up to 64 MB of RAM, 8 MB Flash, (Fast) Ethernet, 2 x serial ports,
32 2 x CAN bus interface, ...
33 Manufacturer: TQ Components, www.tq-group.de
34 Date of Release: October (?) 1999
35 End of Life: not yet :-)
36 URL:
37 - module: <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>
38 - starter kit: <http://www.denx.de/PDF/STK8xxLHWM201.pdf>
39 - images: <http://www.denx.de/embedded-ppc-en.html>
40
41 FPS850L:
42 FingerPrint Sensor System (based on TQM850L)
43 Manufacturer: IKENDI AG, <http://www.ikendi.com/>
44 Date of Release: November 1999
45 End of life: end 2000 ?
46 URL: see TQM850L
47
48 IVMS8:
49 MPC860 based board used in the "Integrated Voice Mail System",
50 Small Version (8 voice channels)
51 Manufacturer: Speech Design, <http://www.speech-design.de/>
52 Date of Release: December 2000 (?)
53 End of life: -
54 URL: <http://www.speech-design.de/>
55
56 IVML24:
57 MPC860 based board used in the "Integrated Voice Mail System",
58 Large Version (24 voice channels)
59 Manufacturer: Speech Design, <http://www.speech-design.de/>
60 Date of Release: March 2001 (?)
61 End of life: -
62 URL: <http://www.speech-design.de/>
63
64 HERMES:
65 Hermes-Pro ISDN/LAN router with integrated 8 x hub
66 Manufacturer: Multidata Gesellschaft fur Datentechnik und Informatik
67 <http://www.multidata.de/>
68 Date of Release: 2000 (?)
69 End of life: -
70 URL: <http://www.multidata.de/english/products/hpro.htm>
71
72 IP860:
73 VMEBus IP (Industry Pack) carrier board with MPC860
74 Manufacturer: MicroSys GmbH, <http://www.microsys.de/>
75 Date of Release: ?
76 End of life: -
77 URL: <http://www.microsys.de/html/ip860.html>
78
79 PCU_E:
80 PCU = Peripheral Controller Unit, Extended
81 Manufacturer: Siemens AG, ICN (Information and Communication Networks)
82 <http://www.siemens.de/page/1,3771,224315-1-999_2_226207-0,00.html>
83 Date of Release: April 2001
84 End of life: August 2001
85 URL: n. a.
86
87config RPXCLASSIC
88 bool "RPX-Classic"
89 help
90 The RPX-Classic is a single-board computer based on the Motorola
91 MPC860. It features 16MB of DRAM and a variable amount of flash,
92 I2C EEPROM, thermal monitoring, a PCMCIA slot, a DIP switch and two
93 LEDs. Variants with Ethernet ports exist. Say Y here to support it
94 directly.
95
96config BSEIP
97 bool "BSE-IP"
98 help
99 Say Y here to support the Bright Star Engineering ipEngine SBC.
100 This is a credit-card-sized device featuring a MPC823 processor,
101 26MB DRAM, 4MB flash, Ethernet, a 16K-gate FPGA, USB, an LCD/video
102 controller, and two RS232 ports.
103
104config MPC8XXFADS
105 bool "FADS"
106 select FADS
107
108config MPC86XADS
109 bool "MPC86XADS"
110 help
111 MPC86x Application Development System by Freescale Semiconductor.
112 The MPC86xADS is meant to serve as a platform for s/w and h/w
113 development around the MPC86X processor families.
114 select FADS
115
116config MPC885ADS
117 bool "MPC885ADS"
118 help
119 Freescale Semiconductor MPC885 Application Development System (ADS).
120 Also known as DUET.
121 The MPC885ADS is meant to serve as a platform for s/w and h/w
122 development around the MPC885 processor family.
123
124config TQM823L
125 bool "TQM823L"
126 help
127 Say Y here to support the TQM823L, one of an MPC8xx-based family of
128 mini SBCs (half credit-card size) from TQ Components first released
129 in late 1999. Technical references are at
130 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
131 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
132 <http://www.denx.de/embedded-ppc-en.html>.
133
134config TQM850L
135 bool "TQM850L"
136 help
137 Say Y here to support the TQM850L, one of an MPC8xx-based family of
138 mini SBCs (half credit-card size) from TQ Components first released
139 in late 1999. Technical references are at
140 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
141 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
142 <http://www.denx.de/embedded-ppc-en.html>.
143
144config TQM855L
145 bool "TQM855L"
146 help
147 Say Y here to support the TQM855L, one of an MPC8xx-based family of
148 mini SBCs (half credit-card size) from TQ Components first released
149 in late 1999. Technical references are at
150 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
151 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
152 <http://www.denx.de/embedded-ppc-en.html>.
153
154config TQM860L
155 bool "TQM860L"
156 help
157 Say Y here to support the TQM860L, one of an MPC8xx-based family of
158 mini SBCs (half credit-card size) from TQ Components first released
159 in late 1999. Technical references are at
160 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
161 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
162 <http://www.denx.de/embedded-ppc-en.html>.
163
164config FPS850L
165 bool "FPS850L"
166
167config IVMS8
168 bool "IVMS8"
169 help
170 Say Y here to support the Integrated Voice-Mail Small 8-channel SBC
171 from Speech Design, released March 2001. The manufacturer's website
172 is at <http://www.speech-design.de/>.
173
174config IVML24
175 bool "IVML24"
176 help
177 Say Y here to support the Integrated Voice-Mail Large 24-channel SBC
178 from Speech Design, released March 2001. The manufacturer's website
179 is at <http://www.speech-design.de/>.
180
181config HERMES_PRO
182 bool "HERMES"
183
184config IP860
185 bool "IP860"
186
187config LWMON
188 bool "LWMON"
189
190config PCU_E
191 bool "PCU_E"
192
193config CCM
194 bool "CCM"
195
196config LANTEC
197 bool "LANTEC"
198
199config MBX
200 bool "MBX"
201 help
 202 MBX is a line of Motorola single-board computers based around the
203 MPC821 and MPC860 processors, and intended for embedded-controller
204 applications. Say Y here to support these boards directly.
205
206config WINCEPT
207 bool "WinCept"
208 help
209 The Wincept 100/110 is a Motorola single-board computer based on the
210 MPC821 PowerPC, introduced in 1998 and designed to be used in
211 thin-client machines. Say Y to support it directly.
212
213endchoice
214
215#
216# MPC8xx Communication options
217#
218
219menu "MPC8xx CPM Options"
220 depends on 8xx
221
222config SCC_ENET
223 bool "CPM SCC Ethernet"
224 depends on NET_ETHERNET
225 help
226 Enable Ethernet support via the Motorola MPC8xx serial
227 communications controller.
228
229choice
230 prompt "SCC used for Ethernet"
231 depends on SCC_ENET
232 default SCC1_ENET
233
234config SCC1_ENET
235 bool "SCC1"
236 help
237 Use MPC8xx serial communications controller 1 to drive Ethernet
238 (default).
239
240config SCC2_ENET
241 bool "SCC2"
242 help
243 Use MPC8xx serial communications controller 2 to drive Ethernet.
244
245config SCC3_ENET
246 bool "SCC3"
247 help
248 Use MPC8xx serial communications controller 3 to drive Ethernet.
249
250endchoice
251
252config FEC_ENET
253 bool "860T FEC Ethernet"
254 depends on NET_ETHERNET
255 help
 256 Enable Ethernet support via the Fast Ethernet Controller (FEC) on
 257 the Motorola MPC860T.
258
259config USE_MDIO
260 bool "Use MDIO for PHY configuration"
261 depends on FEC_ENET
262 help
263 On some boards the hardware configuration of the ethernet PHY can be
264 used without any software interaction over the MDIO interface, so
265 all MII code can be omitted. Say N here if unsure or if you don't
266 need link status reports.
267
268config FEC_AM79C874
269 bool "Support AMD79C874 PHY"
270 depends on USE_MDIO
271
272config FEC_LXT970
273 bool "Support LXT970 PHY"
274 depends on USE_MDIO
275
276config FEC_LXT971
277 bool "Support LXT971 PHY"
278 depends on USE_MDIO
279
280config FEC_QS6612
281 bool "Support QS6612 PHY"
282 depends on USE_MDIO
283
284config ENET_BIG_BUFFERS
285 bool "Use Big CPM Ethernet Buffers"
286 depends on SCC_ENET || FEC_ENET
287 help
288 Allocate large buffers for MPC8xx Ethernet. Increases throughput
289 and decreases the likelihood of dropped packets, but costs memory.
290
291config HTDMSOUND
292 bool "Embedded Planet HIOX Audio"
293 depends on SOUND=y
294
295# This doesn't really belong here, but it is convenient to ask
296# 8xx specific questions.
297comment "Generic MPC8xx Options"
298
299config 8xx_COPYBACK
300 bool "Copy-Back Data Cache (else Writethrough)"
301 help
302 Saying Y here will cause the cache on an MPC8xx processor to be used
303 in Copy-Back mode. If you say N here, it is used in Writethrough
304 mode.
305
306 If in doubt, say Y here.
307
308config 8xx_CPU6
309 bool "CPU6 Silicon Errata (860 Pre Rev. C)"
310 help
 311 MPC860 CPUs prior to Rev. C have some bugs in the silicon which
 312 require workarounds for Linux (and most other OSes) to work. If you
313 get a BUG() very early in boot, this might fix the problem. For
314 more details read the document entitled "MPC860 Family Device Errata
315 Reference" on Motorola's website. This option also incurs a
316 performance hit.
317
318 If in doubt, say N here.
319
320choice
321 prompt "Microcode patch selection"
322 default NO_UCODE_PATCH
323 help
324 Help not implemented yet, coming soon.
325
326config NO_UCODE_PATCH
327 bool "None"
328
329config USB_SOF_UCODE_PATCH
330 bool "USB SOF patch"
331 help
332 Help not implemented yet, coming soon.
333
334config I2C_SPI_UCODE_PATCH
335 bool "I2C/SPI relocation patch"
336 help
337 Help not implemented yet, coming soon.
338
339config I2C_SPI_SMC1_UCODE_PATCH
340 bool "I2C/SPI/SMC1 relocation patch"
341 help
342 Help not implemented yet, coming soon.
343
344endchoice
345
346config UCODE_PATCH
347 bool
348 default y
349 depends on !NO_UCODE_PATCH
350
351endmenu
352
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
new file mode 100644
index 000000000000..172c0db63504
--- /dev/null
+++ b/arch/powerpc/platforms/Makefile
@@ -0,0 +1,13 @@
1ifeq ($(CONFIG_PPC_MERGE),y)
2obj-$(CONFIG_PPC_PMAC) += powermac/
3else
4ifeq ($(CONFIG_PPC64),y)
5obj-$(CONFIG_PPC_PMAC) += powermac/
6endif
7endif
8obj-$(CONFIG_PPC_CHRP) += chrp/
9obj-$(CONFIG_4xx) += 4xx/
10obj-$(CONFIG_85xx) += 85xx/
11obj-$(CONFIG_PPC_PSERIES) += pseries/
12obj-$(CONFIG_PPC_ISERIES) += iseries/
13obj-$(CONFIG_PPC_MAPLE) += maple/
diff --git a/arch/powerpc/platforms/apus/Kconfig b/arch/powerpc/platforms/apus/Kconfig
new file mode 100644
index 000000000000..6bde3bffed86
--- /dev/null
+++ b/arch/powerpc/platforms/apus/Kconfig
@@ -0,0 +1,130 @@
1
2config AMIGA
3 bool
4 depends on APUS
5 default y
6 help
7 This option enables support for the Amiga series of computers.
8
9config ZORRO
10 bool
11 depends on APUS
12 default y
13 help
14 This enables support for the Zorro bus in the Amiga. If you have
15 expansion cards in your Amiga that conform to the Amiga
16 AutoConfig(tm) specification, say Y, otherwise N. Note that even
17 expansion cards that do not fit in the Zorro slots but fit in e.g.
18 the CPU slot may fall in this category, so you have to say Y to let
19 Linux use these.
20
21config ABSTRACT_CONSOLE
22 bool
23 depends on APUS
24 default y
25
26config APUS_FAST_EXCEPT
27 bool
28 depends on APUS
29 default y
30
31config AMIGA_PCMCIA
32 bool "Amiga 1200/600 PCMCIA support"
33 depends on APUS && EXPERIMENTAL
34 help
35 Include support in the kernel for pcmcia on Amiga 1200 and Amiga
36 600. If you intend to use pcmcia cards say Y; otherwise say N.
37
38config AMIGA_BUILTIN_SERIAL
39 tristate "Amiga builtin serial support"
40 depends on APUS
41 help
42 If you want to use your Amiga's built-in serial port in Linux,
43 answer Y.
44
45 To compile this driver as a module, choose M here.
46
47config GVPIOEXT
48 tristate "GVP IO-Extender support"
49 depends on APUS
50 help
51 If you want to use a GVP IO-Extender serial card in Linux, say Y.
52 Otherwise, say N.
53
54config GVPIOEXT_LP
55 tristate "GVP IO-Extender parallel printer support"
56 depends on GVPIOEXT
57 help
58 Say Y to enable driving a printer from the parallel port on your
59 GVP IO-Extender card, N otherwise.
60
61config GVPIOEXT_PLIP
62 tristate "GVP IO-Extender PLIP support"
63 depends on GVPIOEXT
64 help
65 Say Y to enable doing IP over the parallel port on your GVP
66 IO-Extender card, N otherwise.
67
68config MULTIFACE_III_TTY
69 tristate "Multiface Card III serial support"
70 depends on APUS
71 help
72 If you want to use a Multiface III card's serial port in Linux,
73 answer Y.
74
75 To compile this driver as a module, choose M here.
76
77config A2232
78 tristate "Commodore A2232 serial support (EXPERIMENTAL)"
79 depends on EXPERIMENTAL && APUS
80 ---help---
81 This option supports the 2232 7-port serial card shipped with the
82 Amiga 2000 and other Zorro-bus machines, dating from 1989. At
83 a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
84 each, plus an 8520 CIA, a master 6502 CPU, and a buffer as well. The
85 ports were connected with 8 pin DIN connectors on the card bracket,
86 for which 8 pin to DB25 adapters were supplied. The card also had
87 jumpers internally to toggle various pinning configurations.
88
89 This driver can be built as a module; but then "generic_serial"
90 will also be built as a module. This has to be loaded before
91 "ser_a2232". If you want to do this, answer M here.
92
93config WHIPPET_SERIAL
94 tristate "Hisoft Whippet PCMCIA serial support"
95 depends on AMIGA_PCMCIA
96 help
97 HiSoft has a web page at <http://www.hisoft.co.uk/>, but there
98 is no listing for the Whippet in their Amiga section.
99
100config APNE
101 tristate "PCMCIA NE2000 support"
102 depends on AMIGA_PCMCIA
103 help
104 If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
105 say N.
106
107 To compile this driver as a module, choose M here: the
108 module will be called apne.
109
110config SERIAL_CONSOLE
111 bool "Support for serial port console"
112 depends on APUS && (AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y)
113
114config HEARTBEAT
115 bool "Use power LED as a heartbeat"
116 depends on APUS
117 help
118 Use the power-on LED on your machine as a load meter. The exact
119 behavior is platform-dependent, but normally the flash frequency is
120 a hyperbolic function of the 5-minute load average.
121
122config PROC_HARDWARE
123 bool "/proc/hardware support"
124 depends on APUS
125
126source "drivers/zorro/Kconfig"
127
128config PCI_PERMEDIA
129 bool "PCI for Permedia2"
130 depends on !4xx && !8xx && APUS
diff --git a/arch/powerpc/platforms/chrp/Makefile b/arch/powerpc/platforms/chrp/Makefile
new file mode 100644
index 000000000000..902feb1ac431
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/Makefile
@@ -0,0 +1,4 @@
1obj-y += setup.o time.o pegasos_eth.o
2obj-$(CONFIG_PCI) += pci.o
3obj-$(CONFIG_SMP) += smp.o
4obj-$(CONFIG_NVRAM) += nvram.o
diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h
new file mode 100644
index 000000000000..3a2057fa314a
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/chrp.h
@@ -0,0 +1,12 @@
1/*
2 * Declarations of CHRP platform-specific things.
3 */
4
5extern void chrp_nvram_init(void);
6extern void chrp_get_rtc_time(struct rtc_time *);
7extern int chrp_set_rtc_time(struct rtc_time *);
8extern void chrp_calibrate_decr(void);
9extern long chrp_time_init(void);
10
11extern void chrp_find_bridges(void);
12extern void chrp_event_scan(void);
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
new file mode 100644
index 000000000000..4ac7125aa09c
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -0,0 +1,84 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * /dev/nvram driver for PPC
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/spinlock.h>
17#include <asm/uaccess.h>
18#include <asm/prom.h>
19#include <asm/machdep.h>
20#include "chrp.h"
21
22static unsigned int nvram_size;
23static unsigned char nvram_buf[4];
24static DEFINE_SPINLOCK(nvram_lock);
25
26static unsigned char chrp_nvram_read(int addr)
27{
28 unsigned long done, flags;
29 unsigned char ret;
30
31 if (addr >= nvram_size) {
32 printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n",
33 current->comm, addr, nvram_size);
34 return 0xff;
35 }
36 spin_lock_irqsave(&nvram_lock, flags);
37 if ((call_rtas("nvram-fetch", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done)
38 ret = 0xff;
39 else
40 ret = nvram_buf[0];
41 spin_unlock_irqrestore(&nvram_lock, flags);
42
43 return ret;
44}
45
46static void chrp_nvram_write(int addr, unsigned char val)
47{
48 unsigned long done, flags;
49
50 if (addr >= nvram_size) {
51 printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n",
52 current->comm, addr, nvram_size);
53 return;
54 }
55 spin_lock_irqsave(&nvram_lock, flags);
56 nvram_buf[0] = val;
57 if ((call_rtas("nvram-store", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done)
58 printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
59 spin_unlock_irqrestore(&nvram_lock, flags);
60}
61
62void __init chrp_nvram_init(void)
63{
64 struct device_node *nvram;
65 unsigned int *nbytes_p, proplen;
66
67 nvram = of_find_node_by_type(NULL, "nvram");
68 if (nvram == NULL)
69 return;
70
71 nbytes_p = (unsigned int *)get_property(nvram, "#bytes", &proplen);
72 if (nbytes_p == NULL || proplen != sizeof(unsigned int))
73 return;
74
75 nvram_size = *nbytes_p;
76
77 printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
78 of_node_put(nvram);
79
80 ppc_md.nvram_read_val = chrp_nvram_read;
81 ppc_md.nvram_write_val = chrp_nvram_write;
82
83 return;
84}
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
new file mode 100644
index 000000000000..82c429d487f3
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -0,0 +1,310 @@
1/*
2 * CHRP pci routines.
3 */
4
5#include <linux/config.h>
6#include <linux/kernel.h>
7#include <linux/pci.h>
8#include <linux/delay.h>
9#include <linux/string.h>
10#include <linux/init.h>
11#include <linux/ide.h>
12
13#include <asm/io.h>
14#include <asm/pgtable.h>
15#include <asm/irq.h>
16#include <asm/hydra.h>
17#include <asm/prom.h>
18#include <asm/gg2.h>
19#include <asm/machdep.h>
20#include <asm/sections.h>
21#include <asm/pci-bridge.h>
22#include <asm/open_pic.h>
23#include <asm/grackle.h>
24#include <asm/rtas.h>
25
26/* LongTrail */
27void __iomem *gg2_pci_config_base;
28
29/*
30 * The VLSI Golden Gate II has only 512K of PCI configuration space, so we
31 * limit the bus number to 3 bits
32 */
33
34int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
35 int len, u32 *val)
36{
37 volatile void __iomem *cfg_data;
38 struct pci_controller *hose = bus->sysdata;
39
40 if (bus->number > 7)
41 return PCIBIOS_DEVICE_NOT_FOUND;
42 /*
43 * Note: the caller has already checked that off is
44 * suitably aligned and that len is 1, 2 or 4.
45 */
46 cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off);
47 switch (len) {
48 case 1:
49 *val = in_8(cfg_data);
50 break;
51 case 2:
52 *val = in_le16(cfg_data);
53 break;
54 default:
55 *val = in_le32(cfg_data);
56 break;
57 }
58 return PCIBIOS_SUCCESSFUL;
59}
60
61int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
62 int len, u32 val)
63{
64 volatile void __iomem *cfg_data;
65 struct pci_controller *hose = bus->sysdata;
66
67 if (bus->number > 7)
68 return PCIBIOS_DEVICE_NOT_FOUND;
69 /*
70 * Note: the caller has already checked that off is
71 * suitably aligned and that len is 1, 2 or 4.
72 */
73 cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off);
74 switch (len) {
75 case 1:
76 out_8(cfg_data, val);
77 break;
78 case 2:
79 out_le16(cfg_data, val);
80 break;
81 default:
82 out_le32(cfg_data, val);
83 break;
84 }
85 return PCIBIOS_SUCCESSFUL;
86}
87
88static struct pci_ops gg2_pci_ops =
89{
90 gg2_read_config,
91 gg2_write_config
92};
93
94/*
95 * Access functions for PCI config space using RTAS calls.
96 */
97int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
98 int len, u32 *val)
99{
100 struct pci_controller *hose = bus->sysdata;
101 unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
102 | (((bus->number - hose->first_busno) & 0xff) << 16)
103 | (hose->index << 24);
104 int ret = -1;
105 int rval;
106
107 rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len);
108 *val = ret;
109 return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
110}
111
112int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
113 int len, u32 val)
114{
115 struct pci_controller *hose = bus->sysdata;
116 unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
117 | (((bus->number - hose->first_busno) & 0xff) << 16)
118 | (hose->index << 24);
119 int rval;
120
121 rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
122 addr, len, val);
123 return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
124}
125
126static struct pci_ops rtas_pci_ops =
127{
128 rtas_read_config,
129 rtas_write_config
130};
131
132volatile struct Hydra __iomem *Hydra = NULL;
133
134int __init
135hydra_init(void)
136{
137 struct device_node *np;
138
139 np = find_devices("mac-io");
140 if (np == NULL || np->n_addrs == 0)
141 return 0;
142 Hydra = ioremap(np->addrs[0].address, np->addrs[0].size);
143 printk("Hydra Mac I/O at %lx\n", np->addrs[0].address);
144 printk("Hydra Feature_Control was %x",
145 in_le32(&Hydra->Feature_Control));
146 out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN |
147 HYDRA_FC_SCSI_CELL_EN |
148 HYDRA_FC_SCCA_ENABLE |
149 HYDRA_FC_SCCB_ENABLE |
150 HYDRA_FC_ARB_BYPASS |
151 HYDRA_FC_MPIC_ENABLE |
152 HYDRA_FC_SLOW_SCC_PCLK |
153 HYDRA_FC_MPIC_IS_MASTER));
154 printk(", now %x\n", in_le32(&Hydra->Feature_Control));
155 return 1;
156}
157
158void __init
159chrp_pcibios_fixup(void)
160{
161 struct pci_dev *dev = NULL;
162 struct device_node *np;
163
164 /* PCI interrupts are controlled by the OpenPIC */
165 for_each_pci_dev(dev) {
166 np = pci_device_to_OF_node(dev);
167 if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0))
168 dev->irq = np->intrs[0].line;
169 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
170 }
171}
172
173#define PRG_CL_RESET_VALID 0x00010000
174
175static void __init
176setup_python(struct pci_controller *hose, struct device_node *dev)
177{
178 u32 __iomem *reg;
179 u32 val;
180 unsigned long addr = dev->addrs[0].address;
181
182 setup_indirect_pci(hose, addr + 0xf8000, addr + 0xf8010);
183
184 /* Clear the magic go-slow bit */
185 reg = ioremap(dev->addrs[0].address + 0xf6000, 0x40);
186 val = in_be32(&reg[12]);
187 if (val & PRG_CL_RESET_VALID) {
188 out_be32(&reg[12], val & ~PRG_CL_RESET_VALID);
189 in_be32(&reg[12]);
190 }
191 iounmap(reg);
192}
193
194/* Marvell Discovery II based Pegasos 2 */
195static void __init setup_peg2(struct pci_controller *hose, struct device_node *dev)
196{
197 struct device_node *root = find_path_device("/");
198 struct device_node *rtas;
199
200 rtas = of_find_node_by_name (root, "rtas");
201 if (rtas) {
202 hose->ops = &rtas_pci_ops;
203 } else {
204 printk ("RTAS supporting Pegasos OF not found, please upgrade"
205 " your firmware\n");
206 }
207 pci_assign_all_buses = 1;
208}
209
210void __init
211chrp_find_bridges(void)
212{
213 struct device_node *dev;
214 int *bus_range;
215 int len, index = -1;
216 struct pci_controller *hose;
217 unsigned int *dma;
218 char *model, *machine;
219 int is_longtrail = 0, is_mot = 0, is_pegasos = 0;
220 struct device_node *root = find_path_device("/");
221
222 /*
223 * The PCI host bridge nodes on some machines don't have
224 * properties to adequately identify them, so we have to
225 * look at what sort of machine this is as well.
226 */
227 machine = get_property(root, "model", NULL);
228 if (machine != NULL) {
229 is_longtrail = strncmp(machine, "IBM,LongTrail", 13) == 0;
230 is_mot = strncmp(machine, "MOT", 3) == 0;
231 if (strncmp(machine, "Pegasos2", 8) == 0)
232 is_pegasos = 2;
233 else if (strncmp(machine, "Pegasos", 7) == 0)
234 is_pegasos = 1;
235 }
236 for (dev = root->child; dev != NULL; dev = dev->sibling) {
237 if (dev->type == NULL || strcmp(dev->type, "pci") != 0)
238 continue;
239 ++index;
240 /* The GG2 bridge on the LongTrail doesn't have an address */
241 if (dev->n_addrs < 1 && !is_longtrail) {
242 printk(KERN_WARNING "Can't use %s: no address\n",
243 dev->full_name);
244 continue;
245 }
246 bus_range = (int *) get_property(dev, "bus-range", &len);
247 if (bus_range == NULL || len < 2 * sizeof(int)) {
248 printk(KERN_WARNING "Can't get bus-range for %s\n",
249 dev->full_name);
250 continue;
251 }
252 if (bus_range[1] == bus_range[0])
253 printk(KERN_INFO "PCI bus %d", bus_range[0]);
254 else
255 printk(KERN_INFO "PCI buses %d..%d",
256 bus_range[0], bus_range[1]);
257 printk(" controlled by %s", dev->type);
258 if (dev->n_addrs > 0)
259 printk(" at %lx", dev->addrs[0].address);
260 printk("\n");
261
262 hose = pcibios_alloc_controller();
263 if (!hose) {
264 printk("Can't allocate PCI controller structure for %s\n",
265 dev->full_name);
266 continue;
267 }
268 hose->arch_data = dev;
269 hose->first_busno = bus_range[0];
270 hose->last_busno = bus_range[1];
271
272 model = get_property(dev, "model", NULL);
273 if (model == NULL)
274 model = "<none>";
275 if (device_is_compatible(dev, "IBM,python")) {
276 setup_python(hose, dev);
277 } else if (is_mot
278 || strncmp(model, "Motorola, Grackle", 17) == 0) {
279 setup_grackle(hose);
280 } else if (is_longtrail) {
281 void __iomem *p = ioremap(GG2_PCI_CONFIG_BASE, 0x80000);
282 hose->ops = &gg2_pci_ops;
283 hose->cfg_data = p;
284 gg2_pci_config_base = p;
285 } else if (is_pegasos == 1) {
286 setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc);
287 } else if (is_pegasos == 2) {
288 setup_peg2(hose, dev);
289 } else {
290 printk("No methods for %s (model %s), using RTAS\n",
291 dev->full_name, model);
292 hose->ops = &rtas_pci_ops;
293 }
294
295 pci_process_bridge_OF_ranges(hose, dev, index == 0);
296
297 /* check the first bridge for a property that we can
298 use to set pci_dram_offset */
299 dma = (unsigned int *)
300 get_property(dev, "ibm,dma-ranges", &len);
301 if (index == 0 && dma != NULL && len >= 6 * sizeof(*dma)) {
302 pci_dram_offset = dma[2] - dma[3];
303 printk("pci_dram_offset = %lx\n", pci_dram_offset);
304 }
305 }
306
307 /* Do not fixup interrupts from OF tree on pegasos */
308 if (is_pegasos == 0)
309 ppc_md.pcibios_fixup = chrp_pcibios_fixup;
310}
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
new file mode 100644
index 000000000000..a9052305c35d
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -0,0 +1,213 @@
1/*
2 * arch/ppc/platforms/chrp_pegasos_eth.c
3 *
4 * Copyright (C) 2005 Sven Luther <sl@bplan-gmbh.de>
5 * Thanks to :
6 * Dale Farnsworth <dale@farnsworth.org>
7 * Mark A. Greer <mgreer@mvista.com>
8 * Nicolas DET <nd@bplan-gmbh.de>
9 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 * And anyone else who helped me on this.
11 */
12
13#include <linux/types.h>
14#include <linux/init.h>
15#include <linux/ioport.h>
16#include <linux/device.h>
17#include <linux/mv643xx.h>
18#include <linux/pci.h>
19
20#define PEGASOS2_MARVELL_REGBASE (0xf1000000)
21#define PEGASOS2_MARVELL_REGSIZE (0x00004000)
22#define PEGASOS2_SRAM_BASE (0xf2000000)
23#define PEGASOS2_SRAM_SIZE (256*1024)
24
25#define PEGASOS2_SRAM_BASE_ETH0 (PEGASOS2_SRAM_BASE)
26#define PEGASOS2_SRAM_BASE_ETH1 (PEGASOS2_SRAM_BASE_ETH0 + (PEGASOS2_SRAM_SIZE / 2) )
27
28
29#define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
30#define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
31
32#undef BE_VERBOSE
33
34static struct resource mv643xx_eth_shared_resources[] = {
35 [0] = {
36 .name = "ethernet shared base",
37 .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS,
38 .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS +
39 MV643XX_ETH_SHARED_REGS_SIZE - 1,
40 .flags = IORESOURCE_MEM,
41 },
42};
43
44static struct platform_device mv643xx_eth_shared_device = {
45 .name = MV643XX_ETH_SHARED_NAME,
46 .id = 0,
47 .num_resources = ARRAY_SIZE(mv643xx_eth_shared_resources),
48 .resource = mv643xx_eth_shared_resources,
49};
50
51static struct resource mv643xx_eth0_resources[] = {
52 [0] = {
53 .name = "eth0 irq",
54 .start = 9,
55 .end = 9,
56 .flags = IORESOURCE_IRQ,
57 },
58};
59
60
61static struct mv643xx_eth_platform_data eth0_pd = {
62 .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0,
63 .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
64 .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
65
66 .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH0 + PEGASOS2_SRAM_TXRING_SIZE,
67 .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
68 .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
69};
70
71static struct platform_device eth0_device = {
72 .name = MV643XX_ETH_NAME,
73 .id = 0,
74 .num_resources = ARRAY_SIZE(mv643xx_eth0_resources),
75 .resource = mv643xx_eth0_resources,
76 .dev = {
77 .platform_data = &eth0_pd,
78 },
79};
80
81static struct resource mv643xx_eth1_resources[] = {
82 [0] = {
83 .name = "eth1 irq",
84 .start = 9,
85 .end = 9,
86 .flags = IORESOURCE_IRQ,
87 },
88};
89
90static struct mv643xx_eth_platform_data eth1_pd = {
91 .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1,
92 .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
93 .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
94
95 .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH1 + PEGASOS2_SRAM_TXRING_SIZE,
96 .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
97 .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
98};
99
100static struct platform_device eth1_device = {
101 .name = MV643XX_ETH_NAME,
102 .id = 1,
103 .num_resources = ARRAY_SIZE(mv643xx_eth1_resources),
104 .resource = mv643xx_eth1_resources,
105 .dev = {
106 .platform_data = &eth1_pd,
107 },
108};
109
110static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
111 &mv643xx_eth_shared_device,
112 &eth0_device,
113 &eth1_device,
114};
115
116/***********/
117/***********/
118#define MV_READ(offset,val) { val = readl(mv643xx_reg_base + offset); }
119#define MV_WRITE(offset,data) writel(data, mv643xx_reg_base + offset)
120
121static void __iomem *mv643xx_reg_base;
122
123static int Enable_SRAM(void)
124{
125 u32 ALong;
126
127 if (mv643xx_reg_base == NULL)
128 mv643xx_reg_base = ioremap(PEGASOS2_MARVELL_REGBASE,
129 PEGASOS2_MARVELL_REGSIZE);
130
131 if (mv643xx_reg_base == NULL)
132 return -ENOMEM;
133
134#ifdef BE_VERBOSE
135 printk("Pegasos II/Marvell MV64361: register remapped from %p to %p\n",
136 (void *)PEGASOS2_MARVELL_REGBASE, (void *)mv643xx_reg_base);
137#endif
138
139 MV_WRITE(MV64340_SRAM_CONFIG, 0);
140
141 MV_WRITE(MV64340_INTEGRATED_SRAM_BASE_ADDR, PEGASOS2_SRAM_BASE >> 16);
142
143 MV_READ(MV64340_BASE_ADDR_ENABLE, ALong);
144 ALong &= ~(1 << 19);
145 MV_WRITE(MV64340_BASE_ADDR_ENABLE, ALong);
146
147 ALong = 0x02;
148 ALong |= PEGASOS2_SRAM_BASE & 0xffff0000;
149 MV_WRITE(MV643XX_ETH_BAR_4, ALong);
150
151 MV_WRITE(MV643XX_ETH_SIZE_REG_4, (PEGASOS2_SRAM_SIZE-1) & 0xffff0000);
152
153 MV_READ(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
154 ALong &= ~(1 << 4);
155 MV_WRITE(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
156
157#ifdef BE_VERBOSE
158 printk("Pegasos II/Marvell MV64361: register unmapped\n");
159 printk("Pegasos II/Marvell MV64361: SRAM at %p, size=%x\n", (void*) PEGASOS2_SRAM_BASE, PEGASOS2_SRAM_SIZE);
160#endif
161
162 iounmap(mv643xx_reg_base);
163 mv643xx_reg_base = NULL;
164
165 return 1;
166}
167
168
169/***********/
170/***********/
171int mv643xx_eth_add_pds(void)
172{
173 int ret = 0;
174 static struct pci_device_id pci_marvell_mv64360[] = {
175 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64360) },
176 { }
177 };
178
179#ifdef BE_VERBOSE
180 printk("Pegasos II/Marvell MV64361: init\n");
181#endif
182
183 if (pci_dev_present(pci_marvell_mv64360)) {
184 ret = platform_add_devices(mv643xx_eth_pd_devs,
185 ARRAY_SIZE(mv643xx_eth_pd_devs));
186
187 if ( Enable_SRAM() < 0)
188 {
189 eth0_pd.tx_sram_addr = 0;
190 eth0_pd.tx_sram_size = 0;
191 eth0_pd.rx_sram_addr = 0;
192 eth0_pd.rx_sram_size = 0;
193
194 eth1_pd.tx_sram_addr = 0;
195 eth1_pd.tx_sram_size = 0;
196 eth1_pd.rx_sram_addr = 0;
197 eth1_pd.rx_sram_size = 0;
198
199#ifdef BE_VERBOSE
200 printk("Pegasos II/Marvell MV64361: Can't enable the "
201 "SRAM\n");
202#endif
203 }
204 }
205
206#ifdef BE_VERBOSE
207 printk("Pegasos II/Marvell MV64361: init is over\n");
208#endif
209
210 return ret;
211}
212
213device_initcall(mv643xx_eth_add_pds);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
new file mode 100644
index 000000000000..ecd32d5d85f4
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -0,0 +1,522 @@
1/*
2 * arch/ppc/platforms/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas
6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 */
8
9/*
10 * bootup setup stuff..
11 */
12
13#include <linux/config.h>
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/stddef.h>
19#include <linux/unistd.h>
20#include <linux/ptrace.h>
21#include <linux/slab.h>
22#include <linux/user.h>
23#include <linux/a.out.h>
24#include <linux/tty.h>
25#include <linux/major.h>
26#include <linux/interrupt.h>
27#include <linux/reboot.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/version.h>
31#include <linux/adb.h>
32#include <linux/module.h>
33#include <linux/delay.h>
34#include <linux/ide.h>
35#include <linux/console.h>
36#include <linux/seq_file.h>
37#include <linux/root_dev.h>
38#include <linux/initrd.h>
39#include <linux/module.h>
40
41#include <asm/io.h>
42#include <asm/pgtable.h>
43#include <asm/prom.h>
44#include <asm/gg2.h>
45#include <asm/pci-bridge.h>
46#include <asm/dma.h>
47#include <asm/machdep.h>
48#include <asm/irq.h>
49#include <asm/hydra.h>
50#include <asm/sections.h>
51#include <asm/time.h>
52#include <asm/btext.h>
53#include <asm/i8259.h>
54#include <asm/mpic.h>
55#include <asm/rtas.h>
56#include <asm/xmon.h>
57
58#include "chrp.h"
59
60void rtas_indicator_progress(char *, unsigned short);
61void btext_progress(char *, unsigned short);
62
63int _chrp_type;
64EXPORT_SYMBOL(_chrp_type);
65
66struct mpic *chrp_mpic;
67
68/*
69 * XXX this should be in xmon.h, but putting it there means xmon.h
70 * has to include <linux/interrupt.h> (to get irqreturn_t), which
71 * causes all sorts of problems. -- paulus
72 */
73extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
74
75extern unsigned long loops_per_jiffy;
76
77#ifdef CONFIG_SMP
78extern struct smp_ops_t chrp_smp_ops;
79#endif
80
81static const char *gg2_memtypes[4] = {
82 "FPM", "SDRAM", "EDO", "BEDO"
83};
84static const char *gg2_cachesizes[4] = {
85 "256 KB", "512 KB", "1 MB", "Reserved"
86};
87static const char *gg2_cachetypes[4] = {
88 "Asynchronous", "Reserved", "Flow-Through Synchronous",
89 "Pipelined Synchronous"
90};
91static const char *gg2_cachemodes[4] = {
92 "Disabled", "Write-Through", "Copy-Back", "Transparent Mode"
93};
94
95void chrp_show_cpuinfo(struct seq_file *m)
96{
97 int i, sdramen;
98 unsigned int t;
99 struct device_node *root;
100 const char *model = "";
101
102 root = find_path_device("/");
103 if (root)
104 model = get_property(root, "model", NULL);
105 seq_printf(m, "machine\t\t: CHRP %s\n", model);
106
107 /* longtrail (goldengate) stuff */
108 if (!strncmp(model, "IBM,LongTrail", 13)) {
109 /* VLSI VAS96011/12 `Golden Gate 2' */
110 /* Memory banks */
111 sdramen = (in_le32(gg2_pci_config_base + GG2_PCI_DRAM_CTRL)
112 >>31) & 1;
113 for (i = 0; i < (sdramen ? 4 : 6); i++) {
114 t = in_le32(gg2_pci_config_base+
115 GG2_PCI_DRAM_BANK0+
116 i*4);
117 if (!(t & 1))
118 continue;
119 switch ((t>>8) & 0x1f) {
120 case 0x1f:
121 model = "4 MB";
122 break;
123 case 0x1e:
124 model = "8 MB";
125 break;
126 case 0x1c:
127 model = "16 MB";
128 break;
129 case 0x18:
130 model = "32 MB";
131 break;
132 case 0x10:
133 model = "64 MB";
134 break;
135 case 0x00:
136 model = "128 MB";
137 break;
138 default:
139 model = "Reserved";
140 break;
141 }
142 seq_printf(m, "memory bank %d\t: %s %s\n", i, model,
143 gg2_memtypes[sdramen ? 1 : ((t>>1) & 3)]);
144 }
145 /* L2 cache */
146 t = in_le32(gg2_pci_config_base+GG2_PCI_CC_CTRL);
147 seq_printf(m, "board l2\t: %s %s (%s)\n",
148 gg2_cachesizes[(t>>7) & 3],
149 gg2_cachetypes[(t>>2) & 3],
150 gg2_cachemodes[t & 3]);
151 }
152}
153
154/*
155 * Fixes for the National Semiconductor PC78308VUL SuperI/O
156 *
157 * Some versions of Open Firmware incorrectly initialize the IRQ settings
158 * for keyboard and mouse
159 */
160static inline void __init sio_write(u8 val, u8 index)
161{
162 outb(index, 0x15c);
163 outb(val, 0x15d);
164}
165
166static inline u8 __init sio_read(u8 index)
167{
168 outb(index, 0x15c);
169 return inb(0x15d);
170}
171
172static void __init sio_fixup_irq(const char *name, u8 device, u8 level,
173 u8 type)
174{
175 u8 level0, type0, active;
176
177 /* select logical device */
178 sio_write(device, 0x07);
179 active = sio_read(0x30);
180 level0 = sio_read(0x70);
181 type0 = sio_read(0x71);
182 if (level0 != level || type0 != type || !active) {
183 printk(KERN_WARNING "sio: %s irq level %d, type %d, %sactive: "
184 "remapping to level %d, type %d, active\n",
185 name, level0, type0, !active ? "in" : "", level, type);
186 sio_write(0x01, 0x30);
187 sio_write(level, 0x70);
188 sio_write(type, 0x71);
189 }
190}
191
192static void __init sio_init(void)
193{
194 struct device_node *root;
195
196 if ((root = find_path_device("/")) &&
197 !strncmp(get_property(root, "model", NULL), "IBM,LongTrail", 13)) {
198 /* logical device 0 (KBC/Keyboard) */
199 sio_fixup_irq("keyboard", 0, 1, 2);
200 /* select logical device 1 (KBC/Mouse) */
201 sio_fixup_irq("mouse", 1, 12, 2);
202 }
203}
204
205
206static void __init pegasos_set_l2cr(void)
207{
208 struct device_node *np;
209
210 /* On Pegasos, enable the l2 cache if needed, as the OF forgets it */
211 if (_chrp_type != _CHRP_Pegasos)
212 return;
213
214 /* Enable L2 cache if needed */
215 np = find_type_devices("cpu");
216 if (np != NULL) {
217 unsigned int *l2cr = (unsigned int *)
218 get_property (np, "l2cr", NULL);
219 if (l2cr == NULL) {
220 printk ("Pegasos l2cr : no cpu l2cr property found\n");
221 return;
222 }
223 if (!((*l2cr) & 0x80000000)) {
224 printk ("Pegasos l2cr : L2 cache was not active, "
225 "activating\n");
226 _set_L2CR(0);
227 _set_L2CR((*l2cr) | 0x80000000);
228 }
229 }
230}
231
232void __init chrp_setup_arch(void)
233{
234 struct device_node *root = find_path_device ("/");
235 char *machine = NULL;
236 struct device_node *device;
237 unsigned int *p = NULL;
238
239 /* init to some ~sane value until calibrate_delay() runs */
240 loops_per_jiffy = 50000000/HZ;
241
242 if (root)
243 machine = get_property(root, "model", NULL);
244 if (machine && strncmp(machine, "Pegasos", 7) == 0) {
245 _chrp_type = _CHRP_Pegasos;
246 } else if (machine && strncmp(machine, "IBM", 3) == 0) {
247 _chrp_type = _CHRP_IBM;
248 } else if (machine && strncmp(machine, "MOT", 3) == 0) {
249 _chrp_type = _CHRP_Motorola;
250 } else {
251 /* Let's assume it is an IBM chrp if all else fails */
252 _chrp_type = _CHRP_IBM;
253 }
254 printk("chrp type = %x\n", _chrp_type);
255
256 rtas_initialize();
257 if (rtas_token("display-character") >= 0)
258 ppc_md.progress = rtas_progress;
259
260#ifdef CONFIG_BOOTX_TEXT
261 if (ppc_md.progress == NULL && boot_text_mapped)
262 ppc_md.progress = btext_progress;
263#endif
264
265#ifdef CONFIG_BLK_DEV_INITRD
266 /* this is fine for chrp */
267 initrd_below_start_ok = 1;
268
269 if (initrd_start)
270 ROOT_DEV = Root_RAM0;
271 else
272#endif
273 ROOT_DEV = Root_SDA2; /* sda2 (sda1 is for the kernel) */
274
275 /* On pegasos, enable the L2 cache if not already done by OF */
276 pegasos_set_l2cr();
277
278 /* Lookup PCI host bridges */
279 chrp_find_bridges();
280
281 /*
282 * Temporary fixes for PCI devices.
283 * -- Geert
284 */
285 hydra_init(); /* Mac I/O */
286
287 /*
288 * Fix the Super I/O configuration
289 */
290 sio_init();
291
292 /* Get the event scan rate for the rtas so we know how
293 * often it expects a heartbeat. -- Cort
294 */
295 device = find_devices("rtas");
296 if (device)
297 p = (unsigned int *) get_property
298 (device, "rtas-event-scan-rate", NULL);
299 if (p && *p) {
300 ppc_md.heartbeat = chrp_event_scan;
301 ppc_md.heartbeat_reset = HZ / (*p * 30) - 1;
302 ppc_md.heartbeat_count = 1;
303 printk("RTAS Event Scan Rate: %u (%lu jiffies)\n",
304 *p, ppc_md.heartbeat_reset);
305 }
306
307 pci_create_OF_bus_map();
308
309 /*
310 * Print the banner, then scroll down so boot progress
311 * can be printed. -- Cort
312 */
313 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
314}
315
316void
317chrp_event_scan(void)
318{
319 unsigned char log[1024];
320 int ret = 0;
321
322 /* XXX: we should loop until the hardware says no more error logs -- Cort */
323 rtas_call(rtas_token("event-scan"), 4, 1, &ret, 0xffffffff, 0,
324 __pa(log), 1024);
325 ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
326}
327
328/*
329 * Finds the open-pic node and sets up the mpic driver.
330 */
331static void __init chrp_find_openpic(void)
332{
333 struct device_node *np, *root;
334 int len, i, j, irq_count;
335 int isu_size, idu_size;
336 unsigned int *iranges, *opprop = NULL;
337 int oplen = 0;
338 unsigned long opaddr;
339 int na = 1;
340 unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS];
341
342 np = find_type_devices("open-pic");
343 if (np == NULL)
344 return;
345 root = find_path_device("/");
346 if (root) {
347 opprop = (unsigned int *) get_property
348 (root, "platform-open-pic", &oplen);
349 na = prom_n_addr_cells(root);
350 }
351 if (opprop && oplen >= na * sizeof(unsigned int)) {
352 opaddr = opprop[na-1]; /* assume 32-bit */
353 oplen /= na * sizeof(unsigned int);
354 } else {
355 if (np->n_addrs == 0)
356 return;
357 opaddr = np->addrs[0].address;
358 oplen = 0;
359 }
360
361 printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
362
363 irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
364 prom_get_irq_senses(init_senses, NUM_8259_INTERRUPTS, NR_IRQS - 4);
365
366 iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len);
367 if (iranges == NULL)
368 len = 0; /* non-distributed mpic */
369 else
370 len /= 2 * sizeof(unsigned int);
371
372 /*
373 * The first pair of cells in interrupt-ranges refers to the
374 * IDU; subsequent pairs refer to the ISUs.
375 */
376 if (oplen < len) {
377 printk(KERN_ERR "Insufficient addresses for distributed"
378 " OpenPIC (%d < %d)\n", np->n_addrs, len);
379 len = oplen;
380 }
381
382 isu_size = 0;
383 idu_size = 0;
384 if (len > 0 && iranges[1] != 0) {
385 printk(KERN_INFO "OpenPIC irqs %d..%d in IDU\n",
386 iranges[0], iranges[0] + iranges[1] - 1);
387 idu_size = iranges[1];
388 }
389 if (len > 1)
390 isu_size = iranges[3];
391
392 chrp_mpic = mpic_alloc(opaddr, MPIC_PRIMARY,
393 isu_size, NUM_ISA_INTERRUPTS, irq_count,
394 NR_IRQS - 4, init_senses, irq_count,
395 " MPIC ");
396 if (chrp_mpic == NULL) {
397 printk(KERN_ERR "Failed to allocate MPIC structure\n");
398 return;
399 }
400
401 j = na - 1;
402 for (i = 1; i < len; ++i) {
403 iranges += 2;
404 j += na;
405 printk(KERN_INFO "OpenPIC irqs %d..%d in ISU at %x\n",
406 iranges[0], iranges[0] + iranges[1] - 1,
407 opprop[j]);
408 mpic_assign_isu(chrp_mpic, i - 1, opprop[j]);
409 }
410
411 mpic_init(chrp_mpic);
412 mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
413}
414
415#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
416static struct irqaction xmon_irqaction = {
417 .handler = xmon_irq,
418 .mask = CPU_MASK_NONE,
419 .name = "XMON break",
420};
421#endif
422
423void __init chrp_init_IRQ(void)
424{
425 struct device_node *np;
426 unsigned long chrp_int_ack = 0;
427#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
428 struct device_node *kbd;
429#endif
430
431 for (np = find_devices("pci"); np != NULL; np = np->next) {
432 unsigned int *addrp = (unsigned int *)
433 get_property(np, "8259-interrupt-acknowledge", NULL);
434
435 if (addrp == NULL)
436 continue;
437 chrp_int_ack = addrp[prom_n_addr_cells(np)-1];
438 break;
439 }
440 if (np == NULL)
441 printk(KERN_ERR "Cannot find PCI interrupt acknowledge address\n");
442
443 chrp_find_openpic();
444
445 i8259_init(chrp_int_ack, 0);
446
447 if (_chrp_type == _CHRP_Pegasos)
448 ppc_md.get_irq = i8259_irq;
449 else
450 ppc_md.get_irq = mpic_get_irq;
451
452#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
453 /* see if there is a keyboard in the device tree
454 with a parent of type "adb" */
455 for (kbd = find_devices("keyboard"); kbd; kbd = kbd->next)
456 if (kbd->parent && kbd->parent->type
457 && strcmp(kbd->parent->type, "adb") == 0)
458 break;
459 if (kbd)
460 setup_irq(HYDRA_INT_ADB_NMI, &xmon_irqaction);
461#endif
462}
463
464void __init
465chrp_init2(void)
466{
467#ifdef CONFIG_NVRAM
468 chrp_nvram_init();
469#endif
470
471 request_region(0x20,0x20,"pic1");
472 request_region(0xa0,0x20,"pic2");
473 request_region(0x00,0x20,"dma1");
474 request_region(0x40,0x20,"timer");
475 request_region(0x80,0x10,"dma page reg");
476 request_region(0xc0,0x20,"dma2");
477
478 if (ppc_md.progress)
479 ppc_md.progress(" Have fun! ", 0x7777);
480}
481
482void __init chrp_init(void)
483{
484 ISA_DMA_THRESHOLD = ~0L;
485 DMA_MODE_READ = 0x44;
486 DMA_MODE_WRITE = 0x48;
487 isa_io_base = CHRP_ISA_IO_BASE; /* default value */
488 ppc_do_canonicalize_irqs = 1;
489
490 /* Assume we have an 8259... */
491 __irq_offset_value = NUM_ISA_INTERRUPTS;
492
493 ppc_md.setup_arch = chrp_setup_arch;
494 ppc_md.show_cpuinfo = chrp_show_cpuinfo;
495
496 ppc_md.init_IRQ = chrp_init_IRQ;
497 ppc_md.init = chrp_init2;
498
499 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
500
501 ppc_md.restart = rtas_restart;
502 ppc_md.power_off = rtas_power_off;
503 ppc_md.halt = rtas_halt;
504
505 ppc_md.time_init = chrp_time_init;
506 ppc_md.set_rtc_time = chrp_set_rtc_time;
507 ppc_md.get_rtc_time = chrp_get_rtc_time;
508 ppc_md.calibrate_decr = chrp_calibrate_decr;
509
510#ifdef CONFIG_SMP
511 smp_ops = &chrp_smp_ops;
512#endif /* CONFIG_SMP */
513}
514
515#ifdef CONFIG_BOOTX_TEXT
516void
517btext_progress(char *s, unsigned short hex)
518{
519 btext_drawstring(s);
520 btext_drawstring("\n");
521}
522#endif /* CONFIG_BOOTX_TEXT */
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
new file mode 100644
index 000000000000..31ee49c25014
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -0,0 +1,122 @@
1/*
2 * Smp support for CHRP machines.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5 * deal of code from the sparc and intel versions.
6 *
7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
8 *
9 */
10
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
18#include <linux/delay.h>
19#include <linux/init.h>
20#include <linux/spinlock.h>
21
22#include <asm/ptrace.h>
23#include <asm/atomic.h>
24#include <asm/irq.h>
25#include <asm/page.h>
26#include <asm/pgtable.h>
27#include <asm/sections.h>
28#include <asm/io.h>
29#include <asm/prom.h>
30#include <asm/smp.h>
31#include <asm/residual.h>
32#include <asm/time.h>
33#include <asm/open_pic.h>
34#include <asm/machdep.h>
35#include <asm/smp.h>
36#include <asm/mpic.h>
37
38extern unsigned long smp_chrp_cpu_nr;
39
40static int __init smp_chrp_probe(void)
41{
42 struct device_node *cpus = NULL;
43 unsigned int *reg;
44 int reglen;
45 int ncpus = 0;
46 int cpuid;
47 unsigned int phys;
48
49 /* Count CPUs in the device-tree */
50 cpuid = 1; /* the boot cpu is logical cpu 0 */
51 while ((cpus = of_find_node_by_type(cpus, "cpu")) != NULL) {
52 phys = ncpus;
53 reg = (unsigned int *) get_property(cpus, "reg", &reglen);
54 if (reg && reglen >= sizeof(unsigned int))
55 /* hmmm, not having a reg property would be bad */
56 phys = *reg;
57 if (phys != boot_cpuid_phys) {
58 set_hard_smp_processor_id(cpuid, phys);
59 ++cpuid;
60 }
61 ++ncpus;
62 }
63
64 printk(KERN_INFO "CHRP SMP probe found %d cpus\n", ncpus);
65
66 /* Nothing more to do if less than 2 of them */
67 if (ncpus <= 1)
68 return 1;
69
70 mpic_request_ipis();
71
72 return ncpus;
73}
74
75static void __devinit smp_chrp_kick_cpu(int nr)
76{
77 *(unsigned long *)KERNELBASE = nr;
78 asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
79}
80
81static void __devinit smp_chrp_setup_cpu(int cpu_nr)
82{
83 mpic_setup_this_cpu();
84}
85
86static DEFINE_SPINLOCK(timebase_lock);
87static unsigned int timebase_upper = 0, timebase_lower = 0;
88
89void __devinit smp_chrp_give_timebase(void)
90{
91 spin_lock(&timebase_lock);
92 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
93 timebase_upper = get_tbu();
94 timebase_lower = get_tbl();
95 spin_unlock(&timebase_lock);
96
97 while (timebase_upper || timebase_lower)
98 barrier();
99 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
100}
101
102void __devinit smp_chrp_take_timebase(void)
103{
104 while (!(timebase_upper || timebase_lower))
105 barrier();
106 spin_lock(&timebase_lock);
107 set_tb(timebase_upper, timebase_lower);
108 timebase_upper = 0;
109 timebase_lower = 0;
110 spin_unlock(&timebase_lock);
111 printk("CPU %i taken timebase\n", smp_processor_id());
112}
113
114/* CHRP with openpic */
115struct smp_ops_t chrp_smp_ops = {
116 .message_pass = smp_mpic_message_pass,
117 .probe = smp_chrp_probe,
118 .kick_cpu = smp_chrp_kick_cpu,
119 .setup_cpu = smp_chrp_setup_cpu,
120 .give_timebase = smp_chrp_give_timebase,
121 .take_timebase = smp_chrp_take_timebase,
122};
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
new file mode 100644
index 000000000000..9e53535ddb82
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -0,0 +1,188 @@
1/*
2 * arch/ppc/platforms/chrp_time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 *
6 * Adapted for PowerPC (PReP) by Gary Thomas
7 * Modified by Cort Dougan (cort@cs.nmt.edu).
8 * Copied and modified from arch/i386/kernel/time.c
9 *
10 */
11#include <linux/errno.h>
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/param.h>
15#include <linux/string.h>
16#include <linux/mm.h>
17#include <linux/interrupt.h>
18#include <linux/time.h>
19#include <linux/timex.h>
20#include <linux/kernel_stat.h>
21#include <linux/mc146818rtc.h>
22#include <linux/init.h>
23#include <linux/bcd.h>
24
25#include <asm/io.h>
26#include <asm/nvram.h>
27#include <asm/prom.h>
28#include <asm/sections.h>
29#include <asm/time.h>
30
31extern spinlock_t rtc_lock;
32
33static int nvram_as1 = NVRAM_AS1;
34static int nvram_as0 = NVRAM_AS0;
35static int nvram_data = NVRAM_DATA;
36
37long __init chrp_time_init(void)
38{
39 struct device_node *rtcs;
40 int base;
41
42 rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
43 if (rtcs == NULL)
44 rtcs = find_compatible_devices("rtc", "ds1385-rtc");
45 if (rtcs == NULL || rtcs->addrs == NULL)
46 return 0;
47 base = rtcs->addrs[0].address;
48 nvram_as1 = 0;
49 nvram_as0 = base;
50 nvram_data = base + 1;
51
52 return 0;
53}
54
55int chrp_cmos_clock_read(int addr)
56{
57 if (nvram_as1 != 0)
58 outb(addr>>8, nvram_as1);
59 outb(addr, nvram_as0);
60 return (inb(nvram_data));
61}
62
63void chrp_cmos_clock_write(unsigned long val, int addr)
64{
65 if (nvram_as1 != 0)
66 outb(addr>>8, nvram_as1);
67 outb(addr, nvram_as0);
68 outb(val, nvram_data);
69 return;
70}
71
72/*
73 * Set the hardware clock. -- Cort
74 */
75int chrp_set_rtc_time(struct rtc_time *tmarg)
76{
77 unsigned char save_control, save_freq_select;
78 struct rtc_time tm = *tmarg;
79
80 spin_lock(&rtc_lock);
81
82 save_control = chrp_cmos_clock_read(RTC_CONTROL); /* tell the clock it's being set */
83
84 chrp_cmos_clock_write((save_control|RTC_SET), RTC_CONTROL);
85
86 save_freq_select = chrp_cmos_clock_read(RTC_FREQ_SELECT); /* stop and reset prescaler */
87
88 chrp_cmos_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
89
90 tm.tm_year -= 1900;
91 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
92 BIN_TO_BCD(tm.tm_sec);
93 BIN_TO_BCD(tm.tm_min);
94 BIN_TO_BCD(tm.tm_hour);
95 BIN_TO_BCD(tm.tm_mon);
96 BIN_TO_BCD(tm.tm_mday);
97 BIN_TO_BCD(tm.tm_year);
98 }
99 chrp_cmos_clock_write(tm.tm_sec,RTC_SECONDS);
100 chrp_cmos_clock_write(tm.tm_min,RTC_MINUTES);
101 chrp_cmos_clock_write(tm.tm_hour,RTC_HOURS);
102 chrp_cmos_clock_write(tm.tm_mon,RTC_MONTH);
103 chrp_cmos_clock_write(tm.tm_mday,RTC_DAY_OF_MONTH);
104 chrp_cmos_clock_write(tm.tm_year,RTC_YEAR);
105
106 /* The following flags have to be released exactly in this order,
107 * otherwise the DS12887 (popular MC146818A clone with integrated
108 * battery and quartz) will not reset the oscillator and will not
109 * update precisely 500 ms later. You won't find this mentioned in
110 * the Dallas Semiconductor data sheets, but who believes data
111 * sheets anyway ... -- Markus Kuhn
112 */
113 chrp_cmos_clock_write(save_control, RTC_CONTROL);
114 chrp_cmos_clock_write(save_freq_select, RTC_FREQ_SELECT);
115
116 spin_unlock(&rtc_lock);
117 return 0;
118}
119
120void chrp_get_rtc_time(struct rtc_time *tm)
121{
122 unsigned int year, mon, day, hour, min, sec;
123 int uip, i;
124
125 /* The Linux interpretation of the CMOS clock register contents:
126 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
127 * RTC registers show the second which has precisely just started.
128 * Let's hope other operating systems interpret the RTC the same way.
129 */
130
131 /* Since the UIP flag is set for about 2.2 ms and the clock
132 * is typically written with a precision of 1 jiffy, trying
133 * to obtain a precision better than a few milliseconds is
134 an illusion. Only consistency is interesting; this also
135 allows the routine to be used for /dev/rtc without a potential
136 * 1 second kernel busy loop triggered by any reader of /dev/rtc.
137 */
138
139 for ( i = 0; i<1000000; i++) {
140 uip = chrp_cmos_clock_read(RTC_FREQ_SELECT);
141 sec = chrp_cmos_clock_read(RTC_SECONDS);
142 min = chrp_cmos_clock_read(RTC_MINUTES);
143 hour = chrp_cmos_clock_read(RTC_HOURS);
144 day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH);
145 mon = chrp_cmos_clock_read(RTC_MONTH);
146 year = chrp_cmos_clock_read(RTC_YEAR);
147 uip |= chrp_cmos_clock_read(RTC_FREQ_SELECT);
148 if ((uip & RTC_UIP)==0) break;
149 }
150
151 if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
152 BCD_TO_BIN(sec);
153 BCD_TO_BIN(min);
154 BCD_TO_BIN(hour);
155 BCD_TO_BIN(day);
156 BCD_TO_BIN(mon);
157 BCD_TO_BIN(year);
158 }
159 if ((year += 1900) < 1970)
160 year += 100;
161 tm->tm_sec = sec;
162 tm->tm_min = min;
163 tm->tm_hour = hour;
164 tm->tm_mday = day;
165 tm->tm_mon = mon;
166 tm->tm_year = year;
167}
168
169
170void __init chrp_calibrate_decr(void)
171{
172 struct device_node *cpu;
173 unsigned int freq, *fp;
174
175 /*
176 * The cpu node should have a timebase-frequency property
177 * to tell us the rate at which the decrementer counts.
178 */
179 freq = 16666000; /* hardcoded default */
180 cpu = find_type_devices("cpu");
181 if (cpu != 0) {
182 fp = (unsigned int *)
183 get_property(cpu, "timebase-frequency", NULL);
184 if (fp != 0)
185 freq = *fp;
186 }
187 ppc_tb_freq = freq;
188}
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
new file mode 100644
index 000000000000..81250090f98d
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -0,0 +1,318 @@
1choice
2 prompt "Machine Type"
3 depends on EMBEDDED6xx
4
5config KATANA
6 bool "Artesyn-Katana"
7 help
8 Select KATANA if configuring an Artesyn KATANA 750i or 3750
9 cPCI board.
10
11config WILLOW
12 bool "Cogent-Willow"
13
14config CPCI690
15 bool "Force-CPCI690"
16 help
17 Select CPCI690 if configuring a Force CPCI690 cPCI board.
18
19config POWERPMC250
20 bool "Force-PowerPMC250"
21
22config CHESTNUT
23 bool "IBM 750FX Eval board or 750GX Eval board"
24 help
25 Select CHESTNUT if configuring an IBM 750FX Eval Board or an
26 IBM 750GX Eval Board.
27
28config SPRUCE
29 bool "IBM-Spruce"
30 select PPC_INDIRECT_PCI
31
32config HDPU
33 bool "Sky-HDPU"
34 help
35 Select HDPU if configuring a Sky Computers Compute Blade.
36
37config HDPU_FEATURES
38 depends on HDPU
39 tristate "HDPU-Features"
40 help
41 Select to enable HDPU enhanced features.
42
43config EV64260
44 bool "Marvell-EV64260BP"
45 help
46 Select EV64260 if configuring a Marvell (formerly Galileo)
47 EV64260BP Evaluation platform.
48
49config LOPEC
50 bool "Motorola-LoPEC"
51 select PPC_I8259
52
53config MVME5100
54 bool "Motorola-MVME5100"
55 select PPC_INDIRECT_PCI
56
57config PPLUS
58 bool "Motorola-PowerPlus"
59 select PPC_I8259
60 select PPC_INDIRECT_PCI
61
62config PRPMC750
63 bool "Motorola-PrPMC750"
64 select PPC_INDIRECT_PCI
65
66config PRPMC800
67 bool "Motorola-PrPMC800"
68 select PPC_INDIRECT_PCI
69
70config SANDPOINT
71 bool "Motorola-Sandpoint"
72 select PPC_I8259
73 help
74 Select SANDPOINT if configuring for a Motorola Sandpoint X3
75 (any flavor).
76
77config RADSTONE_PPC7D
78 bool "Radstone Technology PPC7D board"
79 select PPC_I8259
80
81config PAL4
82 bool "SBS-Palomar4"
83
84config GEMINI
85 bool "Synergy-Gemini"
86 select PPC_INDIRECT_PCI
87 depends on BROKEN
88 help
89 Select Gemini if configuring for a Synergy Microsystems Gemini
90 series Single Board Computer. More information is available at:
91 <http://www.synergymicro.com/PressRel/97_10_15.html>.
92
93config EST8260
94 bool "EST8260"
95 ---help---
96 The EST8260 is a single-board computer manufactured by Wind River
97 Systems, Inc. (formerly Embedded Support Tools Corp.) and based on
98 the MPC8260. Wind River Systems has a website at
99 <http://www.windriver.com/>, but the EST8260 cannot be found on it
100 and has probably been discontinued or rebadged.
101
102config SBC82xx
103 bool "SBC82xx"
104 ---help---
105 SBC PowerQUICC II, single-board computer with MPC82xx CPU
106 Manufacturer: Wind River Systems, Inc.
107 Date of Release: May 2003
108 End of Life: -
109 URL: <http://www.windriver.com/>
110
111config SBS8260
112 bool "SBS8260"
113
114config RPX8260
115 bool "RPXSUPER"
116
117config TQM8260
118 bool "TQM8260"
119 ---help---
120 MPC8260-based module, a little larger than a credit card,
121 up to 128 MB global + 64 MB local RAM, 32 MB Flash,
122 32 kB EEPROM, 256 kB L2 Cache, 10baseT + 100baseT Ethernet,
123 2 x serial ports, ...
124 Manufacturer: TQ Components, www.tq-group.de
125 Date of Release: June 2001
126 End of Life: not yet :-)
127 URL: <http://www.denx.de/PDF/TQM82xx_SPEC_Rev005.pdf>
128
129config ADS8272
130 bool "ADS8272"
131
132config PQ2FADS
133 bool "Freescale-PQ2FADS"
134 help
135 Select PQ2FADS if you wish to configure for a Freescale
136 PQ2FADS board (-VR or -ZU).
137
138config LITE5200
139 bool "Freescale LITE5200 / (IceCube)"
140 select PPC_MPC52xx
141 help
142 Support for the LITE5200 development board for the MPC5200 from
143 Freescale. This is for the LITE5200 version 2.0 board; it has only
144 been tested on this board version, and other revisions may differ.
145 This board is also known as IceCube.
146
147config MPC834x_SYS
148 bool "Freescale MPC834x SYS"
149 help
150 This option enables support for the MPC834x SYS evaluation board.
151
152 Be aware that PCI buses can only function when the SYS board is
153 plugged into the PIB (Platform I/O Board) from Freescale, which
154 provides 3 PCI slots. The PIB's PCI initialization is the
155 bootloader's responsibility.
156
157config EV64360
158 bool "Marvell-EV64360BP"
159 help
160 Select EV64360 if configuring a Marvell EV64360BP Evaluation
161 platform.
162endchoice
163
164config PQ2ADS
165 bool
166 depends on ADS8272
167 default y
168
169config TQM8xxL
170 bool
171 depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
172 default y
173
174config PPC_MPC52xx
175 bool
176
177config 8260
178 bool "CPM2 Support" if WILLOW
179 depends on 6xx
180 default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx || PQ2FADS
181 help
182 The MPC8260 is a typical embedded CPU made by Motorola. Selecting
183 this option means that you wish to build a kernel for a machine with
184 an 8260 class CPU.
185
186config 8272
187 bool
188 depends on 6xx
189 default y if ADS8272
190 select 8260
191 help
192 The MPC8272 CPM has a different internal dpram setup than other CPM2
193 devices.
194
195config 83xx
196 bool
197 default y if MPC834x_SYS
198
199config MPC834x
200 bool
201 default y if MPC834x_SYS
202
203config CPM2
204 bool
205 depends on 8260 || MPC8560 || MPC8555
206 default y
207 help
208 The CPM2 (Communications Processor Module) is a coprocessor on
209 embedded CPUs made by Motorola. Selecting this option means that
210 you wish to build a kernel for a machine with a CPM2 coprocessor
211 on it (826x, 827x, 8560).
212
213config PPC_GEN550
214 bool
215 depends on SANDPOINT || SPRUCE || PPLUS || \
216 PRPMC750 || PRPMC800 || LOPEC || \
217 (EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
218 83xx
219 default y
220
221config FORCE
222 bool
223 depends on 6xx && POWERPMC250
224 default y
225
226config GT64260
227 bool
228 depends on EV64260 || CPCI690
229 default y
230
231config MV64360 # Really MV64360 & MV64460
232 bool
233 depends on CHESTNUT || KATANA || RADSTONE_PPC7D || HDPU || EV64360
234 default y
235
236config MV64X60
237 bool
238 depends on (GT64260 || MV64360)
239 select PPC_INDIRECT_PCI
240 default y
241
242menu "Set bridge options"
243 depends on MV64X60
244
245config NOT_COHERENT_CACHE
246 bool "Turn off Cache Coherency"
247 default n
248 help
249 Some 64x60 bridges lock up when trying to enforce cache coherency.
250 When this option is selected, cache coherency will be turned off.
251 Note that this can cause other problems (e.g., stale data being
252 speculatively loaded via a cached mapping). Use at your own risk.
253
254config MV64X60_BASE
255 hex "Set bridge base used by firmware"
256 default "0xf1000000"
257 help
258 A firmware can leave the base address of the bridge's registers at
259 a non-standard location. If so, set this value to reflect the
260 address of that non-standard location.
261
262config MV64X60_NEW_BASE
263 hex "Set bridge base used by kernel"
264 default "0xf1000000"
265 help
266 If the current base address of the bridge's registers is not where
267 you want it, set this value to the address that you want it moved to.
268
269endmenu
270
271config NONMONARCH_SUPPORT
272 bool "Enable Non-Monarch Support"
273 depends on PRPMC800
274
275config HARRIER
276 bool
277 depends on PRPMC800
278 default y
279
280config EPIC_SERIAL_MODE
281 bool
282 depends on 6xx && (LOPEC || SANDPOINT)
283 default y
284
285config MPC10X_BRIDGE
286 bool
287 depends on POWERPMC250 || LOPEC || SANDPOINT
288 select PPC_INDIRECT_PCI
289 default y
290
291config MPC10X_OPENPIC
292 bool
293 depends on POWERPMC250 || LOPEC || SANDPOINT
294 default y
295
296config MPC10X_STORE_GATHERING
297 bool "Enable MPC10x store gathering"
298 depends on MPC10X_BRIDGE
299
300config SANDPOINT_ENABLE_UART1
301 bool "Enable DUART mode on Sandpoint"
302 depends on SANDPOINT
303 help
304 If this option is enabled then the MPC824x processor will run
305 in DUART mode instead of UART mode.
306
307config HARRIER_STORE_GATHERING
308 bool "Enable Harrier store gathering"
309 depends on HARRIER
310
311config MVME5100_IPMC761_PRESENT
312 bool "MVME5100 configured with an IPMC761"
313 depends on MVME5100
314 select PPC_I8259
315
316config SPRUCE_BAUD_33M
317 bool "Spruce baud clock support"
318 depends on SPRUCE
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
new file mode 100644
index 000000000000..3d957a30c8c2
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -0,0 +1,31 @@
1
2menu "iSeries device drivers"
3 depends on PPC_ISERIES
4
5config VIOCONS
6 tristate "iSeries Virtual Console Support"
7
8config VIODASD
9 tristate "iSeries Virtual I/O disk support"
10 help
11 If you are running on an iSeries system and you want to use
12 virtual disks created and managed by OS/400, say Y.
13
14config VIOCD
15 tristate "iSeries Virtual I/O CD support"
16 help
17 If you are running Linux on an IBM iSeries system and you want to
18 read a CD drive owned by OS/400, say Y here.
19
20config VIOTAPE
21 tristate "iSeries Virtual Tape Support"
22 help
23 If you are running Linux on an iSeries system and you want Linux
24 to read and/or write a tape drive owned by OS/400, say Y here.
25
26endmenu
27
28config VIOPATH
29 bool
30 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
31 default y
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
new file mode 100644
index 000000000000..127b465308be
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -0,0 +1,9 @@
1EXTRA_CFLAGS += -mno-minimal-toc
2
3obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \
4 hvcall.o proc.o htab.o iommu.o misc.o
5obj-$(CONFIG_PCI) += pci.o irq.o vpdinfo.o
6obj-$(CONFIG_IBMVIO) += vio.o
7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_VIOPATH) += viopath.o
9obj-$(CONFIG_MODULES) += ksyms.o
diff --git a/include/asm-ppc64/iSeries/HvCallHpt.h b/arch/powerpc/platforms/iseries/call_hpt.h
index 43a1969230b8..321f3bb7a8f5 100644
--- a/include/asm-ppc64/iSeries/HvCallHpt.h
+++ b/arch/powerpc/platforms/iseries/call_hpt.h
@@ -1,5 +1,4 @@
 /*
- * HvCallHpt.h
  * Copyright (C) 2001 Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#ifndef _HVCALLHPT_H
-#define _HVCALLHPT_H
+#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
+#define _PLATFORMS_ISERIES_CALL_HPT_H

 /*
  * This file contains the "hypervisor call" interface which is used to
@@ -99,4 +98,4 @@ static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, hpte_t *hpte)
 	HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
 }

-#endif /* _HVCALLHPT_H */
+#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
diff --git a/include/asm-ppc64/iSeries/HvCallPci.h b/arch/powerpc/platforms/iseries/call_pci.h
index c8d675c40f5e..a86e065b9577 100644
--- a/include/asm-ppc64/iSeries/HvCallPci.h
+++ b/arch/powerpc/platforms/iseries/call_pci.h
@@ -22,8 +22,8 @@
  * Created, Jan 9, 2001
  */

-#ifndef _HVCALLPCI_H
-#define _HVCALLPCI_H
+#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
+#define _PLATFORMS_ISERIES_CALL_PCI_H

 #include <asm/iSeries/HvCallSc.h>
 #include <asm/iSeries/HvTypes.h>
@@ -126,25 +126,6 @@ enum HvCallPci_VpdType {
 #define HvCallPciUnmaskInterrupts	HvCallPci + 49
 #define HvCallPciGetBusUnitInfo	HvCallPci + 50

-static inline u64 HvCallPci_configLoad8(u16 busNumber, u8 subBusNumber,
-		u8 deviceId, u32 offset, u8 *value)
-{
-	struct HvCallPci_DsaAddr dsa;
-	struct HvCallPci_LoadReturn retVal;
-
-	*((u64*)&dsa) = 0;
-
-	dsa.busNumber = busNumber;
-	dsa.subBusNumber = subBusNumber;
-	dsa.deviceId = deviceId;
-
-	HvCall3Ret16(HvCallPciConfigLoad8, &retVal, *(u64 *)&dsa, offset, 0);
-
-	*value = retVal.value;
-
-	return retVal.rc;
-}
-
 static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
 		u8 deviceId, u32 offset, u16 *value)
 {
@@ -164,25 +145,6 @@ static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
 	return retVal.rc;
 }

-static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
-		u8 deviceId, u32 offset, u32 *value)
-{
-	struct HvCallPci_DsaAddr dsa;
-	struct HvCallPci_LoadReturn retVal;
-
-	*((u64*)&dsa) = 0;
-
-	dsa.busNumber = busNumber;
-	dsa.subBusNumber = subBusNumber;
-	dsa.deviceId = deviceId;
-
-	HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
-
-	*value = retVal.value;
-
-	return retVal.rc;
-}
-
 static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
 		u8 deviceId, u32 offset, u8 value)
 {
@@ -197,186 +159,6 @@ static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
 	return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
 }

-static inline u64 HvCallPci_configStore16(u16 busNumber, u8 subBusNumber,
-		u8 deviceId, u32 offset, u16 value)
-{
-	struct HvCallPci_DsaAddr dsa;
-
-	*((u64*)&dsa) = 0;
-
-	dsa.busNumber = busNumber;
-	dsa.subBusNumber = subBusNumber;
-	dsa.deviceId = deviceId;
-
-	return HvCall4(HvCallPciConfigStore16, *(u64 *)&dsa, offset, value, 0);
-}
-
-static inline u64 HvCallPci_configStore32(u16 busNumber, u8 subBusNumber,
-		u8 deviceId, u32 offset, u32 value)
-{
-	struct HvCallPci_DsaAddr dsa;
-
-	*((u64*)&dsa) = 0;
-
-	dsa.busNumber = busNumber;
-	dsa.subBusNumber = subBusNumber;
-	dsa.deviceId = deviceId;
-
-	return HvCall4(HvCallPciConfigStore32, *(u64 *)&dsa, offset, value, 0);
-}
-
-static inline u64 HvCallPci_barLoad8(u16 busNumberParm, u8 subBusParm,
-		u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-		u8 *valueParm)
-{
-	struct HvCallPci_DsaAddr dsa;
-	struct HvCallPci_LoadReturn retVal;
-
-	*((u64*)&dsa) = 0;
-
-	dsa.busNumber = busNumberParm;
-	dsa.subBusNumber = subBusParm;
-	dsa.deviceId = deviceIdParm;
-	dsa.barNumber = barNumberParm;
-
-	HvCall3Ret16(HvCallPciBarLoad8, &retVal, *(u64 *)&dsa, offsetParm, 0);
-
-	*valueParm = retVal.value;
-
-	return retVal.rc;
-}
-
-static inline u64 HvCallPci_barLoad16(u16 busNumberParm, u8 subBusParm,
-		u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-		u16 *valueParm)
-{
-	struct HvCallPci_DsaAddr dsa;
-	struct HvCallPci_LoadReturn retVal;
-
-	*((u64*)&dsa) = 0;
-
-	dsa.busNumber = busNumberParm;
-	dsa.subBusNumber = subBusParm;
-	dsa.deviceId = deviceIdParm;
-	dsa.barNumber = barNumberParm;
-
-	HvCall3Ret16(HvCallPciBarLoad16, &retVal, *(u64 *)&dsa, offsetParm, 0);
-
-	*valueParm = retVal.value;
-
-	return retVal.rc;
-}
-
-static inline u64 HvCallPci_barLoad32(u16 busNumberParm, u8 subBusParm,
-		u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-		u32 *valueParm)
-{
-	struct HvCallPci_DsaAddr dsa;
-	struct HvCallPci_LoadReturn retVal;
-
-	*((u64*)&dsa) = 0;
-
-	dsa.busNumber = busNumberParm;
-	dsa.subBusNumber = subBusParm;
-	dsa.deviceId = deviceIdParm;
-	dsa.barNumber = barNumberParm;
-
-	HvCall3Ret16(HvCallPciBarLoad32, &retVal, *(u64 *)&dsa, offsetParm, 0);
-
-	*valueParm = retVal.value;
-
-	return retVal.rc;
-}
-
-static inline u64 HvCallPci_barLoad64(u16 busNumberParm, u8 subBusParm,
-		u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
-		u64 *valueParm)
-{
-	struct HvCallPci_DsaAddr dsa;
-	struct HvCallPci_LoadReturn retVal;
-
-	*((u64*)&dsa) = 0;
-
-	dsa.busNumber = busNumberParm;
-	dsa.subBusNumber = subBusParm;
302 dsa.deviceId = deviceIdParm;
303 dsa.barNumber = barNumberParm;
304
305 HvCall3Ret16(HvCallPciBarLoad64, &retVal, *(u64 *)&dsa, offsetParm, 0);
306
307 *valueParm = retVal.value;
308
309 return retVal.rc;
310}
311
312static inline u64 HvCallPci_barStore8(u16 busNumberParm, u8 subBusParm,
313 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
314 u8 valueParm)
315{
316 struct HvCallPci_DsaAddr dsa;
317
318 *((u64*)&dsa) = 0;
319
320 dsa.busNumber = busNumberParm;
321 dsa.subBusNumber = subBusParm;
322 dsa.deviceId = deviceIdParm;
323 dsa.barNumber = barNumberParm;
324
325 return HvCall4(HvCallPciBarStore8, *(u64 *)&dsa, offsetParm,
326 valueParm, 0);
327}
328
329static inline u64 HvCallPci_barStore16(u16 busNumberParm, u8 subBusParm,
330 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
331 u16 valueParm)
332{
333 struct HvCallPci_DsaAddr dsa;
334
335 *((u64*)&dsa) = 0;
336
337 dsa.busNumber = busNumberParm;
338 dsa.subBusNumber = subBusParm;
339 dsa.deviceId = deviceIdParm;
340 dsa.barNumber = barNumberParm;
341
342 return HvCall4(HvCallPciBarStore16, *(u64 *)&dsa, offsetParm,
343 valueParm, 0);
344}
345
346static inline u64 HvCallPci_barStore32(u16 busNumberParm, u8 subBusParm,
347 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
348 u32 valueParm)
349{
350 struct HvCallPci_DsaAddr dsa;
351
352 *((u64*)&dsa) = 0;
353
354 dsa.busNumber = busNumberParm;
355 dsa.subBusNumber = subBusParm;
356 dsa.deviceId = deviceIdParm;
357 dsa.barNumber = barNumberParm;
358
359 return HvCall4(HvCallPciBarStore32, *(u64 *)&dsa, offsetParm,
360 valueParm, 0);
361}
362
363static inline u64 HvCallPci_barStore64(u16 busNumberParm, u8 subBusParm,
364 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
365 u64 valueParm)
366{
367 struct HvCallPci_DsaAddr dsa;
368
369 *((u64*)&dsa) = 0;
370
371 dsa.busNumber = busNumberParm;
372 dsa.subBusNumber = subBusParm;
373 dsa.deviceId = deviceIdParm;
374 dsa.barNumber = barNumberParm;
375
376 return HvCall4(HvCallPciBarStore64, *(u64 *)&dsa, offsetParm,
377 valueParm, 0);
378}
379
380static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm, 162static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
381 u8 deviceIdParm) 163 u8 deviceIdParm)
382{ 164{
@@ -437,20 +219,6 @@ static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
437 return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask); 219 return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
438} 220}
439 221
440static inline u64 HvCallPci_setSlotReset(u16 busNumberParm, u8 subBusParm,
441 u8 deviceIdParm, u64 onNotOff)
442{
443 struct HvCallPci_DsaAddr dsa;
444
445 *((u64*)&dsa) = 0;
446
447 dsa.busNumber = busNumberParm;
448 dsa.subBusNumber = subBusParm;
449 dsa.deviceId = deviceIdParm;
450
451 return HvCall2(HvCallPciSetSlotReset, *(u64*)&dsa, onNotOff);
452}
453
454static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm, 222static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
455 u8 deviceNumberParm, u64 parms, u32 sizeofParms) 223 u8 deviceNumberParm, u64 parms, u32 sizeofParms)
456{ 224{
@@ -519,15 +287,4 @@ static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
519 return xRc & 0xFFFF; 287 return xRc & 0xFFFF;
520} 288}
521 289
522static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm, 290#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
523 u16 sizeParm)
524{
525 u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
526 sizeParm, HvCallPci_BusAdapterVpd);
527 if (xRc == -1)
528 return -1;
529 else
530 return xRc & 0xFFFF;
531}
532
533#endif /* _HVCALLPCI_H */
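
The helpers dropped from this header all repeat one pattern: zero a HvCallPci_DsaAddr, fill in bus, sub-bus and device (plus the BAR number for the bar* variants), reinterpret the struct as a u64 and pass it to an HvCallN routine. The retained helpers, such as HvCallPci_configLoad16 and HvCallPci_configStore8, are what iseries/pci.c uses to reach PCI config space. For illustration only (not part of this patch), a minimal sketch of a config-space read built on the retained helper; the retry count and wrapper name are assumptions, loosely modelled on the Pci_Retry_Max logic in iseries/pci.c:

#include <linux/types.h>
#include "call_pci.h"

/* Read a 16-bit config-space value through the hypervisor.
 * Returns 0 on success or the last hypervisor return code. */
static u64 example_read_config_word(u16 bus, u8 sub_bus, u8 agent,
                u32 offset, u16 *value)
{
        u64 rc = 0;
        int tries;

        for (tries = 0; tries < 3; tries++) {
                rc = HvCallPci_configLoad16(bus, sub_bus, agent,
                                offset, value);
                if (rc == 0)
                        break;          /* hypervisor accepted the load */
        }
        return rc;
}
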
diff --git a/include/asm-ppc64/iSeries/HvCallSm.h b/arch/powerpc/platforms/iseries/call_sm.h
index 8a3dbb071a43..ef223166cf22 100644
--- a/include/asm-ppc64/iSeries/HvCallSm.h
+++ b/arch/powerpc/platforms/iseries/call_sm.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvCallSm.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _HVCALLSM_H 18#ifndef _ISERIES_CALL_SM_H
20#define _HVCALLSM_H 19#define _ISERIES_CALL_SM_H
21 20
22/* 21/*
23 * This file contains the "hypervisor call" interface which is used to 22 * This file contains the "hypervisor call" interface which is used to
@@ -35,4 +34,4 @@ static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
35 return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap); 34 return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
36} 35}
37 36
38#endif /* _HVCALLSM_H */ 37#endif /* _ISERIES_CALL_SM_H */
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/powerpc/platforms/iseries/htab.c
index 073b76661747..b3c6c3374ca6 100644
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -1,10 +1,10 @@
1/* 1/*
2 * iSeries hashtable management. 2 * iSeries hashtable management.
3 * Derived from pSeries_htab.c 3 * Derived from pSeries_htab.c
4 * 4 *
5 * SMP scalability work: 5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM 6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -14,11 +14,13 @@
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/mmu_context.h> 16#include <asm/mmu_context.h>
17#include <asm/iSeries/HvCallHpt.h>
18#include <asm/abs_addr.h> 17#include <asm/abs_addr.h>
19#include <linux/spinlock.h> 18#include <linux/spinlock.h>
20 19
21static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED}; 20#include "call_hpt.h"
21
22static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp =
23 { [0 ... 63] = SPIN_LOCK_UNLOCKED};
22 24
23/* 25/*
24 * Very primitive algorithm for picking up a lock 26 * Very primitive algorithm for picking up a lock
@@ -84,6 +86,25 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
84 return (secondary << 3) | (slot & 7); 86 return (secondary << 3) | (slot & 7);
85} 87}
86 88
89long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
90 unsigned long va, unsigned long prpn, unsigned long vflags,
91 unsigned long rflags)
92{
93 long slot;
94 hpte_t lhpte;
95
96 slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
97
98 if (lhpte.v & HPTE_V_VALID) {
99 /* Bolt the existing HPTE */
100 HvCallHpt_setSwBits(slot, 0x10, 0);
101 HvCallHpt_setPp(slot, PP_RWXX);
102 return 0;
103 }
104
105 return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
106}
107
87static unsigned long iSeries_hpte_getword0(unsigned long slot) 108static unsigned long iSeries_hpte_getword0(unsigned long slot)
88{ 109{
89 hpte_t hpte; 110 hpte_t hpte;
@@ -107,7 +128,7 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
107 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset); 128 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
108 129
109 if (! (hpte_v & HPTE_V_BOLTED)) { 130 if (! (hpte_v & HPTE_V_BOLTED)) {
110 HvCallHpt_invalidateSetSwBitsGet(hpte_group + 131 HvCallHpt_invalidateSetSwBitsGet(hpte_group +
111 slot_offset, 0, 0); 132 slot_offset, 0, 0);
112 iSeries_hunlock(hpte_group); 133 iSeries_hunlock(hpte_group);
113 return i; 134 return i;
@@ -124,9 +145,9 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
124 145
125/* 146/*
126 * The HyperVisor expects the "flags" argument in this form: 147 * The HyperVisor expects the "flags" argument in this form:
127 * bits 0..59 : reserved 148 * bits 0..59 : reserved
128 * bit 60 : N 149 * bit 60 : N
129 * bits 61..63 : PP2,PP1,PP0 150 * bits 61..63 : PP2,PP1,PP0
130 */ 151 */
131static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp, 152static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
132 unsigned long va, int large, int local) 153 unsigned long va, int large, int local)
@@ -152,7 +173,7 @@ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
152} 173}
153 174
154/* 175/*
155 * Functions used to find the PTE for a particular virtual address. 176 * Functions used to find the PTE for a particular virtual address.
156 * Only used during boot when bolting pages. 177 * Only used during boot when bolting pages.
157 * 178 *
158 * Input : vpn : virtual page number 179 * Input : vpn : virtual page number
@@ -170,7 +191,7 @@ static long iSeries_hpte_find(unsigned long vpn)
170 * 0x00000000xxxxxxxx : Entry found in primary group, slot x 191 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
171 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x 192 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
172 */ 193 */
173 slot = HvCallHpt_findValid(&hpte, vpn); 194 slot = HvCallHpt_findValid(&hpte, vpn);
174 if (hpte.v & HPTE_V_VALID) { 195 if (hpte.v & HPTE_V_VALID) {
175 if (slot < 0) { 196 if (slot < 0) {
176 slot &= 0x7fffffffffffffff; 197 slot &= 0x7fffffffffffffff;
@@ -197,7 +218,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
197 vsid = get_kernel_vsid(ea); 218 vsid = get_kernel_vsid(ea);
198 va = (vsid << 28) | (ea & 0x0fffffff); 219 va = (vsid << 28) | (ea & 0x0fffffff);
199 vpn = va >> PAGE_SHIFT; 220 vpn = va >> PAGE_SHIFT;
200 slot = iSeries_hpte_find(vpn); 221 slot = iSeries_hpte_find(vpn);
201 if (slot == -1) 222 if (slot == -1)
202 panic("updateboltedpp: Could not find page to bolt\n"); 223 panic("updateboltedpp: Could not find page to bolt\n");
203 HvCallHpt_setPp(slot, newpp); 224 HvCallHpt_setPp(slot, newpp);
@@ -215,7 +236,7 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
215 iSeries_hlock(slot); 236 iSeries_hlock(slot);
216 237
217 hpte_v = iSeries_hpte_getword0(slot); 238 hpte_v = iSeries_hpte_getword0(slot);
218 239
219 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID)) 240 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
220 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0); 241 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
221 242
@@ -230,7 +251,7 @@ void hpte_init_iSeries(void)
230 ppc_md.hpte_updatepp = iSeries_hpte_updatepp; 251 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
231 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp; 252 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
232 ppc_md.hpte_insert = iSeries_hpte_insert; 253 ppc_md.hpte_insert = iSeries_hpte_insert;
233 ppc_md.hpte_remove = iSeries_hpte_remove; 254 ppc_md.hpte_remove = iSeries_hpte_remove;
234 255
235 htab_finish_init(); 256 htab_finish_init();
236} 257}
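
hpte_init_iSeries() above plugs the hypervisor-backed HPTE operations into ppc_md, so the generic ppc64 hash-table code never calls HvCallHpt_* directly. A sketch of that indirection, assuming the machdep_calls hpte_insert signature of this kernel generation (the wrapper name is invented for illustration):

#include <asm/machdep.h>

/* Generic MMU code inserts a hashed page table entry through ppc_md;
 * on legacy iSeries this lands in iSeries_hpte_insert() above. */
static long example_insert_hpte(unsigned long hpte_group, unsigned long va,
                unsigned long prpn, unsigned long vflags, unsigned long rflags)
{
        return ppc_md.hpte_insert(hpte_group, va, prpn, vflags, rflags);
}
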
diff --git a/arch/ppc64/kernel/hvCall.S b/arch/powerpc/platforms/iseries/hvcall.S
index 4c699eab1b95..07ae6ad5f49f 100644
--- a/arch/ppc64/kernel/hvCall.S
+++ b/arch/powerpc/platforms/iseries/hvcall.S
@@ -1,7 +1,4 @@
1/* 1/*
2 * arch/ppc64/kernel/hvCall.S
3 *
4 *
5 * This file contains the code to perform calls to the 2 * This file contains the code to perform calls to the
6 * iSeries LPAR hypervisor 3 * iSeries LPAR hypervisor
7 * 4 *
@@ -13,15 +10,16 @@
13 10
14#include <asm/ppc_asm.h> 11#include <asm/ppc_asm.h>
15#include <asm/processor.h> 12#include <asm/processor.h>
13#include <asm/ptrace.h> /* XXX for STACK_FRAME_OVERHEAD */
16 14
17 .text 15 .text
18 16
19/* 17/*
20 * Hypervisor call 18 * Hypervisor call
21 * 19 *
22 * Invoke the iSeries hypervisor via the System Call instruction 20 * Invoke the iSeries hypervisor via the System Call instruction
23 * Parameters are passed to this routine in registers r3 - r10 21 * Parameters are passed to this routine in registers r3 - r10
24 * 22 *
25 * r3 contains the HV function to be called 23 * r3 contains the HV function to be called
26 * r4-r10 contain the operands to the hypervisor function 24 * r4-r10 contain the operands to the hypervisor function
27 * 25 *
@@ -41,11 +39,11 @@ _GLOBAL(HvCall7)
41 mfcr r0 39 mfcr r0
42 std r0,-8(r1) 40 std r0,-8(r1)
43 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1) 41 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
44 42
45 /* r0 = 0xffffffffffffffff indicates a hypervisor call */ 43 /* r0 = 0xffffffffffffffff indicates a hypervisor call */
46 44
47 li r0,-1 45 li r0,-1
48 46
49 /* Invoke the hypervisor */ 47 /* Invoke the hypervisor */
50 48
51 sc 49 sc
@@ -55,7 +53,7 @@ _GLOBAL(HvCall7)
55 mtcrf 0xff,r0 53 mtcrf 0xff,r0
56 54
57 /* return to caller, return value in r3 */ 55 /* return to caller, return value in r3 */
58 56
59 blr 57 blr
60 58
61_GLOBAL(HvCall0Ret16) 59_GLOBAL(HvCall0Ret16)
@@ -92,7 +90,5 @@ _GLOBAL(HvCall7Ret16)
92 ld r0,-8(r1) 90 ld r0,-8(r1)
93 mtcrf 0xff,r0 91 mtcrf 0xff,r0
94 ld r31,-16(r1) 92 ld r31,-16(r1)
95
96 blr
97
98 93
94 blr
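
hvcall.S provides the C-callable HvCallN entry points declared in <asm/iSeries/HvCallSc.h>: r3 carries the hypervisor function code, r4-r10 the operands, r0 is set to -1 and "sc" traps into the hypervisor, with the result returned in r3. The HvCallNRet16 variants additionally return a larger result through the struct pointer passed as their second argument, as the call_pci.h helpers above show. A minimal caller sketch, assuming those declarations (the function code value here is a placeholder, not a real HvCall number):

#include <linux/types.h>
#include <asm/iSeries/HvCallSc.h>

/* Issue a two-operand hypervisor call and hand back the hypervisor's
 * return value (r3 on return from the "sc" in hvcall.S). */
static u64 example_hv_call(u64 function_code, u64 arg1, u64 arg2)
{
        return HvCall2(function_code, arg1, arg2);
}
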
diff --git a/arch/ppc64/kernel/HvCall.c b/arch/powerpc/platforms/iseries/hvlog.c
index b772e65b57a2..f61e2e9ac9ec 100644
--- a/arch/ppc64/kernel/HvCall.c
+++ b/arch/powerpc/platforms/iseries/hvlog.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvCall.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
diff --git a/arch/ppc64/kernel/HvLpConfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c
index cb1d6473203c..dc28621aea0d 100644
--- a/arch/ppc64/kernel/HvLpConfig.c
+++ b/arch/powerpc/platforms/iseries/hvlpconfig.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvLpConfig.c
3 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation 2 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
diff --git a/arch/ppc64/kernel/iSeries_iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index f8ff1bb054dc..1db26d8be640 100644
--- a/arch/ppc64/kernel/iSeries_iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * arch/ppc64/kernel/iSeries_iommu.c
3 *
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
5 * 3 *
6 * Rewrite, cleanup: 4 * Rewrite, cleanup:
@@ -30,9 +28,11 @@
30#include <linux/list.h> 28#include <linux/list.h>
31 29
32#include <asm/iommu.h> 30#include <asm/iommu.h>
31#include <asm/tce.h>
33#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/abs_addr.h>
34#include <asm/pci-bridge.h>
34#include <asm/iSeries/HvCallXm.h> 35#include <asm/iSeries/HvCallXm.h>
35#include <asm/iSeries/iSeries_pci.h>
36 36
37extern struct list_head iSeries_Global_Device_List; 37extern struct list_head iSeries_Global_Device_List;
38 38
@@ -90,15 +90,16 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
90 */ 90 */
91static struct iommu_table *iommu_table_find(struct iommu_table * tbl) 91static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
92{ 92{
93 struct iSeries_Device_Node *dp; 93 struct pci_dn *pdn;
94 94
95 list_for_each_entry(dp, &iSeries_Global_Device_List, Device_List) { 95 list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
96 if ((dp->iommu_table != NULL) && 96 struct iommu_table *it = pdn->iommu_table;
97 (dp->iommu_table->it_type == TCE_PCI) && 97 if ((it != NULL) &&
98 (dp->iommu_table->it_offset == tbl->it_offset) && 98 (it->it_type == TCE_PCI) &&
99 (dp->iommu_table->it_index == tbl->it_index) && 99 (it->it_offset == tbl->it_offset) &&
100 (dp->iommu_table->it_size == tbl->it_size)) 100 (it->it_index == tbl->it_index) &&
101 return dp->iommu_table; 101 (it->it_size == tbl->it_size))
102 return it;
102 } 103 }
103 return NULL; 104 return NULL;
104} 105}
@@ -112,7 +113,7 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
112 * 2. TCE table per Bus. 113 * 2. TCE table per Bus.
113 * 3. TCE Table per IOA. 114 * 3. TCE Table per IOA.
114 */ 115 */
115static void iommu_table_getparms(struct iSeries_Device_Node* dn, 116static void iommu_table_getparms(struct pci_dn *pdn,
116 struct iommu_table* tbl) 117 struct iommu_table* tbl)
117{ 118{
118 struct iommu_table_cb *parms; 119 struct iommu_table_cb *parms;
@@ -123,11 +124,11 @@ static void iommu_table_getparms(struct iSeries_Device_Node* dn,
123 124
124 memset(parms, 0, sizeof(*parms)); 125 memset(parms, 0, sizeof(*parms));
125 126
126 parms->itc_busno = ISERIES_BUS(dn); 127 parms->itc_busno = pdn->busno;
127 parms->itc_slotno = dn->LogicalSlot; 128 parms->itc_slotno = pdn->LogicalSlot;
128 parms->itc_virtbus = 0; 129 parms->itc_virtbus = 0;
129 130
130 HvCallXm_getTceTableParms(ISERIES_HV_ADDR(parms)); 131 HvCallXm_getTceTableParms(iseries_hv_addr(parms));
131 132
132 if (parms->itc_size == 0) 133 if (parms->itc_size == 0)
133 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); 134 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
@@ -144,18 +145,19 @@ static void iommu_table_getparms(struct iSeries_Device_Node* dn,
144} 145}
145 146
146 147
147void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn) 148void iommu_devnode_init_iSeries(struct device_node *dn)
148{ 149{
149 struct iommu_table *tbl; 150 struct iommu_table *tbl;
151 struct pci_dn *pdn = PCI_DN(dn);
150 152
151 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 153 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
152 154
153 iommu_table_getparms(dn, tbl); 155 iommu_table_getparms(pdn, tbl);
154 156
155 /* Look for existing tce table */ 157 /* Look for existing tce table */
156 dn->iommu_table = iommu_table_find(tbl); 158 pdn->iommu_table = iommu_table_find(tbl);
157 if (dn->iommu_table == NULL) 159 if (pdn->iommu_table == NULL)
158 dn->iommu_table = iommu_init_table(tbl); 160 pdn->iommu_table = iommu_init_table(tbl);
159 else 161 else
160 kfree(tbl); 162 kfree(tbl);
161} 163}
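
With this change the iSeries PCI code hangs its per-device data off the generic struct device_node via a struct pci_dn (see the node->data / PCI_DN() wiring in iseries/pci.c below), and iommu_devnode_init_iSeries() either reuses a matching TCE table or allocates a new one. A sketch, not part of the patch, of recovering that table later from a struct pci_dev, assuming pdev->sysdata holds the device_node as the fixup code below arranges:

#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>

/* The final fixup stores the device_node in pdev->sysdata; PCI_DN()
 * then yields the pci_dn whose iommu_table this file filled in. */
static struct iommu_table *example_table_for_dev(struct pci_dev *pdev)
{
        struct device_node *dn = pdev->sysdata;

        if (dn == NULL || PCI_DN(dn) == NULL)
                return NULL;
        return PCI_DN(dn)->iommu_table;
}
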
diff --git a/include/asm-ppc64/iSeries/ItIplParmsReal.h b/arch/powerpc/platforms/iseries/ipl_parms.h
index ae3417dc599e..77c135ddbf1b 100644
--- a/include/asm-ppc64/iSeries/ItIplParmsReal.h
+++ b/arch/powerpc/platforms/iseries/ipl_parms.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItIplParmsReal.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _ITIPLPARMSREAL_H 18#ifndef _ISERIES_IPL_PARMS_H
20#define _ITIPLPARMSREAL_H 19#define _ISERIES_IPL_PARMS_H
21 20
22/* 21/*
23 * This struct maps the IPL Parameters DMA'd from the SP. 22 * This struct maps the IPL Parameters DMA'd from the SP.
@@ -68,4 +67,4 @@ struct ItIplParmsReal {
68 67
69extern struct ItIplParmsReal xItIplParmsReal; 68extern struct ItIplParmsReal xItIplParmsReal;
70 69
71#endif /* _ITIPLPARMSREAL_H */ 70#endif /* _ISERIES_IPL_PARMS_H */
diff --git a/arch/ppc64/kernel/iSeries_irq.c b/arch/powerpc/platforms/iseries/irq.c
index 77376c1bd611..937ac99b9d33 100644
--- a/arch/ppc64/kernel/iSeries_irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -38,9 +38,10 @@
38#include <asm/ppcdebug.h> 38#include <asm/ppcdebug.h>
39#include <asm/iSeries/HvTypes.h> 39#include <asm/iSeries/HvTypes.h>
40#include <asm/iSeries/HvLpEvent.h> 40#include <asm/iSeries/HvLpEvent.h>
41#include <asm/iSeries/HvCallPci.h>
42#include <asm/iSeries/HvCallXm.h> 41#include <asm/iSeries/HvCallXm.h>
43#include <asm/iSeries/iSeries_irq.h> 42
43#include "irq.h"
44#include "call_pci.h"
44 45
45/* This maps virtual irq numbers to real irqs */ 46/* This maps virtual irq numbers to real irqs */
46unsigned int virt_irq_to_real_map[NR_IRQS]; 47unsigned int virt_irq_to_real_map[NR_IRQS];
@@ -351,3 +352,15 @@ int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
351 irq_desc[virtirq].handler = &iSeries_IRQ_handler; 352 irq_desc[virtirq].handler = &iSeries_IRQ_handler;
352 return virtirq; 353 return virtirq;
353} 354}
355
356int virt_irq_create_mapping(unsigned int real_irq)
357{
358 BUG(); /* Don't call this on iSeries, yet */
359
360 return 0;
361}
362
363void virt_irq_init(void)
364{
365 return;
366}
diff --git a/include/asm-ppc64/iSeries/iSeries_irq.h b/arch/powerpc/platforms/iseries/irq.h
index 6c9767ac1302..5f643f16ecc0 100644
--- a/include/asm-ppc64/iSeries/iSeries_irq.h
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -1,8 +1,8 @@
1#ifndef __ISERIES_IRQ_H__ 1#ifndef _ISERIES_IRQ_H
2#define __ISERIES_IRQ_H__ 2#define _ISERIES_IRQ_H
3 3
4extern void iSeries_init_IRQ(void); 4extern void iSeries_init_IRQ(void);
5extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId); 5extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
6extern void iSeries_activate_IRQs(void); 6extern void iSeries_activate_IRQs(void);
7 7
8#endif /* __ISERIES_IRQ_H__ */ 8#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
new file mode 100644
index 000000000000..f271b3539721
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/ksyms.c
@@ -0,0 +1,27 @@
1/*
2 * (C) 2001-2005 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/module.h>
10
11#include <asm/hw_irq.h>
12#include <asm/iSeries/HvCallSc.h>
13
14EXPORT_SYMBOL(HvCall0);
15EXPORT_SYMBOL(HvCall1);
16EXPORT_SYMBOL(HvCall2);
17EXPORT_SYMBOL(HvCall3);
18EXPORT_SYMBOL(HvCall4);
19EXPORT_SYMBOL(HvCall5);
20EXPORT_SYMBOL(HvCall6);
21EXPORT_SYMBOL(HvCall7);
22
23#ifdef CONFIG_SMP
24EXPORT_SYMBOL(local_get_flags);
25EXPORT_SYMBOL(local_irq_disable);
26EXPORT_SYMBOL(local_irq_restore);
27#endif
diff --git a/arch/ppc64/kernel/LparData.c b/arch/powerpc/platforms/iseries/lpardata.c
index 0a9c23ca2f0c..ed2ffee6f731 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/powerpc/platforms/iseries/lpardata.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright 2001 Mike Corrigan, IBM Corp 2 * Copyright 2001 Mike Corrigan, IBM Corp
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
@@ -19,18 +19,18 @@
19#include <asm/lppaca.h> 19#include <asm/lppaca.h>
20#include <asm/iSeries/ItLpRegSave.h> 20#include <asm/iSeries/ItLpRegSave.h>
21#include <asm/paca.h> 21#include <asm/paca.h>
22#include <asm/iSeries/HvReleaseData.h>
23#include <asm/iSeries/LparMap.h> 22#include <asm/iSeries/LparMap.h>
24#include <asm/iSeries/ItVpdAreas.h>
25#include <asm/iSeries/ItIplParmsReal.h>
26#include <asm/iSeries/ItExtVpdPanel.h> 23#include <asm/iSeries/ItExtVpdPanel.h>
27#include <asm/iSeries/ItLpQueue.h> 24#include <asm/iSeries/ItLpQueue.h>
28#include <asm/iSeries/IoHriProcessorVpd.h>
29#include <asm/iSeries/ItSpCommArea.h>
30 25
26#include "vpd_areas.h"
27#include "spcomm_area.h"
28#include "ipl_parms.h"
29#include "processor_vpd.h"
30#include "release_data.h"
31 31
32/* The HvReleaseData is the root of the information shared between 32/* The HvReleaseData is the root of the information shared between
33 * the hypervisor and Linux. 33 * the hypervisor and Linux.
34 */ 34 */
35struct HvReleaseData hvReleaseData = { 35struct HvReleaseData hvReleaseData = {
36 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */ 36 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
@@ -79,7 +79,7 @@ extern void trap_0e_iSeries(void);
79extern void performance_monitor_iSeries(void); 79extern void performance_monitor_iSeries(void);
80extern void data_access_slb_iSeries(void); 80extern void data_access_slb_iSeries(void);
81extern void instruction_access_slb_iSeries(void); 81extern void instruction_access_slb_iSeries(void);
82 82
83struct ItLpNaca itLpNaca = { 83struct ItLpNaca itLpNaca = {
84 .xDesc = 0xd397d581, /* "LpNa" ebcdic */ 84 .xDesc = 0xd397d581, /* "LpNa" ebcdic */
85 .xSize = 0x0400, /* size of ItLpNaca */ 85 .xSize = 0x0400, /* size of ItLpNaca */
@@ -106,7 +106,7 @@ struct ItLpNaca itLpNaca = {
106 .xLoadAreaChunks = 0, /* chunks for load area */ 106 .xLoadAreaChunks = 0, /* chunks for load area */
107 .xPaseSysCallCRMask = 0, /* PASE mask */ 107 .xPaseSysCallCRMask = 0, /* PASE mask */
108 .xSlicSegmentTablePtr = 0, /* seg table */ 108 .xSlicSegmentTablePtr = 0, /* seg table */
109 .xOldLpQueue = { 0 }, /* Old LP Queue */ 109 .xOldLpQueue = { 0 }, /* Old LP Queue */
110 .xInterruptHdlr = { 110 .xInterruptHdlr = {
111 (u64)system_reset_iSeries, /* 0x100 System Reset */ 111 (u64)system_reset_iSeries, /* 0x100 System Reset */
112 (u64)machine_check_iSeries, /* 0x200 Machine Check */ 112 (u64)machine_check_iSeries, /* 0x200 Machine Check */
@@ -134,7 +134,7 @@ struct ItLpNaca itLpNaca = {
134EXPORT_SYMBOL(itLpNaca); 134EXPORT_SYMBOL(itLpNaca);
135 135
136/* May be filled in by the hypervisor so cannot end up in the BSS */ 136/* May be filled in by the hypervisor so cannot end up in the BSS */
137struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data"))); 137struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
138 138
139/* May be filled in by the hypervisor so cannot end up in the BSS */ 139/* May be filled in by the hypervisor so cannot end up in the BSS */
140struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data"))); 140struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
@@ -151,7 +151,7 @@ struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
151 .xPVR = 0x3600 151 .xPVR = 0x3600
152 } 152 }
153}; 153};
154 154
155/* Space for Main Store Vpd 27,200 bytes */ 155/* Space for Main Store Vpd 27,200 bytes */
156/* May be filled in by the hypervisor so cannot end up in the BSS */ 156/* May be filled in by the hypervisor so cannot end up in the BSS */
157u64 xMsVpd[3400] __attribute__((__section__(".data"))); 157u64 xMsVpd[3400] __attribute__((__section__(".data")));
@@ -197,7 +197,7 @@ struct ItVpdAreas itVpdAreas = {
197 26992, /* 7 length of MS VPD */ 197 26992, /* 7 length of MS VPD */
198 0, /* 8 */ 198 0, /* 8 */
199 sizeof(struct ItLpNaca),/* 9 length of LP Naca */ 199 sizeof(struct ItLpNaca),/* 9 length of LP Naca */
200 0, /* 10 */ 200 0, /* 10 */
201 256, /* 11 length of Recovery Log Buf */ 201 256, /* 11 length of Recovery Log Buf */
202 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */ 202 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
203 0,0,0, /* 13 - 15 */ 203 0,0,0, /* 13 - 15 */
@@ -207,7 +207,7 @@ struct ItVpdAreas itVpdAreas = {
207 0,0 /* 24 - 25 */ 207 0,0 /* 24 - 25 */
208 }, 208 },
209 .xSlicVpdAdrs = { /* VPD addresses */ 209 .xSlicVpdAdrs = { /* VPD addresses */
210 0,0,0, /* 0 - 2 */ 210 0,0,0, /* 0 - 2 */
211 &xItExtVpdPanel, /* 3 Extended VPD */ 211 &xItExtVpdPanel, /* 3 Extended VPD */
212 &paca[0], /* 4 first Paca */ 212 &paca[0], /* 4 first Paca */
213 0, /* 5 */ 213 0, /* 5 */
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/powerpc/platforms/iseries/lpevents.c
index 4231861288a3..54c7753dbe05 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItLpQueue.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -14,11 +13,14 @@
14#include <linux/bootmem.h> 13#include <linux/bootmem.h>
15#include <linux/seq_file.h> 14#include <linux/seq_file.h>
16#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/module.h>
17
17#include <asm/system.h> 18#include <asm/system.h>
18#include <asm/paca.h> 19#include <asm/paca.h>
19#include <asm/iSeries/ItLpQueue.h> 20#include <asm/iSeries/ItLpQueue.h>
20#include <asm/iSeries/HvLpEvent.h> 21#include <asm/iSeries/HvLpEvent.h>
21#include <asm/iSeries/HvCallEvent.h> 22#include <asm/iSeries/HvCallEvent.h>
23#include <asm/iSeries/ItLpNaca.h>
22 24
23/* 25/*
24 * The LpQueue is used to pass event data from the hypervisor to 26 * The LpQueue is used to pass event data from the hypervisor to
@@ -43,7 +45,8 @@ static char *event_types[HvLpEvent_Type_NumTypes] = {
43}; 45};
44 46
45/* Array of LpEvent handler functions */ 47/* Array of LpEvent handler functions */
46extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes]; 48static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
49static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
47 50
48static struct HvLpEvent * get_next_hvlpevent(void) 51static struct HvLpEvent * get_next_hvlpevent(void)
49{ 52{
@@ -181,11 +184,7 @@ void setup_hvlpevent_queue(void)
181{ 184{
182 void *eventStack; 185 void *eventStack;
183 186
184 /* 187 /* Allocate a page for the Event Stack. */
185 * Allocate a page for the Event Stack. The Hypervisor needs the
186 * absolute real address, so we subtract out the KERNELBASE and add
187 * in the absolute real address of the kernel load area.
188 */
189 eventStack = alloc_bootmem_pages(LpEventStackSize); 188 eventStack = alloc_bootmem_pages(LpEventStackSize);
190 memset(eventStack, 0, LpEventStackSize); 189 memset(eventStack, 0, LpEventStackSize);
191 190
@@ -199,6 +198,70 @@ void setup_hvlpevent_queue(void)
199 hvlpevent_queue.xIndex = 0; 198 hvlpevent_queue.xIndex = 0;
200} 199}
201 200
201/* Register a handler for an LpEvent type */
202int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
203{
204 if (eventType < HvLpEvent_Type_NumTypes) {
205 lpEventHandler[eventType] = handler;
206 return 0;
207 }
208 return 1;
209}
210EXPORT_SYMBOL(HvLpEvent_registerHandler);
211
212int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
213{
214 might_sleep();
215
216 if (eventType < HvLpEvent_Type_NumTypes) {
217 if (!lpEventHandlerPaths[eventType]) {
218 lpEventHandler[eventType] = NULL;
219 /*
220 * We now sleep until all other CPUs have scheduled.
221 * This ensures that the deletion is seen by all
222 * other CPUs, and that the deleted handler isn't
223 * still running on another CPU when we return.
224 */
225 synchronize_rcu();
226 return 0;
227 }
228 }
229 return 1;
230}
231EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
232
233/*
234 * lpIndex is the partition index of the target partition.
235 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
236 * indicates to use our partition index - for the other types.
237 */
238int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
239{
240 if ((eventType < HvLpEvent_Type_NumTypes) &&
241 lpEventHandler[eventType]) {
242 if (lpIndex == 0)
243 lpIndex = itLpNaca.xLpIndex;
244 HvCallEvent_openLpEventPath(lpIndex, eventType);
245 ++lpEventHandlerPaths[eventType];
246 return 0;
247 }
248 return 1;
249}
250
251int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
252{
253 if ((eventType < HvLpEvent_Type_NumTypes) &&
254 lpEventHandler[eventType] &&
255 lpEventHandlerPaths[eventType]) {
256 if (lpIndex == 0)
257 lpIndex = itLpNaca.xLpIndex;
258 HvCallEvent_closeLpEventPath(lpIndex, eventType);
259 --lpEventHandlerPaths[eventType];
260 return 0;
261 }
262 return 1;
263}
264
202static int proc_lpevents_show(struct seq_file *m, void *v) 265static int proc_lpevents_show(struct seq_file *m, void *v)
203{ 266{
204 int cpu, i; 267 int cpu, i;
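
The four routines added above form the public LpEvent interface that the virtual I/O drivers use: install a handler for an event type, then open a path so the hypervisor starts delivering events of that type. A minimal consumer sketch, not part of this patch; the event type, error codes and function names are chosen for illustration, and the handler prototype (including pt_regs) follows this kernel's LpEventHandler typedef:

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/iSeries/HvTypes.h>
#include <asm/iSeries/HvLpEvent.h>

/* Called from the LpEvent dispatch loop for every event of the
 * registered type. */
static void example_event_handler(struct HvLpEvent *event,
                struct pt_regs *regs)
{
        if (event == NULL)
                return;
        /* decode event->xSubtype and the payload here */
}

static int __init example_register(void)
{
        /* Install the handler, then open a path; lpIndex 0 means
         * "use our own partition index" (see the comment above). */
        if (HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
                        example_event_handler))
                return -EBUSY;
        if (HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, 0)) {
                HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualIo);
                return -EBUSY;
        }
        return 0;
}
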
diff --git a/include/asm-ppc64/iSeries/IoHriMainStore.h b/arch/powerpc/platforms/iseries/main_store.h
index 45ed3ea67d06..74f6889f834f 100644
--- a/include/asm-ppc64/iSeries/IoHriMainStore.h
+++ b/arch/powerpc/platforms/iseries/main_store.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * IoHriMainStore.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -17,8 +16,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19 18
20#ifndef _IOHRIMAINSTORE_H 19#ifndef _ISERIES_MAIN_STORE_H
21#define _IOHRIMAINSTORE_H 20#define _ISERIES_MAIN_STORE_H
22 21
23/* Main Store Vpd for Condor,iStar,sStar */ 22/* Main Store Vpd for Condor,iStar,sStar */
24struct IoHriMainStoreSegment4 { 23struct IoHriMainStoreSegment4 {
@@ -163,4 +162,4 @@ struct IoHriMainStoreSegment5 {
163 162
164extern u64 xMsVpd[]; 163extern u64 xMsVpd[];
165 164
166#endif /* _IOHRIMAINSTORE_H */ 165#endif /* _ISERIES_MAIN_STORE_H */
diff --git a/arch/ppc64/kernel/mf.c b/arch/powerpc/platforms/iseries/mf.c
index ef4a338ebd01..e5de31aa0015 100644
--- a/arch/ppc64/kernel/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1,29 +1,28 @@
1/* 1/*
2 * mf.c 2 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
3 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation 3 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
4 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation 4 *
5 * 5 * This modules exists as an interface between a Linux secondary partition
6 * This modules exists as an interface between a Linux secondary partition 6 * running on an iSeries and the primary partition's Virtual Service
7 * running on an iSeries and the primary partition's Virtual Service 7 * Processor (VSP) object. The VSP has final authority over powering on/off
8 * Processor (VSP) object. The VSP has final authority over powering on/off 8 * all partitions in the iSeries. It also provides miscellaneous low-level
9 * all partitions in the iSeries. It also provides miscellaneous low-level 9 * machine facility type operations.
10 * machine facility type operations. 10 *
11 * 11 *
12 * 12 * This program is free software; you can redistribute it and/or modify
13 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by
14 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or
15 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version.
16 * (at your option) any later version. 16 *
17 * 17 * This program is distributed in the hope that it will be useful,
18 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details.
21 * GNU General Public License for more details. 21 *
22 * 22 * You should have received a copy of the GNU General Public License
23 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software
24 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 */
26 */
27 26
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/errno.h> 28#include <linux/errno.h>
@@ -33,15 +32,21 @@
33#include <linux/delay.h> 32#include <linux/delay.h>
34#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
35#include <linux/bcd.h> 34#include <linux/bcd.h>
35#include <linux/rtc.h>
36 36
37#include <asm/time.h> 37#include <asm/time.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <asm/paca.h> 39#include <asm/paca.h>
40#include <asm/abs_addr.h>
40#include <asm/iSeries/vio.h> 41#include <asm/iSeries/vio.h>
41#include <asm/iSeries/mf.h> 42#include <asm/iSeries/mf.h>
42#include <asm/iSeries/HvLpConfig.h> 43#include <asm/iSeries/HvLpConfig.h>
43#include <asm/iSeries/ItLpQueue.h> 44#include <asm/iSeries/ItLpQueue.h>
44 45
46#include "setup.h"
47
48extern int piranha_simulator;
49
45/* 50/*
46 * This is the structure layout for the Machine Facilites LPAR event 51 * This is the structure layout for the Machine Facilites LPAR event
47 * flows. 52 * flows.
@@ -1061,10 +1066,10 @@ static void mf_getSrcHistory(char *buffer, int size)
1061 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); 1066 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
1062 ev->event.data.vsp_cmd.result_code = 0xFF; 1067 ev->event.data.vsp_cmd.result_code = 0xFF;
1063 ev->event.data.vsp_cmd.reserved = 0; 1068 ev->event.data.vsp_cmd.reserved = 0;
1064 ev->event.data.vsp_cmd.sub_data.page[0] = ISERIES_HV_ADDR(pages[0]); 1069 ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
1065 ev->event.data.vsp_cmd.sub_data.page[1] = ISERIES_HV_ADDR(pages[1]); 1070 ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
1066 ev->event.data.vsp_cmd.sub_data.page[2] = ISERIES_HV_ADDR(pages[2]); 1071 ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
1067 ev->event.data.vsp_cmd.sub_data.page[3] = ISERIES_HV_ADDR(pages[3]); 1072 ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
1068 mb(); 1073 mb();
1069 if (signal_event(ev) != 0) 1074 if (signal_event(ev) != 0)
1070 return; 1075 return;
@@ -1279,3 +1284,38 @@ static int __init mf_proc_init(void)
1279__initcall(mf_proc_init); 1284__initcall(mf_proc_init);
1280 1285
1281#endif /* CONFIG_PROC_FS */ 1286#endif /* CONFIG_PROC_FS */
1287
1288/*
1289 * Get the RTC from the virtual service processor
1290 * This requires flowing LpEvents to the primary partition
1291 */
1292void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
1293{
1294 if (piranha_simulator)
1295 return;
1296
1297 mf_get_rtc(rtc_tm);
1298 rtc_tm->tm_mon--;
1299}
1300
1301/*
1302 * Set the RTC in the virtual service processor
1303 * This requires flowing LpEvents to the primary partition
1304 */
1305int iSeries_set_rtc_time(struct rtc_time *tm)
1306{
1307 mf_set_rtc(tm);
1308 return 0;
1309}
1310
1311unsigned long iSeries_get_boot_time(void)
1312{
1313 struct rtc_time tm;
1314
1315 if (piranha_simulator)
1316 return 0;
1317
1318 mf_get_boot_rtc(&tm);
1319 return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
1320 tm.tm_hour, tm.tm_min, tm.tm_sec);
1321}
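
The three clock routines added above wrap mf_get_rtc()/mf_set_rtc()/mf_get_boot_rtc(), flowing LpEvents to the primary partition's service processor instead of touching RTC hardware. A sketch of how platform setup might hook them into the machine-dependent vector; the exact wiring lives in the iSeries setup code, not in this hunk, and the assumption here is that the prototypes are visible through the platform's setup.h (which mf.c now includes):

#include <linux/init.h>
#include <asm/machdep.h>
#include "setup.h"

/* Route generic time-of-day requests to the VSP-backed routines. */
static void __init example_wire_rtc(void)
{
        ppc_md.get_boot_time = iSeries_get_boot_time;
        ppc_md.get_rtc_time  = iSeries_get_rtc_time;
        ppc_md.set_rtc_time  = iSeries_set_rtc_time;
}
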
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
new file mode 100644
index 000000000000..09f14522e176
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -0,0 +1,55 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-2005 IBM Corp
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <asm/processor.h>
17#include <asm/asm-offsets.h>
18
19 .text
20
21/* unsigned long local_save_flags(void) */
22_GLOBAL(local_get_flags)
23 lbz r3,PACAPROCENABLED(r13)
24 blr
25
26/* unsigned long local_irq_disable(void) */
27_GLOBAL(local_irq_disable)
28 lbz r3,PACAPROCENABLED(r13)
29 li r4,0
30 stb r4,PACAPROCENABLED(r13)
31 blr /* Done */
32
33/* void local_irq_restore(unsigned long flags) */
34_GLOBAL(local_irq_restore)
35 lbz r5,PACAPROCENABLED(r13)
36 /* Check if things are setup the way we want _already_. */
37 cmpw 0,r3,r5
38 beqlr
39 /* are we enabling interrupts? */
40 cmpdi 0,r3,0
41 stb r3,PACAPROCENABLED(r13)
42 beqlr
43 /* Check pending interrupts */
44 /* A decrementer, IPI or PMC interrupt may have occurred
45 * while we were in the hypervisor (which enables) */
46 ld r4,PACALPPACA+LPPACAANYINT(r13)
47 cmpdi r4,0
48 beqlr
49
50 /*
51 * Handle pending interrupts in interrupt context
52 */
53 li r0,0x5555
54 sc
55 blr
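
Interrupt disabling on legacy iSeries is soft: the routines above only flip the PACAPROCENABLED byte in the paca, and on re-enable any pending decrementer, IPI or PMC interrupt is replayed by trapping to the hypervisor with r0 = 0x5555. From C, callers keep using the ordinary save/restore pairing; with the iSeries hw_irq.h definitions these macros should resolve to the routines in this file (the function below is only an illustration):

#include <asm/hw_irq.h>

/* Standard critical-section pattern.  No MSR[EE] change happens here,
 * just the paca soft-enable byte plus the replay "sc" on restore. */
static void example_critical_section(void)
{
        unsigned long flags;

        local_irq_save(flags);
        /* code that must not be interrupted by LpEvents */
        local_irq_restore(flags);
}
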
diff --git a/arch/ppc64/kernel/iSeries_pci.c b/arch/powerpc/platforms/iseries/pci.c
index fbc273c32bcc..959e59fd9c11 100644
--- a/arch/ppc64/kernel/iSeries_pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -1,28 +1,26 @@
1/* 1/*
2 * iSeries_pci.c
3 *
4 * Copyright (C) 2001 Allan Trautman, IBM Corporation 2 * Copyright (C) 2001 Allan Trautman, IBM Corporation
5 * 3 *
6 * iSeries specific routines for PCI. 4 * iSeries specific routines for PCI.
7 * 5 *
8 * Based on code from pci.c and iSeries_pci.c 32bit 6 * Based on code from pci.c and iSeries_pci.c 32bit
9 * 7 *
10 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 11 * (at your option) any later version.
14 * 12 *
15 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 16 * GNU General Public License for more details.
19 * 17 *
20 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 21 */
24#include <linux/kernel.h> 22#include <linux/kernel.h>
25#include <linux/list.h> 23#include <linux/list.h>
26#include <linux/string.h> 24#include <linux/string.h>
27#include <linux/init.h> 25#include <linux/init.h>
28#include <linux/module.h> 26#include <linux/module.h>
@@ -36,21 +34,23 @@
36#include <asm/pci-bridge.h> 34#include <asm/pci-bridge.h>
37#include <asm/ppcdebug.h> 35#include <asm/ppcdebug.h>
38#include <asm/iommu.h> 36#include <asm/iommu.h>
37#include <asm/abs_addr.h>
39 38
40#include <asm/iSeries/HvCallPci.h>
41#include <asm/iSeries/HvCallXm.h> 39#include <asm/iSeries/HvCallXm.h>
42#include <asm/iSeries/iSeries_irq.h>
43#include <asm/iSeries/iSeries_pci.h>
44#include <asm/iSeries/mf.h> 40#include <asm/iSeries/mf.h>
45 41
42#include <asm/ppc-pci.h>
43
44#include "irq.h"
46#include "pci.h" 45#include "pci.h"
46#include "call_pci.h"
47 47
48extern unsigned long io_page_mask; 48extern unsigned long io_page_mask;
49 49
50/* 50/*
51 * Forward declares of prototypes. 51 * Forward declares of prototypes.
52 */ 52 */
53static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn); 53static struct device_node *find_Device_Node(int bus, int devfn);
54static void scan_PHB_slots(struct pci_controller *Phb); 54static void scan_PHB_slots(struct pci_controller *Phb);
55static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel); 55static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
56static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info); 56static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
@@ -68,7 +68,7 @@ static long Pci_Cfg_Write_Count;
68#endif 68#endif
69static long Pci_Error_Count; 69static long Pci_Error_Count;
70 70
71static int Pci_Retry_Max = 3; /* Only retry 3 times */ 71static int Pci_Retry_Max = 3; /* Only retry 3 times */
72static int Pci_Error_Flag = 1; /* Set Retry Error on. */ 72static int Pci_Error_Flag = 1; /* Set Retry Error on. */
73 73
74static struct pci_ops iSeries_pci_ops; 74static struct pci_ops iSeries_pci_ops;
@@ -87,7 +87,7 @@ static long current_iomm_table_entry;
87/* 87/*
88 * Lookup Tables. 88 * Lookup Tables.
89 */ 89 */
90static struct iSeries_Device_Node **iomm_table; 90static struct device_node **iomm_table;
91static u8 *iobar_table; 91static u8 *iobar_table;
92 92
93/* 93/*
@@ -179,7 +179,7 @@ static void allocate_device_bars(struct pci_dev *dev)
179 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) { 179 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
180 bar_res = &dev->resource[bar_num]; 180 bar_res = &dev->resource[bar_num];
181 iomm_table_allocate_entry(dev, bar_num); 181 iomm_table_allocate_entry(dev, bar_num);
182 } 182 }
183} 183}
184 184
185/* 185/*
@@ -201,29 +201,31 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
201/* 201/*
202 * build_device_node(u16 Bus, int SubBus, u8 DevFn) 202 * build_device_node(u16 Bus, int SubBus, u8 DevFn)
203 */ 203 */
204static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus, 204static struct device_node *build_device_node(HvBusNumber Bus,
205 HvSubBusNumber SubBus, int AgentId, int Function) 205 HvSubBusNumber SubBus, int AgentId, int Function)
206{ 206{
207 struct iSeries_Device_Node *node; 207 struct device_node *node;
208 struct pci_dn *pdn;
208 209
209 PPCDBG(PPCDBG_BUSWALK, 210 PPCDBG(PPCDBG_BUSWALK,
210 "-build_device_node 0x%02X.%02X.%02X Function: %02X\n", 211 "-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
211 Bus, SubBus, AgentId, Function); 212 Bus, SubBus, AgentId, Function);
212 213
213 node = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL); 214 node = kmalloc(sizeof(struct device_node), GFP_KERNEL);
214 if (node == NULL) 215 if (node == NULL)
215 return NULL; 216 return NULL;
216 217 memset(node, 0, sizeof(struct device_node));
217 memset(node, 0, sizeof(struct iSeries_Device_Node)); 218 pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
218 list_add_tail(&node->Device_List, &iSeries_Global_Device_List); 219 if (pdn == NULL) {
219#if 0 220 kfree(node);
220 node->DsaAddr = ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32); 221 return NULL;
221#endif 222 }
222 node->DsaAddr.DsaAddr = 0; 223 node->data = pdn;
223 node->DsaAddr.Dsa.busNumber = Bus; 224 pdn->node = node;
224 node->DsaAddr.Dsa.subBusNumber = SubBus; 225 list_add_tail(&pdn->Device_List, &iSeries_Global_Device_List);
225 node->DsaAddr.Dsa.deviceId = 0x10; 226 pdn->busno = Bus;
226 node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function); 227 pdn->bussubno = SubBus;
228 pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
227 return node; 229 return node;
228} 230}
229 231
@@ -278,28 +280,28 @@ unsigned long __init find_and_init_phbs(void)
278 280
279/* 281/*
280 * iSeries_pcibios_init 282 * iSeries_pcibios_init
281 * 283 *
282 * Chance to initialize and structures or variable before PCI Bus walk. 284 * Chance to initialize and structures or variable before PCI Bus walk.
283 */ 285 */
284void iSeries_pcibios_init(void) 286void iSeries_pcibios_init(void)
285{ 287{
286 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n"); 288 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
287 iomm_table_initialize(); 289 iomm_table_initialize();
288 find_and_init_phbs(); 290 find_and_init_phbs();
289 io_page_mask = -1; 291 io_page_mask = -1;
290 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n"); 292 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
291} 293}
292 294
293/* 295/*
294 * iSeries_pci_final_fixup(void) 296 * iSeries_pci_final_fixup(void)
295 */ 297 */
296void __init iSeries_pci_final_fixup(void) 298void __init iSeries_pci_final_fixup(void)
297{ 299{
298 struct pci_dev *pdev = NULL; 300 struct pci_dev *pdev = NULL;
299 struct iSeries_Device_Node *node; 301 struct device_node *node;
300 int DeviceCount = 0; 302 int DeviceCount = 0;
301 303
302 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n"); 304 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
303 305
304 /* Fix up at the device node and pci_dev relationship */ 306 /* Fix up at the device node and pci_dev relationship */
305 mf_display_src(0xC9000100); 307 mf_display_src(0xC9000100);
@@ -313,7 +315,7 @@ void __init iSeries_pci_final_fixup(void)
313 if (node != NULL) { 315 if (node != NULL) {
314 ++DeviceCount; 316 ++DeviceCount;
315 pdev->sysdata = (void *)node; 317 pdev->sysdata = (void *)node;
316 node->PciDev = pdev; 318 PCI_DN(node)->pcidev = pdev;
317 PPCDBG(PPCDBG_BUSWALK, 319 PPCDBG(PPCDBG_BUSWALK,
318 "pdev 0x%p <==> DevNode 0x%p\n", 320 "pdev 0x%p <==> DevNode 0x%p\n",
319 pdev, node); 321 pdev, node);
@@ -323,7 +325,7 @@ void __init iSeries_pci_final_fixup(void)
323 } else 325 } else
324 printk("PCI: Device Tree not found for 0x%016lX\n", 326 printk("PCI: Device Tree not found for 0x%016lX\n",
325 (unsigned long)pdev); 327 (unsigned long)pdev);
326 pdev->irq = node->Irq; 328 pdev->irq = PCI_DN(node)->Irq;
327 } 329 }
328 iSeries_activate_IRQs(); 330 iSeries_activate_IRQs();
329 mf_display_src(0xC9000200); 331 mf_display_src(0xC9000200);
@@ -332,24 +334,24 @@ void __init iSeries_pci_final_fixup(void)
332void pcibios_fixup_bus(struct pci_bus *PciBus) 334void pcibios_fixup_bus(struct pci_bus *PciBus)
333{ 335{
334 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n", 336 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
335 PciBus->number); 337 PciBus->number);
336} 338}
337 339
338void pcibios_fixup_resources(struct pci_dev *pdev) 340void pcibios_fixup_resources(struct pci_dev *pdev)
339{ 341{
340 PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev); 342 PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev);
341} 343}
342 344
343/* 345/*
344 * Loop through each node function to find usable EADs bridges. 346 * Loop through each node function to find usable EADs bridges.
345 */ 347 */
346static void scan_PHB_slots(struct pci_controller *Phb) 348static void scan_PHB_slots(struct pci_controller *Phb)
347{ 349{
348 struct HvCallPci_DeviceInfo *DevInfo; 350 struct HvCallPci_DeviceInfo *DevInfo;
349 HvBusNumber bus = Phb->local_number; /* System Bus */ 351 HvBusNumber bus = Phb->local_number; /* System Bus */
350 const HvSubBusNumber SubBus = 0; /* EADs is always 0. */ 352 const HvSubBusNumber SubBus = 0; /* EADs is always 0. */
351 int HvRc = 0; 353 int HvRc = 0;
352 int IdSel; 354 int IdSel;
353 const int MaxAgents = 8; 355 const int MaxAgents = 8;
354 356
355 DevInfo = (struct HvCallPci_DeviceInfo*) 357 DevInfo = (struct HvCallPci_DeviceInfo*)
@@ -358,11 +360,11 @@ static void scan_PHB_slots(struct pci_controller *Phb)
358 return; 360 return;
359 361
360 /* 362 /*
361 * Probe for EADs Bridges 363 * Probe for EADs Bridges
362 */ 364 */
363 for (IdSel = 1; IdSel < MaxAgents; ++IdSel) { 365 for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
364 HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel, 366 HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
365 ISERIES_HV_ADDR(DevInfo), 367 iseries_hv_addr(DevInfo),
366 sizeof(struct HvCallPci_DeviceInfo)); 368 sizeof(struct HvCallPci_DeviceInfo));
367 if (HvRc == 0) { 369 if (HvRc == 0) {
368 if (DevInfo->deviceType == HvCallPci_NodeDevice) 370 if (DevInfo->deviceType == HvCallPci_NodeDevice)
@@ -393,19 +395,19 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
393 395
394 /* Note: hvSubBus and irq is always be 0 at this level! */ 396 /* Note: hvSubBus and irq is always be 0 at this level! */
395 for (Function = 0; Function < 8; ++Function) { 397 for (Function = 0; Function < 8; ++Function) {
396 AgentId = ISERIES_PCI_AGENTID(IdSel, Function); 398 AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
397 HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0); 399 HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
398 if (HvRc == 0) { 400 if (HvRc == 0) {
399 printk("found device at bus %d idsel %d func %d (AgentId %x)\n", 401 printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
400 bus, IdSel, Function, AgentId); 402 bus, IdSel, Function, AgentId);
401 /* Connect EADs: 0x18.00.12 = 0x00 */ 403 /* Connect EADs: 0x18.00.12 = 0x00 */
402 PPCDBG(PPCDBG_BUSWALK, 404 PPCDBG(PPCDBG_BUSWALK,
403 "PCI:Connect EADs: 0x%02X.%02X.%02X\n", 405 "PCI:Connect EADs: 0x%02X.%02X.%02X\n",
404 bus, SubBus, AgentId); 406 bus, SubBus, AgentId);
405 HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId, 407 HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
406 ISERIES_HV_ADDR(BridgeInfo), 408 iseries_hv_addr(BridgeInfo),
407 sizeof(struct HvCallPci_BridgeInfo)); 409 sizeof(struct HvCallPci_BridgeInfo));
408 if (HvRc == 0) { 410 if (HvRc == 0) {
409 printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n", 411 printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
410 BridgeInfo->busUnitInfo.deviceType, 412 BridgeInfo->busUnitInfo.deviceType,
411 BridgeInfo->subBusNumber, 413 BridgeInfo->subBusNumber,
@@ -428,7 +430,7 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
428 printk("PCI: Invalid Bridge Configuration(0x%02X)", 430 printk("PCI: Invalid Bridge Configuration(0x%02X)",
429 BridgeInfo->busUnitInfo.deviceType); 431 BridgeInfo->busUnitInfo.deviceType);
430 } 432 }
431 } else if (HvRc != 0x000B) 433 } else if (HvRc != 0x000B)
432 pci_Log_Error("EADs Connect", 434 pci_Log_Error("EADs Connect",
433 bus, SubBus, AgentId, HvRc); 435 bus, SubBus, AgentId, HvRc);
434 } 436 }
@@ -441,7 +443,7 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
441static int scan_bridge_slot(HvBusNumber Bus, 443static int scan_bridge_slot(HvBusNumber Bus,
442 struct HvCallPci_BridgeInfo *BridgeInfo) 444 struct HvCallPci_BridgeInfo *BridgeInfo)
443{ 445{
444 struct iSeries_Device_Node *node; 446 struct device_node *node;
445 HvSubBusNumber SubBus = BridgeInfo->subBusNumber; 447 HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
446 u16 VendorId = 0; 448 u16 VendorId = 0;
447 int HvRc = 0; 449 int HvRc = 0;
@@ -451,16 +453,16 @@ static int scan_bridge_slot(HvBusNumber Bus,
451 HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function); 453 HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
452 454
453 /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */ 455 /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
454 Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel); 456 Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
455 PPCDBG(PPCDBG_BUSWALK, 457 PPCDBG(PPCDBG_BUSWALK,
456 "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n", 458 "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
457 Bus, 0, EADsIdSel, Irq); 459 Bus, 0, EADsIdSel, Irq);
458 460
459 /* 461 /*
460 * Connect all functions of any device found. 462 * Connect all functions of any device found.
461 */ 463 */
462 for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) { 464 for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
463 for (Function = 0; Function < 8; ++Function) { 465 for (Function = 0; Function < 8; ++Function) {
464 HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function); 466 HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
465 HvRc = HvCallXm_connectBusUnit(Bus, SubBus, 467 HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
466 AgentId, Irq); 468 AgentId, Irq);
@@ -484,15 +486,15 @@ static int scan_bridge_slot(HvBusNumber Bus,
484 "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n", 486 "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n",
485 Bus, SubBus, AgentId, VendorId, Irq); 487 Bus, SubBus, AgentId, VendorId, Irq);
486 HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId, 488 HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
487 PCI_INTERRUPT_LINE, Irq); 489 PCI_INTERRUPT_LINE, Irq);
488 if (HvRc != 0) 490 if (HvRc != 0)
489 pci_Log_Error("PciCfgStore Irq Failed!", 491 pci_Log_Error("PciCfgStore Irq Failed!",
490 Bus, SubBus, AgentId, HvRc); 492 Bus, SubBus, AgentId, HvRc);
491 493
492 ++DeviceCount; 494 ++DeviceCount;
493 node = build_device_node(Bus, SubBus, EADsIdSel, Function); 495 node = build_device_node(Bus, SubBus, EADsIdSel, Function);
494 node->Irq = Irq; 496 PCI_DN(node)->Irq = Irq;
495 node->LogicalSlot = BridgeInfo->logicalSlotNumber; 497 PCI_DN(node)->LogicalSlot = BridgeInfo->logicalSlotNumber;
496 498
497 } /* for (Function = 0; Function < 8; ++Function) */ 499 } /* for (Function = 0; Function < 8; ++Function) */
498 } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */ 500 } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
@@ -542,16 +544,13 @@ EXPORT_SYMBOL(iSeries_memcpy_fromio);
542/* 544/*
543 * Look down the chain to find the matching Device Device 545 * Look down the chain to find the matching Device Device
544 */ 546 */
545static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn) 547static struct device_node *find_Device_Node(int bus, int devfn)
546{ 548{
547 struct list_head *pos; 549 struct pci_dn *pdn;
548 550
549 list_for_each(pos, &iSeries_Global_Device_List) { 551 list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
550 struct iSeries_Device_Node *node = 552 if ((bus == pdn->busno) && (devfn == pdn->devfn))
551 list_entry(pos, struct iSeries_Device_Node, Device_List); 553 return pdn->node;
552
553 if ((bus == ISERIES_BUS(node)) && (devfn == node->DevFn))
554 return node;
555 } 554 }
556 return NULL; 555 return NULL;
557} 556}
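
The rewrite above replaces the hand-rolled list_for_each()/list_entry() walk with list_for_each_entry() over the common struct pci_dn. A stand-alone sketch of the container_of idiom that macro is built on (plain user-space C with illustrative types, not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pci_dn_sketch {
        int busno, devfn;
        struct list_head list;
};

static struct pci_dn_sketch *find_dn(struct list_head *head, int bus, int devfn)
{
        struct list_head *pos;

        /* Roughly what list_for_each_entry() expands to: walk the
         * list_head chain and recover the containing structure. */
        for (pos = head->next; pos != head; pos = pos->next) {
                struct pci_dn_sketch *pdn =
                        container_of(pos, struct pci_dn_sketch, list);
                if (pdn->busno == bus && pdn->devfn == devfn)
                        return pdn;
        }
        return NULL;
}

int main(void)
{
        struct pci_dn_sketch a = { .busno = 0x17, .devfn = 0x90 };
        struct list_head head = { &a.list, &a.list };

        a.list.next = &head;
        a.list.prev = &head;

        printf("found: %p\n", (void *)find_dn(&head, 0x17, 0x90));
        return 0;
}
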
@@ -562,12 +561,12 @@ static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn)
562 * Sanity Check Node PciDev to passed pci_dev 561 * Sanity Check Node PciDev to passed pci_dev
563 * If none is found, returns a NULL which the client must handle. 562 * If none is found, returns a NULL which the client must handle.
564 */ 563 */
565static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *pdev) 564static struct device_node *get_Device_Node(struct pci_dev *pdev)
566{ 565{
567 struct iSeries_Device_Node *node; 566 struct device_node *node;
568 567
569 node = pdev->sysdata; 568 node = pdev->sysdata;
570 if (node == NULL || node->PciDev != pdev) 569 if (node == NULL || PCI_DN(node)->pcidev != pdev)
571 node = find_Device_Node(pdev->bus->number, pdev->devfn); 570 node = find_Device_Node(pdev->bus->number, pdev->devfn);
572 return node; 571 return node;
573} 572}
@@ -595,7 +594,7 @@ static u64 hv_cfg_write_func[4] = {
595static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn, 594static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
596 int offset, int size, u32 *val) 595 int offset, int size, u32 *val)
597{ 596{
598 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn); 597 struct device_node *node = find_Device_Node(bus->number, devfn);
599 u64 fn; 598 u64 fn;
600 struct HvCallPci_LoadReturn ret; 599 struct HvCallPci_LoadReturn ret;
601 600
@@ -607,7 +606,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
607 } 606 }
608 607
609 fn = hv_cfg_read_func[(size - 1) & 3]; 608 fn = hv_cfg_read_func[(size - 1) & 3];
610 HvCall3Ret16(fn, &ret, node->DsaAddr.DsaAddr, offset, 0); 609 HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);
611 610
612 if (ret.rc != 0) { 611 if (ret.rc != 0) {
613 *val = ~0; 612 *val = ~0;
@@ -625,7 +624,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
625static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn, 624static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
626 int offset, int size, u32 val) 625 int offset, int size, u32 val)
627{ 626{
628 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn); 627 struct device_node *node = find_Device_Node(bus->number, devfn);
629 u64 fn; 628 u64 fn;
630 u64 ret; 629 u64 ret;
631 630
@@ -635,7 +634,7 @@ static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
635 return PCIBIOS_BAD_REGISTER_NUMBER; 634 return PCIBIOS_BAD_REGISTER_NUMBER;
636 635
637 fn = hv_cfg_write_func[(size - 1) & 3]; 636 fn = hv_cfg_write_func[(size - 1) & 3];
638 ret = HvCall4(fn, node->DsaAddr.DsaAddr, offset, val, 0); 637 ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);
639 638
640 if (ret != 0) 639 if (ret != 0)
641 return PCIBIOS_DEVICE_NOT_FOUND; 640 return PCIBIOS_DEVICE_NOT_FOUND;
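
Both config-space paths pick the hypervisor call out of a four-entry table indexed by (size - 1) & 3, so 1-, 2- and 4-byte accesses land on slots 0, 1 and 3. A stand-alone sketch of that dispatch pattern with stand-in functions (the real tables hold hypervisor call tokens, not shown here):

#include <stdint.h>
#include <stdio.h>

static uint32_t load8(uint64_t off)  { (void)off; return 0x11; }
static uint32_t load16(uint64_t off) { (void)off; return 0x2222; }
static uint32_t load32(uint64_t off) { (void)off; return 0x44444444; }

/* Index 0 -> 1 byte, 1 -> 2 bytes, 3 -> 4 bytes; index 2 is never
 * generated because config accesses are 1, 2 or 4 bytes wide. */
static uint32_t (*cfg_read_func[4])(uint64_t) = {
        load8, load16, load32, load32
};

int main(void)
{
        int size;

        for (size = 1; size <= 4; size <<= 1)
                printf("size %d -> %#x\n", size,
                       cfg_read_func[(size - 1) & 3](0));
        return 0;
}
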
@@ -657,14 +656,16 @@ static struct pci_ops iSeries_pci_ops = {
657 * PCI: Device 23.90 ReadL Retry( 1) 656 * PCI: Device 23.90 ReadL Retry( 1)
658 * PCI: Device 23.90 ReadL Retry Successful(1) 657 * PCI: Device 23.90 ReadL Retry Successful(1)
659 */ 658 */
660static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode, 659static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
661 int *retry, u64 ret) 660 int *retry, u64 ret)
662{ 661{
663 if (ret != 0) { 662 if (ret != 0) {
663 struct pci_dn *pdn = PCI_DN(DevNode);
664
664 ++Pci_Error_Count; 665 ++Pci_Error_Count;
665 (*retry)++; 666 (*retry)++;
666 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n", 667 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
667 TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn, 668 TextHdr, pdn->busno, pdn->devfn,
668 *retry, (int)ret); 669 *retry, (int)ret);
669 /* 670 /*
670 * Bump the retry and check for retry count exceeded. 671 * Bump the retry and check for retry count exceeded.
@@ -687,14 +688,14 @@ static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
687 * Note: Make sure the passed variable end up on the stack to avoid 688 * Note: Make sure the passed variable end up on the stack to avoid
688 * the exposure of being device global. 689 * the exposure of being device global.
689 */ 690 */
690static inline struct iSeries_Device_Node *xlate_iomm_address( 691static inline struct device_node *xlate_iomm_address(
691 const volatile void __iomem *IoAddress, 692 const volatile void __iomem *IoAddress,
692 u64 *dsaptr, u64 *BarOffsetPtr) 693 u64 *dsaptr, u64 *BarOffsetPtr)
693{ 694{
694 unsigned long OrigIoAddr; 695 unsigned long OrigIoAddr;
695 unsigned long BaseIoAddr; 696 unsigned long BaseIoAddr;
696 unsigned long TableIndex; 697 unsigned long TableIndex;
697 struct iSeries_Device_Node *DevNode; 698 struct device_node *DevNode;
698 699
699 OrigIoAddr = (unsigned long __force)IoAddress; 700 OrigIoAddr = (unsigned long __force)IoAddress;
700 if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory)) 701 if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
@@ -705,7 +706,7 @@ static inline struct iSeries_Device_Node *xlate_iomm_address(
705 706
706 if (DevNode != NULL) { 707 if (DevNode != NULL) {
707 int barnum = iobar_table[TableIndex]; 708 int barnum = iobar_table[TableIndex];
708 *dsaptr = DevNode->DsaAddr.DsaAddr | (barnum << 24); 709 *dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
709 *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE; 710 *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
710 } else 711 } else
711 panic("PCI: Invalid PCI IoAddress detected!\n"); 712 panic("PCI: Invalid PCI IoAddress detected!\n");
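
For reference, xlate_iomm_address() splits a remapped I/O address into an iomm_table index (which device and BAR) and an offset within that BAR; the modulo above is the second half of that split. A worked sketch, with an assumed base and entry size purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define BASE_IO_MEMORY        0xE000000000000000ULL   /* illustrative only */
#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000ULL   /* illustrative only */

int main(void)
{
        uint64_t io_addr = BASE_IO_MEMORY + 3 * IOMM_TABLE_ENTRY_SIZE + 0x10;
        uint64_t base    = io_addr - BASE_IO_MEMORY;
        uint64_t index   = base / IOMM_TABLE_ENTRY_SIZE; /* which device/BAR */
        uint64_t offset  = base % IOMM_TABLE_ENTRY_SIZE; /* offset inside BAR */

        printf("table index %llu, BAR offset %#llx\n",
               (unsigned long long)index, (unsigned long long)offset);
        return 0;
}
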
@@ -727,7 +728,7 @@ u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
727 u64 dsa; 728 u64 dsa;
728 int retry = 0; 729 int retry = 0;
729 struct HvCallPci_LoadReturn ret; 730 struct HvCallPci_LoadReturn ret;
730 struct iSeries_Device_Node *DevNode = 731 struct device_node *DevNode =
731 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 732 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
732 733
733 if (DevNode == NULL) { 734 if (DevNode == NULL) {
@@ -757,7 +758,7 @@ u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
757 u64 dsa; 758 u64 dsa;
758 int retry = 0; 759 int retry = 0;
759 struct HvCallPci_LoadReturn ret; 760 struct HvCallPci_LoadReturn ret;
760 struct iSeries_Device_Node *DevNode = 761 struct device_node *DevNode =
761 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 762 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
762 763
763 if (DevNode == NULL) { 764 if (DevNode == NULL) {
@@ -788,7 +789,7 @@ u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
788 u64 dsa; 789 u64 dsa;
789 int retry = 0; 790 int retry = 0;
790 struct HvCallPci_LoadReturn ret; 791 struct HvCallPci_LoadReturn ret;
791 struct iSeries_Device_Node *DevNode = 792 struct device_node *DevNode =
792 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 793 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
793 794
794 if (DevNode == NULL) { 795 if (DevNode == NULL) {
@@ -826,7 +827,7 @@ void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
826 u64 dsa; 827 u64 dsa;
827 int retry = 0; 828 int retry = 0;
828 u64 rc; 829 u64 rc;
829 struct iSeries_Device_Node *DevNode = 830 struct device_node *DevNode =
830 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 831 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
831 832
832 if (DevNode == NULL) { 833 if (DevNode == NULL) {
@@ -854,7 +855,7 @@ void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
854 u64 dsa; 855 u64 dsa;
855 int retry = 0; 856 int retry = 0;
856 u64 rc; 857 u64 rc;
857 struct iSeries_Device_Node *DevNode = 858 struct device_node *DevNode =
858 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 859 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
859 860
860 if (DevNode == NULL) { 861 if (DevNode == NULL) {
@@ -882,7 +883,7 @@ void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
882 u64 dsa; 883 u64 dsa;
883 int retry = 0; 884 int retry = 0;
884 u64 rc; 885 u64 rc;
885 struct iSeries_Device_Node *DevNode = 886 struct device_node *DevNode =
886 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 887 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
887 888
888 if (DevNode == NULL) { 889 if (DevNode == NULL) {
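
The common thread through this file is that the private iSeries_Device_Node is gone: per-device PCI data now lives in the generic struct pci_dn reached through PCI_DN() on a device_node. A minimal sketch of that relationship (field names follow asm-ppc64/pci-bridge.h of this era; the shapes here are illustrative, not the full kernel structures):

#include <stdio.h>

struct device_node_sketch {
        const char *full_name;
        void *data;             /* points at the pci_dn for PCI devices */
};

struct pci_dn_sketch {
        int busno, bussubno, devfn;
        int Irq;
        struct device_node_sketch *node;   /* back pointer */
};

#define PCI_DN(dn) ((struct pci_dn_sketch *)(dn)->data)

int main(void)
{
        struct device_node_sketch dn = { .full_name = "pci@0" };
        struct pci_dn_sketch pdn = { .busno = 0x17, .devfn = 0x90, .node = &dn };

        dn.data = &pdn;
        PCI_DN(&dn)->Irq = 9;              /* what pci.c now does per device */
        printf("%s irq %d\n", dn.full_name, PCI_DN(&dn)->Irq);
        return 0;
}
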
diff --git a/include/asm-ppc64/iSeries/iSeries_pci.h b/arch/powerpc/platforms/iseries/pci.h
index 575f611f8b33..33a8489fde54 100644
--- a/include/asm-ppc64/iSeries/iSeries_pci.h
+++ b/arch/powerpc/platforms/iseries/pci.h
@@ -1,8 +1,8 @@
1#ifndef _ISERIES_64_PCI_H 1#ifndef _PLATFORMS_ISERIES_PCI_H
2#define _ISERIES_64_PCI_H 2#define _PLATFORMS_ISERIES_PCI_H
3 3
4/* 4/*
5 * File iSeries_pci.h created by Allan Trautman on Tue Feb 20, 2001. 5 * Created by Allan Trautman on Tue Feb 20, 2001.
6 * 6 *
7 * Define some useful macros for the iSeries pci routines. 7 * Define some useful macros for the iSeries pci routines.
8 * Copyright (C) 2001 Allan H Trautman, IBM Corporation 8 * Copyright (C) 2001 Allan H Trautman, IBM Corporation
@@ -30,23 +30,9 @@
30 * End Change Activity 30 * End Change Activity
31 */ 31 */
32 32
33#include <asm/iSeries/HvCallPci.h> 33#include <asm/pci-bridge.h>
34#include <asm/abs_addr.h>
35 34
36struct pci_dev; /* For Forward Reference */ 35struct pci_dev; /* For Forward Reference */
37struct iSeries_Device_Node;
38
39/*
40 * Gets iSeries Bus, SubBus, DevFn using iSeries_Device_Node structure
41 */
42
43#define ISERIES_BUS(DevPtr) DevPtr->DsaAddr.Dsa.busNumber
44#define ISERIES_SUBBUS(DevPtr) DevPtr->DsaAddr.Dsa.subBusNumber
45#define ISERIES_DEVICE(DevPtr) DevPtr->DsaAddr.Dsa.deviceId
46#define ISERIES_DSA(DevPtr) DevPtr->DsaAddr.DsaAddr
47#define ISERIES_DEVNODE(PciDev) ((struct iSeries_Device_Node *)PciDev->sysdata)
48
49#define EADsMaxAgents 7
50 36
51/* 37/*
52 * Decodes Linux DevFn to iSeries DevFn, bridge device, or function. 38 * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
@@ -62,27 +48,16 @@ struct iSeries_Device_Node;
62#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7) 48#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7)
63 49
64/* 50/*
65 * Converts Virtual Address to Real Address for Hypervisor calls 51 * Generate a Direct Select Address for the Hypervisor
66 */ 52 */
67#define ISERIES_HV_ADDR(virtaddr) \ 53static inline u64 iseries_ds_addr(struct device_node *node)
68 (0x8000000000000000 | virt_to_abs(virtaddr)) 54{
55 struct pci_dn *pdn = PCI_DN(node);
69 56
70/* 57 return ((u64)pdn->busno << 48) + ((u64)pdn->bussubno << 40)
71 * iSeries Device Information 58 + ((u64)0x10 << 32);
72 */ 59}
73struct iSeries_Device_Node {
74 struct list_head Device_List;
75 struct pci_dev *PciDev;
76 union HvDsaMap DsaAddr; /* Direct Select Address */
77 /* busNumber, subBusNumber, */
78 /* deviceId, barNumber */
79 int DevFn; /* Linux devfn */
80 int Irq; /* Assigned IRQ */
81 int Flags; /* Possible flags(disable/bist)*/
82 u8 LogicalSlot; /* Hv Slot Index for Tces */
83 struct iommu_table *iommu_table;/* Device TCE Table */
84};
85 60
86extern void iSeries_Device_Information(struct pci_dev*, int); 61extern void iSeries_Device_Information(struct pci_dev*, int);
87 62
88#endif /* _ISERIES_64_PCI_H */ 63#endif /* _PLATFORMS_ISERIES_PCI_H */
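
The new inline rebuilds the old DsaAddr value directly from pci_dn fields: bus number in bits 48-55, sub-bus number in bits 40-47, and the constant 0x10 in bits 32-39. A worked example of the packing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t busno = 0x17, bussubno = 0x38;
        uint64_t dsa = (busno << 48) + (bussubno << 40) + ((uint64_t)0x10 << 32);

        /* 0x17 in bits 48-55, 0x38 in bits 40-47, 0x10 in bits 32-39 */
        printf("dsa = %#018llx\n", (unsigned long long)dsa);  /* 0x0017381000000000 */
        return 0;
}
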
diff --git a/arch/ppc64/kernel/iSeries_proc.c b/arch/powerpc/platforms/iseries/proc.c
index 0fe3116eba29..6f1929cac66b 100644
--- a/arch/ppc64/kernel/iSeries_proc.c
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * iSeries_proc.c
3 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation 2 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation 3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
5 * 4 *
@@ -27,8 +26,9 @@
27#include <asm/lppaca.h> 26#include <asm/lppaca.h>
28#include <asm/iSeries/ItLpQueue.h> 27#include <asm/iSeries/ItLpQueue.h>
29#include <asm/iSeries/HvCallXm.h> 28#include <asm/iSeries/HvCallXm.h>
30#include <asm/iSeries/IoHriMainStore.h> 29
31#include <asm/iSeries/IoHriProcessorVpd.h> 30#include "processor_vpd.h"
31#include "main_store.h"
32 32
33static int __init iseries_proc_create(void) 33static int __init iseries_proc_create(void)
34{ 34{
@@ -68,12 +68,15 @@ static int proc_titantod_show(struct seq_file *m, void *v)
68 unsigned long tb_ticks = (tb0 - startTb); 68 unsigned long tb_ticks = (tb0 - startTb);
69 unsigned long titan_jiffies = titan_usec / (1000000/HZ); 69 unsigned long titan_jiffies = titan_usec / (1000000/HZ);
70 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ); 70 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
71 unsigned long titan_jiff_rem_usec = titan_usec - titan_jiff_usec; 71 unsigned long titan_jiff_rem_usec =
72 titan_usec - titan_jiff_usec;
72 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy; 73 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
73 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy; 74 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
74 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks; 75 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
75 unsigned long tb_jiff_rem_usec = tb_jiff_rem_ticks / tb_ticks_per_usec; 76 unsigned long tb_jiff_rem_usec =
76 unsigned long new_tb_ticks_per_jiffy = (tb_ticks * (1000000/HZ))/titan_usec; 77 tb_jiff_rem_ticks / tb_ticks_per_usec;
78 unsigned long new_tb_ticks_per_jiffy =
79 (tb_ticks * (1000000/HZ))/titan_usec;
77 80
78 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec); 81 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
79 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks); 82 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
diff --git a/include/asm-ppc64/iSeries/IoHriProcessorVpd.h b/arch/powerpc/platforms/iseries/processor_vpd.h
index 73b73d80b8b1..7ac5d0d0dbfa 100644
--- a/include/asm-ppc64/iSeries/IoHriProcessorVpd.h
+++ b/arch/powerpc/platforms/iseries/processor_vpd.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * IoHriProcessorVpd.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _IOHRIPROCESSORVPD_H 18#ifndef _ISERIES_PROCESSOR_VPD_H
20#define _IOHRIPROCESSORVPD_H 19#define _ISERIES_PROCESSOR_VPD_H
21 20
22#include <asm/types.h> 21#include <asm/types.h>
23 22
@@ -83,4 +82,4 @@ struct IoHriProcessorVpd {
83 82
84extern struct IoHriProcessorVpd xIoHriProcessorVpd[]; 83extern struct IoHriProcessorVpd xIoHriProcessorVpd[];
85 84
86#endif /* _IOHRIPROCESSORVPD_H */ 85#endif /* _ISERIES_PROCESSOR_VPD_H */
diff --git a/include/asm-ppc64/iSeries/HvReleaseData.h b/arch/powerpc/platforms/iseries/release_data.h
index c8162e5ccb21..c68b9c3e5caf 100644
--- a/include/asm-ppc64/iSeries/HvReleaseData.h
+++ b/arch/powerpc/platforms/iseries/release_data.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvReleaseData.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _HVRELEASEDATA_H 18#ifndef _ISERIES_RELEASE_DATA_H
20#define _HVRELEASEDATA_H 19#define _ISERIES_RELEASE_DATA_H
21 20
22/* 21/*
23 * This control block contains the critical information about the 22 * This control block contains the critical information about the
@@ -61,4 +60,4 @@ struct HvReleaseData {
61 60
62extern struct HvReleaseData hvReleaseData; 61extern struct HvReleaseData hvReleaseData;
63 62
64#endif /* _HVRELEASEDATA_H */ 63#endif /* _ISERIES_RELEASE_DATA_H */
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/powerpc/platforms/iseries/setup.c
index 3ffefbbc6623..1544c6f10a38 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -2,8 +2,6 @@
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> 2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> 3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 * 4 *
5 * Module name: iSeries_setup.c
6 *
7 * Description: 5 * Description:
8 * Architecture- / platform-specific boot-time initialization code for 6 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and 7 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
@@ -42,26 +40,27 @@
42#include <asm/firmware.h> 40#include <asm/firmware.h>
43 41
44#include <asm/time.h> 42#include <asm/time.h>
45#include "iSeries_setup.h"
46#include <asm/naca.h> 43#include <asm/naca.h>
47#include <asm/paca.h> 44#include <asm/paca.h>
48#include <asm/cache.h> 45#include <asm/cache.h>
49#include <asm/sections.h> 46#include <asm/sections.h>
50#include <asm/abs_addr.h> 47#include <asm/abs_addr.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/iSeries/HvLpConfig.h> 48#include <asm/iSeries/HvLpConfig.h>
53#include <asm/iSeries/HvCallEvent.h> 49#include <asm/iSeries/HvCallEvent.h>
54#include <asm/iSeries/HvCallSm.h>
55#include <asm/iSeries/HvCallXm.h> 50#include <asm/iSeries/HvCallXm.h>
56#include <asm/iSeries/ItLpQueue.h> 51#include <asm/iSeries/ItLpQueue.h>
57#include <asm/iSeries/IoHriMainStore.h>
58#include <asm/iSeries/mf.h> 52#include <asm/iSeries/mf.h>
59#include <asm/iSeries/HvLpEvent.h> 53#include <asm/iSeries/HvLpEvent.h>
60#include <asm/iSeries/iSeries_irq.h>
61#include <asm/iSeries/IoHriProcessorVpd.h>
62#include <asm/iSeries/ItVpdAreas.h>
63#include <asm/iSeries/LparMap.h> 54#include <asm/iSeries/LparMap.h>
64 55
56#include "setup.h"
57#include "irq.h"
58#include "vpd_areas.h"
59#include "processor_vpd.h"
60#include "main_store.h"
61#include "call_sm.h"
62#include "call_hpt.h"
63
65extern void hvlog(char *fmt, ...); 64extern void hvlog(char *fmt, ...);
66 65
67#ifdef DEBUG 66#ifdef DEBUG
@@ -74,8 +73,8 @@ extern void hvlog(char *fmt, ...);
74extern void ppcdbg_initialize(void); 73extern void ppcdbg_initialize(void);
75 74
76static void build_iSeries_Memory_Map(void); 75static void build_iSeries_Memory_Map(void);
77static void setup_iSeries_cache_sizes(void); 76static void iseries_shared_idle(void);
78static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr); 77static void iseries_dedicated_idle(void);
79#ifdef CONFIG_PCI 78#ifdef CONFIG_PCI
80extern void iSeries_pci_final_fixup(void); 79extern void iSeries_pci_final_fixup(void);
81#else 80#else
@@ -83,14 +82,6 @@ static void iSeries_pci_final_fixup(void) { }
83#endif 82#endif
84 83
85/* Global Variables */ 84/* Global Variables */
86static unsigned long procFreqHz;
87static unsigned long procFreqMhz;
88static unsigned long procFreqMhzHundreths;
89
90static unsigned long tbFreqHz;
91static unsigned long tbFreqMhz;
92static unsigned long tbFreqMhzHundreths;
93
94int piranha_simulator; 85int piranha_simulator;
95 86
96extern int rd_size; /* Defined in drivers/block/rd.c */ 87extern int rd_size; /* Defined in drivers/block/rd.c */
@@ -311,14 +302,14 @@ static void __init iSeries_get_cmdline(void)
311 302
312static void __init iSeries_init_early(void) 303static void __init iSeries_init_early(void)
313{ 304{
314 extern unsigned long memory_limit;
315
316 DBG(" -> iSeries_init_early()\n"); 305 DBG(" -> iSeries_init_early()\n");
317 306
318 ppc64_firmware_features = FW_FEATURE_ISERIES; 307 ppc64_firmware_features = FW_FEATURE_ISERIES;
319 308
320 ppcdbg_initialize(); 309 ppcdbg_initialize();
321 310
311 ppc64_interrupt_controller = IC_ISERIES;
312
322#if defined(CONFIG_BLK_DEV_INITRD) 313#if defined(CONFIG_BLK_DEV_INITRD)
323 /* 314 /*
324 * If the init RAM disk has been configured and there is 315 * If the init RAM disk has been configured and there is
@@ -341,12 +332,6 @@ static void __init iSeries_init_early(void)
341 iSeries_recal_titan = HvCallXm_loadTod(); 332 iSeries_recal_titan = HvCallXm_loadTod();
342 333
343 /* 334 /*
344 * Cache sizes must be initialized before hpte_init_iSeries is called
345 * as the later need them for flush_icache_range()
346 */
347 setup_iSeries_cache_sizes();
348
349 /*
350 * Initialize the hash table management pointers 335 * Initialize the hash table management pointers
351 */ 336 */
352 hpte_init_iSeries(); 337 hpte_init_iSeries();
@@ -356,12 +341,6 @@ static void __init iSeries_init_early(void)
356 */ 341 */
357 iommu_init_early_iSeries(); 342 iommu_init_early_iSeries();
358 343
359 /*
360 * Initialize the table which translate Linux physical addresses to
361 * AS/400 absolute addresses
362 */
363 build_iSeries_Memory_Map();
364
365 iSeries_get_cmdline(); 344 iSeries_get_cmdline();
366 345
367 /* Save unparsed command line copy for /proc/cmdline */ 346 /* Save unparsed command line copy for /proc/cmdline */
@@ -379,14 +358,6 @@ static void __init iSeries_init_early(void)
379 } 358 }
380 } 359 }
381 360
382 /* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
383 iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);
384
385 lmb_init();
386 lmb_add(0, systemcfg->physicalMemorySize);
387 lmb_analyze();
388 lmb_reserve(0, __pa(klimit));
389
390 /* Initialize machine-dependency vectors */ 361 /* Initialize machine-dependency vectors */
391#ifdef CONFIG_SMP 362#ifdef CONFIG_SMP
392 smp_init_iSeries(); 363 smp_init_iSeries();
@@ -457,7 +428,6 @@ static void __init build_iSeries_Memory_Map(void)
457 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize; 428 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
458 u32 nextPhysChunk; 429 u32 nextPhysChunk;
459 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages; 430 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
460 u32 num_ptegs;
461 u32 totalChunks,moreChunks; 431 u32 totalChunks,moreChunks;
462 u32 currChunk, thisChunk, absChunk; 432 u32 currChunk, thisChunk, absChunk;
463 u32 currDword; 433 u32 currDword;
@@ -520,10 +490,7 @@ static void __init build_iSeries_Memory_Map(void)
520 printk("HPT absolute addr = %016lx, size = %dK\n", 490 printk("HPT absolute addr = %016lx, size = %dK\n",
521 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256); 491 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
522 492
523 /* Fill in the hashed page table hash mask */ 493 ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE);
524 num_ptegs = hptSizePages *
525 (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
526 htab_hash_mask = num_ptegs - 1;
527 494
528 /* 495 /*
529 * The actual hashed page table is in the hypervisor, 496 * The actual hashed page table is in the hypervisor,
@@ -592,144 +559,33 @@ static void __init build_iSeries_Memory_Map(void)
592} 559}
593 560
594/* 561/*
595 * Set up the variables that describe the cache line sizes
596 * for this machine.
597 */
598static void __init setup_iSeries_cache_sizes(void)
599{
600 unsigned int i, n;
601 unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
602
603 systemcfg->icache_size =
604 ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
605 systemcfg->icache_line_size =
606 ppc64_caches.iline_size =
607 xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
608 systemcfg->dcache_size =
609 ppc64_caches.dsize =
610 xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
611 systemcfg->dcache_line_size =
612 ppc64_caches.dline_size =
613 xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
614 ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
615 ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;
616
617 i = ppc64_caches.iline_size;
618 n = 0;
619 while ((i = (i / 2)))
620 ++n;
621 ppc64_caches.log_iline_size = n;
622
623 i = ppc64_caches.dline_size;
624 n = 0;
625 while ((i = (i / 2)))
626 ++n;
627 ppc64_caches.log_dline_size = n;
628
629 printk("D-cache line size = %d\n",
630 (unsigned int)ppc64_caches.dline_size);
631 printk("I-cache line size = %d\n",
632 (unsigned int)ppc64_caches.iline_size);
633}
634
635/*
636 * Create a pte. Used during initialization only.
637 */
638static void iSeries_make_pte(unsigned long va, unsigned long pa,
639 int mode)
640{
641 hpte_t local_hpte, rhpte;
642 unsigned long hash, vpn;
643 long slot;
644
645 vpn = va >> PAGE_SHIFT;
646 hash = hpt_hash(vpn, 0);
647
648 local_hpte.r = pa | mode;
649 local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
650 | HPTE_V_BOLTED | HPTE_V_VALID;
651
652 slot = HvCallHpt_findValid(&rhpte, vpn);
653 if (slot < 0) {
654 /* Must find space in primary group */
655 panic("hash_page: hpte already exists\n");
656 }
657 HvCallHpt_addValidate(slot, 0, &local_hpte);
658}
659
660/*
661 * Bolt the kernel addr space into the HPT
662 */
663static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
664{
665 unsigned long pa;
666 unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
667 hpte_t hpte;
668
669 for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) {
670 unsigned long ea = (unsigned long)__va(pa);
671 unsigned long vsid = get_kernel_vsid(ea);
672 unsigned long va = (vsid << 28) | (pa & 0xfffffff);
673 unsigned long vpn = va >> PAGE_SHIFT;
674 unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
675
676 /* Make non-kernel text non-executable */
677 if (!in_kernel_text(ea))
678 mode_rw |= HW_NO_EXEC;
679
680 if (hpte.v & HPTE_V_VALID) {
681 /* HPTE exists, so just bolt it */
682 HvCallHpt_setSwBits(slot, 0x10, 0);
683 /* And make sure the pp bits are correct */
684 HvCallHpt_setPp(slot, PP_RWXX);
685 } else
686 /* No HPTE exists, so create a new bolted one */
687 iSeries_make_pte(va, phys_to_abs(pa), mode_rw);
688 }
689}
690
691/*
692 * Document me. 562 * Document me.
693 */ 563 */
694static void __init iSeries_setup_arch(void) 564static void __init iSeries_setup_arch(void)
695{ 565{
696 unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index; 566 unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
697 567
698 /* Add an eye catcher and the systemcfg layout version number */ 568 if (get_paca()->lppaca.shared_proc) {
699 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); 569 ppc_md.idle_loop = iseries_shared_idle;
700 systemcfg->version.major = SYSTEMCFG_MAJOR; 570 printk(KERN_INFO "Using shared processor idle loop\n");
701 systemcfg->version.minor = SYSTEMCFG_MINOR; 571 } else {
572 ppc_md.idle_loop = iseries_dedicated_idle;
573 printk(KERN_INFO "Using dedicated idle loop\n");
574 }
702 575
703 /* Setup the Lp Event Queue */ 576 /* Setup the Lp Event Queue */
704 setup_hvlpevent_queue(); 577 setup_hvlpevent_queue();
705 578
706 /* Compute processor frequency */
707 procFreqHz = ((1UL << 34) * 1000000) /
708 xIoHriProcessorVpd[procIx].xProcFreq;
709 procFreqMhz = procFreqHz / 1000000;
710 procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
711 ppc_proc_freq = procFreqHz;
712
713 /* Compute time base frequency */
714 tbFreqHz = ((1UL << 32) * 1000000) /
715 xIoHriProcessorVpd[procIx].xTimeBaseFreq;
716 tbFreqMhz = tbFreqHz / 1000000;
717 tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
718 ppc_tb_freq = tbFreqHz;
719
720 printk("Max logical processors = %d\n", 579 printk("Max logical processors = %d\n",
721 itVpdAreas.xSlicMaxLogicalProcs); 580 itVpdAreas.xSlicMaxLogicalProcs);
722 printk("Max physical processors = %d\n", 581 printk("Max physical processors = %d\n",
723 itVpdAreas.xSlicMaxPhysicalProcs); 582 itVpdAreas.xSlicMaxPhysicalProcs);
724 printk("Processor frequency = %lu.%02lu\n", procFreqMhz, 583
725 procFreqMhzHundreths);
726 printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
727 tbFreqMhzHundreths);
728 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR; 584 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
729 printk("Processor version = %x\n", systemcfg->processor); 585 printk("Processor version = %x\n", systemcfg->processor);
730} 586}
731 587
732static void iSeries_get_cpuinfo(struct seq_file *m) 588static void iSeries_show_cpuinfo(struct seq_file *m)
733{ 589{
734 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n"); 590 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
735} 591}
@@ -768,49 +624,6 @@ static void iSeries_halt(void)
768 mf_power_off(); 624 mf_power_off();
769} 625}
770 626
771/*
772 * void __init iSeries_calibrate_decr()
773 *
774 * Description:
775 * This routine retrieves the internal processor frequency from the VPD,
776 * and sets up the kernel timer decrementer based on that value.
777 *
778 */
779static void __init iSeries_calibrate_decr(void)
780{
781 unsigned long cyclesPerUsec;
782 struct div_result divres;
783
784 /* Compute decrementer (and TB) frequency in cycles/sec */
785 cyclesPerUsec = ppc_tb_freq / 1000000;
786
787 /*
788 * Set the amount to refresh the decrementer by. This
789 * is the number of decrementer ticks it takes for
790 * 1/HZ seconds.
791 */
792 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
793
794#if 0
795 /* TEST CODE FOR ADJTIME */
796 tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
797 /* END OF TEST CODE */
798#endif
799
800 /*
801 * tb_ticks_per_sec = freq; would give better accuracy
802 * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
803 * that jiffies (and xtime) will match the time returned
804 * by do_gettimeofday.
805 */
806 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
807 tb_ticks_per_usec = cyclesPerUsec;
808 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
809 div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
810 tb_to_xs = divres.result_low;
811 setup_default_decr();
812}
813
814static void __init iSeries_progress(char * st, unsigned short code) 627static void __init iSeries_progress(char * st, unsigned short code)
815{ 628{
816 printk("Progress: [%04x] - %s\n", (unsigned)code, st); 629 printk("Progress: [%04x] - %s\n", (unsigned)code, st);
@@ -878,7 +691,7 @@ static void yield_shared_processor(void)
878 process_iSeries_events(); 691 process_iSeries_events();
879} 692}
880 693
881static int iseries_shared_idle(void) 694static void iseries_shared_idle(void)
882{ 695{
883 while (1) { 696 while (1) {
884 while (!need_resched() && !hvlpevent_is_pending()) { 697 while (!need_resched() && !hvlpevent_is_pending()) {
@@ -900,11 +713,9 @@ static int iseries_shared_idle(void)
900 713
901 schedule(); 714 schedule();
902 } 715 }
903
904 return 0;
905} 716}
906 717
907static int iseries_dedicated_idle(void) 718static void iseries_dedicated_idle(void)
908{ 719{
909 long oldval; 720 long oldval;
910 721
@@ -934,44 +745,252 @@ static int iseries_dedicated_idle(void)
934 ppc64_runlatch_on(); 745 ppc64_runlatch_on();
935 schedule(); 746 schedule();
936 } 747 }
937
938 return 0;
939} 748}
940 749
941#ifndef CONFIG_PCI 750#ifndef CONFIG_PCI
942void __init iSeries_init_IRQ(void) { } 751void __init iSeries_init_IRQ(void) { }
943#endif 752#endif
944 753
945void __init iSeries_early_setup(void) 754static int __init iseries_probe(int platform)
946{ 755{
947 iSeries_fixup_klimit(); 756 return PLATFORM_ISERIES_LPAR == platform;
757}
948 758
949 ppc_md.setup_arch = iSeries_setup_arch; 759struct machdep_calls __initdata iseries_md = {
950 ppc_md.get_cpuinfo = iSeries_get_cpuinfo; 760 .setup_arch = iSeries_setup_arch,
951 ppc_md.init_IRQ = iSeries_init_IRQ; 761 .show_cpuinfo = iSeries_show_cpuinfo,
952 ppc_md.get_irq = iSeries_get_irq; 762 .init_IRQ = iSeries_init_IRQ,
953 ppc_md.init_early = iSeries_init_early, 763 .get_irq = iSeries_get_irq,
764 .init_early = iSeries_init_early,
765 .pcibios_fixup = iSeries_pci_final_fixup,
766 .restart = iSeries_restart,
767 .power_off = iSeries_power_off,
768 .halt = iSeries_halt,
769 .get_boot_time = iSeries_get_boot_time,
770 .set_rtc_time = iSeries_set_rtc_time,
771 .get_rtc_time = iSeries_get_rtc_time,
772 .calibrate_decr = generic_calibrate_decr,
773 .progress = iSeries_progress,
774 .probe = iseries_probe,
775 /* XXX Implement enable_pmcs for iSeries */
776};
954 777
955 ppc_md.pcibios_fixup = iSeries_pci_final_fixup; 778struct blob {
779 unsigned char data[PAGE_SIZE];
780 unsigned long next;
781};
956 782
957 ppc_md.restart = iSeries_restart; 783struct iseries_flat_dt {
958 ppc_md.power_off = iSeries_power_off; 784 struct boot_param_header header;
959 ppc_md.halt = iSeries_halt; 785 u64 reserve_map[2];
786 struct blob dt;
787 struct blob strings;
788};
960 789
961 ppc_md.get_boot_time = iSeries_get_boot_time; 790struct iseries_flat_dt iseries_dt;
962 ppc_md.set_rtc_time = iSeries_set_rtc_time;
963 ppc_md.get_rtc_time = iSeries_get_rtc_time;
964 ppc_md.calibrate_decr = iSeries_calibrate_decr;
965 ppc_md.progress = iSeries_progress;
966 791
967 /* XXX Implement enable_pmcs for iSeries */ 792void dt_init(struct iseries_flat_dt *dt)
793{
794 dt->header.off_mem_rsvmap =
795 offsetof(struct iseries_flat_dt, reserve_map);
796 dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt);
797 dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings);
798 dt->header.totalsize = sizeof(struct iseries_flat_dt);
799 dt->header.dt_strings_size = sizeof(struct blob);
968 800
969 if (get_paca()->lppaca.shared_proc) { 801 /* There is no notion of hardware cpu id on iSeries */
970 ppc_md.idle_loop = iseries_shared_idle; 802 dt->header.boot_cpuid_phys = smp_processor_id();
971 printk(KERN_INFO "Using shared processor idle loop\n"); 803
972 } else { 804 dt->dt.next = (unsigned long)&dt->dt.data;
973 ppc_md.idle_loop = iseries_dedicated_idle; 805 dt->strings.next = (unsigned long)&dt->strings.data;
974 printk(KERN_INFO "Using dedicated idle loop\n"); 806
807 dt->header.magic = OF_DT_HEADER;
808 dt->header.version = 0x10;
809 dt->header.last_comp_version = 0x10;
810
811 dt->reserve_map[0] = 0;
812 dt->reserve_map[1] = 0;
813}
814
815void dt_check_blob(struct blob *b)
816{
817 if (b->next >= (unsigned long)&b->next) {
818 DBG("Ran out of space in flat device tree blob!\n");
819 BUG();
975 } 820 }
976} 821}
977 822
823void dt_push_u32(struct iseries_flat_dt *dt, u32 value)
824{
825 *((u32*)dt->dt.next) = value;
826 dt->dt.next += sizeof(u32);
827
828 dt_check_blob(&dt->dt);
829}
830
831void dt_push_u64(struct iseries_flat_dt *dt, u64 value)
832{
833 *((u64*)dt->dt.next) = value;
834 dt->dt.next += sizeof(u64);
835
836 dt_check_blob(&dt->dt);
837}
838
839unsigned long dt_push_bytes(struct blob *blob, char *data, int len)
840{
841 unsigned long start = blob->next - (unsigned long)blob->data;
842
843 memcpy((char *)blob->next, data, len);
844 blob->next = _ALIGN(blob->next + len, 4);
845
846 dt_check_blob(blob);
847
848 return start;
849}
850
851void dt_start_node(struct iseries_flat_dt *dt, char *name)
852{
853 dt_push_u32(dt, OF_DT_BEGIN_NODE);
854 dt_push_bytes(&dt->dt, name, strlen(name) + 1);
855}
856
857#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
858
859void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len)
860{
861 unsigned long offset;
862
863 dt_push_u32(dt, OF_DT_PROP);
864
865 /* Length of the data */
866 dt_push_u32(dt, len);
867
868 /* Put the property name in the string blob. */
869 offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1);
870
871 /* The offset of the properties name in the string blob. */
872 dt_push_u32(dt, (u32)offset);
873
874 /* The actual data. */
875 dt_push_bytes(&dt->dt, data, len);
876}
877
878void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data)
879{
880 dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for NULL */
881}
882
883void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data)
884{
885 dt_prop(dt, name, (char *)&data, sizeof(u32));
886}
887
888void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data)
889{
890 dt_prop(dt, name, (char *)&data, sizeof(u64));
891}
892
893void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
894{
895 dt_prop(dt, name, (char *)data, sizeof(u64) * n);
896}
897
898void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
899{
900 dt_prop(dt, name, NULL, 0);
901}
902
903void dt_cpus(struct iseries_flat_dt *dt)
904{
905 unsigned char buf[32];
906 unsigned char *p;
907 unsigned int i, index;
908 struct IoHriProcessorVpd *d;
909
910 /* yuck */
911 snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
912 p = strchr(buf, ' ');
913 if (!p) p = buf + strlen(buf);
914
915 dt_start_node(dt, "cpus");
916 dt_prop_u32(dt, "#address-cells", 1);
917 dt_prop_u32(dt, "#size-cells", 0);
918
919 for (i = 0; i < NR_CPUS; i++) {
920 if (paca[i].lppaca.dyn_proc_status >= 2)
921 continue;
922
923 snprintf(p, 32 - (p - buf), "@%d", i);
924 dt_start_node(dt, buf);
925
926 dt_prop_str(dt, "device_type", "cpu");
927
928 index = paca[i].lppaca.dyn_hv_phys_proc_index;
929 d = &xIoHriProcessorVpd[index];
930
931 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
932 dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
933
934 dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
935 dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
936
937 /* magic conversions to Hz copied from old code */
938 dt_prop_u32(dt, "clock-frequency",
939 ((1UL << 34) * 1000000) / d->xProcFreq);
940 dt_prop_u32(dt, "timebase-frequency",
941 ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
942
943 dt_prop_u32(dt, "reg", i);
944
945 dt_end_node(dt);
946 }
947
948 dt_end_node(dt);
949}
950
951void build_flat_dt(struct iseries_flat_dt *dt)
952{
953 u64 tmp[2];
954
955 dt_init(dt);
956
957 dt_start_node(dt, "");
958
959 dt_prop_u32(dt, "#address-cells", 2);
960 dt_prop_u32(dt, "#size-cells", 2);
961
962 /* /memory */
963 dt_start_node(dt, "memory@0");
964 dt_prop_str(dt, "name", "memory");
965 dt_prop_str(dt, "device_type", "memory");
966 tmp[0] = 0;
967 tmp[1] = systemcfg->physicalMemorySize;
968 dt_prop_u64_list(dt, "reg", tmp, 2);
969 dt_end_node(dt);
970
971 /* /chosen */
972 dt_start_node(dt, "chosen");
973 dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR);
974 dt_end_node(dt);
975
976 dt_cpus(dt);
977
978 dt_end_node(dt);
979
980 dt_push_u32(dt, OF_DT_END);
981}
982
983void * __init iSeries_early_setup(void)
984{
985 iSeries_fixup_klimit();
986
987 /*
988 * Initialize the table which translate Linux physical addresses to
989 * AS/400 absolute addresses
990 */
991 build_iSeries_Memory_Map();
992
993 build_flat_dt(&iseries_dt);
994
995 return (void *) __pa(&iseries_dt);
996}
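
iSeries_early_setup() now returns a flattened device tree that the helpers above assemble by hand from hypervisor VPD. A stand-alone sketch of the token stream one node with one property produces (the usual flat-tree token values; byte order, bounds checking and error handling are ignored for brevity):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OF_DT_BEGIN_NODE 0x1
#define OF_DT_END_NODE   0x2
#define OF_DT_PROP       0x3
#define OF_DT_END        0x9

static unsigned char dt[256], strs[64];
static size_t dt_next, strs_next;

static void push_u32(uint32_t v) { memcpy(dt + dt_next, &v, 4); dt_next += 4; }

static void push_bytes(const char *p, size_t len)
{
        memcpy(dt + dt_next, p, len);
        dt_next = (dt_next + len + 3) & ~(size_t)3;   /* _ALIGN(x, 4) */
}

static uint32_t push_string(const char *s)
{
        uint32_t off = (uint32_t)strs_next;

        strcpy((char *)strs + strs_next, s);
        strs_next += strlen(s) + 1;
        return off;
}

int main(void)
{
        uint32_t val = 2;

        /* Note: the real blob stores big-endian values; this sketch
         * keeps host byte order to stay short. */
        push_u32(OF_DT_BEGIN_NODE);              /* dt_start_node("memory@0") */
        push_bytes("memory@0", sizeof "memory@0");
        push_u32(OF_DT_PROP);                    /* dt_prop_u32("#size-cells", 2) */
        push_u32(sizeof val);                    /* property length */
        push_u32(push_string("#size-cells"));    /* offset into string blob */
        push_bytes((char *)&val, sizeof val);    /* property data */
        push_u32(OF_DT_END_NODE);
        push_u32(OF_DT_END);

        printf("struct blob: %zu bytes, strings: %zu bytes\n", dt_next, strs_next);
        return 0;
}
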
diff --git a/arch/ppc64/kernel/iSeries_setup.h b/arch/powerpc/platforms/iseries/setup.h
index c6eb29a245ac..5213044ec411 100644
--- a/arch/ppc64/kernel/iSeries_setup.h
+++ b/arch/powerpc/platforms/iseries/setup.h
@@ -2,8 +2,6 @@
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> 2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> 3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 * 4 *
5 * Module name: as400_setup.h
6 *
7 * Description: 5 * Description:
8 * Architecture- / platform-specific boot-time initialization code for 6 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and 7 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
@@ -19,7 +17,7 @@
19#ifndef __ISERIES_SETUP_H__ 17#ifndef __ISERIES_SETUP_H__
20#define __ISERIES_SETUP_H__ 18#define __ISERIES_SETUP_H__
21 19
22extern void iSeries_get_boot_time(struct rtc_time *tm); 20extern unsigned long iSeries_get_boot_time(void);
23extern int iSeries_set_rtc_time(struct rtc_time *tm); 21extern int iSeries_set_rtc_time(struct rtc_time *tm);
24extern void iSeries_get_rtc_time(struct rtc_time *tm); 22extern void iSeries_get_rtc_time(struct rtc_time *tm);
25 23
diff --git a/arch/ppc64/kernel/iSeries_smp.c b/arch/powerpc/platforms/iseries/smp.c
index f74386e31638..f720916682f6 100644
--- a/arch/ppc64/kernel/iSeries_smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -47,17 +47,17 @@
47 47
48static unsigned long iSeries_smp_message[NR_CPUS]; 48static unsigned long iSeries_smp_message[NR_CPUS];
49 49
50void iSeries_smp_message_recv( struct pt_regs * regs ) 50void iSeries_smp_message_recv(struct pt_regs *regs)
51{ 51{
52 int cpu = smp_processor_id(); 52 int cpu = smp_processor_id();
53 int msg; 53 int msg;
54 54
55 if ( num_online_cpus() < 2 ) 55 if (num_online_cpus() < 2)
56 return; 56 return;
57 57
58 for ( msg = 0; msg < 4; ++msg ) 58 for (msg = 0; msg < 4; msg++)
59 if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) ) 59 if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
60 smp_message_recv( msg, regs ); 60 smp_message_recv(msg, regs);
61} 61}
62 62
63static inline void smp_iSeries_do_message(int cpu, int msg) 63static inline void smp_iSeries_do_message(int cpu, int msg)
@@ -74,48 +74,22 @@ static void smp_iSeries_message_pass(int target, int msg)
74 smp_iSeries_do_message(target, msg); 74 smp_iSeries_do_message(target, msg);
75 else { 75 else {
76 for_each_online_cpu(i) { 76 for_each_online_cpu(i) {
77 if (target == MSG_ALL_BUT_SELF 77 if ((target == MSG_ALL_BUT_SELF) &&
78 && i == smp_processor_id()) 78 (i == smp_processor_id()))
79 continue; 79 continue;
80 smp_iSeries_do_message(i, msg); 80 smp_iSeries_do_message(i, msg);
81 } 81 }
82 } 82 }
83} 83}
84 84
85static int smp_iSeries_numProcs(void)
86{
87 unsigned np, i;
88
89 np = 0;
90 for (i=0; i < NR_CPUS; ++i) {
91 if (paca[i].lppaca.dyn_proc_status < 2) {
92 cpu_set(i, cpu_possible_map);
93 cpu_set(i, cpu_present_map);
94 cpu_set(i, cpu_sibling_map[i]);
95 ++np;
96 }
97 }
98 return np;
99}
100
101static int smp_iSeries_probe(void) 85static int smp_iSeries_probe(void)
102{ 86{
103 unsigned i; 87 return cpus_weight(cpu_possible_map);
104 unsigned np = 0;
105
106 for (i=0; i < NR_CPUS; ++i) {
107 if (paca[i].lppaca.dyn_proc_status < 2) {
108 /*paca[i].active = 1;*/
109 ++np;
110 }
111 }
112
113 return np;
114} 88}
115 89
116static void smp_iSeries_kick_cpu(int nr) 90static void smp_iSeries_kick_cpu(int nr)
117{ 91{
118 BUG_ON(nr < 0 || nr >= NR_CPUS); 92 BUG_ON((nr < 0) || (nr >= NR_CPUS));
119 93
120 /* Verify that our partition has a processor nr */ 94 /* Verify that our partition has a processor nr */
121 if (paca[nr].lppaca.dyn_proc_status >= 2) 95 if (paca[nr].lppaca.dyn_proc_status >= 2)
@@ -144,6 +118,4 @@ static struct smp_ops_t iSeries_smp_ops = {
144void __init smp_init_iSeries(void) 118void __init smp_init_iSeries(void)
145{ 119{
146 smp_ops = &iSeries_smp_ops; 120 smp_ops = &iSeries_smp_ops;
147 systemcfg->processorCount = smp_iSeries_numProcs();
148} 121}
149
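
smp_iSeries_probe() no longer builds cpu_possible_map itself; with the CPU nodes now described in the flattened device tree, it presumably relies on generic code to have populated the mask earlier and simply returns its weight. A stand-alone sketch of the population count cpus_weight() performs:

#include <stdio.h>

static int mask_weight(unsigned long mask)
{
        int n = 0;

        while (mask) {
                mask &= mask - 1;   /* clear lowest set bit */
                n++;
        }
        return n;
}

int main(void)
{
        unsigned long cpu_possible = 0xfUL;   /* CPUs 0-3 possible */

        printf("%d possible cpus\n", mask_weight(cpu_possible));
        return 0;
}
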
diff --git a/include/asm-ppc64/iSeries/ItSpCommArea.h b/arch/powerpc/platforms/iseries/spcomm_area.h
index 5535f8271c9f..6e3b685115c9 100644
--- a/include/asm-ppc64/iSeries/ItSpCommArea.h
+++ b/arch/powerpc/platforms/iseries/spcomm_area.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItSpCommArea.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -17,8 +16,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19 18
20#ifndef _ITSPCOMMAREA_H 19#ifndef _ISERIES_SPCOMM_AREA_H
21#define _ITSPCOMMAREA_H 20#define _ISERIES_SPCOMM_AREA_H
22 21
23 22
24struct SpCommArea { 23struct SpCommArea {
@@ -34,4 +33,4 @@ struct SpCommArea {
34 33
35extern struct SpCommArea xSpCommArea; 34extern struct SpCommArea xSpCommArea;
36 35
37#endif /* _ITSPCOMMAREA_H */ 36#endif /* _ISERIES_SPCOMM_AREA_H */
diff --git a/arch/ppc64/kernel/iSeries_vio.c b/arch/powerpc/platforms/iseries/vio.c
index 6b754b0c8344..c0f7d2e9153f 100644
--- a/arch/ppc64/kernel/iSeries_vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -14,6 +14,7 @@
14 14
15#include <asm/vio.h> 15#include <asm/vio.h>
16#include <asm/iommu.h> 16#include <asm/iommu.h>
17#include <asm/tce.h>
17#include <asm/abs_addr.h> 18#include <asm/abs_addr.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/iSeries/vio.h> 20#include <asm/iSeries/vio.h>
diff --git a/arch/ppc64/kernel/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 2a6c4f01c45e..c0c767bd37f1 100644
--- a/arch/ppc64/kernel/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -1,5 +1,4 @@
1/* -*- linux-c -*- 1/* -*- linux-c -*-
2 * arch/ppc64/kernel/viopath.c
3 * 2 *
4 * iSeries Virtual I/O Message Path code 3 * iSeries Virtual I/O Message Path code
5 * 4 *
@@ -7,7 +6,7 @@
7 * Ryan Arnold <ryanarn@us.ibm.com> 6 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com> 7 * Colin Devilbiss <devilbis@us.ibm.com>
9 * 8 *
10 * (C) Copyright 2000-2003 IBM Corporation 9 * (C) Copyright 2000-2005 IBM Corporation
11 * 10 *
12 * This code is used by the iSeries virtual disk, cd, 11 * This code is used by the iSeries virtual disk, cd,
13 * tape, and console to communicate with OS/400 in another 12 * tape, and console to communicate with OS/400 in another
diff --git a/include/asm-ppc64/iSeries/ItVpdAreas.h b/arch/powerpc/platforms/iseries/vpd_areas.h
index 71b3ad24f95a..601e6dd860ed 100644
--- a/include/asm-ppc64/iSeries/ItVpdAreas.h
+++ b/arch/powerpc/platforms/iseries/vpd_areas.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItVpdAreas.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _ITVPDAREAS_H 18#ifndef _ISERIES_VPD_AREAS_H
20#define _ITVPDAREAS_H 19#define _ISERIES_VPD_AREAS_H
21 20
22/* 21/*
23 * This file defines the address and length of all of the VPD area passed to 22 * This file defines the address and length of all of the VPD area passed to
@@ -86,4 +85,4 @@ struct ItVpdAreas {
86 85
87extern struct ItVpdAreas itVpdAreas; 86extern struct ItVpdAreas itVpdAreas;
88 87
89#endif /* _ITVPDAREAS_H */ 88#endif /* _ISERIES_VPD_AREAS_H */
diff --git a/arch/ppc64/kernel/iSeries_VpdInfo.c b/arch/powerpc/platforms/iseries/vpdinfo.c
index 5d921792571f..9c318849dee7 100644
--- a/arch/ppc64/kernel/iSeries_VpdInfo.c
+++ b/arch/powerpc/platforms/iseries/vpdinfo.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb 2 2001.
3 *
4 * This code gets the card location of the hardware 2 * This code gets the card location of the hardware
5 * Copyright (C) 2001 <Allan H Trautman> <IBM Corp> 3 * Copyright (C) 2001 <Allan H Trautman> <IBM Corp>
6 * Copyright (C) 2005 Stephen Rothwel, IBM Corp 4 * Copyright (C) 2005 Stephen Rothwel, IBM Corp
@@ -29,12 +27,15 @@
29#include <linux/init.h> 27#include <linux/init.h>
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/pci.h> 29#include <linux/pci.h>
30
32#include <asm/types.h> 31#include <asm/types.h>
33#include <asm/resource.h> 32#include <asm/resource.h>
34 33#include <asm/abs_addr.h>
35#include <asm/iSeries/HvCallPci.h> 34#include <asm/pci-bridge.h>
36#include <asm/iSeries/HvTypes.h> 35#include <asm/iSeries/HvTypes.h>
37#include <asm/iSeries/iSeries_pci.h> 36
37#include "pci.h"
38#include "call_pci.h"
38 39
39/* 40/*
40 * Size of Bus VPD data 41 * Size of Bus VPD data
@@ -214,7 +215,7 @@ static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent,
214 printk("PCI: Bus VPD Buffer allocation failure.\n"); 215 printk("PCI: Bus VPD Buffer allocation failure.\n");
215 return; 216 return;
216 } 217 }
217 BusVpdLen = HvCallPci_getBusVpd(bus, ISERIES_HV_ADDR(BusVpdPtr), 218 BusVpdLen = HvCallPci_getBusVpd(bus, iseries_hv_addr(BusVpdPtr),
218 BUS_VPDSIZE); 219 BUS_VPDSIZE);
219 if (BusVpdLen == 0) { 220 if (BusVpdLen == 0) {
220 printk("PCI: Bus VPD Buffer zero length.\n"); 221 printk("PCI: Bus VPD Buffer zero length.\n");
@@ -242,7 +243,8 @@ out_free:
242 */ 243 */
243void __init iSeries_Device_Information(struct pci_dev *PciDev, int count) 244void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
244{ 245{
245 struct iSeries_Device_Node *DevNode = PciDev->sysdata; 246 struct device_node *DevNode = PciDev->sysdata;
247 struct pci_dn *pdn;
246 u16 bus; 248 u16 bus;
247 u8 frame; 249 u8 frame;
248 char card[4]; 250 char card[4];
@@ -255,8 +257,9 @@ void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
255 return; 257 return;
256 } 258 }
257 259
258 bus = ISERIES_BUS(DevNode); 260 pdn = PCI_DN(DevNode);
259 subbus = ISERIES_SUBBUS(DevNode); 261 bus = pdn->busno;
262 subbus = pdn->bussubno;
260 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus), 263 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
261 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)); 264 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
262 iSeries_Get_Location_Code(bus, agent, &frame, card); 265 iSeries_Get_Location_Code(bus, agent, &frame, card);
diff --git a/arch/powerpc/platforms/maple/Makefile b/arch/powerpc/platforms/maple/Makefile
new file mode 100644
index 000000000000..1be1a993c5f5
--- /dev/null
+++ b/arch/powerpc/platforms/maple/Makefile
@@ -0,0 +1 @@
1obj-y += setup.o pci.o time.o
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
new file mode 100644
index 000000000000..0657c579b840
--- /dev/null
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -0,0 +1,12 @@
1/*
2 * Declarations for maple-specific code.
3 *
4 * Maple is the name of a PPC970 evaluation board.
5 */
6extern int maple_set_rtc_time(struct rtc_time *tm);
7extern void maple_get_rtc_time(struct rtc_time *tm);
8extern unsigned long maple_get_boot_time(void);
9extern void maple_calibrate_decr(void);
10extern void maple_pci_init(void);
11extern void maple_pcibios_fixup(void);
12extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
diff --git a/arch/ppc64/kernel/maple_pci.c b/arch/powerpc/platforms/maple/pci.c
index 1d297e0edfc0..340c21caeae2 100644
--- a/arch/ppc64/kernel/maple_pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -23,8 +23,9 @@
23#include <asm/pci-bridge.h> 23#include <asm/pci-bridge.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include <asm/iommu.h> 25#include <asm/iommu.h>
26#include <asm/ppc-pci.h>
26 27
27#include "pci.h" 28#include "maple.h"
28 29
29#ifdef DEBUG 30#ifdef DEBUG
30#define DBG(x...) printk(x) 31#define DBG(x...) printk(x)
@@ -276,7 +277,7 @@ static void __init setup_u3_agp(struct pci_controller* hose)
276{ 277{
277 /* On G5, we move AGP up to high bus number so we don't need 278 /* On G5, we move AGP up to high bus number so we don't need
278 * to reassign bus numbers for HT. If we ever have P2P bridges 279 * to reassign bus numbers for HT. If we ever have P2P bridges
279 * on AGP, we'll have to move pci_assign_all_busses to the 280 * on AGP, we'll have to move pci_assign_all_buses to the
280 * pci_controller structure so we enable it for AGP and not for 281 * pci_controller structure so we enable it for AGP and not for
281 * HT children. 282 * HT children.
282 * We hard code the address because of the different size of 283 * We hard code the address because of the different size of
@@ -360,7 +361,7 @@ static int __init add_bridge(struct device_node *dev)
360 361
361 /* Interpret the "ranges" property */ 362 /* Interpret the "ranges" property */
362 /* This also maps the I/O region and sets isa_io/mem_base */ 363 /* This also maps the I/O region and sets isa_io/mem_base */
363 pci_process_bridge_OF_ranges(hose, dev); 364 pci_process_bridge_OF_ranges(hose, dev, primary);
364 pci_setup_phb_io(hose, primary); 365 pci_setup_phb_io(hose, primary);
365 366
366 /* Fixup "bus-range" OF property */ 367 /* Fixup "bus-range" OF property */
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/powerpc/platforms/maple/setup.c
index fc0567498a3a..7ece8983a105 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/ppc64/kernel/maple_setup.c 2 * Maple (970 eval board) setup code
3 * 3 *
4 * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), 4 * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
5 * IBM Corp. 5 * IBM Corp.
@@ -59,8 +59,10 @@
59#include <asm/time.h> 59#include <asm/time.h>
60#include <asm/of_device.h> 60#include <asm/of_device.h>
61#include <asm/lmb.h> 61#include <asm/lmb.h>
62#include <asm/mpic.h>
63#include <asm/udbg.h>
62 64
63#include "mpic.h" 65#include "maple.h"
64 66
65#ifdef DEBUG 67#ifdef DEBUG
66#define DBG(fmt...) udbg_printf(fmt) 68#define DBG(fmt...) udbg_printf(fmt)
@@ -68,13 +70,6 @@
68#define DBG(fmt...) 70#define DBG(fmt...)
69#endif 71#endif
70 72
71extern int maple_set_rtc_time(struct rtc_time *tm);
72extern void maple_get_rtc_time(struct rtc_time *tm);
73extern void maple_get_boot_time(struct rtc_time *tm);
74extern void maple_calibrate_decr(void);
75extern void maple_pci_init(void);
76extern void maple_pcibios_fixup(void);
77extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
78extern void generic_find_legacy_serial_ports(u64 *physport, 73extern void generic_find_legacy_serial_ports(u64 *physport,
79 unsigned int *default_speed); 74 unsigned int *default_speed);
80 75
diff --git a/arch/ppc64/kernel/maple_time.c b/arch/powerpc/platforms/maple/time.c
index d65210abcd03..40fc07a8e606 100644
--- a/arch/ppc64/kernel/maple_time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -36,6 +36,8 @@
36#include <asm/machdep.h> 36#include <asm/machdep.h>
37#include <asm/time.h> 37#include <asm/time.h>
38 38
39#include "maple.h"
40
39#ifdef DEBUG 41#ifdef DEBUG
40#define DBG(x...) printk(x) 42#define DBG(x...) printk(x)
41#else 43#else
@@ -156,8 +158,9 @@ int maple_set_rtc_time(struct rtc_time *tm)
156 return 0; 158 return 0;
157} 159}
158 160
159void __init maple_get_boot_time(struct rtc_time *tm) 161unsigned long __init maple_get_boot_time(void)
160{ 162{
163 struct rtc_time tm;
161 struct device_node *rtcs; 164 struct device_node *rtcs;
162 165
163 rtcs = find_compatible_devices("rtc", "pnpPNP,b00"); 166 rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
@@ -170,6 +173,8 @@ void __init maple_get_boot_time(struct rtc_time *tm)
170 "legacy address (0x%x)\n", maple_rtc_addr); 173 "legacy address (0x%x)\n", maple_rtc_addr);
171 } 174 }
172 175
173 maple_get_rtc_time(tm); 176 maple_get_rtc_time(&tm);
177 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
178 tm.tm_hour, tm.tm_min, tm.tm_sec);
174} 179}
175 180
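With this change maple_get_boot_time() returns seconds since the epoch instead of filling in a struct rtc_time, so the mktime() conversion (note the 1900-based tm_year and 0-based tm_mon offsets) moves into the platform code. A minimal sketch of that conversion, with a hypothetical read_rtc helper standing in for maple_get_rtc_time():

/* Illustrative only: convert struct rtc_time to seconds since 1970-01-01. */
static unsigned long example_rtc_to_epoch(void (*read_rtc)(struct rtc_time *))
{
	struct rtc_time tm;

	read_rtc(&tm);
	return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}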
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
new file mode 100644
index 000000000000..4369676f1d54
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -0,0 +1,8 @@
1obj-y += pic.o setup.o time.o feature.o pci.o \
2 sleep.o low_i2c.o cache.o
3obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
4obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq.o
5obj-$(CONFIG_NVRAM) += nvram.o
6# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
7obj-$(CONFIG_PPC64) += nvram.o
8obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
new file mode 100644
index 000000000000..8be2f7d071f0
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -0,0 +1,202 @@
1/*
2 * Miscellaneous procedures for dealing with the PowerMac hardware.
3 * Contains support for the backlight.
4 *
5 * Copyright (C) 2000 Benjamin Herrenschmidt
6 *
7 */
8
9#include <linux/config.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/stddef.h>
13#include <linux/reboot.h>
14#include <linux/nvram.h>
15#include <linux/console.h>
16#include <asm/sections.h>
17#include <asm/ptrace.h>
18#include <asm/io.h>
19#include <asm/pgtable.h>
20#include <asm/system.h>
21#include <asm/prom.h>
22#include <asm/machdep.h>
23#include <asm/nvram.h>
24#include <asm/backlight.h>
25
26#include <linux/adb.h>
27#include <linux/pmu.h>
28
29static struct backlight_controller *backlighter;
30static void* backlighter_data;
31static int backlight_autosave;
32static int backlight_level = BACKLIGHT_MAX;
33static int backlight_enabled = 1;
34static int backlight_req_level = -1;
35static int backlight_req_enable = -1;
36
37static void backlight_callback(void *);
38static DECLARE_WORK(backlight_work, backlight_callback, NULL);
39
40void register_backlight_controller(struct backlight_controller *ctrler,
41 void *data, char *type)
42{
43 struct device_node* bk_node;
44 char *prop;
45 int valid = 0;
46
47 /* There's already a matching controller, bail out */
48 if (backlighter != NULL)
49 return;
50
51 bk_node = find_devices("backlight");
52
53#ifdef CONFIG_ADB_PMU
54 /* Special case for the old PowerBook since I can't test on it */
55 backlight_autosave = machine_is_compatible("AAPL,3400/2400")
56 || machine_is_compatible("AAPL,3500");
57 if ((backlight_autosave
58 || machine_is_compatible("AAPL,PowerBook1998")
59 || machine_is_compatible("PowerBook1,1"))
60 && !strcmp(type, "pmu"))
61 valid = 1;
62#endif
63 if (bk_node) {
64 prop = get_property(bk_node, "backlight-control", NULL);
65 if (prop && !strncmp(prop, type, strlen(type)))
66 valid = 1;
67 }
68 if (!valid)
69 return;
70 backlighter = ctrler;
71 backlighter_data = data;
72
73 if (bk_node && !backlight_autosave)
74 prop = get_property(bk_node, "bklt", NULL);
75 else
76 prop = NULL;
77 if (prop) {
78 backlight_level = ((*prop)+1) >> 1;
79 if (backlight_level > BACKLIGHT_MAX)
80 backlight_level = BACKLIGHT_MAX;
81 }
82
83#ifdef CONFIG_ADB_PMU
84 if (backlight_autosave) {
85 struct adb_request req;
86 pmu_request(&req, NULL, 2, 0xd9, 0);
87 while (!req.complete)
88 pmu_poll();
89 backlight_level = req.reply[0] >> 4;
90 }
91#endif
92 acquire_console_sem();
93 if (!backlighter->set_enable(1, backlight_level, data))
94 backlight_enabled = 1;
95 release_console_sem();
96
97 printk(KERN_INFO "Registered \"%s\" backlight controller,"
98 "level: %d/15\n", type, backlight_level);
99}
100EXPORT_SYMBOL(register_backlight_controller);
101
102void unregister_backlight_controller(struct backlight_controller
103 *ctrler, void *data)
104{
105 /* We keep the current backlight level (for now) */
106 if (ctrler == backlighter && data == backlighter_data)
107 backlighter = NULL;
108}
109EXPORT_SYMBOL(unregister_backlight_controller);
110
111static int __set_backlight_enable(int enable)
112{
113 int rc;
114
115 if (!backlighter)
116 return -ENODEV;
117 acquire_console_sem();
118 rc = backlighter->set_enable(enable, backlight_level,
119 backlighter_data);
120 if (!rc)
121 backlight_enabled = enable;
122 release_console_sem();
123 return rc;
124}
125int set_backlight_enable(int enable)
126{
127 if (!backlighter)
128 return -ENODEV;
129 backlight_req_enable = enable;
130 schedule_work(&backlight_work);
131 return 0;
132}
133
134EXPORT_SYMBOL(set_backlight_enable);
135
136int get_backlight_enable(void)
137{
138 if (!backlighter)
139 return -ENODEV;
140 return backlight_enabled;
141}
142EXPORT_SYMBOL(get_backlight_enable);
143
144static int __set_backlight_level(int level)
145{
146 int rc = 0;
147
148 if (!backlighter)
149 return -ENODEV;
150 if (level < BACKLIGHT_MIN)
151 level = BACKLIGHT_OFF;
152 if (level > BACKLIGHT_MAX)
153 level = BACKLIGHT_MAX;
154 acquire_console_sem();
155 if (backlight_enabled)
156 rc = backlighter->set_level(level, backlighter_data);
157 if (!rc)
158 backlight_level = level;
159 release_console_sem();
160 if (!rc && !backlight_autosave) {
161 level <<=1;
162 if (level & 0x10)
163 level |= 0x01;
164 // -- todo: save to property "bklt"
165 }
166 return rc;
167}
168int set_backlight_level(int level)
169{
170 if (!backlighter)
171 return -ENODEV;
172 backlight_req_level = level;
173 schedule_work(&backlight_work);
174 return 0;
175}
176
177EXPORT_SYMBOL(set_backlight_level);
178
179int get_backlight_level(void)
180{
181 if (!backlighter)
182 return -ENODEV;
183 return backlight_level;
184}
185EXPORT_SYMBOL(get_backlight_level);
186
187static void backlight_callback(void *dummy)
188{
189 int level, enable;
190
191 do {
192 level = backlight_req_level;
193 enable = backlight_req_enable;
194 mb();
195
196 if (level >= 0)
197 __set_backlight_level(level);
198 if (enable >= 0)
199 __set_backlight_enable(enable);
200 } while(cmpxchg(&backlight_req_level, level, -1) != level ||
201 cmpxchg(&backlight_req_enable, enable, -1) != enable);
202}
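backlight.c never calls the controller from the request path: set_backlight_level() and set_backlight_enable() only record the most recent request and schedule a work item, and the work handler keeps applying values until the cmpxchg() calls confirm that no newer request slipped in while it was busy. A stripped-down sketch of that coalescing pattern (hypothetical names, same idea as backlight_callback() above):

/* Illustrative only: apply at most the latest pending request. */
static int pending_level = -1;			/* -1 means "no request" */

static void apply_level(int level);		/* assumed to do the real work */

static void example_worker(void *dummy)
{
	int level;

	do {
		level = pending_level;
		mb();				/* read the request before acting on it */
		if (level >= 0)
			apply_level(level);
		/* retry if a new request was posted while we were applying */
	} while (cmpxchg(&pending_level, level, -1) != level);
}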
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
new file mode 100644
index 000000000000..fb977de6b704
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -0,0 +1,359 @@
1/*
2 * This file contains low-level cache management functions
3 * used for sleep and CPU speed changes on Apple machines.
4 * (In fact the only thing that is Apple-specific is that we assume
5 * that we can read from ROM at physical address 0xfff00000.)
6 *
7 * Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
8 * Benjamin Herrenschmidt (benh@kernel.crashing.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
17#include <linux/config.h>
18#include <asm/processor.h>
19#include <asm/ppc_asm.h>
20#include <asm/cputable.h>
21
22/*
23 * Flush and disable all data caches (dL1, L2, L3). This is used
24 * when going to sleep, when doing a PMU based cpufreq transition,
25 * or when "offlining" a CPU on SMP machines. This code is over
26 * paranoid, but I've had enough issues with various CPU revs and
27 * bugs that I decided it was worth being over-cautious
28 */
29
30_GLOBAL(flush_disable_caches)
31#ifndef CONFIG_6xx
32 blr
33#else
34BEGIN_FTR_SECTION
35 b flush_disable_745x
36END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
37BEGIN_FTR_SECTION
38 b flush_disable_75x
39END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
40 b __flush_disable_L1
41
42/* This is the code for G3 and 74[01]0 */
43flush_disable_75x:
44 mflr r10
45
46 /* Turn off EE and DR in MSR */
47 mfmsr r11
48 rlwinm r0,r11,0,~MSR_EE
49 rlwinm r0,r0,0,~MSR_DR
50 sync
51 mtmsr r0
52 isync
53
54 /* Stop DST streams */
55BEGIN_FTR_SECTION
56 DSSALL
57 sync
58END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
59
60 /* Stop DPM */
61 mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */
62 rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
63 sync
64 mtspr SPRN_HID0,r4 /* Disable DPM */
65 sync
66
67 /* Disp-flush L1. We have a weird problem here that I never
68 * totally figured out. On 750FX, using the ROM for the flush
69 * results in a non-working flush. We use that workaround for
70 * now until I finally understand what's going on. --BenH
71 */
72
73 /* ROM base by default */
74 lis r4,0xfff0
75 mfpvr r3
76 srwi r3,r3,16
77 cmplwi cr0,r3,0x7000
78 bne+ 1f
79 /* RAM base on 750FX */
80 li r4,0
811: li r4,0x4000
82 mtctr r4
831: lwz r0,0(r4)
84 addi r4,r4,32
85 bdnz 1b
86 sync
87 isync
88
89 /* Disable / invalidate / enable L1 data */
90 mfspr r3,SPRN_HID0
91 rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE)
92 mtspr SPRN_HID0,r3
93 sync
94 isync
95 ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
96 sync
97 isync
98 mtspr SPRN_HID0,r3
99 xori r3,r3,(HID0_DCI|HID0_ICFI)
100 mtspr SPRN_HID0,r3
101 sync
102
103 /* Get the current enable bit of the L2CR into r4 */
104 mfspr r5,SPRN_L2CR
105 /* Set to data-only (pre-745x bit) */
106 oris r3,r5,L2CR_L2DO@h
107 b 2f
108 /* When disabling L2, code must be in L1 */
109 .balign 32
1101: mtspr SPRN_L2CR,r3
1113: sync
112 isync
113 b 1f
1142: b 3f
1153: sync
116 isync
117 b 1b
1181: /* disp-flush L2. The interesting thing here is that the L2 can be
119 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
120 * but that is probably fine. We disp-flush over 4Mb to be safe
121 */
122 lis r4,2
123 mtctr r4
124 lis r4,0xfff0
1251: lwz r0,0(r4)
126 addi r4,r4,32
127 bdnz 1b
128 sync
129 isync
130 lis r4,2
131 mtctr r4
132 lis r4,0xfff0
1331: dcbf 0,r4
134 addi r4,r4,32
135 bdnz 1b
136 sync
137 isync
138
139 /* now disable L2 */
140 rlwinm r5,r5,0,~L2CR_L2E
141 b 2f
142 /* When disabling L2, code must be in L1 */
143 .balign 32
1441: mtspr SPRN_L2CR,r5
1453: sync
146 isync
147 b 1f
1482: b 3f
1493: sync
150 isync
151 b 1b
1521: sync
153 isync
154 /* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
155 oris r4,r5,L2CR_L2I@h
156 mtspr SPRN_L2CR,r4
157 sync
158 isync
159
160 /* Wait for the invalidation to complete */
1611: mfspr r3,SPRN_L2CR
162 rlwinm. r0,r3,0,31,31
163 bne 1b
164
165 /* Clear L2I */
166 xoris r4,r4,L2CR_L2I@h
167 sync
168 mtspr SPRN_L2CR,r4
169 sync
170
171 /* now disable the L1 data cache */
172 mfspr r0,SPRN_HID0
173 rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE)
174 mtspr SPRN_HID0,r0
175 sync
176 isync
177
178 /* Restore HID0[DPM] to whatever it was before */
179 sync
180 mfspr r0,SPRN_HID0
181 rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */
182 mtspr SPRN_HID0,r0
183 sync
184
185 /* restore DR and EE */
186 sync
187 mtmsr r11
188 isync
189
190 mtlr r10
191 blr
192
193/* This code is for 745x processors */
194flush_disable_745x:
195 /* Turn off EE and DR in MSR */
196 mfmsr r11
197 rlwinm r0,r11,0,~MSR_EE
198 rlwinm r0,r0,0,~MSR_DR
199 sync
200 mtmsr r0
201 isync
202
203 /* Stop prefetch streams */
204 DSSALL
205 sync
206
207 /* Disable L2 prefetching */
208 mfspr r0,SPRN_MSSCR0
209 rlwinm r0,r0,0,0,29
210 mtspr SPRN_MSSCR0,r0
211 sync
212 isync
213 lis r4,0
214 dcbf 0,r4
215 dcbf 0,r4
216 dcbf 0,r4
217 dcbf 0,r4
218 dcbf 0,r4
219 dcbf 0,r4
220 dcbf 0,r4
221 dcbf 0,r4
222
223 /* Due to a bug with the HW flush on some CPU revs, we occasionally
224 * experience data corruption. I'm adding a displacement flush along
225 * with a dcbf loop over a few Mb to "help". The problem isn't totally
226 * fixed by this in theory, but at least, in practice, I couldn't reproduce
227 * it even with a big hammer...
228 */
229
230 lis r4,0x0002
231 mtctr r4
232 li r4,0
2331:
234 lwz r0,0(r4)
235 addi r4,r4,32 /* Go to start of next cache line */
236 bdnz 1b
237 isync
238
239 /* Now, flush the first 4MB of memory */
240 lis r4,0x0002
241 mtctr r4
242 li r4,0
243 sync
2441:
245 dcbf 0,r4
246 addi r4,r4,32 /* Go to start of next cache line */
247 bdnz 1b
248
249 /* Flush and disable the L1 data cache */
250 mfspr r6,SPRN_LDSTCR
251 lis r3,0xfff0 /* read from ROM for displacement flush */
252 li r4,0xfe /* start with only way 0 unlocked */
253 li r5,128 /* 128 lines in each way */
2541: mtctr r5
255 rlwimi r6,r4,0,24,31
256 mtspr SPRN_LDSTCR,r6
257 sync
258 isync
2592: lwz r0,0(r3) /* touch each cache line */
260 addi r3,r3,32
261 bdnz 2b
262 rlwinm r4,r4,1,24,30 /* move on to the next way */
263 ori r4,r4,1
264 cmpwi r4,0xff /* all done? */
265 bne 1b
266 /* now unlock the L1 data cache */
267 li r4,0
268 rlwimi r6,r4,0,24,31
269 sync
270 mtspr SPRN_LDSTCR,r6
271 sync
272 isync
273
274 /* Flush the L2 cache using the hardware assist */
275 mfspr r3,SPRN_L2CR
276 cmpwi r3,0 /* check if it is enabled first */
277 bge 4f
278 oris r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
279 b 2f
280 /* When disabling/locking L2, code must be in L1 */
281 .balign 32
2821: mtspr SPRN_L2CR,r0 /* lock the L2 cache */
2833: sync
284 isync
285 b 1f
2862: b 3f
2873: sync
288 isync
289 b 1b
2901: sync
291 isync
292 ori r0,r3,L2CR_L2HWF_745x
293 sync
294 mtspr SPRN_L2CR,r0 /* set the hardware flush bit */
2953: mfspr r0,SPRN_L2CR /* wait for it to go to 0 */
296 andi. r0,r0,L2CR_L2HWF_745x
297 bne 3b
298 sync
299 rlwinm r3,r3,0,~L2CR_L2E
300 b 2f
301 /* When disabling L2, code must be in L1 */
302 .balign 32
3031: mtspr SPRN_L2CR,r3 /* disable the L2 cache */
3043: sync
305 isync
306 b 1f
3072: b 3f
3083: sync
309 isync
310 b 1b
3111: sync
312 isync
313 oris r4,r3,L2CR_L2I@h
314 mtspr SPRN_L2CR,r4
315 sync
316 isync
3171: mfspr r4,SPRN_L2CR
318 andis. r0,r4,L2CR_L2I@h
319 bne 1b
320 sync
321
322BEGIN_FTR_SECTION
323 /* Flush the L3 cache using the hardware assist */
3244: mfspr r3,SPRN_L3CR
325 cmpwi r3,0 /* check if it is enabled */
326 bge 6f
327 oris r0,r3,L3CR_L3IO@h
328 ori r0,r0,L3CR_L3DO
329 sync
330 mtspr SPRN_L3CR,r0 /* lock the L3 cache */
331 sync
332 isync
333 ori r0,r0,L3CR_L3HWF
334 sync
335 mtspr SPRN_L3CR,r0 /* set the hardware flush bit */
3365: mfspr r0,SPRN_L3CR /* wait for it to go to zero */
337 andi. r0,r0,L3CR_L3HWF
338 bne 5b
339 rlwinm r3,r3,0,~L3CR_L3E
340 sync
341 mtspr SPRN_L3CR,r3 /* disable the L3 cache */
342 sync
343 ori r4,r3,L3CR_L3I
344 mtspr SPRN_L3CR,r4
3451: mfspr r4,SPRN_L3CR
346 andi. r0,r4,L3CR_L3I
347 bne 1b
348 sync
349END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
350
3516: mfspr r0,SPRN_HID0 /* now disable the L1 data cache */
352 rlwinm r0,r0,0,~HID0_DCE
353 mtspr SPRN_HID0,r0
354 sync
355 isync
356 mtmsr r11 /* restore DR and EE */
357 isync
358 blr
359#endif /* CONFIG_6xx */
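cache.S flushes mostly by displacement: it reads a region at least as large as the cache, one 32-byte line at a time, so every dirty line is evicted, and then (on 745x) backs that up with a dcbf pass plus the hardware flush bits in L2CR/L3CR. A rough C rendering of the displacement-flush idea, for illustration only; the real sequence has to execute from L1 with data translation off, which is why it lives in assembly:

/* Illustrative only: displacement-flush by touching 4 MB at cache-line stride. */
#define FLUSH_SPAN	(4 * 1024 * 1024)
#define LINE_SIZE	32

static void example_displacement_flush(volatile const char *safe_region)
{
	unsigned long off;

	for (off = 0; off < FLUSH_SPAN; off += LINE_SIZE)
		(void)safe_region[off];		/* each load evicts a dirty line */
	for (off = 0; off < FLUSH_SPAN; off += LINE_SIZE)
		asm volatile("dcbf 0,%0" : : "r" (safe_region + off) : "memory");
	asm volatile("sync; isync" : : : "memory");
}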
diff --git a/arch/powerpc/platforms/powermac/cpufreq.c b/arch/powerpc/platforms/powermac/cpufreq.c
new file mode 100644
index 000000000000..c47f8b69725c
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/cpufreq.c
@@ -0,0 +1,726 @@
1/*
2 * arch/ppc/platforms/pmac_cpufreq.c
3 *
4 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * TODO: Need a big cleanup here. Basically, we need to have different
12 * cpufreq_driver structures for the different types of HW instead of the
13 * current mess. We also need to better deal with the detection of the
14 * type of machine.
15 *
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/kernel.h>
23#include <linux/delay.h>
24#include <linux/sched.h>
25#include <linux/adb.h>
26#include <linux/pmu.h>
27#include <linux/slab.h>
28#include <linux/cpufreq.h>
29#include <linux/init.h>
30#include <linux/sysdev.h>
31#include <linux/i2c.h>
32#include <linux/hardirq.h>
33#include <asm/prom.h>
34#include <asm/machdep.h>
35#include <asm/irq.h>
36#include <asm/pmac_feature.h>
37#include <asm/mmu_context.h>
38#include <asm/sections.h>
39#include <asm/cputable.h>
40#include <asm/time.h>
41#include <asm/system.h>
42#include <asm/mpic.h>
43#include <asm/keylargo.h>
44
45/* WARNING !!! This will cause calibrate_delay() to be called,
46 * but this is an __init function ! So you MUST go edit
47 * init/main.c to make it non-init before enabling DEBUG_FREQ
48 */
49#undef DEBUG_FREQ
50
51/*
52 * There is a problem with the core cpufreq code on SMP kernels:
53 * it won't recalculate the Bogomips properly
54 */
55#ifdef CONFIG_SMP
56#warning "WARNING, CPUFREQ not recommended on SMP kernels"
57#endif
58
59extern void low_choose_7447a_dfs(int dfs);
60extern void low_choose_750fx_pll(int pll);
61extern void low_sleep_handler(void);
62
63/*
64 * Currently, PowerMac cpufreq supports only high & low frequencies
65 * that are set by the firmware
66 */
67static unsigned int low_freq;
68static unsigned int hi_freq;
69static unsigned int cur_freq;
70static unsigned int sleep_freq;
71
72/*
73 * Different models use different mechanisms to switch the frequency
74 */
75static int (*set_speed_proc)(int low_speed);
76static unsigned int (*get_speed_proc)(void);
77
78/*
79 * Some definitions used by the various speedprocs
80 */
81static u32 voltage_gpio;
82static u32 frequency_gpio;
83static u32 slew_done_gpio;
84static int no_schedule;
85static int has_cpu_l2lve;
86static int is_pmu_based;
87
88/* There are only two frequency states for each processor. Values
89 * are in kHz for the time being.
90 */
91#define CPUFREQ_HIGH 0
92#define CPUFREQ_LOW 1
93
94static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
95 {CPUFREQ_HIGH, 0},
96 {CPUFREQ_LOW, 0},
97 {0, CPUFREQ_TABLE_END},
98};
99
100static struct freq_attr* pmac_cpu_freqs_attr[] = {
101 &cpufreq_freq_attr_scaling_available_freqs,
102 NULL,
103};
104
105static inline void local_delay(unsigned long ms)
106{
107 if (no_schedule)
108 mdelay(ms);
109 else
110 msleep(ms);
111}
112
113#ifdef DEBUG_FREQ
114static inline void debug_calc_bogomips(void)
115{
116 /* This will cause a recalc of bogomips and display the
117 * result. We backup/restore the value to avoid affecting the
118 * core cpufreq framework's own calculation.
119 */
120 extern void calibrate_delay(void);
121
122 unsigned long save_lpj = loops_per_jiffy;
123 calibrate_delay();
124 loops_per_jiffy = save_lpj;
125}
126#endif /* DEBUG_FREQ */
127
128/* Switch CPU speed under 750FX CPU control
129 */
130static int cpu_750fx_cpu_speed(int low_speed)
131{
132 u32 hid2;
133
134 if (low_speed == 0) {
135 /* ramping up, set voltage first */
136 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
137 /* Make sure we sleep for at least 1ms */
138 local_delay(10);
139
140 /* tweak L2 for high voltage */
141 if (has_cpu_l2lve) {
142 hid2 = mfspr(SPRN_HID2);
143 hid2 &= ~0x2000;
144 mtspr(SPRN_HID2, hid2);
145 }
146 }
147#ifdef CONFIG_6xx
148 low_choose_750fx_pll(low_speed);
149#endif
150 if (low_speed == 1) {
151 /* tweak L2 for low voltage */
152 if (has_cpu_l2lve) {
153 hid2 = mfspr(SPRN_HID2);
154 hid2 |= 0x2000;
155 mtspr(SPRN_HID2, hid2);
156 }
157
158 /* ramping down, set voltage last */
159 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
160 local_delay(10);
161 }
162
163 return 0;
164}
165
166static unsigned int cpu_750fx_get_cpu_speed(void)
167{
168 if (mfspr(SPRN_HID1) & HID1_PS)
169 return low_freq;
170 else
171 return hi_freq;
172}
173
174/* Switch CPU speed using DFS */
175static int dfs_set_cpu_speed(int low_speed)
176{
177 if (low_speed == 0) {
178 /* ramping up, set voltage first */
179 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
180 /* Make sure we sleep for at least 1ms */
181 local_delay(1);
182 }
183
184 /* set frequency */
185#ifdef CONFIG_6xx
186 low_choose_7447a_dfs(low_speed);
187#endif
188 udelay(100);
189
190 if (low_speed == 1) {
191 /* ramping down, set voltage last */
192 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
193 local_delay(1);
194 }
195
196 return 0;
197}
198
199static unsigned int dfs_get_cpu_speed(void)
200{
201 if (mfspr(SPRN_HID1) & HID1_DFS)
202 return low_freq;
203 else
204 return hi_freq;
205}
206
207
208/* Switch CPU speed using slewing GPIOs
209 */
210static int gpios_set_cpu_speed(int low_speed)
211{
212 int gpio, timeout = 0;
213
214 /* If ramping up, set voltage first */
215 if (low_speed == 0) {
216 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
217 /* Delay is way too big but it's ok, we schedule */
218 local_delay(10);
219 }
220
221 /* Set frequency */
222 gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
223 if (low_speed == ((gpio & 0x01) == 0))
224 goto skip;
225
226 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
227 low_speed ? 0x04 : 0x05);
228 udelay(200);
229 do {
230 if (++timeout > 100)
231 break;
232 local_delay(1);
233 gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
234 } while((gpio & 0x02) == 0);
235 skip:
236 /* If ramping down, set voltage last */
237 if (low_speed == 1) {
238 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
239 /* Delay is way too big but it's ok, we schedule */
240 local_delay(10);
241 }
242
243#ifdef DEBUG_FREQ
244 debug_calc_bogomips();
245#endif
246
247 return 0;
248}
249
250/* Switch CPU speed under PMU control
251 */
252static int pmu_set_cpu_speed(int low_speed)
253{
254 struct adb_request req;
255 unsigned long save_l2cr;
256 unsigned long save_l3cr;
257 unsigned int pic_prio;
258 unsigned long flags;
259
260 preempt_disable();
261
262#ifdef DEBUG_FREQ
263 printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
264#endif
265 pmu_suspend();
266
267 /* Disable all interrupt sources on openpic */
268 pic_prio = mpic_cpu_get_priority();
269 mpic_cpu_set_priority(0xf);
270
271 /* Make sure the decrementer won't interrupt us */
272 asm volatile("mtdec %0" : : "r" (0x7fffffff));
273 /* Make sure any pending DEC interrupt occurring while we did
274 * the above didn't re-enable the DEC */
275 mb();
276 asm volatile("mtdec %0" : : "r" (0x7fffffff));
277
278 /* We can now disable MSR_EE */
279 local_irq_save(flags);
280
281 /* Give up the FPU & vec */
282 enable_kernel_fp();
283
284#ifdef CONFIG_ALTIVEC
285 if (cpu_has_feature(CPU_FTR_ALTIVEC))
286 enable_kernel_altivec();
287#endif /* CONFIG_ALTIVEC */
288
289 /* Save & disable L2 and L3 caches */
290 save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
291 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
292
293 /* Send the new speed command. My assumption is that this command
294 * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
295 */
296 pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
297 while (!req.complete)
298 pmu_poll();
299
300 /* Prepare the northbridge for the speed transition */
301 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1);
302
303 /* Call low level code to backup CPU state and recover from
304 * hardware reset
305 */
306 low_sleep_handler();
307
308 /* Restore the northbridge */
309 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0);
310
311 /* Restore L2 cache */
312 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
313 _set_L2CR(save_l2cr);
314 /* Restore L3 cache */
315 if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
316 _set_L3CR(save_l3cr);
317
318 /* Restore userland MMU context */
319 set_context(current->active_mm->context, current->active_mm->pgd);
320
321#ifdef DEBUG_FREQ
322 printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
323#endif
324
325 /* Restore low level PMU operations */
326 pmu_unlock();
327
328 /* Restore decrementer */
329 wakeup_decrementer();
330
331 /* Restore interrupts */
332 mpic_cpu_set_priority(pic_prio);
333
334 /* Let interrupts flow again ... */
335 local_irq_restore(flags);
336
337#ifdef DEBUG_FREQ
338 debug_calc_bogomips();
339#endif
340
341 pmu_resume();
342
343 preempt_enable();
344
345 return 0;
346}
347
348static int do_set_cpu_speed(int speed_mode, int notify)
349{
350 struct cpufreq_freqs freqs;
351 unsigned long l3cr;
352 static unsigned long prev_l3cr;
353
354 freqs.old = cur_freq;
355 freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
356 freqs.cpu = smp_processor_id();
357
358 if (freqs.old == freqs.new)
359 return 0;
360
361 if (notify)
362 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
363 if (speed_mode == CPUFREQ_LOW &&
364 cpu_has_feature(CPU_FTR_L3CR)) {
365 l3cr = _get_L3CR();
366 if (l3cr & L3CR_L3E) {
367 prev_l3cr = l3cr;
368 _set_L3CR(0);
369 }
370 }
371 set_speed_proc(speed_mode == CPUFREQ_LOW);
372 if (speed_mode == CPUFREQ_HIGH &&
373 cpu_has_feature(CPU_FTR_L3CR)) {
374 l3cr = _get_L3CR();
375 if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
376 _set_L3CR(prev_l3cr);
377 }
378 if (notify)
379 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
380 cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
381
382 return 0;
383}
384
385static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
386{
387 return cur_freq;
388}
389
390static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
391{
392 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
393}
394
395static int pmac_cpufreq_target( struct cpufreq_policy *policy,
396 unsigned int target_freq,
397 unsigned int relation)
398{
399 unsigned int newstate = 0;
400
401 if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
402 target_freq, relation, &newstate))
403 return -EINVAL;
404
405 return do_set_cpu_speed(newstate, 1);
406}
407
408unsigned int pmac_get_one_cpufreq(int i)
409{
410 /* Supports only one CPU for now */
411 return (i == 0) ? cur_freq : 0;
412}
413
414static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
415{
416 if (policy->cpu != 0)
417 return -ENODEV;
418
419 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
420 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
421 policy->cur = cur_freq;
422
423 cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
424 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
425}
426
427static u32 read_gpio(struct device_node *np)
428{
429 u32 *reg = (u32 *)get_property(np, "reg", NULL);
430 u32 offset;
431
432 if (reg == NULL)
433 return 0;
434 /* That works for all keylargos but shall be fixed properly
435 * some day... The problem is that it seems we can't rely
436 * on the "reg" property of the GPIO nodes, they are either
437 * relative to the base of KeyLargo or to the base of the
438 * GPIO space, and the device-tree doesn't help.
439 */
440 offset = *reg;
441 if (offset < KEYLARGO_GPIO_LEVELS0)
442 offset += KEYLARGO_GPIO_LEVELS0;
443 return offset;
444}
445
446static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
447{
448 /* Ok, this could be made a bit smarter, but let's be robust for now. We
449 * always force a speed change to high speed before sleep, to make sure
450 * we have appropriate voltage and/or bus speed for the wakeup process,
451 * and to make sure our loops_per_jiffy value is "good enough", that is, will
452 * not cause too-short delays if we sleep in low speed and wake in high
453 * speed.
454 */
455 no_schedule = 1;
456 sleep_freq = cur_freq;
457 if (cur_freq == low_freq && !is_pmu_based)
458 do_set_cpu_speed(CPUFREQ_HIGH, 0);
459 return 0;
460}
461
462static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
463{
464 /* If we resume, first check if we have a get() function */
465 if (get_speed_proc)
466 cur_freq = get_speed_proc();
467 else
468 cur_freq = 0;
469
470 /* We don't, hrm... we don't really know our speed here, best
471 * is that we force a switch to whatever it was, which is
472 * probably high speed due to our suspend() routine
473 */
474 do_set_cpu_speed(sleep_freq == low_freq ?
475 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
476
477 no_schedule = 0;
478 return 0;
479}
480
481static struct cpufreq_driver pmac_cpufreq_driver = {
482 .verify = pmac_cpufreq_verify,
483 .target = pmac_cpufreq_target,
484 .get = pmac_cpufreq_get_speed,
485 .init = pmac_cpufreq_cpu_init,
486 .suspend = pmac_cpufreq_suspend,
487 .resume = pmac_cpufreq_resume,
488 .flags = CPUFREQ_PM_NO_WARN,
489 .attr = pmac_cpu_freqs_attr,
490 .name = "powermac",
491 .owner = THIS_MODULE,
492};
493
494
495static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
496{
497 struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
498 "voltage-gpio");
499 struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
500 "frequency-gpio");
501 struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
502 "slewing-done");
503 u32 *value;
504
505 /*
506 * Check to see if it's GPIO driven or PMU only
507 *
508 * The way we extract the GPIO address is slightly hackish, but it
509 * works well enough for now. We need to abstract the whole GPIO
510 * stuff sooner or later anyway
511 */
512
513 if (volt_gpio_np)
514 voltage_gpio = read_gpio(volt_gpio_np);
515 if (freq_gpio_np)
516 frequency_gpio = read_gpio(freq_gpio_np);
517 if (slew_done_gpio_np)
518 slew_done_gpio = read_gpio(slew_done_gpio_np);
519
520 /* If we use the frequency GPIOs, calculate the min/max speeds based
521 * on the bus frequencies
522 */
523 if (frequency_gpio && slew_done_gpio) {
524 int lenp, rc;
525 u32 *freqs, *ratio;
526
527 freqs = (u32 *)get_property(cpunode, "bus-frequencies", &lenp);
528 lenp /= sizeof(u32);
529 if (freqs == NULL || lenp != 2) {
530 printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
531 return 1;
532 }
533 ratio = (u32 *)get_property(cpunode, "processor-to-bus-ratio*2", NULL);
534 if (ratio == NULL) {
535 printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
536 return 1;
537 }
538
539 /* Get the min/max bus frequencies */
540 low_freq = min(freqs[0], freqs[1]);
541 hi_freq = max(freqs[0], freqs[1]);
542
543 /* Grrrr.. It _seems_ that the device-tree is lying about the low bus
544 * frequency: it claims it is around 84MHz on some models while
545 * it appears to be approx. 101MHz on all. Let's hack around here...
546 * fortunately, we don't need to be too precise
547 */
548 if (low_freq < 98000000)
549 low_freq = 101000000;
550
551 /* Convert those to CPU core clocks */
552 low_freq = (low_freq * (*ratio)) / 2000;
553 hi_freq = (hi_freq * (*ratio)) / 2000;
554
555 /* Now that we have the frequencies, we read the GPIO to see what our current
556 * speed is
557 */
558 rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
559 cur_freq = (rc & 0x01) ? hi_freq : low_freq;
560
561 set_speed_proc = gpios_set_cpu_speed;
562 return 1;
563 }
564
565 /* If we use the PMU, look for the min & max frequencies in the
566 * device-tree
567 */
568 value = (u32 *)get_property(cpunode, "min-clock-frequency", NULL);
569 if (!value)
570 return 1;
571 low_freq = (*value) / 1000;
572 /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
573 * here */
574 if (low_freq < 100000)
575 low_freq *= 10;
576
577 value = (u32 *)get_property(cpunode, "max-clock-frequency", NULL);
578 if (!value)
579 return 1;
580 hi_freq = (*value) / 1000;
581 set_speed_proc = pmu_set_cpu_speed;
582 is_pmu_based = 1;
583
584 return 0;
585}
586
587static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
588{
589 struct device_node *volt_gpio_np;
590
591 if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
592 return 1;
593
594 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
595 if (volt_gpio_np)
596 voltage_gpio = read_gpio(volt_gpio_np);
597 if (!voltage_gpio){
598 printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
599 return 1;
600 }
601
602 /* OF only reports the high frequency */
603 hi_freq = cur_freq;
604 low_freq = cur_freq/2;
605
606 /* Read actual frequency from CPU */
607 cur_freq = dfs_get_cpu_speed();
608 set_speed_proc = dfs_set_cpu_speed;
609 get_speed_proc = dfs_get_cpu_speed;
610
611 return 0;
612}
613
614static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
615{
616 struct device_node *volt_gpio_np;
617 u32 pvr, *value;
618
619 if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
620 return 1;
621
622 hi_freq = cur_freq;
623 value = (u32 *)get_property(cpunode, "reduced-clock-frequency", NULL);
624 if (!value)
625 return 1;
626 low_freq = (*value) / 1000;
627
628 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
629 if (volt_gpio_np)
630 voltage_gpio = read_gpio(volt_gpio_np);
631
632 pvr = mfspr(SPRN_PVR);
633 has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
634
635 set_speed_proc = cpu_750fx_cpu_speed;
636 get_speed_proc = cpu_750fx_get_cpu_speed;
637 cur_freq = cpu_750fx_get_cpu_speed();
638
639 return 0;
640}
641
642/* Currently, we support the following machines:
643 *
644 * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
645 * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
646 * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
647 * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
648 * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
649 * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
650 * - Recent MacRISC3 laptops
651 * - All new machines with 7447A CPUs
652 */
653static int __init pmac_cpufreq_setup(void)
654{
655 struct device_node *cpunode;
656 u32 *value;
657
658 if (strstr(cmd_line, "nocpufreq"))
659 return 0;
660
661 /* Assume only one CPU */
662 cpunode = find_type_devices("cpu");
663 if (!cpunode)
664 goto out;
665
666 /* Get current cpu clock freq */
667 value = (u32 *)get_property(cpunode, "clock-frequency", NULL);
668 if (!value)
669 goto out;
670 cur_freq = (*value) / 1000;
671
672 /* Check for 7447A based MacRISC3 */
673 if (machine_is_compatible("MacRISC3") &&
674 get_property(cpunode, "dynamic-power-step", NULL) &&
675 PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
676 pmac_cpufreq_init_7447A(cpunode);
677 /* Check for other MacRISC3 machines */
678 } else if (machine_is_compatible("PowerBook3,4") ||
679 machine_is_compatible("PowerBook3,5") ||
680 machine_is_compatible("MacRISC3")) {
681 pmac_cpufreq_init_MacRISC3(cpunode);
682 /* Else check for iBook2 500/600 */
683 } else if (machine_is_compatible("PowerBook4,1")) {
684 hi_freq = cur_freq;
685 low_freq = 400000;
686 set_speed_proc = pmu_set_cpu_speed;
687 is_pmu_based = 1;
688 }
689 /* Else check for TiPb 550 */
690 else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
691 hi_freq = cur_freq;
692 low_freq = 500000;
693 set_speed_proc = pmu_set_cpu_speed;
694 is_pmu_based = 1;
695 }
696 /* Else check for TiPb 400 & 500 */
697 else if (machine_is_compatible("PowerBook3,2")) {
698 /* We only know about the 400 MHz and the 500Mhz model
699 * they both have 300 MHz as low frequency
700 */
701 if (cur_freq < 350000 || cur_freq > 550000)
702 goto out;
703 hi_freq = cur_freq;
704 low_freq = 300000;
705 set_speed_proc = pmu_set_cpu_speed;
706 is_pmu_based = 1;
707 }
708 /* Else check for 750FX */
709 else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
710 pmac_cpufreq_init_750FX(cpunode);
711out:
712 if (set_speed_proc == NULL)
713 return -ENODEV;
714
715 pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
716 pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
717
718 printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
719 printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
720 low_freq/1000, hi_freq/1000, cur_freq/1000);
721
722 return cpufreq_register_driver(&pmac_cpufreq_driver);
723}
724
725module_init(pmac_cpufreq_setup);
726
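The driver above models each machine as exactly two operating points and leans on the generic frequency-table helpers for the rest: verify() clamps the policy against the table and target() maps any requested frequency onto the high or low entry before calling the platform-specific switch routine. A condensed sketch of that pattern (hypothetical names, using the same helpers as the code above):

/* Illustrative only: a two-state frequency table driven by the generic helpers. */
enum { IDX_HIGH = 0, IDX_LOW = 1 };

static struct cpufreq_frequency_table example_freqs[] = {
	{IDX_HIGH, 0},			/* .frequency filled in at init, in kHz */
	{IDX_LOW, 0},
	{0, CPUFREQ_TABLE_END},
};

static int example_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, example_freqs);
}

static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	unsigned int idx = 0;

	if (cpufreq_frequency_table_target(policy, example_freqs,
					   target_freq, relation, &idx))
		return -EINVAL;
	/* idx is now IDX_HIGH or IDX_LOW; hand it to the platform switch code */
	return 0;
}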
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
new file mode 100644
index 000000000000..10f1d942c661
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -0,0 +1,3063 @@
1/*
2 * arch/ppc/platforms/pmac_feature.c
3 *
4 * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
5 * Ben. Herrenschmidt (benh@kernel.crashing.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * TODO:
13 *
14 * - Replace mdelay with some schedule loop if possible
15 * - Shorten some obfuscated delays on some routines (like modem
16 * power)
17 * - Refcount some clocks (see darwin)
18 * - Split split split...
19 *
20 */
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28#include <linux/adb.h>
29#include <linux/pmu.h>
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <asm/sections.h>
33#include <asm/errno.h>
34#include <asm/ohare.h>
35#include <asm/heathrow.h>
36#include <asm/keylargo.h>
37#include <asm/uninorth.h>
38#include <asm/io.h>
39#include <asm/prom.h>
40#include <asm/machdep.h>
41#include <asm/pmac_feature.h>
42#include <asm/dbdma.h>
43#include <asm/pci-bridge.h>
44#include <asm/pmac_low_i2c.h>
45
46#undef DEBUG_FEATURE
47
48#ifdef DEBUG_FEATURE
49#define DBG(fmt...) printk(KERN_DEBUG fmt)
50#else
51#define DBG(fmt...)
52#endif
53
54#ifdef CONFIG_6xx
55extern int powersave_lowspeed;
56#endif
57
58extern int powersave_nap;
59extern struct device_node *k2_skiplist[2];
60
61
62/*
63 * We use a single global lock to protect accesses. Each driver has
64 * to take care of its own locking
65 */
66static DEFINE_SPINLOCK(feature_lock);
67
68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
70
71
72/*
73 * Instances of some macio stuff
74 */
75struct macio_chip macio_chips[MAX_MACIO_CHIPS];
76
77struct macio_chip *macio_find(struct device_node *child, int type)
78{
79 while(child) {
80 int i;
81
82 for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
83 if (child == macio_chips[i].of_node &&
84 (!type || macio_chips[i].type == type))
85 return &macio_chips[i];
86 child = child->parent;
87 }
88 return NULL;
89}
90EXPORT_SYMBOL_GPL(macio_find);
91
92static const char *macio_names[] =
93{
94 "Unknown",
95 "Grand Central",
96 "OHare",
97 "OHareII",
98 "Heathrow",
99 "Gatwick",
100 "Paddington",
101 "Keylargo",
102 "Pangea",
103 "Intrepid",
104 "K2"
105};
106
107
108
109/*
110 * Uninorth reg. access. Note that Uni-N regs are big endian
111 */
112
113#define UN_REG(r) (uninorth_base + ((r) >> 2))
114#define UN_IN(r) (in_be32(UN_REG(r)))
115#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
118
119static struct device_node *uninorth_node;
120static u32 __iomem *uninorth_base;
121static u32 uninorth_rev;
122static int uninorth_u3;
123static void __iomem *u3_ht;
124
125/*
126 * For each motherboard family, we have a table of function pointers
127 * that handle the various features.
128 */
129
130typedef long (*feature_call)(struct device_node *node, long param, long value);
131
132struct feature_table_entry {
133 unsigned int selector;
134 feature_call function;
135};
136
137struct pmac_mb_def
138{
139 const char* model_string;
140 const char* model_name;
141 int model_id;
142 struct feature_table_entry* features;
143 unsigned long board_flags;
144};
145static struct pmac_mb_def pmac_mb;
146
147/*
148 * Here are the chip specific feature functions
149 */
150
151static inline int simple_feature_tweak(struct device_node *node, int type,
152 int reg, u32 mask, int value)
153{
154 struct macio_chip* macio;
155 unsigned long flags;
156
157 macio = macio_find(node, type);
158 if (!macio)
159 return -ENODEV;
160 LOCK(flags);
161 if (value)
162 MACIO_BIS(reg, mask);
163 else
164 MACIO_BIC(reg, mask);
165 (void)MACIO_IN32(reg);
166 UNLOCK(flags);
167
168 return 0;
169}
170
171#ifndef CONFIG_POWER4
172
173static long ohare_htw_scc_enable(struct device_node *node, long param,
174 long value)
175{
176 struct macio_chip* macio;
177 unsigned long chan_mask;
178 unsigned long fcr;
179 unsigned long flags;
180 int htw, trans;
181 unsigned long rmask;
182
183 macio = macio_find(node, 0);
184 if (!macio)
185 return -ENODEV;
186 if (!strcmp(node->name, "ch-a"))
187 chan_mask = MACIO_FLAG_SCCA_ON;
188 else if (!strcmp(node->name, "ch-b"))
189 chan_mask = MACIO_FLAG_SCCB_ON;
190 else
191 return -ENODEV;
192
193 htw = (macio->type == macio_heathrow || macio->type == macio_paddington
194 || macio->type == macio_gatwick);
195 /* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */
196 trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
197 pmac_mb.model_id != PMAC_TYPE_YIKES);
198 if (value) {
199#ifdef CONFIG_ADB_PMU
200 if ((param & 0xfff) == PMAC_SCC_IRDA)
201 pmu_enable_irled(1);
202#endif /* CONFIG_ADB_PMU */
203 LOCK(flags);
204 fcr = MACIO_IN32(OHARE_FCR);
205 /* Check if the scc cell needs enabling */
206 if (!(fcr & OH_SCC_ENABLE)) {
207 fcr |= OH_SCC_ENABLE;
208 if (htw) {
209 /* Side effect: this will also power up the
210 * modem, but it's too messy to figure out on which
211 * ports this controls the transceiver and on which
212 * it controls the modem
213 */
214 if (trans)
215 fcr &= ~HRW_SCC_TRANS_EN_N;
216 MACIO_OUT32(OHARE_FCR, fcr);
217 fcr |= (rmask = HRW_RESET_SCC);
218 MACIO_OUT32(OHARE_FCR, fcr);
219 } else {
220 fcr |= (rmask = OH_SCC_RESET);
221 MACIO_OUT32(OHARE_FCR, fcr);
222 }
223 UNLOCK(flags);
224 (void)MACIO_IN32(OHARE_FCR);
225 mdelay(15);
226 LOCK(flags);
227 fcr &= ~rmask;
228 MACIO_OUT32(OHARE_FCR, fcr);
229 }
230 if (chan_mask & MACIO_FLAG_SCCA_ON)
231 fcr |= OH_SCCA_IO;
232 if (chan_mask & MACIO_FLAG_SCCB_ON)
233 fcr |= OH_SCCB_IO;
234 MACIO_OUT32(OHARE_FCR, fcr);
235 macio->flags |= chan_mask;
236 UNLOCK(flags);
237 if (param & PMAC_SCC_FLAG_XMON)
238 macio->flags |= MACIO_FLAG_SCC_LOCKED;
239 } else {
240 if (macio->flags & MACIO_FLAG_SCC_LOCKED)
241 return -EPERM;
242 LOCK(flags);
243 fcr = MACIO_IN32(OHARE_FCR);
244 if (chan_mask & MACIO_FLAG_SCCA_ON)
245 fcr &= ~OH_SCCA_IO;
246 if (chan_mask & MACIO_FLAG_SCCB_ON)
247 fcr &= ~OH_SCCB_IO;
248 MACIO_OUT32(OHARE_FCR, fcr);
249 if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) {
250 fcr &= ~OH_SCC_ENABLE;
251 if (htw && trans)
252 fcr |= HRW_SCC_TRANS_EN_N;
253 MACIO_OUT32(OHARE_FCR, fcr);
254 }
255 macio->flags &= ~(chan_mask);
256 UNLOCK(flags);
257 mdelay(10);
258#ifdef CONFIG_ADB_PMU
259 if ((param & 0xfff) == PMAC_SCC_IRDA)
260 pmu_enable_irled(0);
261#endif /* CONFIG_ADB_PMU */
262 }
263 return 0;
264}
265
266static long ohare_floppy_enable(struct device_node *node, long param,
267 long value)
268{
269 return simple_feature_tweak(node, macio_ohare,
270 OHARE_FCR, OH_FLOPPY_ENABLE, value);
271}
272
273static long ohare_mesh_enable(struct device_node *node, long param, long value)
274{
275 return simple_feature_tweak(node, macio_ohare,
276 OHARE_FCR, OH_MESH_ENABLE, value);
277}
278
279static long ohare_ide_enable(struct device_node *node, long param, long value)
280{
281 switch(param) {
282 case 0:
283 /* For some reason, setting the bit in set_initial_features()
284 * doesn't stick. I'm still investigating... --BenH.
285 */
286 if (value)
287 simple_feature_tweak(node, macio_ohare,
288 OHARE_FCR, OH_IOBUS_ENABLE, 1);
289 return simple_feature_tweak(node, macio_ohare,
290 OHARE_FCR, OH_IDE0_ENABLE, value);
291 case 1:
292 return simple_feature_tweak(node, macio_ohare,
293 OHARE_FCR, OH_BAY_IDE_ENABLE, value);
294 default:
295 return -ENODEV;
296 }
297}
298
299static long ohare_ide_reset(struct device_node *node, long param, long value)
300{
301 switch(param) {
302 case 0:
303 return simple_feature_tweak(node, macio_ohare,
304 OHARE_FCR, OH_IDE0_RESET_N, !value);
305 case 1:
306 return simple_feature_tweak(node, macio_ohare,
307 OHARE_FCR, OH_IDE1_RESET_N, !value);
308 default:
309 return -ENODEV;
310 }
311}
312
313static long ohare_sleep_state(struct device_node *node, long param, long value)
314{
315 struct macio_chip* macio = &macio_chips[0];
316
317 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
318 return -EPERM;
319 if (value == 1) {
320 MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE);
321 } else if (value == 0) {
322 MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
323 }
324
325 return 0;
326}
327
328static long heathrow_modem_enable(struct device_node *node, long param,
329 long value)
330{
331 struct macio_chip* macio;
332 u8 gpio;
333 unsigned long flags;
334
335 macio = macio_find(node, macio_unknown);
336 if (!macio)
337 return -ENODEV;
338 gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1;
339 if (!value) {
340 LOCK(flags);
341 MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
342 UNLOCK(flags);
343 (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
344 mdelay(250);
345 }
346 if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
347 pmac_mb.model_id != PMAC_TYPE_YIKES) {
348 LOCK(flags);
349 if (value)
350 MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
351 else
352 MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
353 UNLOCK(flags);
354 (void)MACIO_IN32(HEATHROW_FCR);
355 mdelay(250);
356 }
357 if (value) {
358 LOCK(flags);
359 MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
360 (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
361 UNLOCK(flags); mdelay(250); LOCK(flags);
362 MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
363 (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
364 UNLOCK(flags); mdelay(250); LOCK(flags);
365 MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
366 (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
367 UNLOCK(flags); mdelay(250);
368 }
369 return 0;
370}
371
372static long heathrow_floppy_enable(struct device_node *node, long param,
373 long value)
374{
375 return simple_feature_tweak(node, macio_unknown,
376 HEATHROW_FCR,
377 HRW_SWIM_ENABLE|HRW_BAY_FLOPPY_ENABLE,
378 value);
379}
380
381static long heathrow_mesh_enable(struct device_node *node, long param,
382 long value)
383{
384 struct macio_chip* macio;
385 unsigned long flags;
386
387 macio = macio_find(node, macio_unknown);
388 if (!macio)
389 return -ENODEV;
390 LOCK(flags);
391 /* Set/clear mesh cell enable */
392 if (value)
393 MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE);
394 else
395 MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE);
396 (void)MACIO_IN32(HEATHROW_FCR);
397 udelay(10);
398 /* Set/Clear termination power */
399 if (value)
400 MACIO_BIC(HEATHROW_MBCR, 0x04000000);
401 else
402 MACIO_BIS(HEATHROW_MBCR, 0x04000000);
403 (void)MACIO_IN32(HEATHROW_MBCR);
404 udelay(10);
405 UNLOCK(flags);
406
407 return 0;
408}
409
410static long heathrow_ide_enable(struct device_node *node, long param,
411 long value)
412{
413 switch(param) {
414 case 0:
415 return simple_feature_tweak(node, macio_unknown,
416 HEATHROW_FCR, HRW_IDE0_ENABLE, value);
417 case 1:
418 return simple_feature_tweak(node, macio_unknown,
419 HEATHROW_FCR, HRW_BAY_IDE_ENABLE, value);
420 default:
421 return -ENODEV;
422 }
423}
424
425static long heathrow_ide_reset(struct device_node *node, long param,
426 long value)
427{
428 switch(param) {
429 case 0:
430 return simple_feature_tweak(node, macio_unknown,
431 HEATHROW_FCR, HRW_IDE0_RESET_N, !value);
432 case 1:
433 return simple_feature_tweak(node, macio_unknown,
434 HEATHROW_FCR, HRW_IDE1_RESET_N, !value);
435 default:
436 return -ENODEV;
437 }
438}
439
440static long heathrow_bmac_enable(struct device_node *node, long param,
441 long value)
442{
443 struct macio_chip* macio;
444 unsigned long flags;
445
446 macio = macio_find(node, 0);
447 if (!macio)
448 return -ENODEV;
449 if (value) {
450 LOCK(flags);
451 MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
452 MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET);
453 UNLOCK(flags);
454 (void)MACIO_IN32(HEATHROW_FCR);
455 mdelay(10);
456 LOCK(flags);
457 MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET);
458 UNLOCK(flags);
459 (void)MACIO_IN32(HEATHROW_FCR);
460 mdelay(10);
461 } else {
462 LOCK(flags);
463 MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
464 UNLOCK(flags);
465 }
466 return 0;
467}
468
469static long heathrow_sound_enable(struct device_node *node, long param,
470 long value)
471{
472 struct macio_chip* macio;
473 unsigned long flags;
474
475 /* B&W G3 and Yikes don't support that properly (the
476 * sound appears to never come back after being shut down).
477 */
478 if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE ||
479 pmac_mb.model_id == PMAC_TYPE_YIKES)
480 return 0;
481
482 macio = macio_find(node, 0);
483 if (!macio)
484 return -ENODEV;
485 if (value) {
486 LOCK(flags);
487 MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
488 MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
489 UNLOCK(flags);
490 (void)MACIO_IN32(HEATHROW_FCR);
491 } else {
492 LOCK(flags);
493 MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
494 MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
495 UNLOCK(flags);
496 }
497 return 0;
498}
499
500static u32 save_fcr[6];
501static u32 save_mbcr;
502static u32 save_gpio_levels[2];
503static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
504static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
505static u32 save_unin_clock_ctl;
506static struct dbdma_regs save_dbdma[13];
507static struct dbdma_regs save_alt_dbdma[13];
508
509static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save)
510{
511 int i;
512
513 /* Save state & config of DBDMA channels */
514 for (i = 0; i < 13; i++) {
515 volatile struct dbdma_regs __iomem * chan = (void __iomem *)
516 (macio->base + ((0x8000+i*0x100)>>2));
517 save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi);
518 save[i].cmdptr = in_le32(&chan->cmdptr);
519 save[i].intr_sel = in_le32(&chan->intr_sel);
520 save[i].br_sel = in_le32(&chan->br_sel);
521 save[i].wait_sel = in_le32(&chan->wait_sel);
522 }
523}
524
525static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save)
526{
527 int i;
528
529 /* Restore state & config of DBDMA channels */
530 for (i = 0; i < 13; i++) {
531 volatile struct dbdma_regs __iomem * chan = (void __iomem *)
532 (macio->base + ((0x8000+i*0x100)>>2));
533 out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);
534 while (in_le32(&chan->status) & ACTIVE)
535 mb();
536 out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi);
537 out_le32(&chan->cmdptr, save[i].cmdptr);
538 out_le32(&chan->intr_sel, save[i].intr_sel);
539 out_le32(&chan->br_sel, save[i].br_sel);
540 out_le32(&chan->wait_sel, save[i].wait_sel);
541 }
542}
543
544static void heathrow_sleep(struct macio_chip *macio, int secondary)
545{
546 if (secondary) {
547 dbdma_save(macio, save_alt_dbdma);
548 save_fcr[2] = MACIO_IN32(0x38);
549 save_fcr[3] = MACIO_IN32(0x3c);
550 } else {
551 dbdma_save(macio, save_dbdma);
552 save_fcr[0] = MACIO_IN32(0x38);
553 save_fcr[1] = MACIO_IN32(0x3c);
554 save_mbcr = MACIO_IN32(0x34);
555 /* Make sure sound is shut down */
556 MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
557 MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
558 /* This seems to be necessary as well or the fan
559 * keeps coming up and battery drains fast */
560 MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE);
561 MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N);
562 /* Make sure eth is down even if module or sleep
563 * won't work properly */
564 MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET);
565 }
566 /* Make sure modem is shut down */
567 MACIO_OUT8(HRW_GPIO_MODEM_RESET,
568 MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1);
569 MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
570 MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE);
571
572 /* Let things settle */
573 (void)MACIO_IN32(HEATHROW_FCR);
574}
575
576static void heathrow_wakeup(struct macio_chip *macio, int secondary)
577{
578 if (secondary) {
579 MACIO_OUT32(0x38, save_fcr[2]);
580 (void)MACIO_IN32(0x38);
581 mdelay(1);
582 MACIO_OUT32(0x3c, save_fcr[3]);
583 (void)MACIO_IN32(0x38);
584 mdelay(10);
585 dbdma_restore(macio, save_alt_dbdma);
586 } else {
587 MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE);
588 (void)MACIO_IN32(0x38);
589 mdelay(1);
590 MACIO_OUT32(0x3c, save_fcr[1]);
591 (void)MACIO_IN32(0x38);
592 mdelay(1);
593 MACIO_OUT32(0x34, save_mbcr);
594 (void)MACIO_IN32(0x38);
595 mdelay(10);
596 dbdma_restore(macio, save_dbdma);
597 }
598}
599
600static long heathrow_sleep_state(struct device_node *node, long param,
601 long value)
602{
603 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
604 return -EPERM;
605 if (value == 1) {
606 if (macio_chips[1].type == macio_gatwick)
607 heathrow_sleep(&macio_chips[0], 1);
608 heathrow_sleep(&macio_chips[0], 0);
609 } else if (value == 0) {
610 heathrow_wakeup(&macio_chips[0], 0);
611 if (macio_chips[1].type == macio_gatwick)
612 heathrow_wakeup(&macio_chips[0], 1);
613 }
614 return 0;
615}
616
617static long core99_scc_enable(struct device_node *node, long param, long value)
618{
619 struct macio_chip* macio;
620 unsigned long flags;
621 unsigned long chan_mask;
622 u32 fcr;
623
624 macio = macio_find(node, 0);
625 if (!macio)
626 return -ENODEV;
627 if (!strcmp(node->name, "ch-a"))
628 chan_mask = MACIO_FLAG_SCCA_ON;
629 else if (!strcmp(node->name, "ch-b"))
630 chan_mask = MACIO_FLAG_SCCB_ON;
631 else
632 return -ENODEV;
633
634 if (value) {
635 int need_reset_scc = 0;
636 int need_reset_irda = 0;
637
638 LOCK(flags);
639 fcr = MACIO_IN32(KEYLARGO_FCR0);
640 /* Check if the scc cell needs enabling */
641 if (!(fcr & KL0_SCC_CELL_ENABLE)) {
642 fcr |= KL0_SCC_CELL_ENABLE;
643 need_reset_scc = 1;
644 }
645 if (chan_mask & MACIO_FLAG_SCCA_ON) {
646 fcr |= KL0_SCCA_ENABLE;
647 /* Don't enable line drivers for I2S modem */
648 if ((param & 0xfff) == PMAC_SCC_I2S1)
649 fcr &= ~KL0_SCC_A_INTF_ENABLE;
650 else
651 fcr |= KL0_SCC_A_INTF_ENABLE;
652 }
653 if (chan_mask & MACIO_FLAG_SCCB_ON) {
654 fcr |= KL0_SCCB_ENABLE;
655 /* Perform irda specific inits */
656 if ((param & 0xfff) == PMAC_SCC_IRDA) {
657 fcr &= ~KL0_SCC_B_INTF_ENABLE;
658 fcr |= KL0_IRDA_ENABLE;
659 fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
660 fcr |= KL0_IRDA_SOURCE1_SEL;
661 fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
662 fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
663 need_reset_irda = 1;
664 } else
665 fcr |= KL0_SCC_B_INTF_ENABLE;
666 }
667 MACIO_OUT32(KEYLARGO_FCR0, fcr);
668 macio->flags |= chan_mask;
669 if (need_reset_scc) {
670 MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
671 (void)MACIO_IN32(KEYLARGO_FCR0);
672 UNLOCK(flags);
673 mdelay(15);
674 LOCK(flags);
675 MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
676 }
677 if (need_reset_irda) {
678 MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
679 (void)MACIO_IN32(KEYLARGO_FCR0);
680 UNLOCK(flags);
681 mdelay(15);
682 LOCK(flags);
683 MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
684 }
685 UNLOCK(flags);
686 if (param & PMAC_SCC_FLAG_XMON)
687 macio->flags |= MACIO_FLAG_SCC_LOCKED;
688 } else {
689 if (macio->flags & MACIO_FLAG_SCC_LOCKED)
690 return -EPERM;
691 LOCK(flags);
692 fcr = MACIO_IN32(KEYLARGO_FCR0);
693 if (chan_mask & MACIO_FLAG_SCCA_ON)
694 fcr &= ~KL0_SCCA_ENABLE;
695 if (chan_mask & MACIO_FLAG_SCCB_ON) {
696 fcr &= ~KL0_SCCB_ENABLE;
697 /* Perform irda specific clears */
698 if ((param & 0xfff) == PMAC_SCC_IRDA) {
699 fcr &= ~KL0_IRDA_ENABLE;
700 fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
701 fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
702 fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
703 }
704 }
705 MACIO_OUT32(KEYLARGO_FCR0, fcr);
706 if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
707 fcr &= ~KL0_SCC_CELL_ENABLE;
708 MACIO_OUT32(KEYLARGO_FCR0, fcr);
709 }
710 macio->flags &= ~(chan_mask);
711 UNLOCK(flags);
712 mdelay(10);
713 }
714 return 0;
715}
716
717static long
718core99_modem_enable(struct device_node *node, long param, long value)
719{
720 struct macio_chip* macio;
721 u8 gpio;
722 unsigned long flags;
723
724 /* Hack for internal USB modem */
725 if (node == NULL) {
726 if (macio_chips[0].type != macio_keylargo)
727 return -ENODEV;
728 node = macio_chips[0].of_node;
729 }
730 macio = macio_find(node, 0);
731 if (!macio)
732 return -ENODEV;
733 gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
734 gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
735 gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;
736
737 if (!value) {
738 LOCK(flags);
739 MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
740 UNLOCK(flags);
741 (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
742 mdelay(250);
743 }
744 LOCK(flags);
745 if (value) {
746 MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
747 UNLOCK(flags);
748 (void)MACIO_IN32(KEYLARGO_FCR2);
749 mdelay(250);
750 } else {
751 MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
752 UNLOCK(flags);
753 }
754 if (value) {
755 LOCK(flags);
756 MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
757 (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
758 UNLOCK(flags); mdelay(250); LOCK(flags);
759 MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
760 (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
761 UNLOCK(flags); mdelay(250); LOCK(flags);
762 MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
763 (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
764 UNLOCK(flags); mdelay(250);
765 }
766 return 0;
767}
768
769static long
770pangea_modem_enable(struct device_node *node, long param, long value)
771{
772 struct macio_chip* macio;
773 u8 gpio;
774 unsigned long flags;
775
776 /* Hack for internal USB modem */
777 if (node == NULL) {
778 if (macio_chips[0].type != macio_pangea &&
779 macio_chips[0].type != macio_intrepid)
780 return -ENODEV;
781 node = macio_chips[0].of_node;
782 }
783 macio = macio_find(node, 0);
784 if (!macio)
785 return -ENODEV;
786 gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
787 gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
788 gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;
789
790 if (!value) {
791 LOCK(flags);
792 MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
793 UNLOCK(flags);
794 (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
795 mdelay(250);
796 }
797 LOCK(flags);
798 if (value) {
799 MACIO_OUT8(KL_GPIO_MODEM_POWER,
800 KEYLARGO_GPIO_OUTPUT_ENABLE);
801 UNLOCK(flags);
802 (void)MACIO_IN32(KEYLARGO_FCR2);
803 mdelay(250);
804 } else {
805 MACIO_OUT8(KL_GPIO_MODEM_POWER,
806 KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
807 UNLOCK(flags);
808 }
809 if (value) {
810 LOCK(flags);
811 MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
812 (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
813 UNLOCK(flags); mdelay(250); LOCK(flags);
814 MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
815 (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
816 UNLOCK(flags); mdelay(250); LOCK(flags);
817 MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
818 (void)MACIO_IN8(KL_GPIO_MODEM_RESET);
819 UNLOCK(flags); mdelay(250);
820 }
821 return 0;
822}
823
824static long
825core99_ata100_enable(struct device_node *node, long value)
826{
827 unsigned long flags;
828 struct pci_dev *pdev = NULL;
829 u8 pbus, pid;
830
831 if (uninorth_rev < 0x24)
832 return -ENODEV;
833
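	/* Gate the ATA-100 clock in UniNorth, then (re)enable and
	 * set up the PCI device when turning the bus on */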
834 LOCK(flags);
835 if (value)
836 UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
837 else
838 UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
839 (void)UN_IN(UNI_N_CLOCK_CNTL);
840 UNLOCK(flags);
841 udelay(20);
842
843 if (value) {
844 if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
845 pdev = pci_find_slot(pbus, pid);
846 if (pdev == NULL)
847 return 0;
848 pci_enable_device(pdev);
849 pci_set_master(pdev);
850 }
851 return 0;
852}
853
854static long
855core99_ide_enable(struct device_node *node, long param, long value)
856{
857	/* Bus IDs 0 to 2 are KeyLargo-based IDE, bus ID 3 is the U2-based
858	 * ATA-100
859	 */
860 switch(param) {
861 case 0:
862 return simple_feature_tweak(node, macio_unknown,
863 KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value);
864 case 1:
865 return simple_feature_tweak(node, macio_unknown,
866 KEYLARGO_FCR1, KL1_EIDE1_ENABLE, value);
867 case 2:
868 return simple_feature_tweak(node, macio_unknown,
869 KEYLARGO_FCR1, KL1_UIDE_ENABLE, value);
870 case 3:
871 return core99_ata100_enable(node, value);
872 default:
873 return -ENODEV;
874 }
875}
876
877static long
878core99_ide_reset(struct device_node *node, long param, long value)
879{
880 switch(param) {
881 case 0:
882 return simple_feature_tweak(node, macio_unknown,
883 KEYLARGO_FCR1, KL1_EIDE0_RESET_N, !value);
884 case 1:
885 return simple_feature_tweak(node, macio_unknown,
886 KEYLARGO_FCR1, KL1_EIDE1_RESET_N, !value);
887 case 2:
888 return simple_feature_tweak(node, macio_unknown,
889 KEYLARGO_FCR1, KL1_UIDE_RESET_N, !value);
890 default:
891 return -ENODEV;
892 }
893}
894
895static long
896core99_gmac_enable(struct device_node *node, long param, long value)
897{
898 unsigned long flags;
899
900 LOCK(flags);
901 if (value)
902 UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
903 else
904 UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
905 (void)UN_IN(UNI_N_CLOCK_CNTL);
906 UNLOCK(flags);
907 udelay(20);
908
909 return 0;
910}
911
912static long
913core99_gmac_phy_reset(struct device_node *node, long param, long value)
914{
915 unsigned long flags;
916 struct macio_chip *macio;
917
918 macio = &macio_chips[0];
919 if (macio->type != macio_keylargo && macio->type != macio_pangea &&
920 macio->type != macio_intrepid)
921 return -ENODEV;
922
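	/* Toggle the ethernet PHY reset GPIO, letting it settle for
	 * about 10ms after each transition */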
923 LOCK(flags);
924 MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
925 (void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
926 UNLOCK(flags);
927 mdelay(10);
928 LOCK(flags);
929 MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
930 KEYLARGO_GPIO_OUTOUT_DATA);
931 UNLOCK(flags);
932 mdelay(10);
933
934 return 0;
935}
936
937static long
938core99_sound_chip_enable(struct device_node *node, long param, long value)
939{
940 struct macio_chip* macio;
941 unsigned long flags;
942
943 macio = macio_find(node, 0);
944 if (!macio)
945 return -ENODEV;
946
947	/* TODO: do better probing; Screamer G4 desktops &
948	 * iMacs can do this too, and add a recalibrate in
949	 * the driver as well
950	 */
951 if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
952 pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
953 LOCK(flags);
954 if (value)
955 MACIO_OUT8(KL_GPIO_SOUND_POWER,
956 KEYLARGO_GPIO_OUTPUT_ENABLE |
957 KEYLARGO_GPIO_OUTOUT_DATA);
958 else
959 MACIO_OUT8(KL_GPIO_SOUND_POWER,
960 KEYLARGO_GPIO_OUTPUT_ENABLE);
961 (void)MACIO_IN8(KL_GPIO_SOUND_POWER);
962 UNLOCK(flags);
963 }
964 return 0;
965}
966
967static long
968core99_airport_enable(struct device_node *node, long param, long value)
969{
970 struct macio_chip* macio;
971 unsigned long flags;
972 int state;
973
974 macio = macio_find(node, 0);
975 if (!macio)
976 return -ENODEV;
977
978 /* Hint: we allow passing of macio itself for the sake of the
979 * sleep code
980 */
981 if (node != macio->of_node &&
982 (!node->parent || node->parent != macio->of_node))
983 return -ENODEV;
984 state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
985 if (value == state)
986 return 0;
987 if (value) {
988 /* This code is a reproduction of OF enable-cardslot
989 * and init-wireless methods, slightly hacked until
990 * I got it working.
991 */
992 LOCK(flags);
993 MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
994 (void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
995 UNLOCK(flags);
996 mdelay(10);
997 LOCK(flags);
998 MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
999 (void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
1000 UNLOCK(flags);
1001
1002 mdelay(10);
1003
1004 LOCK(flags);
1005 MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
1006 (void)MACIO_IN32(KEYLARGO_FCR2);
1007 udelay(10);
1008 MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
1009 (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
1010 udelay(10);
1011 MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
1012 (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
1013 udelay(10);
1014 MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
1015 (void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
1016 udelay(10);
1017 MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
1018 (void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
1019 udelay(10);
1020 MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
1021 (void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
1022 UNLOCK(flags);
1023 udelay(10);
1024 MACIO_OUT32(0x1c000, 0);
1025 mdelay(1);
1026 MACIO_OUT8(0x1a3e0, 0x41);
1027 (void)MACIO_IN8(0x1a3e0);
1028 udelay(10);
1029 LOCK(flags);
1030 MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
1031 (void)MACIO_IN32(KEYLARGO_FCR2);
1032 UNLOCK(flags);
1033 mdelay(100);
1034
1035 macio->flags |= MACIO_FLAG_AIRPORT_ON;
1036 } else {
1037 LOCK(flags);
1038 MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
1039 (void)MACIO_IN32(KEYLARGO_FCR2);
1040 MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
1041 MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
1042 MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
1043 MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
1044 MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
1045 (void)MACIO_IN8(KL_GPIO_AIRPORT_4);
1046 UNLOCK(flags);
1047
1048 macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
1049 }
1050 return 0;
1051}
1052
1053#ifdef CONFIG_SMP
1054static long
1055core99_reset_cpu(struct device_node *node, long param, long value)
1056{
1057 unsigned int reset_io = 0;
1058 unsigned long flags;
1059 struct macio_chip *macio;
1060 struct device_node *np;
1061 const int dflt_reset_lines[] = { KL_GPIO_RESET_CPU0,
1062 KL_GPIO_RESET_CPU1,
1063 KL_GPIO_RESET_CPU2,
1064 KL_GPIO_RESET_CPU3 };
1065
1066 macio = &macio_chips[0];
1067 if (macio->type != macio_keylargo)
1068 return -ENODEV;
1069
1070 np = find_path_device("/cpus");
1071 if (np == NULL)
1072 return -ENODEV;
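	/* Look up this CPU's "soft-reset" GPIO in the device tree, keyed
	 * by its "reg" property; fall back to the default lines if the
	 * property isn't found */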
1073 for (np = np->child; np != NULL; np = np->sibling) {
1074 u32 *num = (u32 *)get_property(np, "reg", NULL);
1075 u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
1076 if (num == NULL || rst == NULL)
1077 continue;
1078 if (param == *num) {
1079 reset_io = *rst;
1080 break;
1081 }
1082 }
1083 if (np == NULL || reset_io == 0)
1084 reset_io = dflt_reset_lines[param];
1085
1086 LOCK(flags);
1087 MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
1088 (void)MACIO_IN8(reset_io);
1089 udelay(1);
1090 MACIO_OUT8(reset_io, 0);
1091 (void)MACIO_IN8(reset_io);
1092 UNLOCK(flags);
1093
1094 return 0;
1095}
1096#endif /* CONFIG_SMP */
1097
1098static long
1099core99_usb_enable(struct device_node *node, long param, long value)
1100{
1101 struct macio_chip *macio;
1102 unsigned long flags;
1103 char *prop;
1104 int number;
1105 u32 reg;
1106
1107 macio = &macio_chips[0];
1108 if (macio->type != macio_keylargo && macio->type != macio_pangea &&
1109 macio->type != macio_intrepid)
1110 return -ENODEV;
1111
1112 prop = (char *)get_property(node, "AAPL,clock-id", NULL);
1113 if (!prop)
1114 return -ENODEV;
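	/* Map the "AAPL,clock-id" property to a cell number: 0 and 2 are
	 * the FCR0-controlled USB cells, 4 is the FCR1-controlled USB2 cell */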
1115 if (strncmp(prop, "usb0u048", 8) == 0)
1116 number = 0;
1117 else if (strncmp(prop, "usb1u148", 8) == 0)
1118 number = 2;
1119 else if (strncmp(prop, "usb2u248", 8) == 0)
1120 number = 4;
1121 else
1122 return -ENODEV;
1123
1124	/* Sorry for the brute-force locking, but this is only used during
1125	 * sleep and the timing seems to be critical
1126	 */
1127 LOCK(flags);
1128 if (value) {
1129 /* Turn ON */
1130 if (number == 0) {
1131 MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
1132 (void)MACIO_IN32(KEYLARGO_FCR0);
1133 UNLOCK(flags);
1134 mdelay(1);
1135 LOCK(flags);
1136 MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
1137 } else if (number == 2) {
1138 MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
1139 UNLOCK(flags);
1140 (void)MACIO_IN32(KEYLARGO_FCR0);
1141 mdelay(1);
1142 LOCK(flags);
1143 MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
1144 } else if (number == 4) {
1145 MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
1146 UNLOCK(flags);
1147 (void)MACIO_IN32(KEYLARGO_FCR1);
1148 mdelay(1);
1149 LOCK(flags);
1150 MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
1151 }
1152 if (number < 4) {
1153 reg = MACIO_IN32(KEYLARGO_FCR4);
1154 reg &= ~(KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
1155 KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number));
1156 reg &= ~(KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
1157 KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1));
1158 MACIO_OUT32(KEYLARGO_FCR4, reg);
1159 (void)MACIO_IN32(KEYLARGO_FCR4);
1160 udelay(10);
1161 } else {
1162 reg = MACIO_IN32(KEYLARGO_FCR3);
1163 reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
1164 KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
1165 reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
1166 KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
1167 MACIO_OUT32(KEYLARGO_FCR3, reg);
1168 (void)MACIO_IN32(KEYLARGO_FCR3);
1169 udelay(10);
1170 }
1171 if (macio->type == macio_intrepid) {
1172 /* wait for clock stopped bits to clear */
1173 u32 test0 = 0, test1 = 0;
1174 u32 status0, status1;
1175 int timeout = 1000;
1176
1177 UNLOCK(flags);
1178 switch (number) {
1179 case 0:
1180 test0 = UNI_N_CLOCK_STOPPED_USB0;
1181 test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
1182 break;
1183 case 2:
1184 test0 = UNI_N_CLOCK_STOPPED_USB1;
1185 test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
1186 break;
1187 case 4:
1188 test0 = UNI_N_CLOCK_STOPPED_USB2;
1189 test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
1190 break;
1191 }
1192 do {
1193 if (--timeout <= 0) {
1194 printk(KERN_ERR "core99_usb_enable: "
1195 "Timeout waiting for clocks\n");
1196 break;
1197 }
1198 mdelay(1);
1199 status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
1200 status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
1201 } while ((status0 & test0) | (status1 & test1));
1202 LOCK(flags);
1203 }
1204 } else {
1205 /* Turn OFF */
1206 if (number < 4) {
1207 reg = MACIO_IN32(KEYLARGO_FCR4);
1208 reg |= KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
1209 KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number);
1210 reg |= KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
1211 KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1);
1212 MACIO_OUT32(KEYLARGO_FCR4, reg);
1213 (void)MACIO_IN32(KEYLARGO_FCR4);
1214 udelay(1);
1215 } else {
1216 reg = MACIO_IN32(KEYLARGO_FCR3);
1217 reg |= KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
1218 KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
1219 reg |= KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
1220 KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
1221 MACIO_OUT32(KEYLARGO_FCR3, reg);
1222 (void)MACIO_IN32(KEYLARGO_FCR3);
1223 udelay(1);
1224 }
1225 if (number == 0) {
1226 if (macio->type != macio_intrepid)
1227 MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
1228 (void)MACIO_IN32(KEYLARGO_FCR0);
1229 udelay(1);
1230 MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
1231 (void)MACIO_IN32(KEYLARGO_FCR0);
1232 } else if (number == 2) {
1233 if (macio->type != macio_intrepid)
1234 MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
1235 (void)MACIO_IN32(KEYLARGO_FCR0);
1236 udelay(1);
1237 MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
1238 (void)MACIO_IN32(KEYLARGO_FCR0);
1239 } else if (number == 4) {
1240 udelay(1);
1241 MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
1242 (void)MACIO_IN32(KEYLARGO_FCR1);
1243 }
1244 udelay(1);
1245 }
1246 UNLOCK(flags);
1247
1248 return 0;
1249}
1250
1251static long
1252core99_firewire_enable(struct device_node *node, long param, long value)
1253{
1254 unsigned long flags;
1255 struct macio_chip *macio;
1256
1257 macio = &macio_chips[0];
1258 if (macio->type != macio_keylargo && macio->type != macio_pangea &&
1259 macio->type != macio_intrepid)
1260 return -ENODEV;
1261 if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
1262 return -ENODEV;
1263
1264 LOCK(flags);
1265 if (value) {
1266 UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
1267 (void)UN_IN(UNI_N_CLOCK_CNTL);
1268 } else {
1269 UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
1270 (void)UN_IN(UNI_N_CLOCK_CNTL);
1271 }
1272 UNLOCK(flags);
1273 mdelay(1);
1274
1275 return 0;
1276}
1277
1278static long
1279core99_firewire_cable_power(struct device_node *node, long param, long value)
1280{
1281 unsigned long flags;
1282 struct macio_chip *macio;
1283
1284 /* Trick: we allow NULL node */
1285 if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0)
1286 return -ENODEV;
1287 macio = &macio_chips[0];
1288 if (macio->type != macio_keylargo && macio->type != macio_pangea &&
1289 macio->type != macio_intrepid)
1290 return -ENODEV;
1291 if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
1292 return -ENODEV;
1293
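	/* Writing 0 to the GPIO turns cable power on, 4 turns it off */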
1294 LOCK(flags);
1295 if (value) {
1296 MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 0);
1297 MACIO_IN8(KL_GPIO_FW_CABLE_POWER);
1298 udelay(10);
1299 } else {
1300 MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 4);
1301 MACIO_IN8(KL_GPIO_FW_CABLE_POWER); udelay(10);
1302 }
1303 UNLOCK(flags);
1304 mdelay(1);
1305
1306 return 0;
1307}
1308
1309static long
1310intrepid_aack_delay_enable(struct device_node *node, long param, long value)
1311{
1312 unsigned long flags;
1313
1314 if (uninorth_rev < 0xd2)
1315 return -ENODEV;
1316
1317 LOCK(flags);
1318 if (param)
1319 UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
1320 else
1321 UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
1322 UNLOCK(flags);
1323
1324 return 0;
1325}
1326
1327
1328#endif /* CONFIG_POWER4 */
1329
1330static long
1331core99_read_gpio(struct device_node *node, long param, long value)
1332{
1333 struct macio_chip *macio = &macio_chips[0];
1334
1335 return MACIO_IN8(param);
1336}
1337
1338
1339static long
1340core99_write_gpio(struct device_node *node, long param, long value)
1341{
1342 struct macio_chip *macio = &macio_chips[0];
1343
1344 MACIO_OUT8(param, (u8)(value & 0xff));
1345 return 0;
1346}
1347
1348#ifdef CONFIG_POWER4
1349static long g5_gmac_enable(struct device_node *node, long param, long value)
1350{
1351 struct macio_chip *macio = &macio_chips[0];
1352 unsigned long flags;
1353
1354 if (node == NULL)
1355 return -ENODEV;
1356
1357 LOCK(flags);
1358 if (value) {
1359 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
1360 mb();
1361 k2_skiplist[0] = NULL;
1362 } else {
1363 k2_skiplist[0] = node;
1364 mb();
1365 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
1366 }
1367
1368 UNLOCK(flags);
1369 mdelay(1);
1370
1371 return 0;
1372}
1373
1374static long g5_fw_enable(struct device_node *node, long param, long value)
1375{
1376 struct macio_chip *macio = &macio_chips[0];
1377 unsigned long flags;
1378
1379 if (node == NULL)
1380 return -ENODEV;
1381
1382 LOCK(flags);
1383 if (value) {
1384 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
1385 mb();
1386 k2_skiplist[1] = NULL;
1387 } else {
1388 k2_skiplist[1] = node;
1389 mb();
1390 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
1391 }
1392
1393 UNLOCK(flags);
1394 mdelay(1);
1395
1396 return 0;
1397}
1398
1399static long g5_mpic_enable(struct device_node *node, long param, long value)
1400{
1401 unsigned long flags;
1402
1403 if (node->parent == NULL || strcmp(node->parent->name, "u3"))
1404 return 0;
1405
1406 LOCK(flags);
1407 UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
1408 UNLOCK(flags);
1409
1410 return 0;
1411}
1412
1413static long g5_eth_phy_reset(struct device_node *node, long param, long value)
1414{
1415 struct macio_chip *macio = &macio_chips[0];
1416 struct device_node *phy;
1417 int need_reset;
1418
1419 /*
1420 * We must not reset the combo PHYs, only the BCM5221 found in
1421 * the iMac G5.
1422 */
1423 phy = of_get_next_child(node, NULL);
1424 if (!phy)
1425 return -ENODEV;
1426 need_reset = device_is_compatible(phy, "B5221");
1427 of_node_put(phy);
1428 if (!need_reset)
1429 return 0;
1430
1431 /* PHY reset is GPIO 29, not in device-tree unfortunately */
1432 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
1433 KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
1434	/* Thankfully, this is now always called by sungem at a time
1435	 * when we can schedule.
1436	 */
1437 msleep(10);
1438 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
1439
1440 return 0;
1441}
1442
1443static long g5_i2s_enable(struct device_node *node, long param, long value)
1444{
1445 /* Very crude implementation for now */
1446 struct macio_chip *macio = &macio_chips[0];
1447 unsigned long flags;
1448
1449 if (value == 0)
1450 return 0; /* don't disable yet */
1451
1452 LOCK(flags);
1453 MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
1454 KL3_I2S0_CLK18_ENABLE);
1455 udelay(10);
1456 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
1457 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
1458 udelay(10);
1459 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
1460 UNLOCK(flags);
1461 udelay(10);
1462
1463 return 0;
1464}
1465
1466
1467#ifdef CONFIG_SMP
1468static long g5_reset_cpu(struct device_node *node, long param, long value)
1469{
1470 unsigned int reset_io = 0;
1471 unsigned long flags;
1472 struct macio_chip *macio;
1473 struct device_node *np;
1474
1475 macio = &macio_chips[0];
1476 if (macio->type != macio_keylargo2)
1477 return -ENODEV;
1478
1479 np = find_path_device("/cpus");
1480 if (np == NULL)
1481 return -ENODEV;
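	/* Same "soft-reset" GPIO lookup as core99_reset_cpu, except there
	 * is no default fallback here */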
1482 for (np = np->child; np != NULL; np = np->sibling) {
1483 u32 *num = (u32 *)get_property(np, "reg", NULL);
1484 u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
1485 if (num == NULL || rst == NULL)
1486 continue;
1487 if (param == *num) {
1488 reset_io = *rst;
1489 break;
1490 }
1491 }
1492 if (np == NULL || reset_io == 0)
1493 return -ENODEV;
1494
1495 LOCK(flags);
1496 MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
1497 (void)MACIO_IN8(reset_io);
1498 udelay(1);
1499 MACIO_OUT8(reset_io, 0);
1500 (void)MACIO_IN8(reset_io);
1501 UNLOCK(flags);
1502
1503 return 0;
1504}
1505#endif /* CONFIG_SMP */
1506
1507/*
1508 * This can be called from pmac_smp, so it isn't static.
1509 *
1510 * This takes the second CPU off the bus on dual CPU machines
1511 * running UP
1512 */
1513void g5_phy_disable_cpu1(void)
1514{
1515 UN_OUT(U3_API_PHY_CONFIG_1, 0);
1516}
1517#endif /* CONFIG_POWER4 */
1518
1519#ifndef CONFIG_POWER4
1520
1521static void
1522keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
1523{
1524 u32 temp;
1525
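	/* Turn off as many KeyLargo cells and clocks as possible; with
	 * sleep_mode set, also shut down the PLLs and the timer/VIA clocks */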
1526 if (sleep_mode) {
1527 mdelay(1);
1528 MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
1529 (void)MACIO_IN32(KEYLARGO_FCR0);
1530 mdelay(1);
1531 }
1532
1533 MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
1534 KL0_SCC_CELL_ENABLE |
1535 KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
1536 KL0_IRDA_CLK19_ENABLE);
1537
1538 MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
1539 MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);
1540
1541 MACIO_BIC(KEYLARGO_FCR1,
1542 KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
1543 KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
1544 KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
1545 KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
1546 KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
1547 KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
1548 KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
1549 KL1_UIDE_ENABLE);
1550
1551 MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
1552 MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);
1553
1554 temp = MACIO_IN32(KEYLARGO_FCR3);
1555 if (macio->rev >= 2) {
1556 temp |= KL3_SHUTDOWN_PLL2X;
1557 if (sleep_mode)
1558 temp |= KL3_SHUTDOWN_PLL_TOTAL;
1559 }
1560
1561 temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
1562 KL3_SHUTDOWN_PLLKW35;
1563 if (sleep_mode)
1564 temp |= KL3_SHUTDOWN_PLLKW12;
1565 temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE
1566 | KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
1567 if (sleep_mode)
1568 temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_VIA_CLK16_ENABLE);
1569 MACIO_OUT32(KEYLARGO_FCR3, temp);
1570
1571 /* Flush posted writes & wait a bit */
1572 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1573}
1574
1575static void
1576pangea_shutdown(struct macio_chip *macio, int sleep_mode)
1577{
1578 u32 temp;
1579
1580 MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
1581 KL0_SCC_CELL_ENABLE |
1582 KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);
1583
1584 MACIO_BIC(KEYLARGO_FCR1,
1585 KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
1586 KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
1587 KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
1588 KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
1589 KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
1590 KL1_UIDE_ENABLE);
1591 if (pmac_mb.board_flags & PMAC_MB_MOBILE)
1592 MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
1593
1594 MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
1595
1596 temp = MACIO_IN32(KEYLARGO_FCR3);
1597 temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
1598 KL3_SHUTDOWN_PLLKW35;
1599 temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE
1600 | KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
1601 if (sleep_mode)
1602 temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
1603 MACIO_OUT32(KEYLARGO_FCR3, temp);
1604
1605 /* Flush posted writes & wait a bit */
1606 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1607}
1608
1609static void
1610intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
1611{
1612 u32 temp;
1613
1614 MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
1615 KL0_SCC_CELL_ENABLE);
1616
1617 MACIO_BIC(KEYLARGO_FCR1,
1618 /*KL1_USB2_CELL_ENABLE |*/
1619 KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
1620 KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
1621 KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE);
1622 if (pmac_mb.board_flags & PMAC_MB_MOBILE)
1623 MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
1624
1625 temp = MACIO_IN32(KEYLARGO_FCR3);
1626 temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
1627 KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
1628 if (sleep_mode)
1629 temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
1630 MACIO_OUT32(KEYLARGO_FCR3, temp);
1631
1632 /* Flush posted writes & wait a bit */
1633 (void)MACIO_IN32(KEYLARGO_FCR0);
1634 mdelay(10);
1635}
1636
1637
1638void pmac_tweak_clock_spreading(int enable)
1639{
1640 struct macio_chip *macio = &macio_chips[0];
1641
1642	/* Hack for doing clock spreading on some machines (PowerBooks and
1643	 * iBooks). This implements the "platform-do-clockspreading" OF
1644	 * property as decoded manually on various models. For safety, we also
1645	 * check the product ID in the device-tree in the cases where we whack
1646	 * the i2c chip, to make reasonably sure we won't set wrong values in it.
1647 *
1648 * Of course, ultimately, we have to implement a real parser for
1649 * the platform-do-* stuff...
1650 */
1651
1652 if (macio->type == macio_intrepid) {
1653 if (enable)
1654 UN_OUT(UNI_N_CLOCK_SPREADING, 2);
1655 else
1656 UN_OUT(UNI_N_CLOCK_SPREADING, 0);
1657 mdelay(40);
1658 }
1659
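	/* Note: this "while" is only a breakable block, it runs at most once */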
1660 while (machine_is_compatible("PowerBook5,2") ||
1661 machine_is_compatible("PowerBook5,3") ||
1662 machine_is_compatible("PowerBook6,2") ||
1663 machine_is_compatible("PowerBook6,3")) {
1664 struct device_node *ui2c = of_find_node_by_type(NULL, "i2c");
1665 struct device_node *dt = of_find_node_by_name(NULL, "device-tree");
1666 u8 buffer[9];
1667 u32 *productID;
1668 int i, rc, changed = 0;
1669
1670 if (dt == NULL)
1671 break;
1672 productID = (u32 *)get_property(dt, "pid#", NULL);
1673 if (productID == NULL)
1674 break;
1675 while(ui2c) {
1676 struct device_node *p = of_get_parent(ui2c);
1677 if (p && !strcmp(p->name, "uni-n"))
1678 break;
1679 ui2c = of_find_node_by_type(ui2c, "i2c");
1680 }
1681 if (ui2c == NULL)
1682 break;
1683 DBG("Trying to bump clock speed for PID: %08x...\n", *productID);
1684 rc = pmac_low_i2c_open(ui2c, 1);
1685 if (rc != 0)
1686 break;
1687 pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
1688 rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
1689 DBG("read result: %d,", rc);
1690 if (rc != 0) {
1691 pmac_low_i2c_close(ui2c);
1692 break;
1693 }
1694 for (i=0; i<9; i++)
1695 DBG(" %02x", buffer[i]);
1696 DBG("\n");
1697
1698 switch(*productID) {
1699 case 0x1182: /* AlBook 12" rev 2 */
1700 case 0x1183: /* iBook G4 12" */
1701 buffer[0] = (buffer[0] & 0x8f) | 0x70;
1702 buffer[2] = (buffer[2] & 0x7f) | 0x00;
1703 buffer[5] = (buffer[5] & 0x80) | 0x31;
1704 buffer[6] = (buffer[6] & 0x40) | 0xb0;
1705 buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba);
1706 buffer[8] = (buffer[8] & 0x00) | 0x30;
1707 changed = 1;
1708 break;
1709 case 0x3142: /* AlBook 15" (ATI M10) */
1710 case 0x3143: /* AlBook 17" (ATI M10) */
1711 buffer[0] = (buffer[0] & 0xaf) | 0x50;
1712 buffer[2] = (buffer[2] & 0x7f) | 0x00;
1713 buffer[5] = (buffer[5] & 0x80) | 0x31;
1714 buffer[6] = (buffer[6] & 0x40) | 0xb0;
1715 buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0);
1716 buffer[8] = (buffer[8] & 0x00) | 0x30;
1717 changed = 1;
1718 break;
1719 default:
1720 DBG("i2c-hwclock: Machine model not handled\n");
1721 break;
1722 }
1723 if (!changed) {
1724 pmac_low_i2c_close(ui2c);
1725 break;
1726 }
1727 pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
1728 rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
1729 DBG("write result: %d,", rc);
1730 pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
1731 rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
1732 DBG("read result: %d,", rc);
1733 if (rc != 0) {
1734 pmac_low_i2c_close(ui2c);
1735 break;
1736 }
1737 for (i=0; i<9; i++)
1738 DBG(" %02x", buffer[i]);
1739 pmac_low_i2c_close(ui2c);
1740 break;
1741 }
1742}
1743
1744
1745static int
1746core99_sleep(void)
1747{
1748 struct macio_chip *macio;
1749 int i;
1750
1751 macio = &macio_chips[0];
1752 if (macio->type != macio_keylargo && macio->type != macio_pangea &&
1753 macio->type != macio_intrepid)
1754 return -ENODEV;
1755
1756	/* We power off the wireless slot in case the driver did not
1757	 * do it already; we don't power it back on automatically, however.
1758	 */
1759 if (macio->flags & MACIO_FLAG_AIRPORT_ON)
1760 core99_airport_enable(macio->of_node, 0, 0);
1761
1762 /* We power off the FW cable. Should be done by the driver... */
1763 if (macio->flags & MACIO_FLAG_FW_SUPPORTED) {
1764 core99_firewire_enable(NULL, 0, 0);
1765 core99_firewire_cable_power(NULL, 0, 0);
1766 }
1767
1768 /* We make sure int. modem is off (in case driver lost it) */
1769 if (macio->type == macio_keylargo)
1770 core99_modem_enable(macio->of_node, 0, 0);
1771 else
1772 pangea_modem_enable(macio->of_node, 0, 0);
1773
1774 /* We make sure the sound is off as well */
1775 core99_sound_chip_enable(macio->of_node, 0, 0);
1776
1777 /*
1778 * Save various bits of KeyLargo
1779 */
1780
1781 /* Save the state of the various GPIOs */
1782 save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0);
1783 save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1);
1784 for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
1785 save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i);
1786 for (i=0; i<KEYLARGO_GPIO_CNT; i++)
1787 save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i);
1788
1789 /* Save the FCRs */
1790 if (macio->type == macio_keylargo)
1791 save_mbcr = MACIO_IN32(KEYLARGO_MBCR);
1792 save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0);
1793 save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1);
1794 save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2);
1795 save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3);
1796 save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4);
1797 if (macio->type == macio_pangea || macio->type == macio_intrepid)
1798 save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5);
1799
1800 /* Save state & config of DBDMA channels */
1801 dbdma_save(macio, save_dbdma);
1802
1803 /*
1804 * Turn off as much as we can
1805 */
1806 if (macio->type == macio_pangea)
1807 pangea_shutdown(macio, 1);
1808 else if (macio->type == macio_intrepid)
1809 intrepid_shutdown(macio, 1);
1810 else if (macio->type == macio_keylargo)
1811 keylargo_shutdown(macio, 1);
1812
1813 /*
1814 * Put the host bridge to sleep
1815 */
1816
1817 save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL);
1818	/* Note: do not switch GMAC off; the driver does it when necessary,
1819	 * and WOL must keep it enabled!
1820	 */
1821 UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl &
1822 ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/));
1823 udelay(100);
1824 UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
1825 UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP);
1826 mdelay(10);
1827
1828 /*
1829 * FIXME: A bit of black magic with OpenPIC (don't ask me why)
1830 */
1831 if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
1832 MACIO_BIS(0x506e0, 0x00400000);
1833 MACIO_BIS(0x506e0, 0x80000000);
1834 }
1835 return 0;
1836}
1837
1838static int
1839core99_wake_up(void)
1840{
1841 struct macio_chip *macio;
1842 int i;
1843
1844 macio = &macio_chips[0];
1845 if (macio->type != macio_keylargo && macio->type != macio_pangea &&
1846 macio->type != macio_intrepid)
1847 return -ENODEV;
1848
1849 /*
1850	 * Wake up the host bridge
1851 */
1852 UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
1853 udelay(10);
1854 UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
1855 udelay(10);
1856
1857 /*
1858 * Restore KeyLargo
1859 */
1860
1861 if (macio->type == macio_keylargo) {
1862 MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
1863 (void)MACIO_IN32(KEYLARGO_MBCR); udelay(10);
1864 }
1865 MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
1866 (void)MACIO_IN32(KEYLARGO_FCR0); udelay(10);
1867 MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
1868 (void)MACIO_IN32(KEYLARGO_FCR1); udelay(10);
1869 MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
1870 (void)MACIO_IN32(KEYLARGO_FCR2); udelay(10);
1871 MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
1872 (void)MACIO_IN32(KEYLARGO_FCR3); udelay(10);
1873 MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
1874 (void)MACIO_IN32(KEYLARGO_FCR4); udelay(10);
1875 if (macio->type == macio_pangea || macio->type == macio_intrepid) {
1876 MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
1877 (void)MACIO_IN32(KEYLARGO_FCR5); udelay(10);
1878 }
1879
1880 dbdma_restore(macio, save_dbdma);
1881
1882 MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
1883 MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
1884 for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
1885 MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
1886 for (i=0; i<KEYLARGO_GPIO_CNT; i++)
1887 MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);
1888
1889 /* FIXME more black magic with OpenPIC ... */
1890 if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
1891 MACIO_BIC(0x506e0, 0x00400000);
1892 MACIO_BIC(0x506e0, 0x80000000);
1893 }
1894
1895 UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl);
1896 udelay(100);
1897
1898 return 0;
1899}
1900
1901static long
1902core99_sleep_state(struct device_node *node, long param, long value)
1903{
1904	/* Param == 1 means to enter the "fake sleep" mode that is
1905	 * used for CPU speed switching
1906	 */
1907 if (param == 1) {
1908 if (value == 1) {
1909 UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
1910 UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2);
1911 } else {
1912 UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
1913 udelay(10);
1914 UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
1915 udelay(10);
1916 }
1917 return 0;
1918 }
1919 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
1920 return -EPERM;
1921
1922 if (value == 1)
1923 return core99_sleep();
1924 else if (value == 0)
1925 return core99_wake_up();
1926 return 0;
1927}
1928
1929#endif /* CONFIG_POWER4 */
1930
1931static long
1932generic_dev_can_wake(struct device_node *node, long param, long value)
1933{
1934	/* Todo: eventually check that we are really dealing with the
1935	 * on-board video device ...
1936	 */
1937
1938 if (pmac_mb.board_flags & PMAC_MB_MAY_SLEEP)
1939 pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP;
1940 return 0;
1941}
1942
1943static long generic_get_mb_info(struct device_node *node, long param, long value)
1944{
1945 switch(param) {
1946 case PMAC_MB_INFO_MODEL:
1947 return pmac_mb.model_id;
1948 case PMAC_MB_INFO_FLAGS:
1949 return pmac_mb.board_flags;
1950 case PMAC_MB_INFO_NAME:
1951 /* hack hack hack... but should work */
1952 *((const char **)value) = pmac_mb.model_name;
1953 return 0;
1954 }
1955 return -EINVAL;
1956}
1957
1958
1959/*
1960 * Table definitions
1961 */
1962
1963/* Used on any machine
1964 */
1965static struct feature_table_entry any_features[] = {
1966 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
1967 { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake },
1968 { 0, NULL }
1969};
1970
1971#ifndef CONFIG_POWER4
1972
1973/* OHare-based motherboards. Currently, we only use these on the
1974 * 2400, 3400 and 3500 series PowerBooks. Some older desktops seem
1975 * to have issues with turning those ASIC cells on/off.
1976 */
1977static struct feature_table_entry ohare_features[] = {
1978 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1979 { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable },
1980 { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable },
1981 { PMAC_FTR_IDE_ENABLE, ohare_ide_enable},
1982 { PMAC_FTR_IDE_RESET, ohare_ide_reset},
1983 { PMAC_FTR_SLEEP_STATE, ohare_sleep_state },
1984 { 0, NULL }
1985};
1986
1987/* Heathrow desktop machines (Beige G3).
1988 * Kept separate because some features couldn't be properly tested
1989 * and the serial port control bits appear to confuse it.
1990 */
1991static struct feature_table_entry heathrow_desktop_features[] = {
1992 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
1993 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
1994 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
1995 { PMAC_FTR_IDE_RESET, heathrow_ide_reset },
1996 { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
1997 { 0, NULL }
1998};
1999
2000/* Heathrow-based laptops, that is the Wallstreet and Mainstreet
2001 * PowerBooks.
2002 */
2003static struct feature_table_entry heathrow_laptop_features[] = {
2004 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
2005 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
2006 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
2007 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
2008 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
2009 { PMAC_FTR_IDE_RESET, heathrow_ide_reset },
2010 { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
2011 { PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable },
2012 { PMAC_FTR_SLEEP_STATE, heathrow_sleep_state },
2013 { 0, NULL }
2014};
2015
2016/* Paddington-based machines:
2017 * the Lombard (101) PowerBook, first iMac models, B&W G3 and Yikes G4.
2018 */
2019static struct feature_table_entry paddington_features[] = {
2020 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
2021 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
2022 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
2023 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
2024 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
2025 { PMAC_FTR_IDE_RESET, heathrow_ide_reset },
2026 { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
2027 { PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable },
2028 { PMAC_FTR_SLEEP_STATE, heathrow_sleep_state },
2029 { 0, NULL }
2030};
2031
2032/* Core99 & MacRISC 2 machines (all machines released since the
2033 * original iBook, included), that is all AGP machines except those
2034 * with the Pangea chipset. Pangea is the "combo" UniNorth/KeyLargo
2035 * used on the iBook 2 & iMac "Flower Power".
2036 */
2037static struct feature_table_entry core99_features[] = {
2038 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2039 { PMAC_FTR_MODEM_ENABLE, core99_modem_enable },
2040 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
2041 { PMAC_FTR_IDE_RESET, core99_ide_reset },
2042 { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
2043 { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
2044 { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
2045 { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
2046 { PMAC_FTR_USB_ENABLE, core99_usb_enable },
2047 { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
2048 { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
2049 { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
2050#ifdef CONFIG_SMP
2051 { PMAC_FTR_RESET_CPU, core99_reset_cpu },
2052#endif /* CONFIG_SMP */
2053 { PMAC_FTR_READ_GPIO, core99_read_gpio },
2054 { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
2055 { 0, NULL }
2056};
2057
2058/* RackMac
2059 */
2060static struct feature_table_entry rackmac_features[] = {
2061 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2062 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
2063 { PMAC_FTR_IDE_RESET, core99_ide_reset },
2064 { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
2065 { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
2066 { PMAC_FTR_USB_ENABLE, core99_usb_enable },
2067 { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
2068 { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
2069 { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
2070#ifdef CONFIG_SMP
2071 { PMAC_FTR_RESET_CPU, core99_reset_cpu },
2072#endif /* CONFIG_SMP */
2073 { PMAC_FTR_READ_GPIO, core99_read_gpio },
2074 { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
2075 { 0, NULL }
2076};
2077
2078/* Pangea features
2079 */
2080static struct feature_table_entry pangea_features[] = {
2081 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2082 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
2083 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
2084 { PMAC_FTR_IDE_RESET, core99_ide_reset },
2085 { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
2086 { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
2087 { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
2088 { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
2089 { PMAC_FTR_USB_ENABLE, core99_usb_enable },
2090 { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
2091 { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
2092 { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
2093 { PMAC_FTR_READ_GPIO, core99_read_gpio },
2094 { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
2095 { 0, NULL }
2096};
2097
2098/* Intrepid features
2099 */
2100static struct feature_table_entry intrepid_features[] = {
2101 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2102 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
2103 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
2104 { PMAC_FTR_IDE_RESET, core99_ide_reset },
2105 { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
2106 { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
2107 { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
2108 { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
2109 { PMAC_FTR_USB_ENABLE, core99_usb_enable },
2110 { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
2111 { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
2112 { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
2113 { PMAC_FTR_READ_GPIO, core99_read_gpio },
2114 { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
2115 { PMAC_FTR_AACK_DELAY_ENABLE, intrepid_aack_delay_enable },
2116 { 0, NULL }
2117};
2118
2119#else /* CONFIG_POWER4 */
2120
2121/* G5 features
2122 */
2123static struct feature_table_entry g5_features[] = {
2124 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
2125 { PMAC_FTR_1394_ENABLE, g5_fw_enable },
2126 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
2127 { PMAC_FTR_GMAC_PHY_RESET, g5_eth_phy_reset },
2128 { PMAC_FTR_SOUND_CHIP_ENABLE, g5_i2s_enable },
2129#ifdef CONFIG_SMP
2130 { PMAC_FTR_RESET_CPU, g5_reset_cpu },
2131#endif /* CONFIG_SMP */
2132 { PMAC_FTR_READ_GPIO, core99_read_gpio },
2133 { PMAC_FTR_WRITE_GPIO, core99_write_gpio },
2134 { 0, NULL }
2135};
2136
2137#endif /* CONFIG_POWER4 */
2138
2139static struct pmac_mb_def pmac_mb_defs[] = {
2140#ifndef CONFIG_POWER4
2141 /*
2142 * Desktops
2143 */
2144
2145 { "AAPL,8500", "PowerMac 8500/8600",
2146 PMAC_TYPE_PSURGE, NULL,
2147 0
2148 },
2149 { "AAPL,9500", "PowerMac 9500/9600",
2150 PMAC_TYPE_PSURGE, NULL,
2151 0
2152 },
2153 { "AAPL,7200", "PowerMac 7200",
2154 PMAC_TYPE_PSURGE, NULL,
2155 0
2156 },
2157 { "AAPL,7300", "PowerMac 7200/7300",
2158 PMAC_TYPE_PSURGE, NULL,
2159 0
2160 },
2161 { "AAPL,7500", "PowerMac 7500",
2162 PMAC_TYPE_PSURGE, NULL,
2163 0
2164 },
2165 { "AAPL,ShinerESB", "Apple Network Server",
2166 PMAC_TYPE_ANS, NULL,
2167 0
2168 },
2169 { "AAPL,e407", "Alchemy",
2170 PMAC_TYPE_ALCHEMY, NULL,
2171 0
2172 },
2173 { "AAPL,e411", "Gazelle",
2174 PMAC_TYPE_GAZELLE, NULL,
2175 0
2176 },
2177 { "AAPL,Gossamer", "PowerMac G3 (Gossamer)",
2178 PMAC_TYPE_GOSSAMER, heathrow_desktop_features,
2179 0
2180 },
2181 { "AAPL,PowerMac G3", "PowerMac G3 (Silk)",
2182 PMAC_TYPE_SILK, heathrow_desktop_features,
2183 0
2184 },
2185 { "PowerMac1,1", "Blue&White G3",
2186 PMAC_TYPE_YOSEMITE, paddington_features,
2187 0
2188 },
2189 { "PowerMac1,2", "PowerMac G4 PCI Graphics",
2190 PMAC_TYPE_YIKES, paddington_features,
2191 0
2192 },
2193 { "PowerMac2,1", "iMac FireWire",
2194 PMAC_TYPE_FW_IMAC, core99_features,
2195 PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
2196 },
2197 { "PowerMac2,2", "iMac FireWire",
2198 PMAC_TYPE_FW_IMAC, core99_features,
2199 PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
2200 },
2201 { "PowerMac3,1", "PowerMac G4 AGP Graphics",
2202 PMAC_TYPE_SAWTOOTH, core99_features,
2203 PMAC_MB_OLD_CORE99
2204 },
2205 { "PowerMac3,2", "PowerMac G4 AGP Graphics",
2206 PMAC_TYPE_SAWTOOTH, core99_features,
2207 PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
2208 },
2209 { "PowerMac3,3", "PowerMac G4 AGP Graphics",
2210 PMAC_TYPE_SAWTOOTH, core99_features,
2211 PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
2212 },
2213 { "PowerMac3,4", "PowerMac G4 Silver",
2214 PMAC_TYPE_QUICKSILVER, core99_features,
2215 PMAC_MB_MAY_SLEEP
2216 },
2217 { "PowerMac3,5", "PowerMac G4 Silver",
2218 PMAC_TYPE_QUICKSILVER, core99_features,
2219 PMAC_MB_MAY_SLEEP
2220 },
2221 { "PowerMac3,6", "PowerMac G4 Windtunnel",
2222 PMAC_TYPE_WINDTUNNEL, core99_features,
2223 PMAC_MB_MAY_SLEEP,
2224 },
2225 { "PowerMac4,1", "iMac \"Flower Power\"",
2226 PMAC_TYPE_PANGEA_IMAC, pangea_features,
2227 PMAC_MB_MAY_SLEEP
2228 },
2229 { "PowerMac4,2", "Flat panel iMac",
2230 PMAC_TYPE_FLAT_PANEL_IMAC, pangea_features,
2231 PMAC_MB_CAN_SLEEP
2232 },
2233 { "PowerMac4,4", "eMac",
2234 PMAC_TYPE_EMAC, core99_features,
2235 PMAC_MB_MAY_SLEEP
2236 },
2237 { "PowerMac5,1", "PowerMac G4 Cube",
2238 PMAC_TYPE_CUBE, core99_features,
2239 PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
2240 },
2241 { "PowerMac6,1", "Flat panel iMac",
2242 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2243 PMAC_MB_MAY_SLEEP,
2244 },
2245 { "PowerMac6,3", "Flat panel iMac",
2246 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2247 PMAC_MB_MAY_SLEEP,
2248 },
2249 { "PowerMac6,4", "eMac",
2250 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2251 PMAC_MB_MAY_SLEEP,
2252 },
2253 { "PowerMac10,1", "Mac mini",
2254 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2255 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER,
2256 },
2257 { "iMac,1", "iMac (first generation)",
2258 PMAC_TYPE_ORIG_IMAC, paddington_features,
2259 0
2260 },
2261
2262 /*
2263	 * Xserves
2264 */
2265
2266 { "RackMac1,1", "XServe",
2267 PMAC_TYPE_RACKMAC, rackmac_features,
2268 0,
2269 },
2270 { "RackMac1,2", "XServe rev. 2",
2271 PMAC_TYPE_RACKMAC, rackmac_features,
2272 0,
2273 },
2274
2275 /*
2276 * Laptops
2277 */
2278
2279 { "AAPL,3400/2400", "PowerBook 3400",
2280 PMAC_TYPE_HOOPER, ohare_features,
2281 PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
2282 },
2283 { "AAPL,3500", "PowerBook 3500",
2284 PMAC_TYPE_KANGA, ohare_features,
2285 PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
2286 },
2287 { "AAPL,PowerBook1998", "PowerBook Wallstreet",
2288 PMAC_TYPE_WALLSTREET, heathrow_laptop_features,
2289 PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
2290 },
2291 { "PowerBook1,1", "PowerBook 101 (Lombard)",
2292 PMAC_TYPE_101_PBOOK, paddington_features,
2293 PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
2294 },
2295 { "PowerBook2,1", "iBook (first generation)",
2296 PMAC_TYPE_ORIG_IBOOK, core99_features,
2297 PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
2298 },
2299 { "PowerBook2,2", "iBook FireWire",
2300 PMAC_TYPE_FW_IBOOK, core99_features,
2301 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
2302 PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
2303 },
2304 { "PowerBook3,1", "PowerBook Pismo",
2305 PMAC_TYPE_PISMO, core99_features,
2306 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
2307 PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
2308 },
2309 { "PowerBook3,2", "PowerBook Titanium",
2310 PMAC_TYPE_TITANIUM, core99_features,
2311 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
2312 },
2313 { "PowerBook3,3", "PowerBook Titanium II",
2314 PMAC_TYPE_TITANIUM2, core99_features,
2315 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
2316 },
2317 { "PowerBook3,4", "PowerBook Titanium III",
2318 PMAC_TYPE_TITANIUM3, core99_features,
2319 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
2320 },
2321 { "PowerBook3,5", "PowerBook Titanium IV",
2322 PMAC_TYPE_TITANIUM4, core99_features,
2323 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
2324 },
2325 { "PowerBook4,1", "iBook 2",
2326 PMAC_TYPE_IBOOK2, pangea_features,
2327 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
2328 },
2329 { "PowerBook4,2", "iBook 2",
2330 PMAC_TYPE_IBOOK2, pangea_features,
2331 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
2332 },
2333 { "PowerBook4,3", "iBook 2 rev. 2",
2334 PMAC_TYPE_IBOOK2, pangea_features,
2335 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
2336 },
2337 { "PowerBook5,1", "PowerBook G4 17\"",
2338 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2339 PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2340 },
2341 { "PowerBook5,2", "PowerBook G4 15\"",
2342 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2343 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2344 },
2345 { "PowerBook5,3", "PowerBook G4 17\"",
2346 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2347 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2348 },
2349 { "PowerBook5,4", "PowerBook G4 15\"",
2350 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2351 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2352 },
2353 { "PowerBook5,5", "PowerBook G4 17\"",
2354 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2355 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2356 },
2357 { "PowerBook5,6", "PowerBook G4 15\"",
2358 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2359 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2360 },
2361 { "PowerBook5,7", "PowerBook G4 17\"",
2362 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2363 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2364 },
2365 { "PowerBook6,1", "PowerBook G4 12\"",
2366 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2367 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2368 },
2369 { "PowerBook6,2", "PowerBook G4",
2370 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2371 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2372 },
2373 { "PowerBook6,3", "iBook G4",
2374 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2375 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2376 },
2377 { "PowerBook6,4", "PowerBook G4 12\"",
2378 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2379 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2380 },
2381 { "PowerBook6,5", "iBook G4",
2382 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2383 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2384 },
2385 { "PowerBook6,7", "iBook G4",
2386 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2387 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2388 },
2389 { "PowerBook6,8", "PowerBook G4 12\"",
2390 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2391 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
2392 },
2393#else /* CONFIG_POWER4 */
2394 { "PowerMac7,2", "PowerMac G5",
2395 PMAC_TYPE_POWERMAC_G5, g5_features,
2396 0,
2397 },
2398#ifdef CONFIG_PPC64
2399 { "PowerMac7,3", "PowerMac G5",
2400 PMAC_TYPE_POWERMAC_G5, g5_features,
2401 0,
2402 },
2403 { "PowerMac8,1", "iMac G5",
2404 PMAC_TYPE_IMAC_G5, g5_features,
2405 0,
2406 },
2407 { "PowerMac9,1", "PowerMac G5",
2408 PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
2409 0,
2410 },
2411 { "RackMac3,1", "XServe G5",
2412 PMAC_TYPE_XSERVE_G5, g5_features,
2413 0,
2414 },
2415#endif /* CONFIG_PPC64 */
2416#endif /* CONFIG_POWER4 */
2417};
2418
2419/*
2420 * The toplevel feature_call callback
2421 */
2422long pmac_do_feature_call(unsigned int selector, ...)
2423{
2424 struct device_node *node;
2425 long param, value;
2426 int i;
2427 feature_call func = NULL;
2428 va_list args;
2429
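	/* Look the selector up in the board-specific feature table first,
	 * then fall back to the generic any_features table */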
2430 if (pmac_mb.features)
2431 for (i=0; pmac_mb.features[i].function; i++)
2432 if (pmac_mb.features[i].selector == selector) {
2433 func = pmac_mb.features[i].function;
2434 break;
2435 }
2436 if (!func)
2437 for (i=0; any_features[i].function; i++)
2438 if (any_features[i].selector == selector) {
2439 func = any_features[i].function;
2440 break;
2441 }
2442 if (!func)
2443 return -ENODEV;
2444
2445 va_start(args, selector);
2446 node = (struct device_node*)va_arg(args, void*);
2447 param = va_arg(args, long);
2448 value = va_arg(args, long);
2449 va_end(args);
2450
2451 return func(node, param, value);
2452}
2453
2454static int __init probe_motherboard(void)
2455{
2456 int i;
2457 struct macio_chip *macio = &macio_chips[0];
2458 const char *model = NULL;
2459 struct device_node *dt;
2460
2461	/* Look up the known motherboard type in the device-tree. First try
2462	 * an exact match on the "model" property, then try a "compatible"
2463	 * match if none is found.
2464 */
2465 dt = find_devices("device-tree");
2466 if (dt != NULL)
2467 model = (const char *) get_property(dt, "model", NULL);
2468 for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
2469 if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
2470 pmac_mb = pmac_mb_defs[i];
2471 goto found;
2472 }
2473 }
2474 for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
2475 if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
2476 pmac_mb = pmac_mb_defs[i];
2477 goto found;
2478 }
2479 }
2480
2481	/* Fall back to a selection based on the mac-io chip type */
2482 switch(macio->type) {
2483#ifndef CONFIG_POWER4
2484 case macio_grand_central:
2485 pmac_mb.model_id = PMAC_TYPE_PSURGE;
2486 pmac_mb.model_name = "Unknown PowerSurge";
2487 break;
2488 case macio_ohare:
2489 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE;
2490 pmac_mb.model_name = "Unknown OHare-based";
2491 break;
2492 case macio_heathrow:
2493 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW;
2494 pmac_mb.model_name = "Unknown Heathrow-based";
2495 pmac_mb.features = heathrow_desktop_features;
2496 break;
2497 case macio_paddington:
2498 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON;
2499 pmac_mb.model_name = "Unknown Paddington-based";
2500 pmac_mb.features = paddington_features;
2501 break;
2502 case macio_keylargo:
2503 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99;
2504 pmac_mb.model_name = "Unknown Keylargo-based";
2505 pmac_mb.features = core99_features;
2506 break;
2507 case macio_pangea:
2508 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA;
2509 pmac_mb.model_name = "Unknown Pangea-based";
2510 pmac_mb.features = pangea_features;
2511 break;
2512 case macio_intrepid:
2513 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID;
2514 pmac_mb.model_name = "Unknown Intrepid-based";
2515 pmac_mb.features = intrepid_features;
2516 break;
2517#else /* CONFIG_POWER4 */
2518 case macio_keylargo2:
2519 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
2520 pmac_mb.model_name = "Unknown K2-based";
2521 pmac_mb.features = g5_features;
2522 break;
2523#endif /* CONFIG_POWER4 */
2524 default:
2525 return -ENODEV;
2526 }
2527found:
2528#ifndef CONFIG_POWER4
2529 /* Fixup Hooper vs. Comet */
2530 if (pmac_mb.model_id == PMAC_TYPE_HOOPER) {
2531 u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4);
2532 if (!mach_id_ptr)
2533 return -ENODEV;
2534 /* Here, I used to disable the media-bay on comet. It
2535	 * appears this was wrong; the floppy connector is actually
2536 * a kind of media-bay and works with the current driver.
2537 */
2538 if (__raw_readl(mach_id_ptr) & 0x20000000UL)
2539 pmac_mb.model_id = PMAC_TYPE_COMET;
2540 iounmap(mach_id_ptr);
2541 }
2542#endif /* CONFIG_POWER4 */
2543
2544#ifdef CONFIG_6xx
2545 /* Set default value of powersave_nap on machines that support it.
2546	 * It appears that uninorth rev 3 has a problem with it, so we don't
2547 * enable it on those. In theory, the flush-on-lock property is
2548 * supposed to be set when not supported, but I'm not very confident
2549	 * that all Apple OF revs did it properly, so I do it the paranoid way.
2550 */
2551 while (uninorth_base && uninorth_rev > 3) {
2552 struct device_node *np = find_path_device("/cpus");
2553 if (!np || !np->child) {
2554 printk(KERN_WARNING "Can't find CPU(s) in device tree !\n");
2555 break;
2556 }
2557 np = np->child;
2558 /* Nap mode not supported on SMP */
2559 if (np->sibling)
2560 break;
2561 /* Nap mode not supported if flush-on-lock property is present */
2562 if (get_property(np, "flush-on-lock", NULL))
2563 break;
2564 powersave_nap = 1;
2565 printk(KERN_INFO "Processor NAP mode on idle enabled.\n");
2566 break;
2567 }
2568
2569	/* On CPUs that support it (750FX), default to low speed during
2570 * NAP mode
2571 */
2572 powersave_lowspeed = 1;
2573#endif /* CONFIG_6xx */
2574#ifdef CONFIG_POWER4
2575 powersave_nap = 1;
2576#endif
2577 /* Check for "mobile" machine */
2578 if (model && (strncmp(model, "PowerBook", 9) == 0
2579 || strncmp(model, "iBook", 5) == 0))
2580 pmac_mb.board_flags |= PMAC_MB_MOBILE;
2581
2582
2583 printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
2584 return 0;
2585}
2586
2587/* Initialize the Core99 UniNorth host bridge and memory controller
2588 */
2589static void __init probe_uninorth(void)
2590{
2591 unsigned long actrl;
2592
2593 /* Locate core99 Uni-N */
2594 uninorth_node = of_find_node_by_name(NULL, "uni-n");
2595 /* Locate G5 u3 */
2596 if (uninorth_node == NULL) {
2597 uninorth_node = of_find_node_by_name(NULL, "u3");
2598 uninorth_u3 = 1;
2599 }
2600 if (uninorth_node && uninorth_node->n_addrs > 0) {
2601 unsigned long address = uninorth_node->addrs[0].address;
2602 uninorth_base = ioremap(address, 0x40000);
2603 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
2604 if (uninorth_u3)
2605 u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
2606 } else
2607 uninorth_node = NULL;
2608
2609 if (!uninorth_node)
2610 return;
2611
2612 printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n",
2613 uninorth_u3 ? "U3" : "UniNorth", uninorth_rev);
2614 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
2615
2616	/* Set the arbiter QAck delay according to what Apple does
2617 */
2618 if (uninorth_rev < 0x11) {
2619 actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
2620 actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
2621 UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
2622 UN_OUT(UNI_N_ARB_CTRL, actrl);
2623 }
2624
2625 /* Some more magic as done by them in recent MacOS X on UniNorth
2626	 * revs 1.5 to 2.0 and Pangea. Seems to toggle the UniN Maxbus/PCI
2627 * memory timeout
2628 */
2629 if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0)
2630 UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
2631}
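
/*
 * The arbiter tweak above is a read-modify-write of a UniNorth register
 * through the UN_IN/UN_OUT accessors.  A small sketch of that pattern,
 * assuming uninorth_base has already been mapped by probe_uninorth();
 * the helper name and its callers are illustrative only.
 */
static void example_un_update_bits(unsigned int reg, u32 clear, u32 set)
{
	u32 val;

	if (uninorth_base == NULL)
		return;
	val = UN_IN(reg);
	val = (val & ~clear) | set;
	UN_OUT(reg, val);
}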
2632
2633static void __init probe_one_macio(const char *name, const char *compat, int type)
2634{
2635 struct device_node* node;
2636 int i;
2637 volatile u32 __iomem * base;
2638 u32* revp;
2639
2640 node = find_devices(name);
2641 if (!node || !node->n_addrs)
2642 return;
2643 if (compat)
2644 do {
2645 if (device_is_compatible(node, compat))
2646 break;
2647 node = node->next;
2648 } while (node);
2649 if (!node)
2650 return;
2651 for(i=0; i<MAX_MACIO_CHIPS; i++) {
2652 if (!macio_chips[i].of_node)
2653 break;
2654 if (macio_chips[i].of_node == node)
2655 return;
2656 }
2657 if (i >= MAX_MACIO_CHIPS) {
2658 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
2659 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
2660 return;
2661 }
2662 base = ioremap(node->addrs[0].address, node->addrs[0].size);
2663 if (!base) {
2664 printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
2665 return;
2666 }
2667 if (type == macio_keylargo) {
2668 u32 *did = (u32 *)get_property(node, "device-id", NULL);
2669 if (*did == 0x00000025)
2670 type = macio_pangea;
2671 if (*did == 0x0000003e)
2672 type = macio_intrepid;
2673 }
2674 macio_chips[i].of_node = node;
2675 macio_chips[i].type = type;
2676 macio_chips[i].base = base;
2677	macio_chips[i].flags = MACIO_FLAG_SCCA_ON | MACIO_FLAG_SCCB_ON;
2678 macio_chips[i].name = macio_names[type];
2679 revp = (u32 *)get_property(node, "revision-id", NULL);
2680 if (revp)
2681 macio_chips[i].rev = *revp;
2682 printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
2683 macio_names[type], macio_chips[i].rev, macio_chips[i].base);
2684}
2685
2686static int __init
2687probe_macios(void)
2688{
2689 /* Warning, ordering is important */
2690 probe_one_macio("gc", NULL, macio_grand_central);
2691 probe_one_macio("ohare", NULL, macio_ohare);
2692 probe_one_macio("pci106b,7", NULL, macio_ohareII);
2693 probe_one_macio("mac-io", "keylargo", macio_keylargo);
2694 probe_one_macio("mac-io", "paddington", macio_paddington);
2695 probe_one_macio("mac-io", "gatwick", macio_gatwick);
2696 probe_one_macio("mac-io", "heathrow", macio_heathrow);
2697 probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
2698
2699	/* Make sure the "main" macio chip appears first */
2700 if (macio_chips[0].type == macio_gatwick
2701 && macio_chips[1].type == macio_heathrow) {
2702 struct macio_chip temp = macio_chips[0];
2703 macio_chips[0] = macio_chips[1];
2704 macio_chips[1] = temp;
2705 }
2706 if (macio_chips[0].type == macio_ohareII
2707 && macio_chips[1].type == macio_ohare) {
2708 struct macio_chip temp = macio_chips[0];
2709 macio_chips[0] = macio_chips[1];
2710 macio_chips[1] = temp;
2711 }
2712 macio_chips[0].lbus.index = 0;
2713 macio_chips[1].lbus.index = 1;
2714
2715 return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
2716}
2717
2718static void __init
2719initial_serial_shutdown(struct device_node *np)
2720{
2721 int len;
2722 struct slot_names_prop {
2723 int count;
2724 char name[1];
2725 } *slots;
2726 char *conn;
2727 int port_type = PMAC_SCC_ASYNC;
2728 int modem = 0;
2729
2730 slots = (struct slot_names_prop *)get_property(np, "slot-names", &len);
2731 conn = get_property(np, "AAPL,connector", &len);
2732 if (conn && (strcmp(conn, "infrared") == 0))
2733 port_type = PMAC_SCC_IRDA;
2734 else if (device_is_compatible(np, "cobalt"))
2735 modem = 1;
2736 else if (slots && slots->count > 0) {
2737 if (strcmp(slots->name, "IrDA") == 0)
2738 port_type = PMAC_SCC_IRDA;
2739 else if (strcmp(slots->name, "Modem") == 0)
2740 modem = 1;
2741 }
2742 if (modem)
2743 pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0);
2744 pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, port_type, 0);
2745}
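
/*
 * The "slot-names" property decoded above starts with a 32-bit word
 * followed by packed NUL-terminated strings, which is why the struct
 * uses the name[1] trick and only the first string is compared.  A
 * sketch of walking every name under that layout assumption; the
 * function name is illustrative only.
 */
static void example_dump_slot_names(struct device_node *np)
{
	struct slot_names_prop {
		int	count;
		char	name[1];
	} *slots;
	char *s;
	int i, len;

	slots = (struct slot_names_prop *)get_property(np, "slot-names", &len);
	if (slots == NULL || slots->count <= 0)
		return;
	for (i = 0, s = slots->name; i < slots->count; i++) {
		printk(KERN_DEBUG "slot %d: %s\n", i, s);
		s += strlen(s) + 1;
	}
}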
2746
2747static void __init
2748set_initial_features(void)
2749{
2750 struct device_node *np;
2751
2752 /* That hack appears to be necessary for some StarMax motherboards
2753 * but I'm not too sure it was audited for side-effects on other
2754 * ohare based machines...
2755	 * Since I still have difficulties figuring out the right way to
2756	 * differentiate them all, and since that hack has been there for a long
2757 * time, I'll keep it around
2758 */
2759 if (macio_chips[0].type == macio_ohare && !find_devices("via-pmu")) {
2760 struct macio_chip *macio = &macio_chips[0];
2761 MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES);
2762 } else if (macio_chips[0].type == macio_ohare) {
2763 struct macio_chip *macio = &macio_chips[0];
2764 MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
2765 } else if (macio_chips[1].type == macio_ohare) {
2766 struct macio_chip *macio = &macio_chips[1];
2767 MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
2768 }
2769
2770#ifdef CONFIG_POWER4
2771 if (macio_chips[0].type == macio_keylargo2) {
2772#ifndef CONFIG_SMP
2773	/* On SMP machines running a UP kernel, we have the second CPU eating
2774 * bus cycles. We need to take it off the bus. This is done
2775 * from pmac_smp for SMP kernels running on one CPU
2776 */
2777 np = of_find_node_by_type(NULL, "cpu");
2778 if (np != NULL)
2779 np = of_find_node_by_type(np, "cpu");
2780 if (np != NULL) {
2781 g5_phy_disable_cpu1();
2782 of_node_put(np);
2783 }
2784#endif /* CONFIG_SMP */
2785 /* Enable GMAC for now for PCI probing. It will be disabled
2786 * later on after PCI probe
2787 */
2788 np = of_find_node_by_name(NULL, "ethernet");
2789 while(np) {
2790 if (device_is_compatible(np, "K2-GMAC"))
2791 g5_gmac_enable(np, 0, 1);
2792 np = of_find_node_by_name(np, "ethernet");
2793 }
2794
2795 /* Enable FW before PCI probe. Will be disabled later on
2796	 * Note: We should have a better way to check that we are
2797	 * dealing with the uninorth internal cell and not a PCI cell
2798 * on the external PCI. The code below works though.
2799 */
2800 np = of_find_node_by_name(NULL, "firewire");
2801 while(np) {
2802 if (device_is_compatible(np, "pci106b,5811")) {
2803 macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
2804 g5_fw_enable(np, 0, 1);
2805 }
2806 np = of_find_node_by_name(np, "firewire");
2807 }
2808 }
2809#else /* CONFIG_POWER4 */
2810
2811 if (macio_chips[0].type == macio_keylargo ||
2812 macio_chips[0].type == macio_pangea ||
2813 macio_chips[0].type == macio_intrepid) {
2814 /* Enable GMAC for now for PCI probing. It will be disabled
2815 * later on after PCI probe
2816 */
2817 np = of_find_node_by_name(NULL, "ethernet");
2818 while(np) {
2819 if (np->parent
2820 && device_is_compatible(np->parent, "uni-north")
2821 && device_is_compatible(np, "gmac"))
2822 core99_gmac_enable(np, 0, 1);
2823 np = of_find_node_by_name(np, "ethernet");
2824 }
2825
2826 /* Enable FW before PCI probe. Will be disabled later on
2827	 * Note: We should have a better way to check that we are
2828	 * dealing with the uninorth internal cell and not a PCI cell
2829 * on the external PCI. The code below works though.
2830 */
2831 np = of_find_node_by_name(NULL, "firewire");
2832 while(np) {
2833 if (np->parent
2834 && device_is_compatible(np->parent, "uni-north")
2835 && (device_is_compatible(np, "pci106b,18") ||
2836 device_is_compatible(np, "pci106b,30") ||
2837 device_is_compatible(np, "pci11c1,5811"))) {
2838 macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
2839 core99_firewire_enable(np, 0, 1);
2840 }
2841 np = of_find_node_by_name(np, "firewire");
2842 }
2843
2844 /* Enable ATA-100 before PCI probe. */
2845 np = of_find_node_by_name(NULL, "ata-6");
2846 while(np) {
2847 if (np->parent
2848 && device_is_compatible(np->parent, "uni-north")
2849 && device_is_compatible(np, "kauai-ata")) {
2850 core99_ata100_enable(np, 1);
2851 }
2852 np = of_find_node_by_name(np, "ata-6");
2853 }
2854
2855 /* Switch airport off */
2856 np = find_devices("radio");
2857 while(np) {
2858 if (np && np->parent == macio_chips[0].of_node) {
2859 macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
2860 core99_airport_enable(np, 0, 0);
2861 }
2862 np = np->next;
2863 }
2864 }
2865
2866 /* On all machines that support sound PM, switch sound off */
2867 if (macio_chips[0].of_node)
2868 pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE,
2869 macio_chips[0].of_node, 0, 0);
2870
2871 /* While on some desktop G3s, we turn it back on */
2872 if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow
2873 && (pmac_mb.model_id == PMAC_TYPE_GOSSAMER ||
2874 pmac_mb.model_id == PMAC_TYPE_SILK)) {
2875 struct macio_chip *macio = &macio_chips[0];
2876 MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
2877 MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
2878 }
2879
2880	/* Some machine models need the clock chip to be properly set up for
2881 * clock spreading now. This should be a platform function but we
2882 * don't do these at the moment
2883 */
2884 pmac_tweak_clock_spreading(1);
2885
2886#endif /* CONFIG_POWER4 */
2887
2888 /* On all machines, switch modem & serial ports off */
2889 np = find_devices("ch-a");
2890 while(np) {
2891 initial_serial_shutdown(np);
2892 np = np->next;
2893 }
2894 np = find_devices("ch-b");
2895 while(np) {
2896 initial_serial_shutdown(np);
2897 np = np->next;
2898 }
2899}
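
/*
 * set_initial_features() repeats the same walk several times: iterate
 * over nodes of a given name and act on those whose parent is the
 * uni-north internal bus and which match a given "compatible" string.
 * A sketch of that pattern with an illustrative callback; this helper
 * is not used by the code above.
 */
static void example_for_each_uninorth_child(const char *name,
					    const char *compat,
					    void (*fn)(struct device_node *np))
{
	struct device_node *np;

	np = of_find_node_by_name(NULL, name);
	while (np) {
		if (np->parent
		    && device_is_compatible(np->parent, "uni-north")
		    && device_is_compatible(np, compat))
			fn(np);
		np = of_find_node_by_name(np, name);
	}
}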
2900
2901void __init
2902pmac_feature_init(void)
2903{
2904 /* Detect the UniNorth memory controller */
2905 probe_uninorth();
2906
2907 /* Probe mac-io controllers */
2908 if (probe_macios()) {
2909 printk(KERN_WARNING "No mac-io chip found\n");
2910 return;
2911 }
2912
2913 /* Setup low-level i2c stuffs */
2914 pmac_init_low_i2c();
2915
2916 /* Probe machine type */
2917 if (probe_motherboard())
2918 printk(KERN_WARNING "Unknown PowerMac !\n");
2919
2920 /* Set some initial features (turn off some chips that will
2921 * be later turned on)
2922 */
2923 set_initial_features();
2924}
2925
2926int __init pmac_feature_late_init(void)
2927{
2928#if 0
2929 struct device_node *np;
2930
2931 /* Request some resources late */
2932 if (uninorth_node)
2933 request_OF_resource(uninorth_node, 0, NULL);
2934 np = find_devices("hammerhead");
2935 if (np)
2936 request_OF_resource(np, 0, NULL);
2937 np = find_devices("interrupt-controller");
2938 if (np)
2939 request_OF_resource(np, 0, NULL);
2940#endif
2941 return 0;
2942}
2943
2944device_initcall(pmac_feature_late_init);
2945
2946#if 0
2947static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
2948{
2949 int freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
2950 int bits[8] = { 8,16,0,32,2,4,0,0 };
2951 int freq = (frq >> 8) & 0xf;
2952
2953 if (freqs[freq] == 0)
2954 printk("%s: Unknown HT link frequency %x\n", name, freq);
2955 else
2956 printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
2957 name, freqs[freq],
2958 bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
2959}
2960
2961void __init pmac_check_ht_link(void)
2962{
2963 u32 ufreq, freq, ucfg, cfg;
2964 struct device_node *pcix_node;
2965 u8 px_bus, px_devfn;
2966 struct pci_controller *px_hose;
2967
2968 (void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
2969 ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
2970 ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
2971 dump_HT_speeds("U3 HyperTransport", cfg, freq);
2972
2973 pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
2974 if (pcix_node == NULL) {
2975 printk("No PCI-X bridge found\n");
2976 return;
2977 }
2978 if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) {
2979 printk("PCI-X bridge found but not matched to pci\n");
2980 return;
2981 }
2982 px_hose = pci_find_hose_for_OF_device(pcix_node);
2983 if (px_hose == NULL) {
2984 printk("PCI-X bridge found but not matched to host\n");
2985 return;
2986 }
2987 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
2988 early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
2989 dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
2990 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
2991 early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
2992 dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
2993}
2994#endif /* 0 */
2995
2996/*
2997 * Early video resume hook
2998 */
2999
3000static void (*pmac_early_vresume_proc)(void *data);
3001static void *pmac_early_vresume_data;
3002
3003void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
3004{
3005 if (_machine != _MACH_Pmac)
3006 return;
3007 preempt_disable();
3008 pmac_early_vresume_proc = proc;
3009 pmac_early_vresume_data = data;
3010 preempt_enable();
3011}
3012EXPORT_SYMBOL(pmac_set_early_video_resume);
3013
3014void pmac_call_early_video_resume(void)
3015{
3016 if (pmac_early_vresume_proc)
3017 pmac_early_vresume_proc(pmac_early_vresume_data);
3018}
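
/*
 * Sketch of how a framebuffer driver might hook this path; the callback
 * and its private data pointer are illustrative names only.
 */
static void example_fb_early_resume(void *data)
{
	/* re-program the display hardware as early as possible on wakeup */
}

static void example_fb_hook_resume(void *fb_private)
{
	pmac_set_early_video_resume(example_fb_early_resume, fb_private);
}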
3019
3020/*
3021 * AGP related suspend/resume code
3022 */
3023
3024static struct pci_dev *pmac_agp_bridge;
3025static int (*pmac_agp_suspend)(struct pci_dev *bridge);
3026static int (*pmac_agp_resume)(struct pci_dev *bridge);
3027
3028void pmac_register_agp_pm(struct pci_dev *bridge,
3029 int (*suspend)(struct pci_dev *bridge),
3030 int (*resume)(struct pci_dev *bridge))
3031{
3032 if (suspend || resume) {
3033 pmac_agp_bridge = bridge;
3034 pmac_agp_suspend = suspend;
3035 pmac_agp_resume = resume;
3036 return;
3037 }
3038 if (bridge != pmac_agp_bridge)
3039 return;
3040 pmac_agp_suspend = pmac_agp_resume = NULL;
3041 return;
3042}
3043EXPORT_SYMBOL(pmac_register_agp_pm);
3044
3045void pmac_suspend_agp_for_card(struct pci_dev *dev)
3046{
3047 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
3048 return;
3049 if (pmac_agp_bridge->bus != dev->bus)
3050 return;
3051 pmac_agp_suspend(pmac_agp_bridge);
3052}
3053EXPORT_SYMBOL(pmac_suspend_agp_for_card);
3054
3055void pmac_resume_agp_for_card(struct pci_dev *dev)
3056{
3057 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
3058 return;
3059 if (pmac_agp_bridge->bus != dev->bus)
3060 return;
3061 pmac_agp_resume(pmac_agp_bridge);
3062}
3063EXPORT_SYMBOL(pmac_resume_agp_for_card);
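
/*
 * A possible calling convention, with illustrative names: the AGP
 * bridge driver registers its suspend/resume methods once at probe
 * time, and a graphics driver brackets its own power transitions with
 * the per-card helpers above.
 */
static int example_agp_suspend(struct pci_dev *bridge) { return 0; }
static int example_agp_resume(struct pci_dev *bridge) { return 0; }

static void example_agp_probe(struct pci_dev *bridge)
{
	pmac_register_agp_pm(bridge, example_agp_suspend, example_agp_resume);
}

static void example_gfx_suspend(struct pci_dev *gfx_card)
{
	pmac_suspend_agp_for_card(gfx_card);
	/* ... then power the card itself down ... */
}

static void example_gfx_resume(struct pci_dev *gfx_card)
{
	/* ... power the card back up first ... */
	pmac_resume_agp_for_card(gfx_card);
}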
diff --git a/arch/ppc64/kernel/pmac_low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f3f39e8e337a..f3f39e8e337a 100644
--- a/arch/ppc64/kernel/pmac_low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
diff --git a/arch/ppc64/kernel/pmac_nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index e32a902236e3..4042e2f06ee0 100644
--- a/arch/ppc64/kernel/pmac_nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -15,10 +15,13 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/stddef.h> 16#include <linux/stddef.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/nvram.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/adb.h>
24#include <linux/pmu.h>
22#include <linux/bootmem.h> 25#include <linux/bootmem.h>
23#include <linux/completion.h> 26#include <linux/completion.h>
24#include <linux/spinlock.h> 27#include <linux/spinlock.h>
@@ -72,20 +75,38 @@ struct core99_header {
72/* 75/*
73 * Read and write the non-volatile RAM on PowerMacs and CHRP machines. 76 * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
74 */ 77 */
78static int nvram_naddrs;
75static volatile unsigned char *nvram_data; 79static volatile unsigned char *nvram_data;
80static int is_core_99;
76static int core99_bank = 0; 81static int core99_bank = 0;
82static int nvram_partitions[3];
77// XXX Turn that into a sem 83// XXX Turn that into a sem
78static DEFINE_SPINLOCK(nv_lock); 84static DEFINE_SPINLOCK(nv_lock);
79 85
86extern int pmac_newworld;
80extern int system_running; 87extern int system_running;
81 88
82static int (*core99_write_bank)(int bank, u8* datas); 89static int (*core99_write_bank)(int bank, u8* datas);
83static int (*core99_erase_bank)(int bank); 90static int (*core99_erase_bank)(int bank);
84 91
85static char *nvram_image __pmacdata; 92static char *nvram_image;
86 93
87 94
88static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index) 95static unsigned char core99_nvram_read_byte(int addr)
96{
97 if (nvram_image == NULL)
98 return 0xff;
99 return nvram_image[addr];
100}
101
102static void core99_nvram_write_byte(int addr, unsigned char val)
103{
104 if (nvram_image == NULL)
105 return;
106 nvram_image[addr] = val;
107}
108
109static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
89{ 110{
90 int i; 111 int i;
91 112
@@ -103,7 +124,7 @@ static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index)
103 return count; 124 return count;
104} 125}
105 126
106static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index) 127static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
107{ 128{
108 int i; 129 int i;
109 130
@@ -121,14 +142,95 @@ static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index)
121 return count; 142 return count;
122} 143}
123 144
124static ssize_t __pmac core99_nvram_size(void) 145static ssize_t core99_nvram_size(void)
125{ 146{
126 if (nvram_image == NULL) 147 if (nvram_image == NULL)
127 return -ENODEV; 148 return -ENODEV;
128 return NVRAM_SIZE; 149 return NVRAM_SIZE;
129} 150}
130 151
131static u8 __pmac chrp_checksum(struct chrp_header* hdr) 152#ifdef CONFIG_PPC32
153static volatile unsigned char *nvram_addr;
154static int nvram_mult;
155
156static unsigned char direct_nvram_read_byte(int addr)
157{
158 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
159}
160
161static void direct_nvram_write_byte(int addr, unsigned char val)
162{
163 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
164}
165
166
167static unsigned char indirect_nvram_read_byte(int addr)
168{
169 unsigned char val;
170 unsigned long flags;
171
172 spin_lock_irqsave(&nv_lock, flags);
173 out_8(nvram_addr, addr >> 5);
174 val = in_8(&nvram_data[(addr & 0x1f) << 4]);
175 spin_unlock_irqrestore(&nv_lock, flags);
176
177 return val;
178}
179
180static void indirect_nvram_write_byte(int addr, unsigned char val)
181{
182 unsigned long flags;
183
184 spin_lock_irqsave(&nv_lock, flags);
185 out_8(nvram_addr, addr >> 5);
186 out_8(&nvram_data[(addr & 0x1f) << 4], val);
187 spin_unlock_irqrestore(&nv_lock, flags);
188}
189
190
191#ifdef CONFIG_ADB_PMU
192
193static void pmu_nvram_complete(struct adb_request *req)
194{
195 if (req->arg)
196 complete((struct completion *)req->arg);
197}
198
199static unsigned char pmu_nvram_read_byte(int addr)
200{
201 struct adb_request req;
202 DECLARE_COMPLETION(req_complete);
203
204 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
205 if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
206 (addr >> 8) & 0xff, addr & 0xff))
207 return 0xff;
208 if (system_state == SYSTEM_RUNNING)
209 wait_for_completion(&req_complete);
210 while (!req.complete)
211 pmu_poll();
212 return req.reply[0];
213}
214
215static void pmu_nvram_write_byte(int addr, unsigned char val)
216{
217 struct adb_request req;
218 DECLARE_COMPLETION(req_complete);
219
220 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
221 if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
222 (addr >> 8) & 0xff, addr & 0xff, val))
223 return;
224 if (system_state == SYSTEM_RUNNING)
225 wait_for_completion(&req_complete);
226 while (!req.complete)
227 pmu_poll();
228}
229
230#endif /* CONFIG_ADB_PMU */
231#endif /* CONFIG_PPC32 */
232
233static u8 chrp_checksum(struct chrp_header* hdr)
132{ 234{
133 u8 *ptr; 235 u8 *ptr;
134 u16 sum = hdr->signature; 236 u16 sum = hdr->signature;
@@ -139,7 +241,7 @@ static u8 __pmac chrp_checksum(struct chrp_header* hdr)
139 return sum; 241 return sum;
140} 242}
141 243
142static u32 __pmac core99_calc_adler(u8 *buffer) 244static u32 core99_calc_adler(u8 *buffer)
143{ 245{
144 int cnt; 246 int cnt;
145 u32 low, high; 247 u32 low, high;
@@ -161,7 +263,7 @@ static u32 __pmac core99_calc_adler(u8 *buffer)
161 return (high << 16) | low; 263 return (high << 16) | low;
162} 264}
163 265
164static u32 __pmac core99_check(u8* datas) 266static u32 core99_check(u8* datas)
165{ 267{
166 struct core99_header* hdr99 = (struct core99_header*)datas; 268 struct core99_header* hdr99 = (struct core99_header*)datas;
167 269
@@ -180,7 +282,7 @@ static u32 __pmac core99_check(u8* datas)
180 return hdr99->generation; 282 return hdr99->generation;
181} 283}
182 284
183static int __pmac sm_erase_bank(int bank) 285static int sm_erase_bank(int bank)
184{ 286{
185 int stat, i; 287 int stat, i;
186 unsigned long timeout; 288 unsigned long timeout;
@@ -194,7 +296,7 @@ static int __pmac sm_erase_bank(int bank)
194 timeout = 0; 296 timeout = 0;
195 do { 297 do {
196 if (++timeout > 1000000) { 298 if (++timeout > 1000000) {
197 printk(KERN_ERR "nvram: Sharp/Miron flash erase timeout !\n"); 299 printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
198 break; 300 break;
199 } 301 }
200 out_8(base, SM_FLASH_CMD_READ_STATUS); 302 out_8(base, SM_FLASH_CMD_READ_STATUS);
@@ -212,7 +314,7 @@ static int __pmac sm_erase_bank(int bank)
212 return 0; 314 return 0;
213} 315}
214 316
215static int __pmac sm_write_bank(int bank, u8* datas) 317static int sm_write_bank(int bank, u8* datas)
216{ 318{
217 int i, stat = 0; 319 int i, stat = 0;
218 unsigned long timeout; 320 unsigned long timeout;
@@ -247,7 +349,7 @@ static int __pmac sm_write_bank(int bank, u8* datas)
247 return 0; 349 return 0;
248} 350}
249 351
250static int __pmac amd_erase_bank(int bank) 352static int amd_erase_bank(int bank)
251{ 353{
252 int i, stat = 0; 354 int i, stat = 0;
253 unsigned long timeout; 355 unsigned long timeout;
@@ -294,7 +396,7 @@ static int __pmac amd_erase_bank(int bank)
294 return 0; 396 return 0;
295} 397}
296 398
297static int __pmac amd_write_bank(int bank, u8* datas) 399static int amd_write_bank(int bank, u8* datas)
298{ 400{
299 int i, stat = 0; 401 int i, stat = 0;
300 unsigned long timeout; 402 unsigned long timeout;
@@ -340,12 +442,49 @@ static int __pmac amd_write_bank(int bank, u8* datas)
340 return 0; 442 return 0;
341} 443}
342 444
445static void __init lookup_partitions(void)
446{
447 u8 buffer[17];
448 int i, offset;
449 struct chrp_header* hdr;
450
451 if (pmac_newworld) {
452 nvram_partitions[pmac_nvram_OF] = -1;
453 nvram_partitions[pmac_nvram_XPRAM] = -1;
454 nvram_partitions[pmac_nvram_NR] = -1;
455 hdr = (struct chrp_header *)buffer;
456
457 offset = 0;
458 buffer[16] = 0;
459 do {
460 for (i=0;i<16;i++)
461 buffer[i] = ppc_md.nvram_read_val(offset+i);
462 if (!strcmp(hdr->name, "common"))
463 nvram_partitions[pmac_nvram_OF] = offset + 0x10;
464 if (!strcmp(hdr->name, "APL,MacOS75")) {
465 nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
466 nvram_partitions[pmac_nvram_NR] = offset + 0x110;
467 }
468 offset += (hdr->len * 0x10);
469 } while(offset < NVRAM_SIZE);
470 } else {
471 nvram_partitions[pmac_nvram_OF] = 0x1800;
472 nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
473 nvram_partitions[pmac_nvram_NR] = 0x1400;
474 }
475 DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
476 DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
477 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
478}
343 479
344static int __pmac core99_nvram_sync(void) 480static void core99_nvram_sync(void)
345{ 481{
346 struct core99_header* hdr99; 482 struct core99_header* hdr99;
347 unsigned long flags; 483 unsigned long flags;
348 484
485 if (!is_core_99 || !nvram_data || !nvram_image)
486 return;
487
349 spin_lock_irqsave(&nv_lock, flags); 488 spin_lock_irqsave(&nv_lock, flags);
350 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE, 489 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
351 NVRAM_SIZE)) 490 NVRAM_SIZE))
@@ -370,32 +509,28 @@ static int __pmac core99_nvram_sync(void)
370 bail: 509 bail:
371 spin_unlock_irqrestore(&nv_lock, flags); 510 spin_unlock_irqrestore(&nv_lock, flags);
372 511
373 return 0; 512#ifdef DEBUG
513 mdelay(2000);
514#endif
374} 515}
375 516
376int __init pmac_nvram_init(void) 517static int __init core99_nvram_setup(struct device_node *dp)
377{ 518{
378 struct device_node *dp;
379 u32 gen_bank0, gen_bank1;
380 int i; 519 int i;
520 u32 gen_bank0, gen_bank1;
381 521
382 dp = find_devices("nvram"); 522 if (nvram_naddrs < 1) {
383 if (dp == NULL) { 523 printk(KERN_ERR "nvram: no address\n");
384 printk(KERN_ERR "Can't find NVRAM device\n"); 524 return -EINVAL;
385 return -ENODEV;
386 }
387 if (!device_is_compatible(dp, "nvram,flash")) {
388 printk(KERN_ERR "Incompatible type of NVRAM\n");
389 return -ENXIO;
390 } 525 }
391
392 nvram_image = alloc_bootmem(NVRAM_SIZE); 526 nvram_image = alloc_bootmem(NVRAM_SIZE);
393 if (nvram_image == NULL) { 527 if (nvram_image == NULL) {
394 printk(KERN_ERR "nvram: can't allocate ram image\n"); 528 printk(KERN_ERR "nvram: can't allocate ram image\n");
395 return -ENOMEM; 529 return -ENOMEM;
396 } 530 }
397 nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2); 531 nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
398 532 nvram_naddrs = 1; /* Make sure we get the correct case */
533
399 DBG("nvram: Checking bank 0...\n"); 534 DBG("nvram: Checking bank 0...\n");
400 535
401 gen_bank0 = core99_check((u8 *)nvram_data); 536 gen_bank0 = core99_check((u8 *)nvram_data);
@@ -408,11 +543,12 @@ int __init pmac_nvram_init(void)
408 for (i=0; i<NVRAM_SIZE; i++) 543 for (i=0; i<NVRAM_SIZE; i++)
409 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE]; 544 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
410 545
546 ppc_md.nvram_read_val = core99_nvram_read_byte;
547 ppc_md.nvram_write_val = core99_nvram_write_byte;
411 ppc_md.nvram_read = core99_nvram_read; 548 ppc_md.nvram_read = core99_nvram_read;
412 ppc_md.nvram_write = core99_nvram_write; 549 ppc_md.nvram_write = core99_nvram_write;
413 ppc_md.nvram_size = core99_nvram_size; 550 ppc_md.nvram_size = core99_nvram_size;
414 ppc_md.nvram_sync = core99_nvram_sync; 551 ppc_md.nvram_sync = core99_nvram_sync;
415
416 /* 552 /*
417 * Maybe we could be smarter here though making an exclusive list 553 * Maybe we could be smarter here though making an exclusive list
418 * of known flash chips is a bit nasty as older OF didn't provide us 554 * of known flash chips is a bit nasty as older OF didn't provide us
@@ -427,67 +563,81 @@ int __init pmac_nvram_init(void)
427 core99_erase_bank = sm_erase_bank; 563 core99_erase_bank = sm_erase_bank;
428 core99_write_bank = sm_write_bank; 564 core99_write_bank = sm_write_bank;
429 } 565 }
430
431 return 0; 566 return 0;
432} 567}
433 568
434int __pmac pmac_get_partition(int partition) 569int __init pmac_nvram_init(void)
435{ 570{
436 struct nvram_partition *part; 571 struct device_node *dp;
437 const char *name; 572 int err = 0;
438 int sig; 573
439 574 nvram_naddrs = 0;
440 switch(partition) { 575
441 case pmac_nvram_OF: 576 dp = find_devices("nvram");
442 name = "common"; 577 if (dp == NULL) {
443 sig = NVRAM_SIG_SYS; 578 printk(KERN_ERR "Can't find NVRAM device\n");
444 break;
445 case pmac_nvram_XPRAM:
446 name = "APL,MacOS75";
447 sig = NVRAM_SIG_OS;
448 break;
449 case pmac_nvram_NR:
450 default:
451 /* Oldworld stuff */
452 return -ENODEV; 579 return -ENODEV;
453 } 580 }
581 nvram_naddrs = dp->n_addrs;
582 is_core_99 = device_is_compatible(dp, "nvram,flash");
583 if (is_core_99)
584 err = core99_nvram_setup(dp);
585#ifdef CONFIG_PPC32
586 else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
587 nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
588 dp->addrs[0].size);
589 nvram_mult = 1;
590 ppc_md.nvram_read_val = direct_nvram_read_byte;
591 ppc_md.nvram_write_val = direct_nvram_write_byte;
592 } else if (nvram_naddrs == 1) {
593 nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
594 nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
595 ppc_md.nvram_read_val = direct_nvram_read_byte;
596 ppc_md.nvram_write_val = direct_nvram_write_byte;
597 } else if (nvram_naddrs == 2) {
598 nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
599 nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
600 ppc_md.nvram_read_val = indirect_nvram_read_byte;
601 ppc_md.nvram_write_val = indirect_nvram_write_byte;
602 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
603#ifdef CONFIG_ADB_PMU
604 nvram_naddrs = -1;
605 ppc_md.nvram_read_val = pmu_nvram_read_byte;
606 ppc_md.nvram_write_val = pmu_nvram_write_byte;
607#endif /* CONFIG_ADB_PMU */
608 }
609#endif
610 else {
611 printk(KERN_ERR "Incompatible type of NVRAM\n");
612 return -ENXIO;
613 }
614 lookup_partitions();
615 return err;
616}
454 617
455 part = nvram_find_partition(sig, name); 618int pmac_get_partition(int partition)
456 if (part == NULL) 619{
457 return 0; 620 return nvram_partitions[partition];
458
459 return part->index;
460} 621}
461 622
462u8 __pmac pmac_xpram_read(int xpaddr) 623u8 pmac_xpram_read(int xpaddr)
463{ 624{
464 int offset = pmac_get_partition(pmac_nvram_XPRAM); 625 int offset = pmac_get_partition(pmac_nvram_XPRAM);
465 loff_t index;
466 u8 buf;
467 ssize_t count;
468 626
469 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) 627 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
470 return 0xff; 628 return 0xff;
471 index = offset + xpaddr;
472 629
473 count = ppc_md.nvram_read(&buf, 1, &index); 630 return ppc_md.nvram_read_val(xpaddr + offset);
474 if (count != 1)
475 return 0xff;
476 return buf;
477} 631}
478 632
479void __pmac pmac_xpram_write(int xpaddr, u8 data) 633void pmac_xpram_write(int xpaddr, u8 data)
480{ 634{
481 int offset = pmac_get_partition(pmac_nvram_XPRAM); 635 int offset = pmac_get_partition(pmac_nvram_XPRAM);
482 loff_t index;
483 u8 buf;
484 636
485 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) 637 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
486 return; 638 return;
487 index = offset + xpaddr;
488 buf = data;
489 639
490 ppc_md.nvram_write(&buf, 1, &index); 640 ppc_md.nvram_write_val(xpaddr + offset, data);
491} 641}
492 642
493EXPORT_SYMBOL(pmac_get_partition); 643EXPORT_SYMBOL(pmac_get_partition);
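
/*
 * With the per-byte accessors wired into ppc_md, the partitions located
 * by lookup_partitions() are reached through plain offsets, in the same
 * way pmac_xpram_read() does above.  A sketch, assuming
 * pmac_nvram_init() has already run; the function name is illustrative.
 */
static u8 example_read_of_nvram_byte(int addr)
{
	int of_part = pmac_get_partition(pmac_nvram_OF);

	if (of_part < 0)
		return 0xff;
	return ppc_md.nvram_read_val(of_part + addr);
}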
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
new file mode 100644
index 000000000000..8f818d092e2b
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -0,0 +1,1170 @@
1/*
2 * Support for PCI bridges found on Power Macintoshes.
3 *
 4 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
5 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/string.h>
17#include <linux/init.h>
18#include <linux/bootmem.h>
19
20#include <asm/sections.h>
21#include <asm/io.h>
22#include <asm/prom.h>
23#include <asm/pci-bridge.h>
24#include <asm/machdep.h>
25#include <asm/pmac_feature.h>
26#include <asm/grackle.h>
27#ifdef CONFIG_PPC64
28#include <asm/iommu.h>
29#include <asm/ppc-pci.h>
30#endif
31
32#undef DEBUG
33
34#ifdef DEBUG
35#define DBG(x...) printk(x)
36#else
37#define DBG(x...)
38#endif
39
40static int add_bridge(struct device_node *dev);
41
42/* XXX Could be per-controller, but I don't think we risk anything by
43 * assuming we won't have both UniNorth and Bandit */
44static int has_uninorth;
45#ifdef CONFIG_PPC64
46static struct pci_controller *u3_agp;
47static struct pci_controller *u3_ht;
48#endif /* CONFIG_PPC64 */
49
50extern u8 pci_cache_line_size;
51extern int pcibios_assign_bus_offset;
52
53struct device_node *k2_skiplist[2];
54
55/*
56 * Magic constants for enabling cache coherency in the bandit/PSX bridge.
57 */
58#define BANDIT_DEVID_2 8
59#define BANDIT_REVID 3
60
61#define BANDIT_DEVNUM 11
62#define BANDIT_MAGIC 0x50
63#define BANDIT_COHERENT 0x40
64
65static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
66{
67 for (; node != 0;node = node->sibling) {
68 int * bus_range;
69 unsigned int *class_code;
70 int len;
71
72 /* For PCI<->PCI bridges or CardBus bridges, we go down */
73 class_code = (unsigned int *) get_property(node, "class-code", NULL);
74 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
75 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
76 continue;
77 bus_range = (int *) get_property(node, "bus-range", &len);
78 if (bus_range != NULL && len > 2 * sizeof(int)) {
79 if (bus_range[1] > higher)
80 higher = bus_range[1];
81 }
82 higher = fixup_one_level_bus_range(node->child, higher);
83 }
84 return higher;
85}
86
87/* This routine fixes the "bus-range" property of all bridges in the
88 * system since they tend to have their "last" member wrong on macs
89 *
90 * Note that the bus numbers manipulated here are OF bus numbers, they
91 * are not Linux bus numbers.
92 */
93static void __init fixup_bus_range(struct device_node *bridge)
94{
95 int * bus_range;
96 int len;
97
98 /* Lookup the "bus-range" property for the hose */
99 bus_range = (int *) get_property(bridge, "bus-range", &len);
100 if (bus_range == NULL || len < 2 * sizeof(int)) {
101 printk(KERN_WARNING "Can't get bus-range for %s\n",
102 bridge->full_name);
103 return;
104 }
105 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
106}
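
/*
 * A worked example of the fixup above, assuming an OF tree where the
 * host bridge advertises bus-range <0 0> but carries a PCI<->PCI bridge
 * child with bus-range <1 2>:
 *
 *   before:  /pci             bus-range = <0 0>
 *            /pci/pci-bridge  bus-range = <1 2>
 *   after:   /pci             bus-range = <0 2>
 *
 * fixup_one_level_bus_range() recurses into bridge children, returns the
 * highest "last" bus number it finds, and fixup_bus_range() writes that
 * back into the hose's own property.
 */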
107
108/*
109 * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
110 *
111 * The "Bandit" version is present in all early PCI PowerMacs,
112 * and up to the first ones using Grackle. Some machines may
113 * have 2 bandit controllers (2 PCI busses).
114 *
115 * "Chaos" is used in some "Bandit"-type machines as a bridge
116 * for the separate display bus. It is accessed the same
117 * way as bandit, but cannot be probed for devices. It therefore
118 * has its own config access functions.
119 *
120 * The "UniNorth" version is present in all Core99 machines
121 * (iBook, G4, new IMacs, and all the recent Apple machines).
122 * It contains 3 controllers in one ASIC.
123 *
124 * The U3 is the bridge used on G5 machines. It contains an
125 * AGP bus which is dealt with by the old UniNorth access routines
126 * and a HyperTransport bus which uses its own set of access
127 * functions.
128 */
129
130#define MACRISC_CFA0(devfn, off) \
131	((1 << (unsigned long)PCI_SLOT(devfn)) \
132	| (((unsigned long)PCI_FUNC(devfn)) << 8) \
133 | (((unsigned long)(off)) & 0xFCUL))
134
135#define MACRISC_CFA1(bus, devfn, off) \
136 ((((unsigned long)(bus)) << 16) \
137 |(((unsigned long)(devfn)) << 8) \
138 |(((unsigned long)(off)) & 0xFCUL) \
139 |1UL)
140
141static unsigned long macrisc_cfg_access(struct pci_controller* hose,
142 u8 bus, u8 dev_fn, u8 offset)
143{
144 unsigned int caddr;
145
146 if (bus == hose->first_busno) {
147 if (dev_fn < (11 << 3))
148 return 0;
149 caddr = MACRISC_CFA0(dev_fn, offset);
150 } else
151 caddr = MACRISC_CFA1(bus, dev_fn, offset);
152
153 /* Uninorth will return garbage if we don't read back the value ! */
154 do {
155 out_le32(hose->cfg_addr, caddr);
156 } while (in_le32(hose->cfg_addr) != caddr);
157
158 offset &= has_uninorth ? 0x07 : 0x03;
159 return ((unsigned long)hose->cfg_data) + offset;
160}
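
/*
 * A worked example of the encodings above, using an assumed device at
 * bus 0, slot 13, function 0, config offset 0x10 (devfn = (13 << 3) | 0
 * = 0x68):
 *
 *   type 0, root bus:      MACRISC_CFA0(0x68, 0x10)
 *                          = (1 << 13) | (0 << 8) | 0x10   = 0x00002010
 *   type 1, behind bus 1:  MACRISC_CFA1(1, 0x68, 0x10)
 *                          = (1 << 16) | (0x68 << 8) | 0x10 | 1
 *                          = 0x00016811
 *
 * The low offset bits are then re-applied to cfg_data (masked with 0x07
 * on UniNorth, 0x03 on Bandit/Chaos) to select the byte lane.
 */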
161
162static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
163 int offset, int len, u32 *val)
164{
165 struct pci_controller *hose;
166 unsigned long addr;
167
168 hose = pci_bus_to_host(bus);
169 if (hose == NULL)
170 return PCIBIOS_DEVICE_NOT_FOUND;
171
172 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
173 if (!addr)
174 return PCIBIOS_DEVICE_NOT_FOUND;
175 /*
176 * Note: the caller has already checked that offset is
177 * suitably aligned and that len is 1, 2 or 4.
178 */
179 switch (len) {
180 case 1:
181 *val = in_8((u8 *)addr);
182 break;
183 case 2:
184 *val = in_le16((u16 *)addr);
185 break;
186 default:
187 *val = in_le32((u32 *)addr);
188 break;
189 }
190 return PCIBIOS_SUCCESSFUL;
191}
192
193static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
194 int offset, int len, u32 val)
195{
196 struct pci_controller *hose;
197 unsigned long addr;
198
199 hose = pci_bus_to_host(bus);
200 if (hose == NULL)
201 return PCIBIOS_DEVICE_NOT_FOUND;
202
203 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
204 if (!addr)
205 return PCIBIOS_DEVICE_NOT_FOUND;
206 /*
207 * Note: the caller has already checked that offset is
208 * suitably aligned and that len is 1, 2 or 4.
209 */
210 switch (len) {
211 case 1:
212 out_8((u8 *)addr, val);
213 (void) in_8((u8 *)addr);
214 break;
215 case 2:
216 out_le16((u16 *)addr, val);
217 (void) in_le16((u16 *)addr);
218 break;
219 default:
220 out_le32((u32 *)addr, val);
221 (void) in_le32((u32 *)addr);
222 break;
223 }
224 return PCIBIOS_SUCCESSFUL;
225}
226
227static struct pci_ops macrisc_pci_ops =
228{
229 macrisc_read_config,
230 macrisc_write_config
231};
232
233#ifdef CONFIG_PPC32
234/*
235 * Verify that a specific (bus, dev_fn) exists on chaos
236 */
237static int
238chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
239{
240 struct device_node *np;
241 u32 *vendor, *device;
242
243 np = pci_busdev_to_OF_node(bus, devfn);
244 if (np == NULL)
245 return PCIBIOS_DEVICE_NOT_FOUND;
246
247 vendor = (u32 *)get_property(np, "vendor-id", NULL);
248 device = (u32 *)get_property(np, "device-id", NULL);
249 if (vendor == NULL || device == NULL)
250 return PCIBIOS_DEVICE_NOT_FOUND;
251
252 if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
253 && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
254 return PCIBIOS_BAD_REGISTER_NUMBER;
255
256 return PCIBIOS_SUCCESSFUL;
257}
258
259static int
260chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
261 int len, u32 *val)
262{
263 int result = chaos_validate_dev(bus, devfn, offset);
264 if (result == PCIBIOS_BAD_REGISTER_NUMBER)
265 *val = ~0U;
266 if (result != PCIBIOS_SUCCESSFUL)
267 return result;
268 return macrisc_read_config(bus, devfn, offset, len, val);
269}
270
271static int
272chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
273 int len, u32 val)
274{
275 int result = chaos_validate_dev(bus, devfn, offset);
276 if (result != PCIBIOS_SUCCESSFUL)
277 return result;
278 return macrisc_write_config(bus, devfn, offset, len, val);
279}
280
281static struct pci_ops chaos_pci_ops =
282{
283 chaos_read_config,
284 chaos_write_config
285};
286
287static void __init setup_chaos(struct pci_controller *hose,
288 struct reg_property *addr)
289{
290 /* assume a `chaos' bridge */
291 hose->ops = &chaos_pci_ops;
292 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
293 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
294}
295#else
296#define setup_chaos(hose, addr)
297#endif /* CONFIG_PPC32 */
298
299#ifdef CONFIG_PPC64
300/*
301 * These versions of U3 HyperTransport config space access ops do not
302 * implement self-view of the HT host yet
303 */
304
305/*
306 * This function deals with some "special cases" devices.
307 *
308 * 0 -> No special case
309 * 1 -> Skip the device but act as if the access was successful
310 * (return 0xff's on reads, eventually, cache config space
311 * accesses in a later version)
312 * -1 -> Hide the device (unsuccessful access)
313 */
314static int u3_ht_skip_device(struct pci_controller *hose,
315 struct pci_bus *bus, unsigned int devfn)
316{
317 struct device_node *busdn, *dn;
318 int i;
319
320 /* We only allow config cycles to devices that are in OF device-tree
321 * as we are apparently having some weird things going on with some
322 * revs of K2 on recent G5s
323 */
324 if (bus->self)
325 busdn = pci_device_to_OF_node(bus->self);
326 else
327 busdn = hose->arch_data;
328 for (dn = busdn->child; dn; dn = dn->sibling)
329 if (dn->data && PCI_DN(dn)->devfn == devfn)
330 break;
331 if (dn == NULL)
332 return -1;
333
334 /*
335 * When a device in K2 is powered down, we die on config
336 * cycle accesses. Fix that here.
337 */
338 for (i=0; i<2; i++)
339 if (k2_skiplist[i] == dn)
340 return 1;
341
342 return 0;
343}
344
345#define U3_HT_CFA0(devfn, off) \
346	((((unsigned long)devfn) << 8) | (off))
347#define U3_HT_CFA1(bus, devfn, off) \
348 (U3_HT_CFA0(devfn, off) \
349 + (((unsigned long)bus) << 16) \
350 + 0x01000000UL)
351
352static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
353 u8 bus, u8 devfn, u8 offset)
354{
355 if (bus == hose->first_busno) {
356 /* For now, we don't self probe U3 HT bridge */
357 if (PCI_SLOT(devfn) == 0)
358 return 0;
359 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
360 } else
361 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
362}
363
364static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
365 int offset, int len, u32 *val)
366{
367 struct pci_controller *hose;
368 unsigned long addr;
369
370 hose = pci_bus_to_host(bus);
371 if (hose == NULL)
372 return PCIBIOS_DEVICE_NOT_FOUND;
373
374 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
375 if (!addr)
376 return PCIBIOS_DEVICE_NOT_FOUND;
377
378 switch (u3_ht_skip_device(hose, bus, devfn)) {
379 case 0:
380 break;
381 case 1:
382 switch (len) {
383 case 1:
384 *val = 0xff; break;
385 case 2:
386 *val = 0xffff; break;
387 default:
388 *val = 0xfffffffful; break;
389 }
390 return PCIBIOS_SUCCESSFUL;
391 default:
392 return PCIBIOS_DEVICE_NOT_FOUND;
393 }
394
395 /*
396 * Note: the caller has already checked that offset is
397 * suitably aligned and that len is 1, 2 or 4.
398 */
399 switch (len) {
400 case 1:
401 *val = in_8((u8 *)addr);
402 break;
403 case 2:
404 *val = in_le16((u16 *)addr);
405 break;
406 default:
407 *val = in_le32((u32 *)addr);
408 break;
409 }
410 return PCIBIOS_SUCCESSFUL;
411}
412
413static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
414 int offset, int len, u32 val)
415{
416 struct pci_controller *hose;
417 unsigned long addr;
418
419 hose = pci_bus_to_host(bus);
420 if (hose == NULL)
421 return PCIBIOS_DEVICE_NOT_FOUND;
422
423 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
424 if (!addr)
425 return PCIBIOS_DEVICE_NOT_FOUND;
426
427 switch (u3_ht_skip_device(hose, bus, devfn)) {
428 case 0:
429 break;
430 case 1:
431 return PCIBIOS_SUCCESSFUL;
432 default:
433 return PCIBIOS_DEVICE_NOT_FOUND;
434 }
435
436 /*
437 * Note: the caller has already checked that offset is
438 * suitably aligned and that len is 1, 2 or 4.
439 */
440 switch (len) {
441 case 1:
442 out_8((u8 *)addr, val);
443 (void) in_8((u8 *)addr);
444 break;
445 case 2:
446 out_le16((u16 *)addr, val);
447 (void) in_le16((u16 *)addr);
448 break;
449 default:
450 out_le32((u32 *)addr, val);
451 (void) in_le32((u32 *)addr);
452 break;
453 }
454 return PCIBIOS_SUCCESSFUL;
455}
456
457static struct pci_ops u3_ht_pci_ops =
458{
459 u3_ht_read_config,
460 u3_ht_write_config
461};
462#endif /* CONFIG_PPC64 */
463
464#ifdef CONFIG_PPC32
465/*
466 * For a bandit bridge, turn on cache coherency if necessary.
467 * N.B. we could clean this up using the hose ops directly.
468 */
469static void __init init_bandit(struct pci_controller *bp)
470{
471 unsigned int vendev, magic;
472 int rev;
473
474 /* read the word at offset 0 in config space for device 11 */
475 out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
476 udelay(2);
477 vendev = in_le32(bp->cfg_data);
478 if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
479 PCI_VENDOR_ID_APPLE) {
480 /* read the revision id */
481 out_le32(bp->cfg_addr,
482 (1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
483 udelay(2);
484 rev = in_8(bp->cfg_data);
485 if (rev != BANDIT_REVID)
486 printk(KERN_WARNING
487 "Unknown revision %d for bandit\n", rev);
488 } else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
489 printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
490 return;
491 }
492
493 /* read the word at offset 0x50 */
494 out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
495 udelay(2);
496 magic = in_le32(bp->cfg_data);
497 if ((magic & BANDIT_COHERENT) != 0)
498 return;
499 magic |= BANDIT_COHERENT;
500 udelay(2);
501 out_le32(bp->cfg_data, magic);
502 printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
503}
504
505/*
506 * Tweak the PCI-PCI bridge chip on the blue & white G3s.
507 */
508static void __init init_p2pbridge(void)
509{
510 struct device_node *p2pbridge;
511 struct pci_controller* hose;
512 u8 bus, devfn;
513 u16 val;
514
515 /* XXX it would be better here to identify the specific
516 PCI-PCI bridge chip we have. */
517 if ((p2pbridge = find_devices("pci-bridge")) == 0
518 || p2pbridge->parent == NULL
519 || strcmp(p2pbridge->parent->name, "pci") != 0)
520 return;
521 if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
522 DBG("Can't find PCI infos for PCI<->PCI bridge\n");
523 return;
524 }
525 /* Warning: At this point, we have not yet renumbered all busses.
526	 * So we must use OF walking to find our hose
527 */
528 hose = pci_find_hose_for_OF_device(p2pbridge);
529 if (!hose) {
530 DBG("Can't find hose for PCI<->PCI bridge\n");
531 return;
532 }
533 if (early_read_config_word(hose, bus, devfn,
534 PCI_BRIDGE_CONTROL, &val) < 0) {
535 printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
536 return;
537 }
538 val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
539 early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
540}
541
542/*
543 * Some Apple desktop machines have a NEC PD720100A USB2 controller
544 * on the motherboard. Open Firmware, on these, will disable the
545 * EHCI part of it so it behaves like a pair of OHCI's. This fixup
546 * code re-enables it ;)
547 */
548static void __init fixup_nec_usb2(void)
549{
550 struct device_node *nec;
551
552 for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) {
553 struct pci_controller *hose;
554 u32 data, *prop;
555 u8 bus, devfn;
556
557 prop = (u32 *)get_property(nec, "vendor-id", NULL);
558 if (prop == NULL)
559 continue;
560 if (0x1033 != *prop)
561 continue;
562 prop = (u32 *)get_property(nec, "device-id", NULL);
563 if (prop == NULL)
564 continue;
565 if (0x0035 != *prop)
566 continue;
567 prop = (u32 *)get_property(nec, "reg", NULL);
568 if (prop == NULL)
569 continue;
570 devfn = (prop[0] >> 8) & 0xff;
571 bus = (prop[0] >> 16) & 0xff;
572 if (PCI_FUNC(devfn) != 0)
573 continue;
574 hose = pci_find_hose_for_OF_device(nec);
575 if (!hose)
576 continue;
577 early_read_config_dword(hose, bus, devfn, 0xe4, &data);
578 if (data & 1UL) {
579 printk("Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n");
580 data &= ~1UL;
581 early_write_config_dword(hose, bus, devfn, 0xe4, data);
582 early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE,
583 nec->intrs[0].line);
584 }
585 }
586}
587
588static void __init setup_bandit(struct pci_controller *hose,
589 struct reg_property *addr)
590{
591 hose->ops = &macrisc_pci_ops;
592 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
593 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
594 init_bandit(hose);
595}
596
597static int __init setup_uninorth(struct pci_controller *hose,
598 struct reg_property *addr)
599{
600 pci_assign_all_buses = 1;
601 has_uninorth = 1;
602 hose->ops = &macrisc_pci_ops;
603 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
604 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
605 /* We "know" that the bridge at f2000000 has the PCI slots. */
606 return addr->address == 0xf2000000;
607}
608#endif
609
610#ifdef CONFIG_PPC64
611static void __init setup_u3_agp(struct pci_controller* hose)
612{
613 /* On G5, we move AGP up to high bus number so we don't need
614 * to reassign bus numbers for HT. If we ever have P2P bridges
615 * on AGP, we'll have to move pci_assign_all_busses to the
616 * pci_controller structure so we enable it for AGP and not for
617	 * HT children.
618 * We hard code the address because of the different size of
619 * the reg address cell, we shall fix that by killing struct
620 * reg_property and using some accessor functions instead
621 */
622 hose->first_busno = 0xf0;
623 hose->last_busno = 0xff;
624 has_uninorth = 1;
625 hose->ops = &macrisc_pci_ops;
626 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
627 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
628
629 u3_agp = hose;
630}
631
632static void __init setup_u3_ht(struct pci_controller* hose)
633{
634 struct device_node *np = (struct device_node *)hose->arch_data;
635 int i, cur;
636
637 hose->ops = &u3_ht_pci_ops;
638
639 /* We hard code the address because of the different size of
640 * the reg address cell, we shall fix that by killing struct
641 * reg_property and using some accessor functions instead
642 */
643 hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
644
645 /*
646 * /ht node doesn't expose a "ranges" property, so we "remove" regions that
647 * have been allocated to AGP. So far, this version of the code doesn't assign
648 * any of the 0xfxxxxxxx "fine" memory regions to /ht.
649 * We need to fix that sooner or later by either parsing all child "ranges"
650 * properties or figuring out the U3 address space decoding logic and
651 * then read its configuration register (if any).
652 */
653 hose->io_base_phys = 0xf4000000;
654 hose->pci_io_size = 0x00400000;
655 hose->io_resource.name = np->full_name;
656 hose->io_resource.start = 0;
657 hose->io_resource.end = 0x003fffff;
658 hose->io_resource.flags = IORESOURCE_IO;
659 hose->pci_mem_offset = 0;
660 hose->first_busno = 0;
661 hose->last_busno = 0xef;
662 hose->mem_resources[0].name = np->full_name;
663 hose->mem_resources[0].start = 0x80000000;
664 hose->mem_resources[0].end = 0xefffffff;
665 hose->mem_resources[0].flags = IORESOURCE_MEM;
666
667 u3_ht = hose;
668
669 if (u3_agp == NULL) {
670 DBG("U3 has no AGP, using full resource range\n");
671 return;
672 }
673
674 /* We "remove" the AGP resources from the resources allocated to HT, that
675	 * is we create "holes". However, that code makes assumptions that so far
676 * happen to be true (cross fingers...), typically that resources in the
677 * AGP node are properly ordered
678 */
679 cur = 0;
680 for (i=0; i<3; i++) {
681 struct resource *res = &u3_agp->mem_resources[i];
682 if (res->flags != IORESOURCE_MEM)
683 continue;
684 /* We don't care about "fine" resources */
685 if (res->start >= 0xf0000000)
686 continue;
687 /* Check if it's just a matter of "shrinking" us in one direction */
688 if (hose->mem_resources[cur].start == res->start) {
689 DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
690 cur, hose->mem_resources[cur].start, res->end + 1);
691 hose->mem_resources[cur].start = res->end + 1;
692 continue;
693 }
694 if (hose->mem_resources[cur].end == res->end) {
695 DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
696 cur, hose->mem_resources[cur].end, res->start - 1);
697 hose->mem_resources[cur].end = res->start - 1;
698 continue;
699 }
700 /* No, it's not the case, we need a hole */
701 if (cur == 2) {
702 /* not enough resources for a hole, we drop part of the range */
703 printk(KERN_WARNING "Running out of resources for /ht host !\n");
704 hose->mem_resources[cur].end = res->start - 1;
705 continue;
706 }
707 cur++;
708 DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
709 cur-1, res->start - 1, cur, res->end + 1);
710 hose->mem_resources[cur].name = np->full_name;
711 hose->mem_resources[cur].flags = IORESOURCE_MEM;
712 hose->mem_resources[cur].start = res->end + 1;
713 hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
714 hose->mem_resources[cur-1].end = res->start - 1;
715 }
716}
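
/*
 * A worked example of the hole carving above, with assumed (typical but
 * purely illustrative) numbers: the HT hose starts with a single memory
 * window 0x80000000..0xefffffff.  If the AGP hose owns
 * 0xa0000000..0xafffffff, neither end lines up, so a hole is punched and
 * the HT hose ends up with:
 *
 *   mem_resources[0] = 0x80000000..0x9fffffff
 *   mem_resources[1] = 0xb0000000..0xefffffff
 *
 * Had the AGP range instead started at 0x80000000, the first window
 * would simply have been shrunk from the bottom.
 */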
717
718/* XXX this needs to be converged between ppc32 and ppc64... */
719static struct pci_controller * __init pcibios_alloc_controller(void)
720{
721 struct pci_controller *hose;
722
723 hose = alloc_bootmem(sizeof(struct pci_controller));
724 if (hose)
725 pci_setup_pci_controller(hose);
726 return hose;
727}
728#endif
729
730/*
731 * We assume that if we have a G3 powermac, we have one bridge called
732 * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
733 * if we have one or more bandit or chaos bridges, we don't have a MPC106.
734 */
735static int __init add_bridge(struct device_node *dev)
736{
737 int len;
738 struct pci_controller *hose;
739#ifdef CONFIG_PPC32
740 struct reg_property *addr;
741#endif
742 char *disp_name;
743 int *bus_range;
744 int primary = 1;
745
746 DBG("Adding PCI host bridge %s\n", dev->full_name);
747
748#ifdef CONFIG_PPC32
749 /* XXX fix this */
750 addr = (struct reg_property *) get_property(dev, "reg", &len);
751 if (addr == NULL || len < sizeof(*addr)) {
752 printk(KERN_WARNING "Can't use %s: no address\n",
753 dev->full_name);
754 return -ENODEV;
755 }
756#endif
757 bus_range = (int *) get_property(dev, "bus-range", &len);
758 if (bus_range == NULL || len < 2 * sizeof(int)) {
759 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
760 dev->full_name);
761 }
762
763 hose = pcibios_alloc_controller();
764 if (!hose)
765 return -ENOMEM;
766 hose->arch_data = dev;
767 hose->first_busno = bus_range ? bus_range[0] : 0;
768 hose->last_busno = bus_range ? bus_range[1] : 0xff;
769
770 disp_name = NULL;
771#ifdef CONFIG_POWER4
772 if (device_is_compatible(dev, "u3-agp")) {
773 setup_u3_agp(hose);
774 disp_name = "U3-AGP";
775 primary = 0;
776 } else if (device_is_compatible(dev, "u3-ht")) {
777 setup_u3_ht(hose);
778 disp_name = "U3-HT";
779 primary = 1;
780 }
781 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
782 disp_name, hose->first_busno, hose->last_busno);
783#else
784 if (device_is_compatible(dev, "uni-north")) {
785 primary = setup_uninorth(hose, addr);
786 disp_name = "UniNorth";
787 } else if (strcmp(dev->name, "pci") == 0) {
788 /* XXX assume this is a mpc106 (grackle) */
789 setup_grackle(hose);
790 disp_name = "Grackle (MPC106)";
791 } else if (strcmp(dev->name, "bandit") == 0) {
792 setup_bandit(hose, addr);
793 disp_name = "Bandit";
794 } else if (strcmp(dev->name, "chaos") == 0) {
795 setup_chaos(hose, addr);
796 disp_name = "Chaos";
797 primary = 0;
798 }
799 printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n",
800 disp_name, addr->address, hose->first_busno, hose->last_busno);
801#endif
802 DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
803 hose, hose->cfg_addr, hose->cfg_data);
804
805 /* Interpret the "ranges" property */
806 /* This also maps the I/O region and sets isa_io/mem_base */
807 pci_process_bridge_OF_ranges(hose, dev, primary);
808
809 /* Fixup "bus-range" OF property */
810 fixup_bus_range(dev);
811
812 return 0;
813}
814
815static void __init
816pcibios_fixup_OF_interrupts(void)
817{
818 struct pci_dev* dev = NULL;
819
820 /*
821 * Open Firmware often doesn't initialize the
822 * PCI_INTERRUPT_LINE config register properly, so we
823 * should find the device node and apply the interrupt
824 * obtained from the OF device-tree
825 */
826 for_each_pci_dev(dev) {
827 struct device_node *node;
828 node = pci_device_to_OF_node(dev);
829 /* this is the node, see if it has interrupts */
830 if (node && node->n_intrs > 0)
831 dev->irq = node->intrs[0].line;
832 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
833 }
834}
835
836void __init
837pmac_pcibios_fixup(void)
838{
839 /* Fixup interrupts according to OF tree */
840 pcibios_fixup_OF_interrupts();
841}
842
843#ifdef CONFIG_PPC64
844static void __init pmac_fixup_phb_resources(void)
845{
846 struct pci_controller *hose, *tmp;
847
848 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
849 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
850 hose->global_number,
851 hose->io_resource.start, hose->io_resource.end);
852 }
853}
854#endif
855
856void __init pmac_pci_init(void)
857{
858 struct device_node *np, *root;
859 struct device_node *ht = NULL;
860
861 root = of_find_node_by_path("/");
862 if (root == NULL) {
863 printk(KERN_CRIT "pmac_pci_init: can't find root "
864 "of device tree\n");
865 return;
866 }
867 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
868 if (np->name == NULL)
869 continue;
870 if (strcmp(np->name, "bandit") == 0
871 || strcmp(np->name, "chaos") == 0
872 || strcmp(np->name, "pci") == 0) {
873 if (add_bridge(np) == 0)
874 of_node_get(np);
875 }
876 if (strcmp(np->name, "ht") == 0) {
877 of_node_get(np);
878 ht = np;
879 }
880 }
881 of_node_put(root);
882
883#ifdef CONFIG_PPC64
884 /* Probe HT last as it relies on the AGP resources being already
885 * set up
886 */
887 if (ht && add_bridge(ht) != 0)
888 of_node_put(ht);
889
890 /*
891 * We need to call pci_setup_phb_io for the HT bridge first
892 * so it gets the I/O port numbers starting at 0, and we
893 * need to call it for the AGP bridge after that so it gets
894 * small positive I/O port numbers.
895 */
896 if (u3_ht)
897 pci_setup_phb_io(u3_ht, 1);
898 if (u3_agp)
899 pci_setup_phb_io(u3_agp, 0);
900
901 /*
902 * On ppc64, fixup the IO resources on our host bridges as
903 * the common code does it only for children of the host bridges
904 */
905 pmac_fixup_phb_resources();
906
907 /* Setup the linkage between OF nodes and PHBs */
908 pci_devs_phb_init();
909
910 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
911 * assume there is no P2P bridge on the AGP bus, which should
912 * hopefully be a safe assumption.
913 */
914 if (u3_agp) {
915 struct device_node *np = u3_agp->arch_data;
916 PCI_DN(np)->busno = 0xf0;
917 for (np = np->child; np; np = np->sibling)
918 PCI_DN(np)->busno = 0xf0;
919 }
920
921 /* map in PCI I/O space */
922 phbs_remap_io();
923
924 /* pmac_check_ht_link(); */
925
926 /* Tell pci.c to not use the common resource allocation mechanism */
927 pci_probe_only = 1;
928
929 /* Allow all IO */
930 io_page_mask = -1;
931
932#else /* CONFIG_PPC64 */
933 init_p2pbridge();
934 fixup_nec_usb2();
935
936 /* We are still having some issues with the Xserve G4; enabling
937 * an offset between bus numbers and domains when we assign
938 * all busses should help for now
939 */
940 if (pci_assign_all_buses)
941 pcibios_assign_bus_offset = 0x10;
942#endif
943}
944
945int
946pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
947{
948 struct device_node* node;
949 int updatecfg = 0;
950 int uninorth_child;
951
952 node = pci_device_to_OF_node(dev);
953
954 /* We don't want to enable USB controllers absent from the OF tree
955 * (iBook second controller)
956 */
957 if (dev->vendor == PCI_VENDOR_ID_APPLE
958 && (dev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x10))
959 && !node) {
960 printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
961 pci_name(dev));
962 return -EINVAL;
963 }
964
965 if (!node)
966 return 0;
967
968 uninorth_child = node->parent &&
969 device_is_compatible(node->parent, "uni-north");
970
971 /* FireWire & GMAC were disabled after the PCI probe; now that the
972 * driver is claiming them, we must re-enable them.
973 */
974 if (uninorth_child && !strcmp(node->name, "firewire") &&
975 (device_is_compatible(node, "pci106b,18") ||
976 device_is_compatible(node, "pci106b,30") ||
977 device_is_compatible(node, "pci11c1,5811"))) {
978 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
979 pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
980 updatecfg = 1;
981 }
982 if (uninorth_child && !strcmp(node->name, "ethernet") &&
983 device_is_compatible(node, "gmac")) {
984 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
985 updatecfg = 1;
986 }
987
988 if (updatecfg) {
989 u16 cmd;
990
991 /*
992 * Make sure PCI is correctly configured
993 *
994 * We use the old pci_bios versions of the functions since, by
995 * default, gmac is not powered up, and so will be absent
996 * from the kernel's initial PCI lookup.
997 *
998 * Should be replaced by the new 2.4 PCI mechanisms, which
999 * really register the device.
1000 */
1001 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1002 cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
1003 | PCI_COMMAND_INVALIDATE;
1004 pci_write_config_word(dev, PCI_COMMAND, cmd);
1005 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
1006 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
1007 L1_CACHE_BYTES >> 2);
1008 }
1009
1010 return 0;
1011}
1012
1013 /* We power down some devices after they have been probed. They'll
1014 * be powered back on later
1015 */
1016void __init pmac_pcibios_after_init(void)
1017{
1018 struct device_node* nd;
1019
1020#ifdef CONFIG_BLK_DEV_IDE
1021 struct pci_dev *dev = NULL;
1022
1023 /* OF fails to initialize IDE controllers on macs
1024 * (and maybe other machines)
1025 *
1026 * Ideally, this should be moved to the IDE layer, but we need
1027 * to check specifically with Andre Hedrick how to do it cleanly
1028 * since the common IDE code seem to care about the fact that the
1029 * BIOS may have disabled a controller.
1030 *
1031 * -- BenH
1032 */
1033 for_each_pci_dev(dev) {
1034 if ((dev->class >> 16) == PCI_BASE_CLASS_STORAGE)
1035 pci_enable_device(dev);
1036 }
1037#endif /* CONFIG_BLK_DEV_IDE */
1038
1039 nd = find_devices("firewire");
1040 while (nd) {
1041 if (nd->parent && (device_is_compatible(nd, "pci106b,18") ||
1042 device_is_compatible(nd, "pci106b,30") ||
1043 device_is_compatible(nd, "pci11c1,5811"))
1044 && device_is_compatible(nd->parent, "uni-north")) {
1045 pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
1046 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
1047 }
1048 nd = nd->next;
1049 }
1050 nd = find_devices("ethernet");
1051 while (nd) {
1052 if (nd->parent && device_is_compatible(nd, "gmac")
1053 && device_is_compatible(nd->parent, "uni-north"))
1054 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
1055 nd = nd->next;
1056 }
1057}
1058
1059#ifdef CONFIG_PPC32
1060void pmac_pci_fixup_cardbus(struct pci_dev* dev)
1061{
1062 if (_machine != _MACH_Pmac)
1063 return;
1064 /*
1065 * Fix the interrupt routing on the various cardbus bridges
1066 * used on powerbooks
1067 */
1068 if (dev->vendor != PCI_VENDOR_ID_TI)
1069 return;
1070 if (dev->device == PCI_DEVICE_ID_TI_1130 ||
1071 dev->device == PCI_DEVICE_ID_TI_1131) {
1072 u8 val;
1073 /* Enable PCI interrupt */
1074 if (pci_read_config_byte(dev, 0x91, &val) == 0)
1075 pci_write_config_byte(dev, 0x91, val | 0x30);
1076 /* Disable ISA interrupt mode */
1077 if (pci_read_config_byte(dev, 0x92, &val) == 0)
1078 pci_write_config_byte(dev, 0x92, val & ~0x06);
1079 }
1080 if (dev->device == PCI_DEVICE_ID_TI_1210 ||
1081 dev->device == PCI_DEVICE_ID_TI_1211 ||
1082 dev->device == PCI_DEVICE_ID_TI_1410 ||
1083 dev->device == PCI_DEVICE_ID_TI_1510) {
1084 u8 val;
1085 /* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
1086 signal out the MFUNC0 pin */
1087 if (pci_read_config_byte(dev, 0x8c, &val) == 0)
1088 pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2);
1089 /* Disable ISA interrupt mode */
1090 if (pci_read_config_byte(dev, 0x92, &val) == 0)
1091 pci_write_config_byte(dev, 0x92, val & ~0x06);
1092 }
1093}
1094
1095DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
1096
1097void pmac_pci_fixup_pciata(struct pci_dev* dev)
1098{
1099 u8 progif = 0;
1100
1101 /*
1102 * On PowerMacs, we try to switch any PCI ATA controller to
1103 * fully native mode
1104 */
1105 if (_machine != _MACH_Pmac)
1106 return;
1107 /* Some controllers don't have the class IDE */
1108 if (dev->vendor == PCI_VENDOR_ID_PROMISE)
1109 switch(dev->device) {
1110 case PCI_DEVICE_ID_PROMISE_20246:
1111 case PCI_DEVICE_ID_PROMISE_20262:
1112 case PCI_DEVICE_ID_PROMISE_20263:
1113 case PCI_DEVICE_ID_PROMISE_20265:
1114 case PCI_DEVICE_ID_PROMISE_20267:
1115 case PCI_DEVICE_ID_PROMISE_20268:
1116 case PCI_DEVICE_ID_PROMISE_20269:
1117 case PCI_DEVICE_ID_PROMISE_20270:
1118 case PCI_DEVICE_ID_PROMISE_20271:
1119 case PCI_DEVICE_ID_PROMISE_20275:
1120 case PCI_DEVICE_ID_PROMISE_20276:
1121 case PCI_DEVICE_ID_PROMISE_20277:
1122 goto good;
1123 }
1124 /* Others, check PCI class */
1125 if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
1126 return;
1127 good:
1128 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1129 if ((progif & 5) != 5) {
1130 printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev));
1131 (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
1132 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
1133 (progif & 5) != 5)
1134 printk(KERN_ERR "Rewrite of PROGIF failed !\n");
1135 }
1136}
1137DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
1138#endif
1139
1140/*
1141 * Disable the second function on K2-SATA (it's broken)
1142 * and disable the IO BARs on the first one
1143 */
1144static void fixup_k2_sata(struct pci_dev* dev)
1145{
1146 int i;
1147 u16 cmd;
1148
1149 if (PCI_FUNC(dev->devfn) > 0) {
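		/* function > 0 is the broken second SATA function: turn off
		 * IO and memory decoding and clear all six BARs */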
1150 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1151 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
1152 pci_write_config_word(dev, PCI_COMMAND, cmd);
1153 for (i = 0; i < 6; i++) {
1154 dev->resource[i].start = dev->resource[i].end = 0;
1155 dev->resource[i].flags = 0;
1156 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
1157 }
1158 } else {
1159 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1160 cmd &= ~PCI_COMMAND_IO;
1161 pci_write_config_word(dev, PCI_COMMAND, cmd);
1162 for (i = 0; i < 5; i++) {
1163 dev->resource[i].start = dev->resource[i].end = 0;
1164 dev->resource[i].flags = 0;
1165 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
1166 }
1167 }
1168}
1169DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
1170
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
new file mode 100644
index 000000000000..0037a8c8c81f
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -0,0 +1,678 @@
1/*
2 * Support for the interrupt controllers found on Power Macintosh,
3 * currently Apple's "Grand Central" interrupt controller in all
4 * its incarnations. OpenPIC support used on newer machines is
5 * in a separate file
6 *
7 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
8 *
9 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 *
16 */
17
18#include <linux/config.h>
19#include <linux/stddef.h>
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/signal.h>
23#include <linux/pci.h>
24#include <linux/interrupt.h>
25#include <linux/sysdev.h>
26#include <linux/adb.h>
27#include <linux/pmu.h>
28#include <linux/module.h>
29
30#include <asm/sections.h>
31#include <asm/io.h>
32#include <asm/smp.h>
33#include <asm/prom.h>
34#include <asm/pci-bridge.h>
35#include <asm/time.h>
36#include <asm/pmac_feature.h>
37#include <asm/mpic.h>
38
39#include "pmac.h"
40
41/*
42 * XXX this should be in xmon.h, but putting it there means xmon.h
43 * has to include <linux/interrupt.h> (to get irqreturn_t), which
44 * causes all sorts of problems. -- paulus
45 */
46extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
47
48#ifdef CONFIG_PPC32
49struct pmac_irq_hw {
50 unsigned int event;
51 unsigned int enable;
52 unsigned int ack;
53 unsigned int level;
54};
55
56/* Default addresses */
57static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
58 (struct pmac_irq_hw *) 0xf3000020,
59 (struct pmac_irq_hw *) 0xf3000010,
60 (struct pmac_irq_hw *) 0xf4000020,
61 (struct pmac_irq_hw *) 0xf4000010,
62};
63
64#define GC_LEVEL_MASK 0x3ff00000
65#define OHARE_LEVEL_MASK 0x1ff00000
66#define HEATHROW_LEVEL_MASK 0x1ff00000
67
68static int max_irqs;
69static int max_real_irqs;
70static u32 level_mask[4];
71
72static DEFINE_SPINLOCK(pmac_pic_lock);
73
74#define GATWICK_IRQ_POOL_SIZE 10
75static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
76
77/*
78 * Mark an irq as "lost". This is only used on the pmac
79 * since it can lose interrupts (see pmac_set_irq_mask).
80 * -- Cort
81 */
82void
83__set_lost(unsigned long irq_nr, int nokick)
84{
85 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
86 atomic_inc(&ppc_n_lost_interrupts);
87 if (!nokick)
88 set_dec(1);
89 }
90}
91
92static void
93pmac_mask_and_ack_irq(unsigned int irq_nr)
94{
95 unsigned long bit = 1UL << (irq_nr & 0x1f);
96 int i = irq_nr >> 5;
97 unsigned long flags;
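	/* interrupts are grouped 32 per controller bank: i selects the
	 * bank, bit the line within it */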
98
99 if ((unsigned)irq_nr >= max_irqs)
100 return;
101
102 clear_bit(irq_nr, ppc_cached_irq_mask);
103 if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
104 atomic_dec(&ppc_n_lost_interrupts);
105 spin_lock_irqsave(&pmac_pic_lock, flags);
106 out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
107 out_le32(&pmac_irq_hw[i]->ack, bit);
108 do {
109 /* make sure ack gets to controller before we enable
110 interrupts */
111 mb();
112 } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
113 != (ppc_cached_irq_mask[i] & bit));
114 spin_unlock_irqrestore(&pmac_pic_lock, flags);
115}
116
117static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
118{
119 unsigned long bit = 1UL << (irq_nr & 0x1f);
120 int i = irq_nr >> 5;
121 unsigned long flags;
122
123 if ((unsigned)irq_nr >= max_irqs)
124 return;
125
126 spin_lock_irqsave(&pmac_pic_lock, flags);
127 /* enable unmasked interrupts */
128 out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
129
130 do {
131 /* make sure mask gets to controller before we
132 return to user */
133 mb();
134 } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
135 != (ppc_cached_irq_mask[i] & bit));
136
137 /*
138 * Unfortunately, setting the bit in the enable register
139 * when the device interrupt is already on *doesn't* set
140 * the bit in the flag register or request another interrupt.
141 */
142 if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
143 __set_lost((ulong)irq_nr, nokicklost);
144 spin_unlock_irqrestore(&pmac_pic_lock, flags);
145}
146
147/* When an irq gets requested for the first client, if it's an
148 * edge interrupt, we clear any previous one on the controller
149 */
150static unsigned int pmac_startup_irq(unsigned int irq_nr)
151{
152 unsigned long bit = 1UL << (irq_nr & 0x1f);
153 int i = irq_nr >> 5;
154
155 if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
156 out_le32(&pmac_irq_hw[i]->ack, bit);
157 set_bit(irq_nr, ppc_cached_irq_mask);
158 pmac_set_irq_mask(irq_nr, 0);
159
160 return 0;
161}
162
163static void pmac_mask_irq(unsigned int irq_nr)
164{
165 clear_bit(irq_nr, ppc_cached_irq_mask);
166 pmac_set_irq_mask(irq_nr, 0);
167 mb();
168}
169
170static void pmac_unmask_irq(unsigned int irq_nr)
171{
172 set_bit(irq_nr, ppc_cached_irq_mask);
173 pmac_set_irq_mask(irq_nr, 0);
174}
175
176static void pmac_end_irq(unsigned int irq_nr)
177{
178 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
179 && irq_desc[irq_nr].action) {
180 set_bit(irq_nr, ppc_cached_irq_mask);
181 pmac_set_irq_mask(irq_nr, 1);
182 }
183}
184
185
186struct hw_interrupt_type pmac_pic = {
187 .typename = " PMAC-PIC ",
188 .startup = pmac_startup_irq,
189 .enable = pmac_unmask_irq,
190 .disable = pmac_mask_irq,
191 .ack = pmac_mask_and_ack_irq,
192 .end = pmac_end_irq,
193};
194
195struct hw_interrupt_type gatwick_pic = {
196 .typename = " GATWICK ",
197 .startup = pmac_startup_irq,
198 .enable = pmac_unmask_irq,
199 .disable = pmac_mask_irq,
200 .ack = pmac_mask_and_ack_irq,
201 .end = pmac_end_irq,
202};
203
204static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
205{
206 int irq, bits;
207
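	/* scan the cascaded controller's banks (32 irqs each) from the top down */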
208 for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
209 int i = irq >> 5;
210 bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
211 /* We must read level interrupts from the level register */
212 bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
213 bits &= ppc_cached_irq_mask[i];
214 if (bits == 0)
215 continue;
216 irq += __ilog2(bits);
217 __do_IRQ(irq, regs);
218 return IRQ_HANDLED;
219 }
220 printk("gatwick irq not from gatwick pic\n");
221 return IRQ_NONE;
222}
223
224int
225pmac_get_irq(struct pt_regs *regs)
226{
227 int irq;
228 unsigned long bits = 0;
229
230#ifdef CONFIG_SMP
231 void psurge_smp_message_recv(struct pt_regs *);
232
233 /* IPI's are a hack on the powersurge -- Cort */
234 if ( smp_processor_id() != 0 ) {
235 psurge_smp_message_recv(regs);
236 return -2; /* ignore, already handled */
237 }
238#endif /* CONFIG_SMP */
239 for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
240 int i = irq >> 5;
241 bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
242 /* We must read level interrupts from the level register */
243 bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
244 bits &= ppc_cached_irq_mask[i];
245 if (bits == 0)
246 continue;
247 irq += __ilog2(bits);
248 break;
249 }
250
251 return irq;
252}
253
254/* This routine will fix some missing interrupt values in the device tree
255 * on the gatwick mac-io controller used by some PowerBooks
256 */
257static void __init
258pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
259{
260 struct device_node *node;
261 int count;
262
263 memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
264 node = gw->child;
265 count = 0;
266 while(node)
267 {
268 /* Fix SCC */
269 if (strcasecmp(node->name, "escc") == 0)
270 if (node->child) {
271 if (node->child->n_intrs < 3) {
272 node->child->intrs = &gatwick_int_pool[count];
273 count += 3;
274 }
275 node->child->n_intrs = 3;
276 node->child->intrs[0].line = 15+irq_base;
277 node->child->intrs[1].line = 4+irq_base;
278 node->child->intrs[2].line = 5+irq_base;
279 printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
280 node->child->intrs[0].line,
281 node->child->intrs[1].line,
282 node->child->intrs[2].line);
283 }
284 /* Fix media-bay & left SWIM */
285 if (strcasecmp(node->name, "media-bay") == 0) {
286 struct device_node* ya_node;
287
288 if (node->n_intrs == 0)
289 node->intrs = &gatwick_int_pool[count++];
290 node->n_intrs = 1;
291 node->intrs[0].line = 29+irq_base;
292 printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
293 node->intrs[0].line);
294
295 ya_node = node->child;
296 while(ya_node)
297 {
298 if (strcasecmp(ya_node->name, "floppy") == 0) {
299 if (ya_node->n_intrs < 2) {
300 ya_node->intrs = &gatwick_int_pool[count];
301 count += 2;
302 }
303 ya_node->n_intrs = 2;
304 ya_node->intrs[0].line = 19+irq_base;
305 ya_node->intrs[1].line = 1+irq_base;
306 printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
307 ya_node->intrs[0].line, ya_node->intrs[1].line);
308 }
309 if (strcasecmp(ya_node->name, "ata4") == 0) {
310 if (ya_node->n_intrs < 2) {
311 ya_node->intrs = &gatwick_int_pool[count];
312 count += 2;
313 }
314 ya_node->n_intrs = 2;
315 ya_node->intrs[0].line = 14+irq_base;
316 ya_node->intrs[1].line = 3+irq_base;
317 printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
318 ya_node->intrs[0].line, ya_node->intrs[1].line);
319 }
320 ya_node = ya_node->sibling;
321 }
322 }
323 node = node->sibling;
324 }
325 if (count > 10) {
326 printk("WARNING !! Gatwick interrupt pool overflow\n");
327 printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
328 printk(" requested = %d\n", count);
329 }
330}
331
332/*
333 * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
334 * card which includes an ohare chip that acts as a second interrupt
335 * controller. If we find this second ohare, set it up and fix the
336 * interrupt value in the device tree for the ethernet chip.
337 */
338static int __init enable_second_ohare(void)
339{
340 unsigned char bus, devfn;
341 unsigned short cmd;
342 unsigned long addr;
343 struct device_node *irqctrler = find_devices("pci106b,7");
344 struct device_node *ether;
345
346 if (irqctrler == NULL || irqctrler->n_addrs <= 0)
347 return -1;
348 addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
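	/* the second ohare's interrupt registers start at offset 0x20
	 * of its register space */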
349 pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
350 max_irqs = 64;
351 if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) {
352 struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler);
353 if (!hose)
354 printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
355 else {
356 early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
357 cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
358 cmd &= ~PCI_COMMAND_IO;
359 early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
360 }
361 }
362
363 /* Fix interrupt for the modem/ethernet combo controller. The number
364 in the device tree (27) is bogus (correct for the ethernet-only
365 board but not the combo ethernet/modem board).
366 The real interrupt is 28 on the second controller -> 28+32 = 60.
367 */
368 ether = find_devices("pci1011,14");
369 if (ether && ether->n_intrs > 0) {
370 ether->intrs[0].line = 60;
371 printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
372 ether->intrs[0].line);
373 }
374
375 /* Return the interrupt number of the cascade */
376 return irqctrler->intrs[0].line;
377}
378
379#ifdef CONFIG_XMON
380static struct irqaction xmon_action = {
381 .handler = xmon_irq,
382 .flags = 0,
383 .mask = CPU_MASK_NONE,
384 .name = "NMI - XMON"
385};
386#endif
387
388static struct irqaction gatwick_cascade_action = {
389 .handler = gatwick_action,
390 .flags = SA_INTERRUPT,
391 .mask = CPU_MASK_NONE,
392 .name = "cascade",
393};
394#endif /* CONFIG_PPC32 */
395
396static int pmac_u3_cascade(struct pt_regs *regs, void *data)
397{
398 return mpic_get_one_irq((struct mpic *)data, regs);
399}
400
401void __init pmac_pic_init(void)
402{
403 struct device_node *irqctrler = NULL;
404 struct device_node *irqctrler2 = NULL;
405 struct device_node *np;
406#ifdef CONFIG_PPC32
407 int i;
408 unsigned long addr;
409 int irq_cascade = -1;
410#endif
411 struct mpic *mpic1, *mpic2;
412
413 /* We first try to detect Apple's new Core99 chipset, since mac-io
414 * is quite different on those machines and contains an IBM MPIC2.
415 */
416 np = find_type_devices("open-pic");
417 while (np) {
418 if (np->parent && !strcmp(np->parent->name, "u3"))
419 irqctrler2 = np;
420 else
421 irqctrler = np;
422 np = np->next;
423 }
424 if (irqctrler != NULL && irqctrler->n_addrs > 0) {
425 unsigned char senses[128];
426
427 printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
428 (unsigned int)irqctrler->addrs[0].address);
429 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0);
430
431 prom_get_irq_senses(senses, 0, 128);
432 mpic1 = mpic_alloc(irqctrler->addrs[0].address,
433 MPIC_PRIMARY | MPIC_WANTS_RESET,
434 0, 0, 128, 252, senses, 128, " OpenPIC ");
435 BUG_ON(mpic1 == NULL);
436 mpic_init(mpic1);
437
438 if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
439 irqctrler2->n_addrs > 0) {
440 printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
441 (u32)irqctrler2->addrs[0].address,
442 irqctrler2->intrs[0].line);
443
444 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
445 prom_get_irq_senses(senses, 128, 128 + 124);
446
447 /* We don't need to set MPIC_BROKEN_U3 here since we don't have
448 * hypertransport interrupts routed to it
449 */
450 mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
451 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
452 0, 128, 124, 0, senses, 124,
453 " U3-MPIC ");
454 BUG_ON(mpic2 == NULL);
455 mpic_init(mpic2);
456 mpic_setup_cascade(irqctrler2->intrs[0].line,
457 pmac_u3_cascade, mpic2);
458 }
459#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
460 {
461 struct device_node* pswitch;
462 int nmi_irq;
463
464 pswitch = find_devices("programmer-switch");
465 if (pswitch && pswitch->n_intrs) {
466 nmi_irq = pswitch->intrs[0].line;
467 mpic_irq_set_priority(nmi_irq, 9);
468 setup_irq(nmi_irq, &xmon_action);
469 }
470 }
471#endif /* CONFIG_XMON */
472 return;
473 }
474 irqctrler = NULL;
475
476#ifdef CONFIG_PPC32
477 /* Get the level/edge settings; assume that if it's neither
478 * a Grand Central nor an OHare, then it's a Heathrow
479 * (or Paddington).
480 */
481 ppc_md.get_irq = pmac_get_irq;
482 if (find_devices("gc"))
483 level_mask[0] = GC_LEVEL_MASK;
484 else if (find_devices("ohare")) {
485 level_mask[0] = OHARE_LEVEL_MASK;
486 /* We might have a second cascaded ohare */
487 level_mask[1] = OHARE_LEVEL_MASK;
488 } else {
489 level_mask[0] = HEATHROW_LEVEL_MASK;
490 level_mask[1] = 0;
491 /* We might have a second cascaded heathrow */
492 level_mask[2] = HEATHROW_LEVEL_MASK;
493 level_mask[3] = 0;
494 }
495
496 /*
497 * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts,
498 * 1998 G3 Series PowerBooks have 128,
499 * other powermacs have 32.
500 * The combo ethernet/modem card for the Powerstar powerbooks
501 * (2400/3400/3500, ohare based) has a second ohare chip
502 * effectively making a total of 64.
503 */
504 max_irqs = max_real_irqs = 32;
505 irqctrler = find_devices("mac-io");
506 if (irqctrler)
507 {
508 max_real_irqs = 64;
509 if (irqctrler->next)
510 max_irqs = 128;
511 else
512 max_irqs = 64;
513 }
514 for ( i = 0; i < max_real_irqs ; i++ )
515 irq_desc[i].handler = &pmac_pic;
516
517 /* get addresses of first controller */
518 if (irqctrler) {
519 if (irqctrler->n_addrs > 0) {
520 addr = (unsigned long)
521 ioremap(irqctrler->addrs[0].address, 0x40);
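		/* bank 0 registers are at offset 0x20, bank 1 at 0x10,
		 * matching the default table above */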
522 for (i = 0; i < 2; ++i)
523 pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
524 (addr + (2 - i) * 0x10);
525 }
526
527 /* get addresses of second controller */
528 irqctrler = irqctrler->next;
529 if (irqctrler && irqctrler->n_addrs > 0) {
530 addr = (unsigned long)
531 ioremap(irqctrler->addrs[0].address, 0x40);
532 for (i = 2; i < 4; ++i)
533 pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
534 (addr + (4 - i) * 0x10);
535 irq_cascade = irqctrler->intrs[0].line;
536 if (device_is_compatible(irqctrler, "gatwick"))
537 pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
538 }
539 } else {
540 /* older powermacs have a GC (grand central) or ohare at
541 f3000000, with interrupt control registers at f3000020. */
542 addr = (unsigned long) ioremap(0xf3000000, 0x40);
543 pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20);
544 }
545
546 /* PowerBooks 3400 and 3500 can have a second controller in a second
547 ohare chip, on the combo ethernet/modem card */
548 if (machine_is_compatible("AAPL,3400/2400")
549 || machine_is_compatible("AAPL,3500"))
550 irq_cascade = enable_second_ohare();
551
552 /* disable all interrupts in all controllers */
553 for (i = 0; i * 32 < max_irqs; ++i)
554 out_le32(&pmac_irq_hw[i]->enable, 0);
555 /* mark level interrupts */
556 for (i = 0; i < max_irqs; i++)
557 if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
558 irq_desc[i].status = IRQ_LEVEL;
559
560 /* get interrupt line of secondary interrupt controller */
561 if (irq_cascade >= 0) {
562 printk(KERN_INFO "irq: secondary controller on irq %d\n",
563 (int)irq_cascade);
564 for ( i = max_real_irqs ; i < max_irqs ; i++ )
565 irq_desc[i].handler = &gatwick_pic;
566 setup_irq(irq_cascade, &gatwick_cascade_action);
567 }
568 printk("System has %d possible interrupts\n", max_irqs);
569 if (max_irqs != max_real_irqs)
570 printk(KERN_DEBUG "%d interrupts on main controller\n",
571 max_real_irqs);
572
573#ifdef CONFIG_XMON
574 setup_irq(20, &xmon_action);
575#endif /* CONFIG_XMON */
576#endif /* CONFIG_PPC32 */
577}
578
579#ifdef CONFIG_PM
580/*
581 * These procedures are used in implementing sleep on the powerbooks.
582 * sleep_save_intrs() saves the states of all interrupt enables
583 * and disables all interrupts except for the nominated one.
584 * sleep_restore_intrs() restores the states of all interrupt enables.
585 */
586unsigned long sleep_save_mask[2];
587
588/* This used to be passed by the PMU driver but that link got
589 * broken with the new driver model. We use this tweak for now...
590 */
591static int pmacpic_find_viaint(void)
592{
593 int viaint = -1;
594
595#ifdef CONFIG_ADB_PMU
596 struct device_node *np;
597
598 if (pmu_get_model() != PMU_OHARE_BASED)
599 goto not_found;
600 np = of_find_node_by_name(NULL, "via-pmu");
601 if (np == NULL)
602 goto not_found;
603 viaint = np->intrs[0].line;
604#endif /* CONFIG_ADB_PMU */
605
606not_found:
607 return viaint;
608}
609
610static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
611{
612 int viaint = pmacpic_find_viaint();
613
614 sleep_save_mask[0] = ppc_cached_irq_mask[0];
615 sleep_save_mask[1] = ppc_cached_irq_mask[1];
616 ppc_cached_irq_mask[0] = 0;
617 ppc_cached_irq_mask[1] = 0;
618 if (viaint > 0)
619 set_bit(viaint, ppc_cached_irq_mask);
620 out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
621 if (max_real_irqs > 32)
622 out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
623 (void)in_le32(&pmac_irq_hw[0]->event);
624 /* make sure mask gets to controller before we return to caller */
625 mb();
626 (void)in_le32(&pmac_irq_hw[0]->enable);
627
628 return 0;
629}
630
631static int pmacpic_resume(struct sys_device *sysdev)
632{
633 int i;
634
635 out_le32(&pmac_irq_hw[0]->enable, 0);
636 if (max_real_irqs > 32)
637 out_le32(&pmac_irq_hw[1]->enable, 0);
638 mb();
639 for (i = 0; i < max_real_irqs; ++i)
640 if (test_bit(i, sleep_save_mask))
641 pmac_unmask_irq(i);
642
643 return 0;
644}
645
646#endif /* CONFIG_PM */
647
648static struct sysdev_class pmacpic_sysclass = {
649 set_kset_name("pmac_pic"),
650};
651
652static struct sys_device device_pmacpic = {
653 .id = 0,
654 .cls = &pmacpic_sysclass,
655};
656
657static struct sysdev_driver driver_pmacpic = {
658#ifdef CONFIG_PM
659 .suspend = &pmacpic_suspend,
660 .resume = &pmacpic_resume,
661#endif /* CONFIG_PM */
662};
663
664static int __init init_pmacpic_sysfs(void)
665{
666#ifdef CONFIG_PPC32
667 if (max_irqs == 0)
668 return -ENODEV;
669#endif
670 printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
671 sysdev_class_register(&pmacpic_sysclass);
672 sysdev_register(&device_pmacpic);
673 sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
674 return 0;
675}
676
677subsys_initcall(init_pmacpic_sysfs);
678
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
new file mode 100644
index 000000000000..664103dfeef9
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pic.h
@@ -0,0 +1,11 @@
1#ifndef __PPC_PLATFORMS_PMAC_PIC_H
2#define __PPC_PLATFORMS_PMAC_PIC_H
3
4#include <linux/irq.h>
5
6extern struct hw_interrupt_type pmac_pic;
7
8void pmac_pic_init(void);
9int pmac_get_irq(struct pt_regs *regs);
10
11#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
new file mode 100644
index 000000000000..2ad25e13423e
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -0,0 +1,51 @@
1#ifndef __PMAC_H__
2#define __PMAC_H__
3
4#include <linux/pci.h>
5#include <linux/ide.h>
6#include <linux/irq.h>
7
8/*
9 * Declaration for the various functions exported by the
10 * pmac_* files. Mostly for use by pmac_setup
11 */
12
13struct rtc_time;
14
15extern long pmac_time_init(void);
16extern unsigned long pmac_get_boot_time(void);
17extern void pmac_get_rtc_time(struct rtc_time *);
18extern int pmac_set_rtc_time(struct rtc_time *);
19extern void pmac_read_rtc_time(void);
20extern void pmac_calibrate_decr(void);
21extern void pmac_pcibios_fixup(void);
22extern void pmac_pci_init(void);
23extern unsigned long pmac_ide_get_base(int index);
24extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
25 unsigned long data_port, unsigned long ctrl_port, int *irq);
26
27extern void pmac_nvram_update(void);
28extern unsigned char pmac_nvram_read_byte(int addr);
29extern void pmac_nvram_write_byte(int addr, unsigned char val);
30extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial);
31extern void pmac_pcibios_after_init(void);
32extern int of_show_percpuinfo(struct seq_file *m, int i);
33
34extern void pmac_pci_init(void);
35extern void pmac_setup_pci_dma(void);
36extern void pmac_check_ht_link(void);
37
38extern void pmac_setup_smp(void);
39
40extern unsigned long pmac_ide_get_base(int index);
41extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
42 unsigned long data_port, unsigned long ctrl_port, int *irq);
43
44extern int pmac_nvram_init(void);
45
46extern struct hw_interrupt_type pmac_pic;
47
48void pmac_pic_init(void);
49int pmac_get_irq(struct pt_regs *regs);
50
51#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
new file mode 100644
index 000000000000..6f62af597291
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -0,0 +1,794 @@
1/*
2 * Powermac setup and early boot code plus other random bits.
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Adapted for Power Macintosh by Paul Mackerras
8 * Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
9 *
10 * Derived from "arch/alpha/kernel/setup.c"
11 * Copyright (C) 1995 Linus Torvalds
12 *
13 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22/*
23 * bootup setup stuff..
24 */
25
26#include <linux/config.h>
27#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/sched.h>
30#include <linux/kernel.h>
31#include <linux/mm.h>
32#include <linux/stddef.h>
33#include <linux/unistd.h>
34#include <linux/ptrace.h>
35#include <linux/slab.h>
36#include <linux/user.h>
37#include <linux/a.out.h>
38#include <linux/tty.h>
39#include <linux/string.h>
40#include <linux/delay.h>
41#include <linux/ioport.h>
42#include <linux/major.h>
43#include <linux/initrd.h>
44#include <linux/vt_kern.h>
45#include <linux/console.h>
46#include <linux/ide.h>
47#include <linux/pci.h>
48#include <linux/adb.h>
49#include <linux/cuda.h>
50#include <linux/pmu.h>
51#include <linux/irq.h>
52#include <linux/seq_file.h>
53#include <linux/root_dev.h>
54#include <linux/bitops.h>
55#include <linux/suspend.h>
56
57#include <asm/reg.h>
58#include <asm/sections.h>
59#include <asm/prom.h>
60#include <asm/system.h>
61#include <asm/pgtable.h>
62#include <asm/io.h>
63#include <asm/pci-bridge.h>
64#include <asm/ohare.h>
65#include <asm/mediabay.h>
66#include <asm/machdep.h>
67#include <asm/dma.h>
68#include <asm/cputable.h>
69#include <asm/btext.h>
70#include <asm/pmac_feature.h>
71#include <asm/time.h>
72#include <asm/of_device.h>
73#include <asm/mmu_context.h>
74#include <asm/iommu.h>
75#include <asm/smu.h>
76#include <asm/pmc.h>
77#include <asm/mpic.h>
78
79#include "pmac.h"
80
81#undef SHOW_GATWICK_IRQS
82
83unsigned char drive_info;
84
85int ppc_override_l2cr = 0;
86int ppc_override_l2cr_value;
87int has_l2cache = 0;
88
89int pmac_newworld = 1;
90
91static int current_root_goodness = -1;
92
93extern int pmac_newworld;
94extern struct machdep_calls pmac_md;
95
96#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
97
98#ifdef CONFIG_PPC64
99#include <asm/udbg.h>
100int sccdbg;
101#endif
102
103extern void zs_kgdb_hook(int tty_num);
104
105sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
106EXPORT_SYMBOL(sys_ctrler);
107
108#ifdef CONFIG_PMAC_SMU
109unsigned long smu_cmdbuf_abs;
110EXPORT_SYMBOL(smu_cmdbuf_abs);
111#endif
112
113#ifdef CONFIG_SMP
114extern struct smp_ops_t psurge_smp_ops;
115extern struct smp_ops_t core99_smp_ops;
116#endif /* CONFIG_SMP */
117
118static void pmac_show_cpuinfo(struct seq_file *m)
119{
120 struct device_node *np;
121 char *pp;
122 int plen;
123 int mbmodel;
124 unsigned int mbflags;
125 char* mbname;
126
127 mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
128 PMAC_MB_INFO_MODEL, 0);
129 mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
130 PMAC_MB_INFO_FLAGS, 0);
131 if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
132 (long) &mbname) != 0)
133 mbname = "Unknown";
134
135 /* find motherboard type */
136 seq_printf(m, "machine\t\t: ");
137 np = of_find_node_by_path("/");
138 if (np != NULL) {
139 pp = (char *) get_property(np, "model", NULL);
140 if (pp != NULL)
141 seq_printf(m, "%s\n", pp);
142 else
143 seq_printf(m, "PowerMac\n");
144 pp = (char *) get_property(np, "compatible", &plen);
145 if (pp != NULL) {
146 seq_printf(m, "motherboard\t:");
147 while (plen > 0) {
148 int l = strlen(pp) + 1;
149 seq_printf(m, " %s", pp);
150 plen -= l;
151 pp += l;
152 }
153 seq_printf(m, "\n");
154 }
155 of_node_put(np);
156 } else
157 seq_printf(m, "PowerMac\n");
158
159 /* print parsed model */
160 seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
161 seq_printf(m, "pmac flags\t: %08x\n", mbflags);
162
163 /* find l2 cache info */
164 np = of_find_node_by_name(NULL, "l2-cache");
165 if (np == NULL)
166 np = of_find_node_by_type(NULL, "cache");
167 if (np != NULL) {
168 unsigned int *ic = (unsigned int *)
169 get_property(np, "i-cache-size", NULL);
170 unsigned int *dc = (unsigned int *)
171 get_property(np, "d-cache-size", NULL);
172 seq_printf(m, "L2 cache\t:");
173 has_l2cache = 1;
174 if (get_property(np, "cache-unified", NULL) != 0 && dc) {
175 seq_printf(m, " %dK unified", *dc / 1024);
176 } else {
177 if (ic)
178 seq_printf(m, " %dK instruction", *ic / 1024);
179 if (dc)
180 seq_printf(m, "%s %dK data",
181 (ic? " +": ""), *dc / 1024);
182 }
183 pp = get_property(np, "ram-type", NULL);
184 if (pp)
185 seq_printf(m, " %s", pp);
186 seq_printf(m, "\n");
187 of_node_put(np);
188 }
189
190 /* Indicate newworld/oldworld */
191 seq_printf(m, "pmac-generation\t: %s\n",
192 pmac_newworld ? "NewWorld" : "OldWorld");
193}
194
195static void pmac_show_percpuinfo(struct seq_file *m, int i)
196{
197#ifdef CONFIG_CPU_FREQ_PMAC
198 extern unsigned int pmac_get_one_cpufreq(int i);
199 unsigned int freq = pmac_get_one_cpufreq(i);
200 if (freq != 0) {
201 seq_printf(m, "clock\t\t: %dMHz\n", freq/1000);
202 return;
203 }
204#endif /* CONFIG_CPU_FREQ_PMAC */
205}
206
207#ifndef CONFIG_ADB_CUDA
208int find_via_cuda(void)
209{
210 if (!find_devices("via-cuda"))
211 return 0;
212 printk("WARNING ! Your machine is CUDA-based but your kernel\n");
213 printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
214 return 0;
215}
216#endif
217
218#ifndef CONFIG_ADB_PMU
219int find_via_pmu(void)
220{
221 if (!find_devices("via-pmu"))
222 return 0;
223 printk("WARNING ! Your machine is PMU-based but your kernel\n");
224 printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
225 return 0;
226}
227#endif
228
229#ifndef CONFIG_PMAC_SMU
230int smu_init(void)
231{
232 /* should check and warn if SMU is present */
233 return 0;
234}
235#endif
236
237#ifdef CONFIG_PPC32
238static volatile u32 *sysctrl_regs;
239
240static void __init ohare_init(void)
241{
242 /* this area has the CPU identification register
243 and some registers used by smp boards */
244 sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
245
246 /*
247 * Turn on the L2 cache.
248 * We assume that we have a PSX memory controller iff
249 * we have an ohare I/O controller.
250 */
251 if (find_devices("ohare") != NULL) {
252 if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
253 if (sysctrl_regs[4] & 0x10)
254 sysctrl_regs[4] |= 0x04000020;
255 else
256 sysctrl_regs[4] |= 0x04000000;
257 if(has_l2cache)
258 printk(KERN_INFO "Level 2 cache enabled\n");
259 }
260 }
261}
262
263static void __init l2cr_init(void)
264{
265 /* Checks "l2cr-value" property in the registry */
266 if (cpu_has_feature(CPU_FTR_L2CR)) {
267 struct device_node *np = find_devices("cpus");
268 if (np == 0)
269 np = find_type_devices("cpu");
270 if (np != 0) {
271 unsigned int *l2cr = (unsigned int *)
272 get_property(np, "l2cr-value", NULL);
273 if (l2cr != 0) {
274 ppc_override_l2cr = 1;
275 ppc_override_l2cr_value = *l2cr;
276 _set_L2CR(0);
277 _set_L2CR(ppc_override_l2cr_value);
278 }
279 }
280 }
281
282 if (ppc_override_l2cr)
283 printk(KERN_INFO "L2CR overridden (0x%x), "
284 "backside cache is %s\n",
285 ppc_override_l2cr_value,
286 (ppc_override_l2cr_value & 0x80000000)
287 ? "enabled" : "disabled");
288}
289#endif
290
291void __init pmac_setup_arch(void)
292{
293 struct device_node *cpu, *ic;
294 int *fp;
295 unsigned long pvr;
296
297 pvr = PVR_VER(mfspr(SPRN_PVR));
298
299 /* Set loops_per_jiffy to a half-way reasonable value,
300 for use until calibrate_delay gets called. */
301 loops_per_jiffy = 50000000 / HZ;
302 cpu = of_find_node_by_type(NULL, "cpu");
303 if (cpu != NULL) {
304 fp = (int *) get_property(cpu, "clock-frequency", NULL);
305 if (fp != NULL) {
306 if (pvr >= 0x30 && pvr < 0x80)
307 /* PPC970 etc. */
308 loops_per_jiffy = *fp / (3 * HZ);
309 else if (pvr == 4 || pvr >= 8)
310 /* 604, G3, G4 etc. */
311 loops_per_jiffy = *fp / HZ;
312 else
313 /* 601, 603, etc. */
314 loops_per_jiffy = *fp / (2 * HZ);
315 }
316 of_node_put(cpu);
317 }
318
319 /* See if newworld or oldworld */
320 for (ic = NULL; (ic = of_find_all_nodes(ic)) != NULL; )
321 if (get_property(ic, "interrupt-controller", NULL))
322 break;
323 pmac_newworld = (ic != NULL);
324 if (ic)
325 of_node_put(ic);
326
327 /* Lookup PCI hosts */
328 pmac_pci_init();
329
330#ifdef CONFIG_PPC32
331 ohare_init();
332 l2cr_init();
333#endif /* CONFIG_PPC32 */
334
335#ifdef CONFIG_PPC64
336 /* Probe motherboard chipset */
337 /* this is done earlier in setup_arch for 32-bit */
338 pmac_feature_init();
339
340 /* We can NAP */
341 powersave_nap = 1;
342 printk(KERN_INFO "Using native/NAP idle loop\n");
343#endif
344
345#ifdef CONFIG_KGDB
346 zs_kgdb_hook(0);
347#endif
348
349 find_via_cuda();
350 find_via_pmu();
351 smu_init();
352
353#ifdef CONFIG_NVRAM
354 pmac_nvram_init();
355#endif
356
357#ifdef CONFIG_PPC32
358#ifdef CONFIG_BLK_DEV_INITRD
359 if (initrd_start)
360 ROOT_DEV = Root_RAM0;
361 else
362#endif
363 ROOT_DEV = DEFAULT_ROOT_DEVICE;
364#endif
365
366#ifdef CONFIG_SMP
367 /* Check for Core99 */
368 if (find_devices("uni-n") || find_devices("u3"))
369 smp_ops = &core99_smp_ops;
370#ifdef CONFIG_PPC32
371 else
372 smp_ops = &psurge_smp_ops;
373#endif
374#endif /* CONFIG_SMP */
375}
376
377char *bootpath;
378char *bootdevice;
379void *boot_host;
380int boot_target;
381int boot_part;
382extern dev_t boot_dev;
383
384#ifdef CONFIG_SCSI
385void __init note_scsi_host(struct device_node *node, void *host)
386{
387 int l;
388 char *p;
389
390 l = strlen(node->full_name);
391 if (bootpath != NULL && bootdevice != NULL
392 && strncmp(node->full_name, bootdevice, l) == 0
393 && (bootdevice[l] == '/' || bootdevice[l] == 0)) {
394 boot_host = host;
395 /*
396 * There's a bug in OF 1.0.5. (Why am I not surprised.)
397 * If you pass a path like scsi/sd@1:0 to canon, it returns
398 * something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0
399 * That is, the scsi target number doesn't get preserved.
400 * So we pick the target number out of bootpath and use that.
401 */
402 p = strstr(bootpath, "/sd@");
403 if (p != NULL) {
404 p += 4;
405 boot_target = simple_strtoul(p, NULL, 10);
406 p = strchr(p, ':');
407 if (p != NULL)
408 boot_part = simple_strtoul(p + 1, NULL, 10);
409 }
410 }
411}
412EXPORT_SYMBOL(note_scsi_host);
413#endif
414
415#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
416static dev_t __init find_ide_boot(void)
417{
418 char *p;
419 int n;
420 dev_t __init pmac_find_ide_boot(char *bootdevice, int n);
421
422 if (bootdevice == NULL)
423 return 0;
424 p = strrchr(bootdevice, '/');
425 if (p == NULL)
426 return 0;
427 n = p - bootdevice;
428
429 return pmac_find_ide_boot(bootdevice, n);
430}
431#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
432
433static void __init find_boot_device(void)
434{
435#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
436 boot_dev = find_ide_boot();
437#endif
438}
439
440/* TODO: Merge the suspend-to-ram with the common code !!!
441 * currently, this is a stub implementation for suspend-to-disk
442 * only
443 */
444
445#ifdef CONFIG_SOFTWARE_SUSPEND
446
447static int pmac_pm_prepare(suspend_state_t state)
448{
449 printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
450
451 return 0;
452}
453
454static int pmac_pm_enter(suspend_state_t state)
455{
456 printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
457
458 /* Give up the lazy FPU & vec so we don't have to back them
459 * up from the low-level code
460 */
461 enable_kernel_fp();
462
463#ifdef CONFIG_ALTIVEC
464 if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
465 enable_kernel_altivec();
466#endif /* CONFIG_ALTIVEC */
467
468 return 0;
469}
470
471static int pmac_pm_finish(suspend_state_t state)
472{
473 printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
474
475 /* Restore userland MMU context */
476 set_context(current->active_mm->context, current->active_mm->pgd);
477
478 return 0;
479}
480
481static struct pm_ops pmac_pm_ops = {
482 .pm_disk_mode = PM_DISK_SHUTDOWN,
483 .prepare = pmac_pm_prepare,
484 .enter = pmac_pm_enter,
485 .finish = pmac_pm_finish,
486};
487
488#endif /* CONFIG_SOFTWARE_SUSPEND */
489
490static int initializing = 1;
491
492static int pmac_late_init(void)
493{
494 initializing = 0;
495#ifdef CONFIG_SOFTWARE_SUSPEND
496 pm_set_ops(&pmac_pm_ops);
497#endif /* CONFIG_SOFTWARE_SUSPEND */
498 return 0;
499}
500
501late_initcall(pmac_late_init);
502
503/* can't be __init - can be called whenever a disk is first accessed */
504void note_bootable_part(dev_t dev, int part, int goodness)
505{
506 static int found_boot = 0;
507 char *p;
508
509 if (!initializing)
510 return;
511 if ((goodness <= current_root_goodness) &&
512 ROOT_DEV != DEFAULT_ROOT_DEVICE)
513 return;
514 p = strstr(saved_command_line, "root=");
515 if (p != NULL && (p == saved_command_line || p[-1] == ' '))
516 return;
517
518 if (!found_boot) {
519 find_boot_device();
520 found_boot = 1;
521 }
522 if (!boot_dev || dev == boot_dev) {
523 ROOT_DEV = dev + part;
524 boot_dev = 0;
525 current_root_goodness = goodness;
526 }
527}
528
529#ifdef CONFIG_ADB_CUDA
530static void cuda_restart(void)
531{
532 struct adb_request req;
533
534 cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
535 for (;;)
536 cuda_poll();
537}
538
539static void cuda_shutdown(void)
540{
541 struct adb_request req;
542
543 cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
544 for (;;)
545 cuda_poll();
546}
547
548#else
549#define cuda_restart()
550#define cuda_shutdown()
551#endif
552
553#ifndef CONFIG_ADB_PMU
554#define pmu_restart()
555#define pmu_shutdown()
556#endif
557
558#ifndef CONFIG_PMAC_SMU
559#define smu_restart()
560#define smu_shutdown()
561#endif
562
563static void pmac_restart(char *cmd)
564{
565 switch (sys_ctrler) {
566 case SYS_CTRLER_CUDA:
567 cuda_restart();
568 break;
569 case SYS_CTRLER_PMU:
570 pmu_restart();
571 break;
572 case SYS_CTRLER_SMU:
573 smu_restart();
574 break;
575 default: ;
576 }
577}
578
579static void pmac_power_off(void)
580{
581 switch (sys_ctrler) {
582 case SYS_CTRLER_CUDA:
583 cuda_shutdown();
584 break;
585 case SYS_CTRLER_PMU:
586 pmu_shutdown();
587 break;
588 case SYS_CTRLER_SMU:
589 smu_shutdown();
590 break;
591 default: ;
592 }
593}
594
595static void
596pmac_halt(void)
597{
598 pmac_power_off();
599}
600
601#ifdef CONFIG_PPC32
602void __init pmac_init(void)
603{
604 /* isa_io_base gets set in pmac_pci_init */
605 isa_mem_base = PMAC_ISA_MEM_BASE;
606 pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
607 ISA_DMA_THRESHOLD = ~0L;
608 DMA_MODE_READ = 1;
609 DMA_MODE_WRITE = 2;
610
611 ppc_md = pmac_md;
612
613#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
614#ifdef CONFIG_BLK_DEV_IDE_PMAC
615 ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports;
616 ppc_ide_md.default_io_base = pmac_ide_get_base;
617#endif /* CONFIG_BLK_DEV_IDE_PMAC */
618#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */
619
620 if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);
621
622}
623#endif
624
625/*
626 * Early initialization.
627 */
628static void __init pmac_init_early(void)
629{
630#ifdef CONFIG_PPC64
631 /* Initialize hash table, from now on, we can take hash faults
632 * and call ioremap
633 */
634 hpte_init_native();
635
636 /* Init SCC */
637 if (strstr(cmd_line, "sccdbg")) {
638 sccdbg = 1;
639 udbg_init_scc(NULL);
640 }
641
642 /* Setup interrupt mapping options */
643 ppc64_interrupt_controller = IC_OPEN_PIC;
644
645 iommu_init_early_u3();
646#endif
647}
648
649static void __init pmac_progress(char *s, unsigned short hex)
650{
651#ifdef CONFIG_PPC64
652 if (sccdbg) {
653 udbg_puts(s);
654 udbg_puts("\n");
655 return;
656 }
657#endif
658#ifdef CONFIG_BOOTX_TEXT
659 if (boot_text_mapped) {
660 btext_drawstring(s);
661 btext_drawchar('\n');
662 }
663#endif /* CONFIG_BOOTX_TEXT */
664}
665
666/*
667 * pmac has no legacy IO, anything calling this function has to
668 * fail or bad things will happen
669 */
670static int pmac_check_legacy_ioport(unsigned int baseport)
671{
672 return -ENODEV;
673}
674
675static int __init pmac_declare_of_platform_devices(void)
676{
677 struct device_node *np, *npp;
678
679 np = find_devices("uni-n");
680 if (np) {
681 for (np = np->child; np != NULL; np = np->sibling)
682 if (strncmp(np->name, "i2c", 3) == 0) {
683 of_platform_device_create(np, "uni-n-i2c",
684 NULL);
685 break;
686 }
687 }
688 np = find_devices("valkyrie");
689 if (np)
690 of_platform_device_create(np, "valkyrie", NULL);
691 np = find_devices("platinum");
692 if (np)
693 of_platform_device_create(np, "platinum", NULL);
694
695 npp = of_find_node_by_name(NULL, "u3");
696 if (npp) {
697 for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
698 if (strncmp(np->name, "i2c", 3) == 0) {
699 of_platform_device_create(np, "u3-i2c", NULL);
700 of_node_put(np);
701 break;
702 }
703 }
704 of_node_put(npp);
705 }
706 np = of_find_node_by_type(NULL, "smu");
707 if (np) {
708 of_platform_device_create(np, "smu", NULL);
709 of_node_put(np);
710 }
711
712 return 0;
713}
714
715device_initcall(pmac_declare_of_platform_devices);
716
717/*
718 * Called very early, MMU is off, device-tree isn't unflattened
719 */
720static int __init pmac_probe(int platform)
721{
722#ifdef CONFIG_PPC64
723 if (platform != PLATFORM_POWERMAC)
724 return 0;
725
726 /*
727 * On U3, the DART (iommu) must be allocated now since it
728 * has an impact on htab_initialize (the large page it
729 * occupies has to be broken up so that the DART itself is not
730 * part of the cacheable linear mapping)
731 */
732 alloc_u3_dart_table();
733#endif
734
735#ifdef CONFIG_PMAC_SMU
736 /*
737 * SMU-based G5s need some memory below 2GB; at least the current
738 * driver needs that. We have to allocate it now. We allocate 4k
739 * (1 small page) for now.
740 */
741 smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
742#endif /* CONFIG_PMAC_SMU */
743
744 return 1;
745}
746
747#ifdef CONFIG_PPC64
748static int pmac_probe_mode(struct pci_bus *bus)
749{
750 struct device_node *node = bus->sysdata;
751
752 /* We need to use normal PCI probing for the AGP bus,
753 since the device for the AGP bridge isn't in the tree. */
754 if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
755 return PCI_PROBE_NORMAL;
756
757 return PCI_PROBE_DEVTREE;
758}
759#endif
760
761struct machdep_calls __initdata pmac_md = {
762#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
763 .cpu_die = generic_mach_cpu_die,
764#endif
765 .probe = pmac_probe,
766 .setup_arch = pmac_setup_arch,
767 .init_early = pmac_init_early,
768 .show_cpuinfo = pmac_show_cpuinfo,
769 .show_percpuinfo = pmac_show_percpuinfo,
770 .init_IRQ = pmac_pic_init,
771 .get_irq = mpic_get_irq, /* changed later */
772 .pcibios_fixup = pmac_pcibios_fixup,
773 .restart = pmac_restart,
774 .power_off = pmac_power_off,
775 .halt = pmac_halt,
776 .time_init = pmac_time_init,
777 .get_boot_time = pmac_get_boot_time,
778 .set_rtc_time = pmac_set_rtc_time,
779 .get_rtc_time = pmac_get_rtc_time,
780 .calibrate_decr = pmac_calibrate_decr,
781 .feature_call = pmac_do_feature_call,
782 .check_legacy_ioport = pmac_check_legacy_ioport,
783 .progress = pmac_progress,
784#ifdef CONFIG_PPC64
785 .pci_probe_mode = pmac_probe_mode,
786 .idle_loop = native_idle,
787 .enable_pmcs = power4_enable_pmcs,
788#endif
789#ifdef CONFIG_PPC32
790 .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
791 .pcibios_after_init = pmac_pcibios_after_init,
792 .phys_mem_access_prot = pci_phys_mem_access_prot,
793#endif
794};
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
new file mode 100644
index 000000000000..22b113d19b24
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -0,0 +1,396 @@
1/*
2 * This file contains low-level sleep functions for the PowerBook G3.
3 * Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
4 * and Paul Mackerras (paulus@samba.org).
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/config.h>
14#include <asm/processor.h>
15#include <asm/page.h>
16#include <asm/ppc_asm.h>
17#include <asm/cputable.h>
18#include <asm/cache.h>
19#include <asm/thread_info.h>
20#include <asm/asm-offsets.h>
21
22#define MAGIC 0x4c617273 /* 'Lars' */
23
24/*
25 * Structure for storing CPU registers on the stack.
26 */
27#define SL_SP 0
28#define SL_PC 4
29#define SL_MSR 8
30#define SL_SDR1 0xc
31#define SL_SPRG0 0x10 /* 4 sprg's */
32#define SL_DBAT0 0x20
33#define SL_IBAT0 0x28
34#define SL_DBAT1 0x30
35#define SL_IBAT1 0x38
36#define SL_DBAT2 0x40
37#define SL_IBAT2 0x48
38#define SL_DBAT3 0x50
39#define SL_IBAT3 0x58
40#define SL_TB 0x60
41#define SL_R2 0x68
42#define SL_CR 0x6c
43#define SL_R12 0x70 /* r12 to r31 */
44#define SL_SIZE (SL_R12 + 80)
45
46 .section .text
47 .align 5
48
49#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
50
51/* This gets called by via-pmu.c late during the sleep process.
52 * The PMU has already been sent the sleep command and will shut us down
53 * soon. We need to save everything that is needed and set up the wakeup
54 * vector that the ROM will call on wakeup
55 */
56_GLOBAL(low_sleep_handler)
57#ifndef CONFIG_6xx
58 blr
59#else
60 mflr r0
61 stw r0,4(r1)
62 stwu r1,-SL_SIZE(r1)
63 mfcr r0
64 stw r0,SL_CR(r1)
65 stw r2,SL_R2(r1)
66 stmw r12,SL_R12(r1)
67
68 /* Save MSR & SDR1 */
69 mfmsr r4
70 stw r4,SL_MSR(r1)
71 mfsdr1 r4
72 stw r4,SL_SDR1(r1)
73
74 /* Get a stable timebase and save it */
751: mftbu r4
76 stw r4,SL_TB(r1)
77 mftb r5
78 stw r5,SL_TB+4(r1)
79 mftbu r3
80 cmpw r3,r4
81 bne 1b
82
83 /* Save SPRGs */
84 mfsprg r4,0
85 stw r4,SL_SPRG0(r1)
86 mfsprg r4,1
87 stw r4,SL_SPRG0+4(r1)
88 mfsprg r4,2
89 stw r4,SL_SPRG0+8(r1)
90 mfsprg r4,3
91 stw r4,SL_SPRG0+12(r1)
92
93 /* Save BATs */
94 mfdbatu r4,0
95 stw r4,SL_DBAT0(r1)
96 mfdbatl r4,0
97 stw r4,SL_DBAT0+4(r1)
98 mfdbatu r4,1
99 stw r4,SL_DBAT1(r1)
100 mfdbatl r4,1
101 stw r4,SL_DBAT1+4(r1)
102 mfdbatu r4,2
103 stw r4,SL_DBAT2(r1)
104 mfdbatl r4,2
105 stw r4,SL_DBAT2+4(r1)
106 mfdbatu r4,3
107 stw r4,SL_DBAT3(r1)
108 mfdbatl r4,3
109 stw r4,SL_DBAT3+4(r1)
110 mfibatu r4,0
111 stw r4,SL_IBAT0(r1)
112 mfibatl r4,0
113 stw r4,SL_IBAT0+4(r1)
114 mfibatu r4,1
115 stw r4,SL_IBAT1(r1)
116 mfibatl r4,1
117 stw r4,SL_IBAT1+4(r1)
118 mfibatu r4,2
119 stw r4,SL_IBAT2(r1)
120 mfibatl r4,2
121 stw r4,SL_IBAT2+4(r1)
122 mfibatu r4,3
123 stw r4,SL_IBAT3(r1)
124 mfibatl r4,3
125 stw r4,SL_IBAT3+4(r1)
126
127 /* Backup various CPU config stuffs */
128 bl __save_cpu_setup
129
130 /* The ROM can wake us up via 2 different vectors:
131 * - On wallstreet & lombard, we must write a magic
132 * value 'Lars' at address 4 and a pointer to a
133 * memory location containing the PC to resume from
134 * at address 0.
135 * - On Core99, we must store the wakeup vector at
136 * address 0x80 and optionally its parameters
137 * at address 0x84. I've had some trouble with those
138 * parameters, however, and I no longer use them.
139 */
140 lis r5,grackle_wake_up@ha
141 addi r5,r5,grackle_wake_up@l
142 tophys(r5,r5)
143 stw r5,SL_PC(r1)
144 lis r4,KERNELBASE@h
145 tophys(r5,r1)
146 addi r5,r5,SL_PC
147 lis r6,MAGIC@ha
148 addi r6,r6,MAGIC@l
149 stw r5,0(r4)
150 stw r6,4(r4)
151 /* Setup stuffs at 0x80-0x84 for Core99 */
152 lis r3,core99_wake_up@ha
153 addi r3,r3,core99_wake_up@l
154 tophys(r3,r3)
155 stw r3,0x80(r4)
156 stw r5,0x84(r4)
157 /* Store a pointer to our backup storage into
158 * a kernel global
159 */
160 lis r3,sleep_storage@ha
161 addi r3,r3,sleep_storage@l
162 stw r5,0(r3)
163
164 .globl low_cpu_die
165low_cpu_die:
166 /* Flush & disable all caches */
167 bl flush_disable_caches
168
169 /* Turn off data relocation. */
170 mfmsr r3 /* Fetch current MSR */
171 rlwinm r3,r3,0,28,26 /* Turn off DR bit */
172 sync
173 mtmsr r3
174 isync
175
176BEGIN_FTR_SECTION
177 /* Flush any pending L2 data prefetches to work around HW bug */
178 sync
179 lis r3,0xfff0
180 lwz r0,0(r3) /* perform cache-inhibited load to ROM */
181 sync /* (caches are disabled at this point) */
182END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
183
184/*
185 * Set the HID0 and MSR for sleep.
186 */
187 mfspr r2,SPRN_HID0
188 rlwinm r2,r2,0,10,7 /* clear doze, nap */
189 oris r2,r2,HID0_SLEEP@h
190 sync
191 isync
192 mtspr SPRN_HID0,r2
193 sync
194
195/* This loop puts us back to sleep in case we have a spurious
196 * wakeup so that the host bridge properly stays asleep. The
197 * CPU will be turned off, either after a known time (about 1
198 * second) on wallstreet & lombard, or as soon as the CPU enters
199 * SLEEP mode on core99
200 */
201 mfmsr r2
202 oris r2,r2,MSR_POW@h
2031: sync
204 mtmsr r2
205 isync
206 b 1b
207
208/*
209 * Here is the resume code.
210 */
211
212
213/*
214 * Core99 machines resume here
215 * r4 has the physical address of SL_PC(sp) (unused)
216 */
217_GLOBAL(core99_wake_up)
218 /* Make sure HID0 no longer contains any sleep bit and that data cache
219 * is disabled
220 */
221 mfspr r3,SPRN_HID0
222 rlwinm r3,r3,0,11,7 /* clear SLEEP, NAP, DOZE bits */
223 rlwinm r3,r3,0,18,15 /* clear DCE, ICE */
224 mtspr SPRN_HID0,r3
225 sync
226 isync
227
228 /* sanitize MSR */
229 mfmsr r3
230 ori r3,r3,MSR_EE|MSR_IP
231 xori r3,r3,MSR_EE|MSR_IP
232 sync
233 isync
234 mtmsr r3
235 sync
236 isync
237
238 /* Recover sleep storage */
239 lis r3,sleep_storage@ha
240 addi r3,r3,sleep_storage@l
241 tophys(r3,r3)
242 lwz r1,0(r3)
243
244 /* Pass thru to older resume code ... */
245/*
246 * Here is the resume code for older machines.
247 * r1 has the physical address of SL_PC(sp).
248 */
249
250grackle_wake_up:
251
252 /* Restore the kernel's segment registers before
253 * we do any r1 memory access as we are not sure they
254 * are in a sane state above the first 256MB region
255 */
256 li r0,16 /* load up segment register values */
257 mtctr r0 /* for context 0 */
258 lis r3,0x2000 /* Ku = 1, VSID = 0 */
259 li r4,0
2603: mtsrin r3,r4
261 addi r3,r3,0x111 /* increment VSID */
262 addis r4,r4,0x1000 /* address of next segment */
263 bdnz 3b
264 sync
265 isync
266
267 subi r1,r1,SL_PC
268
269 /* Restore various CPU config stuffs */
270 bl __restore_cpu_setup
271
272 /* Make sure all FPRs have been initialized */
273 bl reloc_offset
274 bl __init_fpu_registers
275
276 /* Invalidate & enable the L1 cache; we don't care about
277 * whatever the ROM may have tried to write to memory
278 */
279 bl __inval_enable_L1
280
281 /* Restore the BATs, and SDR1. Then we can turn on the MMU. */
282 lwz r4,SL_SDR1(r1)
283 mtsdr1 r4
284 lwz r4,SL_SPRG0(r1)
285 mtsprg 0,r4
286 lwz r4,SL_SPRG0+4(r1)
287 mtsprg 1,r4
288 lwz r4,SL_SPRG0+8(r1)
289 mtsprg 2,r4
290 lwz r4,SL_SPRG0+12(r1)
291 mtsprg 3,r4
292
293 lwz r4,SL_DBAT0(r1)
294 mtdbatu 0,r4
295 lwz r4,SL_DBAT0+4(r1)
296 mtdbatl 0,r4
297 lwz r4,SL_DBAT1(r1)
298 mtdbatu 1,r4
299 lwz r4,SL_DBAT1+4(r1)
300 mtdbatl 1,r4
301 lwz r4,SL_DBAT2(r1)
302 mtdbatu 2,r4
303 lwz r4,SL_DBAT2+4(r1)
304 mtdbatl 2,r4
305 lwz r4,SL_DBAT3(r1)
306 mtdbatu 3,r4
307 lwz r4,SL_DBAT3+4(r1)
308 mtdbatl 3,r4
309 lwz r4,SL_IBAT0(r1)
310 mtibatu 0,r4
311 lwz r4,SL_IBAT0+4(r1)
312 mtibatl 0,r4
313 lwz r4,SL_IBAT1(r1)
314 mtibatu 1,r4
315 lwz r4,SL_IBAT1+4(r1)
316 mtibatl 1,r4
317 lwz r4,SL_IBAT2(r1)
318 mtibatu 2,r4
319 lwz r4,SL_IBAT2+4(r1)
320 mtibatl 2,r4
321 lwz r4,SL_IBAT3(r1)
322 mtibatu 3,r4
323 lwz r4,SL_IBAT3+4(r1)
324 mtibatl 3,r4
325
326BEGIN_FTR_SECTION
327 li r4,0
328 mtspr SPRN_DBAT4U,r4
329 mtspr SPRN_DBAT4L,r4
330 mtspr SPRN_DBAT5U,r4
331 mtspr SPRN_DBAT5L,r4
332 mtspr SPRN_DBAT6U,r4
333 mtspr SPRN_DBAT6L,r4
334 mtspr SPRN_DBAT7U,r4
335 mtspr SPRN_DBAT7L,r4
336 mtspr SPRN_IBAT4U,r4
337 mtspr SPRN_IBAT4L,r4
338 mtspr SPRN_IBAT5U,r4
339 mtspr SPRN_IBAT5L,r4
340 mtspr SPRN_IBAT6U,r4
341 mtspr SPRN_IBAT6L,r4
342 mtspr SPRN_IBAT7U,r4
343 mtspr SPRN_IBAT7L,r4
344END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
345
346 /* Flush all TLBs */
347 lis r4,0x1000
3481: addic. r4,r4,-0x1000
349 tlbie r4
350 blt 1b
351 sync
352
353 /* restore the MSR and turn on the MMU */
354 lwz r3,SL_MSR(r1)
355 bl turn_on_mmu
356
357 /* get back the stack pointer */
358 tovirt(r1,r1)
359
360 /* Restore TB */
361 li r3,0
362 mttbl r3
363 lwz r3,SL_TB(r1)
364 lwz r4,SL_TB+4(r1)
365 mttbu r3
366 mttbl r4
367
368 /* Restore the callee-saved registers and return */
369 lwz r0,SL_CR(r1)
370 mtcr r0
371 lwz r2,SL_R2(r1)
372 lmw r12,SL_R12(r1)
373 addi r1,r1,SL_SIZE
374 lwz r0,4(r1)
375 mtlr r0
376 blr
377
378turn_on_mmu:
379 mflr r4
380 tovirt(r4,r4)
381 mtsrr0 r4
382 mtsrr1 r3
383 sync
384 isync
385 rfi
386
387#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) */
388
389 .section .data
390 .balign L1_CACHE_BYTES
391sleep_storage:
392 .long 0
393 .balign L1_CACHE_BYTES, 0
394
395#endif /* CONFIG_6xx */
396 .section .text
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
new file mode 100644
index 000000000000..e1f9443cc872
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -0,0 +1,865 @@
1/*
2 * SMP support for power macintosh.
3 *
4 * We support both the old "powersurge" SMP architecture
5 * and the current Core99 (G4 PowerMac) machines.
6 *
7 * Note that we don't support the very first rev. of
8 * Apple/DayStar 2 CPUs board, the one with the funky
9 * watchdog. Hopefully, none of these should be there except
10 * maybe internally to Apple. I should probably still add some
11 * code to detect this card though and disable SMP. --BenH.
12 *
13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15 *
16 * Support for DayStar quad CPU cards
17 * Copyright (C) XLR8, Inc. 1994-2000
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24#include <linux/config.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/smp.h>
28#include <linux/smp_lock.h>
29#include <linux/interrupt.h>
30#include <linux/kernel_stat.h>
31#include <linux/delay.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/errno.h>
35#include <linux/hardirq.h>
36#include <linux/cpu.h>
37
38#include <asm/ptrace.h>
39#include <asm/atomic.h>
40#include <asm/irq.h>
41#include <asm/page.h>
42#include <asm/pgtable.h>
43#include <asm/sections.h>
44#include <asm/io.h>
45#include <asm/prom.h>
46#include <asm/smp.h>
47#include <asm/machdep.h>
48#include <asm/pmac_feature.h>
49#include <asm/time.h>
50#include <asm/mpic.h>
51#include <asm/cacheflush.h>
52#include <asm/keylargo.h>
53#include <asm/pmac_low_i2c.h>
54
55#undef DEBUG
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63extern void __secondary_start_pmac_0(void);
64
65#ifdef CONFIG_PPC32
66
67/* Sync flag for HW tb sync */
68static volatile int sec_tb_reset = 0;
69
70/*
71 * Powersurge (old powermac SMP) support.
72 */
73
74/* Addresses for powersurge registers */
75#define HAMMERHEAD_BASE 0xf8000000
76#define HHEAD_CONFIG 0x90
77#define HHEAD_SEC_INTR 0xc0
78
79/* register for interrupting the primary processor on the powersurge */
80/* N.B. this is actually the ethernet ROM! */
81#define PSURGE_PRI_INTR 0xf3019000
82
83/* register for storing the start address for the secondary processor */
84/* N.B. this is the PCI config space address register for the 1st bridge */
85#define PSURGE_START 0xf2800000
86
87/* Daystar/XLR8 4-CPU card */
88#define PSURGE_QUAD_REG_ADDR 0xf8800000
89
90#define PSURGE_QUAD_IRQ_SET 0
91#define PSURGE_QUAD_IRQ_CLR 1
92#define PSURGE_QUAD_IRQ_PRIMARY 2
93#define PSURGE_QUAD_CKSTOP_CTL 3
94#define PSURGE_QUAD_PRIMARY_ARB 4
95#define PSURGE_QUAD_BOARD_ID 6
96#define PSURGE_QUAD_WHICH_CPU 7
97#define PSURGE_QUAD_CKSTOP_RDBK 8
98#define PSURGE_QUAD_RESET_CTL 11
99
100#define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
101#define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
102#define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
103#define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
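/* The quad card registers are byte-wide, spaced 16 bytes apart at offset +4;
 * only the low 4 bits of each register are significant (see the & 0x0f). */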
104
105/* virtual addresses for the above */
106static volatile u8 __iomem *hhead_base;
107static volatile u8 __iomem *quad_base;
108static volatile u32 __iomem *psurge_pri_intr;
109static volatile u8 __iomem *psurge_sec_intr;
110static volatile u32 __iomem *psurge_start;
111
112/* values for psurge_type */
113#define PSURGE_NONE -1
114#define PSURGE_DUAL 0
115#define PSURGE_QUAD_OKEE 1
116#define PSURGE_QUAD_COTTON 2
117#define PSURGE_QUAD_ICEGRASS 3
118
119/* what sort of powersurge board we have */
120static int psurge_type = PSURGE_NONE;
121
122/*
123 * Set and clear IPIs for powersurge.
124 */
125static inline void psurge_set_ipi(int cpu)
126{
127 if (psurge_type == PSURGE_NONE)
128 return;
129 if (cpu == 0)
130 in_be32(psurge_pri_intr);
131 else if (psurge_type == PSURGE_DUAL)
132 out_8(psurge_sec_intr, 0);
133 else
134 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
135}
136
137static inline void psurge_clr_ipi(int cpu)
138{
139 if (cpu > 0) {
140 switch(psurge_type) {
141 case PSURGE_DUAL:
142 out_8(psurge_sec_intr, ~0);
143 case PSURGE_NONE:
144 break;
145 default:
146 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
147 }
148 }
149}
150
151/*
152 * On powersurge (old SMP powermac architecture) we don't have
153 * separate IPIs for separate messages like openpic does. Instead
154 * we have a bitmap for each processor, where a 1 bit means that
155 * the corresponding message is pending for that processor.
156 * Ideally each cpu's entry would be in a different cache line.
157 * -- paulus.
158 */
159static unsigned long psurge_smp_message[NR_CPUS];
160
161void psurge_smp_message_recv(struct pt_regs *regs)
162{
163 int cpu = smp_processor_id();
164 int msg;
165
166 /* clear interrupt */
167 psurge_clr_ipi(cpu);
168
169 if (num_online_cpus() < 2)
170 return;
171
172 /* make sure there is a message there */
173 for (msg = 0; msg < 4; msg++)
174 if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
175 smp_message_recv(msg, regs);
176}
177
178irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
179{
180 psurge_smp_message_recv(regs);
181 return IRQ_HANDLED;
182}
183
184static void smp_psurge_message_pass(int target, int msg)
185{
186 int i;
187
188 if (num_online_cpus() < 2)
189 return;
190
191 for (i = 0; i < NR_CPUS; i++) {
192 if (!cpu_online(i))
193 continue;
194 if (target == MSG_ALL
195 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
196 || target == i) {
197 set_bit(msg, &psurge_smp_message[i]);
198 psurge_set_ipi(i);
199 }
200 }
201}
202
203/*
204 * Determine whether a quad card is present. We read the board ID register,
205 * force the data bus to change to something else, and read it again.
206 * If it's stable, then the register probably exists (ugh!)
207 */
208static int __init psurge_quad_probe(void)
209{
210 int type;
211 unsigned int i;
212
213 type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
214 if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
215 || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
216 return PSURGE_DUAL;
217
218 /* looks OK, try a slightly more rigorous test */
219 /* bogus is not necessarily cacheline-aligned,
220 though I don't suppose that really matters. -- paulus */
221 for (i = 0; i < 100; i++) {
222 volatile u32 bogus[8];
223 bogus[(0+i)%8] = 0x00000000;
224 bogus[(1+i)%8] = 0x55555555;
225 bogus[(2+i)%8] = 0xFFFFFFFF;
226 bogus[(3+i)%8] = 0xAAAAAAAA;
227 bogus[(4+i)%8] = 0x33333333;
228 bogus[(5+i)%8] = 0xCCCCCCCC;
229 bogus[(6+i)%8] = 0xCCCCCCCC;
230 bogus[(7+i)%8] = 0x33333333;
231 wmb();
232 asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
233 mb();
234 if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
235 return PSURGE_DUAL;
236 }
237 return type;
238}
239
240static void __init psurge_quad_init(void)
241{
242 int procbits;
243
244 if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
245 procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
246 if (psurge_type == PSURGE_QUAD_ICEGRASS)
247 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
248 else
249 PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
250 mdelay(33);
251 out_8(psurge_sec_intr, ~0);
252 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
253 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
254 if (psurge_type != PSURGE_QUAD_ICEGRASS)
255 PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
256 PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
257 mdelay(33);
258 PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
259 mdelay(33);
260 PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
261 mdelay(33);
262}
263
264static int __init smp_psurge_probe(void)
265{
266 int i, ncpus;
267
268 /* We don't do SMP on the PPC601 -- paulus */
269 if (PVR_VER(mfspr(SPRN_PVR)) == 1)
270 return 1;
271
272 /*
273 * The powersurge cpu board can be used in the generation
274 * of powermacs that have a socket for an upgradeable cpu card,
275 * including the 7500, 8500, 9500, 9600.
276 * The device tree doesn't tell you if you have 2 cpus because
277 * OF doesn't know anything about the 2nd processor.
278 * Instead we look for magic bits in magic registers,
279 * in the hammerhead memory controller in the case of the
280 * dual-cpu powersurge board. -- paulus.
281 */
282 if (find_devices("hammerhead") == NULL)
283 return 1;
284
285 hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
286 quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
287 psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
288
289 psurge_type = psurge_quad_probe();
290 if (psurge_type != PSURGE_DUAL) {
291 psurge_quad_init();
292 /* All released cards using this HW design have 4 CPUs */
293 ncpus = 4;
294 } else {
295 iounmap(quad_base);
296 if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
297 /* not a dual-cpu card */
298 iounmap(hhead_base);
299 psurge_type = PSURGE_NONE;
300 return 1;
301 }
302 ncpus = 2;
303 }
304
305 psurge_start = ioremap(PSURGE_START, 4);
306 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
307
308 /* this is not actually strictly necessary -- paulus. */
309 for (i = 1; i < ncpus; ++i)
310 smp_hw_index[i] = i;
311
312 if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
313
314 return ncpus;
315}
316
317static void __init smp_psurge_kick_cpu(int nr)
318{
319 unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
320 unsigned long a;
321
322 /* may need to flush here if secondary bats aren't setup */
323 for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
324 asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
325 asm volatile("sync");
326
327 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
328
329 out_be32(psurge_start, start);
330 mb();
331
332 psurge_set_ipi(nr);
333 udelay(10);
334 psurge_clr_ipi(nr);
335
336 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
337}
338
339/*
340 * With the dual-cpu powersurge board, the decrementers and timebases
341 * of both cpus are frozen after the secondary cpu is started up,
342 * until we give the secondary cpu another interrupt. This routine
343 * uses this to get the timebases synchronized.
344 * -- paulus.
345 */
346static void __init psurge_dual_sync_tb(int cpu_nr)
347{
348 int t;
349
350 set_dec(tb_ticks_per_jiffy);
351 set_tb(0, 0);
352 last_jiffy_stamp(cpu_nr) = 0;
353
354 if (cpu_nr > 0) {
355 mb();
356 sec_tb_reset = 1;
357 return;
358 }
359
360 /* wait for the secondary to have reset its TB before proceeding */
361 for (t = 10000000; t > 0 && !sec_tb_reset; --t)
362 ;
363
364 /* now interrupt the secondary, starting both TBs */
365 psurge_set_ipi(1);
366
367 smp_tb_synchronized = 1;
368}
369
370static struct irqaction psurge_irqaction = {
371 .handler = psurge_primary_intr,
372 .flags = SA_INTERRUPT,
373 .mask = CPU_MASK_NONE,
374 .name = "primary IPI",
375};
376
377static void __init smp_psurge_setup_cpu(int cpu_nr)
378{
379
380 if (cpu_nr == 0) {
381 /* If we failed to start the second CPU, we should still
382 * send it an IPI to start the timebase & DEC or we might
383 * have them stuck.
384 */
385 if (num_online_cpus() < 2) {
386 if (psurge_type == PSURGE_DUAL)
387 psurge_set_ipi(1);
388 return;
389 }
390 /* reset the entry point so if we get another intr we won't
391 * try to startup again */
392 out_be32(psurge_start, 0x100);
393 if (setup_irq(30, &psurge_irqaction))
394 printk(KERN_ERR "Couldn't get primary IPI interrupt\n");
395 }
396
397 if (psurge_type == PSURGE_DUAL)
398 psurge_dual_sync_tb(cpu_nr);
399}
400
401void __init smp_psurge_take_timebase(void)
402{
403 /* Dummy implementation */
404}
405
406void __init smp_psurge_give_timebase(void)
407{
408 /* Dummy implementation */
409}
410
411/* PowerSurge-style Macs */
412struct smp_ops_t psurge_smp_ops = {
413 .message_pass = smp_psurge_message_pass,
414 .probe = smp_psurge_probe,
415 .kick_cpu = smp_psurge_kick_cpu,
416 .setup_cpu = smp_psurge_setup_cpu,
417 .give_timebase = smp_psurge_give_timebase,
418 .take_timebase = smp_psurge_take_timebase,
419};
420#endif /* CONFIG_PPC32 - actually powersurge support */
421
422#ifdef CONFIG_PPC64
423/*
424 * G5s enable/disable the timebase via an i2c-connected clock chip.
425 */
426static struct device_node *pmac_tb_clock_chip_host;
427static u8 pmac_tb_pulsar_addr;
428static void (*pmac_tb_freeze)(int freeze);
429static DEFINE_SPINLOCK(timebase_lock);
430static unsigned long timebase;
431
432static void smp_core99_cypress_tb_freeze(int freeze)
433{
434 u8 data;
435 int rc;
436
437 /* Strangely, the device-tree says address is 0xd2, but darwin
438 * accesses 0xd0 ...
439 */
440 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
441 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
442 0xd0 | pmac_low_i2c_read,
443 0x81, &data, 1);
444 if (rc != 0)
445 goto bail;
446
447 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
448
449 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
450 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
451 0xd0 | pmac_low_i2c_write,
452 0x81, &data, 1);
453
454 bail:
455 if (rc != 0) {
456 printk("Cypress Timebase %s rc: %d\n",
457 freeze ? "freeze" : "unfreeze", rc);
458 panic("Timebase freeze failed !\n");
459 }
460}
461
462
463static void smp_core99_pulsar_tb_freeze(int freeze)
464{
465 u8 data;
466 int rc;
467
468 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
469 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
470 pmac_tb_pulsar_addr | pmac_low_i2c_read,
471 0x2e, &data, 1);
472 if (rc != 0)
473 goto bail;
474
475 data = (data & 0x88) | (freeze ? 0x11 : 0x22);
476
477 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
478 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
479 pmac_tb_pulsar_addr | pmac_low_i2c_write,
480 0x2e, &data, 1);
481 bail:
482 if (rc != 0) {
483 printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
484 freeze ? "freeze" : "unfreeze", rc);
485 panic("Timebase freeze failed !\n");
486 }
487}
488
489
490static void smp_core99_give_timebase(void)
491{
492 /* Open i2c bus for synchronous access */
493 if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
494 panic("Can't open i2c for TB sync !\n");
495
496 spin_lock(&timebase_lock);
497 (*pmac_tb_freeze)(1);
498 mb();
499 timebase = get_tb();
500 spin_unlock(&timebase_lock);
501
502 while (timebase)
503 barrier();
504
505 spin_lock(&timebase_lock);
506 (*pmac_tb_freeze)(0);
507 spin_unlock(&timebase_lock);
508
509 /* Close i2c bus */
510 pmac_low_i2c_close(pmac_tb_clock_chip_host);
511}
512
513
514static void __devinit smp_core99_take_timebase(void)
515{
516 while (!timebase)
517 barrier();
518 spin_lock(&timebase_lock);
519 set_tb(timebase >> 32, timebase & 0xffffffff);
520 timebase = 0;
521 spin_unlock(&timebase_lock);
522}
523
524static void __init smp_core99_setup(int ncpus)
525{
526 struct device_node *cc = NULL;
527 struct device_node *p;
528 u32 *reg;
529 int ok;
530
531 /* HW sync only on these platforms */
532 if (!machine_is_compatible("PowerMac7,2") &&
533 !machine_is_compatible("PowerMac7,3") &&
534 !machine_is_compatible("RackMac3,1"))
535 return;
536
537 /* Look for the clock chip */
538 while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
539 p = of_get_parent(cc);
540 ok = p && device_is_compatible(p, "uni-n-i2c");
541 of_node_put(p);
542 if (!ok)
543 continue;
544
545 reg = (u32 *)get_property(cc, "reg", NULL);
546 if (reg == NULL)
547 continue;
548
549 switch (*reg) {
550 case 0xd2:
551 if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
552 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
553 pmac_tb_pulsar_addr = 0xd2;
554 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
555 } else if (device_is_compatible(cc, "cy28508")) {
556 pmac_tb_freeze = smp_core99_cypress_tb_freeze;
557 printk(KERN_INFO "Timebase clock is Cypress chip\n");
558 }
559 break;
560 case 0xd4:
561 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
562 pmac_tb_pulsar_addr = 0xd4;
563 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
564 break;
565 }
566 if (pmac_tb_freeze != NULL) {
567 pmac_tb_clock_chip_host = of_get_parent(cc);
568 of_node_put(cc);
569 break;
570 }
571 }
572 if (pmac_tb_freeze == NULL) {
573 smp_ops->give_timebase = smp_generic_give_timebase;
574 smp_ops->take_timebase = smp_generic_take_timebase;
575 }
576}
577
578/* nothing to do here, caches are already set up by service processor */
579static inline void __devinit core99_init_caches(int cpu)
580{
581}
582
583#else /* CONFIG_PPC64 */
584
585/*
586 * SMP G4 powermacs use a GPIO to enable/disable the timebase.
587 */
588
589static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */
590
591static unsigned int pri_tb_hi, pri_tb_lo;
592static unsigned int pri_tb_stamp;
593
594/* not __init, called in sleep/wakeup code */
595void smp_core99_give_timebase(void)
596{
597 unsigned long flags;
598 unsigned int t;
599
600 /* wait for the secondary to be in take_timebase */
601 for (t = 100000; t > 0 && !sec_tb_reset; --t)
602 udelay(10);
603 if (!sec_tb_reset) {
604 printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
605 return;
606 }
607
608 /* freeze the timebase and read it */
609 /* disable interrupts so the timebase is disabled for the
610 shortest possible time */
611 local_irq_save(flags);
612 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
613 pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
614 mb();
615 pri_tb_hi = get_tbu();
616 pri_tb_lo = get_tbl();
617 pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
618 mb();
619
620 /* tell the secondary we're ready */
621 sec_tb_reset = 2;
622 mb();
623
624 /* wait for the secondary to have taken it */
625 for (t = 100000; t > 0 && sec_tb_reset; --t)
626 udelay(10);
627 if (sec_tb_reset)
628 printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
629 else
630 smp_tb_synchronized = 1;
631
632 /* Now restart the timebase by leaving the GPIO as an open collector */
633 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
634 pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
635 local_irq_restore(flags);
636}
637
638/* not __init, called in sleep/wakeup code */
639void smp_core99_take_timebase(void)
640{
641 unsigned long flags;
642
643 /* tell the primary we're here */
644 sec_tb_reset = 1;
645 mb();
646
647 /* wait for the primary to set pri_tb_hi/lo */
648 while (sec_tb_reset < 2)
649 mb();
650
651 /* set our stuff the same as the primary */
652 local_irq_save(flags);
653 set_dec(1);
654 set_tb(pri_tb_hi, pri_tb_lo);
655 last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
656 mb();
657
658 /* tell the primary we're done */
659 sec_tb_reset = 0;
660 mb();
661 local_irq_restore(flags);
662}
663
664/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
665static volatile long int core99_l2_cache;
666static volatile long int core99_l3_cache;
667
668static void __devinit core99_init_caches(int cpu)
669{
670 if (!cpu_has_feature(CPU_FTR_L2CR))
671 return;
672
673 if (cpu == 0) {
674 core99_l2_cache = _get_L2CR();
675 printk("CPU0: L2CR is %lx\n", core99_l2_cache);
676 } else {
677 printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
678 _set_L2CR(0);
679 _set_L2CR(core99_l2_cache);
680 printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
681 }
682
683 if (!cpu_has_feature(CPU_FTR_L3CR))
684 return;
685
686 if (cpu == 0){
687 core99_l3_cache = _get_L3CR();
688 printk("CPU0: L3CR is %lx\n", core99_l3_cache);
689 } else {
690 printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
691 _set_L3CR(0);
692 _set_L3CR(core99_l3_cache);
693 printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
694 }
695}
696
697static void __init smp_core99_setup(int ncpus)
698{
699 struct device_node *cpu;
700 u32 *tbprop = NULL;
701 int i;
702
703 core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */
704 cpu = of_find_node_by_type(NULL, "cpu");
705 if (cpu != NULL) {
706 tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL);
707 if (tbprop)
708 core99_tb_gpio = *tbprop;
709 of_node_put(cpu);
710 }
711
712 /* XXX should get this from reg properties */
713 for (i = 1; i < ncpus; ++i)
714 smp_hw_index[i] = i;
715 powersave_nap = 0;
716}
717#endif
718
719static int __init smp_core99_probe(void)
720{
721 struct device_node *cpus;
722 int ncpus = 0;
723
724 if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
725
726 /* Count CPUs in the device-tree */
727 for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
728 ++ncpus;
729
730 printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
731
732 /* Nothing more to do if less than 2 of them */
733 if (ncpus <= 1)
734 return 1;
735
736 smp_core99_setup(ncpus);
737 mpic_request_ipis();
738 core99_init_caches(0);
739
740 return ncpus;
741}
742
743static void __devinit smp_core99_kick_cpu(int nr)
744{
745 unsigned int save_vector;
746 unsigned long new_vector;
747 unsigned long flags;
748 volatile unsigned int *vector
749 = ((volatile unsigned int *)(KERNELBASE+0x100));
750
751 if (nr < 0 || nr > 3)
752 return;
753 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
754
755 local_irq_save(flags);
756 local_irq_disable();
757
758 /* Save reset vector */
759 save_vector = *vector;
760
761 /* Setup fake reset vector that does
762 * b __secondary_start_pmac_0 + nr*8 - KERNELBASE
763 */
764 new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
765 *vector = 0x48000002 + new_vector - KERNELBASE;
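	/* 0x48000002 is an I-form branch with the AA (absolute) bit set;
	 * adding the physical address of the entry point fills in the LI
	 * field, giving "ba __secondary_start_pmac_0 + nr*8". */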
766
767 /* flush data cache and inval instruction cache */
768 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
769
770 /* Put some life in our friend */
771 pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
772
773 /* FIXME: We wait a bit for the CPU to take the exception; I should
774 * instead wait for the entry code to set something for me. Well,
775 * ideally, all that crap will be done in prom.c and the CPU left
776 * in a RAM-based wait loop like CHRP.
777 */
778 mdelay(1);
779
780 /* Restore our exception vector */
781 *vector = save_vector;
782 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
783
784 local_irq_restore(flags);
785 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
786}
787
788static void __devinit smp_core99_setup_cpu(int cpu_nr)
789{
790 /* Setup L2/L3 */
791 if (cpu_nr != 0)
792 core99_init_caches(cpu_nr);
793
794 /* Setup openpic */
795 mpic_setup_this_cpu();
796
797 if (cpu_nr == 0) {
798#ifdef CONFIG_POWER4
799 extern void g5_phy_disable_cpu1(void);
800
801 /* If we didn't start the second CPU, we must take
802 * it off the bus
803 */
804 if (machine_is_compatible("MacRISC4") &&
805 num_online_cpus() < 2)
806 g5_phy_disable_cpu1();
807#endif /* CONFIG_POWER4 */
808 if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
809 }
810}
811
812
813/* Core99 Macs (dual G4s and G5s) */
814struct smp_ops_t core99_smp_ops = {
815 .message_pass = smp_mpic_message_pass,
816 .probe = smp_core99_probe,
817 .kick_cpu = smp_core99_kick_cpu,
818 .setup_cpu = smp_core99_setup_cpu,
819 .give_timebase = smp_core99_give_timebase,
820 .take_timebase = smp_core99_take_timebase,
821};
822
823#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
824
825int __cpu_disable(void)
826{
827 cpu_clear(smp_processor_id(), cpu_online_map);
828
829 /* XXX reset cpu affinity here */
830 mpic_cpu_set_priority(0xf);
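	/* Load the decrementer with the largest positive 32-bit value so it
	 * won't underflow (and raise an interrupt) while this CPU goes offline. */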
831 asm volatile("mtdec %0" : : "r" (0x7fffffff));
832 mb();
833 udelay(20);
834 asm volatile("mtdec %0" : : "r" (0x7fffffff));
835 return 0;
836}
837
838extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
839static int cpu_dead[NR_CPUS];
840
841void cpu_die(void)
842{
843 local_irq_disable();
844 cpu_dead[smp_processor_id()] = 1;
845 mb();
846 low_cpu_die();
847}
848
849void __cpu_die(unsigned int cpu)
850{
851 int timeout;
852
853 timeout = 1000;
854 while (!cpu_dead[cpu]) {
855 if (--timeout == 0) {
856 printk("CPU %u refused to die!\n", cpu);
857 break;
858 }
859 msleep(1);
860 }
861 cpu_callin_map[cpu] = 0;
862 cpu_dead[cpu] = 0;
863}
864
865#endif
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
new file mode 100644
index 000000000000..5947b21a8588
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -0,0 +1,360 @@
1/*
2 * Support for periodic interrupts (100 per second) and for getting
3 * the current time from the RTC on Power Macintoshes.
4 *
5 * We use the decrementer register for our periodic interrupts.
6 *
7 * Paul Mackerras August 1996.
8 * Copyright (C) 1996 Paul Mackerras.
9 * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
10 *
11 */
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/init.h>
20#include <linux/time.h>
21#include <linux/adb.h>
22#include <linux/cuda.h>
23#include <linux/pmu.h>
24#include <linux/interrupt.h>
25#include <linux/hardirq.h>
26#include <linux/rtc.h>
27
28#include <asm/sections.h>
29#include <asm/prom.h>
30#include <asm/system.h>
31#include <asm/io.h>
32#include <asm/pgtable.h>
33#include <asm/machdep.h>
34#include <asm/time.h>
35#include <asm/nvram.h>
36#include <asm/smu.h>
37
38#undef DEBUG
39
40#ifdef DEBUG
41#define DBG(x...) printk(x)
42#else
43#define DBG(x...)
44#endif
45
46/* Apparently the RTC stores seconds since 1 Jan 1904 */
47#define RTC_OFFSET 2082844800
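/* 2082844800 = (66 * 365 + 17) * 86400: the 66 years from 1904 to 1970
 * contain 17 leap days, so this is the offset in seconds from the 1904
 * epoch to the Unix epoch. */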
48
49/*
50 * Calibrate the decrementer frequency with the VIA timer 1.
51 */
52#define VIA_TIMER_FREQ_6 4700000 /* timer 1 frequency * 6 */
53
54/* VIA registers */
55#define RS 0x200 /* skip between registers */
56#define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */
57#define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */
58#define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */
59#define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */
60#define ACR (11*RS) /* Auxiliary control register */
61#define IFR (13*RS) /* Interrupt flag register */
62
63/* Bits in ACR */
64#define T1MODE 0xc0 /* Timer 1 mode */
65#define T1MODE_CONT 0x40 /* continuous interrupts */
66
67/* Bits in IFR and IER */
68#define T1_INT 0x40 /* Timer 1 interrupt */
69
70long __init pmac_time_init(void)
71{
72 s32 delta = 0;
73#ifdef CONFIG_NVRAM
74 int dst;
75
76 delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
77 delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
78 delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
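	/* The XPRAM value is a signed 24-bit offset in seconds; sign-extend
	 * it to 32 bits before using it. */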
79 if (delta & 0x00800000UL)
80 delta |= 0xFF000000UL;
81 dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
82 printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
83 dst ? "on" : "off");
84#endif
85 return delta;
86}
87
88static void to_rtc_time(unsigned long now, struct rtc_time *tm)
89{
90 to_tm(now, tm);
91 tm->tm_year -= 1900;
92 tm->tm_mon -= 1;
93}
94
95static unsigned long from_rtc_time(struct rtc_time *tm)
96{
97 return mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday,
98 tm->tm_hour, tm->tm_min, tm->tm_sec);
99}
100
101#ifdef CONFIG_ADB_CUDA
102static unsigned long cuda_get_time(void)
103{
104 struct adb_request req;
105 unsigned long now;
106
107 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
108 return 0;
109 while (!req.complete)
110 cuda_poll();
111 if (req.reply_len != 7)
112 printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
113 req.reply_len);
114 now = (req.reply[3] << 24) + (req.reply[4] << 16)
115 + (req.reply[5] << 8) + req.reply[6];
116 return now - RTC_OFFSET;
117}
118
119#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
120
121static int cuda_set_rtc_time(struct rtc_time *tm)
122{
123 unsigned int nowtime;
124 struct adb_request req;
125
126 nowtime = from_rtc_time(tm) + RTC_OFFSET;
127 if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
128 nowtime >> 24, nowtime >> 16, nowtime >> 8,
129 nowtime) < 0)
130 return -ENXIO;
131 while (!req.complete)
132 cuda_poll();
133 if ((req.reply_len != 3) && (req.reply_len != 7))
134 printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n",
135 req.reply_len);
136 return 0;
137}
138
139#else
140#define cuda_get_time() 0
141#define cuda_get_rtc_time(tm)
142#define cuda_set_rtc_time(tm) 0
143#endif
144
145#ifdef CONFIG_ADB_PMU
146static unsigned long pmu_get_time(void)
147{
148 struct adb_request req;
149 unsigned long now;
150
151 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
152 return 0;
153 pmu_wait_complete(&req);
154 if (req.reply_len != 4)
155 printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
156 req.reply_len);
157 now = (req.reply[0] << 24) + (req.reply[1] << 16)
158 + (req.reply[2] << 8) + req.reply[3];
159 return now - RTC_OFFSET;
160}
161
162#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
163
164static int pmu_set_rtc_time(struct rtc_time *tm)
165{
166 unsigned int nowtime;
167 struct adb_request req;
168
169 nowtime = from_rtc_time(tm) + RTC_OFFSET;
170 if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
171 nowtime >> 16, nowtime >> 8, nowtime) < 0)
172 return -ENXIO;
173 pmu_wait_complete(&req);
174 if (req.reply_len != 0)
175 printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n",
176 req.reply_len);
177 return 0;
178}
179
180#else
181#define pmu_get_time() 0
182#define pmu_get_rtc_time(tm)
183#define pmu_set_rtc_time(tm) 0
184#endif
185
186#ifdef CONFIG_PMAC_SMU
187static unsigned long smu_get_time(void)
188{
189 struct rtc_time tm;
190
191 if (smu_get_rtc_time(&tm, 1))
192 return 0;
193 return from_rtc_time(&tm);
194}
195
196#else
197#define smu_get_time() 0
198#define smu_get_rtc_time(tm, spin)
199#define smu_set_rtc_time(tm, spin) 0
200#endif
201
202unsigned long pmac_get_boot_time(void)
203{
204 /* Get the time from the RTC, used only at boot time */
205 switch (sys_ctrler) {
206 case SYS_CTRLER_CUDA:
207 return cuda_get_time();
208 case SYS_CTRLER_PMU:
209 return pmu_get_time();
210 case SYS_CTRLER_SMU:
211 return smu_get_time();
212 default:
213 return 0;
214 }
215}
216
217void pmac_get_rtc_time(struct rtc_time *tm)
218{
219 /* Get the time from the RTC, used only at boot time */
220 switch (sys_ctrler) {
221 case SYS_CTRLER_CUDA:
222 cuda_get_rtc_time(tm);
223 break;
224 case SYS_CTRLER_PMU:
225 pmu_get_rtc_time(tm);
226 break;
227 case SYS_CTRLER_SMU:
228 smu_get_rtc_time(tm, 1);
229 break;
230 default:
231 ;
232 }
233}
234
235int pmac_set_rtc_time(struct rtc_time *tm)
236{
237 switch (sys_ctrler) {
238 case SYS_CTRLER_CUDA:
239 return cuda_set_rtc_time(tm);
240 case SYS_CTRLER_PMU:
241 return pmu_set_rtc_time(tm);
242 case SYS_CTRLER_SMU:
243 return smu_set_rtc_time(tm, 1);
244 default:
245 return -ENODEV;
246 }
247}
248
249#ifdef CONFIG_PPC32
250/*
251 * Calibrate the decrementer register using VIA timer 1.
252 * This is used both on powermacs and CHRP machines.
253 */
254int __init via_calibrate_decr(void)
255{
256 struct device_node *vias;
257 volatile unsigned char __iomem *via;
258 int count = VIA_TIMER_FREQ_6 / 100;
259 unsigned int dstart, dend;
260
261 vias = find_devices("via-cuda");
262 if (vias == 0)
263 vias = find_devices("via-pmu");
264 if (vias == 0)
265 vias = find_devices("via");
266 if (vias == 0 || vias->n_addrs == 0)
267 return 0;
268 via = ioremap(vias->addrs[0].address, vias->addrs[0].size);
269
270 /* set timer 1 for continuous interrupts */
271 out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
272 /* set the counter to a small value */
273 out_8(&via[T1CH], 2);
274 /* set the latch to `count' */
275 out_8(&via[T1LL], count);
276 out_8(&via[T1LH], count >> 8);
277 /* wait until it hits 0 */
278 while ((in_8(&via[IFR]) & T1_INT) == 0)
279 ;
280 dstart = get_dec();
281 /* clear the interrupt & wait until it hits 0 again */
282 in_8(&via[T1CL]);
283 while ((in_8(&via[IFR]) & T1_INT) == 0)
284 ;
285 dend = get_dec();
286
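	/* Timer 1 was latched with VIA_TIMER_FREQ_6/100 ticks, i.e. it ran
	 * for 6/100 of a second between the two decrementer reads, so the
	 * decrementer/timebase frequency is (dstart - dend) * 100 / 6. */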
287 ppc_tb_freq = (dstart - dend) * 100 / 6;
288
289 iounmap(via);
290
291 return 1;
292}
293#endif
294
295#ifdef CONFIG_PM
296/*
297 * Reset the time after a sleep.
298 */
299static int
300time_sleep_notify(struct pmu_sleep_notifier *self, int when)
301{
302 static unsigned long time_diff;
303 unsigned long flags;
304 unsigned long seq;
305 struct timespec tv;
306
307 switch (when) {
308 case PBOOK_SLEEP_NOW:
309 do {
310 seq = read_seqbegin_irqsave(&xtime_lock, flags);
311 time_diff = xtime.tv_sec - pmac_get_boot_time();
312 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
313 break;
314 case PBOOK_WAKE:
315 tv.tv_sec = pmac_get_boot_time() + time_diff;
316 tv.tv_nsec = 0;
317 do_settimeofday(&tv);
318 break;
319 }
320 return PBOOK_SLEEP_OK;
321}
322
323static struct pmu_sleep_notifier time_sleep_notifier = {
324 time_sleep_notify, SLEEP_LEVEL_MISC,
325};
326#endif /* CONFIG_PM */
327
328/*
329 * Query the OF and get the decr frequency.
330 */
331void __init pmac_calibrate_decr(void)
332{
333#ifdef CONFIG_PM
334 /* XXX why here? */
335 pmu_register_sleep_notifier(&time_sleep_notifier);
336#endif /* CONFIG_PM */
337
338 generic_calibrate_decr();
339
340#ifdef CONFIG_PPC32
341 /* We assume MacRISC2 machines have correct device-tree
342 * calibration. That's better since the VIA itself seems
343 * to be slightly off. --BenH
344 */
345 if (!machine_is_compatible("MacRISC2") &&
346 !machine_is_compatible("MacRISC3") &&
347 !machine_is_compatible("MacRISC4"))
348 if (via_calibrate_decr())
349 return;
350
351 /* Special case: QuickSilver G4s seem to have a badly calibrated
352 * timebase-frequency in OF; the VIA is much better on these. We should
353 * probably implement calibration based on the KL timer on these
354 * machines anyway... -BenH
355 */
356 if (machine_is_compatible("PowerMac3,5"))
357 if (via_calibrate_decr())
358 return;
359#endif
360}
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
new file mode 100644
index 000000000000..673ac47a1626
--- /dev/null
+++ b/arch/powerpc/platforms/prep/Kconfig
@@ -0,0 +1,22 @@
1
2config PREP_RESIDUAL
3 bool "Support for PReP Residual Data"
4 depends on PPC_PREP
5 help
6 Some PReP systems have residual data passed to the kernel by the
7 firmware. This allows detection of memory size, devices present and
8 other useful pieces of information. Sometimes this information is
9 not present or incorrect, in which case it could lead to the machine
10 behaving incorrectly. If this happens, either disable PREP_RESIDUAL
11 or pass the 'noresidual' option to the kernel.
12
13 If you are running a PReP system, say Y here, otherwise say N.
14
15config PROC_PREPRESIDUAL
16 bool "Support for reading of PReP Residual Data in /proc"
17 depends on PREP_RESIDUAL && PROC_FS
18 help
19 Enabling this option will create a /proc/residual file which allows
20 you to get at the residual data on PReP systems. You will need a tool
21 (lsresidual) to parse it. If you aren't on a PReP system, you don't
22 want this.
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
new file mode 100644
index 000000000000..2d57f588151d
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -0,0 +1,42 @@
1
2config PPC_SPLPAR
3 depends on PPC_PSERIES
4 bool "Support for shared-processor logical partitions"
5 default n
6 help
7 Enabling this option will make the kernel run more efficiently
8 on logically-partitioned pSeries systems which use shared
9 processors, that is, which share physical processors between
10 two or more partitions.
11
12config HMT
13 bool "Hardware multithreading"
14 depends on SMP && PPC_PSERIES && BROKEN
15 help
16 This option enables hardware multithreading on RS64 cpus.
17 pSeries systems p620 and p660 have such a cpu type.
18
19config EEH
20 bool "PCI Extended Error Handling (EEH)" if EMBEDDED
21 depends on PPC_PSERIES
22 default y if !EMBEDDED
23
24config RTAS_PROC
25 bool "Proc interface to RTAS"
26 depends on PPC_RTAS
27 default y
28
29config RTAS_FLASH
30 tristate "Firmware flash interface"
31 depends on PPC64 && RTAS_PROC
32
33config SCANLOG
34 tristate "Scanlog dump interface"
35 depends on RTAS_PROC && PPC_PSERIES
36
37config LPARCFG
38 tristate "LPAR Configuration Data"
39 depends on PPC_PSERIES || PPC_ISERIES
40 help
41 Provide system capacity information via human readable
42 <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
new file mode 100644
index 000000000000..5ef494e3a70f
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -0,0 +1,5 @@
1obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
2 setup.o iommu.o rtas-fw.o ras.o
3obj-$(CONFIG_SMP) += smp.o
4obj-$(CONFIG_IBMVIO) += vio.o
5obj-$(CONFIG_XICS) += xics.o
diff --git a/arch/ppc64/kernel/pSeries_hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 176e8da76466..176e8da76466 100644
--- a/arch/ppc64/kernel/pSeries_hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index d17f0108a032..9e90d41131d8 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -46,7 +46,8 @@
 #include <asm/pSeries_reconfig.h>
 #include <asm/systemcfg.h>
 #include <asm/firmware.h>
-#include "pci.h"
+#include <asm/tce.h>
+#include <asm/ppc-pci.h>
 
 #define DBG(fmt...)
 
@@ -59,6 +60,9 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
 	union tce_entry t;
 	union tce_entry *tp;
 
+	index <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	t.te_word = 0;
 	t.te_rdwr = 1; // Read allowed
 
@@ -69,11 +73,11 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
 
 	while (npages--) {
 		/* can't move this out since we might cross LMB boundary */
-		t.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+		t.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 
 		tp->te_word = t.te_word;
 
-		uaddr += PAGE_SIZE;
+		uaddr += TCE_PAGE_SIZE;
 		tp++;
 	}
 }
@@ -84,6 +88,9 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
 	union tce_entry t;
 	union tce_entry *tp;
 
+	npages <<= TCE_PAGE_FACTOR;
+	index <<= TCE_PAGE_FACTOR;
+
 	t.te_word = 0;
 	tp = ((union tce_entry *)tbl->it_base) + index;
 
@@ -103,7 +110,7 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	union tce_entry tce;
 
 	tce.te_word = 0;
-	tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
 	if (direction != DMA_TO_DEVICE)
 		tce.te_pciwr = 1;
@@ -136,6 +143,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	union tce_entry tce, *tcep;
 	long l, limit;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	if (npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
@@ -155,7 +165,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	}
 
 	tce.te_word = 0;
-	tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
 	if (direction != DMA_TO_DEVICE)
 		tce.te_pciwr = 1;
@@ -166,7 +176,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	 * Set up the page with TCE data, looping through and setting
 	 * the values.
 	 */
-	limit = min_t(long, npages, PAGE_SIZE/sizeof(union tce_entry));
+	limit = min_t(long, npages, 4096/sizeof(union tce_entry));
 
 	for (l = 0; l < limit; l++) {
 		tcep[l] = tce;
@@ -196,6 +206,9 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 
 	while (npages--) {
@@ -221,6 +234,9 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 
 	rc = plpar_tce_stuff((u64)tbl->it_index,
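A note on the TCE_PAGE_FACTOR shifts introduced above: the IOMMU's translation control entries (TCEs) describe 4 KB pages, while the kernel may use larger pages, so indices and counts given in kernel-page units are rescaled into TCE-sized units before the tables are walked. The following is only an illustrative sketch; the real definitions of TCE_SHIFT, TCE_PAGE_SIZE and TCE_PAGE_FACTOR live in <asm/tce.h> and are assumed here, not quoted from this patch.

	#include <asm/page.h>	/* PAGE_SHIFT */

	#define TCE_SHIFT	12				/* one TCE maps 4 KB (assumed) */
	#define TCE_PAGE_SIZE	(1UL << TCE_SHIFT)
	#define TCE_PAGE_FACTOR	(PAGE_SHIFT - TCE_SHIFT)	/* log2(TCEs per kernel page), assumed */

	/* A request for 'npages' kernel pages starting at kernel-page 'index'
	 * covers (npages << TCE_PAGE_FACTOR) TCEs starting at TCE slot
	 * (index << TCE_PAGE_FACTOR), which is the scaling applied in
	 * tce_build_pSeries() and friends above. */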
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index a6de83f2078f..268d8362dde7 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -486,8 +486,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
  */
-void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
-				   int local)
+void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
 	int i;
 	unsigned long flags = 0;
@@ -498,7 +497,7 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
 	for (i = 0; i < number; i++)
-		flush_hash_page(context, batch->addr[i], batch->pte[i], local);
+		flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 
 	if (lock_tlbie)
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
diff --git a/arch/ppc64/kernel/pSeries_nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 18abfb1f4e24..18abfb1f4e24 100644
--- a/arch/ppc64/kernel/pSeries_nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/powerpc/platforms/pseries/pci.c
index 928f8febdb3b..c198656a3bb5 100644
--- a/arch/ppc64/kernel/pSeries_pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -29,8 +29,7 @@
 
 #include <asm/pci-bridge.h>
 #include <asm/prom.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 static int __devinitdata s7a_workaround = -1;
 
diff --git a/arch/ppc64/kernel/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 41b97dc9cc0a..6562ff4b0a82 100644
--- a/arch/ppc64/kernel/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -1,17 +1,16 @@
 /*
- * ras.c
  * Copyright (C) 2001 Dave Engebretsen IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -19,7 +18,7 @@
 
 /* Change Activity:
  * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
- * End Change Activity
+ * End Change Activity
  */
 
 #include <linux/errno.h>
@@ -323,7 +322,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
 		nonfatal = 1;
 	}
 
-	log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);
+	log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);
 
 	return nonfatal;
 }
diff --git a/arch/ppc64/kernel/pSeries_reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 58c61219d08e..58c61219d08e 100644
--- a/arch/ppc64/kernel/pSeries_reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
diff --git a/arch/powerpc/platforms/pseries/rtas-fw.c b/arch/powerpc/platforms/pseries/rtas-fw.c
new file mode 100644
index 000000000000..15d81d758ca0
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/rtas-fw.c
@@ -0,0 +1,138 @@
1/*
2 *
3 * Procedures for firmware flash updates on pSeries systems.
4 *
5 * Peter Bergner, IBM March 2001.
6 * Copyright (C) 2001 IBM.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <stdarg.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <asm/prom.h>
22#include <asm/rtas.h>
23#include <asm/semaphore.h>
24#include <asm/machdep.h>
25#include <asm/page.h>
26#include <asm/param.h>
27#include <asm/system.h>
28#include <asm/abs_addr.h>
29#include <asm/udbg.h>
30#include <asm/delay.h>
31#include <asm/uaccess.h>
32#include <asm/systemcfg.h>
33
34#include "rtas-fw.h"
35
36struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
37
38#define FLASH_BLOCK_LIST_VERSION (1UL)
39
40static void rtas_flash_firmware(void)
41{
42 unsigned long image_size;
43 struct flash_block_list *f, *next, *flist;
44 unsigned long rtas_block_list;
45 int i, status, update_token;
46
47 update_token = rtas_token("ibm,update-flash-64-and-reboot");
48 if (update_token == RTAS_UNKNOWN_SERVICE) {
49 printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot is not available -- not a service partition?\n");
50 printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
51 return;
52 }
53
54 /* NOTE: the "first" block list is a global var with no data
55 * blocks in the kernel data segment. We do this because
56 * we want to ensure this block_list addr is under 4GB.
57 */
58 rtas_firmware_flash_list.num_blocks = 0;
59 flist = (struct flash_block_list *)&rtas_firmware_flash_list;
60 rtas_block_list = virt_to_abs(flist);
61 if (rtas_block_list >= 4UL*1024*1024*1024) {
62 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
63 return;
64 }
65
66 printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
67 /* Update the block_list in place. */
68 image_size = 0;
69 for (f = flist; f; f = next) {
70 /* Translate data addrs to absolute */
71 for (i = 0; i < f->num_blocks; i++) {
72 f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
73 image_size += f->blocks[i].length;
74 }
75 next = f->next;
76 /* Don't translate NULL pointer for last entry */
77 if (f->next)
78 f->next = (struct flash_block_list *)virt_to_abs(f->next);
79 else
80 f->next = NULL;
81 /* make num_blocks into the version/length field */
82 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
83 }
84
85 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
86 printk(KERN_ALERT "FLASH: performing flash and reboot\n");
87 rtas_progress("Flashing \n", 0x0);
88 rtas_progress("Please Wait... ", 0x0);
89 printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
90 status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
91 switch (status) { /* should only get "bad" status */
92 case 0:
93 printk(KERN_ALERT "FLASH: success\n");
94 break;
95 case -1:
96 printk(KERN_ALERT "FLASH: hardware error. Firmware may not be not flashed\n");
97 break;
98 case -3:
99 printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n");
100 break;
101 case -4:
102 printk(KERN_ALERT "FLASH: flash failed when partially complete. System may not reboot\n");
103 break;
104 default:
105 printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
106 break;
107 }
108}
109
110void rtas_flash_bypass_warning(void)
111{
112 printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
113 printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
114}
115
116
117void rtas_fw_restart(char *cmd)
118{
119 if (rtas_firmware_flash_list.next)
120 rtas_flash_firmware();
121 rtas_restart(cmd);
122}
123
124void rtas_fw_power_off(void)
125{
126 if (rtas_firmware_flash_list.next)
127 rtas_flash_bypass_warning();
128 rtas_power_off();
129}
130
131void rtas_fw_halt(void)
132{
133 if (rtas_firmware_flash_list.next)
134 rtas_flash_bypass_warning();
135 rtas_halt();
136}
137
138EXPORT_SYMBOL(rtas_firmware_flash_list);
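
The version/length word built in the loop above packs FLASH_BLOCK_LIST_VERSION into the top byte and the byte length of the block list, 16 bytes per entry plus the 16-byte header, into the low bits; the 16-byte-per-entry reading follows the (num_blocks+1)*16 arithmetic in the code. A minimal standalone sketch of that encoding (illustrative C, not part of the patch):

/* Standalone sketch (not kernel code): how rtas_flash_firmware() above packs
 * the list version and byte length into what used to be the num_blocks slot.
 * Assumes the layout visible in the loop: version in the top byte, length =
 * 16 bytes per data block plus 16 for the list header. */
#include <stdio.h>
#include <stdint.h>

#define FLASH_BLOCK_LIST_VERSION 1UL

static uint64_t pack_list_header(uint64_t num_blocks)
{
	/* header pair (1) + data blocks, each 16 bytes (addr + length) */
	return (FLASH_BLOCK_LIST_VERSION << 56) | ((num_blocks + 1) * 16);
}

int main(void)
{
	uint64_t hdr = pack_list_header(5);

	printf("version %llu, list length %llu bytes\n",
	       (unsigned long long)(hdr >> 56),
	       (unsigned long long)(hdr & 0x00ffffffffffffffULL));
	return 0;
}
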
diff --git a/arch/powerpc/platforms/pseries/rtas-fw.h b/arch/powerpc/platforms/pseries/rtas-fw.h
new file mode 100644
index 000000000000..e70fa69974a3
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/rtas-fw.h
@@ -0,0 +1,3 @@
1void rtas_fw_restart(char *cmd);
2void rtas_fw_power_off(void);
3void rtas_fw_halt(void);
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/powerpc/platforms/pseries/setup.c
index 3009701eb90d..10cb0f2d9b5b 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/ppc/kernel/setup.c 2 * 64-bit pSeries and RS/6000 setup code.
3 * 3 *
4 * Copyright (C) 1995 Linus Torvalds 4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas 5 * Adapted from 'alpha' version by Gary Thomas
@@ -59,13 +59,15 @@
59#include <asm/time.h> 59#include <asm/time.h>
60#include <asm/nvram.h> 60#include <asm/nvram.h>
61#include <asm/plpar_wrappers.h> 61#include <asm/plpar_wrappers.h>
62#include <asm/xics.h> 62#include "xics.h"
63#include <asm/firmware.h> 63#include <asm/firmware.h>
64#include <asm/pmc.h> 64#include <asm/pmc.h>
65#include <asm/mpic.h>
66#include <asm/ppc-pci.h>
67#include <asm/i8259.h>
68#include <asm/udbg.h>
65 69
66#include "i8259.h" 70#include "rtas-fw.h"
67#include "mpic.h"
68#include "pci.h"
69 71
70#ifdef DEBUG 72#ifdef DEBUG
71#define DBG(fmt...) udbg_printf(fmt) 73#define DBG(fmt...) udbg_printf(fmt)
@@ -84,13 +86,12 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */
84extern void pSeries_system_reset_exception(struct pt_regs *regs); 86extern void pSeries_system_reset_exception(struct pt_regs *regs);
85extern int pSeries_machine_check_exception(struct pt_regs *regs); 87extern int pSeries_machine_check_exception(struct pt_regs *regs);
86 88
87static int pseries_shared_idle(void); 89static void pseries_shared_idle(void);
88static int pseries_dedicated_idle(void); 90static void pseries_dedicated_idle(void);
89 91
90static volatile void __iomem * chrp_int_ack_special;
91struct mpic *pSeries_mpic; 92struct mpic *pSeries_mpic;
92 93
93void pSeries_get_cpuinfo(struct seq_file *m) 94void pSeries_show_cpuinfo(struct seq_file *m)
94{ 95{
95 struct device_node *root; 96 struct device_node *root;
96 const char *model = ""; 97 const char *model = "";
@@ -119,19 +120,11 @@ static void __init fwnmi_init(void)
119 fwnmi_active = 1; 120 fwnmi_active = 1;
120} 121}
121 122
122static int pSeries_irq_cascade(struct pt_regs *regs, void *data)
123{
124 if (chrp_int_ack_special)
125 return readb(chrp_int_ack_special);
126 else
127 return i8259_irq(smp_processor_id());
128}
129
130static void __init pSeries_init_mpic(void) 123static void __init pSeries_init_mpic(void)
131{ 124{
132 unsigned int *addrp; 125 unsigned int *addrp;
133 struct device_node *np; 126 struct device_node *np;
134 int i; 127 unsigned long intack = 0;
135 128
136 /* All ISUs are setup, complete initialization */ 129 /* All ISUs are setup, complete initialization */
137 mpic_init(pSeries_mpic); 130 mpic_init(pSeries_mpic);
@@ -142,16 +135,14 @@ static void __init pSeries_init_mpic(void)
142 get_property(np, "8259-interrupt-acknowledge", NULL))) 135 get_property(np, "8259-interrupt-acknowledge", NULL)))
143 printk(KERN_ERR "Cannot find pci to get ack address\n"); 136 printk(KERN_ERR "Cannot find pci to get ack address\n");
144 else 137 else
145 chrp_int_ack_special = ioremap(addrp[prom_n_addr_cells(np)-1], 1); 138 intack = addrp[prom_n_addr_cells(np)-1];
146 of_node_put(np); 139 of_node_put(np);
147 140
148 /* Setup the legacy interrupts & controller */ 141 /* Setup the legacy interrupts & controller */
149 for (i = 0; i < NUM_ISA_INTERRUPTS; i++) 142 i8259_init(intack, 0);
150 irq_desc[i].handler = &i8259_pic;
151 i8259_init(0);
152 143
153 /* Hook cascade to mpic */ 144 /* Hook cascade to mpic */
154 mpic_setup_cascade(NUM_ISA_INTERRUPTS, pSeries_irq_cascade, NULL); 145 mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
155} 146}
156 147
157static void __init pSeries_setup_mpic(void) 148static void __init pSeries_setup_mpic(void)
@@ -241,10 +232,6 @@ static void __init pSeries_setup_arch(void)
241 find_and_init_phbs(); 232 find_and_init_phbs();
242 eeh_init(); 233 eeh_init();
243 234
244#ifdef CONFIG_DUMMY_CONSOLE
245 conswitchp = &dummy_con;
246#endif
247
248 pSeries_nvram_init(); 235 pSeries_nvram_init();
249 236
250 /* Choose an idle loop */ 237 /* Choose an idle loop */
@@ -488,8 +475,8 @@ static inline void dedicated_idle_sleep(unsigned int cpu)
488 } 475 }
489} 476}
490 477
491static int pseries_dedicated_idle(void) 478static void pseries_dedicated_idle(void)
492{ 479{
493 long oldval; 480 long oldval;
494 struct paca_struct *lpaca = get_paca(); 481 struct paca_struct *lpaca = get_paca();
495 unsigned int cpu = smp_processor_id(); 482 unsigned int cpu = smp_processor_id();
@@ -544,7 +531,7 @@ static int pseries_dedicated_idle(void)
544 } 531 }
545} 532}
546 533
547static int pseries_shared_idle(void) 534static void pseries_shared_idle(void)
548{ 535{
549 struct paca_struct *lpaca = get_paca(); 536 struct paca_struct *lpaca = get_paca();
550 unsigned int cpu = smp_processor_id(); 537 unsigned int cpu = smp_processor_id();
@@ -586,8 +573,6 @@ static int pseries_shared_idle(void)
586 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) 573 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
587 cpu_die(); 574 cpu_die();
588 } 575 }
589
590 return 0;
591} 576}
592 577
593static int pSeries_pci_probe_mode(struct pci_bus *bus) 578static int pSeries_pci_probe_mode(struct pci_bus *bus)
@@ -601,14 +586,14 @@ struct machdep_calls __initdata pSeries_md = {
601 .probe = pSeries_probe, 586 .probe = pSeries_probe,
602 .setup_arch = pSeries_setup_arch, 587 .setup_arch = pSeries_setup_arch,
603 .init_early = pSeries_init_early, 588 .init_early = pSeries_init_early,
604 .get_cpuinfo = pSeries_get_cpuinfo, 589 .show_cpuinfo = pSeries_show_cpuinfo,
605 .log_error = pSeries_log_error, 590 .log_error = pSeries_log_error,
606 .pcibios_fixup = pSeries_final_fixup, 591 .pcibios_fixup = pSeries_final_fixup,
607 .pci_probe_mode = pSeries_pci_probe_mode, 592 .pci_probe_mode = pSeries_pci_probe_mode,
608 .irq_bus_setup = pSeries_irq_bus_setup, 593 .irq_bus_setup = pSeries_irq_bus_setup,
609 .restart = rtas_restart, 594 .restart = rtas_fw_restart,
610 .power_off = rtas_power_off, 595 .power_off = rtas_fw_power_off,
611 .halt = rtas_halt, 596 .halt = rtas_fw_halt,
612 .panic = rtas_os_term, 597 .panic = rtas_os_term,
613 .cpu_die = pSeries_mach_cpu_die, 598 .cpu_die = pSeries_mach_cpu_die,
614 .get_boot_time = rtas_get_boot_time, 599 .get_boot_time = rtas_get_boot_time,
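
The machdep_calls hunk above reroutes .restart, .power_off and .halt through the new rtas_fw_* wrappers, so a pending firmware image is flashed (or a bypass warning printed) before the plain RTAS call runs. A toy model of that ops-table indirection, not the kernel's machdep_calls definition, just to show why only the table entry has to change:

/* Toy model (not the kernel's machdep_calls) of the ops-table indirection the
 * hunk above relies on: generic shutdown code calls through the table, so
 * swapping .restart from rtas_restart to rtas_fw_restart changes behaviour
 * without touching any caller. */
#include <stdio.h>

struct machine_ops {
	void (*restart)(const char *cmd);
	void (*power_off)(void);
};

static void plain_restart(const char *cmd)
{
	printf("restart: %s\n", cmd);
}

static void fw_restart(const char *cmd)
{
	/* analogue of rtas_fw_restart(): flash first, then fall through */
	printf("flash pending firmware image first\n");
	plain_restart(cmd);
}

static struct machine_ops ops = { .restart = fw_restart };

int main(void)
{
	ops.restart("now");	/* generic code only ever sees the table */
	return 0;
}
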
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/powerpc/platforms/pseries/smp.c
index d2c7e2c4733b..9c9458ddfc25 100644
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * SMP support for pSeries and BPA machines. 2 * SMP support for pSeries machines.
3 * 3 *
4 * Dave Engebretsen, Peter Bergner, and 4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com 5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
@@ -39,16 +39,14 @@
39#include <asm/paca.h> 39#include <asm/paca.h>
40#include <asm/time.h> 40#include <asm/time.h>
41#include <asm/machdep.h> 41#include <asm/machdep.h>
42#include <asm/xics.h> 42#include "xics.h"
43#include <asm/cputable.h> 43#include <asm/cputable.h>
44#include <asm/firmware.h> 44#include <asm/firmware.h>
45#include <asm/system.h> 45#include <asm/system.h>
46#include <asm/rtas.h> 46#include <asm/rtas.h>
47#include <asm/plpar_wrappers.h> 47#include <asm/plpar_wrappers.h>
48#include <asm/pSeries_reconfig.h> 48#include <asm/pSeries_reconfig.h>
49 49#include <asm/mpic.h>
50#include "mpic.h"
51#include "bpa_iic.h"
52 50
53#ifdef DEBUG 51#ifdef DEBUG
54#define DBG(fmt...) udbg_printf(fmt) 52#define DBG(fmt...) udbg_printf(fmt)
@@ -343,36 +341,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
343 341
344} 342}
345#endif /* CONFIG_XICS */ 343#endif /* CONFIG_XICS */
346#ifdef CONFIG_BPA_IIC
347static void smp_iic_message_pass(int target, int msg)
348{
349 unsigned int i;
350
351 if (target < NR_CPUS) {
352 iic_cause_IPI(target, msg);
353 } else {
354 for_each_online_cpu(i) {
355 if (target == MSG_ALL_BUT_SELF
356 && i == smp_processor_id())
357 continue;
358 iic_cause_IPI(i, msg);
359 }
360 }
361}
362
363static int __init smp_iic_probe(void)
364{
365 iic_request_IPIs();
366
367 return cpus_weight(cpu_possible_map);
368}
369
370static void __devinit smp_iic_setup_cpu(int cpu)
371{
372 if (cpu != boot_cpuid)
373 iic_setup_cpu();
374}
375#endif /* CONFIG_BPA_IIC */
376 344
377static DEFINE_SPINLOCK(timebase_lock); 345static DEFINE_SPINLOCK(timebase_lock);
378static unsigned long timebase = 0; 346static unsigned long timebase = 0;
@@ -444,15 +412,6 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
444 .cpu_bootable = smp_pSeries_cpu_bootable, 412 .cpu_bootable = smp_pSeries_cpu_bootable,
445}; 413};
446#endif 414#endif
447#ifdef CONFIG_BPA_IIC
448static struct smp_ops_t bpa_iic_smp_ops = {
449 .message_pass = smp_iic_message_pass,
450 .probe = smp_iic_probe,
451 .kick_cpu = smp_pSeries_kick_cpu,
452 .setup_cpu = smp_iic_setup_cpu,
453 .cpu_bootable = smp_pSeries_cpu_bootable,
454};
455#endif
456 415
457/* This is called very early */ 416/* This is called very early */
458void __init smp_init_pSeries(void) 417void __init smp_init_pSeries(void)
@@ -472,11 +431,6 @@ void __init smp_init_pSeries(void)
472 smp_ops = &pSeries_xics_smp_ops; 431 smp_ops = &pSeries_xics_smp_ops;
473 break; 432 break;
474#endif 433#endif
475#ifdef CONFIG_BPA_IIC
476 case IC_BPA_IIC:
477 smp_ops = &bpa_iic_smp_ops;
478 break;
479#endif
480 default: 434 default:
481 panic("Invalid interrupt controller"); 435 panic("Invalid interrupt controller");
482 } 436 }
diff --git a/arch/ppc64/kernel/pSeries_vio.c b/arch/powerpc/platforms/pseries/vio.c
index e0ae06f58f86..866379b80c09 100644
--- a/arch/ppc64/kernel/pSeries_vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -22,6 +22,7 @@
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/vio.h> 23#include <asm/vio.h>
24#include <asm/hvcall.h> 24#include <asm/hvcall.h>
25#include <asm/tce.h>
25 26
26extern struct subsystem devices_subsys; /* needed for vio_find_name() */ 27extern struct subsystem devices_subsys; /* needed for vio_find_name() */
27 28
diff --git a/arch/ppc64/kernel/xics.c b/arch/powerpc/platforms/pseries/xics.c
index daf93885dcfa..c72c86f05cb6 100644
--- a/arch/ppc64/kernel/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/ppc64/kernel/xics.c 2 * arch/powerpc/platforms/pseries/xics.c
3 * 3 *
4 * Copyright 2000 IBM Corporation. 4 * Copyright 2000 IBM Corporation.
5 * 5 *
@@ -25,11 +25,11 @@
25#include <asm/pgtable.h> 25#include <asm/pgtable.h>
26#include <asm/smp.h> 26#include <asm/smp.h>
27#include <asm/rtas.h> 27#include <asm/rtas.h>
28#include <asm/xics.h>
29#include <asm/hvcall.h> 28#include <asm/hvcall.h>
30#include <asm/machdep.h> 29#include <asm/machdep.h>
30#include <asm/i8259.h>
31 31
32#include "i8259.h" 32#include "xics.h"
33 33
34static unsigned int xics_startup(unsigned int irq); 34static unsigned int xics_startup(unsigned int irq);
35static void xics_enable_irq(unsigned int irq); 35static void xics_enable_irq(unsigned int irq);
@@ -62,7 +62,7 @@ static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
62/* Want a priority other than 0. Various HW issues require this. */ 62/* Want a priority other than 0. Various HW issues require this. */
63#define DEFAULT_PRIORITY 5 63#define DEFAULT_PRIORITY 5
64 64
65/* 65/*
66 * Mark IPIs as higher priority so we can take them inside interrupts that 66 * Mark IPIs as higher priority so we can take them inside interrupts that
67 * aren't marked SA_INTERRUPT 67 * aren't marked SA_INTERRUPT
68 */ 68 */
@@ -169,11 +169,11 @@ static inline long plpar_xirr(unsigned long *xirr_ret)
169static int pSeriesLP_xirr_info_get(int n_cpu) 169static int pSeriesLP_xirr_info_get(int n_cpu)
170{ 170{
171 unsigned long lpar_rc; 171 unsigned long lpar_rc;
172 unsigned long return_value; 172 unsigned long return_value;
173 173
174 lpar_rc = plpar_xirr(&return_value); 174 lpar_rc = plpar_xirr(&return_value);
175 if (lpar_rc != H_Success) 175 if (lpar_rc != H_Success)
176 panic(" bad return code xirr - rc = %lx \n", lpar_rc); 176 panic(" bad return code xirr - rc = %lx \n", lpar_rc);
177 return (int)return_value; 177 return (int)return_value;
178} 178}
179 179
@@ -185,7 +185,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value)
185 lpar_rc = plpar_eoi(val64); 185 lpar_rc = plpar_eoi(val64);
186 if (lpar_rc != H_Success) 186 if (lpar_rc != H_Success)
187 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc, 187 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
188 val64); 188 val64);
189} 189}
190 190
191void pSeriesLP_cppr_info(int n_cpu, u8 value) 191void pSeriesLP_cppr_info(int n_cpu, u8 value)
@@ -194,7 +194,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value)
194 194
195 lpar_rc = plpar_cppr(value); 195 lpar_rc = plpar_cppr(value);
196 if (lpar_rc != H_Success) 196 if (lpar_rc != H_Success)
197 panic("bad return code cppr - rc = %lx\n", lpar_rc); 197 panic("bad return code cppr - rc = %lx\n", lpar_rc);
198} 198}
199 199
200static void pSeriesLP_qirr_info(int n_cpu , u8 value) 200static void pSeriesLP_qirr_info(int n_cpu , u8 value)
@@ -203,7 +203,7 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
203 203
204 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); 204 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
205 if (lpar_rc != H_Success) 205 if (lpar_rc != H_Success)
206 panic("bad return code qirr - rc = %lx\n", lpar_rc); 206 panic("bad return code qirr - rc = %lx\n", lpar_rc);
207} 207}
208 208
209xics_ops pSeriesLP_ops = { 209xics_ops pSeriesLP_ops = {
@@ -366,7 +366,7 @@ int xics_get_irq(struct pt_regs *regs)
366 366
367 /* for sanity, this had better be < NR_IRQS - 16 */ 367 /* for sanity, this had better be < NR_IRQS - 16 */
368 if (vec == xics_irq_8259_cascade_real) { 368 if (vec == xics_irq_8259_cascade_real) {
369 irq = i8259_irq(cpu); 369 irq = i8259_irq(regs);
370 if (irq == -1) { 370 if (irq == -1) {
371 /* Spurious cascaded interrupt. Still must ack xics */ 371 /* Spurious cascaded interrupt. Still must ack xics */
372 xics_end_irq(irq_offset_up(xics_irq_8259_cascade)); 372 xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
@@ -462,7 +462,7 @@ void xics_init_IRQ(void)
462 struct xics_interrupt_node { 462 struct xics_interrupt_node {
463 unsigned long addr; 463 unsigned long addr;
464 unsigned long size; 464 unsigned long size;
465 } intnodes[NR_CPUS]; 465 } intnodes[NR_CPUS];
466 466
467 ppc64_boot_msg(0x20, "XICS Init"); 467 ppc64_boot_msg(0x20, "XICS Init");
468 468
@@ -487,7 +487,7 @@ nextnode:
487 ireg = (uint *)get_property(np, "reg", &ilen); 487 ireg = (uint *)get_property(np, "reg", &ilen);
488 if (!ireg) 488 if (!ireg)
489 panic("xics_init_IRQ: can't find interrupt reg property"); 489 panic("xics_init_IRQ: can't find interrupt reg property");
490 490
491 while (ilen) { 491 while (ilen) {
492 intnodes[indx].addr = (unsigned long)*ireg++ << 32; 492 intnodes[indx].addr = (unsigned long)*ireg++ << 32;
493 ilen -= sizeof(uint); 493 ilen -= sizeof(uint);
@@ -555,7 +555,7 @@ nextnode:
555 continue; 555 continue;
556 556
557 hard_id = get_hard_smp_processor_id(i); 557 hard_id = get_hard_smp_processor_id(i);
558 xics_per_cpu[i] = ioremap(intnodes[hard_id].addr, 558 xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
559 intnodes[hard_id].size); 559 intnodes[hard_id].size);
560 } 560 }
561#else 561#else
@@ -589,7 +589,7 @@ static int __init xics_setup_i8259(void)
589 no_action, 0, "8259 cascade", NULL)) 589 no_action, 0, "8259 cascade", NULL))
590 printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 " 590 printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
591 "cascade\n"); 591 "cascade\n");
592 i8259_init(0); 592 i8259_init(0, 0);
593 } 593 }
594 return 0; 594 return 0;
595} 595}
diff --git a/include/asm-ppc64/xics.h b/arch/powerpc/platforms/pseries/xics.h
index 1092af55d707..e14c70868f1d 100644
--- a/include/asm-ppc64/xics.h
+++ b/arch/powerpc/platforms/pseries/xics.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/ppc64/kernel/xics.h 2 * arch/powerpc/platforms/pseries/xics.h
3 * 3 *
4 * Copyright 2000 IBM Corporation. 4 * Copyright 2000 IBM Corporation.
5 * 5 *
@@ -9,8 +9,8 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef _PPC64_KERNEL_XICS_H 12#ifndef _POWERPC_KERNEL_XICS_H
13#define _PPC64_KERNEL_XICS_H 13#define _POWERPC_KERNEL_XICS_H
14 14
15#include <linux/cache.h> 15#include <linux/cache.h>
16 16
@@ -31,4 +31,4 @@ struct xics_ipi_struct {
31 31
32extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; 32extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
33 33
34#endif /* _PPC64_KERNEL_XICS_H */ 34#endif /* _POWERPC_KERNEL_XICS_H */
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
new file mode 100644
index 000000000000..8acd21dee05d
--- /dev/null
+++ b/arch/powerpc/sysdev/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_MPIC) += mpic.o
2obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
3obj-$(CONFIG_PPC_I8259) += i8259.o
4obj-$(CONFIG_PPC_MPC106) += grackle.o
5obj-$(CONFIG_BOOKE) += dcr.o
6obj-$(CONFIG_40x) += dcr.o
7obj-$(CONFIG_U3_DART) += u3_iommu.o
diff --git a/arch/ppc/syslib/dcr.S b/arch/powerpc/sysdev/dcr.S
index 895f10243a43..895f10243a43 100644
--- a/arch/ppc/syslib/dcr.S
+++ b/arch/powerpc/sysdev/dcr.S
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
new file mode 100644
index 000000000000..b6ec793a23be
--- /dev/null
+++ b/arch/powerpc/sysdev/grackle.c
@@ -0,0 +1,64 @@
1/*
2 * Functions for setting up and using a MPC106 northbridge
3 * Extracted from arch/powerpc/platforms/powermac/pci.c.
4 *
5 * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
6 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/init.h>
16
17#include <asm/io.h>
18#include <asm/prom.h>
19#include <asm/pci-bridge.h>
20#include <asm/grackle.h>
21
22#define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \
23 | (((o) & ~3) << 24))
24
25#define GRACKLE_PICR1_STG 0x00000040
26#define GRACKLE_PICR1_LOOPSNOOP 0x00000010
27
28/* N.B. this is called before bridges is initialized, so we can't
29 use grackle_pcibios_{read,write}_config_dword. */
30static inline void grackle_set_stg(struct pci_controller* bp, int enable)
31{
32 unsigned int val;
33
34 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
35 val = in_le32(bp->cfg_data);
36 val = enable? (val | GRACKLE_PICR1_STG) :
37 (val & ~GRACKLE_PICR1_STG);
38 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
39 out_le32(bp->cfg_data, val);
40 (void)in_le32(bp->cfg_data);
41}
42
43static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
44{
45 unsigned int val;
46
47 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
48 val = in_le32(bp->cfg_data);
49 val = enable? (val | GRACKLE_PICR1_LOOPSNOOP) :
50 (val & ~GRACKLE_PICR1_LOOPSNOOP);
51 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
52 out_le32(bp->cfg_data, val);
53 (void)in_le32(bp->cfg_data);
54}
55
56void __init setup_grackle(struct pci_controller *hose)
57{
58 setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
59 if (machine_is_compatible("AAPL,PowerBook1998"))
60 grackle_set_loop_snoop(hose, 1);
61#if 0 /* Disabled for now, HW problems ??? */
62 grackle_set_stg(hose, 1);
63#endif
64}
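
GRACKLE_CFA() above builds the MPC106 config-address word: enable bit 0x80, bus in bits 8-15, device/function in bits 16-23, and the dword-aligned register offset in bits 24-31. A standalone sketch of the encoding, using the same PICR1 offset 0xa8 the helpers above poke:

/* Standalone illustration of the GRACKLE_CFA() encoding used above; the macro
 * body is copied from the new file, only the printout is added here. */
#include <stdio.h>
#include <stdint.h>

#define GRACKLE_CFA(b, d, o)	(0x80 | ((b) << 8) | ((d) << 16) \
				 | (((o) & ~3) << 24))

int main(void)
{
	uint32_t cfa = GRACKLE_CFA(0, 0, 0xa8u);

	printf("config address word: 0x%08x\n", cfa);	/* 0xa8000080 */
	return 0;
}
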
diff --git a/arch/ppc/syslib/i8259.c b/arch/powerpc/sysdev/i8259.c
index 5c7908c20e43..90bce6e0c191 100644
--- a/arch/ppc/syslib/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -1,18 +1,26 @@
1/*
2 * i8259 interrupt controller driver.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
1#include <linux/init.h> 9#include <linux/init.h>
2#include <linux/ioport.h> 10#include <linux/ioport.h>
3#include <linux/interrupt.h> 11#include <linux/interrupt.h>
4#include <asm/io.h> 12#include <asm/io.h>
5#include <asm/i8259.h> 13#include <asm/i8259.h>
6 14
7static volatile unsigned char *pci_intack; /* RO, gives us the irq vector */ 15static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
8 16
9unsigned char cached_8259[2] = { 0xff, 0xff }; 17static unsigned char cached_8259[2] = { 0xff, 0xff };
10#define cached_A1 (cached_8259[0]) 18#define cached_A1 (cached_8259[0])
11#define cached_21 (cached_8259[1]) 19#define cached_21 (cached_8259[1])
12 20
13static DEFINE_SPINLOCK(i8259_lock); 21static DEFINE_SPINLOCK(i8259_lock);
14 22
15int i8259_pic_irq_offset; 23static int i8259_pic_irq_offset;
16 24
17/* 25/*
18 * Acknowledge the IRQ using either the PCI host bridge's interrupt 26 * Acknowledge the IRQ using either the PCI host bridge's interrupt
@@ -20,8 +28,7 @@ int i8259_pic_irq_offset;
20 * which is called. It should be noted that polling is broken on some 28 * which is called. It should be noted that polling is broken on some
21 * IBM and Motorola PReP boxes so we must use the int-ack feature on them. 29 * IBM and Motorola PReP boxes so we must use the int-ack feature on them.
22 */ 30 */
23int 31int i8259_irq(struct pt_regs *regs)
24i8259_irq(struct pt_regs *regs)
25{ 32{
26 int irq; 33 int irq;
27 34
@@ -29,7 +36,7 @@ i8259_irq(struct pt_regs *regs)
29 36
30 /* Either int-ack or poll for the IRQ */ 37 /* Either int-ack or poll for the IRQ */
31 if (pci_intack) 38 if (pci_intack)
32 irq = *pci_intack; 39 irq = readb(pci_intack);
33 else { 40 else {
34 /* Perform an interrupt acknowledge cycle on controller 1. */ 41 /* Perform an interrupt acknowledge cycle on controller 1. */
35 outb(0x0C, 0x20); /* prepare for poll */ 42 outb(0x0C, 0x20); /* prepare for poll */
@@ -59,7 +66,12 @@ i8259_irq(struct pt_regs *regs)
59 } 66 }
60 67
61 spin_unlock(&i8259_lock); 68 spin_unlock(&i8259_lock);
62 return irq; 69 return irq + i8259_pic_irq_offset;
70}
71
72int i8259_irq_cascade(struct pt_regs *regs, void *unused)
73{
74 return i8259_irq(regs);
63} 75}
64 76
65static void i8259_mask_and_ack_irq(unsigned int irq_nr) 77static void i8259_mask_and_ack_irq(unsigned int irq_nr)
@@ -67,20 +79,18 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr)
67 unsigned long flags; 79 unsigned long flags;
68 80
69 spin_lock_irqsave(&i8259_lock, flags); 81 spin_lock_irqsave(&i8259_lock, flags);
70 if ( irq_nr >= i8259_pic_irq_offset ) 82 irq_nr -= i8259_pic_irq_offset;
71 irq_nr -= i8259_pic_irq_offset;
72
73 if (irq_nr > 7) { 83 if (irq_nr > 7) {
74 cached_A1 |= 1 << (irq_nr-8); 84 cached_A1 |= 1 << (irq_nr-8);
75 inb(0xA1); /* DUMMY */ 85 inb(0xA1); /* DUMMY */
76 outb(cached_A1,0xA1); 86 outb(cached_A1, 0xA1);
77 outb(0x20,0xA0); /* Non-specific EOI */ 87 outb(0x20, 0xA0); /* Non-specific EOI */
78 outb(0x20,0x20); /* Non-specific EOI to cascade */ 88 outb(0x20, 0x20); /* Non-specific EOI to cascade */
79 } else { 89 } else {
80 cached_21 |= 1 << irq_nr; 90 cached_21 |= 1 << irq_nr;
81 inb(0x21); /* DUMMY */ 91 inb(0x21); /* DUMMY */
82 outb(cached_21,0x21); 92 outb(cached_21, 0x21);
83 outb(0x20,0x20); /* Non-specific EOI */ 93 outb(0x20, 0x20); /* Non-specific EOI */
84 } 94 }
85 spin_unlock_irqrestore(&i8259_lock, flags); 95 spin_unlock_irqrestore(&i8259_lock, flags);
86} 96}
@@ -96,9 +106,8 @@ static void i8259_mask_irq(unsigned int irq_nr)
96 unsigned long flags; 106 unsigned long flags;
97 107
98 spin_lock_irqsave(&i8259_lock, flags); 108 spin_lock_irqsave(&i8259_lock, flags);
99 if ( irq_nr >= i8259_pic_irq_offset ) 109 irq_nr -= i8259_pic_irq_offset;
100 irq_nr -= i8259_pic_irq_offset; 110 if (irq_nr < 8)
101 if ( irq_nr < 8 )
102 cached_21 |= 1 << irq_nr; 111 cached_21 |= 1 << irq_nr;
103 else 112 else
104 cached_A1 |= 1 << (irq_nr-8); 113 cached_A1 |= 1 << (irq_nr-8);
@@ -111,9 +120,8 @@ static void i8259_unmask_irq(unsigned int irq_nr)
111 unsigned long flags; 120 unsigned long flags;
112 121
113 spin_lock_irqsave(&i8259_lock, flags); 122 spin_lock_irqsave(&i8259_lock, flags);
114 if ( irq_nr >= i8259_pic_irq_offset ) 123 irq_nr -= i8259_pic_irq_offset;
115 irq_nr -= i8259_pic_irq_offset; 124 if (irq_nr < 8)
116 if ( irq_nr < 8 )
117 cached_21 &= ~(1 << irq_nr); 125 cached_21 &= ~(1 << irq_nr);
118 else 126 else
119 cached_A1 &= ~(1 << (irq_nr-8)); 127 cached_A1 &= ~(1 << (irq_nr-8));
@@ -169,12 +177,14 @@ static struct irqaction i8259_irqaction = {
169 * intack_addr - PCI interrupt acknowledge (real) address which will return 177 * intack_addr - PCI interrupt acknowledge (real) address which will return
170 * the active irq from the 8259 178 * the active irq from the 8259
171 */ 179 */
172void __init 180void __init i8259_init(unsigned long intack_addr, int offset)
173i8259_init(long intack_addr)
174{ 181{
175 unsigned long flags; 182 unsigned long flags;
183 int i;
176 184
177 spin_lock_irqsave(&i8259_lock, flags); 185 spin_lock_irqsave(&i8259_lock, flags);
186 i8259_pic_irq_offset = offset;
187
178 /* init master interrupt controller */ 188 /* init master interrupt controller */
179 outb(0x11, 0x20); /* Start init sequence */ 189 outb(0x11, 0x20); /* Start init sequence */
180 outb(0x00, 0x21); /* Vector base */ 190 outb(0x00, 0x21); /* Vector base */
@@ -198,11 +208,14 @@ i8259_init(long intack_addr)
198 spin_unlock_irqrestore(&i8259_lock, flags); 208 spin_unlock_irqrestore(&i8259_lock, flags);
199 209
200 /* reserve our resources */ 210 /* reserve our resources */
201 setup_irq( i8259_pic_irq_offset + 2, &i8259_irqaction); 211 setup_irq(offset + 2, &i8259_irqaction);
202 request_resource(&ioport_resource, &pic1_iores); 212 request_resource(&ioport_resource, &pic1_iores);
203 request_resource(&ioport_resource, &pic2_iores); 213 request_resource(&ioport_resource, &pic2_iores);
204 request_resource(&ioport_resource, &pic_edgectrl_iores); 214 request_resource(&ioport_resource, &pic_edgectrl_iores);
205 215
206 if (intack_addr != 0) 216 if (intack_addr != 0)
207 pci_intack = ioremap(intack_addr, 1); 217 pci_intack = ioremap(intack_addr, 1);
218
219 for (i = 0; i < NUM_ISA_INTERRUPTS; ++i)
220 irq_desc[offset + i].handler = &i8259_pic;
208} 221}
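
With the irq offset now stored inside the driver, i8259_irq() returns offset-rebased Linux irq numbers and the mask/unmask paths subtract the offset before updating the cached 8259 mask bytes (cached_8259[1] shadows port 0x21 on the master, cached_8259[0] shadows 0xA1 on the slave). A standalone sketch of that bookkeeping; the offset value 16 here is only an example, not a kernel constant:

/* Standalone sketch of the cached-mask bookkeeping done by the rewritten
 * i8259_mask_irq()/i8259_unmask_irq(): rebase the Linux irq number by the
 * pic offset, then set or clear the bit in the master or slave mask byte. */
#include <stdio.h>

static unsigned char cached_8259[2] = { 0xff, 0xff };	/* [0]=slave A1, [1]=master 21 */
static unsigned int pic_irq_offset = 16;		/* hypothetical offset */

static void mask_irq(unsigned int irq)
{
	irq -= pic_irq_offset;
	if (irq < 8)
		cached_8259[1] |= 1 << irq;
	else
		cached_8259[0] |= 1 << (irq - 8);
}

static void unmask_irq(unsigned int irq)
{
	irq -= pic_irq_offset;
	if (irq < 8)
		cached_8259[1] &= ~(1 << irq);
	else
		cached_8259[0] &= ~(1 << (irq - 8));
}

int main(void)
{
	unmask_irq(16 + 3);	/* master line 3 */
	unmask_irq(16 + 12);	/* slave line 4 */
	printf("master mask 0x%02x, slave mask 0x%02x\n",
	       cached_8259[1], cached_8259[0]);
	mask_irq(16 + 3);
	printf("master mask 0x%02x after re-mask\n", cached_8259[1]);
	return 0;
}
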
diff --git a/arch/ppc/syslib/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index e71488469704..e71488469704 100644
--- a/arch/ppc/syslib/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
diff --git a/arch/ppc64/kernel/mpic.c b/arch/powerpc/sysdev/mpic.c
index 5f5bc73754d9..105f05341a41 100644
--- a/arch/ppc64/kernel/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/ppc64/kernel/mpic.c 2 * arch/powerpc/kernel/mpic.c
3 * 3 *
4 * Driver for interrupt controllers following the OpenPIC standard, the 4 * Driver for interrupt controllers following the OpenPIC standard, the
5 * common implementation being IBM's MPIC. This driver also can deal 5 * common implementation being IBM's MPIC. This driver also can deal
@@ -31,8 +31,8 @@
31#include <asm/pgtable.h> 31#include <asm/pgtable.h>
32#include <asm/irq.h> 32#include <asm/irq.h>
33#include <asm/machdep.h> 33#include <asm/machdep.h>
34 34#include <asm/mpic.h>
35#include "mpic.h" 35#include <asm/smp.h>
36 36
37#ifdef DEBUG 37#ifdef DEBUG
38#define DBG(fmt...) printk(fmt) 38#define DBG(fmt...) printk(fmt)
@@ -44,6 +44,9 @@ static struct mpic *mpics;
44static struct mpic *mpic_primary; 44static struct mpic *mpic_primary;
45static DEFINE_SPINLOCK(mpic_lock); 45static DEFINE_SPINLOCK(mpic_lock);
46 46
47#ifdef CONFIG_PPC32 /* XXX for now */
48#define distribute_irqs CONFIG_IRQ_ALL_CPUS
49#endif
47 50
48/* 51/*
49 * Register accessor functions 52 * Register accessor functions
@@ -355,7 +358,7 @@ static void mpic_enable_irq(unsigned int irq)
355 struct mpic *mpic = mpic_from_irq(irq); 358 struct mpic *mpic = mpic_from_irq(irq);
356 unsigned int src = irq - mpic->irq_offset; 359 unsigned int src = irq - mpic->irq_offset;
357 360
358 DBG("%s: enable_irq: %d (src %d)\n", mpic->name, irq, src); 361 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
359 362
360 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 363 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
361 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK); 364 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK);
@@ -480,6 +483,7 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
480 if (mpic == NULL) 483 if (mpic == NULL)
481 return NULL; 484 return NULL;
482 485
486
483 memset(mpic, 0, sizeof(struct mpic)); 487 memset(mpic, 0, sizeof(struct mpic));
484 mpic->name = name; 488 mpic->name = name;
485 489
@@ -506,7 +510,7 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
506 mpic->senses_count = senses_count; 510 mpic->senses_count = senses_count;
507 511
508 /* Map the global registers */ 512 /* Map the global registers */
509 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x2000); 513 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
510 mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2); 514 mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
511 BUG_ON(mpic->gregs == NULL); 515 BUG_ON(mpic->gregs == NULL);
512 516
@@ -644,7 +648,6 @@ void __init mpic_init(struct mpic *mpic)
644 continue; 648 continue;
645 irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU; 649 irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
646 irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi; 650 irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
647
648#endif /* CONFIG_SMP */ 651#endif /* CONFIG_SMP */
649 } 652 }
650 653
@@ -700,7 +703,7 @@ void __init mpic_init(struct mpic *mpic)
700 /* init hw */ 703 /* init hw */
701 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); 704 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
702 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 705 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
703 1 << get_hard_smp_processor_id(boot_cpuid)); 706 1 << hard_smp_processor_id());
704 707
705 /* init linux descriptors */ 708 /* init linux descriptors */
706 if (i < mpic->irq_count) { 709 if (i < mpic->irq_count) {
@@ -792,6 +795,21 @@ void mpic_setup_this_cpu(void)
792#endif /* CONFIG_SMP */ 795#endif /* CONFIG_SMP */
793} 796}
794 797
798int mpic_cpu_get_priority(void)
799{
800 struct mpic *mpic = mpic_primary;
801
802 return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
803}
804
805void mpic_cpu_set_priority(int prio)
806{
807 struct mpic *mpic = mpic_primary;
808
809 prio &= MPIC_CPU_TASKPRI_MASK;
810 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
811}
812
795/* 813/*
796 * XXX: someone who knows mpic should check this. 814 * XXX: someone who knows mpic should check this.
797 * do we need to eoi the ipi including for kexec cpu here (see xics comments)? 815 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
@@ -885,4 +903,25 @@ void mpic_request_ipis(void)
885 903
886 printk("IPIs requested... \n"); 904 printk("IPIs requested... \n");
887} 905}
906
907void smp_mpic_message_pass(int target, int msg)
908{
909 /* make sure we're sending something that translates to an IPI */
910 if ((unsigned int)msg > 3) {
911 printk("SMP %d: smp_message_pass: unknown msg %d\n",
912 smp_processor_id(), msg);
913 return;
914 }
915 switch (target) {
916 case MSG_ALL:
917 mpic_send_ipi(msg, 0xffffffff);
918 break;
919 case MSG_ALL_BUT_SELF:
920 mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
921 break;
922 default:
923 mpic_send_ipi(msg, 1 << target);
924 break;
925 }
926}
888#endif /* CONFIG_SMP */ 927#endif /* CONFIG_SMP */
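
The new smp_mpic_message_pass() above turns its target into a CPU destination mask before calling mpic_send_ipi(): all CPUs, all but the sender, or a single bit. A standalone sketch of that mapping; the MSG_* values below are placeholders, not the kernel's definitions:

/* Standalone sketch of the target-to-destination-mask mapping done by
 * smp_mpic_message_pass() above. */
#include <stdio.h>
#include <stdint.h>

#define MSG_ALL			(-1)	/* placeholder value */
#define MSG_ALL_BUT_SELF	(-2)	/* placeholder value */

static uint32_t ipi_dest_mask(int target, int self)
{
	switch (target) {
	case MSG_ALL:
		return 0xffffffffu;
	case MSG_ALL_BUT_SELF:
		return 0xffffffffu & ~(1u << self);
	default:
		return 1u << target;
	}
}

int main(void)
{
	printf("all:          0x%08x\n", ipi_dest_mask(MSG_ALL, 2));
	printf("all-but-self: 0x%08x\n", ipi_dest_mask(MSG_ALL_BUT_SELF, 2));
	printf("cpu 5:        0x%08x\n", ipi_dest_mask(5, 2));
	return 0;
}
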
diff --git a/arch/ppc64/kernel/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index 41ea09cb9ac7..fba871a1bda5 100644
--- a/arch/ppc64/kernel/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -44,39 +44,11 @@
44#include <asm/abs_addr.h> 44#include <asm/abs_addr.h>
45#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
46#include <asm/lmb.h> 46#include <asm/lmb.h>
47 47#include <asm/dart.h>
48#include "pci.h" 48#include <asm/ppc-pci.h>
49 49
50extern int iommu_force_on; 50extern int iommu_force_on;
51 51
52/* physical base of DART registers */
53#define DART_BASE 0xf8033000UL
54
55/* Offset from base to control register */
56#define DARTCNTL 0
57/* Offset from base to exception register */
58#define DARTEXCP 0x10
59/* Offset from base to TLB tag registers */
60#define DARTTAG 0x1000
61
62
63/* Control Register fields */
64
65/* base address of table (pfn) */
66#define DARTCNTL_BASE_MASK 0xfffff
67#define DARTCNTL_BASE_SHIFT 12
68
69#define DARTCNTL_FLUSHTLB 0x400
70#define DARTCNTL_ENABLE 0x200
71
72/* size of table in pages */
73#define DARTCNTL_SIZE_MASK 0x1ff
74#define DARTCNTL_SIZE_SHIFT 0
75
76/* DART table fields */
77#define DARTMAP_VALID 0x80000000
78#define DARTMAP_RPNMASK 0x00ffffff
79
80/* Physical base address and size of the DART table */ 52/* Physical base address and size of the DART table */
81unsigned long dart_tablebase; /* exported to htab_initialize */ 53unsigned long dart_tablebase; /* exported to htab_initialize */
82static unsigned long dart_tablesize; 54static unsigned long dart_tablesize;
@@ -152,18 +124,21 @@ static void dart_build(struct iommu_table *tbl, long index,
152 124
153 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); 125 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
154 126
127 index <<= DART_PAGE_FACTOR;
128 npages <<= DART_PAGE_FACTOR;
129
155 dp = ((unsigned int*)tbl->it_base) + index; 130 dp = ((unsigned int*)tbl->it_base) + index;
156 131
157 /* On U3, all memory is contiguous, so we can move this 132 /* On U3, all memory is contiguous, so we can move this
158 * out of the loop. 133 * out of the loop.
159 */ 134 */
160 while (npages--) { 135 while (npages--) {
161 rpn = virt_to_abs(uaddr) >> PAGE_SHIFT; 136 rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT;
162 137
163 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); 138 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
164 139
165 rpn++; 140 rpn++;
166 uaddr += PAGE_SIZE; 141 uaddr += DART_PAGE_SIZE;
167 } 142 }
168 143
169 dart_dirty = 1; 144 dart_dirty = 1;
@@ -181,6 +156,9 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
181 156
182 DBG("dart: free at: %lx, %lx\n", index, npages); 157 DBG("dart: free at: %lx, %lx\n", index, npages);
183 158
159 index <<= DART_PAGE_FACTOR;
160 npages <<= DART_PAGE_FACTOR;
161
184 dp = ((unsigned int *)tbl->it_base) + index; 162 dp = ((unsigned int *)tbl->it_base) + index;
185 163
186 while (npages--) 164 while (npages--)
@@ -209,10 +187,10 @@ static int dart_init(struct device_node *dart_node)
209 * that to work around what looks like a problem with the HT bridge 187 * that to work around what looks like a problem with the HT bridge
210 * prefetching into invalid pages and corrupting data 188 * prefetching into invalid pages and corrupting data
211 */ 189 */
212 tmp = lmb_alloc(PAGE_SIZE, PAGE_SIZE); 190 tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
213 if (!tmp) 191 if (!tmp)
214 panic("U3-DART: Cannot allocate spare page!"); 192 panic("U3-DART: Cannot allocate spare page!");
215 dart_emptyval = DARTMAP_VALID | ((tmp >> PAGE_SHIFT) & DARTMAP_RPNMASK); 193 dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK);
216 194
217 /* Map in DART registers. FIXME: Use device node to get base address */ 195 /* Map in DART registers. FIXME: Use device node to get base address */
218 dart = ioremap(DART_BASE, 0x7000); 196 dart = ioremap(DART_BASE, 0x7000);
@@ -223,8 +201,8 @@ static int dart_init(struct device_node *dart_node)
223 * table size and enable bit 201 * table size and enable bit
224 */ 202 */
225 regword = DARTCNTL_ENABLE | 203 regword = DARTCNTL_ENABLE |
226 ((dart_tablebase >> PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) | 204 ((dart_tablebase >> DART_PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
227 (((dart_tablesize >> PAGE_SHIFT) & DARTCNTL_SIZE_MASK) 205 (((dart_tablesize >> DART_PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
228 << DARTCNTL_SIZE_SHIFT); 206 << DARTCNTL_SIZE_SHIFT);
229 dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); 207 dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
230 208
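
The dart_build()/dart_free() changes rescale iommu-table indices and counts into DART entries: one kernel page covers 2^DART_PAGE_FACTOR DART pages, so index and npages are shifted up before the DART page table is walked. A standalone sketch with illustrative shift values, not taken from asm/dart.h:

/* Standalone sketch of the index/npages rescaling added to dart_build() and
 * dart_free() above.  The shift values are assumptions for illustration only. */
#include <stdio.h>

#define PAGE_SHIFT		16	/* hypothetical 64K kernel pages */
#define DART_PAGE_SHIFT		12	/* hypothetical 4K DART entries */
#define DART_PAGE_FACTOR	(PAGE_SHIFT - DART_PAGE_SHIFT)

int main(void)
{
	long index = 3, npages = 2;	/* in kernel-page units */

	index  <<= DART_PAGE_FACTOR;
	npages <<= DART_PAGE_FACTOR;
	printf("DART slots %ld..%ld (%ld entries)\n",
	       index, index + npages - 1, npages);
	return 0;
}
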
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
new file mode 100644
index 000000000000..79a784f0e7a9
--- /dev/null
+++ b/arch/powerpc/xmon/Makefile
@@ -0,0 +1,11 @@
1# Makefile for xmon
2
3ifdef CONFIG_PPC64
4EXTRA_CFLAGS += -mno-minimal-toc
5endif
6
7obj-$(CONFIG_8xx) += start_8xx.o
8obj-$(CONFIG_6xx) += start_32.o
9obj-$(CONFIG_4xx) += start_32.o
10obj-$(CONFIG_PPC64) += start_64.o
11obj-y += xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
diff --git a/arch/ppc64/xmon/ansidecl.h b/arch/powerpc/xmon/ansidecl.h
index c9b9f0929e9e..c9b9f0929e9e 100644
--- a/arch/ppc64/xmon/ansidecl.h
+++ b/arch/powerpc/xmon/ansidecl.h
diff --git a/arch/ppc64/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h
index 84211a21c6f4..84211a21c6f4 100644
--- a/arch/ppc64/xmon/nonstdio.h
+++ b/arch/powerpc/xmon/nonstdio.h
diff --git a/arch/ppc64/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
index ac0a9d2427e0..ac0a9d2427e0 100644
--- a/arch/ppc64/xmon/ppc-dis.c
+++ b/arch/powerpc/xmon/ppc-dis.c
diff --git a/arch/ppc64/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index 5ee8fc32f824..5ee8fc32f824 100644
--- a/arch/ppc64/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
diff --git a/arch/ppc64/xmon/ppc.h b/arch/powerpc/xmon/ppc.h
index 342237e8dd69..342237e8dd69 100644
--- a/arch/ppc64/xmon/ppc.h
+++ b/arch/powerpc/xmon/ppc.h
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/xmon/setjmp.S
new file mode 100644
index 000000000000..f8e40dfd2bff
--- /dev/null
+++ b/arch/powerpc/xmon/setjmp.S
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * NOTE: assert(sizeof(buf) > 23 * sizeof(long))
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13#include <asm/asm-offsets.h>
14
15_GLOBAL(xmon_setjmp)
16 mflr r0
17 STL r0,0(r3)
18 STL r1,SZL(r3)
19 STL r2,2*SZL(r3)
20 mfcr r0
21 STL r0,3*SZL(r3)
22 STL r13,4*SZL(r3)
23 STL r14,5*SZL(r3)
24 STL r15,6*SZL(r3)
25 STL r16,7*SZL(r3)
26 STL r17,8*SZL(r3)
27 STL r18,9*SZL(r3)
28 STL r19,10*SZL(r3)
29 STL r20,11*SZL(r3)
30 STL r21,12*SZL(r3)
31 STL r22,13*SZL(r3)
32 STL r23,14*SZL(r3)
33 STL r24,15*SZL(r3)
34 STL r25,16*SZL(r3)
35 STL r26,17*SZL(r3)
36 STL r27,18*SZL(r3)
37 STL r28,19*SZL(r3)
38 STL r29,20*SZL(r3)
39 STL r30,21*SZL(r3)
40 STL r31,22*SZL(r3)
41 li r3,0
42 blr
43
44_GLOBAL(xmon_longjmp)
45 CMPI r4,0
46 bne 1f
47 li r4,1
481: LDL r13,4*SZL(r3)
49 LDL r14,5*SZL(r3)
50 LDL r15,6*SZL(r3)
51 LDL r16,7*SZL(r3)
52 LDL r17,8*SZL(r3)
53 LDL r18,9*SZL(r3)
54 LDL r19,10*SZL(r3)
55 LDL r20,11*SZL(r3)
56 LDL r21,12*SZL(r3)
57 LDL r22,13*SZL(r3)
58 LDL r23,14*SZL(r3)
59 LDL r24,15*SZL(r3)
60 LDL r25,16*SZL(r3)
61 LDL r26,17*SZL(r3)
62 LDL r27,18*SZL(r3)
63 LDL r28,19*SZL(r3)
64 LDL r29,20*SZL(r3)
65 LDL r30,21*SZL(r3)
66 LDL r31,22*SZL(r3)
67 LDL r0,3*SZL(r3)
68 mtcrf 0x38,r0
69 LDL r0,0(r3)
70 LDL r1,SZL(r3)
71 LDL r2,2*SZL(r3)
72 mtlr r0
73 mr r3,r4
74 blr
75
76/*
77 * Grab the register values as they are now.
 78 * This won't do a particularly good job because we really
79 * want our caller's caller's registers, and our caller has
80 * already executed its prologue.
81 * ToDo: We could reach back into the caller's save area to do
82 * a better job of representing the caller's state (note that
83 * that will be different for 32-bit and 64-bit, because of the
84 * different ABIs, though).
85 */
86_GLOBAL(xmon_save_regs)
87 STL r0,0*SZL(r3)
88 STL r2,2*SZL(r3)
89 STL r3,3*SZL(r3)
90 STL r4,4*SZL(r3)
91 STL r5,5*SZL(r3)
92 STL r6,6*SZL(r3)
93 STL r7,7*SZL(r3)
94 STL r8,8*SZL(r3)
95 STL r9,9*SZL(r3)
96 STL r10,10*SZL(r3)
97 STL r11,11*SZL(r3)
98 STL r12,12*SZL(r3)
99 STL r13,13*SZL(r3)
100 STL r14,14*SZL(r3)
101 STL r15,15*SZL(r3)
102 STL r16,16*SZL(r3)
103 STL r17,17*SZL(r3)
104 STL r18,18*SZL(r3)
105 STL r19,19*SZL(r3)
106 STL r20,20*SZL(r3)
107 STL r21,21*SZL(r3)
108 STL r22,22*SZL(r3)
109 STL r23,23*SZL(r3)
110 STL r24,24*SZL(r3)
111 STL r25,25*SZL(r3)
112 STL r26,26*SZL(r3)
113 STL r27,27*SZL(r3)
114 STL r28,28*SZL(r3)
115 STL r29,29*SZL(r3)
116 STL r30,30*SZL(r3)
117 STL r31,31*SZL(r3)
118 /* go up one stack frame for SP */
119 LDL r4,0(r1)
120 STL r4,1*SZL(r3)
121 /* get caller's LR */
122 LDL r0,LRSAVE(r4)
123 STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
124 STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
125 mfmsr r0
126 STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
127 mfctr r0
128 STL r0,_CTR-STACK_FRAME_OVERHEAD(r3)
129 mfxer r0
130 STL r0,_XER-STACK_FRAME_OVERHEAD(r3)
131 mfcr r0
132 STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
133 li r0,0
134 STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
135 blr
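
xmon_setjmp()/xmon_longjmp() give the debugger a recovery point before it touches possibly-faulting memory; per the note above the save buffer must hold more than 23 longs. A hedged fragment of how a caller might use them (the prototypes are sketched here for illustration; the real declarations and caller live outside this hunk):

/* Hedged sketch, not the real xmon caller: save state, touch the suspect
 * address, and let the fault path longjmp back with a nonzero value. */
extern long xmon_setjmp(long *buf);		/* sketched prototype */
extern void xmon_longjmp(long *buf, long val);	/* sketched prototype */

static long recover_buf[24];			/* > 23 longs, per the note above */

static int read_probe(volatile char *p, char *out)
{
	if (xmon_setjmp(recover_buf) != 0)
		return -1;	/* fault handler jumped back here */
	*out = *p;		/* may fault */
	return 0;
}
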
diff --git a/arch/powerpc/xmon/start_32.c b/arch/powerpc/xmon/start_32.c
new file mode 100644
index 000000000000..69b658c0f760
--- /dev/null
+++ b/arch/powerpc/xmon/start_32.c
@@ -0,0 +1,624 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 */
4#include <linux/config.h>
5#include <linux/string.h>
6#include <asm/machdep.h>
7#include <asm/io.h>
8#include <asm/page.h>
9#include <linux/adb.h>
10#include <linux/pmu.h>
11#include <linux/cuda.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/sysrq.h>
15#include <linux/bitops.h>
16#include <asm/xmon.h>
17#include <asm/prom.h>
18#include <asm/bootx.h>
19#include <asm/machdep.h>
20#include <asm/errno.h>
21#include <asm/pmac_feature.h>
22#include <asm/processor.h>
23#include <asm/delay.h>
24#include <asm/btext.h>
25
26static volatile unsigned char __iomem *sccc, *sccd;
27unsigned int TXRDY, RXRDY, DLAB;
28static int xmon_expect(const char *str, unsigned int timeout);
29
30static int use_serial;
31static int use_screen;
32static int via_modem;
33static int xmon_use_sccb;
34static struct device_node *channel_node;
35
36#define TB_SPEED 25000000
37
38static inline unsigned int readtb(void)
39{
40 unsigned int ret;
41
42 asm volatile("mftb %0" : "=r" (ret) :);
43 return ret;
44}
45
46void buf_access(void)
47{
48 if (DLAB)
49 sccd[3] &= ~DLAB; /* reset DLAB */
50}
51
52extern int adb_init(void);
53
54#ifdef CONFIG_PPC_CHRP
55/*
56 * This looks in the "ranges" property for the primary PCI host bridge
57 * to find the physical address of the start of PCI/ISA I/O space.
58 * It is basically a cut-down version of pci_process_bridge_OF_ranges.
59 */
60static unsigned long chrp_find_phys_io_base(void)
61{
62 struct device_node *node;
63 unsigned int *ranges;
64 unsigned long base = CHRP_ISA_IO_BASE;
65 int rlen = 0;
66 int np;
67
68 node = find_devices("isa");
69 if (node != NULL) {
70 node = node->parent;
71 if (node == NULL || node->type == NULL
72 || strcmp(node->type, "pci") != 0)
73 node = NULL;
74 }
75 if (node == NULL)
76 node = find_devices("pci");
77 if (node == NULL)
78 return base;
79
80 ranges = (unsigned int *) get_property(node, "ranges", &rlen);
81 np = prom_n_addr_cells(node) + 5;
82 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
83 if ((ranges[0] >> 24) == 1 && ranges[2] == 0) {
84 /* I/O space starting at 0, grab the phys base */
85 base = ranges[np - 3];
86 break;
87 }
88 ranges += np;
89 }
90 return base;
91}
92#endif /* CONFIG_PPC_CHRP */
93
94#ifdef CONFIG_MAGIC_SYSRQ
95static void sysrq_handle_xmon(int key, struct pt_regs *regs,
96 struct tty_struct *tty)
97{
98 xmon(regs);
99}
100
101static struct sysrq_key_op sysrq_xmon_op =
102{
103 .handler = sysrq_handle_xmon,
104 .help_msg = "Xmon",
105 .action_msg = "Entering xmon",
106};
107#endif
108
109void
110xmon_map_scc(void)
111{
112#ifdef CONFIG_PPC_MULTIPLATFORM
113 volatile unsigned char __iomem *base;
114
115 if (_machine == _MACH_Pmac) {
116 struct device_node *np;
117 unsigned long addr;
118#ifdef CONFIG_BOOTX_TEXT
119 if (!use_screen && !use_serial
120 && !machine_is_compatible("iMac")) {
121 /* see if there is a keyboard in the device tree
122 with a parent of type "adb" */
123 for (np = find_devices("keyboard"); np; np = np->next)
124 if (np->parent && np->parent->type
125 && strcmp(np->parent->type, "adb") == 0)
126 break;
127
128 /* needs to be hacked if xmon_printk is to be used
129 from within find_via_pmu() */
130#ifdef CONFIG_ADB_PMU
131 if (np != NULL && boot_text_mapped && find_via_pmu())
132 use_screen = 1;
133#endif
134#ifdef CONFIG_ADB_CUDA
135 if (np != NULL && boot_text_mapped && find_via_cuda())
136 use_screen = 1;
137#endif
138 }
139 if (!use_screen && (np = find_devices("escc")) != NULL) {
140 /*
141 * look for the device node for the serial port
142 * we're using and see if it says it has a modem
143 */
144 char *name = xmon_use_sccb? "ch-b": "ch-a";
145 char *slots;
146 int l;
147
148 np = np->child;
149 while (np != NULL && strcmp(np->name, name) != 0)
150 np = np->sibling;
151 if (np != NULL) {
152 /* XXX should parse this properly */
153 channel_node = np;
154 slots = get_property(np, "slot-names", &l);
155 if (slots != NULL && l >= 10
156 && strcmp(slots+4, "Modem") == 0)
157 via_modem = 1;
158 }
159 }
160 btext_drawstring("xmon uses ");
161 if (use_screen)
162 btext_drawstring("screen and keyboard\n");
163 else {
164 if (via_modem)
165 btext_drawstring("modem on ");
166 btext_drawstring(xmon_use_sccb? "printer": "modem");
167 btext_drawstring(" port\n");
168 }
169
170#endif /* CONFIG_BOOTX_TEXT */
171
172#ifdef CHRP_ESCC
173 addr = 0xc1013020;
174#else
175 addr = 0xf3013020;
176#endif
177 TXRDY = 4;
178 RXRDY = 1;
179
180 np = find_devices("mac-io");
181 if (np && np->n_addrs)
182 addr = np->addrs[0].address + 0x13020;
183 base = (volatile unsigned char *) ioremap(addr & PAGE_MASK, PAGE_SIZE);
184 sccc = base + (addr & ~PAGE_MASK);
185 sccd = sccc + 0x10;
186
187 } else {
188 base = (volatile unsigned char *) isa_io_base;
189
190#ifdef CONFIG_PPC_CHRP
191 if (_machine == _MACH_chrp)
192 base = (volatile unsigned char __iomem *)
193 ioremap(chrp_find_phys_io_base(), 0x1000);
194#endif
195
196 sccc = base + 0x3fd;
197 sccd = base + 0x3f8;
198 if (xmon_use_sccb) {
199 sccc -= 0x100;
200 sccd -= 0x100;
201 }
202 TXRDY = 0x20;
203 RXRDY = 1;
204 DLAB = 0x80;
205 }
206#elif defined(CONFIG_GEMINI)
207 /* should already be mapped by the kernel boot */
208 sccc = (volatile unsigned char __iomem *) 0xffeffb0d;
209 sccd = (volatile unsigned char __iomem *) 0xffeffb08;
210 TXRDY = 0x20;
211 RXRDY = 1;
212 DLAB = 0x80;
213#elif defined(CONFIG_405GP)
214 sccc = (volatile unsigned char __iomem *)0xef600305;
215 sccd = (volatile unsigned char __iomem *)0xef600300;
216 TXRDY = 0x20;
217 RXRDY = 1;
218 DLAB = 0x80;
219#endif /* platform */
220
221 register_sysrq_key('x', &sysrq_xmon_op);
222}
223
224static int scc_initialized = 0;
225
226void xmon_init_scc(void);
227extern void cuda_poll(void);
228
229static inline void do_poll_adb(void)
230{
231#ifdef CONFIG_ADB_PMU
232 if (sys_ctrler == SYS_CTRLER_PMU)
233 pmu_poll_adb();
234#endif /* CONFIG_ADB_PMU */
235#ifdef CONFIG_ADB_CUDA
236 if (sys_ctrler == SYS_CTRLER_CUDA)
237 cuda_poll();
238#endif /* CONFIG_ADB_CUDA */
239}
240
241int
242xmon_write(void *handle, void *ptr, int nb)
243{
244 char *p = ptr;
245 int i, c, ct;
246
247#ifdef CONFIG_SMP
248 static unsigned long xmon_write_lock;
249 int lock_wait = 1000000;
250 int locked;
251
252 while ((locked = test_and_set_bit(0, &xmon_write_lock)) != 0)
253 if (--lock_wait == 0)
254 break;
255#endif
256
257#ifdef CONFIG_BOOTX_TEXT
258 if (use_screen) {
259 /* write it on the screen */
260 for (i = 0; i < nb; ++i)
261 btext_drawchar(*p++);
262 goto out;
263 }
264#endif
265 if (!scc_initialized)
266 xmon_init_scc();
267 ct = 0;
268 for (i = 0; i < nb; ++i) {
269 while ((*sccc & TXRDY) == 0)
270 do_poll_adb();
271 c = p[i];
272 if (c == '\n' && !ct) {
273 c = '\r';
274 ct = 1;
275 --i;
276 } else {
277 ct = 0;
278 }
279 buf_access();
280 *sccd = c;
281 eieio();
282 }
283
284 out:
285#ifdef CONFIG_SMP
286 if (!locked)
287 clear_bit(0, &xmon_write_lock);
288#endif
289 return nb;
290}
291
292int xmon_wants_key;
293int xmon_adb_keycode;
294
295#ifdef CONFIG_BOOTX_TEXT
296static int xmon_adb_shiftstate;
297
298static unsigned char xmon_keytab[128] =
299 "asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */
300 "yt123465=97-80]o" /* 0x10 - 0x1f */
301 "u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */
302 "\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
303 "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
304 "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
305
306static unsigned char xmon_shift_keytab[128] =
307 "ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */
308 "YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */
309 "U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */
310 "\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
311 "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
312 "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
313
314static int
315xmon_get_adb_key(void)
316{
317 int k, t, on;
318
319 xmon_wants_key = 1;
320 for (;;) {
321 xmon_adb_keycode = -1;
322 t = 0;
323 on = 0;
324 do {
325 if (--t < 0) {
326 on = 1 - on;
327 btext_drawchar(on? 0xdb: 0x20);
328 btext_drawchar('\b');
329 t = 200000;
330 }
331 do_poll_adb();
332 } while (xmon_adb_keycode == -1);
333 k = xmon_adb_keycode;
334 if (on)
335 btext_drawstring(" \b");
336
337 /* test for shift keys */
338 if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
339 xmon_adb_shiftstate = (k & 0x80) == 0;
340 continue;
341 }
342 if (k >= 0x80)
343 continue; /* ignore up transitions */
344 k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
345 if (k != 0)
346 break;
347 }
348 xmon_wants_key = 0;
349 return k;
350}
351#endif /* CONFIG_BOOTX_TEXT */
352
353int
354xmon_read(void *handle, void *ptr, int nb)
355{
356 char *p = ptr;
357 int i;
358
359#ifdef CONFIG_BOOTX_TEXT
360 if (use_screen) {
361 for (i = 0; i < nb; ++i)
362 *p++ = xmon_get_adb_key();
363 return i;
364 }
365#endif
366 if (!scc_initialized)
367 xmon_init_scc();
368 for (i = 0; i < nb; ++i) {
369 while ((*sccc & RXRDY) == 0)
370 do_poll_adb();
371 buf_access();
372 *p++ = *sccd;
373 }
374 return i;
375}
376
377int
378xmon_read_poll(void)
379{
380 if ((*sccc & RXRDY) == 0) {
381 do_poll_adb();
382 return -1;
383 }
384 buf_access();
385 return *sccd;
386}
387
388static unsigned char scc_inittab[] = {
389 13, 0, /* set baud rate divisor */
390 12, 1,
391 14, 1, /* baud rate gen enable, src=rtxc */
392 11, 0x50, /* clocks = br gen */
393 5, 0xea, /* tx 8 bits, assert DTR & RTS */
394 4, 0x46, /* x16 clock, 1 stop */
395 3, 0xc1, /* rx enable, 8 bits */
396};
397
398void
399xmon_init_scc(void)
400{
401 if ( _machine == _MACH_chrp )
402 {
403 sccd[3] = 0x83; eieio(); /* LCR = 8N1 + DLAB */
404 sccd[0] = 12; eieio(); /* DLL = 9600 baud */
405 sccd[1] = 0; eieio();
406 sccd[2] = 0; eieio(); /* FCR = 0 */
407 sccd[3] = 3; eieio(); /* LCR = 8N1 */
408 sccd[1] = 0; eieio(); /* IER = 0 */
409 }
410 else if ( _machine == _MACH_Pmac )
411 {
412 int i, x;
413
414 if (channel_node != 0)
415 pmac_call_feature(
416 PMAC_FTR_SCC_ENABLE,
417 channel_node,
418 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
419 printk(KERN_INFO "Serial port locked ON by debugger !\n");
420 if (via_modem && channel_node != 0) {
421 unsigned int t0;
422
423 pmac_call_feature(
424 PMAC_FTR_MODEM_ENABLE,
425 channel_node, 0, 1);
426 printk(KERN_INFO "Modem powered up by debugger !\n");
427 t0 = readtb();
428 while (readtb() - t0 < 3*TB_SPEED)
429 eieio();
430 }
431 /* use the B channel if requested */
432 if (xmon_use_sccb) {
433 sccc = (volatile unsigned char *)
434 ((unsigned long)sccc & ~0x20);
435 sccd = sccc + 0x10;
436 }
437 for (i = 20000; i != 0; --i) {
438 x = *sccc; eieio();
439 }
440 *sccc = 9; eieio(); /* reset A or B side */
441 *sccc = ((unsigned long)sccc & 0x20)? 0x80: 0x40; eieio();
442 for (i = 0; i < sizeof(scc_inittab); ++i) {
443 *sccc = scc_inittab[i];
444 eieio();
445 }
446 }
447 scc_initialized = 1;
448 if (via_modem) {
449 for (;;) {
450 xmon_write(NULL, "ATE1V1\r", 7);
451 if (xmon_expect("OK", 5)) {
452 xmon_write(NULL, "ATA\r", 4);
453 if (xmon_expect("CONNECT", 40))
454 break;
455 }
456 xmon_write(NULL, "+++", 3);
457 xmon_expect("OK", 3);
458 }
459 }
460}
461
462void *xmon_stdin;
463void *xmon_stdout;
464void *xmon_stderr;
465
466int xmon_putc(int c, void *f)
467{
468 char ch = c;
469
470 if (c == '\n')
471 xmon_putc('\r', f);
472 return xmon_write(f, &ch, 1) == 1? c: -1;
473}
474
475int xmon_putchar(int c)
476{
477 return xmon_putc(c, xmon_stdout);
478}
479
480int xmon_fputs(char *str, void *f)
481{
482 int n = strlen(str);
483
484 return xmon_write(f, str, n) == n? 0: -1;
485}
486
487int
488xmon_readchar(void)
489{
490 char ch;
491
492 for (;;) {
493 switch (xmon_read(xmon_stdin, &ch, 1)) {
494 case 1:
495 return ch;
496 case -1:
497 xmon_printf("read(stdin) returned -1\r\n", 0, 0);
498 return -1;
499 }
500 }
501}
502
503static char line[256];
504static char *lineptr;
505static int lineleft;
506
507int xmon_expect(const char *str, unsigned int timeout)
508{
509 int c;
510 unsigned int t0;
511
512 timeout *= TB_SPEED;
513 t0 = readtb();
514 do {
515 lineptr = line;
516 for (;;) {
517 c = xmon_read_poll();
518 if (c == -1) {
519 if (readtb() - t0 > timeout)
520 return 0;
521 continue;
522 }
523 if (c == '\n')
524 break;
525 if (c != '\r' && lineptr < &line[sizeof(line) - 1])
526 *lineptr++ = c;
527 }
528 *lineptr = 0;
529 } while (strstr(line, str) == NULL);
530 return 1;
531}
532
533int
534xmon_getchar(void)
535{
536 int c;
537
538 if (lineleft == 0) {
539 lineptr = line;
540 for (;;) {
541 c = xmon_readchar();
542 if (c == -1 || c == 4)
543 break;
544 if (c == '\r' || c == '\n') {
545 *lineptr++ = '\n';
546 xmon_putchar('\n');
547 break;
548 }
549 switch (c) {
550 case 0177:
551 case '\b':
552 if (lineptr > line) {
553 xmon_putchar('\b');
554 xmon_putchar(' ');
555 xmon_putchar('\b');
556 --lineptr;
557 }
558 break;
559 case 'U' & 0x1F:
560 while (lineptr > line) {
561 xmon_putchar('\b');
562 xmon_putchar(' ');
563 xmon_putchar('\b');
564 --lineptr;
565 }
566 break;
567 default:
568 if (lineptr >= &line[sizeof(line) - 1])
569 xmon_putchar('\a');
570 else {
571 xmon_putchar(c);
572 *lineptr++ = c;
573 }
574 }
575 }
576 lineleft = lineptr - line;
577 lineptr = line;
578 }
579 if (lineleft == 0)
580 return -1;
581 --lineleft;
582 return *lineptr++;
583}
584
585char *
586xmon_fgets(char *str, int nb, void *f)
587{
588 char *p;
589 int c;
590
591 for (p = str; p < str + nb - 1; ) {
592 c = xmon_getchar();
593 if (c == -1) {
594 if (p == str)
595 return NULL;
596 break;
597 }
598 *p++ = c;
599 if (c == '\n')
600 break;
601 }
602 *p = 0;
603 return str;
604}
605
606void
607xmon_enter(void)
608{
609#ifdef CONFIG_ADB_PMU
610 if (_machine == _MACH_Pmac) {
611 pmu_suspend();
612 }
613#endif
614}
615
616void
617xmon_leave(void)
618{
619#ifdef CONFIG_ADB_PMU
620 if (_machine == _MACH_Pmac) {
621 pmu_resume();
622 }
623#endif
624}
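
xmon_expect() above measures its timeout in timebase ticks rather than jiffies, since the debugger busy-polls the serial port with interrupts off. A minimal stand-alone sketch of that polling pattern, with read_poll() and read_timebase() as hypothetical stand-ins for the SCC poll and the mftb-based readtb(), and TICKS_PER_SEC assumed to play the role of TB_SPEED:

/* Hedged sketch: poll a byte source until a deadline expires.
 * read_poll() returns -1 when no byte is ready; read_timebase()
 * stands in for the mftb-based readtb() used above.
 */
#define TICKS_PER_SEC 25000000u          /* assumed, mirrors TB_SPEED */

static int poll_with_timeout(int (*read_poll)(void),
                             unsigned int (*read_timebase)(void),
                             unsigned int seconds)
{
    unsigned int t0 = read_timebase();
    unsigned int limit = seconds * TICKS_PER_SEC;

    for (;;) {
        int c = read_poll();
        if (c != -1)
            return c;                    /* got a character */
        if (read_timebase() - t0 > limit)
            return -1;                   /* deadline passed */
    }
}

The unsigned subtraction read_timebase() - t0 keeps the comparison correct even when the timebase wraps, which is why the original code takes the same form.
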
diff --git a/arch/ppc64/xmon/start.c b/arch/powerpc/xmon/start_64.c
index e50c158191e1..e50c158191e1 100644
--- a/arch/ppc64/xmon/start.c
+++ b/arch/powerpc/xmon/start_64.c
diff --git a/arch/powerpc/xmon/start_8xx.c b/arch/powerpc/xmon/start_8xx.c
new file mode 100644
index 000000000000..a48bd594cf61
--- /dev/null
+++ b/arch/powerpc/xmon/start_8xx.c
@@ -0,0 +1,287 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 * Copyright (C) 2000 Dan Malek.
4 * Quick hack of Paul's code to make XMON work on 8xx processors. Lots
5 * of assumptions, like the SMC1 is used, it has been initialized by the
6 * loader at some point, and we can just stuff and suck bytes.
7 * We rely upon the 8xx uart driver to support us, as the interface
8 * changes between boot up and operational phases of the kernel.
9 */
10#include <linux/string.h>
11#include <asm/machdep.h>
12#include <asm/io.h>
13#include <asm/page.h>
14#include <linux/kernel.h>
15#include <asm/8xx_immap.h>
16#include <asm/mpc8xx.h>
17#include <asm/commproc.h>
18
19extern void xmon_printf(const char *fmt, ...);
20extern int xmon_8xx_write(char *str, int nb);
21extern int xmon_8xx_read_poll(void);
22extern int xmon_8xx_read_char(void);
23void prom_drawhex(uint);
24void prom_drawstring(const char *str);
25
26static int use_screen = 1; /* default */
27
28#define TB_SPEED 25000000
29
30static inline unsigned int readtb(void)
31{
32 unsigned int ret;
33
34 asm volatile("mftb %0" : "=r" (ret) :);
35 return ret;
36}
37
38void buf_access(void)
39{
40}
41
42void
43xmon_map_scc(void)
44{
45
46 cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm);
47 use_screen = 0;
48
49 prom_drawstring("xmon uses serial port\n");
50}
51
52static int scc_initialized = 0;
53
54void xmon_init_scc(void);
55
56int
57xmon_write(void *handle, void *ptr, int nb)
58{
59 char *p = ptr;
60 int i, c, ct;
61
62 if (!scc_initialized)
63 xmon_init_scc();
64
65 return(xmon_8xx_write(ptr, nb));
66}
67
68int xmon_wants_key;
69
70int
71xmon_read(void *handle, void *ptr, int nb)
72{
73 char *p = ptr;
74 int i;
75
76 if (!scc_initialized)
77 xmon_init_scc();
78
79 for (i = 0; i < nb; ++i) {
80 *p++ = xmon_8xx_read_char();
81 }
82 return i;
83}
84
85int
86xmon_read_poll(void)
87{
88 return(xmon_8xx_read_poll());
89}
90
91void
92xmon_init_scc()
93{
94 scc_initialized = 1;
95}
96
97#if 0
98extern int (*prom_entry)(void *);
99
100int
101xmon_exit(void)
102{
103 struct prom_args {
104 char *service;
105 } args;
106
107 for (;;) {
108 args.service = "exit";
109 (*prom_entry)(&args);
110 }
111}
112#endif
113
114void *xmon_stdin;
115void *xmon_stdout;
116void *xmon_stderr;
117
118void
119xmon_init(void)
120{
121}
122
123int
124xmon_putc(int c, void *f)
125{
126 char ch = c;
127
128 if (c == '\n')
129 xmon_putc('\r', f);
130 return xmon_write(f, &ch, 1) == 1? c: -1;
131}
132
133int
134xmon_putchar(int c)
135{
136 return xmon_putc(c, xmon_stdout);
137}
138
139int
140xmon_fputs(char *str, void *f)
141{
142 int n = strlen(str);
143
144 return xmon_write(f, str, n) == n? 0: -1;
145}
146
147int
148xmon_readchar(void)
149{
150 char ch;
151
152 for (;;) {
153 switch (xmon_read(xmon_stdin, &ch, 1)) {
154 case 1:
155 return ch;
156 case -1:
157 xmon_printf("read(stdin) returned -1\r\n", 0, 0);
158 return -1;
159 }
160 }
161}
162
163static char line[256];
164static char *lineptr;
165static int lineleft;
166
167#if 0
168int xmon_expect(const char *str, unsigned int timeout)
169{
170 int c;
171 unsigned int t0;
172
173 timeout *= TB_SPEED;
174 t0 = readtb();
175 do {
176 lineptr = line;
177 for (;;) {
178 c = xmon_read_poll();
179 if (c == -1) {
180 if (readtb() - t0 > timeout)
181 return 0;
182 continue;
183 }
184 if (c == '\n')
185 break;
186 if (c != '\r' && lineptr < &line[sizeof(line) - 1])
187 *lineptr++ = c;
188 }
189 *lineptr = 0;
190 } while (strstr(line, str) == NULL);
191 return 1;
192}
193#endif
194
195int
196xmon_getchar(void)
197{
198 int c;
199
200 if (lineleft == 0) {
201 lineptr = line;
202 for (;;) {
203 c = xmon_readchar();
204 if (c == -1 || c == 4)
205 break;
206 if (c == '\r' || c == '\n') {
207 *lineptr++ = '\n';
208 xmon_putchar('\n');
209 break;
210 }
211 switch (c) {
212 case 0177:
213 case '\b':
214 if (lineptr > line) {
215 xmon_putchar('\b');
216 xmon_putchar(' ');
217 xmon_putchar('\b');
218 --lineptr;
219 }
220 break;
221 case 'U' & 0x1F:
222 while (lineptr > line) {
223 xmon_putchar('\b');
224 xmon_putchar(' ');
225 xmon_putchar('\b');
226 --lineptr;
227 }
228 break;
229 default:
230 if (lineptr >= &line[sizeof(line) - 1])
231 xmon_putchar('\a');
232 else {
233 xmon_putchar(c);
234 *lineptr++ = c;
235 }
236 }
237 }
238 lineleft = lineptr - line;
239 lineptr = line;
240 }
241 if (lineleft == 0)
242 return -1;
243 --lineleft;
244 return *lineptr++;
245}
246
247char *
248xmon_fgets(char *str, int nb, void *f)
249{
250 char *p;
251 int c;
252
253 for (p = str; p < str + nb - 1; ) {
254 c = xmon_getchar();
255 if (c == -1) {
256 if (p == str)
257 return 0;
258 break;
259 }
260 *p++ = c;
261 if (c == '\n')
262 break;
263 }
264 *p = 0;
265 return str;
266}
267
268void
269prom_drawhex(uint val)
270{
271 unsigned char buf[10];
272
273 int i;
274 for (i = 7; i >= 0; i--)
275 {
276 buf[i] = "0123456789abcdef"[val & 0x0f];
277 val >>= 4;
278 }
279 buf[8] = '\0';
280 xmon_fputs(buf, xmon_stdout);
281}
282
283void
284prom_drawstring(const char *str)
285{
286 xmon_fputs(str, xmon_stdout);
287}
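
prom_drawhex() above builds the string from the least-significant nibble upward by indexing into a lookup string. The same conversion as a stand-alone, hedged sketch (u32_to_hex() is illustrative, not a helper from the patch):

#include <stdio.h>

/* Sketch of the nibble-indexing conversion used by prom_drawhex():
 * peel four bits at a time, map them through a lookup string, and
 * fill the buffer from the right so the digits come out in order.
 */
static void u32_to_hex(unsigned int val, char buf[9])
{
    int i;

    for (i = 7; i >= 0; i--) {
        buf[i] = "0123456789abcdef"[val & 0x0f];
        val >>= 4;
    }
    buf[8] = '\0';
}

int main(void)
{
    char buf[9];

    u32_to_hex(0xdeadbeef, buf);
    printf("%s\n", buf);   /* prints deadbeef */
    return 0;
}
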
diff --git a/arch/ppc64/xmon/subr_prf.c b/arch/powerpc/xmon/subr_prf.c
index 5242bd7d0959..b48738c6dd33 100644
--- a/arch/ppc64/xmon/subr_prf.c
+++ b/arch/powerpc/xmon/subr_prf.c
@@ -18,13 +18,13 @@
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/module.h>
21#include <stdarg.h> 22#include <stdarg.h>
22#include "nonstdio.h" 23#include "nonstdio.h"
23 24
24extern int xmon_write(void *, void *, int); 25extern int xmon_write(void *, void *, int);
25 26
26void 27void xmon_vfprintf(void *f, const char *fmt, va_list ap)
27xmon_vfprintf(void *f, const char *fmt, va_list ap)
28{ 28{
29 static char xmon_buf[2048]; 29 static char xmon_buf[2048];
30 int n; 30 int n;
@@ -33,8 +33,7 @@ xmon_vfprintf(void *f, const char *fmt, va_list ap)
33 xmon_write(f, xmon_buf, n); 33 xmon_write(f, xmon_buf, n);
34} 34}
35 35
36void 36void xmon_printf(const char *fmt, ...)
37xmon_printf(const char *fmt, ...)
38{ 37{
39 va_list ap; 38 va_list ap;
40 39
@@ -42,9 +41,9 @@ xmon_printf(const char *fmt, ...)
42 xmon_vfprintf(stdout, fmt, ap); 41 xmon_vfprintf(stdout, fmt, ap);
43 va_end(ap); 42 va_end(ap);
44} 43}
44EXPORT_SYMBOL(xmon_printf);
45 45
46void 46void xmon_fprintf(void *f, const char *fmt, ...)
47xmon_fprintf(void *f, const char *fmt, ...)
48{ 47{
49 va_list ap; 48 va_list ap;
50 49
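
xmon_vfprintf() above follows the usual pattern for formatted output over a raw byte writer: format into a static buffer, then hand the bytes to xmon_write(). A hedged userspace sketch of the same shape, with raw_write() standing in for xmon_write() and vsnprintf() assumed where the kernel routine formats into xmon_buf:

#include <stdarg.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for xmon_write(): push raw bytes to some console. */
static int raw_write(const void *buf, int nb)
{
    return (int)write(STDOUT_FILENO, buf, nb);
}

/* Format into a fixed buffer, then emit it with the raw writer.
 * The static buffer mirrors xmon_buf[]; this is only tolerable
 * because the debugger runs single-threaded with interrupts off.
 */
static void demo_printf(const char *fmt, ...)
{
    static char buf[2048];
    va_list ap;
    int n;

    va_start(ap, fmt);
    n = vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
    if (n > 0)
        raw_write(buf, n < (int)sizeof(buf) ? n : (int)sizeof(buf) - 1);
}
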
diff --git a/arch/ppc64/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 74e63a886a69..1124f1146202 100644
--- a/arch/ppc64/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -17,25 +17,31 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
19#include <linux/cpumask.h> 19#include <linux/cpumask.h>
20#include <linux/module.h>
20 21
21#include <asm/ptrace.h> 22#include <asm/ptrace.h>
22#include <asm/string.h> 23#include <asm/string.h>
23#include <asm/prom.h> 24#include <asm/prom.h>
24#include <asm/machdep.h> 25#include <asm/machdep.h>
26#include <asm/xmon.h>
27#ifdef CONFIG_PMAC_BACKLIGHT
28#include <asm/backlight.h>
29#endif
25#include <asm/processor.h> 30#include <asm/processor.h>
26#include <asm/pgtable.h> 31#include <asm/pgtable.h>
27#include <asm/mmu.h> 32#include <asm/mmu.h>
28#include <asm/mmu_context.h> 33#include <asm/mmu_context.h>
29#include <asm/paca.h>
30#include <asm/ppcdebug.h>
31#include <asm/cputable.h> 34#include <asm/cputable.h>
32#include <asm/rtas.h> 35#include <asm/rtas.h>
33#include <asm/sstep.h> 36#include <asm/sstep.h>
34#include <asm/bug.h> 37#include <asm/bug.h>
38
39#ifdef CONFIG_PPC64
35#include <asm/hvcall.h> 40#include <asm/hvcall.h>
41#include <asm/paca.h>
42#endif
36 43
37#include "nonstdio.h" 44#include "nonstdio.h"
38#include "privinst.h"
39 45
40#define scanhex xmon_scanhex 46#define scanhex xmon_scanhex
41#define skipbl xmon_skipbl 47#define skipbl xmon_skipbl
@@ -58,7 +64,7 @@ static unsigned long ncsum = 4096;
58static int termch; 64static int termch;
59static char tmpstr[128]; 65static char tmpstr[128];
60 66
61#define JMP_BUF_LEN (184/sizeof(long)) 67#define JMP_BUF_LEN 23
62static long bus_error_jmp[JMP_BUF_LEN]; 68static long bus_error_jmp[JMP_BUF_LEN];
63static int catch_memory_errors; 69static int catch_memory_errors;
64static long *xmon_fault_jmp[NR_CPUS]; 70static long *xmon_fault_jmp[NR_CPUS];
@@ -130,23 +136,36 @@ static void cacheflush(void);
130static int cpu_cmd(void); 136static int cpu_cmd(void);
131static void csum(void); 137static void csum(void);
132static void bootcmds(void); 138static void bootcmds(void);
139static void proccall(void);
133void dump_segments(void); 140void dump_segments(void);
134static void symbol_lookup(void); 141static void symbol_lookup(void);
135static void xmon_print_symbol(unsigned long address, const char *mid, 142static void xmon_print_symbol(unsigned long address, const char *mid,
136 const char *after); 143 const char *after);
137static const char *getvecname(unsigned long vec); 144static const char *getvecname(unsigned long vec);
138 145
139static void debug_trace(void);
140
141extern int print_insn_powerpc(unsigned long, unsigned long, int); 146extern int print_insn_powerpc(unsigned long, unsigned long, int);
142extern void printf(const char *fmt, ...); 147extern void printf(const char *fmt, ...);
143extern void xmon_vfprintf(void *f, const char *fmt, va_list ap); 148extern void xmon_vfprintf(void *f, const char *fmt, va_list ap);
144extern int xmon_putc(int c, void *f); 149extern int xmon_putc(int c, void *f);
145extern int putchar(int ch); 150extern int putchar(int ch);
151
152extern void xmon_enter(void);
153extern void xmon_leave(void);
154
146extern int xmon_read_poll(void); 155extern int xmon_read_poll(void);
147extern int setjmp(long *); 156extern long setjmp(long *);
148extern void longjmp(long *, int); 157extern void longjmp(long *, long);
149extern unsigned long _ASR; 158extern void xmon_save_regs(struct pt_regs *);
159
160#ifdef CONFIG_PPC64
161#define REG "%.16lx"
162#define REGS_PER_LINE 4
163#define LAST_VOLATILE 13
164#else
165#define REG "%.8lx"
166#define REGS_PER_LINE 8
167#define LAST_VOLATILE 12
168#endif
150 169
151#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3]) 170#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
152 171
@@ -186,47 +205,45 @@ Commands:\n\
186 ml locate a block of memory\n\ 205 ml locate a block of memory\n\
187 mz zero a block of memory\n\ 206 mz zero a block of memory\n\
188 mi show information about memory allocation\n\ 207 mi show information about memory allocation\n\
189 p show the task list\n\ 208 p call a procedure\n\
190 r print registers\n\ 209 r print registers\n\
191 s single step\n\ 210 s single step\n\
192 S print special registers\n\ 211 S print special registers\n\
193 t print backtrace\n\ 212 t print backtrace\n\
194 T Enable/Disable PPCDBG flags\n\
195 x exit monitor and recover\n\ 213 x exit monitor and recover\n\
196 X exit monitor and dont recover\n\ 214 X exit monitor and dont recover\n"
197 u dump segment table or SLB\n\ 215#ifdef CONFIG_PPC64
198 ? help\n" 216" u dump segment table or SLB\n"
199 "\ 217#endif
200 zr reboot\n\ 218#ifdef CONFIG_PPC_STD_MMU_32
219" u dump segment registers\n"
220#endif
221" ? help\n"
222" zr reboot\n\
201 zh halt\n" 223 zh halt\n"
202; 224;
203 225
204static struct pt_regs *xmon_regs; 226static struct pt_regs *xmon_regs;
205 227
206extern inline void sync(void) 228static inline void sync(void)
207{ 229{
208 asm volatile("sync; isync"); 230 asm volatile("sync; isync");
209} 231}
210 232
211/* (Ref: 64-bit PowerPC ELF ABI Spplement; Ian Lance Taylor, Zembu Labs). 233static inline void store_inst(void *p)
212 A PPC stack frame looks like this: 234{
213 235 asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
214 High Address 236}
215 Back Chain 237
216 FP reg save area 238static inline void cflush(void *p)
217 GP reg save area 239{
218 Local var space 240 asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
219 Parameter save area (SP+48) 241}
220 TOC save area (SP+40) 242
221 link editor doubleword (SP+32) 243static inline void cinval(void *p)
222 compiler doubleword (SP+24) 244{
223 LR save (SP+16) 245 asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
224 CR save (SP+8) 246}
225 Back Chain (SP+0)
226
227 Note that the LR (ret addr) may not be saved in the current frame if
228 no functions have been called from the current function.
229 */
230 247
231/* 248/*
232 * Disable surveillance (the service processor watchdog function) 249 * Disable surveillance (the service processor watchdog function)
@@ -310,8 +327,8 @@ int xmon_core(struct pt_regs *regs, int fromipi)
310 unsigned long timeout; 327 unsigned long timeout;
311#endif 328#endif
312 329
313 msr = get_msr(); 330 msr = mfmsr();
314 set_msrd(msr & ~MSR_EE); /* disable interrupts */ 331 mtmsr(msr & ~MSR_EE); /* disable interrupts */
315 332
316 bp = in_breakpoint_table(regs->nip, &offset); 333 bp = in_breakpoint_table(regs->nip, &offset);
317 if (bp != NULL) { 334 if (bp != NULL) {
@@ -487,7 +504,7 @@ int xmon_core(struct pt_regs *regs, int fromipi)
487 504
488 insert_cpu_bpts(); 505 insert_cpu_bpts();
489 506
490 set_msrd(msr); /* restore interrupt enable */ 507 mtmsr(msr); /* restore interrupt enable */
491 508
492 return cmd != 'X'; 509 return cmd != 'X';
493} 510}
@@ -497,56 +514,23 @@ int xmon(struct pt_regs *excp)
497 struct pt_regs regs; 514 struct pt_regs regs;
498 515
499 if (excp == NULL) { 516 if (excp == NULL) {
500 /* Ok, grab regs as they are now. 517 xmon_save_regs(&regs);
501 This won't do a particularily good job because the
502 prologue has already been executed.
503 ToDo: We could reach back into the callers save
504 area to do a better job of representing the
505 caller's state.
506 */
507 asm volatile ("std 0,0(%0)\n\
508 std 1,8(%0)\n\
509 std 2,16(%0)\n\
510 std 3,24(%0)\n\
511 std 4,32(%0)\n\
512 std 5,40(%0)\n\
513 std 6,48(%0)\n\
514 std 7,56(%0)\n\
515 std 8,64(%0)\n\
516 std 9,72(%0)\n\
517 std 10,80(%0)\n\
518 std 11,88(%0)\n\
519 std 12,96(%0)\n\
520 std 13,104(%0)\n\
521 std 14,112(%0)\n\
522 std 15,120(%0)\n\
523 std 16,128(%0)\n\
524 std 17,136(%0)\n\
525 std 18,144(%0)\n\
526 std 19,152(%0)\n\
527 std 20,160(%0)\n\
528 std 21,168(%0)\n\
529 std 22,176(%0)\n\
530 std 23,184(%0)\n\
531 std 24,192(%0)\n\
532 std 25,200(%0)\n\
533 std 26,208(%0)\n\
534 std 27,216(%0)\n\
535 std 28,224(%0)\n\
536 std 29,232(%0)\n\
537 std 30,240(%0)\n\
538 std 31,248(%0)" : : "b" (&regs));
539
540 regs.nip = regs.link = ((unsigned long *)(regs.gpr[1]))[2];
541 regs.msr = get_msr();
542 regs.ctr = get_ctr();
543 regs.xer = get_xer();
544 regs.ccr = get_cr();
545 regs.trap = 0;
546 excp = &regs; 518 excp = &regs;
547 } 519 }
548 return xmon_core(excp, 0); 520 return xmon_core(excp, 0);
549} 521}
522EXPORT_SYMBOL(xmon);
523
524irqreturn_t
525xmon_irq(int irq, void *d, struct pt_regs *regs)
526{
527 unsigned long flags;
528 local_irq_save(flags);
529 printf("Keyboard interrupt\n");
530 xmon(regs);
531 local_irq_restore(flags);
532 return IRQ_HANDLED;
533}
550 534
551int xmon_bpt(struct pt_regs *regs) 535int xmon_bpt(struct pt_regs *regs)
552{ 536{
@@ -718,7 +702,7 @@ static void insert_cpu_bpts(void)
718 if (dabr.enabled) 702 if (dabr.enabled)
719 set_dabr(dabr.address | (dabr.enabled & 7)); 703 set_dabr(dabr.address | (dabr.enabled & 7));
720 if (iabr && cpu_has_feature(CPU_FTR_IABR)) 704 if (iabr && cpu_has_feature(CPU_FTR_IABR))
721 set_iabr(iabr->address 705 mtspr(SPRN_IABR, iabr->address
722 | (iabr->enabled & (BP_IABR|BP_IABR_TE))); 706 | (iabr->enabled & (BP_IABR|BP_IABR_TE)));
723} 707}
724 708
@@ -746,7 +730,7 @@ static void remove_cpu_bpts(void)
746{ 730{
747 set_dabr(0); 731 set_dabr(0);
748 if (cpu_has_feature(CPU_FTR_IABR)) 732 if (cpu_has_feature(CPU_FTR_IABR))
749 set_iabr(0); 733 mtspr(SPRN_IABR, 0);
750} 734}
751 735
752/* Command interpreting routine */ 736/* Command interpreting routine */
@@ -830,9 +814,6 @@ cmds(struct pt_regs *excp)
830 case '?': 814 case '?':
831 printf(help_string); 815 printf(help_string);
832 break; 816 break;
833 case 'p':
834 show_state();
835 break;
836 case 'b': 817 case 'b':
837 bpt_cmds(); 818 bpt_cmds();
838 break; 819 break;
@@ -846,12 +827,14 @@ cmds(struct pt_regs *excp)
846 case 'z': 827 case 'z':
847 bootcmds(); 828 bootcmds();
848 break; 829 break;
849 case 'T': 830 case 'p':
850 debug_trace(); 831 proccall();
851 break; 832 break;
833#ifdef CONFIG_PPC_STD_MMU
852 case 'u': 834 case 'u':
853 dump_segments(); 835 dump_segments();
854 break; 836 break;
837#endif
855 default: 838 default:
856 printf("Unrecognized command: "); 839 printf("Unrecognized command: ");
857 do { 840 do {
@@ -1070,6 +1053,7 @@ bpt_cmds(void)
1070 1053
1071 cmd = inchar(); 1054 cmd = inchar();
1072 switch (cmd) { 1055 switch (cmd) {
1056#ifndef CONFIG_8xx
1073 case 'd': /* bd - hardware data breakpoint */ 1057 case 'd': /* bd - hardware data breakpoint */
1074 mode = 7; 1058 mode = 7;
1075 cmd = inchar(); 1059 cmd = inchar();
@@ -1111,6 +1095,7 @@ bpt_cmds(void)
1111 iabr = bp; 1095 iabr = bp;
1112 } 1096 }
1113 break; 1097 break;
1098#endif
1114 1099
1115 case 'c': 1100 case 'c':
1116 if (!scanhex(&a)) { 1101 if (!scanhex(&a)) {
@@ -1152,7 +1137,7 @@ bpt_cmds(void)
1152 /* print all breakpoints */ 1137 /* print all breakpoints */
1153 printf(" type address\n"); 1138 printf(" type address\n");
1154 if (dabr.enabled) { 1139 if (dabr.enabled) {
1155 printf(" data %.16lx [", dabr.address); 1140 printf(" data "REG" [", dabr.address);
1156 if (dabr.enabled & 1) 1141 if (dabr.enabled & 1)
1157 printf("r"); 1142 printf("r");
1158 if (dabr.enabled & 2) 1143 if (dabr.enabled & 2)
@@ -1231,6 +1216,18 @@ static void get_function_bounds(unsigned long pc, unsigned long *startp,
1231 1216
1232static int xmon_depth_to_print = 64; 1217static int xmon_depth_to_print = 64;
1233 1218
1219#ifdef CONFIG_PPC64
1220#define LRSAVE_OFFSET 0x10
1221#define REG_FRAME_MARKER 0x7265677368657265ul /* "regshere" */
1222#define MARKER_OFFSET 0x60
1223#define REGS_OFFSET 0x70
1224#else
1225#define LRSAVE_OFFSET 4
1226#define REG_FRAME_MARKER 0x72656773
1227#define MARKER_OFFSET 8
1228#define REGS_OFFSET 16
1229#endif
1230
1234static void xmon_show_stack(unsigned long sp, unsigned long lr, 1231static void xmon_show_stack(unsigned long sp, unsigned long lr,
1235 unsigned long pc) 1232 unsigned long pc)
1236{ 1233{
@@ -1247,7 +1244,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
1247 break; 1244 break;
1248 } 1245 }
1249 1246
1250 if (!mread(sp + 16, &ip, sizeof(unsigned long)) 1247 if (!mread(sp + LRSAVE_OFFSET, &ip, sizeof(unsigned long))
1251 || !mread(sp, &newsp, sizeof(unsigned long))) { 1248 || !mread(sp, &newsp, sizeof(unsigned long))) {
1252 printf("Couldn't read stack frame at %lx\n", sp); 1249 printf("Couldn't read stack frame at %lx\n", sp);
1253 break; 1250 break;
@@ -1266,7 +1263,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
1266 get_function_bounds(pc, &fnstart, &fnend); 1263 get_function_bounds(pc, &fnstart, &fnend);
1267 nextip = 0; 1264 nextip = 0;
1268 if (newsp > sp) 1265 if (newsp > sp)
1269 mread(newsp + 16, &nextip, 1266 mread(newsp + LRSAVE_OFFSET, &nextip,
1270 sizeof(unsigned long)); 1267 sizeof(unsigned long));
1271 if (lr == ip) { 1268 if (lr == ip) {
1272 if (lr < PAGE_OFFSET 1269 if (lr < PAGE_OFFSET
@@ -1280,24 +1277,24 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
1280 xmon_print_symbol(lr, " ", "\n"); 1277 xmon_print_symbol(lr, " ", "\n");
1281 } 1278 }
1282 if (printip) { 1279 if (printip) {
1283 printf("[%.16lx] ", sp); 1280 printf("["REG"] ", sp);
1284 xmon_print_symbol(ip, " ", " (unreliable)\n"); 1281 xmon_print_symbol(ip, " ", " (unreliable)\n");
1285 } 1282 }
1286 pc = lr = 0; 1283 pc = lr = 0;
1287 1284
1288 } else { 1285 } else {
1289 printf("[%.16lx] ", sp); 1286 printf("["REG"] ", sp);
1290 xmon_print_symbol(ip, " ", "\n"); 1287 xmon_print_symbol(ip, " ", "\n");
1291 } 1288 }
1292 1289
1293 /* Look for "regshere" marker to see if this is 1290 /* Look for "regshere" marker to see if this is
1294 an exception frame. */ 1291 an exception frame. */
1295 if (mread(sp + 0x60, &marker, sizeof(unsigned long)) 1292 if (mread(sp + MARKER_OFFSET, &marker, sizeof(unsigned long))
1296 && marker == 0x7265677368657265ul) { 1293 && marker == REG_FRAME_MARKER) {
1297 if (mread(sp + 0x70, &regs, sizeof(regs)) 1294 if (mread(sp + REGS_OFFSET, &regs, sizeof(regs))
1298 != sizeof(regs)) { 1295 != sizeof(regs)) {
1299 printf("Couldn't read registers at %lx\n", 1296 printf("Couldn't read registers at %lx\n",
1300 sp + 0x70); 1297 sp + REGS_OFFSET);
1301 break; 1298 break;
1302 } 1299 }
1303 printf("--- Exception: %lx %s at ", regs.trap, 1300 printf("--- Exception: %lx %s at ", regs.trap,
@@ -1371,7 +1368,9 @@ void excprint(struct pt_regs *fp)
1371 } 1368 }
1372 1369
1373 printf(" current = 0x%lx\n", current); 1370 printf(" current = 0x%lx\n", current);
1371#ifdef CONFIG_PPC64
1374 printf(" paca = 0x%lx\n", get_paca()); 1372 printf(" paca = 0x%lx\n", get_paca());
1373#endif
1375 if (current) { 1374 if (current) {
1376 printf(" pid = %ld, comm = %s\n", 1375 printf(" pid = %ld, comm = %s\n",
1377 current->pid, current->comm); 1376 current->pid, current->comm);
@@ -1383,7 +1382,7 @@ void excprint(struct pt_regs *fp)
1383 1382
1384void prregs(struct pt_regs *fp) 1383void prregs(struct pt_regs *fp)
1385{ 1384{
1386 int n; 1385 int n, trap;
1387 unsigned long base; 1386 unsigned long base;
1388 struct pt_regs regs; 1387 struct pt_regs regs;
1389 1388
@@ -1396,7 +1395,7 @@ void prregs(struct pt_regs *fp)
1396 __delay(200); 1395 __delay(200);
1397 } else { 1396 } else {
1398 catch_memory_errors = 0; 1397 catch_memory_errors = 0;
1399 printf("*** Error reading registers from %.16lx\n", 1398 printf("*** Error reading registers from "REG"\n",
1400 base); 1399 base);
1401 return; 1400 return;
1402 } 1401 }
@@ -1404,22 +1403,36 @@ void prregs(struct pt_regs *fp)
1404 fp = &regs; 1403 fp = &regs;
1405 } 1404 }
1406 1405
1406#ifdef CONFIG_PPC64
1407 if (FULL_REGS(fp)) { 1407 if (FULL_REGS(fp)) {
1408 for (n = 0; n < 16; ++n) 1408 for (n = 0; n < 16; ++n)
1409 printf("R%.2ld = %.16lx R%.2ld = %.16lx\n", 1409 printf("R%.2ld = "REG" R%.2ld = "REG"\n",
1410 n, fp->gpr[n], n+16, fp->gpr[n+16]); 1410 n, fp->gpr[n], n+16, fp->gpr[n+16]);
1411 } else { 1411 } else {
1412 for (n = 0; n < 7; ++n) 1412 for (n = 0; n < 7; ++n)
1413 printf("R%.2ld = %.16lx R%.2ld = %.16lx\n", 1413 printf("R%.2ld = "REG" R%.2ld = "REG"\n",
1414 n, fp->gpr[n], n+7, fp->gpr[n+7]); 1414 n, fp->gpr[n], n+7, fp->gpr[n+7]);
1415 } 1415 }
1416#else
1417 for (n = 0; n < 32; ++n) {
1418 printf("R%.2d = %.8x%s", n, fp->gpr[n],
1419 (n & 3) == 3? "\n": " ");
1420 if (n == 12 && !FULL_REGS(fp)) {
1421 printf("\n");
1422 break;
1423 }
1424 }
1425#endif
1416 printf("pc = "); 1426 printf("pc = ");
1417 xmon_print_symbol(fp->nip, " ", "\n"); 1427 xmon_print_symbol(fp->nip, " ", "\n");
1418 printf("lr = "); 1428 printf("lr = ");
1419 xmon_print_symbol(fp->link, " ", "\n"); 1429 xmon_print_symbol(fp->link, " ", "\n");
1420 printf("msr = %.16lx cr = %.8lx\n", fp->msr, fp->ccr); 1430 printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr);
1421 printf("ctr = %.16lx xer = %.16lx trap = %8lx\n", 1431 printf("ctr = "REG" xer = "REG" trap = %4lx\n",
1422 fp->ctr, fp->xer, fp->trap); 1432 fp->ctr, fp->xer, fp->trap);
1433 trap = TRAP(fp);
1434 if (trap == 0x300 || trap == 0x380 || trap == 0x600)
1435 printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr);
1423} 1436}
1424 1437
1425void cacheflush(void) 1438void cacheflush(void)
@@ -1519,8 +1532,7 @@ static unsigned long regno;
1519extern char exc_prolog; 1532extern char exc_prolog;
1520extern char dec_exc; 1533extern char dec_exc;
1521 1534
1522void 1535void super_regs(void)
1523super_regs(void)
1524{ 1536{
1525 int cmd; 1537 int cmd;
1526 unsigned long val; 1538 unsigned long val;
@@ -1536,12 +1548,14 @@ super_regs(void)
1536 asm("mr %0,1" : "=r" (sp) :); 1548 asm("mr %0,1" : "=r" (sp) :);
1537 asm("mr %0,2" : "=r" (toc) :); 1549 asm("mr %0,2" : "=r" (toc) :);
1538 1550
1539 printf("msr = %.16lx sprg0= %.16lx\n", get_msr(), get_sprg0()); 1551 printf("msr = "REG" sprg0= "REG"\n",
1540 printf("pvr = %.16lx sprg1= %.16lx\n", get_pvr(), get_sprg1()); 1552 mfmsr(), mfspr(SPRN_SPRG0));
1541 printf("dec = %.16lx sprg2= %.16lx\n", get_dec(), get_sprg2()); 1553 printf("pvr = "REG" sprg1= "REG"\n",
1542 printf("sp = %.16lx sprg3= %.16lx\n", sp, get_sprg3()); 1554 mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
1543 printf("toc = %.16lx dar = %.16lx\n", toc, get_dar()); 1555 printf("dec = "REG" sprg2= "REG"\n",
1544 printf("srr0 = %.16lx srr1 = %.16lx\n", get_srr0(), get_srr1()); 1556 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
1557 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
1558 printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
1545#ifdef CONFIG_PPC_ISERIES 1559#ifdef CONFIG_PPC_ISERIES
1546 // Dump out relevant Paca data areas. 1560 // Dump out relevant Paca data areas.
1547 printf("Paca: \n"); 1561 printf("Paca: \n");
@@ -1578,11 +1592,6 @@ super_regs(void)
1578 case 'r': 1592 case 'r':
1579 printf("spr %lx = %lx\n", regno, read_spr(regno)); 1593 printf("spr %lx = %lx\n", regno, read_spr(regno));
1580 break; 1594 break;
1581 case 'm':
1582 val = get_msr();
1583 scanhex(&val);
1584 set_msrd(val);
1585 break;
1586 } 1595 }
1587 scannl(); 1596 scannl();
1588} 1597}
@@ -1604,13 +1613,13 @@ mread(unsigned long adrs, void *buf, int size)
1604 q = (char *)buf; 1613 q = (char *)buf;
1605 switch (size) { 1614 switch (size) {
1606 case 2: 1615 case 2:
1607 *(short *)q = *(short *)p; 1616 *(u16 *)q = *(u16 *)p;
1608 break; 1617 break;
1609 case 4: 1618 case 4:
1610 *(int *)q = *(int *)p; 1619 *(u32 *)q = *(u32 *)p;
1611 break; 1620 break;
1612 case 8: 1621 case 8:
1613 *(long *)q = *(long *)p; 1622 *(u64 *)q = *(u64 *)p;
1614 break; 1623 break;
1615 default: 1624 default:
1616 for( ; n < size; ++n) { 1625 for( ; n < size; ++n) {
@@ -1641,13 +1650,13 @@ mwrite(unsigned long adrs, void *buf, int size)
1641 q = (char *) buf; 1650 q = (char *) buf;
1642 switch (size) { 1651 switch (size) {
1643 case 2: 1652 case 2:
1644 *(short *)p = *(short *)q; 1653 *(u16 *)p = *(u16 *)q;
1645 break; 1654 break;
1646 case 4: 1655 case 4:
1647 *(int *)p = *(int *)q; 1656 *(u32 *)p = *(u32 *)q;
1648 break; 1657 break;
1649 case 8: 1658 case 8:
1650 *(long *)p = *(long *)q; 1659 *(u64 *)p = *(u64 *)q;
1651 break; 1660 break;
1652 default: 1661 default:
1653 for ( ; n < size; ++n) { 1662 for ( ; n < size; ++n) {
@@ -1667,11 +1676,12 @@ mwrite(unsigned long adrs, void *buf, int size)
1667} 1676}
1668 1677
1669static int fault_type; 1678static int fault_type;
1679static int fault_except;
1670static char *fault_chars[] = { "--", "**", "##" }; 1680static char *fault_chars[] = { "--", "**", "##" };
1671 1681
1672static int 1682static int handle_fault(struct pt_regs *regs)
1673handle_fault(struct pt_regs *regs)
1674{ 1683{
1684 fault_except = TRAP(regs);
1675 switch (TRAP(regs)) { 1685 switch (TRAP(regs)) {
1676 case 0x200: 1686 case 0x200:
1677 fault_type = 0; 1687 fault_type = 0;
@@ -1960,7 +1970,7 @@ prdump(unsigned long adrs, long ndump)
1960 unsigned char temp[16]; 1970 unsigned char temp[16];
1961 1971
1962 for (n = ndump; n > 0;) { 1972 for (n = ndump; n > 0;) {
1963 printf("%.16lx", adrs); 1973 printf(REG, adrs);
1964 putchar(' '); 1974 putchar(' ');
1965 r = n < 16? n: 16; 1975 r = n < 16? n: 16;
1966 nr = mread(adrs, temp, r); 1976 nr = mread(adrs, temp, r);
@@ -2008,7 +2018,7 @@ ppc_inst_dump(unsigned long adr, long count, int praddr)
2008 if (nr == 0) { 2018 if (nr == 0) {
2009 if (praddr) { 2019 if (praddr) {
2010 const char *x = fault_chars[fault_type]; 2020 const char *x = fault_chars[fault_type];
2011 printf("%.16lx %s%s%s%s\n", adr, x, x, x, x); 2021 printf(REG" %s%s%s%s\n", adr, x, x, x, x);
2012 } 2022 }
2013 break; 2023 break;
2014 } 2024 }
@@ -2023,7 +2033,7 @@ ppc_inst_dump(unsigned long adr, long count, int praddr)
2023 dotted = 0; 2033 dotted = 0;
2024 last_inst = inst; 2034 last_inst = inst;
2025 if (praddr) 2035 if (praddr)
2026 printf("%.16lx %.8x", adr, inst); 2036 printf(REG" %.8x", adr, inst);
2027 printf("\t"); 2037 printf("\t");
2028 print_insn_powerpc(inst, adr, 0); /* always returns 4 */ 2038 print_insn_powerpc(inst, adr, 0); /* always returns 4 */
2029 printf("\n"); 2039 printf("\n");
@@ -2152,6 +2162,42 @@ memzcan(void)
2152 printf("%.8x\n", a - mskip); 2162 printf("%.8x\n", a - mskip);
2153} 2163}
2154 2164
2165void proccall(void)
2166{
2167 unsigned long args[8];
2168 unsigned long ret;
2169 int i;
2170 typedef unsigned long (*callfunc_t)(unsigned long, unsigned long,
2171 unsigned long, unsigned long, unsigned long,
2172 unsigned long, unsigned long, unsigned long);
2173 callfunc_t func;
2174
2175 if (!scanhex(&adrs))
2176 return;
2177 if (termch != '\n')
2178 termch = 0;
2179 for (i = 0; i < 8; ++i)
2180 args[i] = 0;
2181 for (i = 0; i < 8; ++i) {
2182 if (!scanhex(&args[i]) || termch == '\n')
2183 break;
2184 termch = 0;
2185 }
2186 func = (callfunc_t) adrs;
2187 ret = 0;
2188 if (setjmp(bus_error_jmp) == 0) {
2189 catch_memory_errors = 1;
2190 sync();
2191 ret = func(args[0], args[1], args[2], args[3],
2192 args[4], args[5], args[6], args[7]);
2193 sync();
2194 printf("return value is %x\n", ret);
2195 } else {
2196 printf("*** %x exception occurred\n", fault_except);
2197 }
2198 catch_memory_errors = 0;
2199}
2200
2155/* Input scanning routines */ 2201/* Input scanning routines */
2156int 2202int
2157skipbl(void) 2203skipbl(void)
@@ -2174,7 +2220,12 @@ static char *regnames[N_PTREGS] = {
2174 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 2220 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
2175 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 2221 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
2176 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", 2222 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
2177 "pc", "msr", "or3", "ctr", "lr", "xer", "ccr", "softe", 2223 "pc", "msr", "or3", "ctr", "lr", "xer", "ccr",
2224#ifdef CONFIG_PPC64
2225 "softe",
2226#else
2227 "mq",
2228#endif
2178 "trap", "dar", "dsisr", "res" 2229 "trap", "dar", "dsisr", "res"
2179}; 2230};
2180 2231
@@ -2280,8 +2331,7 @@ scannl(void)
2280 c = inchar(); 2331 c = inchar();
2281} 2332}
2282 2333
2283int 2334int hexdigit(int c)
2284hexdigit(int c)
2285{ 2335{
2286 if( '0' <= c && c <= '9' ) 2336 if( '0' <= c && c <= '9' )
2287 return c - '0'; 2337 return c - '0';
@@ -2378,7 +2428,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
2378 const char *name = NULL; 2428 const char *name = NULL;
2379 unsigned long offset, size; 2429 unsigned long offset, size;
2380 2430
2381 printf("%.16lx", address); 2431 printf(REG, address);
2382 if (setjmp(bus_error_jmp) == 0) { 2432 if (setjmp(bus_error_jmp) == 0) {
2383 catch_memory_errors = 1; 2433 catch_memory_errors = 1;
2384 sync(); 2434 sync();
@@ -2399,55 +2449,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
2399 printf("%s", after); 2449 printf("%s", after);
2400} 2450}
2401 2451
2402static void debug_trace(void) 2452#ifdef CONFIG_PPC64
2403{
2404 unsigned long val, cmd, on;
2405
2406 cmd = skipbl();
2407 if (cmd == '\n') {
2408 /* show current state */
2409 unsigned long i;
2410 printf("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
2411 for (i = 0; i < PPCDBG_NUM_FLAGS ;i++) {
2412 on = PPCDBG_BITVAL(i) & ppc64_debug_switch;
2413 printf("%02x %s %12s ", i, on ? "on " : "off", trace_names[i] ? trace_names[i] : "");
2414 if (((i+1) % 3) == 0)
2415 printf("\n");
2416 }
2417 printf("\n");
2418 return;
2419 }
2420 while (cmd != '\n') {
2421 on = 1; /* default if no sign given */
2422 while (cmd == '+' || cmd == '-') {
2423 on = (cmd == '+');
2424 cmd = inchar();
2425 if (cmd == ' ' || cmd == '\n') { /* Turn on or off based on + or - */
2426 ppc64_debug_switch = on ? PPCDBG_ALL:PPCDBG_NONE;
2427 printf("Setting all values to %s...\n", on ? "on" : "off");
2428 if (cmd == '\n') return;
2429 else cmd = skipbl();
2430 }
2431 else
2432 termch = cmd;
2433 }
2434 termch = cmd; /* not +/- ... let scanhex see it */
2435 scanhex((void *)&val);
2436 if (val >= 64) {
2437 printf("Value %x out of range:\n", val);
2438 return;
2439 }
2440 if (on) {
2441 ppc64_debug_switch |= PPCDBG_BITVAL(val);
2442 printf("enable debug %x %s\n", val, trace_names[val] ? trace_names[val] : "");
2443 } else {
2444 ppc64_debug_switch &= ~PPCDBG_BITVAL(val);
2445 printf("disable debug %x %s\n", val, trace_names[val] ? trace_names[val] : "");
2446 }
2447 cmd = skipbl();
2448 }
2449}
2450
2451static void dump_slb(void) 2453static void dump_slb(void)
2452{ 2454{
2453 int i; 2455 int i;
@@ -2484,6 +2486,27 @@ static void dump_stab(void)
2484 } 2486 }
2485} 2487}
2486 2488
2489void dump_segments(void)
2490{
2491 if (cpu_has_feature(CPU_FTR_SLB))
2492 dump_slb();
2493 else
2494 dump_stab();
2495}
2496#endif
2497
2498#ifdef CONFIG_PPC_STD_MMU_32
2499void dump_segments(void)
2500{
2501 int i;
2502
2503 printf("sr0-15 =");
2504 for (i = 0; i < 16; ++i)
2505 printf(" %x", mfsrin(i));
2506 printf("\n");
2507}
2508#endif
2509
2487void xmon_init(int enable) 2510void xmon_init(int enable)
2488{ 2511{
2489 if (enable) { 2512 if (enable) {
@@ -2504,11 +2527,3 @@ void xmon_init(int enable)
2504 __debugger_fault_handler = NULL; 2527 __debugger_fault_handler = NULL;
2505 } 2528 }
2506} 2529}
2507
2508void dump_segments(void)
2509{
2510 if (cpu_has_feature(CPU_FTR_SLB))
2511 dump_slb();
2512 else
2513 dump_stab();
2514}
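
Many of the hunks above replace hard-coded "%.16lx" formats with the REG macro so the same xmon.c prints registers at the right width on 32-bit and 64-bit builds; the macro works through ordinary C string-literal concatenation. A small sketch of the idiom, stand-alone, with CONFIG_PPC64 used only as the example's compile-time switch:

#include <stdio.h>

/* Width-dependent register format, selected at compile time. */
#ifdef CONFIG_PPC64
#define REG "%.16lx"
#else
#define REG "%.8lx"
#endif

int main(void)
{
    unsigned long sp = 0xc0000000u;

    /* Adjacent string literals merge, so "["REG"] " becomes a single
     * format string such as "[%.8lx] " on a 32-bit build.
     */
    printf("["REG"] stack pointer\n", sp);
    return 0;
}
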
diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
index 11726e2a4ec8..b42789f8eb76 100644
--- a/arch/ppc/8xx_io/commproc.c
+++ b/arch/ppc/8xx_io/commproc.c
@@ -73,7 +73,7 @@ cpm_mask_irq(unsigned int irq)
73{ 73{
74 int cpm_vec = irq - CPM_IRQ_OFFSET; 74 int cpm_vec = irq - CPM_IRQ_OFFSET;
75 75
76 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr &= ~(1 << cpm_vec); 76 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, in_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr) & ~(1 << cpm_vec));
77} 77}
78 78
79static void 79static void
@@ -81,7 +81,7 @@ cpm_unmask_irq(unsigned int irq)
81{ 81{
82 int cpm_vec = irq - CPM_IRQ_OFFSET; 82 int cpm_vec = irq - CPM_IRQ_OFFSET;
83 83
84 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr |= (1 << cpm_vec); 84 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, in_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr) | (1 << cpm_vec));
85} 85}
86 86
87static void 87static void
@@ -95,7 +95,7 @@ cpm_eoi(unsigned int irq)
95{ 95{
96 int cpm_vec = irq - CPM_IRQ_OFFSET; 96 int cpm_vec = irq - CPM_IRQ_OFFSET;
97 97
98 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr = (1 << cpm_vec); 98 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr, (1 << cpm_vec));
99} 99}
100 100
101struct hw_interrupt_type cpm_pic = { 101struct hw_interrupt_type cpm_pic = {
@@ -133,7 +133,7 @@ m8xx_cpm_reset(void)
133 * manual recommends it. 133 * manual recommends it.
134 * Bit 25, FAM can also be set to use FEC aggressive mode (860T). 134 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
135 */ 135 */
 136 imp->im_siu_conf.sc_sdcr = 1; 136 out_be32(&imp->im_siu_conf.sc_sdcr, 1);
137 137
138 /* Reclaim the DP memory for our use. */ 138 /* Reclaim the DP memory for our use. */
139 m8xx_cpm_dpinit(); 139 m8xx_cpm_dpinit();
@@ -178,10 +178,10 @@ cpm_interrupt_init(void)
178 178
179 /* Initialize the CPM interrupt controller. 179 /* Initialize the CPM interrupt controller.
180 */ 180 */
181 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr = 181 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr,
182 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | 182 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
183 ((CPM_INTERRUPT/2) << 13) | CICR_HP_MASK; 183 ((CPM_INTERRUPT/2) << 13) | CICR_HP_MASK);
184 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr = 0; 184 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, 0);
185 185
186 /* install the CPM interrupt controller routines for the CPM 186 /* install the CPM interrupt controller routines for the CPM
187 * interrupt vectors 187 * interrupt vectors
@@ -198,7 +198,7 @@ cpm_interrupt_init(void)
198 if (setup_irq(CPM_IRQ_OFFSET + CPMVEC_ERROR, &cpm_error_irqaction)) 198 if (setup_irq(CPM_IRQ_OFFSET + CPMVEC_ERROR, &cpm_error_irqaction))
199 panic("Could not allocate CPM error IRQ!"); 199 panic("Could not allocate CPM error IRQ!");
200 200
201 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr |= CICR_IEN; 201 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr, in_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr) | CICR_IEN);
202} 202}
203 203
204/* 204/*
@@ -212,8 +212,8 @@ cpm_get_irq(struct pt_regs *regs)
212 /* Get the vector by setting the ACK bit and then reading 212 /* Get the vector by setting the ACK bit and then reading
213 * the register. 213 * the register.
214 */ 214 */
215 ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr = 1; 215 out_be16(&((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr, 1);
216 cpm_vec = ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr; 216 cpm_vec = in_be16(&((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr);
217 cpm_vec >>= 11; 217 cpm_vec >>= 11;
218 218
219 return cpm_vec; 219 return cpm_vec;
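
The commproc.c hunks above convert direct volatile struct-field accesses (|=, &=, plain assignment) into in_be32()/out_be32() accessor calls. A hedged sketch of the shape of that change, with mmio_read32()/mmio_write32() as simplified stand-ins that omit the byte-swapping and ordering barriers the real accessors provide:

#include <stdint.h>

/* Hypothetical stand-ins for in_be32()/out_be32(); the kernel
 * versions also handle endianness and insert I/O barriers.
 */
static inline uint32_t mmio_read32(volatile uint32_t *reg)
{
    return *reg;
}

static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
    *reg = val;
}

/* The pattern the hunks switch to: read-modify-write a mask register
 * through accessors instead of |= / &= on a struct field, so every
 * device access goes through one well-defined, barrier-able path.
 */
static void irq_mask_bit(volatile uint32_t *cimr, unsigned int vec)
{
    mmio_write32(cimr, mmio_read32(cimr) & ~(1u << vec));
}

static void irq_unmask_bit(volatile uint32_t *cimr, unsigned int vec)
{
    mmio_write32(cimr, mmio_read32(cimr) | (1u << vec));
}
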
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 776941c75672..114b90fdea24 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -568,6 +568,7 @@ config CHESTNUT
568 568
569config SPRUCE 569config SPRUCE
570 bool "IBM-Spruce" 570 bool "IBM-Spruce"
571 select PPC_INDIRECT_PCI
571 572
572config HDPU 573config HDPU
573 bool "Sky-HDPU" 574 bool "Sky-HDPU"
@@ -588,27 +589,35 @@ config EV64260
588 589
589config LOPEC 590config LOPEC
590 bool "Motorola-LoPEC" 591 bool "Motorola-LoPEC"
592 select PPC_I8259
591 593
592config MVME5100 594config MVME5100
593 bool "Motorola-MVME5100" 595 bool "Motorola-MVME5100"
596 select PPC_INDIRECT_PCI
594 597
595config PPLUS 598config PPLUS
596 bool "Motorola-PowerPlus" 599 bool "Motorola-PowerPlus"
600 select PPC_I8259
601 select PPC_INDIRECT_PCI
597 602
598config PRPMC750 603config PRPMC750
599 bool "Motorola-PrPMC750" 604 bool "Motorola-PrPMC750"
605 select PPC_INDIRECT_PCI
600 606
601config PRPMC800 607config PRPMC800
602 bool "Motorola-PrPMC800" 608 bool "Motorola-PrPMC800"
609 select PPC_INDIRECT_PCI
603 610
604config SANDPOINT 611config SANDPOINT
605 bool "Motorola-Sandpoint" 612 bool "Motorola-Sandpoint"
613 select PPC_I8259
606 help 614 help
607 Select SANDPOINT if configuring for a Motorola Sandpoint X3 615 Select SANDPOINT if configuring for a Motorola Sandpoint X3
608 (any flavor). 616 (any flavor).
609 617
610config RADSTONE_PPC7D 618config RADSTONE_PPC7D
611 bool "Radstone Technology PPC7D board" 619 bool "Radstone Technology PPC7D board"
620 select PPC_I8259
612 621
613config PAL4 622config PAL4
614 bool "SBS-Palomar4" 623 bool "SBS-Palomar4"
@@ -616,6 +625,7 @@ config PAL4
616config GEMINI 625config GEMINI
617 bool "Synergy-Gemini" 626 bool "Synergy-Gemini"
618 depends on BROKEN 627 depends on BROKEN
628 select PPC_INDIRECT_PCI
619 help 629 help
620 Select Gemini if configuring for a Synergy Microsystems' Gemini 630 Select Gemini if configuring for a Synergy Microsystems' Gemini
621 series Single Board Computer. More information is available at: 631 series Single Board Computer. More information is available at:
@@ -747,13 +757,16 @@ config CPM2
747 on it (826x, 827x, 8560). 757 on it (826x, 827x, 8560).
748 758
749config PPC_CHRP 759config PPC_CHRP
750 bool 760 bool " Common Hardware Reference Platform (CHRP) based machines"
751 depends on PPC_MULTIPLATFORM 761 depends on PPC_MULTIPLATFORM
762 select PPC_I8259
763 select PPC_INDIRECT_PCI
752 default y 764 default y
753 765
754config PPC_PMAC 766config PPC_PMAC
755 bool 767 bool " Apple PowerMac based machines"
756 depends on PPC_MULTIPLATFORM 768 depends on PPC_MULTIPLATFORM
769 select PPC_INDIRECT_PCI
757 default y 770 default y
758 771
759config PPC_PMAC64 772config PPC_PMAC64
@@ -762,8 +775,10 @@ config PPC_PMAC64
762 default y 775 default y
763 776
764config PPC_PREP 777config PPC_PREP
765 bool 778 bool " PowerPC Reference Platform (PReP) based machines"
766 depends on PPC_MULTIPLATFORM 779 depends on PPC_MULTIPLATFORM
780 select PPC_I8259
781 select PPC_INDIRECT_PCI
767 default y 782 default y
768 783
769config PPC_OF 784config PPC_OF
@@ -797,6 +812,7 @@ config MV64360 # Really MV64360 & MV64460
797config MV64X60 812config MV64X60
798 bool 813 bool
799 depends on (GT64260 || MV64360) 814 depends on (GT64260 || MV64360)
815 select PPC_INDIRECT_PCI
800 default y 816 default y
801 817
802menu "Set bridge options" 818menu "Set bridge options"
@@ -845,6 +861,7 @@ config EPIC_SERIAL_MODE
845config MPC10X_BRIDGE 861config MPC10X_BRIDGE
846 bool 862 bool
847 depends on POWERPMC250 || LOPEC || SANDPOINT 863 depends on POWERPMC250 || LOPEC || SANDPOINT
864 select PPC_INDIRECT_PCI
848 default y 865 default y
849 866
850config MPC10X_OPENPIC 867config MPC10X_OPENPIC
@@ -870,6 +887,7 @@ config HARRIER_STORE_GATHERING
870config MVME5100_IPMC761_PRESENT 887config MVME5100_IPMC761_PRESENT
871 bool "MVME5100 configured with an IPMC761" 888 bool "MVME5100 configured with an IPMC761"
872 depends on MVME5100 889 depends on MVME5100
890 select PPC_I8259
873 891
874config SPRUCE_BAUD_33M 892config SPRUCE_BAUD_33M
875 bool "Spruce baud clock support" 893 bool "Spruce baud clock support"
@@ -1127,6 +1145,7 @@ menu "Bus options"
1127config ISA 1145config ISA
1128 bool "Support for ISA-bus hardware" 1146 bool "Support for ISA-bus hardware"
1129 depends on PPC_PREP || PPC_CHRP 1147 depends on PPC_PREP || PPC_CHRP
1148 select PPC_I8259
1130 help 1149 help
1131 Find out whether you have ISA slots on your motherboard. ISA is the 1150 Find out whether you have ISA slots on your motherboard. ISA is the
1132 name of a bus system, i.e. the way the CPU talks to the other stuff 1151 name of a bus system, i.e. the way the CPU talks to the other stuff
@@ -1139,6 +1158,17 @@ config GENERIC_ISA_DMA
1139 depends on POWER3 || POWER4 || 6xx && !CPM2 1158 depends on POWER3 || POWER4 || 6xx && !CPM2
1140 default y 1159 default y
1141 1160
1161config PPC_I8259
1162 bool
1163 default y if 85xx
1164 default n
1165
1166config PPC_INDIRECT_PCI
1167 bool
1168 depends on PCI
1169 default y if 40x || 44x || 85xx || 83xx
1170 default n
1171
1142config EISA 1172config EISA
1143 bool 1173 bool
1144 help 1174 help
@@ -1175,6 +1205,7 @@ config MPC83xx_PCI2
1175config PCI_QSPAN 1205config PCI_QSPAN
1176 bool "QSpan PCI" 1206 bool "QSpan PCI"
1177 depends on !4xx && !CPM2 && 8xx 1207 depends on !4xx && !CPM2 && 8xx
1208 select PPC_I8259
1178 help 1209 help
1179 Say Y here if you have a system based on a Motorola 8xx-series 1210 Say Y here if you have a system based on a Motorola 8xx-series
1180 embedded processor with a QSPAN PCI interface, otherwise say N. 1211 embedded processor with a QSPAN PCI interface, otherwise say N.
@@ -1182,6 +1213,7 @@ config PCI_QSPAN
1182config PCI_8260 1213config PCI_8260
1183 bool 1214 bool
1184 depends on PCI && 8260 1215 depends on PCI && 8260
1216 select PPC_INDIRECT_PCI
1185 default y 1217 default y
1186 1218
1187config 8260_PCI9 1219config 8260_PCI9
@@ -1368,7 +1400,7 @@ endmenu
1368 1400
1369source "lib/Kconfig" 1401source "lib/Kconfig"
1370 1402
1371source "arch/ppc/oprofile/Kconfig" 1403source "arch/powerpc/oprofile/Kconfig"
1372 1404
1373source "arch/ppc/Kconfig.debug" 1405source "arch/ppc/Kconfig.debug"
1374 1406
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 16e2675f3270..94d5716fa7c3 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -26,6 +26,10 @@ CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
26AFLAGS += -Iarch/$(ARCH) 26AFLAGS += -Iarch/$(ARCH)
27CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \ 27CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \
28 -ffixed-r2 -mmultiple 28 -ffixed-r2 -mmultiple
29
30# No AltiVec instruction when building kernel
31CFLAGS += $(call cc-option, -mno-altivec)
32
29CPP = $(CC) -E $(CFLAGS) 33CPP = $(CC) -E $(CFLAGS)
30# Temporary hack until we have migrated to asm-powerpc 34# Temporary hack until we have migrated to asm-powerpc
31LINUXINCLUDE += -Iarch/$(ARCH)/include 35LINUXINCLUDE += -Iarch/$(ARCH)/include
@@ -57,10 +61,12 @@ head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o
57 61
58head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o 62head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o
59head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o 63head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
60head-$(CONFIG_PPC_FPU) += arch/ppc/kernel/fpu.o 64head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
61 65
62core-y += arch/ppc/kernel/ arch/ppc/platforms/ \ 66core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \
63 arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/ 67 arch/ppc/platforms/ \
68 arch/ppc/mm/ arch/ppc/lib/ \
69 arch/ppc/syslib/ arch/powerpc/sysdev/
64core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/ 70core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/
65core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/ 71core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/
66core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/ 72core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/
@@ -71,7 +77,7 @@ drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/
71drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/ 77drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/
72drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/ 78drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
73 79
74drivers-$(CONFIG_OPROFILE) += arch/ppc/oprofile/ 80drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
75 81
76BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm 82BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
77 83
diff --git a/arch/ppc/boot/of1275/claim.c b/arch/ppc/boot/of1275/claim.c
index e060292ae2a7..13169a5c4339 100644
--- a/arch/ppc/boot/of1275/claim.c
+++ b/arch/ppc/boot/of1275/claim.c
@@ -29,6 +29,7 @@ claim(unsigned int virt, unsigned int size, unsigned int align)
29 args.virt = virt; 29 args.virt = virt;
30 args.size = size; 30 args.size = size;
31 args.align = align; 31 args.align = align;
32 args.ret = (void *) 0;
32 (*of_prom_entry)(&args); 33 (*of_prom_entry)(&args);
33 return args.ret; 34 return args.ret;
34} 35}
diff --git a/arch/ppc/boot/openfirmware/chrpmain.c b/arch/ppc/boot/openfirmware/chrpmain.c
index effe4a0624b0..245dbd9fc120 100644
--- a/arch/ppc/boot/openfirmware/chrpmain.c
+++ b/arch/ppc/boot/openfirmware/chrpmain.c
@@ -78,7 +78,7 @@ boot(int a1, int a2, void *prom)
78 begin_avail = avail_high = avail_ram; 78 begin_avail = avail_high = avail_ram;
79 end_avail = scratch + sizeof(scratch); 79 end_avail = scratch + sizeof(scratch);
80 printf("gunzipping (0x%p <- 0x%p:0x%p)...", dst, im, im+len); 80 printf("gunzipping (0x%p <- 0x%p:0x%p)...", dst, im, im+len);
81 gunzip(dst, 0x400000, im, &len); 81 gunzip(dst, PROG_SIZE - PROG_START, im, &len);
82 printf("done %u bytes\n\r", len); 82 printf("done %u bytes\n\r", len);
83 printf("%u bytes of heap consumed, max in use %u\n\r", 83 printf("%u bytes of heap consumed, max in use %u\n\r",
84 avail_high - begin_avail, heap_max); 84 avail_high - begin_avail, heap_max);
diff --git a/arch/ppc/boot/openfirmware/coffmain.c b/arch/ppc/boot/openfirmware/coffmain.c
index 04ba9d57e110..2da8855e2be0 100644
--- a/arch/ppc/boot/openfirmware/coffmain.c
+++ b/arch/ppc/boot/openfirmware/coffmain.c
@@ -38,7 +38,7 @@ static char heap[SCRATCH_SIZE];
38static unsigned long ram_start = 0; 38static unsigned long ram_start = 0;
39static unsigned long ram_end = 0x1000000; 39static unsigned long ram_end = 0x1000000;
40 40
41static unsigned long prog_start = 0x900000; 41static unsigned long prog_start = 0x800000;
42static unsigned long prog_size = 0x700000; 42static unsigned long prog_size = 0x700000;
43 43
44typedef void (*kernel_start_t)(int, int, void *); 44typedef void (*kernel_start_t)(int, int, void *);
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index b1457a8a9c0f..b35346df1e37 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -1,6 +1,7 @@
1# 1#
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4ifneq ($(CONFIG_PPC_MERGE),y)
4 5
5extra-$(CONFIG_PPC_STD_MMU) := head.o 6extra-$(CONFIG_PPC_STD_MMU) := head.o
6extra-$(CONFIG_40x) := head_4xx.o 7extra-$(CONFIG_40x) := head_4xx.o
@@ -9,13 +10,12 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
9extra-$(CONFIG_8xx) := head_8xx.o 10extra-$(CONFIG_8xx) := head_8xx.o
10extra-$(CONFIG_6xx) += idle_6xx.o 11extra-$(CONFIG_6xx) += idle_6xx.o
11extra-$(CONFIG_POWER4) += idle_power4.o 12extra-$(CONFIG_POWER4) += idle_power4.o
12extra-$(CONFIG_PPC_FPU) += fpu.o
13extra-y += vmlinux.lds 13extra-y += vmlinux.lds
14 14
15obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ 15obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
16 process.o signal.o ptrace.o align.o \ 16 process.o align.o \
17 semaphore.o syscalls.o setup.o \ 17 setup.o \
18 cputable.o ppc_htab.o perfmon.o 18 ppc_htab.o
19obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o 19obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
20obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o 20obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
21obj-$(CONFIG_POWER4) += cpu_setup_power4.o 21obj-$(CONFIG_POWER4) += cpu_setup_power4.o
@@ -25,7 +25,6 @@ obj-$(CONFIG_PCI) += pci.o
25obj-$(CONFIG_KGDB) += ppc-stub.o 25obj-$(CONFIG_KGDB) += ppc-stub.o
26obj-$(CONFIG_SMP) += smp.o smp-tbsync.o 26obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
27obj-$(CONFIG_TAU) += temp.o 27obj-$(CONFIG_TAU) += temp.o
28obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
29ifndef CONFIG_E200 28ifndef CONFIG_E200
30obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o 29obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
31endif 30endif
@@ -35,3 +34,21 @@ ifndef CONFIG_MATH_EMULATION
35obj-$(CONFIG_8xx) += softemu8xx.o 34obj-$(CONFIG_8xx) += softemu8xx.o
36endif 35endif
37 36
37# These are here while we do the architecture merge
38
39else
40obj-y := irq.o idle.o \
41 align.o
42obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
43obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
44obj-$(CONFIG_MODULES) += module.o
45obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
46obj-$(CONFIG_PCI) += pci.o
47obj-$(CONFIG_KGDB) += ppc-stub.o
48obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
49obj-$(CONFIG_TAU) += temp.o
50ifndef CONFIG_E200
51obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
52endif
53obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
54endif
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index ff81da9598d8..ab398c4b70b6 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -375,7 +375,7 @@ fix_alignment(struct pt_regs *regs)
375#ifdef CONFIG_PPC_FPU 375#ifdef CONFIG_PPC_FPU
376 preempt_disable(); 376 preempt_disable();
377 enable_kernel_fp(); 377 enable_kernel_fp();
378 cvt_fd(&data.f, &data.d, &current->thread.fpscr); 378 cvt_fd(&data.f, &data.d, &current->thread);
379 preempt_enable(); 379 preempt_enable();
380#else 380#else
381 return 0; 381 return 0;
@@ -385,7 +385,7 @@ fix_alignment(struct pt_regs *regs)
385#ifdef CONFIG_PPC_FPU 385#ifdef CONFIG_PPC_FPU
386 preempt_disable(); 386 preempt_disable();
387 enable_kernel_fp(); 387 enable_kernel_fp();
388 cvt_df(&data.d, &data.f, &current->thread.fpscr); 388 cvt_df(&data.d, &data.f, &current->thread);
389 preempt_enable(); 389 preempt_enable();
390#else 390#else
391 return 0; 391 return 0;
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index d9ad1d776d0e..968261d69572 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -130,10 +130,10 @@ main(void)
130 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); 130 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
131 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); 131 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
132 132
133 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
133 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 134 DEFINE(TI_TASK, offsetof(struct thread_info, task));
134 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); 135 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
135 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 136 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
136 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
137 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 137 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
138 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 138 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
139 139
@@ -141,6 +141,7 @@ main(void)
141 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); 141 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
142 DEFINE(pbe_next, offsetof(struct pbe, next)); 142 DEFINE(pbe_next, offsetof(struct pbe, next));
143 143
144 DEFINE(TASK_SIZE, TASK_SIZE);
144 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); 145 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
145 return 0; 146 return 0;
146} 147}
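
The asm-offsets.c hunk swaps TI_LOCAL_FLAGS for TI_SC_NOERR; the file's whole job is to export struct offsets as named constants that assembly such as entry.S can use. A toy, hedged version of that idea, using a cut-down thread_info and printing the constants instead of going through the kernel's "->sym" assembler trick:

#include <stddef.h>
#include <stdio.h>

/* Cut-down stand-in for the kernel's struct thread_info; the field
 * layout here is illustrative only.
 */
struct thread_info {
    void *task;
    unsigned long flags;
    unsigned char syscall_noerror;
    int cpu;
};

/* Emit each offset as a #define so hand-written assembly can address
 * C struct fields without hard-coding numbers.
 */
#define DEFINE(sym, val) printf("#define %s %lu\n", #sym, (unsigned long)(val))

int main(void)
{
    DEFINE(TI_TASK, offsetof(struct thread_info, task));
    DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
    DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
    DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
    return 0;
}
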
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
index ba396438ede3..55ed7716636f 100644
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ b/arch/ppc/kernel/cpu_setup_6xx.S
@@ -17,8 +17,6 @@
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18#include <asm/cache.h> 18#include <asm/cache.h>
19 19
20_GLOBAL(__setup_cpu_601)
21 blr
22_GLOBAL(__setup_cpu_603) 20_GLOBAL(__setup_cpu_603)
23 b setup_common_caches 21 b setup_common_caches
24_GLOBAL(__setup_cpu_604) 22_GLOBAL(__setup_cpu_604)
@@ -292,10 +290,10 @@ _GLOBAL(__init_fpu_registers)
292#define CS_SIZE 32 290#define CS_SIZE 32
293 291
294 .data 292 .data
295 .balign L1_CACHE_LINE_SIZE 293 .balign L1_CACHE_BYTES
296cpu_state_storage: 294cpu_state_storage:
297 .space CS_SIZE 295 .space CS_SIZE
298 .balign L1_CACHE_LINE_SIZE,0 296 .balign L1_CACHE_BYTES,0
299 .text 297 .text
300 298
301/* Called in normal context to backup CPU 0 state. This 299/* Called in normal context to backup CPU 0 state. This
diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S
index 7e4fbb653724..d7bfd60e21fc 100644
--- a/arch/ppc/kernel/cpu_setup_power4.S
+++ b/arch/ppc/kernel/cpu_setup_power4.S
@@ -63,8 +63,6 @@ _GLOBAL(__970_cpu_preinit)
63 isync 63 isync
64 blr 64 blr
65 65
66_GLOBAL(__setup_cpu_power4)
67 blr
68_GLOBAL(__setup_cpu_ppc970) 66_GLOBAL(__setup_cpu_ppc970)
69 mfspr r0,SPRN_HID0 67 mfspr r0,SPRN_HID0
70 li r11,5 /* clear DOZE and SLEEP */ 68 li r11,5 /* clear DOZE and SLEEP */
@@ -88,10 +86,10 @@ _GLOBAL(__setup_cpu_ppc970)
88#define CS_SIZE 32 86#define CS_SIZE 32
89 87
90 .data 88 .data
91 .balign L1_CACHE_LINE_SIZE 89 .balign L1_CACHE_BYTES
92cpu_state_storage: 90cpu_state_storage:
93 .space CS_SIZE 91 .space CS_SIZE
94 .balign L1_CACHE_LINE_SIZE,0 92 .balign L1_CACHE_BYTES,0
95 .text 93 .text
96 94
97/* Called in normal context to backup CPU 0 state. This 95/* Called in normal context to backup CPU 0 state. This
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 03d4886869f3..f044edbb454f 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -200,9 +200,8 @@ _GLOBAL(DoSyscall)
200 bl do_show_syscall 200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */ 201#endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,18 /* current_thread_info() */ 202 rlwinm r10,r1,0,0,18 /* current_thread_info() */
203 lwz r11,TI_LOCAL_FLAGS(r10) 203 li r11,0
204 rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR 204 stb r11,TI_SC_NOERR(r10)
205 stw r11,TI_LOCAL_FLAGS(r10)
206 lwz r11,TI_FLAGS(r10) 205 lwz r11,TI_FLAGS(r10)
207 andi. r11,r11,_TIF_SYSCALL_T_OR_A 206 andi. r11,r11,_TIF_SYSCALL_T_OR_A
208 bne- syscall_dotrace 207 bne- syscall_dotrace
@@ -227,8 +226,8 @@ ret_from_syscall:
227 cmplw 0,r3,r11 226 cmplw 0,r3,r11
228 rlwinm r12,r1,0,0,18 /* current_thread_info() */ 227 rlwinm r12,r1,0,0,18 /* current_thread_info() */
229 blt+ 30f 228 blt+ 30f
230 lwz r11,TI_LOCAL_FLAGS(r12) 229 lbz r11,TI_SC_NOERR(r12)
231 andi. r11,r11,_TIFL_FORCE_NOERROR 230 cmpwi r11,0
232 bne 30f 231 bne 30f
233 neg r3,r3 232 neg r3,r3
234 lwz r10,_CCR(r1) /* Set SO bit in CR */ 233 lwz r10,_CCR(r1) /* Set SO bit in CR */
@@ -633,7 +632,8 @@ sigreturn_exit:
633 rlwinm r12,r1,0,0,18 /* current_thread_info() */ 632 rlwinm r12,r1,0,0,18 /* current_thread_info() */
634 lwz r9,TI_FLAGS(r12) 633 lwz r9,TI_FLAGS(r12)
635 andi. r0,r9,_TIF_SYSCALL_T_OR_A 634 andi. r0,r9,_TIF_SYSCALL_T_OR_A
636 bnel- do_syscall_trace_leave 635 beq+ ret_from_except_full
636 bl do_syscall_trace_leave
637 /* fall through */ 637 /* fall through */
638 638
639 .globl ret_from_except_full 639 .globl ret_from_except_full
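The entry.S change above replaces the old TI_LOCAL_FLAGS/_TIFL_FORCE_NOERROR word with a single TI_SC_NOERR byte in thread_info. In C terms, the error handling at syscall exit now behaves roughly like the sketch below; the structure and helper names are illustrative only, and the error threshold is whatever value the surrounding (unshown) code loads into r11:

    /* loose C sketch of the new ret_from_syscall error test; not actual kernel code */
    struct ti_sketch {
        unsigned char sc_noerr;     /* TI_SC_NOERR: non-zero means "do not flag an error" */
    };

    static long syscall_exit_fixup(long ret, unsigned long err_threshold,
                                   const struct ti_sketch *ti)
    {
        /* cmplw / blt+ 30f: small return values are normal results */
        if ((unsigned long)ret < err_threshold)
            return ret;
        /* lbz / cmpwi / bne 30f: a set TI_SC_NOERR byte suppresses the error conversion */
        if (ti->sc_noerr)
            return ret;
        /* neg r3,r3 (the assembly also sets the SO bit in CR for the caller) */
        return -ret;
    }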
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1960fb8c259c..c5a890dca9cf 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -349,12 +349,12 @@ i##n: \
349 349
350/* System reset */ 350/* System reset */
351/* core99 pmac starts the secondary here by changing the vector, and 351/* core99 pmac starts the secondary here by changing the vector, and
352 putting it back to what it was (UnknownException) when done. */ 352 putting it back to what it was (unknown_exception) when done. */
353#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP) 353#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
354 . = 0x100 354 . = 0x100
355 b __secondary_start_gemini 355 b __secondary_start_gemini
356#else 356#else
357 EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) 357 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
358#endif 358#endif
359 359
360/* Machine check */ 360/* Machine check */
@@ -389,7 +389,7 @@ i##n: \
389 cmpwi cr1,r4,0 389 cmpwi cr1,r4,0
390 bne cr1,1f 390 bne cr1,1f
391#endif 391#endif
392 EXC_XFER_STD(0x200, MachineCheckException) 392 EXC_XFER_STD(0x200, machine_check_exception)
393#ifdef CONFIG_PPC_CHRP 393#ifdef CONFIG_PPC_CHRP
3941: b machine_check_in_rtas 3941: b machine_check_in_rtas
395#endif 395#endif
@@ -456,10 +456,10 @@ Alignment:
456 mfspr r5,SPRN_DSISR 456 mfspr r5,SPRN_DSISR
457 stw r5,_DSISR(r11) 457 stw r5,_DSISR(r11)
458 addi r3,r1,STACK_FRAME_OVERHEAD 458 addi r3,r1,STACK_FRAME_OVERHEAD
459 EXC_XFER_EE(0x600, AlignmentException) 459 EXC_XFER_EE(0x600, alignment_exception)
460 460
461/* Program check exception */ 461/* Program check exception */
462 EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) 462 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
463 463
464/* Floating-point unavailable */ 464/* Floating-point unavailable */
465 . = 0x800 465 . = 0x800
@@ -467,13 +467,13 @@ FPUnavailable:
467 EXCEPTION_PROLOG 467 EXCEPTION_PROLOG
468 bne load_up_fpu /* if from user, just load it up */ 468 bne load_up_fpu /* if from user, just load it up */
469 addi r3,r1,STACK_FRAME_OVERHEAD 469 addi r3,r1,STACK_FRAME_OVERHEAD
470 EXC_XFER_EE_LITE(0x800, KernelFP) 470 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
471 471
472/* Decrementer */ 472/* Decrementer */
473 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) 473 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
474 474
475 EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) 475 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
476 EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) 476 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
477 477
478/* System call */ 478/* System call */
479 . = 0xc00 479 . = 0xc00
@@ -482,8 +482,8 @@ SystemCall:
482 EXC_XFER_EE_LITE(0xc00, DoSyscall) 482 EXC_XFER_EE_LITE(0xc00, DoSyscall)
483 483
484/* Single step - not used on 601 */ 484/* Single step - not used on 601 */
485 EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) 485 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
486 EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) 486 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
487 487
488/* 488/*
489 * The Altivec unavailable trap is at 0x0f20. Foo. 489 * The Altivec unavailable trap is at 0x0f20. Foo.
@@ -502,7 +502,7 @@ SystemCall:
502Trap_0f: 502Trap_0f:
503 EXCEPTION_PROLOG 503 EXCEPTION_PROLOG
504 addi r3,r1,STACK_FRAME_OVERHEAD 504 addi r3,r1,STACK_FRAME_OVERHEAD
505 EXC_XFER_EE(0xf00, UnknownException) 505 EXC_XFER_EE(0xf00, unknown_exception)
506 506
507/* 507/*
508 * Handle TLB miss for instruction on 603/603e. 508 * Handle TLB miss for instruction on 603/603e.
@@ -702,44 +702,44 @@ DataStoreTLBMiss:
702 rfi 702 rfi
703 703
704#ifndef CONFIG_ALTIVEC 704#ifndef CONFIG_ALTIVEC
705#define AltivecAssistException UnknownException 705#define altivec_assist_exception unknown_exception
706#endif 706#endif
707 707
708 EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE) 708 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
709 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) 709 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
710 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 710 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
711#ifdef CONFIG_POWER4 711#ifdef CONFIG_POWER4
712 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 712 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
713 EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE) 713 EXCEPTION(0x1700, Trap_17, altivec_assist_exception, EXC_XFER_EE)
714 EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD) 714 EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
715#else /* !CONFIG_POWER4 */ 715#else /* !CONFIG_POWER4 */
716 EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE) 716 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
717 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) 717 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
718 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 718 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
719#endif /* CONFIG_POWER4 */ 719#endif /* CONFIG_POWER4 */
720 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 720 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
721 EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) 721 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
722 EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) 722 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
723 EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) 723 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
724 EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) 724 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
725 EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) 725 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
726 EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) 726 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
727 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) 727 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
728 EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE) 728 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
729 EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE) 729 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
730 EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE) 730 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
731 EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE) 731 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
732 EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE) 732 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
733 EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE) 733 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
734 EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE) 734 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
735 EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE) 735 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
736 EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE) 736 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
737 EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE) 737 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
738 EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE) 738 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
739 EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE) 739 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
740 EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE) 740 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
741 EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE) 741 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
742 EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE) 742 EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
743 743
744 .globl mol_trampoline 744 .globl mol_trampoline
745 .set mol_trampoline, i0x2f00 745 .set mol_trampoline, i0x2f00
@@ -751,7 +751,7 @@ AltiVecUnavailable:
751#ifdef CONFIG_ALTIVEC 751#ifdef CONFIG_ALTIVEC
752 bne load_up_altivec /* if from user, just load it up */ 752 bne load_up_altivec /* if from user, just load it up */
753#endif /* CONFIG_ALTIVEC */ 753#endif /* CONFIG_ALTIVEC */
754 EXC_XFER_EE_LITE(0xf20, AltivecUnavailException) 754 EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
755 755
756#ifdef CONFIG_PPC64BRIDGE 756#ifdef CONFIG_PPC64BRIDGE
757DataAccess: 757DataAccess:
@@ -767,12 +767,12 @@ DataSegment:
767 addi r3,r1,STACK_FRAME_OVERHEAD 767 addi r3,r1,STACK_FRAME_OVERHEAD
768 mfspr r4,SPRN_DAR 768 mfspr r4,SPRN_DAR
769 stw r4,_DAR(r11) 769 stw r4,_DAR(r11)
770 EXC_XFER_STD(0x380, UnknownException) 770 EXC_XFER_STD(0x380, unknown_exception)
771 771
772InstructionSegment: 772InstructionSegment:
773 EXCEPTION_PROLOG 773 EXCEPTION_PROLOG
774 addi r3,r1,STACK_FRAME_OVERHEAD 774 addi r3,r1,STACK_FRAME_OVERHEAD
775 EXC_XFER_STD(0x480, UnknownException) 775 EXC_XFER_STD(0x480, unknown_exception)
776#endif /* CONFIG_PPC64BRIDGE */ 776#endif /* CONFIG_PPC64BRIDGE */
777 777
778#ifdef CONFIG_ALTIVEC 778#ifdef CONFIG_ALTIVEC
@@ -804,7 +804,7 @@ load_up_altivec:
804 beq 1f 804 beq 1f
805 add r4,r4,r6 805 add r4,r4,r6
806 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ 806 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
807 SAVE_32VR(0,r10,r4) 807 SAVE_32VRS(0,r10,r4)
808 mfvscr vr0 808 mfvscr vr0
809 li r10,THREAD_VSCR 809 li r10,THREAD_VSCR
810 stvx vr0,r10,r4 810 stvx vr0,r10,r4
@@ -824,7 +824,7 @@ load_up_altivec:
824 stw r4,THREAD_USED_VR(r5) 824 stw r4,THREAD_USED_VR(r5)
825 lvx vr0,r10,r5 825 lvx vr0,r10,r5
826 mtvscr vr0 826 mtvscr vr0
827 REST_32VR(0,r10,r5) 827 REST_32VRS(0,r10,r5)
828#ifndef CONFIG_SMP 828#ifndef CONFIG_SMP
829 subi r4,r5,THREAD 829 subi r4,r5,THREAD
830 sub r4,r4,r6 830 sub r4,r4,r6
@@ -870,7 +870,7 @@ giveup_altivec:
870 addi r3,r3,THREAD /* want THREAD of task */ 870 addi r3,r3,THREAD /* want THREAD of task */
871 lwz r5,PT_REGS(r3) 871 lwz r5,PT_REGS(r3)
872 cmpwi 0,r5,0 872 cmpwi 0,r5,0
873 SAVE_32VR(0, r4, r3) 873 SAVE_32VRS(0, r4, r3)
874 mfvscr vr0 874 mfvscr vr0
875 li r4,THREAD_VSCR 875 li r4,THREAD_VSCR
876 stvx vr0,r4,r3 876 stvx vr0,r4,r3
@@ -916,7 +916,7 @@ relocate_kernel:
916copy_and_flush: 916copy_and_flush:
917 addi r5,r5,-4 917 addi r5,r5,-4
918 addi r6,r6,-4 918 addi r6,r6,-4
9194: li r0,L1_CACHE_LINE_SIZE/4 9194: li r0,L1_CACHE_BYTES/4
920 mtctr r0 920 mtctr r0
9213: addi r6,r6,4 /* copy a cache line */ 9213: addi r6,r6,4 /* copy a cache line */
922 lwzx r0,r6,r4 922 lwzx r0,r6,r4
@@ -1059,7 +1059,6 @@ __secondary_start:
1059 1059
1060 lis r3,-KERNELBASE@h 1060 lis r3,-KERNELBASE@h
1061 mr r4,r24 1061 mr r4,r24
1062 bl identify_cpu
1063 bl call_setup_cpu /* Call setup_cpu for this CPU */ 1062 bl call_setup_cpu /* Call setup_cpu for this CPU */
1064#ifdef CONFIG_6xx 1063#ifdef CONFIG_6xx
1065 lis r3,-KERNELBASE@h 1064 lis r3,-KERNELBASE@h
@@ -1109,11 +1108,6 @@ __secondary_start:
1109 * Those generic dummy functions are kept for CPUs not 1108 * Those generic dummy functions are kept for CPUs not
1110 * included in CONFIG_6xx 1109 * included in CONFIG_6xx
1111 */ 1110 */
1112_GLOBAL(__setup_cpu_power3)
1113 blr
1114_GLOBAL(__setup_cpu_generic)
1115 blr
1116
1117#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) 1111#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
1118_GLOBAL(__save_cpu_setup) 1112_GLOBAL(__save_cpu_setup)
1119 blr 1113 blr
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 599245b0407e..8b49679fad54 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -309,13 +309,13 @@ skpinv: addi r4,r4,1 /* Increment */
309 309
310interrupt_base: 310interrupt_base:
311 /* Critical Input Interrupt */ 311 /* Critical Input Interrupt */
312 CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) 312 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
313 313
314 /* Machine Check Interrupt */ 314 /* Machine Check Interrupt */
315#ifdef CONFIG_440A 315#ifdef CONFIG_440A
316 MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 316 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
317#else 317#else
318 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 318 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
319#endif 319#endif
320 320
321 /* Data Storage Interrupt */ 321 /* Data Storage Interrupt */
@@ -442,7 +442,7 @@ interrupt_base:
442#ifdef CONFIG_PPC_FPU 442#ifdef CONFIG_PPC_FPU
443 FP_UNAVAILABLE_EXCEPTION 443 FP_UNAVAILABLE_EXCEPTION
444#else 444#else
445 EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) 445 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
446#endif 446#endif
447 447
448 /* System Call Interrupt */ 448 /* System Call Interrupt */
@@ -451,21 +451,21 @@ interrupt_base:
451 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 451 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
452 452
453 /* Auxillary Processor Unavailable Interrupt */ 453 /* Auxillary Processor Unavailable Interrupt */
454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) 454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
455 455
456 /* Decrementer Interrupt */ 456 /* Decrementer Interrupt */
457 DECREMENTER_EXCEPTION 457 DECREMENTER_EXCEPTION
458 458
459 /* Fixed Internal Timer Interrupt */ 459 /* Fixed Internal Timer Interrupt */
460 /* TODO: Add FIT support */ 460 /* TODO: Add FIT support */
461 EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE) 461 EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
462 462
463 /* Watchdog Timer Interrupt */ 463 /* Watchdog Timer Interrupt */
464 /* TODO: Add watchdog support */ 464 /* TODO: Add watchdog support */
465#ifdef CONFIG_BOOKE_WDT 465#ifdef CONFIG_BOOKE_WDT
466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) 466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
467#else 467#else
468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException) 468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
469#endif 469#endif
470 470
471 /* Data TLB Error Interrupt */ 471 /* Data TLB Error Interrupt */
@@ -743,14 +743,18 @@ _GLOBAL(set_context)
743 * goes at the beginning of the data segment, which is page-aligned. 743 * goes at the beginning of the data segment, which is page-aligned.
744 */ 744 */
745 .data 745 .data
746_GLOBAL(sdata) 746 .align 12
747_GLOBAL(empty_zero_page) 747 .globl sdata
748sdata:
749 .globl empty_zero_page
750empty_zero_page:
748 .space 4096 751 .space 4096
749 752
750/* 753/*
751 * To support >32-bit physical addresses, we use an 8KB pgdir. 754 * To support >32-bit physical addresses, we use an 8KB pgdir.
752 */ 755 */
753_GLOBAL(swapper_pg_dir) 756 .globl swapper_pg_dir
757swapper_pg_dir:
754 .space 8192 758 .space 8192
755 759
756/* Reserved 4k for the critical exception stack & 4k for the machine 760/* Reserved 4k for the critical exception stack & 4k for the machine
@@ -759,13 +763,15 @@ _GLOBAL(swapper_pg_dir)
759 .align 12 763 .align 12
760exception_stack_bottom: 764exception_stack_bottom:
761 .space BOOKE_EXCEPTION_STACK_SIZE 765 .space BOOKE_EXCEPTION_STACK_SIZE
762_GLOBAL(exception_stack_top) 766 .globl exception_stack_top
767exception_stack_top:
763 768
764/* 769/*
765 * This space gets a copy of optional info passed to us by the bootstrap 770 * This space gets a copy of optional info passed to us by the bootstrap
766 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 771 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
767 */ 772 */
768_GLOBAL(cmd_line) 773 .globl cmd_line
774cmd_line:
769 .space 512 775 .space 512
770 776
771/* 777/*
@@ -774,5 +780,3 @@ _GLOBAL(cmd_line)
774 */ 780 */
775abatron_pteptrs: 781abatron_pteptrs:
776 .space 8 782 .space 8
777
778
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S
index 8562b807b37c..10c261c67021 100644
--- a/arch/ppc/kernel/head_4xx.S
+++ b/arch/ppc/kernel/head_4xx.S
@@ -245,12 +245,12 @@ label:
245/* 245/*
246 * 0x0100 - Critical Interrupt Exception 246 * 0x0100 - Critical Interrupt Exception
247 */ 247 */
248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException) 248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
249 249
250/* 250/*
251 * 0x0200 - Machine Check Exception 251 * 0x0200 - Machine Check Exception
252 */ 252 */
253 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 253 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
254 254
255/* 255/*
256 * 0x0300 - Data Storage Exception 256 * 0x0300 - Data Storage Exception
@@ -405,7 +405,7 @@ label:
405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ 405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
406 stw r4,_DEAR(r11) 406 stw r4,_DEAR(r11)
407 addi r3,r1,STACK_FRAME_OVERHEAD 407 addi r3,r1,STACK_FRAME_OVERHEAD
408 EXC_XFER_EE(0x600, AlignmentException) 408 EXC_XFER_EE(0x600, alignment_exception)
409 409
410/* 0x0700 - Program Exception */ 410/* 0x0700 - Program Exception */
411 START_EXCEPTION(0x0700, ProgramCheck) 411 START_EXCEPTION(0x0700, ProgramCheck)
@@ -413,21 +413,21 @@ label:
413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */ 413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
414 stw r4,_ESR(r11) 414 stw r4,_ESR(r11)
415 addi r3,r1,STACK_FRAME_OVERHEAD 415 addi r3,r1,STACK_FRAME_OVERHEAD
416 EXC_XFER_STD(0x700, ProgramCheckException) 416 EXC_XFER_STD(0x700, program_check_exception)
417 417
418 EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE) 418 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE) 419 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE) 420 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
421 EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE) 421 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
422 422
423/* 0x0C00 - System Call Exception */ 423/* 0x0C00 - System Call Exception */
424 START_EXCEPTION(0x0C00, SystemCall) 424 START_EXCEPTION(0x0C00, SystemCall)
425 NORMAL_EXCEPTION_PROLOG 425 NORMAL_EXCEPTION_PROLOG
426 EXC_XFER_EE_LITE(0xc00, DoSyscall) 426 EXC_XFER_EE_LITE(0xc00, DoSyscall)
427 427
428 EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE) 428 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE) 429 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
430 EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE) 430 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
431 431
432/* 0x1000 - Programmable Interval Timer (PIT) Exception */ 432/* 0x1000 - Programmable Interval Timer (PIT) Exception */
433 START_EXCEPTION(0x1000, Decrementer) 433 START_EXCEPTION(0x1000, Decrementer)
@@ -444,14 +444,14 @@ label:
444 444
445/* 0x1010 - Fixed Interval Timer (FIT) Exception 445/* 0x1010 - Fixed Interval Timer (FIT) Exception
446*/ 446*/
447 STND_EXCEPTION(0x1010, FITException, UnknownException) 447 STND_EXCEPTION(0x1010, FITException, unknown_exception)
448 448
449/* 0x1020 - Watchdog Timer (WDT) Exception 449/* 0x1020 - Watchdog Timer (WDT) Exception
450*/ 450*/
451#ifdef CONFIG_BOOKE_WDT 451#ifdef CONFIG_BOOKE_WDT
452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) 452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
453#else 453#else
454 CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException) 454 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
455#endif 455#endif
456#endif 456#endif
457 457
@@ -656,25 +656,25 @@ label:
656 mfspr r10, SPRN_SPRG0 656 mfspr r10, SPRN_SPRG0
657 b InstructionAccess 657 b InstructionAccess
658 658
659 EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE) 659 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE) 660 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 661 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
662 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 662 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
663#ifdef CONFIG_IBM405_ERR51 663#ifdef CONFIG_IBM405_ERR51
664 /* 405GP errata 51 */ 664 /* 405GP errata 51 */
665 START_EXCEPTION(0x1700, Trap_17) 665 START_EXCEPTION(0x1700, Trap_17)
666 b DTLBMiss 666 b DTLBMiss
667#else 667#else
668 EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) 668 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
669#endif 669#endif
670 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 670 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 671 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE) 672 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE) 673 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE) 674 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE) 675 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE) 676 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE) 677 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
678 678
679/* Check for a single step debug exception while in an exception 679/* Check for a single step debug exception while in an exception
680 * handler before state has been saved. This is to catch the case 680 * handler before state has been saved. This is to catch the case
@@ -988,10 +988,14 @@ _GLOBAL(set_context)
988 * goes at the beginning of the data segment, which is page-aligned. 988 * goes at the beginning of the data segment, which is page-aligned.
989 */ 989 */
990 .data 990 .data
991_GLOBAL(sdata) 991 .align 12
992_GLOBAL(empty_zero_page) 992 .globl sdata
993sdata:
994 .globl empty_zero_page
995empty_zero_page:
993 .space 4096 996 .space 4096
994_GLOBAL(swapper_pg_dir) 997 .globl swapper_pg_dir
998swapper_pg_dir:
995 .space 4096 999 .space 4096
996 1000
997 1001
@@ -1001,12 +1005,14 @@ _GLOBAL(swapper_pg_dir)
1001exception_stack_bottom: 1005exception_stack_bottom:
1002 .space 4096 1006 .space 4096
1003critical_stack_top: 1007critical_stack_top:
1004_GLOBAL(exception_stack_top) 1008 .globl exception_stack_top
1009exception_stack_top:
1005 1010
1006/* This space gets a copy of optional info passed to us by the bootstrap 1011/* This space gets a copy of optional info passed to us by the bootstrap
1007 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 1012 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1008 */ 1013 */
1009_GLOBAL(cmd_line) 1014 .globl cmd_line
1015cmd_line:
1010 .space 512 1016 .space 512
1011 1017
1012/* Room for two PTE pointers, usually the kernel and current user pointers 1018/* Room for two PTE pointers, usually the kernel and current user pointers
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index cb1a3a54a026..de0978742221 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -203,7 +203,7 @@ i##n: \
203 ret_from_except) 203 ret_from_except)
204 204
205/* System reset */ 205/* System reset */
206 EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) 206 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
207 207
208/* Machine check */ 208/* Machine check */
209 . = 0x200 209 . = 0x200
@@ -214,7 +214,7 @@ MachineCheck:
214 mfspr r5,SPRN_DSISR 214 mfspr r5,SPRN_DSISR
215 stw r5,_DSISR(r11) 215 stw r5,_DSISR(r11)
216 addi r3,r1,STACK_FRAME_OVERHEAD 216 addi r3,r1,STACK_FRAME_OVERHEAD
217 EXC_XFER_STD(0x200, MachineCheckException) 217 EXC_XFER_STD(0x200, machine_check_exception)
218 218
219/* Data access exception. 219/* Data access exception.
220 * This is "never generated" by the MPC8xx. We jump to it for other 220 * This is "never generated" by the MPC8xx. We jump to it for other
@@ -252,20 +252,20 @@ Alignment:
252 mfspr r5,SPRN_DSISR 252 mfspr r5,SPRN_DSISR
253 stw r5,_DSISR(r11) 253 stw r5,_DSISR(r11)
254 addi r3,r1,STACK_FRAME_OVERHEAD 254 addi r3,r1,STACK_FRAME_OVERHEAD
255 EXC_XFER_EE(0x600, AlignmentException) 255 EXC_XFER_EE(0x600, alignment_exception)
256 256
257/* Program check exception */ 257/* Program check exception */
258 EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) 258 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
259 259
260/* No FPU on MPC8xx. This exception is not supposed to happen. 260/* No FPU on MPC8xx. This exception is not supposed to happen.
261*/ 261*/
262 EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD) 262 EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
263 263
264/* Decrementer */ 264/* Decrementer */
265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) 265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
266 266
267 EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) 267 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
268 EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) 268 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
269 269
270/* System call */ 270/* System call */
271 . = 0xc00 271 . = 0xc00
@@ -274,9 +274,9 @@ SystemCall:
274 EXC_XFER_EE_LITE(0xc00, DoSyscall) 274 EXC_XFER_EE_LITE(0xc00, DoSyscall)
275 275
276/* Single step - not used on 601 */ 276/* Single step - not used on 601 */
277 EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) 277 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
278 EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) 278 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
279 EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE) 279 EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
280 280
281/* On the MPC8xx, this is a software emulation interrupt. It occurs 281/* On the MPC8xx, this is a software emulation interrupt. It occurs
282 * for all unimplemented and illegal instructions. 282 * for all unimplemented and illegal instructions.
@@ -540,22 +540,22 @@ DataTLBError:
540#endif 540#endif
541 b DataAccess 541 b DataAccess
542 542
543 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 543 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
544 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 544 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
545 EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) 545 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
546 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 546 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
547 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 547 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
548 EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) 548 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
549 EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) 549 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
550 550
551/* On the MPC8xx, these next four traps are used for development 551/* On the MPC8xx, these next four traps are used for development
552 * support of breakpoints and such. Someday I will get around to 552 * support of breakpoints and such. Someday I will get around to
553 * using them. 553 * using them.
554 */ 554 */
555 EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) 555 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
556 EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) 556 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
557 EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) 557 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
558 EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) 558 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
559 559
560 . = 0x2000 560 . = 0x2000
561 561
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index 9342acf12e72..aeb349b47af3 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -335,7 +335,7 @@ label:
335 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ 335 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
336 stw r4,_DEAR(r11); \ 336 stw r4,_DEAR(r11); \
337 addi r3,r1,STACK_FRAME_OVERHEAD; \ 337 addi r3,r1,STACK_FRAME_OVERHEAD; \
338 EXC_XFER_EE(0x0600, AlignmentException) 338 EXC_XFER_EE(0x0600, alignment_exception)
339 339
340#define PROGRAM_EXCEPTION \ 340#define PROGRAM_EXCEPTION \
341 START_EXCEPTION(Program) \ 341 START_EXCEPTION(Program) \
@@ -343,7 +343,7 @@ label:
343 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \ 343 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
344 stw r4,_ESR(r11); \ 344 stw r4,_ESR(r11); \
345 addi r3,r1,STACK_FRAME_OVERHEAD; \ 345 addi r3,r1,STACK_FRAME_OVERHEAD; \
346 EXC_XFER_STD(0x0700, ProgramCheckException) 346 EXC_XFER_STD(0x0700, program_check_exception)
347 347
348#define DECREMENTER_EXCEPTION \ 348#define DECREMENTER_EXCEPTION \
349 START_EXCEPTION(Decrementer) \ 349 START_EXCEPTION(Decrementer) \
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
index 8e52e8408316..5063c603fad4 100644
--- a/arch/ppc/kernel/head_fsl_booke.S
+++ b/arch/ppc/kernel/head_fsl_booke.S
@@ -426,14 +426,14 @@ skpinv: addi r6,r6,1 /* Increment */
426 426
427interrupt_base: 427interrupt_base:
428 /* Critical Input Interrupt */ 428 /* Critical Input Interrupt */
429 CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) 429 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
430 430
431 /* Machine Check Interrupt */ 431 /* Machine Check Interrupt */
432#ifdef CONFIG_E200 432#ifdef CONFIG_E200
433 /* no RFMCI, MCSRRs on E200 */ 433 /* no RFMCI, MCSRRs on E200 */
434 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 434 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
435#else 435#else
436 MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 436 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
437#endif 437#endif
438 438
439 /* Data Storage Interrupt */ 439 /* Data Storage Interrupt */
@@ -542,9 +542,9 @@ interrupt_base:
542#else 542#else
543#ifdef CONFIG_E200 543#ifdef CONFIG_E200
544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */ 544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */
545 EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE) 545 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
546#else 546#else
547 EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) 547 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
548#endif 548#endif
549#endif 549#endif
550 550
@@ -554,20 +554,20 @@ interrupt_base:
554 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 554 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
555 555
556 /* Auxillary Processor Unavailable Interrupt */ 556 /* Auxillary Processor Unavailable Interrupt */
557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) 557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
558 558
559 /* Decrementer Interrupt */ 559 /* Decrementer Interrupt */
560 DECREMENTER_EXCEPTION 560 DECREMENTER_EXCEPTION
561 561
562 /* Fixed Internal Timer Interrupt */ 562 /* Fixed Internal Timer Interrupt */
563 /* TODO: Add FIT support */ 563 /* TODO: Add FIT support */
564 EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) 564 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
565 565
566 /* Watchdog Timer Interrupt */ 566 /* Watchdog Timer Interrupt */
567#ifdef CONFIG_BOOKE_WDT 567#ifdef CONFIG_BOOKE_WDT
568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) 568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
569#else 569#else
570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException) 570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
571#endif 571#endif
572 572
573 /* Data TLB Error Interrupt */ 573 /* Data TLB Error Interrupt */
@@ -696,21 +696,21 @@ interrupt_base:
696 addi r3,r1,STACK_FRAME_OVERHEAD 696 addi r3,r1,STACK_FRAME_OVERHEAD
697 EXC_XFER_EE_LITE(0x2010, KernelSPE) 697 EXC_XFER_EE_LITE(0x2010, KernelSPE)
698#else 698#else
699 EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) 699 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
700#endif /* CONFIG_SPE */ 700#endif /* CONFIG_SPE */
701 701
702 /* SPE Floating Point Data */ 702 /* SPE Floating Point Data */
703#ifdef CONFIG_SPE 703#ifdef CONFIG_SPE
704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); 704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
705#else 705#else
706 EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) 706 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
707#endif /* CONFIG_SPE */ 707#endif /* CONFIG_SPE */
708 708
709 /* SPE Floating Point Round */ 709 /* SPE Floating Point Round */
710 EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) 710 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
711 711
712 /* Performance Monitor */ 712 /* Performance Monitor */
713 EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD) 713 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
714 714
715 715
716 /* Debug Interrupt */ 716 /* Debug Interrupt */
@@ -853,7 +853,7 @@ load_up_spe:
853 cmpi 0,r4,0 853 cmpi 0,r4,0
854 beq 1f 854 beq 1f
855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ 855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
856 SAVE_32EVR(0,r10,r4) 856 SAVE_32EVRS(0,r10,r4)
857 evxor evr10, evr10, evr10 /* clear out evr10 */ 857 evxor evr10, evr10, evr10 /* clear out evr10 */
858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ 858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
859 li r5,THREAD_ACC 859 li r5,THREAD_ACC
@@ -873,7 +873,7 @@ load_up_spe:
873 stw r4,THREAD_USED_SPE(r5) 873 stw r4,THREAD_USED_SPE(r5)
874 evlddx evr4,r10,r5 874 evlddx evr4,r10,r5
875 evmra evr4,evr4 875 evmra evr4,evr4
876 REST_32EVR(0,r10,r5) 876 REST_32EVRS(0,r10,r5)
877#ifndef CONFIG_SMP 877#ifndef CONFIG_SMP
878 subi r4,r5,THREAD 878 subi r4,r5,THREAD
879 stw r4,last_task_used_spe@l(r3) 879 stw r4,last_task_used_spe@l(r3)
@@ -963,7 +963,7 @@ _GLOBAL(giveup_spe)
963 addi r3,r3,THREAD /* want THREAD of task */ 963 addi r3,r3,THREAD /* want THREAD of task */
964 lwz r5,PT_REGS(r3) 964 lwz r5,PT_REGS(r3)
965 cmpi 0,r5,0 965 cmpi 0,r5,0
966 SAVE_32EVR(0, r4, r3) 966 SAVE_32EVRS(0, r4, r3)
967 evxor evr6, evr6, evr6 /* clear out evr6 */ 967 evxor evr6, evr6, evr6 /* clear out evr6 */
968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ 968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
969 li r4,THREAD_ACC 969 li r4,THREAD_ACC
@@ -1028,10 +1028,14 @@ _GLOBAL(set_context)
1028 * goes at the beginning of the data segment, which is page-aligned. 1028 * goes at the beginning of the data segment, which is page-aligned.
1029 */ 1029 */
1030 .data 1030 .data
1031_GLOBAL(sdata) 1031 .align 12
1032_GLOBAL(empty_zero_page) 1032 .globl sdata
1033sdata:
1034 .globl empty_zero_page
1035empty_zero_page:
1033 .space 4096 1036 .space 4096
1034_GLOBAL(swapper_pg_dir) 1037 .globl swapper_pg_dir
1038swapper_pg_dir:
1035 .space 4096 1039 .space 4096
1036 1040
1037/* Reserved 4k for the critical exception stack & 4k for the machine 1041/* Reserved 4k for the critical exception stack & 4k for the machine
@@ -1040,13 +1044,15 @@ _GLOBAL(swapper_pg_dir)
1040 .align 12 1044 .align 12
1041exception_stack_bottom: 1045exception_stack_bottom:
1042 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS 1046 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1043_GLOBAL(exception_stack_top) 1047 .globl exception_stack_top
1048exception_stack_top:
1044 1049
1045/* 1050/*
1046 * This space gets a copy of optional info passed to us by the bootstrap 1051 * This space gets a copy of optional info passed to us by the bootstrap
1047 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 1052 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1048 */ 1053 */
1049_GLOBAL(cmd_line) 1054 .globl cmd_line
1055cmd_line:
1050 .space 512 1056 .space 512
1051 1057
1052/* 1058/*
@@ -1055,4 +1061,3 @@ _GLOBAL(cmd_line)
1055 */ 1061 */
1056abatron_pteptrs: 1062abatron_pteptrs:
1057 .space 8 1063 .space 8
1058
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index fba29c876b62..11e5b44713f7 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -32,6 +32,7 @@
32#include <asm/cache.h> 32#include <asm/cache.h>
33#include <asm/cputable.h> 33#include <asm/cputable.h>
34#include <asm/machdep.h> 34#include <asm/machdep.h>
35#include <asm/smp.h>
35 36
36void default_idle(void) 37void default_idle(void)
37{ 38{
@@ -74,7 +75,7 @@ void cpu_idle(void)
74/* 75/*
75 * Register the sysctl to set/clear powersave_nap. 76 * Register the sysctl to set/clear powersave_nap.
76 */ 77 */
77extern unsigned long powersave_nap; 78extern int powersave_nap;
78 79
79static ctl_table powersave_nap_ctl_table[]={ 80static ctl_table powersave_nap_ctl_table[]={
80 { 81 {
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index 8843f3af230f..772e428aaa59 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -57,6 +57,7 @@
57#include <asm/cache.h> 57#include <asm/cache.h>
58#include <asm/prom.h> 58#include <asm/prom.h>
59#include <asm/ptrace.h> 59#include <asm/ptrace.h>
60#include <asm/machdep.h>
60 61
61#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 62#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
62 63
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
index 861115249b35..d7f4e982b539 100644
--- a/arch/ppc/kernel/l2cr.S
+++ b/arch/ppc/kernel/l2cr.S
@@ -203,7 +203,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
203 * L1 icache 203 * L1 icache
204 */ 204 */
205 b 20f 205 b 20f
206 .balign L1_CACHE_LINE_SIZE 206 .balign L1_CACHE_BYTES
20722: 20722:
208 sync 208 sync
209 mtspr SPRN_L2CR,r3 209 mtspr SPRN_L2CR,r3
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 90d917d2e856..3056ede2424d 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -125,9 +125,8 @@ _GLOBAL(identify_cpu)
1251: 1251:
126 addis r6,r3,cur_cpu_spec@ha 126 addis r6,r3,cur_cpu_spec@ha
127 addi r6,r6,cur_cpu_spec@l 127 addi r6,r6,cur_cpu_spec@l
128 slwi r4,r4,2
129 sub r8,r8,r3 128 sub r8,r8,r3
130 stwx r8,r4,r6 129 stw r8,0(r6)
131 blr 130 blr
132 131
133/* 132/*
@@ -186,19 +185,18 @@ _GLOBAL(do_cpu_ftr_fixups)
186 * 185 *
187 * Setup function is called with: 186 * Setup function is called with:
188 * r3 = data offset 187 * r3 = data offset
189 * r4 = CPU number 188 * r4 = ptr to CPU spec (relocated)
190 * r5 = ptr to CPU spec (relocated)
191 */ 189 */
192_GLOBAL(call_setup_cpu) 190_GLOBAL(call_setup_cpu)
193 addis r5,r3,cur_cpu_spec@ha 191 addis r4,r3,cur_cpu_spec@ha
194 addi r5,r5,cur_cpu_spec@l 192 addi r4,r4,cur_cpu_spec@l
195 slwi r4,r24,2 193 lwz r4,0(r4)
196 lwzx r5,r4,r5 194 add r4,r4,r3
195 lwz r5,CPU_SPEC_SETUP(r4)
196 cmpi 0,r5,0
197 add r5,r5,r3 197 add r5,r5,r3
198 lwz r6,CPU_SPEC_SETUP(r5) 198 beqlr
199 add r6,r6,r3 199 mtctr r5
200 mtctr r6
201 mr r4,r24
202 bctr 200 bctr
203 201
204#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx) 202#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
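With the slwi/lwzx indexing removed above, call_setup_cpu no longer selects a per-CPU entry from an array; it dereferences the single cur_cpu_spec pointer, relocates it by the data offset in r3, and tail-calls the CPU_SPEC_SETUP entry if one exists. A loose C equivalent, with illustrative type and field names (the real cpu_spec layout is defined elsewhere):

    /* illustrative C rendering of the new call_setup_cpu flow; names are assumptions */
    struct cpu_spec_sketch {
        void (*cpu_setup)(unsigned long offset, struct cpu_spec_sketch *spec);
    };

    extern struct cpu_spec_sketch *cur_cpu_spec;   /* single pointer, no per-CPU array */

    static void call_setup_cpu_sketch(unsigned long reloc_offset)
    {
        /* lwz r4,0(r4); add r4,r4,r3: fetch and relocate the spec pointer */
        struct cpu_spec_sketch *spec =
            (struct cpu_spec_sketch *)((unsigned long)cur_cpu_spec + reloc_offset);

        /* lwz r5,CPU_SPEC_SETUP(r4); beqlr: nothing to do if no setup routine */
        if (!spec->cpu_setup)
            return;
        /* mtctr/bctr: the routine gets r3 = offset, r4 = relocated spec
         * (the assembly also relocates the function pointer itself before branching) */
        spec->cpu_setup(reloc_offset, spec);
    }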
@@ -273,134 +271,6 @@ _GLOBAL(low_choose_7447a_dfs)
273 271
274#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */ 272#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
275 273
276/* void local_save_flags_ptr(unsigned long *flags) */
277_GLOBAL(local_save_flags_ptr)
278 mfmsr r4
279 stw r4,0(r3)
280 blr
281 /*
282 * Need these nops here for taking over save/restore to
283 * handle lost intrs
284 * -- Cort
285 */
286 nop
287 nop
288 nop
289 nop
290 nop
291 nop
292 nop
293 nop
294 nop
295 nop
296 nop
297 nop
298 nop
299 nop
300 nop
301 nop
302 nop
303_GLOBAL(local_save_flags_ptr_end)
304
305/* void local_irq_restore(unsigned long flags) */
306_GLOBAL(local_irq_restore)
307/*
308 * Just set/clear the MSR_EE bit through restore/flags but do not
309 * change anything else. This is needed by the RT system and makes
310 * sense anyway.
311 * -- Cort
312 */
313 mfmsr r4
314 /* Copy all except the MSR_EE bit from r4 (current MSR value)
315 to r3. This is the sort of thing the rlwimi instruction is
316 designed for. -- paulus. */
317 rlwimi r3,r4,0,17,15
318 /* Check if things are setup the way we want _already_. */
319 cmpw 0,r3,r4
320 beqlr
3211: SYNC
322 mtmsr r3
323 SYNC
324 blr
325 nop
326 nop
327 nop
328 nop
329 nop
330 nop
331 nop
332 nop
333 nop
334 nop
335 nop
336 nop
337 nop
338 nop
339 nop
340 nop
341 nop
342 nop
343 nop
344_GLOBAL(local_irq_restore_end)
345
346_GLOBAL(local_irq_disable)
347 mfmsr r0 /* Get current interrupt state */
348 rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
349 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
350 SYNC /* Some chip revs have problems here... */
351 mtmsr r0 /* Update machine state */
352 blr /* Done */
353 /*
354 * Need these nops here for taking over save/restore to
355 * handle lost intrs
356 * -- Cort
357 */
358 nop
359 nop
360 nop
361 nop
362 nop
363 nop
364 nop
365 nop
366 nop
367 nop
368 nop
369 nop
370 nop
371 nop
372 nop
373_GLOBAL(local_irq_disable_end)
374
375_GLOBAL(local_irq_enable)
376 mfmsr r3 /* Get current state */
377 ori r3,r3,MSR_EE /* Turn on 'EE' bit */
378 SYNC /* Some chip revs have problems here... */
379 mtmsr r3 /* Update machine state */
380 blr
381 /*
382 * Need these nops here for taking over save/restore to
383 * handle lost intrs
384 * -- Cort
385 */
386 nop
387 nop
388 nop
389 nop
390 nop
391 nop
392 nop
393 nop
394 nop
395 nop
396 nop
397 nop
398 nop
399 nop
400 nop
401 nop
402_GLOBAL(local_irq_enable_end)
403
404/* 274/*
405 * complement mask on the msr then "or" some values on. 275 * complement mask on the msr then "or" some values on.
406 * _nmask_and_or_msr(nmask, value_to_or) 276 * _nmask_and_or_msr(nmask, value_to_or)
@@ -628,21 +498,21 @@ _GLOBAL(flush_icache_range)
628BEGIN_FTR_SECTION 498BEGIN_FTR_SECTION
629 blr /* for 601, do nothing */ 499 blr /* for 601, do nothing */
630END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 500END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
631 li r5,L1_CACHE_LINE_SIZE-1 501 li r5,L1_CACHE_BYTES-1
632 andc r3,r3,r5 502 andc r3,r3,r5
633 subf r4,r3,r4 503 subf r4,r3,r4
634 add r4,r4,r5 504 add r4,r4,r5
635 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 505 srwi. r4,r4,L1_CACHE_SHIFT
636 beqlr 506 beqlr
637 mtctr r4 507 mtctr r4
638 mr r6,r3 508 mr r6,r3
6391: dcbst 0,r3 5091: dcbst 0,r3
640 addi r3,r3,L1_CACHE_LINE_SIZE 510 addi r3,r3,L1_CACHE_BYTES
641 bdnz 1b 511 bdnz 1b
642 sync /* wait for dcbst's to get to ram */ 512 sync /* wait for dcbst's to get to ram */
643 mtctr r4 513 mtctr r4
6442: icbi 0,r6 5142: icbi 0,r6
645 addi r6,r6,L1_CACHE_LINE_SIZE 515 addi r6,r6,L1_CACHE_BYTES
646 bdnz 2b 516 bdnz 2b
647 sync /* additional sync needed on g4 */ 517 sync /* additional sync needed on g4 */
648 isync 518 isync
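All of the cache routines in this file share the same range arithmetic, now spelled with L1_CACHE_BYTES and L1_CACHE_SHIFT: align the start address down to a line boundary, round the length up, and shift to get the number of lines to touch. A small C sketch of that computation, assuming a 32-byte line purely for illustration:

    /* sketch of the li/andc/subf/add/srwi sequence above; 32-byte line is an assumption */
    #define L1_BYTES_SKETCH  32
    #define L1_SHIFT_SKETCH  5

    static unsigned long cache_lines_in_range(unsigned long start, unsigned long stop)
    {
        unsigned long aligned = start & ~(unsigned long)(L1_BYTES_SKETCH - 1); /* andc  */
        unsigned long len = (stop - aligned) + (L1_BYTES_SKETCH - 1);          /* subf, add */
        return len >> L1_SHIFT_SKETCH;       /* srwi.: loop count for dcbst/icbi/dcbf */
    }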
@@ -655,16 +525,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
655 * clean_dcache_range(unsigned long start, unsigned long stop) 525 * clean_dcache_range(unsigned long start, unsigned long stop)
656 */ 526 */
657_GLOBAL(clean_dcache_range) 527_GLOBAL(clean_dcache_range)
658 li r5,L1_CACHE_LINE_SIZE-1 528 li r5,L1_CACHE_BYTES-1
659 andc r3,r3,r5 529 andc r3,r3,r5
660 subf r4,r3,r4 530 subf r4,r3,r4
661 add r4,r4,r5 531 add r4,r4,r5
662 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 532 srwi. r4,r4,L1_CACHE_SHIFT
663 beqlr 533 beqlr
664 mtctr r4 534 mtctr r4
665 535
6661: dcbst 0,r3 5361: dcbst 0,r3
667 addi r3,r3,L1_CACHE_LINE_SIZE 537 addi r3,r3,L1_CACHE_BYTES
668 bdnz 1b 538 bdnz 1b
669 sync /* wait for dcbst's to get to ram */ 539 sync /* wait for dcbst's to get to ram */
670 blr 540 blr
@@ -676,16 +546,16 @@ _GLOBAL(clean_dcache_range)
676 * flush_dcache_range(unsigned long start, unsigned long stop) 546 * flush_dcache_range(unsigned long start, unsigned long stop)
677 */ 547 */
678_GLOBAL(flush_dcache_range) 548_GLOBAL(flush_dcache_range)
679 li r5,L1_CACHE_LINE_SIZE-1 549 li r5,L1_CACHE_BYTES-1
680 andc r3,r3,r5 550 andc r3,r3,r5
681 subf r4,r3,r4 551 subf r4,r3,r4
682 add r4,r4,r5 552 add r4,r4,r5
683 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 553 srwi. r4,r4,L1_CACHE_SHIFT
684 beqlr 554 beqlr
685 mtctr r4 555 mtctr r4
686 556
6871: dcbf 0,r3 5571: dcbf 0,r3
688 addi r3,r3,L1_CACHE_LINE_SIZE 558 addi r3,r3,L1_CACHE_BYTES
689 bdnz 1b 559 bdnz 1b
690 sync /* wait for dcbst's to get to ram */ 560 sync /* wait for dcbst's to get to ram */
691 blr 561 blr
@@ -698,16 +568,16 @@ _GLOBAL(flush_dcache_range)
698 * invalidate_dcache_range(unsigned long start, unsigned long stop) 568 * invalidate_dcache_range(unsigned long start, unsigned long stop)
699 */ 569 */
700_GLOBAL(invalidate_dcache_range) 570_GLOBAL(invalidate_dcache_range)
701 li r5,L1_CACHE_LINE_SIZE-1 571 li r5,L1_CACHE_BYTES-1
702 andc r3,r3,r5 572 andc r3,r3,r5
703 subf r4,r3,r4 573 subf r4,r3,r4
704 add r4,r4,r5 574 add r4,r4,r5
705 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 575 srwi. r4,r4,L1_CACHE_SHIFT
706 beqlr 576 beqlr
707 mtctr r4 577 mtctr r4
708 578
7091: dcbi 0,r3 5791: dcbi 0,r3
710 addi r3,r3,L1_CACHE_LINE_SIZE 580 addi r3,r3,L1_CACHE_BYTES
711 bdnz 1b 581 bdnz 1b
712 sync /* wait for dcbi's to get to ram */ 582 sync /* wait for dcbi's to get to ram */
713 blr 583 blr
@@ -728,7 +598,7 @@ _GLOBAL(flush_dcache_all)
728 mtctr r4 598 mtctr r4
729 lis r5, KERNELBASE@h 599 lis r5, KERNELBASE@h
7301: lwz r3, 0(r5) /* Load one word from every line */ 6001: lwz r3, 0(r5) /* Load one word from every line */
731 addi r5, r5, L1_CACHE_LINE_SIZE 601 addi r5, r5, L1_CACHE_BYTES
732 bdnz 1b 602 bdnz 1b
733 blr 603 blr
734#endif /* CONFIG_NOT_COHERENT_CACHE */ 604#endif /* CONFIG_NOT_COHERENT_CACHE */
@@ -746,16 +616,16 @@ BEGIN_FTR_SECTION
746 blr /* for 601, do nothing */ 616 blr /* for 601, do nothing */
747END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 617END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
748 rlwinm r3,r3,0,0,19 /* Get page base address */ 618 rlwinm r3,r3,0,0,19 /* Get page base address */
749 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ 619 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
750 mtctr r4 620 mtctr r4
751 mr r6,r3 621 mr r6,r3
7520: dcbst 0,r3 /* Write line to ram */ 6220: dcbst 0,r3 /* Write line to ram */
753 addi r3,r3,L1_CACHE_LINE_SIZE 623 addi r3,r3,L1_CACHE_BYTES
754 bdnz 0b 624 bdnz 0b
755 sync 625 sync
756 mtctr r4 626 mtctr r4
7571: icbi 0,r6 6271: icbi 0,r6
758 addi r6,r6,L1_CACHE_LINE_SIZE 628 addi r6,r6,L1_CACHE_BYTES
759 bdnz 1b 629 bdnz 1b
760 sync 630 sync
761 isync 631 isync
@@ -778,16 +648,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
778 mtmsr r0 648 mtmsr r0
779 isync 649 isync
780 rlwinm r3,r3,0,0,19 /* Get page base address */ 650 rlwinm r3,r3,0,0,19 /* Get page base address */
781 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ 651 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
782 mtctr r4 652 mtctr r4
783 mr r6,r3 653 mr r6,r3
7840: dcbst 0,r3 /* Write line to ram */ 6540: dcbst 0,r3 /* Write line to ram */
785 addi r3,r3,L1_CACHE_LINE_SIZE 655 addi r3,r3,L1_CACHE_BYTES
786 bdnz 0b 656 bdnz 0b
787 sync 657 sync
788 mtctr r4 658 mtctr r4
7891: icbi 0,r6 6591: icbi 0,r6
790 addi r6,r6,L1_CACHE_LINE_SIZE 660 addi r6,r6,L1_CACHE_BYTES
791 bdnz 1b 661 bdnz 1b
792 sync 662 sync
793 mtmsr r10 /* restore DR */ 663 mtmsr r10 /* restore DR */
@@ -802,7 +672,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
802 * void clear_pages(void *page, int order) ; 672 * void clear_pages(void *page, int order) ;
803 */ 673 */
804_GLOBAL(clear_pages) 674_GLOBAL(clear_pages)
805 li r0,4096/L1_CACHE_LINE_SIZE 675 li r0,4096/L1_CACHE_BYTES
806 slw r0,r0,r4 676 slw r0,r0,r4
807 mtctr r0 677 mtctr r0
808#ifdef CONFIG_8xx 678#ifdef CONFIG_8xx
@@ -814,7 +684,7 @@ _GLOBAL(clear_pages)
814#else 684#else
8151: dcbz 0,r3 6851: dcbz 0,r3
816#endif 686#endif
817 addi r3,r3,L1_CACHE_LINE_SIZE 687 addi r3,r3,L1_CACHE_BYTES
818 bdnz 1b 688 bdnz 1b
819 blr 689 blr
820 690
@@ -840,7 +710,7 @@ _GLOBAL(copy_page)
840 710
841#ifdef CONFIG_8xx 711#ifdef CONFIG_8xx
842 /* don't use prefetch on 8xx */ 712 /* don't use prefetch on 8xx */
843 li r0,4096/L1_CACHE_LINE_SIZE 713 li r0,4096/L1_CACHE_BYTES
844 mtctr r0 714 mtctr r0
8451: COPY_16_BYTES 7151: COPY_16_BYTES
846 bdnz 1b 716 bdnz 1b
@@ -854,13 +724,13 @@ _GLOBAL(copy_page)
854 li r11,4 724 li r11,4
855 mtctr r0 725 mtctr r0
85611: dcbt r11,r4 72611: dcbt r11,r4
857 addi r11,r11,L1_CACHE_LINE_SIZE 727 addi r11,r11,L1_CACHE_BYTES
858 bdnz 11b 728 bdnz 11b
859#else /* MAX_COPY_PREFETCH == 1 */ 729#else /* MAX_COPY_PREFETCH == 1 */
860 dcbt r5,r4 730 dcbt r5,r4
861 li r11,L1_CACHE_LINE_SIZE+4 731 li r11,L1_CACHE_BYTES+4
862#endif /* MAX_COPY_PREFETCH */ 732#endif /* MAX_COPY_PREFETCH */
863 li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH 733 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
864 crclr 4*cr0+eq 734 crclr 4*cr0+eq
8652: 7352:
866 mtctr r0 736 mtctr r0
@@ -868,12 +738,12 @@ _GLOBAL(copy_page)
868 dcbt r11,r4 738 dcbt r11,r4
869 dcbz r5,r3 739 dcbz r5,r3
870 COPY_16_BYTES 740 COPY_16_BYTES
871#if L1_CACHE_LINE_SIZE >= 32 741#if L1_CACHE_BYTES >= 32
872 COPY_16_BYTES 742 COPY_16_BYTES
873#if L1_CACHE_LINE_SIZE >= 64 743#if L1_CACHE_BYTES >= 64
874 COPY_16_BYTES 744 COPY_16_BYTES
875 COPY_16_BYTES 745 COPY_16_BYTES
876#if L1_CACHE_LINE_SIZE >= 128 746#if L1_CACHE_BYTES >= 128
877 COPY_16_BYTES 747 COPY_16_BYTES
878 COPY_16_BYTES 748 COPY_16_BYTES
879 COPY_16_BYTES 749 COPY_16_BYTES
@@ -1098,33 +968,6 @@ _GLOBAL(_get_SP)
1098 blr 968 blr
1099 969
1100/* 970/*
1101 * These are used in the alignment trap handler when emulating
1102 * single-precision loads and stores.
1103 * We restore and save the fpscr so the task gets the same result
1104 * and exceptions as if the cpu had performed the load or store.
1105 */
1106
1107#ifdef CONFIG_PPC_FPU
1108_GLOBAL(cvt_fd)
1109 lfd 0,-4(r5) /* load up fpscr value */
1110 mtfsf 0xff,0
1111 lfs 0,0(r3)
1112 stfd 0,0(r4)
1113 mffs 0 /* save new fpscr value */
1114 stfd 0,-4(r5)
1115 blr
1116
1117_GLOBAL(cvt_df)
1118 lfd 0,-4(r5) /* load up fpscr value */
1119 mtfsf 0xff,0
1120 lfd 0,0(r3)
1121 stfs 0,0(r4)
1122 mffs 0 /* save new fpscr value */
1123 stfd 0,-4(r5)
1124 blr
1125#endif
1126
1127/*
1128 * Create a kernel thread 971 * Create a kernel thread
1129 * kernel_thread(fn, arg, flags) 972 * kernel_thread(fn, arg, flags)
1130 */ 973 */
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 854e45beb387..e8f4e576750a 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -21,6 +21,7 @@
21#include <asm/byteorder.h> 21#include <asm/byteorder.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <asm/machdep.h>
24 25
25#undef DEBUG 26#undef DEBUG
26 27
@@ -53,7 +54,7 @@ static u8* pci_to_OF_bus_map;
53/* By default, we don't re-assign bus numbers. We do this only on 54/* By default, we don't re-assign bus numbers. We do this only on
54 * some pmacs 55 * some pmacs
55 */ 56 */
56int pci_assign_all_busses; 57int pci_assign_all_buses;
57 58
58struct pci_controller* hose_head; 59struct pci_controller* hose_head;
59struct pci_controller** hose_tail = &hose_head; 60struct pci_controller** hose_tail = &hose_head;
@@ -644,7 +645,7 @@ pcibios_alloc_controller(void)
644/* 645/*
645 * Functions below are used on OpenFirmware machines. 646 * Functions below are used on OpenFirmware machines.
646 */ 647 */
647static void __openfirmware 648static void
648make_one_node_map(struct device_node* node, u8 pci_bus) 649make_one_node_map(struct device_node* node, u8 pci_bus)
649{ 650{
650 int *bus_range; 651 int *bus_range;
@@ -678,7 +679,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
678 } 679 }
679} 680}
680 681
681void __openfirmware 682void
682pcibios_make_OF_bus_map(void) 683pcibios_make_OF_bus_map(void)
683{ 684{
684 int i; 685 int i;
@@ -720,7 +721,7 @@ pcibios_make_OF_bus_map(void)
720 721
721typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data); 722typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
722 723
723static struct device_node* __openfirmware 724static struct device_node*
724scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data) 725scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
725{ 726{
726 struct device_node* sub_node; 727 struct device_node* sub_node;
@@ -761,7 +762,7 @@ scan_OF_pci_childs_iterator(struct device_node* node, void* data)
761 return 0; 762 return 0;
762} 763}
763 764
764static struct device_node* __openfirmware 765static struct device_node*
765scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn) 766scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
766{ 767{
767 u8 filter_data[2] = {bus, dev_fn}; 768 u8 filter_data[2] = {bus, dev_fn};
@@ -813,18 +814,20 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
813 /* Now, lookup childs of the hose */ 814 /* Now, lookup childs of the hose */
814 return scan_OF_childs_for_device(node->child, busnr, devfn); 815 return scan_OF_childs_for_device(node->child, busnr, devfn);
815} 816}
817EXPORT_SYMBOL(pci_busdev_to_OF_node);
816 818
817struct device_node* 819struct device_node*
818pci_device_to_OF_node(struct pci_dev *dev) 820pci_device_to_OF_node(struct pci_dev *dev)
819{ 821{
820 return pci_busdev_to_OF_node(dev->bus, dev->devfn); 822 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
821} 823}
824EXPORT_SYMBOL(pci_device_to_OF_node);
822 825
823/* This routine is meant to be used early during boot, when the 826/* This routine is meant to be used early during boot, when the
824 * PCI bus numbers have not yet been assigned, and you need to 827 * PCI bus numbers have not yet been assigned, and you need to
825 * issue PCI config cycles to an OF device. 828 * issue PCI config cycles to an OF device.
826 * It could also be used to "fix" RTAS config cycles if you want 829 * It could also be used to "fix" RTAS config cycles if you want
827 * to set pci_assign_all_busses to 1 and still use RTAS for PCI 830 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
828 * config cycles. 831 * config cycles.
829 */ 832 */
830struct pci_controller* 833struct pci_controller*
@@ -842,7 +845,7 @@ pci_find_hose_for_OF_device(struct device_node* node)
842 return NULL; 845 return NULL;
843} 846}
844 847
845static int __openfirmware 848static int
846find_OF_pci_device_filter(struct device_node* node, void* data) 849find_OF_pci_device_filter(struct device_node* node, void* data)
847{ 850{
848 return ((void *)node == data); 851 return ((void *)node == data);
@@ -890,6 +893,7 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
890 } 893 }
891 return -ENODEV; 894 return -ENODEV;
892} 895}
896EXPORT_SYMBOL(pci_device_from_OF_node);
893 897
894void __init 898void __init
895pci_process_bridge_OF_ranges(struct pci_controller *hose, 899pci_process_bridge_OF_ranges(struct pci_controller *hose,
@@ -1030,6 +1034,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
1030} 1034}
1031static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); 1035static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1032 1036
1037#else /* CONFIG_PPC_OF */
1038void pcibios_make_OF_bus_map(void)
1039{
1040}
1033#endif /* CONFIG_PPC_OF */ 1041#endif /* CONFIG_PPC_OF */
1034 1042
1035/* Add sysfs properties */ 1043/* Add sysfs properties */
@@ -1262,12 +1270,12 @@ pcibios_init(void)
1262 1270
1263 /* Scan all of the recorded PCI controllers. */ 1271 /* Scan all of the recorded PCI controllers. */
1264 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { 1272 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1265 if (pci_assign_all_busses) 1273 if (pci_assign_all_buses)
1266 hose->first_busno = next_busno; 1274 hose->first_busno = next_busno;
1267 hose->last_busno = 0xff; 1275 hose->last_busno = 0xff;
1268 bus = pci_scan_bus(hose->first_busno, hose->ops, hose); 1276 bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
1269 hose->last_busno = bus->subordinate; 1277 hose->last_busno = bus->subordinate;
1270 if (pci_assign_all_busses || next_busno <= hose->last_busno) 1278 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1271 next_busno = hose->last_busno + pcibios_assign_bus_offset; 1279 next_busno = hose->last_busno + pcibios_assign_bus_offset;
1272 } 1280 }
1273 pci_bus_count = next_busno; 1281 pci_bus_count = next_busno;
@@ -1276,7 +1284,7 @@ pcibios_init(void)
1276 * numbers vs. kernel bus numbers since we may have to 1284 * numbers vs. kernel bus numbers since we may have to
1277 * remap them. 1285 * remap them.
1278 */ 1286 */
1279 if (pci_assign_all_busses && have_of) 1287 if (pci_assign_all_buses && have_of)
1280 pcibios_make_OF_bus_map(); 1288 pcibios_make_OF_bus_map();
1281 1289
1282 /* Do machine dependent PCI interrupt routing */ 1290 /* Do machine dependent PCI interrupt routing */
@@ -1586,16 +1594,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1586 * above routine 1594 * above routine
1587 */ 1595 */
1588pgprot_t pci_phys_mem_access_prot(struct file *file, 1596pgprot_t pci_phys_mem_access_prot(struct file *file,
1589 unsigned long offset, 1597 unsigned long pfn,
1590 unsigned long size, 1598 unsigned long size,
1591 pgprot_t protection) 1599 pgprot_t protection)
1592{ 1600{
1593 struct pci_dev *pdev = NULL; 1601 struct pci_dev *pdev = NULL;
1594 struct resource *found = NULL; 1602 struct resource *found = NULL;
1595 unsigned long prot = pgprot_val(protection); 1603 unsigned long prot = pgprot_val(protection);
1604 unsigned long offset = pfn << PAGE_SHIFT;
1596 int i; 1605 int i;
1597 1606
1598 if (page_is_ram(offset >> PAGE_SHIFT)) 1607 if (page_is_ram(pfn))
1599 return prot; 1608 return prot;
1600 1609
1601 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; 1610 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
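Note on the last pci.c hunk: pci_phys_mem_access_prot() now takes a page frame number instead of a raw byte offset; the offset is derived inside the function and page_is_ram() receives the pfn directly. A minimal user-space sketch of that pfn/offset relationship (the PAGE_SHIFT value of 12 is an assumption for 4 KiB pages, not taken from this commit):

/* Illustrative sketch only, not part of the commit: the pfn <-> offset
 * conversion that the new pci_phys_mem_access_prot() interface relies on. */
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

int main(void)
{
	unsigned long pfn = 0x80f00;                /* hypothetical frame number */
	unsigned long offset = pfn << PAGE_SHIFT;   /* byte address of that page */

	/* old interface: caller passed offset, callee did offset >> PAGE_SHIFT
	 * new interface: caller passes pfn, callee recomputes offset when needed */
	printf("pfn %#lx -> offset %#lx\n", pfn, offset);
	return 0;
}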
diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c
deleted file mode 100644
index 22df9a596a0f..000000000000
--- a/arch/ppc/kernel/perfmon.c
+++ /dev/null
@@ -1,96 +0,0 @@
1/* kernel/perfmon.c
2 * PPC 32 Performance Monitor Infrastructure
3 *
4 * Author: Andy Fleming
5 * Copyright (c) 2004 Freescale Semiconductor, Inc
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/stddef.h>
18#include <linux/unistd.h>
19#include <linux/ptrace.h>
20#include <linux/slab.h>
21#include <linux/user.h>
22#include <linux/a.out.h>
23#include <linux/interrupt.h>
24#include <linux/config.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/prctl.h>
28
29#include <asm/pgtable.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <asm/io.h>
33#include <asm/reg.h>
34#include <asm/xmon.h>
35
36/* A lock to regulate grabbing the interrupt */
37DEFINE_SPINLOCK(perfmon_lock);
38
39#if defined (CONFIG_FSL_BOOKE) && !defined (CONFIG_E200)
40static void dummy_perf(struct pt_regs *regs)
41{
42 unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
43
44 pmgc0 &= ~PMGC0_PMIE;
45 mtpmr(PMRN_PMGC0, pmgc0);
46}
47
48#elif defined(CONFIG_6xx)
49/* Ensure exceptions are disabled */
50static void dummy_perf(struct pt_regs *regs)
51{
52 unsigned int mmcr0 = mfspr(SPRN_MMCR0);
53
54 mmcr0 &= ~MMCR0_PMXE;
55 mtspr(SPRN_MMCR0, mmcr0);
56}
57#else
58static void dummy_perf(struct pt_regs *regs)
59{
60}
61#endif
62
63void (*perf_irq)(struct pt_regs *) = dummy_perf;
64
65/* Grab the interrupt, if it's free.
66 * Returns 0 on success, -1 if the interrupt is taken already */
67int request_perfmon_irq(void (*handler)(struct pt_regs *))
68{
69 int err = 0;
70
71 spin_lock(&perfmon_lock);
72
73 if (perf_irq == dummy_perf)
74 perf_irq = handler;
75 else {
76 pr_info("perfmon irq already handled by %p\n", perf_irq);
77 err = -1;
78 }
79
80 spin_unlock(&perfmon_lock);
81
82 return err;
83}
84
85void free_perfmon_irq(void)
86{
87 spin_lock(&perfmon_lock);
88
89 perf_irq = dummy_perf;
90
91 spin_unlock(&perfmon_lock);
92}
93
94EXPORT_SYMBOL(perf_irq);
95EXPORT_SYMBOL(request_perfmon_irq);
96EXPORT_SYMBOL(free_perfmon_irq);
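The deleted request_perfmon_irq()/free_perfmon_irq() pair implemented a simple claim-and-release protocol around the perf_irq hook, serialized by perfmon_lock. A hedged usage sketch of that removed interface follows; my_pmc_handler and the -EBUSY mapping are hypothetical, and the replacement interface now lives behind asm/pmc.h, as the perfmon_fsl_booke.c hunk below shows:

/* Illustrative sketch of the removed interface, not real driver code. */
static void my_pmc_handler(struct pt_regs *regs)    /* hypothetical handler */
{
	/* read/acknowledge the performance monitor counters here */
}

static int my_driver_init(void)
{
	/* request_perfmon_irq() returned 0 on success, -1 if perf_irq
	 * already pointed at something other than dummy_perf */
	if (request_perfmon_irq(my_pmc_handler))
		return -EBUSY;                       /* hypothetical error mapping */
	return 0;
}

static void my_driver_exit(void)
{
	free_perfmon_irq();     /* restores dummy_perf under perfmon_lock */
}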
diff --git a/arch/ppc/kernel/perfmon_fsl_booke.c b/arch/ppc/kernel/perfmon_fsl_booke.c
index 03526bfb0840..32455dfcc36b 100644
--- a/arch/ppc/kernel/perfmon_fsl_booke.c
+++ b/arch/ppc/kernel/perfmon_fsl_booke.c
@@ -32,7 +32,7 @@
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/reg.h> 33#include <asm/reg.h>
34#include <asm/xmon.h> 34#include <asm/xmon.h>
35#include <asm/perfmon.h> 35#include <asm/pmc.h>
36 36
37static inline u32 get_pmlca(int ctr); 37static inline u32 get_pmlca(int ctr);
38static inline void set_pmlca(int ctr, u32 pmlca); 38static inline void set_pmlca(int ctr, u32 pmlca);
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 88f6bb7b6964..ae24196d78f6 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -53,10 +53,10 @@
53 53
54extern void transfer_to_handler(void); 54extern void transfer_to_handler(void);
55extern void do_IRQ(struct pt_regs *regs); 55extern void do_IRQ(struct pt_regs *regs);
56extern void MachineCheckException(struct pt_regs *regs); 56extern void machine_check_exception(struct pt_regs *regs);
57extern void AlignmentException(struct pt_regs *regs); 57extern void alignment_exception(struct pt_regs *regs);
58extern void ProgramCheckException(struct pt_regs *regs); 58extern void program_check_exception(struct pt_regs *regs);
59extern void SingleStepException(struct pt_regs *regs); 59extern void single_step_exception(struct pt_regs *regs);
60extern int do_signal(sigset_t *, struct pt_regs *); 60extern int do_signal(sigset_t *, struct pt_regs *);
61extern int pmac_newworld; 61extern int pmac_newworld;
62extern int sys_sigreturn(struct pt_regs *regs); 62extern int sys_sigreturn(struct pt_regs *regs);
@@ -72,10 +72,10 @@ EXPORT_SYMBOL(clear_user_page);
72EXPORT_SYMBOL(do_signal); 72EXPORT_SYMBOL(do_signal);
73EXPORT_SYMBOL(transfer_to_handler); 73EXPORT_SYMBOL(transfer_to_handler);
74EXPORT_SYMBOL(do_IRQ); 74EXPORT_SYMBOL(do_IRQ);
75EXPORT_SYMBOL(MachineCheckException); 75EXPORT_SYMBOL(machine_check_exception);
76EXPORT_SYMBOL(AlignmentException); 76EXPORT_SYMBOL(alignment_exception);
77EXPORT_SYMBOL(ProgramCheckException); 77EXPORT_SYMBOL(program_check_exception);
78EXPORT_SYMBOL(SingleStepException); 78EXPORT_SYMBOL(single_step_exception);
79EXPORT_SYMBOL(sys_sigreturn); 79EXPORT_SYMBOL(sys_sigreturn);
80EXPORT_SYMBOL(ppc_n_lost_interrupts); 80EXPORT_SYMBOL(ppc_n_lost_interrupts);
81EXPORT_SYMBOL(ppc_lost_interrupts); 81EXPORT_SYMBOL(ppc_lost_interrupts);
@@ -230,9 +230,6 @@ EXPORT_SYMBOL(find_all_nodes);
230EXPORT_SYMBOL(get_property); 230EXPORT_SYMBOL(get_property);
231EXPORT_SYMBOL(request_OF_resource); 231EXPORT_SYMBOL(request_OF_resource);
232EXPORT_SYMBOL(release_OF_resource); 232EXPORT_SYMBOL(release_OF_resource);
233EXPORT_SYMBOL(pci_busdev_to_OF_node);
234EXPORT_SYMBOL(pci_device_to_OF_node);
235EXPORT_SYMBOL(pci_device_from_OF_node);
236EXPORT_SYMBOL(of_find_node_by_name); 233EXPORT_SYMBOL(of_find_node_by_name);
237EXPORT_SYMBOL(of_find_node_by_type); 234EXPORT_SYMBOL(of_find_node_by_type);
238EXPORT_SYMBOL(of_find_compatible_node); 235EXPORT_SYMBOL(of_find_compatible_node);
@@ -272,16 +269,6 @@ EXPORT_SYMBOL(screen_info);
272#endif 269#endif
273 270
274EXPORT_SYMBOL(__delay); 271EXPORT_SYMBOL(__delay);
275#ifndef INLINE_IRQS
276EXPORT_SYMBOL(local_irq_enable);
277EXPORT_SYMBOL(local_irq_enable_end);
278EXPORT_SYMBOL(local_irq_disable);
279EXPORT_SYMBOL(local_irq_disable_end);
280EXPORT_SYMBOL(local_save_flags_ptr);
281EXPORT_SYMBOL(local_save_flags_ptr_end);
282EXPORT_SYMBOL(local_irq_restore);
283EXPORT_SYMBOL(local_irq_restore_end);
284#endif
285EXPORT_SYMBOL(timer_interrupt); 272EXPORT_SYMBOL(timer_interrupt);
286EXPORT_SYMBOL(irq_desc); 273EXPORT_SYMBOL(irq_desc);
287EXPORT_SYMBOL(tb_ticks_per_jiffy); 274EXPORT_SYMBOL(tb_ticks_per_jiffy);
@@ -335,11 +322,6 @@ EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
335extern long *intercept_table; 322extern long *intercept_table;
336EXPORT_SYMBOL(intercept_table); 323EXPORT_SYMBOL(intercept_table);
337#endif /* CONFIG_PPC_STD_MMU */ 324#endif /* CONFIG_PPC_STD_MMU */
338EXPORT_SYMBOL(cur_cpu_spec);
339#ifdef CONFIG_PPC_PMAC
340extern unsigned long agp_special_page;
341EXPORT_SYMBOL(agp_special_page);
342#endif
343#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 325#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
344EXPORT_SYMBOL(__mtdcr); 326EXPORT_SYMBOL(__mtdcr);
345EXPORT_SYMBOL(__mfdcr); 327EXPORT_SYMBOL(__mfdcr);
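The exports dropped from ppc_ksyms.c above are not lost: the pci.c hunks earlier in this diff add the same EXPORT_SYMBOL() lines directly after the functions they export. A minimal sketch of that placement convention, using one of the functions shown above:

/* Sketch of the convention adopted here: the export sits next to the
 * definition instead of in a central ppc_ksyms.c table. */
struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
{
	return pci_busdev_to_OF_node(dev->bus, dev->devfn);
}
EXPORT_SYMBOL(pci_device_to_OF_node);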
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 82de66e4db6d..cb1c7b92f8c6 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
152} 152}
153#endif /* defined(CHECK_STACK) */ 153#endif /* defined(CHECK_STACK) */
154 154
155#ifdef CONFIG_ALTIVEC 155/*
156int 156 * Make sure the floating-point register state in the
157dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs) 157 * the thread_struct is up to date for task tsk.
158 */
159void flush_fp_to_thread(struct task_struct *tsk)
158{ 160{
159 if (regs->msr & MSR_VEC) 161 if (tsk->thread.regs) {
160 giveup_altivec(current); 162 /*
161 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs)); 163 * We need to disable preemption here because if we didn't,
164 * another process could get scheduled after the regs->msr
165 * test but before we have finished saving the FP registers
166 * to the thread_struct. That process could take over the
167 * FPU, and then when we get scheduled again we would store
168 * bogus values for the remaining FP registers.
169 */
170 preempt_disable();
171 if (tsk->thread.regs->msr & MSR_FP) {
172#ifdef CONFIG_SMP
173 /*
174 * This should only ever be called for current or
175 * for a stopped child process. Since we save away
176 * the FP register state on context switch on SMP,
177 * there is something wrong if a stopped child appears
178 * to still have its FP state in the CPU registers.
179 */
180 BUG_ON(tsk != current);
181#endif
182 giveup_fpu(current);
183 }
184 preempt_enable();
185 }
186}
187
188void enable_kernel_fp(void)
189{
190 WARN_ON(preemptible());
191
192#ifdef CONFIG_SMP
193 if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
194 giveup_fpu(current);
195 else
196 giveup_fpu(NULL); /* just enables FP for kernel */
197#else
198 giveup_fpu(last_task_used_math);
199#endif /* CONFIG_SMP */
200}
201EXPORT_SYMBOL(enable_kernel_fp);
202
203int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
204{
205 preempt_disable();
206 if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
207 giveup_fpu(tsk);
208 preempt_enable();
209 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
162 return 1; 210 return 1;
163} 211}
164 212
165void 213#ifdef CONFIG_ALTIVEC
166enable_kernel_altivec(void) 214void enable_kernel_altivec(void)
167{ 215{
168 WARN_ON(preemptible()); 216 WARN_ON(preemptible());
169 217
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
177#endif /* __SMP __ */ 225#endif /* __SMP __ */
178} 226}
179EXPORT_SYMBOL(enable_kernel_altivec); 227EXPORT_SYMBOL(enable_kernel_altivec);
180#endif /* CONFIG_ALTIVEC */
181 228
182#ifdef CONFIG_SPE 229/*
183int 230 * Make sure the VMX/Altivec register state in the
184dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs) 231 * the thread_struct is up to date for task tsk.
232 */
233void flush_altivec_to_thread(struct task_struct *tsk)
185{ 234{
186 if (regs->msr & MSR_SPE) 235 if (tsk->thread.regs) {
187 giveup_spe(current); 236 preempt_disable();
188 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */ 237 if (tsk->thread.regs->msr & MSR_VEC) {
189 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35); 238#ifdef CONFIG_SMP
239 BUG_ON(tsk != current);
240#endif
241 giveup_altivec(current);
242 }
243 preempt_enable();
244 }
245}
246
247int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
248{
249 if (regs->msr & MSR_VEC)
250 giveup_altivec(current);
251 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
190 return 1; 252 return 1;
191} 253}
254#endif /* CONFIG_ALTIVEC */
192 255
256#ifdef CONFIG_SPE
193void 257void
194enable_kernel_spe(void) 258enable_kernel_spe(void)
195{ 259{
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
205#endif /* __SMP __ */ 269#endif /* __SMP __ */
206} 270}
207EXPORT_SYMBOL(enable_kernel_spe); 271EXPORT_SYMBOL(enable_kernel_spe);
208#endif /* CONFIG_SPE */
209 272
210void 273void flush_spe_to_thread(struct task_struct *tsk)
211enable_kernel_fp(void)
212{ 274{
213 WARN_ON(preemptible()); 275 if (tsk->thread.regs) {
214 276 preempt_disable();
277 if (tsk->thread.regs->msr & MSR_SPE) {
215#ifdef CONFIG_SMP 278#ifdef CONFIG_SMP
216 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) 279 BUG_ON(tsk != current);
217 giveup_fpu(current); 280#endif
218 else 281 giveup_spe(current);
219 giveup_fpu(NULL); /* just enables FP for kernel */ 282 }
220#else 283 preempt_enable();
221 giveup_fpu(last_task_used_math); 284 }
222#endif /* CONFIG_SMP */
223} 285}
224EXPORT_SYMBOL(enable_kernel_fp);
225 286
226int 287int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
227dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
228{ 288{
229 preempt_disable(); 289 if (regs->msr & MSR_SPE)
230 if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP)) 290 giveup_spe(current);
231 giveup_fpu(tsk); 291 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
232 preempt_enable(); 292 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
233 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
234 return 1; 293 return 1;
235} 294}
295#endif /* CONFIG_SPE */
236 296
237struct task_struct *__switch_to(struct task_struct *prev, 297struct task_struct *__switch_to(struct task_struct *prev,
238 struct task_struct *new) 298 struct task_struct *new)
@@ -287,11 +347,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
287#endif /* CONFIG_SPE */ 347#endif /* CONFIG_SPE */
288#endif /* CONFIG_SMP */ 348#endif /* CONFIG_SMP */
289 349
350#ifdef CONFIG_ALTIVEC
290 /* Avoid the trap. On smp this this never happens since 351 /* Avoid the trap. On smp this this never happens since
291 * we don't set last_task_used_altivec -- Cort 352 * we don't set last_task_used_altivec -- Cort
292 */ 353 */
293 if (new->thread.regs && last_task_used_altivec == new) 354 if (new->thread.regs && last_task_used_altivec == new)
294 new->thread.regs->msr |= MSR_VEC; 355 new->thread.regs->msr |= MSR_VEC;
356#endif
295#ifdef CONFIG_SPE 357#ifdef CONFIG_SPE
296 /* Avoid the trap. On smp this this never happens since 358 /* Avoid the trap. On smp this this never happens since
297 * we don't set last_task_used_spe 359 * we don't set last_task_used_spe
@@ -482,7 +544,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
482 last_task_used_spe = NULL; 544 last_task_used_spe = NULL;
483#endif 545#endif
484 memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); 546 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
485 current->thread.fpscr = 0; 547 current->thread.fpscr.val = 0;
486#ifdef CONFIG_ALTIVEC 548#ifdef CONFIG_ALTIVEC
487 memset(current->thread.vr, 0, sizeof(current->thread.vr)); 549 memset(current->thread.vr, 0, sizeof(current->thread.vr));
488 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr)); 550 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
@@ -557,14 +619,16 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
557 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); 619 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
558} 620}
559 621
560int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6, 622int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
623 unsigned long p4, unsigned long p5, unsigned long p6,
561 struct pt_regs *regs) 624 struct pt_regs *regs)
562{ 625{
563 CHECK_FULL_REGS(regs); 626 CHECK_FULL_REGS(regs);
564 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); 627 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
565} 628}
566 629
567int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6, 630int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
631 unsigned long p4, unsigned long p5, unsigned long p6,
568 struct pt_regs *regs) 632 struct pt_regs *regs)
569{ 633{
570 CHECK_FULL_REGS(regs); 634 CHECK_FULL_REGS(regs);
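The process.c changes above add flush_fp_to_thread(), flush_altivec_to_thread() and flush_spe_to_thread(), which all share one structure: with preemption disabled, test the relevant MSR bit and, if it is set, give the unit's registers up into the thread_struct. A condensed sketch of that shared pattern (giveup_unit() and MSR_UNIT are placeholders for the per-unit call and MSR bit, not real symbols):

/* Sketch only: the common shape of the new flush_*_to_thread() helpers. */
void flush_unit_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* preemption stays off between the MSR test and the save,
		 * so no other task can grab the unit in the middle */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_UNIT) {
#ifdef CONFIG_SMP
			/* on SMP the state was saved at context switch, so
			 * only 'current' can still own live registers */
			BUG_ON(tsk != current);
#endif
			giveup_unit(current);   /* dump registers to thread_struct */
		}
		preempt_enable();
	}
}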
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 545cfd0fab59..6bcb85d2b7fd 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -71,7 +71,8 @@ struct ide_machdep_calls ppc_ide_md;
71unsigned long boot_mem_size; 71unsigned long boot_mem_size;
72 72
73unsigned long ISA_DMA_THRESHOLD; 73unsigned long ISA_DMA_THRESHOLD;
74unsigned long DMA_MODE_READ, DMA_MODE_WRITE; 74unsigned int DMA_MODE_READ;
75unsigned int DMA_MODE_WRITE;
75 76
76#ifdef CONFIG_PPC_MULTIPLATFORM 77#ifdef CONFIG_PPC_MULTIPLATFORM
77int _machine = 0; 78int _machine = 0;
@@ -82,8 +83,18 @@ extern void pmac_init(unsigned long r3, unsigned long r4,
82 unsigned long r5, unsigned long r6, unsigned long r7); 83 unsigned long r5, unsigned long r6, unsigned long r7);
83extern void chrp_init(unsigned long r3, unsigned long r4, 84extern void chrp_init(unsigned long r3, unsigned long r4,
84 unsigned long r5, unsigned long r6, unsigned long r7); 85 unsigned long r5, unsigned long r6, unsigned long r7);
86
87dev_t boot_dev;
85#endif /* CONFIG_PPC_MULTIPLATFORM */ 88#endif /* CONFIG_PPC_MULTIPLATFORM */
86 89
90int have_of;
91EXPORT_SYMBOL(have_of);
92
93#ifdef __DO_IRQ_CANON
94int ppc_do_canonicalize_irqs;
95EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
96#endif
97
87#ifdef CONFIG_MAGIC_SYSRQ 98#ifdef CONFIG_MAGIC_SYSRQ
88unsigned long SYSRQ_KEY = 0x54; 99unsigned long SYSRQ_KEY = 0x54;
89#endif /* CONFIG_MAGIC_SYSRQ */ 100#endif /* CONFIG_MAGIC_SYSRQ */
@@ -185,18 +196,18 @@ int show_cpuinfo(struct seq_file *m, void *v)
185 seq_printf(m, "processor\t: %d\n", i); 196 seq_printf(m, "processor\t: %d\n", i);
186 seq_printf(m, "cpu\t\t: "); 197 seq_printf(m, "cpu\t\t: ");
187 198
188 if (cur_cpu_spec[i]->pvr_mask) 199 if (cur_cpu_spec->pvr_mask)
189 seq_printf(m, "%s", cur_cpu_spec[i]->cpu_name); 200 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
190 else 201 else
191 seq_printf(m, "unknown (%08x)", pvr); 202 seq_printf(m, "unknown (%08x)", pvr);
192#ifdef CONFIG_ALTIVEC 203#ifdef CONFIG_ALTIVEC
193 if (cur_cpu_spec[i]->cpu_features & CPU_FTR_ALTIVEC) 204 if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
194 seq_printf(m, ", altivec supported"); 205 seq_printf(m, ", altivec supported");
195#endif 206#endif
196 seq_printf(m, "\n"); 207 seq_printf(m, "\n");
197 208
198#ifdef CONFIG_TAU 209#ifdef CONFIG_TAU
199 if (cur_cpu_spec[i]->cpu_features & CPU_FTR_TAU) { 210 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
200#ifdef CONFIG_TAU_AVERAGE 211#ifdef CONFIG_TAU_AVERAGE
201 /* more straightforward, but potentially misleading */ 212 /* more straightforward, but potentially misleading */
202 seq_printf(m, "temperature \t: %u C (uncalibrated)\n", 213 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
@@ -339,7 +350,7 @@ early_init(int r3, int r4, int r5)
339 * Assume here that all clock rates are the same in a 350 * Assume here that all clock rates are the same in a
340 * smp system. -- Cort 351 * smp system. -- Cort
341 */ 352 */
342int __openfirmware 353int
343of_show_percpuinfo(struct seq_file *m, int i) 354of_show_percpuinfo(struct seq_file *m, int i)
344{ 355{
345 struct device_node *cpu_node; 356 struct device_node *cpu_node;
@@ -404,11 +415,15 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
404 _machine = _MACH_prep; 415 _machine = _MACH_prep;
405 } 416 }
406 417
418#ifdef CONFIG_PPC_PREP
407 /* not much more to do here, if prep */ 419 /* not much more to do here, if prep */
408 if (_machine == _MACH_prep) { 420 if (_machine == _MACH_prep) {
409 prep_init(r3, r4, r5, r6, r7); 421 prep_init(r3, r4, r5, r6, r7);
410 return; 422 return;
411 } 423 }
424#endif
425
426 have_of = 1;
412 427
413 /* prom_init has already been called from __start */ 428 /* prom_init has already been called from __start */
414 if (boot_infos) 429 if (boot_infos)
@@ -479,12 +494,16 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
479#endif /* CONFIG_ADB */ 494#endif /* CONFIG_ADB */
480 495
481 switch (_machine) { 496 switch (_machine) {
497#ifdef CONFIG_PPC_PMAC
482 case _MACH_Pmac: 498 case _MACH_Pmac:
483 pmac_init(r3, r4, r5, r6, r7); 499 pmac_init(r3, r4, r5, r6, r7);
484 break; 500 break;
501#endif
502#ifdef CONFIG_PPC_CHRP
485 case _MACH_chrp: 503 case _MACH_chrp:
486 chrp_init(r3, r4, r5, r6, r7); 504 chrp_init(r3, r4, r5, r6, r7);
487 break; 505 break;
506#endif
488 } 507 }
489} 508}
490 509
@@ -721,7 +740,7 @@ void __init setup_arch(char **cmdline_p)
721#endif 740#endif
722 741
723#ifdef CONFIG_XMON 742#ifdef CONFIG_XMON
724 xmon_map_scc(); 743 xmon_init(1);
725 if (strstr(cmd_line, "xmon")) 744 if (strstr(cmd_line, "xmon"))
726 xmon(NULL); 745 xmon(NULL);
727#endif /* CONFIG_XMON */ 746#endif /* CONFIG_XMON */
@@ -745,12 +764,12 @@ void __init setup_arch(char **cmdline_p)
745 * for a possibly more accurate value. 764 * for a possibly more accurate value.
746 */ 765 */
747 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) { 766 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
748 dcache_bsize = cur_cpu_spec[0]->dcache_bsize; 767 dcache_bsize = cur_cpu_spec->dcache_bsize;
749 icache_bsize = cur_cpu_spec[0]->icache_bsize; 768 icache_bsize = cur_cpu_spec->icache_bsize;
750 ucache_bsize = 0; 769 ucache_bsize = 0;
751 } else 770 } else
752 ucache_bsize = dcache_bsize = icache_bsize 771 ucache_bsize = dcache_bsize = icache_bsize
753 = cur_cpu_spec[0]->dcache_bsize; 772 = cur_cpu_spec->dcache_bsize;
754 773
755 /* reboot on panic */ 774 /* reboot on panic */
756 panic_timeout = 180; 775 panic_timeout = 180;
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
deleted file mode 100644
index 2244bf91e593..000000000000
--- a/arch/ppc/kernel/signal.c
+++ /dev/null
@@ -1,771 +0,0 @@
1/*
2 * arch/ppc/kernel/signal.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/kernel/signal.c"
8 * Copyright (C) 1991, 1992 Linus Torvalds
9 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/sched.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/kernel.h>
22#include <linux/signal.h>
23#include <linux/errno.h>
24#include <linux/wait.h>
25#include <linux/ptrace.h>
26#include <linux/unistd.h>
27#include <linux/stddef.h>
28#include <linux/elf.h>
29#include <linux/tty.h>
30#include <linux/binfmts.h>
31#include <linux/suspend.h>
32#include <asm/ucontext.h>
33#include <asm/uaccess.h>
34#include <asm/pgtable.h>
35#include <asm/cacheflush.h>
36
37#undef DEBUG_SIG
38
39#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
40
41extern void sigreturn_exit(struct pt_regs *);
42
43#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
44
45int do_signal(sigset_t *oldset, struct pt_regs *regs);
46
47/*
48 * Atomically swap in the new signal mask, and wait for a signal.
49 */
50int
51sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
52 struct pt_regs *regs)
53{
54 sigset_t saveset;
55
56 mask &= _BLOCKABLE;
57 spin_lock_irq(&current->sighand->siglock);
58 saveset = current->blocked;
59 siginitset(&current->blocked, mask);
60 recalc_sigpending();
61 spin_unlock_irq(&current->sighand->siglock);
62
63 regs->result = -EINTR;
64 regs->gpr[3] = EINTR;
65 regs->ccr |= 0x10000000;
66 while (1) {
67 current->state = TASK_INTERRUPTIBLE;
68 schedule();
69 if (do_signal(&saveset, regs))
70 sigreturn_exit(regs);
71 }
72}
73
74int
75sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
76 int p6, int p7, struct pt_regs *regs)
77{
78 sigset_t saveset, newset;
79
80 /* XXX: Don't preclude handling different sized sigset_t's. */
81 if (sigsetsize != sizeof(sigset_t))
82 return -EINVAL;
83
84 if (copy_from_user(&newset, unewset, sizeof(newset)))
85 return -EFAULT;
86 sigdelsetmask(&newset, ~_BLOCKABLE);
87
88 spin_lock_irq(&current->sighand->siglock);
89 saveset = current->blocked;
90 current->blocked = newset;
91 recalc_sigpending();
92 spin_unlock_irq(&current->sighand->siglock);
93
94 regs->result = -EINTR;
95 regs->gpr[3] = EINTR;
96 regs->ccr |= 0x10000000;
97 while (1) {
98 current->state = TASK_INTERRUPTIBLE;
99 schedule();
100 if (do_signal(&saveset, regs))
101 sigreturn_exit(regs);
102 }
103}
104
105
106int
107sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
108 int r6, int r7, int r8, struct pt_regs *regs)
109{
110 return do_sigaltstack(uss, uoss, regs->gpr[1]);
111}
112
113int
114sys_sigaction(int sig, const struct old_sigaction __user *act,
115 struct old_sigaction __user *oact)
116{
117 struct k_sigaction new_ka, old_ka;
118 int ret;
119
120 if (act) {
121 old_sigset_t mask;
122 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
123 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
124 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
125 return -EFAULT;
126 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
127 __get_user(mask, &act->sa_mask);
128 siginitset(&new_ka.sa.sa_mask, mask);
129 }
130
131 ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));
132
133 if (!ret && oact) {
134 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
135 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
136 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
137 return -EFAULT;
138 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
139 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
140 }
141
142 return ret;
143}
144
145/*
146 * When we have signals to deliver, we set up on the
147 * user stack, going down from the original stack pointer:
148 * a sigregs struct
149 * a sigcontext struct
150 * a gap of __SIGNAL_FRAMESIZE bytes
151 *
152 * Each of these things must be a multiple of 16 bytes in size.
153 *
154 */
155struct sigregs {
156 struct mcontext mctx; /* all the register values */
157 /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
158 and 18 fp regs below sp before decrementing it. */
159 int abigap[56];
160};
161
162/* We use the mc_pad field for the signal return trampoline. */
163#define tramp mc_pad
164
165/*
166 * When we have rt signals to deliver, we set up on the
167 * user stack, going down from the original stack pointer:
168 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
169 * a gap of __SIGNAL_FRAMESIZE+16 bytes
170 * (the +16 is to get the siginfo and ucontext in the same
171 * positions as in older kernels).
172 *
173 * Each of these things must be a multiple of 16 bytes in size.
174 *
175 */
176struct rt_sigframe
177{
178 struct siginfo info;
179 struct ucontext uc;
180 /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
181 and 18 fp regs below sp before decrementing it. */
182 int abigap[56];
183};
184
185/*
186 * Save the current user registers on the user stack.
187 * We only save the altivec/spe registers if the process has used
188 * altivec/spe instructions at some point.
189 */
190static int
191save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
192{
193 /* save general and floating-point registers */
194 CHECK_FULL_REGS(regs);
195 preempt_disable();
196 if (regs->msr & MSR_FP)
197 giveup_fpu(current);
198#ifdef CONFIG_ALTIVEC
199 if (current->thread.used_vr && (regs->msr & MSR_VEC))
200 giveup_altivec(current);
201#endif /* CONFIG_ALTIVEC */
202#ifdef CONFIG_SPE
203 if (current->thread.used_spe && (regs->msr & MSR_SPE))
204 giveup_spe(current);
205#endif /* CONFIG_ALTIVEC */
206 preempt_enable();
207
208 if (__copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE)
209 || __copy_to_user(&frame->mc_fregs, current->thread.fpr,
210 ELF_NFPREG * sizeof(double)))
211 return 1;
212
213 current->thread.fpscr = 0; /* turn off all fp exceptions */
214
215#ifdef CONFIG_ALTIVEC
216 /* save altivec registers */
217 if (current->thread.used_vr) {
218 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
219 ELF_NVRREG * sizeof(vector128)))
220 return 1;
221 /* set MSR_VEC in the saved MSR value to indicate that
222 frame->mc_vregs contains valid data */
223 if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
224 return 1;
225 }
226 /* else assert((regs->msr & MSR_VEC) == 0) */
227
228 /* We always copy to/from vrsave, it's 0 if we don't have or don't
229 * use altivec. Since VSCR only contains 32 bits saved in the least
230 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
231 * most significant bits of that same vector. --BenH
232 */
233 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
234 return 1;
235#endif /* CONFIG_ALTIVEC */
236
237#ifdef CONFIG_SPE
238 /* save spe registers */
239 if (current->thread.used_spe) {
240 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
241 ELF_NEVRREG * sizeof(u32)))
242 return 1;
243 /* set MSR_SPE in the saved MSR value to indicate that
244 frame->mc_vregs contains valid data */
245 if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
246 return 1;
247 }
248 /* else assert((regs->msr & MSR_SPE) == 0) */
249
250 /* We always copy to/from spefscr */
251 if (__put_user(current->thread.spefscr, (u32 *)&frame->mc_vregs + ELF_NEVRREG))
252 return 1;
253#endif /* CONFIG_SPE */
254
255 if (sigret) {
256 /* Set up the sigreturn trampoline: li r0,sigret; sc */
257 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
258 || __put_user(0x44000002UL, &frame->tramp[1]))
259 return 1;
260 flush_icache_range((unsigned long) &frame->tramp[0],
261 (unsigned long) &frame->tramp[2]);
262 }
263
264 return 0;
265}
266
267/*
268 * Restore the current user register values from the user stack,
269 * (except for MSR).
270 */
271static int
272restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
273{
274 unsigned long save_r2 = 0;
275#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
276 unsigned long msr;
277#endif
278
279 /* backup/restore the TLS as we don't want it to be modified */
280 if (!sig)
281 save_r2 = regs->gpr[2];
282 /* copy up to but not including MSR */
283 if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t)))
284 return 1;
285 /* copy from orig_r3 (the word after the MSR) up to the end */
286 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
287 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
288 return 1;
289 if (!sig)
290 regs->gpr[2] = save_r2;
291
292 /* force the process to reload the FP registers from
293 current->thread when it next does FP instructions */
294 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
295 if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
296 sizeof(sr->mc_fregs)))
297 return 1;
298
299#ifdef CONFIG_ALTIVEC
300 /* force the process to reload the altivec registers from
301 current->thread when it next does altivec instructions */
302 regs->msr &= ~MSR_VEC;
303 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
304 /* restore altivec registers from the stack */
305 if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
306 sizeof(sr->mc_vregs)))
307 return 1;
308 } else if (current->thread.used_vr)
309 memset(&current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
310
311 /* Always get VRSAVE back */
312 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
313 return 1;
314#endif /* CONFIG_ALTIVEC */
315
316#ifdef CONFIG_SPE
317 /* force the process to reload the spe registers from
318 current->thread when it next does spe instructions */
319 regs->msr &= ~MSR_SPE;
320 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
321 /* restore spe registers from the stack */
322 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
323 ELF_NEVRREG * sizeof(u32)))
324 return 1;
325 } else if (current->thread.used_spe)
326 memset(&current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
327
328 /* Always get SPEFSCR back */
329 if (__get_user(current->thread.spefscr, (u32 *)&sr->mc_vregs + ELF_NEVRREG))
330 return 1;
331#endif /* CONFIG_SPE */
332
333#ifndef CONFIG_SMP
334 preempt_disable();
335 if (last_task_used_math == current)
336 last_task_used_math = NULL;
337 if (last_task_used_altivec == current)
338 last_task_used_altivec = NULL;
339 if (last_task_used_spe == current)
340 last_task_used_spe = NULL;
341 preempt_enable();
342#endif
343 return 0;
344}
345
346/*
347 * Restore the user process's signal mask
348 */
349static void
350restore_sigmask(sigset_t *set)
351{
352 sigdelsetmask(set, ~_BLOCKABLE);
353 spin_lock_irq(&current->sighand->siglock);
354 current->blocked = *set;
355 recalc_sigpending();
356 spin_unlock_irq(&current->sighand->siglock);
357}
358
359/*
360 * Set up a signal frame for a "real-time" signal handler
361 * (one which gets siginfo).
362 */
363static void
364handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
365 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
366 unsigned long newsp)
367{
368 struct rt_sigframe __user *rt_sf;
369 struct mcontext __user *frame;
370 unsigned long origsp = newsp;
371
372 /* Set up Signal Frame */
373 /* Put a Real Time Context onto stack */
374 newsp -= sizeof(*rt_sf);
375 rt_sf = (struct rt_sigframe __user *) newsp;
376
377 /* create a stack frame for the caller of the handler */
378 newsp -= __SIGNAL_FRAMESIZE + 16;
379
380 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
381 goto badframe;
382
383 /* Put the siginfo & fill in most of the ucontext */
384 if (copy_siginfo_to_user(&rt_sf->info, info)
385 || __put_user(0, &rt_sf->uc.uc_flags)
386 || __put_user(0, &rt_sf->uc.uc_link)
387 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
388 || __put_user(sas_ss_flags(regs->gpr[1]),
389 &rt_sf->uc.uc_stack.ss_flags)
390 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
391 || __put_user(&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
392 || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset)))
393 goto badframe;
394
395 /* Save user registers on the stack */
396 frame = &rt_sf->uc.uc_mcontext;
397 if (save_user_regs(regs, frame, __NR_rt_sigreturn))
398 goto badframe;
399
400 if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
401 goto badframe;
402 regs->gpr[1] = newsp;
403 regs->gpr[3] = sig;
404 regs->gpr[4] = (unsigned long) &rt_sf->info;
405 regs->gpr[5] = (unsigned long) &rt_sf->uc;
406 regs->gpr[6] = (unsigned long) rt_sf;
407 regs->nip = (unsigned long) ka->sa.sa_handler;
408 regs->link = (unsigned long) frame->tramp;
409 regs->trap = 0;
410
411 return;
412
413badframe:
414#ifdef DEBUG_SIG
415 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
416 regs, frame, newsp);
417#endif
418 force_sigsegv(sig, current);
419}
420
421static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
422{
423 sigset_t set;
424 struct mcontext __user *mcp;
425
426 if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
427 || __get_user(mcp, &ucp->uc_regs))
428 return -EFAULT;
429 restore_sigmask(&set);
430 if (restore_user_regs(regs, mcp, sig))
431 return -EFAULT;
432
433 return 0;
434}
435
436int sys_swapcontext(struct ucontext __user *old_ctx,
437 struct ucontext __user *new_ctx,
438 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
439{
440 unsigned char tmp;
441
442 /* Context size is for future use. Right now, we only make sure
443 * we are passed something we understand
444 */
445 if (ctx_size < sizeof(struct ucontext))
446 return -EINVAL;
447
448 if (old_ctx != NULL) {
449 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
450 || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
451 || __copy_to_user(&old_ctx->uc_sigmask,
452 &current->blocked, sizeof(sigset_t))
453 || __put_user(&old_ctx->uc_mcontext, &old_ctx->uc_regs))
454 return -EFAULT;
455 }
456 if (new_ctx == NULL)
457 return 0;
458 if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
459 || __get_user(tmp, (u8 __user *) new_ctx)
460 || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
461 return -EFAULT;
462
463 /*
464 * If we get a fault copying the context into the kernel's
465 * image of the user's registers, we can't just return -EFAULT
466 * because the user's registers will be corrupted. For instance
467 * the NIP value may have been updated but not some of the
468 * other registers. Given that we have done the access_ok
469 * and successfully read the first and last bytes of the region
470 * above, this should only happen in an out-of-memory situation
471 * or if another thread unmaps the region containing the context.
472 * We kill the task with a SIGSEGV in this situation.
473 */
474 if (do_setcontext(new_ctx, regs, 0))
475 do_exit(SIGSEGV);
476 sigreturn_exit(regs);
477 /* doesn't actually return back to here */
478 return 0;
479}
480
481int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
482 struct pt_regs *regs)
483{
484 struct rt_sigframe __user *rt_sf;
485
486 /* Always make any pending restarted system calls return -EINTR */
487 current_thread_info()->restart_block.fn = do_no_restart_syscall;
488
489 rt_sf = (struct rt_sigframe __user *)
490 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
491 if (!access_ok(VERIFY_READ, rt_sf, sizeof(struct rt_sigframe)))
492 goto bad;
493 if (do_setcontext(&rt_sf->uc, regs, 1))
494 goto bad;
495
496 /*
497 * It's not clear whether or why it is desirable to save the
498 * sigaltstack setting on signal delivery and restore it on
499 * signal return. But other architectures do this and we have
500 * always done it up until now so it is probably better not to
501 * change it. -- paulus
502 */
503 do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
504
505 sigreturn_exit(regs); /* doesn't return here */
506 return 0;
507
508 bad:
509 force_sig(SIGSEGV, current);
510 return 0;
511}
512
513int sys_debug_setcontext(struct ucontext __user *ctx,
514 int ndbg, struct sig_dbg_op __user *dbg,
515 int r6, int r7, int r8,
516 struct pt_regs *regs)
517{
518 struct sig_dbg_op op;
519 int i;
520 unsigned long new_msr = regs->msr;
521#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
522 unsigned long new_dbcr0 = current->thread.dbcr0;
523#endif
524
525 for (i=0; i<ndbg; i++) {
526 if (__copy_from_user(&op, dbg, sizeof(op)))
527 return -EFAULT;
528 switch (op.dbg_type) {
529 case SIG_DBG_SINGLE_STEPPING:
530#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
531 if (op.dbg_value) {
532 new_msr |= MSR_DE;
533 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
534 } else {
535 new_msr &= ~MSR_DE;
536 new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
537 }
538#else
539 if (op.dbg_value)
540 new_msr |= MSR_SE;
541 else
542 new_msr &= ~MSR_SE;
543#endif
544 break;
545 case SIG_DBG_BRANCH_TRACING:
546#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
547 return -EINVAL;
548#else
549 if (op.dbg_value)
550 new_msr |= MSR_BE;
551 else
552 new_msr &= ~MSR_BE;
553#endif
554 break;
555
556 default:
557 return -EINVAL;
558 }
559 }
560
561 /* We wait until here to actually install the values in the
562 registers so if we fail in the above loop, it will not
563 affect the contents of these registers. After this point,
564 failure is a problem, anyway, and it's very unlikely unless
565 the user is really doing something wrong. */
566 regs->msr = new_msr;
567#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
568 current->thread.dbcr0 = new_dbcr0;
569#endif
570
571 /*
572 * If we get a fault copying the context into the kernel's
573 * image of the user's registers, we can't just return -EFAULT
574 * because the user's registers will be corrupted. For instance
575 * the NIP value may have been updated but not some of the
576 * other registers. Given that we have done the access_ok
577 * and successfully read the first and last bytes of the region
578 * above, this should only happen in an out-of-memory situation
579 * or if another thread unmaps the region containing the context.
580 * We kill the task with a SIGSEGV in this situation.
581 */
582 if (do_setcontext(ctx, regs, 1)) {
583 force_sig(SIGSEGV, current);
584 goto out;
585 }
586
587 /*
588 * It's not clear whether or why it is desirable to save the
589 * sigaltstack setting on signal delivery and restore it on
590 * signal return. But other architectures do this and we have
591 * always done it up until now so it is probably better not to
592 * change it. -- paulus
593 */
594 do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
595
596 sigreturn_exit(regs);
597 /* doesn't actually return back to here */
598
599 out:
600 return 0;
601}
602
603/*
604 * OK, we're invoking a handler
605 */
606static void
607handle_signal(unsigned long sig, struct k_sigaction *ka,
608 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
609 unsigned long newsp)
610{
611 struct sigcontext __user *sc;
612 struct sigregs __user *frame;
613 unsigned long origsp = newsp;
614
615 /* Set up Signal Frame */
616 newsp -= sizeof(struct sigregs);
617 frame = (struct sigregs __user *) newsp;
618
619 /* Put a sigcontext on the stack */
620 newsp -= sizeof(*sc);
621 sc = (struct sigcontext __user *) newsp;
622
623 /* create a stack frame for the caller of the handler */
624 newsp -= __SIGNAL_FRAMESIZE;
625
626 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
627 goto badframe;
628
629#if _NSIG != 64
630#error "Please adjust handle_signal()"
631#endif
632 if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
633 || __put_user(oldset->sig[0], &sc->oldmask)
634 || __put_user(oldset->sig[1], &sc->_unused[3])
635 || __put_user((struct pt_regs __user *)frame, &sc->regs)
636 || __put_user(sig, &sc->signal))
637 goto badframe;
638
639 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
640 goto badframe;
641
642 if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
643 goto badframe;
644 regs->gpr[1] = newsp;
645 regs->gpr[3] = sig;
646 regs->gpr[4] = (unsigned long) sc;
647 regs->nip = (unsigned long) ka->sa.sa_handler;
648 regs->link = (unsigned long) frame->mctx.tramp;
649 regs->trap = 0;
650
651 return;
652
653badframe:
654#ifdef DEBUG_SIG
655 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
656 regs, frame, newsp);
657#endif
658 force_sigsegv(sig, current);
659}
660
661/*
662 * Do a signal return; undo the signal stack.
663 */
664int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
665 struct pt_regs *regs)
666{
667 struct sigcontext __user *sc;
668 struct sigcontext sigctx;
669 struct mcontext __user *sr;
670 sigset_t set;
671
672 /* Always make any pending restarted system calls return -EINTR */
673 current_thread_info()->restart_block.fn = do_no_restart_syscall;
674
675 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
676 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
677 goto badframe;
678
679 set.sig[0] = sigctx.oldmask;
680 set.sig[1] = sigctx._unused[3];
681 restore_sigmask(&set);
682
683 sr = (struct mcontext __user *) sigctx.regs;
684 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
685 || restore_user_regs(regs, sr, 1))
686 goto badframe;
687
688 sigreturn_exit(regs); /* doesn't return */
689 return 0;
690
691badframe:
692 force_sig(SIGSEGV, current);
693 return 0;
694}
695
696/*
697 * Note that 'init' is a special process: it doesn't get signals it doesn't
698 * want to handle. Thus you cannot kill init even with a SIGKILL even by
699 * mistake.
700 */
701int do_signal(sigset_t *oldset, struct pt_regs *regs)
702{
703 siginfo_t info;
704 struct k_sigaction ka;
705 unsigned long frame, newsp;
706 int signr, ret;
707
708 if (try_to_freeze()) {
709 signr = 0;
710 if (!signal_pending(current))
711 goto no_signal;
712 }
713
714 if (!oldset)
715 oldset = &current->blocked;
716
717 newsp = frame = 0;
718
719 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
720 no_signal:
721 if (TRAP(regs) == 0x0C00 /* System Call! */
722 && regs->ccr & 0x10000000 /* error signalled */
723 && ((ret = regs->gpr[3]) == ERESTARTSYS
724 || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
725 || ret == ERESTART_RESTARTBLOCK)) {
726
727 if (signr > 0
728 && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
729 || (ret == ERESTARTSYS
730 && !(ka.sa.sa_flags & SA_RESTART)))) {
731 /* make the system call return an EINTR error */
732 regs->result = -EINTR;
733 regs->gpr[3] = EINTR;
734 /* note that the cr0.SO bit is already set */
735 } else {
736 regs->nip -= 4; /* Back up & retry system call */
737 regs->result = 0;
738 regs->trap = 0;
739 if (ret == ERESTART_RESTARTBLOCK)
740 regs->gpr[0] = __NR_restart_syscall;
741 else
742 regs->gpr[3] = regs->orig_gpr3;
743 }
744 }
745
746 if (signr == 0)
747 return 0; /* no signals delivered */
748
749 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
750 && !on_sig_stack(regs->gpr[1]))
751 newsp = current->sas_ss_sp + current->sas_ss_size;
752 else
753 newsp = regs->gpr[1];
754 newsp &= ~0xfUL;
755
756 /* Whee! Actually deliver the signal. */
757 if (ka.sa.sa_flags & SA_SIGINFO)
758 handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
759 else
760 handle_signal(signr, &ka, &info, oldset, regs, newsp);
761
762 spin_lock_irq(&current->sighand->siglock);
763 sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
764 if (!(ka.sa.sa_flags & SA_NODEFER))
765 sigaddset(&current->blocked, signr);
766 recalc_sigpending();
767 spin_unlock_irq(&current->sighand->siglock);
768
769 return 1;
770}
771
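One detail worth noting from the deleted save_user_regs(): the two words written to frame->tramp are the PowerPC instructions for the sigreturn trampoline, "li r0,sigret; sc". A small sketch of how those constants decode (the syscall number used here is an assumed example):

/* Illustrative only: the two-word trampoline built by the deleted
 * save_user_regs() and flushed to the user stack with flush_icache_range(). */
#include <stdio.h>

int main(void)
{
	unsigned int sigret = 119;              /* e.g. a sigreturn syscall number; assumed value */
	unsigned int tramp[2];

	tramp[0] = 0x38000000u + sigret;        /* "li r0,sigret" (addi r0,0,sigret) */
	tramp[1] = 0x44000002u;                 /* "sc" -- system call into the kernel */

	printf("%08x %08x\n", tramp[0], tramp[1]);
	return 0;
}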
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 726fe7ce1747..bc5bf1124836 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -34,11 +34,11 @@
34#include <asm/thread_info.h> 34#include <asm/thread_info.h>
35#include <asm/tlbflush.h> 35#include <asm/tlbflush.h>
36#include <asm/xmon.h> 36#include <asm/xmon.h>
37#include <asm/machdep.h>
37 38
38volatile int smp_commenced; 39volatile int smp_commenced;
39int smp_tb_synchronized; 40int smp_tb_synchronized;
40struct cpuinfo_PPC cpu_data[NR_CPUS]; 41struct cpuinfo_PPC cpu_data[NR_CPUS];
41struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
42atomic_t ipi_recv; 42atomic_t ipi_recv;
43atomic_t ipi_sent; 43atomic_t ipi_sent;
44cpumask_t cpu_online_map; 44cpumask_t cpu_online_map;
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(cpu_online_map);
51EXPORT_SYMBOL(cpu_possible_map); 51EXPORT_SYMBOL(cpu_possible_map);
52 52
53/* SMP operations for this machine */ 53/* SMP operations for this machine */
54static struct smp_ops_t *smp_ops; 54struct smp_ops_t *smp_ops;
55 55
56/* all cpu mappings are 1-1 -- Cort */ 56/* all cpu mappings are 1-1 -- Cort */
57volatile unsigned long cpu_callin_map[NR_CPUS]; 57volatile unsigned long cpu_callin_map[NR_CPUS];
@@ -74,11 +74,11 @@ extern void __save_cpu_setup(void);
74#define PPC_MSG_XMON_BREAK 3 74#define PPC_MSG_XMON_BREAK 3
75 75
76static inline void 76static inline void
77smp_message_pass(int target, int msg, unsigned long data, int wait) 77smp_message_pass(int target, int msg)
78{ 78{
79 if (smp_ops){ 79 if (smp_ops) {
80 atomic_inc(&ipi_sent); 80 atomic_inc(&ipi_sent);
81 smp_ops->message_pass(target,msg,data,wait); 81 smp_ops->message_pass(target, msg);
82 } 82 }
83} 83}
84 84
@@ -119,7 +119,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
119void smp_send_tlb_invalidate(int cpu) 119void smp_send_tlb_invalidate(int cpu)
120{ 120{
121 if ( PVR_VER(mfspr(SPRN_PVR)) == 8 ) 121 if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
122 smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0); 122 smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB);
123} 123}
124 124
125void smp_send_reschedule(int cpu) 125void smp_send_reschedule(int cpu)
@@ -135,13 +135,13 @@ void smp_send_reschedule(int cpu)
135 */ 135 */
136 /* This is only used if `cpu' is running an idle task, 136 /* This is only used if `cpu' is running an idle task,
137 so it will reschedule itself anyway... */ 137 so it will reschedule itself anyway... */
138 smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0); 138 smp_message_pass(cpu, PPC_MSG_RESCHEDULE);
139} 139}
140 140
141#ifdef CONFIG_XMON 141#ifdef CONFIG_XMON
142void smp_send_xmon_break(int cpu) 142void smp_send_xmon_break(int cpu)
143{ 143{
144 smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0); 144 smp_message_pass(cpu, PPC_MSG_XMON_BREAK);
145} 145}
146#endif /* CONFIG_XMON */ 146#endif /* CONFIG_XMON */
147 147
@@ -224,7 +224,7 @@ static int __smp_call_function(void (*func) (void *info), void *info,
224 spin_lock(&call_lock); 224 spin_lock(&call_lock);
225 call_data = &data; 225 call_data = &data;
226 /* Send a message to all other CPUs and wait for them to respond */ 226 /* Send a message to all other CPUs and wait for them to respond */
227 smp_message_pass(target, PPC_MSG_CALL_FUNCTION, 0, 0); 227 smp_message_pass(target, PPC_MSG_CALL_FUNCTION);
228 228
229 /* Wait for response */ 229 /* Wait for response */
230 timeout = 1000000; 230 timeout = 1000000;
@@ -294,7 +294,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
294 smp_store_cpu_info(smp_processor_id()); 294 smp_store_cpu_info(smp_processor_id());
295 cpu_callin_map[smp_processor_id()] = 1; 295 cpu_callin_map[smp_processor_id()] = 1;
296 296
297 smp_ops = ppc_md.smp_ops;
298 if (smp_ops == NULL) { 297 if (smp_ops == NULL) {
299 printk("SMP not supported on this machine.\n"); 298 printk("SMP not supported on this machine.\n");
300 return; 299 return;
@@ -308,9 +307,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
308 /* Backup CPU 0 state */ 307 /* Backup CPU 0 state */
309 __save_cpu_setup(); 308 __save_cpu_setup();
310 309
311 if (smp_ops->space_timers)
312 smp_ops->space_timers(num_cpus);
313
314 for_each_cpu(cpu) { 310 for_each_cpu(cpu) {
315 if (cpu == smp_processor_id()) 311 if (cpu == smp_processor_id())
316 continue; 312 continue;
diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c
deleted file mode 100644
index 127f040de9de..000000000000
--- a/arch/ppc/kernel/syscalls.c
+++ /dev/null
@@ -1,268 +0,0 @@
1/*
2 * arch/ppc/kernel/sys_ppc.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/kernel/sys_i386.c"
8 * Adapted from the i386 version by Gary Thomas
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@cs.anu.edu.au).
11 *
12 * This file contains various random system calls that
13 * have a non-standard calling sequence on the Linux/PPC
14 * platform.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/mm.h>
26#include <linux/smp.h>
27#include <linux/smp_lock.h>
28#include <linux/sem.h>
29#include <linux/msg.h>
30#include <linux/shm.h>
31#include <linux/stat.h>
32#include <linux/syscalls.h>
33#include <linux/mman.h>
34#include <linux/sys.h>
35#include <linux/ipc.h>
36#include <linux/utsname.h>
37#include <linux/file.h>
38#include <linux/unistd.h>
39
40#include <asm/uaccess.h>
41#include <asm/ipc.h>
42#include <asm/semaphore.h>
43
44
45/*
46 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
47 *
48 * This is really horribly ugly.
49 */
50int
51sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
52{
53 int version, ret;
54
55 version = call >> 16; /* hack for backward compatibility */
56 call &= 0xffff;
57
58 ret = -ENOSYS;
59 switch (call) {
60 case SEMOP:
61 ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
62 second, NULL);
63 break;
64 case SEMTIMEDOP:
65 ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
66 second, (const struct timespec __user *) fifth);
67 break;
68 case SEMGET:
69 ret = sys_semget (first, second, third);
70 break;
71 case SEMCTL: {
72 union semun fourth;
73
74 if (!ptr)
75 break;
76 if ((ret = access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
77 || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
78 break;
79 ret = sys_semctl (first, second, third, fourth);
80 break;
81 }
82 case MSGSND:
83 ret = sys_msgsnd (first, (struct msgbuf __user *) ptr, second, third);
84 break;
85 case MSGRCV:
86 switch (version) {
87 case 0: {
88 struct ipc_kludge tmp;
89
90 if (!ptr)
91 break;
92 if ((ret = access_ok(VERIFY_READ, ptr, sizeof(tmp)) ? 0 : -EFAULT)
93 || (ret = copy_from_user(&tmp,
94 (struct ipc_kludge __user *) ptr,
95 sizeof (tmp)) ? -EFAULT : 0))
96 break;
97 ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
98 third);
99 break;
100 }
101 default:
102 ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
103 second, fifth, third);
104 break;
105 }
106 break;
107 case MSGGET:
108 ret = sys_msgget ((key_t) first, second);
109 break;
110 case MSGCTL:
111 ret = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
112 break;
113 case SHMAT: {
114 ulong raddr;
115
116 if ((ret = access_ok(VERIFY_WRITE, (ulong __user *) third,
117 sizeof(ulong)) ? 0 : -EFAULT))
118 break;
119 ret = do_shmat (first, (char __user *) ptr, second, &raddr);
120 if (ret)
121 break;
122 ret = put_user (raddr, (ulong __user *) third);
123 break;
124 }
125 case SHMDT:
126 ret = sys_shmdt ((char __user *)ptr);
127 break;
128 case SHMGET:
129 ret = sys_shmget (first, second, third);
130 break;
131 case SHMCTL:
132 ret = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
133 break;
134 }
135
136 return ret;
137}
138
139/*
140 * sys_pipe() is the normal C calling standard for creating
141 * a pipe. It's not the way unix traditionally does this, though.
142 */
143int sys_pipe(int __user *fildes)
144{
145 int fd[2];
146 int error;
147
148 error = do_pipe(fd);
149 if (!error) {
150 if (copy_to_user(fildes, fd, 2*sizeof(int)))
151 error = -EFAULT;
152 }
153 return error;
154}
155
156static inline unsigned long
157do_mmap2(unsigned long addr, size_t len,
158 unsigned long prot, unsigned long flags,
159 unsigned long fd, unsigned long pgoff)
160{
161 struct file * file = NULL;
162 int ret = -EBADF;
163
164 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
165 if (!(flags & MAP_ANONYMOUS)) {
166 if (!(file = fget(fd)))
167 goto out;
168 }
169
170 down_write(&current->mm->mmap_sem);
171 ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
172 up_write(&current->mm->mmap_sem);
173 if (file)
174 fput(file);
175out:
176 return ret;
177}
178
179unsigned long sys_mmap2(unsigned long addr, size_t len,
180 unsigned long prot, unsigned long flags,
181 unsigned long fd, unsigned long pgoff)
182{
183 return do_mmap2(addr, len, prot, flags, fd, pgoff);
184}
185
186unsigned long sys_mmap(unsigned long addr, size_t len,
187 unsigned long prot, unsigned long flags,
188 unsigned long fd, off_t offset)
189{
190 int err = -EINVAL;
191
192 if (offset & ~PAGE_MASK)
193 goto out;
194
195 err = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
196out:
197 return err;
198}
199
200/*
201 * Due to some executables calling the wrong select we sometimes
202 * get wrong args. This determines how the args are being passed
203 * (a single ptr to them all args passed) then calls
204 * sys_select() with the appropriate args. -- Cort
205 */
206int
207ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
208{
209 if ( (unsigned long)n >= 4096 )
210 {
211 unsigned long __user *buffer = (unsigned long __user *)n;
212 if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
213 || __get_user(n, buffer)
214 || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
215 || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
216 || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
217 || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
218 return -EFAULT;
219 }
220 return sys_select(n, inp, outp, exp, tvp);
221}
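
(Editorial aside: the single-pointer form that the `n >= 4096` branch above unpacks is the old-style select argument block used by some 32-bit ports; a hypothetical layout matching the five __get_user() loads above, written as ordinary userspace C for illustration:)

	#include <sys/select.h>
	#include <sys/time.h>

	/* Hypothetical illustration of the argument block ppc_select()
	 * unpacks when handed a single pointer instead of five arguments;
	 * the field order matches the five __get_user() calls above. */
	struct old_select_args {
		unsigned long n;	/* buffer[0] */
		fd_set *inp;		/* buffer[1] */
		fd_set *outp;		/* buffer[2] */
		fd_set *exp;		/* buffer[3] */
		struct timeval *tvp;	/* buffer[4] */
	};
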
222
223int sys_uname(struct old_utsname __user * name)
224{
225 int err = -EFAULT;
226
227 down_read(&uts_sem);
228 if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
229 err = 0;
230 up_read(&uts_sem);
231 return err;
232}
233
234int sys_olduname(struct oldold_utsname __user * name)
235{
236 int error;
237
238 if (!name)
239 return -EFAULT;
240 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
241 return -EFAULT;
242
243 down_read(&uts_sem);
244 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
245 error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
246 error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
247 error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
248 error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
249 error -= __put_user(0,name->release+__OLD_UTS_LEN);
250 error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
251 error -= __put_user(0,name->version+__OLD_UTS_LEN);
252 error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
253	error -= __put_user(0,name->machine+__OLD_UTS_LEN);
254 up_read(&uts_sem);
255
256 error = error ? -EFAULT : 0;
257 return error;
258}
259
260/*
261 * We put the arguments in a different order so we only use 6
262 * registers for arguments, rather than 7 as sys_fadvise64_64 needs
263 * (because `offset' goes in r5/r6).
264 */
265long ppc_fadvise64_64(int fd, int advice, loff_t offset, loff_t len)
266{
267 return sys_fadvise64_64(fd, offset, len, advice);
268}
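
(Editorial aside, a hedged reading of the comment above under the 32-bit SysV PPC convention that a 64-bit argument occupies an aligned GPR pair such as r5/r6 or r7/r8:)

	/*
	 * sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
	 *   r3 = fd, r4 skipped (pair alignment), r5/r6 = offset,
	 *   r7/r8 = len, r9 = advice                      -> 7 GPRs touched
	 *
	 * ppc_fadvise64_64(int fd, int advice, loff_t offset, loff_t len)
	 *   r3 = fd, r4 = advice, r5/r6 = offset, r7/r8 = len
	 *                                                  -> 6 GPRs, as the
	 *   syscall path only hands over six argument registers.
	 */
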
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index 22d7fd1e0aea..53ea723af60a 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -66,11 +66,6 @@
66 66
67#include <asm/time.h> 67#include <asm/time.h>
68 68
69/* XXX false sharing with below? */
70u64 jiffies_64 = INITIAL_JIFFIES;
71
72EXPORT_SYMBOL(jiffies_64);
73
74unsigned long disarm_decr[NR_CPUS]; 69unsigned long disarm_decr[NR_CPUS];
75 70
76extern struct timezone sys_tz; 71extern struct timezone sys_tz;
@@ -121,6 +116,15 @@ unsigned long profile_pc(struct pt_regs *regs)
121EXPORT_SYMBOL(profile_pc); 116EXPORT_SYMBOL(profile_pc);
122#endif 117#endif
123 118
119void wakeup_decrementer(void)
120{
121 set_dec(tb_ticks_per_jiffy);
122 /* No currently-supported powerbook has a 601,
123 * so use get_tbl, not native
124 */
125 last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
126}
127
124/* 128/*
125 * timer_interrupt - gets called when the decrementer overflows, 129 * timer_interrupt - gets called when the decrementer overflows,
126 * with interrupts disabled. 130 * with interrupts disabled.
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 961ede87be72..f265b81e7008 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -41,9 +41,14 @@
41#ifdef CONFIG_PMAC_BACKLIGHT 41#ifdef CONFIG_PMAC_BACKLIGHT
42#include <asm/backlight.h> 42#include <asm/backlight.h>
43#endif 43#endif
44#include <asm/perfmon.h> 44#include <asm/pmc.h>
45 45
46#ifdef CONFIG_XMON 46#ifdef CONFIG_XMON
47extern int xmon_bpt(struct pt_regs *regs);
48extern int xmon_sstep(struct pt_regs *regs);
49extern int xmon_iabr_match(struct pt_regs *regs);
50extern int xmon_dabr_match(struct pt_regs *regs);
51
47void (*debugger)(struct pt_regs *regs) = xmon; 52void (*debugger)(struct pt_regs *regs) = xmon;
48int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt; 53int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
49int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep; 54int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
@@ -74,7 +79,7 @@ void (*debugger_fault_handler)(struct pt_regs *regs);
74 79
75DEFINE_SPINLOCK(die_lock); 80DEFINE_SPINLOCK(die_lock);
76 81
77void die(const char * str, struct pt_regs * fp, long err) 82int die(const char * str, struct pt_regs * fp, long err)
78{ 83{
79 static int die_counter; 84 static int die_counter;
80 int nl = 0; 85 int nl = 0;
@@ -232,7 +237,7 @@ platform_machine_check(struct pt_regs *regs)
232{ 237{
233} 238}
234 239
235void MachineCheckException(struct pt_regs *regs) 240void machine_check_exception(struct pt_regs *regs)
236{ 241{
237 unsigned long reason = get_mc_reason(regs); 242 unsigned long reason = get_mc_reason(regs);
238 243
@@ -393,14 +398,14 @@ void SMIException(struct pt_regs *regs)
393#endif 398#endif
394} 399}
395 400
396void UnknownException(struct pt_regs *regs) 401void unknown_exception(struct pt_regs *regs)
397{ 402{
398 printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 403 printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
399 regs->nip, regs->msr, regs->trap, print_tainted()); 404 regs->nip, regs->msr, regs->trap, print_tainted());
400 _exception(SIGTRAP, regs, 0, 0); 405 _exception(SIGTRAP, regs, 0, 0);
401} 406}
402 407
403void InstructionBreakpoint(struct pt_regs *regs) 408void instruction_breakpoint_exception(struct pt_regs *regs)
404{ 409{
405 if (debugger_iabr_match(regs)) 410 if (debugger_iabr_match(regs))
406 return; 411 return;
@@ -575,7 +580,7 @@ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
575#define module_find_bug(x) NULL 580#define module_find_bug(x) NULL
576#endif 581#endif
577 582
578static struct bug_entry *find_bug(unsigned long bugaddr) 583struct bug_entry *find_bug(unsigned long bugaddr)
579{ 584{
580 struct bug_entry *bug; 585 struct bug_entry *bug;
581 586
@@ -622,7 +627,7 @@ int check_bug_trap(struct pt_regs *regs)
622 return 0; 627 return 0;
623} 628}
624 629
625void ProgramCheckException(struct pt_regs *regs) 630void program_check_exception(struct pt_regs *regs)
626{ 631{
627 unsigned int reason = get_reason(regs); 632 unsigned int reason = get_reason(regs);
628 extern int do_mathemu(struct pt_regs *regs); 633 extern int do_mathemu(struct pt_regs *regs);
@@ -654,7 +659,7 @@ void ProgramCheckException(struct pt_regs *regs)
654 giveup_fpu(current); 659 giveup_fpu(current);
655 preempt_enable(); 660 preempt_enable();
656 661
657 fpscr = current->thread.fpscr; 662 fpscr = current->thread.fpscr.val;
658 fpscr &= fpscr << 22; /* mask summary bits with enables */ 663 fpscr &= fpscr << 22; /* mask summary bits with enables */
659 if (fpscr & FPSCR_VX) 664 if (fpscr & FPSCR_VX)
660 code = FPE_FLTINV; 665 code = FPE_FLTINV;
@@ -701,7 +706,7 @@ void ProgramCheckException(struct pt_regs *regs)
701 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 706 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
702} 707}
703 708
704void SingleStepException(struct pt_regs *regs) 709void single_step_exception(struct pt_regs *regs)
705{ 710{
706 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ 711 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
707 if (debugger_sstep(regs)) 712 if (debugger_sstep(regs))
@@ -709,7 +714,7 @@ void SingleStepException(struct pt_regs *regs)
709 _exception(SIGTRAP, regs, TRAP_TRACE, 0); 714 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
710} 715}
711 716
712void AlignmentException(struct pt_regs *regs) 717void alignment_exception(struct pt_regs *regs)
713{ 718{
714 int fixed; 719 int fixed;
715 720
@@ -814,7 +819,18 @@ void TAUException(struct pt_regs *regs)
814} 819}
815#endif /* CONFIG_INT_TAU */ 820#endif /* CONFIG_INT_TAU */
816 821
817void AltivecUnavailException(struct pt_regs *regs) 822/*
823 * FP unavailable trap from kernel - print a message, but let
824 * the task use FP in the kernel until it returns to user mode.
825 */
826void kernel_fp_unavailable_exception(struct pt_regs *regs)
827{
828 regs->msr |= MSR_FP;
829 printk(KERN_ERR "floating point used in kernel (task=%p, pc=%lx)\n",
830 current, regs->nip);
831}
832
833void altivec_unavailable_exception(struct pt_regs *regs)
818{ 834{
819 static int kernel_altivec_count; 835 static int kernel_altivec_count;
820 836
@@ -835,7 +851,7 @@ void AltivecUnavailException(struct pt_regs *regs)
835} 851}
836 852
837#ifdef CONFIG_ALTIVEC 853#ifdef CONFIG_ALTIVEC
838void AltivecAssistException(struct pt_regs *regs) 854void altivec_assist_exception(struct pt_regs *regs)
839{ 855{
840 int err; 856 int err;
841 857
@@ -872,7 +888,7 @@ void AltivecAssistException(struct pt_regs *regs)
872#endif /* CONFIG_ALTIVEC */ 888#endif /* CONFIG_ALTIVEC */
873 889
874#ifdef CONFIG_E500 890#ifdef CONFIG_E500
875void PerformanceMonitorException(struct pt_regs *regs) 891void performance_monitor_exception(struct pt_regs *regs)
876{ 892{
877 perf_irq(regs); 893 perf_irq(regs);
878} 894}
diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S
deleted file mode 100644
index 82a21346bf80..000000000000
--- a/arch/ppc/kernel/vector.S
+++ /dev/null
@@ -1,217 +0,0 @@
1#include <asm/ppc_asm.h>
2#include <asm/processor.h>
3
4/*
5 * The routines below are in assembler so we can closely control the
6 * usage of floating-point registers. These routines must be called
7 * with preempt disabled.
8 */
9 .data
10fpzero:
11 .long 0
12fpone:
13 .long 0x3f800000 /* 1.0 in single-precision FP */
14fphalf:
15 .long 0x3f000000 /* 0.5 in single-precision FP */
16
17 .text
18/*
19 * Internal routine to enable floating point and set FPSCR to 0.
20 * Don't call it from C; it doesn't use the normal calling convention.
21 */
22fpenable:
23 mfmsr r10
24 ori r11,r10,MSR_FP
25 mtmsr r11
26 isync
27 stfd fr0,24(r1)
28 stfd fr1,16(r1)
29 stfd fr31,8(r1)
30 lis r11,fpzero@ha
31 mffs fr31
32 lfs fr1,fpzero@l(r11)
33 mtfsf 0xff,fr1
34 blr
35
36fpdisable:
37 mtfsf 0xff,fr31
38 lfd fr31,8(r1)
39 lfd fr1,16(r1)
40 lfd fr0,24(r1)
41 mtmsr r10
42 isync
43 blr
44
45/*
46 * Vector add, floating point.
47 */
48 .globl vaddfp
49vaddfp:
50 stwu r1,-32(r1)
51 mflr r0
52 stw r0,36(r1)
53 bl fpenable
54 li r0,4
55 mtctr r0
56 li r6,0
571: lfsx fr0,r4,r6
58 lfsx fr1,r5,r6
59 fadds fr0,fr0,fr1
60 stfsx fr0,r3,r6
61 addi r6,r6,4
62 bdnz 1b
63 bl fpdisable
64 lwz r0,36(r1)
65 mtlr r0
66 addi r1,r1,32
67 blr
68
69/*
70 * Vector subtract, floating point.
71 */
72 .globl vsubfp
73vsubfp:
74 stwu r1,-32(r1)
75 mflr r0
76 stw r0,36(r1)
77 bl fpenable
78 li r0,4
79 mtctr r0
80 li r6,0
811: lfsx fr0,r4,r6
82 lfsx fr1,r5,r6
83 fsubs fr0,fr0,fr1
84 stfsx fr0,r3,r6
85 addi r6,r6,4
86 bdnz 1b
87 bl fpdisable
88 lwz r0,36(r1)
89 mtlr r0
90 addi r1,r1,32
91 blr
92
93/*
94 * Vector multiply and add, floating point.
95 */
96 .globl vmaddfp
97vmaddfp:
98 stwu r1,-48(r1)
99 mflr r0
100 stw r0,52(r1)
101 bl fpenable
102 stfd fr2,32(r1)
103 li r0,4
104 mtctr r0
105 li r7,0
1061: lfsx fr0,r4,r7
107 lfsx fr1,r5,r7
108 lfsx fr2,r6,r7
109 fmadds fr0,fr0,fr2,fr1
110 stfsx fr0,r3,r7
111 addi r7,r7,4
112 bdnz 1b
113 lfd fr2,32(r1)
114 bl fpdisable
115 lwz r0,52(r1)
116 mtlr r0
117 addi r1,r1,48
118 blr
119
120/*
121 * Vector negative multiply and subtract, floating point.
122 */
123 .globl vnmsubfp
124vnmsubfp:
125 stwu r1,-48(r1)
126 mflr r0
127 stw r0,52(r1)
128 bl fpenable
129 stfd fr2,32(r1)
130 li r0,4
131 mtctr r0
132 li r7,0
1331: lfsx fr0,r4,r7
134 lfsx fr1,r5,r7
135 lfsx fr2,r6,r7
136 fnmsubs fr0,fr0,fr2,fr1
137 stfsx fr0,r3,r7
138 addi r7,r7,4
139 bdnz 1b
140 lfd fr2,32(r1)
141 bl fpdisable
142 lwz r0,52(r1)
143 mtlr r0
144 addi r1,r1,48
145 blr
146
147/*
148 * Vector reciprocal estimate. We just compute 1.0/x.
149 * r3 -> destination, r4 -> source.
150 */
151 .globl vrefp
152vrefp:
153 stwu r1,-32(r1)
154 mflr r0
155 stw r0,36(r1)
156 bl fpenable
157 lis r9,fpone@ha
158 li r0,4
159 lfs fr1,fpone@l(r9)
160 mtctr r0
161 li r6,0
1621: lfsx fr0,r4,r6
163 fdivs fr0,fr1,fr0
164 stfsx fr0,r3,r6
165 addi r6,r6,4
166 bdnz 1b
167 bl fpdisable
168 lwz r0,36(r1)
169 mtlr r0
170 addi r1,r1,32
171 blr
172
173/*
174 * Vector reciprocal square-root estimate, floating point.
175 * We use the frsqrte instruction for the initial estimate followed
176 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
177 * r3 -> destination, r4 -> source.
178 */
179 .globl vrsqrtefp
180vrsqrtefp:
181 stwu r1,-48(r1)
182 mflr r0
183 stw r0,52(r1)
184 bl fpenable
185 stfd fr2,32(r1)
186 stfd fr3,40(r1)
187 stfd fr4,48(r1)
188 stfd fr5,56(r1)
189 lis r9,fpone@ha
190 lis r8,fphalf@ha
191 li r0,4
192 lfs fr4,fpone@l(r9)
193 lfs fr5,fphalf@l(r8)
194 mtctr r0
195 li r6,0
1961: lfsx fr0,r4,r6
197 frsqrte fr1,fr0 /* r = frsqrte(s) */
198 fmuls fr3,fr1,fr0 /* r * s */
199 fmuls fr2,fr1,fr5 /* r * 0.5 */
200 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
201 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
202 fmuls fr3,fr1,fr0 /* r * s */
203 fmuls fr2,fr1,fr5 /* r * 0.5 */
204 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
205 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
206 stfsx fr1,r3,r6
207 addi r6,r6,4
208 bdnz 1b
209 lfd fr5,56(r1)
210 lfd fr4,48(r1)
211 lfd fr3,40(r1)
212 lfd fr2,32(r1)
213 bl fpdisable
214 lwz r0,36(r1)
215 mtlr r0
216 addi r1,r1,32
217 blr
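
(Editorial aside: the estimate-plus-two-Newton-steps scheme that the vrsqrtefp comments spell out is the standard reciprocal square-root iteration; the derivation below is general background, not taken from the patch. Solving f(r) = 1/r^2 - s = 0 with Newton's method gives

	r_{n+1} = r_n - \frac{f(r_n)}{f'(r_n)}
	        = r_n + \tfrac{1}{2}\, r_n \left(1 - s\, r_n^2\right)

which is exactly the "r = r + 0.5 * r * (1 - s * r * r)" update applied twice above, each step roughly doubling the number of correct bits of the frsqrte hardware estimate.)
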
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 17d2db7e537d..09c6525cfa61 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -149,32 +149,6 @@ SECTIONS
149 149
150 . = ALIGN(4096); 150 . = ALIGN(4096);
151 _sextratext = .; 151 _sextratext = .;
152 __pmac_begin = .;
153 .pmac.text : { *(.pmac.text) }
154 .pmac.data : { *(.pmac.data) }
155 . = ALIGN(4096);
156 __pmac_end = .;
157
158 . = ALIGN(4096);
159 __prep_begin = .;
160 .prep.text : { *(.prep.text) }
161 .prep.data : { *(.prep.data) }
162 . = ALIGN(4096);
163 __prep_end = .;
164
165 . = ALIGN(4096);
166 __chrp_begin = .;
167 .chrp.text : { *(.chrp.text) }
168 .chrp.data : { *(.chrp.data) }
169 . = ALIGN(4096);
170 __chrp_end = .;
171
172 . = ALIGN(4096);
173 __openfirmware_begin = .;
174 .openfirmware.text : { *(.openfirmware.text) }
175 .openfirmware.data : { *(.openfirmware.data) }
176 . = ALIGN(4096);
177 __openfirmware_end = .;
178 _eextratext = .; 152 _eextratext = .;
179 153
180 __bss_start = .; 154 __bss_start = .;
diff --git a/arch/ppc/lib/string.S b/arch/ppc/lib/string.S
index 36c9b97fd92a..2e258c49e8be 100644
--- a/arch/ppc/lib/string.S
+++ b/arch/ppc/lib/string.S
@@ -65,9 +65,9 @@
65 .stabs "arch/ppc/lib/",N_SO,0,0,0f 65 .stabs "arch/ppc/lib/",N_SO,0,0,0f
66 .stabs "string.S",N_SO,0,0,0f 66 .stabs "string.S",N_SO,0,0,0f
67 67
68CACHELINE_BYTES = L1_CACHE_LINE_SIZE 68CACHELINE_BYTES = L1_CACHE_BYTES
69LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE 69LG_CACHELINE_BYTES = L1_CACHE_SHIFT
70CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1) 70CACHELINE_MASK = (L1_CACHE_BYTES-1)
71 71
72_GLOBAL(strcpy) 72_GLOBAL(strcpy)
73 addi r5,r3,-1 73 addi r5,r3,-1
@@ -265,12 +265,12 @@ _GLOBAL(cacheable_memcpy)
265 dcbz r11,r6 265 dcbz r11,r6
266#endif 266#endif
267 COPY_16_BYTES 267 COPY_16_BYTES
268#if L1_CACHE_LINE_SIZE >= 32 268#if L1_CACHE_BYTES >= 32
269 COPY_16_BYTES 269 COPY_16_BYTES
270#if L1_CACHE_LINE_SIZE >= 64 270#if L1_CACHE_BYTES >= 64
271 COPY_16_BYTES 271 COPY_16_BYTES
272 COPY_16_BYTES 272 COPY_16_BYTES
273#if L1_CACHE_LINE_SIZE >= 128 273#if L1_CACHE_BYTES >= 128
274 COPY_16_BYTES 274 COPY_16_BYTES
275 COPY_16_BYTES 275 COPY_16_BYTES
276 COPY_16_BYTES 276 COPY_16_BYTES
@@ -485,12 +485,12 @@ _GLOBAL(__copy_tofrom_user)
485 .text 485 .text
486/* the main body of the cacheline loop */ 486/* the main body of the cacheline loop */
487 COPY_16_BYTES_WITHEX(0) 487 COPY_16_BYTES_WITHEX(0)
488#if L1_CACHE_LINE_SIZE >= 32 488#if L1_CACHE_BYTES >= 32
489 COPY_16_BYTES_WITHEX(1) 489 COPY_16_BYTES_WITHEX(1)
490#if L1_CACHE_LINE_SIZE >= 64 490#if L1_CACHE_BYTES >= 64
491 COPY_16_BYTES_WITHEX(2) 491 COPY_16_BYTES_WITHEX(2)
492 COPY_16_BYTES_WITHEX(3) 492 COPY_16_BYTES_WITHEX(3)
493#if L1_CACHE_LINE_SIZE >= 128 493#if L1_CACHE_BYTES >= 128
494 COPY_16_BYTES_WITHEX(4) 494 COPY_16_BYTES_WITHEX(4)
495 COPY_16_BYTES_WITHEX(5) 495 COPY_16_BYTES_WITHEX(5)
496 COPY_16_BYTES_WITHEX(6) 496 COPY_16_BYTES_WITHEX(6)
@@ -544,12 +544,12 @@ _GLOBAL(__copy_tofrom_user)
544 * 104f (if in read part) or 105f (if in write part), after updating r5 544 * 104f (if in read part) or 105f (if in write part), after updating r5
545 */ 545 */
546 COPY_16_BYTES_EXCODE(0) 546 COPY_16_BYTES_EXCODE(0)
547#if L1_CACHE_LINE_SIZE >= 32 547#if L1_CACHE_BYTES >= 32
548 COPY_16_BYTES_EXCODE(1) 548 COPY_16_BYTES_EXCODE(1)
549#if L1_CACHE_LINE_SIZE >= 64 549#if L1_CACHE_BYTES >= 64
550 COPY_16_BYTES_EXCODE(2) 550 COPY_16_BYTES_EXCODE(2)
551 COPY_16_BYTES_EXCODE(3) 551 COPY_16_BYTES_EXCODE(3)
552#if L1_CACHE_LINE_SIZE >= 128 552#if L1_CACHE_BYTES >= 128
553 COPY_16_BYTES_EXCODE(4) 553 COPY_16_BYTES_EXCODE(4)
554 COPY_16_BYTES_EXCODE(5) 554 COPY_16_BYTES_EXCODE(5)
555 COPY_16_BYTES_EXCODE(6) 555 COPY_16_BYTES_EXCODE(6)
diff --git a/arch/ppc/math-emu/sfp-machine.h b/arch/ppc/math-emu/sfp-machine.h
index 686e06d29186..4b17d83cfcdd 100644
--- a/arch/ppc/math-emu/sfp-machine.h
+++ b/arch/ppc/math-emu/sfp-machine.h
@@ -166,7 +166,7 @@ extern int fp_pack_ds(void *, long, unsigned long, unsigned long, long, long);
166#include <linux/kernel.h> 166#include <linux/kernel.h>
167#include <linux/sched.h> 167#include <linux/sched.h>
168 168
169#define __FPU_FPSCR (current->thread.fpscr) 169#define __FPU_FPSCR (current->thread.fpscr.val)
170 170
171/* We only actually write to the destination register 171/* We only actually write to the destination register
172 * if exceptions signalled (if any) will not trap. 172 * if exceptions signalled (if any) will not trap.
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index f421a4b337f6..99b48abd3296 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -69,15 +69,12 @@ int init_bootmem_done;
69int boot_mapsize; 69int boot_mapsize;
70#ifdef CONFIG_PPC_PMAC 70#ifdef CONFIG_PPC_PMAC
71unsigned long agp_special_page; 71unsigned long agp_special_page;
72EXPORT_SYMBOL(agp_special_page);
72#endif 73#endif
73 74
74extern char _end[]; 75extern char _end[];
75extern char etext[], _stext[]; 76extern char etext[], _stext[];
76extern char __init_begin, __init_end; 77extern char __init_begin, __init_end;
77extern char __prep_begin, __prep_end;
78extern char __chrp_begin, __chrp_end;
79extern char __pmac_begin, __pmac_end;
80extern char __openfirmware_begin, __openfirmware_end;
81 78
82#ifdef CONFIG_HIGHMEM 79#ifdef CONFIG_HIGHMEM
83pte_t *kmap_pte; 80pte_t *kmap_pte;
@@ -167,14 +164,6 @@ void free_initmem(void)
167 164
168 printk ("Freeing unused kernel memory:"); 165 printk ("Freeing unused kernel memory:");
169 FREESEC(init); 166 FREESEC(init);
170 if (_machine != _MACH_Pmac)
171 FREESEC(pmac);
172 if (_machine != _MACH_chrp)
173 FREESEC(chrp);
174 if (_machine != _MACH_prep)
175 FREESEC(prep);
176 if (!have_of)
177 FREESEC(openfirmware);
178 printk("\n"); 167 printk("\n");
179 ppc_md.progress = NULL; 168 ppc_md.progress = NULL;
180#undef FREESEC 169#undef FREESEC
@@ -648,18 +637,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
648 */ 637 */
649int page_is_ram(unsigned long pfn) 638int page_is_ram(unsigned long pfn)
650{ 639{
651 unsigned long paddr = (pfn << PAGE_SHIFT); 640 return pfn < max_pfn;
652
653 return paddr < __pa(high_memory);
654} 641}
655 642
656pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, 643pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
657 unsigned long size, pgprot_t vma_prot) 644 unsigned long size, pgprot_t vma_prot)
658{ 645{
659 if (ppc_md.phys_mem_access_prot) 646 if (ppc_md.phys_mem_access_prot)
660 return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot); 647 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
661 648
662 if (!page_is_ram(addr >> PAGE_SHIFT)) 649 if (!page_is_ram(pfn))
663 vma_prot = __pgprot(pgprot_val(vma_prot) 650 vma_prot = __pgprot(pgprot_val(vma_prot)
664 | _PAGE_GUARDED | _PAGE_NO_CACHE); 651 | _PAGE_GUARDED | _PAGE_NO_CACHE);
665 return vma_prot; 652 return vma_prot;
diff --git a/arch/ppc/oprofile/common.c b/arch/ppc/oprofile/common.c
deleted file mode 100644
index 3169c67abea7..000000000000
--- a/arch/ppc/oprofile/common.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * PPC 32 oprofile support
3 * Based on PPC64 oprofile support
4 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
5 *
6 * Copyright (C) Freescale Semiconductor, Inc 2004
7 *
8 * Author: Andy Fleming
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/oprofile.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/smp.h>
20#include <linux/errno.h>
21#include <asm/ptrace.h>
22#include <asm/system.h>
23#include <asm/perfmon.h>
24#include <asm/cputable.h>
25
26#include "op_impl.h"
27
28static struct op_ppc32_model *model;
29
30static struct op_counter_config ctr[OP_MAX_COUNTER];
31static struct op_system_config sys;
32
33static void op_handle_interrupt(struct pt_regs *regs)
34{
35 model->handle_interrupt(regs, ctr);
36}
37
38static int op_ppc32_setup(void)
39{
40 /* Install our interrupt handler into the existing hook. */
41 if(request_perfmon_irq(&op_handle_interrupt))
42 return -EBUSY;
43
44 mb();
45
46 /* Pre-compute the values to stuff in the hardware registers. */
47 model->reg_setup(ctr, &sys, model->num_counters);
48
49#if 0
50 /* FIXME: Make multi-cpu work */
51 /* Configure the registers on all cpus. */
52 on_each_cpu(model->reg_setup, NULL, 0, 1);
53#endif
54
55 return 0;
56}
57
58static void op_ppc32_shutdown(void)
59{
60 mb();
61
62 /* Remove our interrupt handler. We may be removing this module. */
63 free_perfmon_irq();
64}
65
66static void op_ppc32_cpu_start(void *dummy)
67{
68 model->start(ctr);
69}
70
71static int op_ppc32_start(void)
72{
73 on_each_cpu(op_ppc32_cpu_start, NULL, 0, 1);
74 return 0;
75}
76
77static inline void op_ppc32_cpu_stop(void *dummy)
78{
79 model->stop();
80}
81
82static void op_ppc32_stop(void)
83{
84 on_each_cpu(op_ppc32_cpu_stop, NULL, 0, 1);
85}
86
87static int op_ppc32_create_files(struct super_block *sb, struct dentry *root)
88{
89 int i;
90
91 for (i = 0; i < model->num_counters; ++i) {
92 struct dentry *dir;
93 char buf[3];
94
95 snprintf(buf, sizeof buf, "%d", i);
96 dir = oprofilefs_mkdir(sb, root, buf);
97
98 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
99 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
100 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
101 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
102 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
103
104 /* FIXME: Not sure if this is used */
105 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
106 }
107
108 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
109 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
110
111 /* Default to tracing both kernel and user */
112 sys.enable_kernel = 1;
113 sys.enable_user = 1;
114
115 return 0;
116}
117
118static struct oprofile_operations oprof_ppc32_ops = {
119 .create_files = op_ppc32_create_files,
120 .setup = op_ppc32_setup,
121 .shutdown = op_ppc32_shutdown,
122 .start = op_ppc32_start,
123 .stop = op_ppc32_stop,
124 .cpu_type = NULL /* To be filled in below. */
125};
126
127int __init oprofile_arch_init(struct oprofile_operations *ops)
128{
129 char *name;
130 int cpu_id = smp_processor_id();
131
132#ifdef CONFIG_FSL_BOOKE
133 model = &op_model_fsl_booke;
134#else
135 return -ENODEV;
136#endif
137
138 name = kmalloc(32, GFP_KERNEL);
139
140 if (NULL == name)
141 return -ENOMEM;
142
143 sprintf(name, "ppc/%s", cur_cpu_spec[cpu_id]->cpu_name);
144
145 oprof_ppc32_ops.cpu_type = name;
146
147 model->num_counters = cur_cpu_spec[cpu_id]->num_pmcs;
148
149 *ops = oprof_ppc32_ops;
150
151 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
152 oprof_ppc32_ops.cpu_type);
153
154 return 0;
155}
156
157void oprofile_arch_exit(void)
158{
159 kfree(oprof_ppc32_ops.cpu_type);
160 oprof_ppc32_ops.cpu_type = NULL;
161}
diff --git a/arch/ppc/oprofile/op_impl.h b/arch/ppc/oprofile/op_impl.h
deleted file mode 100644
index bc336dc971e3..000000000000
--- a/arch/ppc/oprofile/op_impl.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
3 *
4 * Based on alpha version.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef OP_IMPL_H
13#define OP_IMPL_H 1
14
15#define OP_MAX_COUNTER 8
16
17/* Per-counter configuration as set via oprofilefs. */
18struct op_counter_config {
19 unsigned long enabled;
20 unsigned long event;
21 unsigned long count;
22 unsigned long kernel;
23 unsigned long user;
24 unsigned long unit_mask;
25};
26
27/* System-wide configuration as set via oprofilefs. */
28struct op_system_config {
29 unsigned long enable_kernel;
30 unsigned long enable_user;
31};
32
33/* Per-arch configuration */
34struct op_ppc32_model {
35 void (*reg_setup) (struct op_counter_config *,
36 struct op_system_config *,
37 int num_counters);
38 void (*start) (struct op_counter_config *);
39 void (*stop) (void);
40 void (*handle_interrupt) (struct pt_regs *,
41 struct op_counter_config *);
42 int num_counters;
43};
44
45#endif /* OP_IMPL_H */
diff --git a/arch/ppc/platforms/4xx/bamboo.c b/arch/ppc/platforms/4xx/bamboo.c
index 78a403b48dba..159b228eca1e 100644
--- a/arch/ppc/platforms/4xx/bamboo.c
+++ b/arch/ppc/platforms/4xx/bamboo.c
@@ -51,7 +51,7 @@
51#include <syslib/gen550.h> 51#include <syslib/gen550.h>
52#include <syslib/ibm440gx_common.h> 52#include <syslib/ibm440gx_common.h>
53 53
54bd_t __res; 54extern bd_t __res;
55 55
56static struct ibm44x_clocks clocks __initdata; 56static struct ibm44x_clocks clocks __initdata;
57 57
@@ -425,17 +425,7 @@ bamboo_setup_arch(void)
425void __init platform_init(unsigned long r3, unsigned long r4, 425void __init platform_init(unsigned long r3, unsigned long r4,
426 unsigned long r5, unsigned long r6, unsigned long r7) 426 unsigned long r5, unsigned long r6, unsigned long r7)
427{ 427{
428 parse_bootinfo(find_bootinfo()); 428 ibm44x_platform_init(r3, r4, r5, r6, r7);
429
430 /*
431 * If we were passed in a board information, copy it into the
432 * residual data area.
433 */
434 if (r3)
435 __res = *(bd_t *)(r3 + KERNELBASE);
436
437
438 ibm44x_platform_init();
439 429
440 ppc_md.setup_arch = bamboo_setup_arch; 430 ppc_md.setup_arch = bamboo_setup_arch;
441 ppc_md.show_cpuinfo = bamboo_show_cpuinfo; 431 ppc_md.show_cpuinfo = bamboo_show_cpuinfo;
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index 27b778ab903b..64ebae19cdbb 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -54,7 +54,7 @@
54#include <syslib/gen550.h> 54#include <syslib/gen550.h>
55#include <syslib/ibm440gp_common.h> 55#include <syslib/ibm440gp_common.h>
56 56
57bd_t __res; 57extern bd_t __res;
58 58
59static struct ibm44x_clocks clocks __initdata; 59static struct ibm44x_clocks clocks __initdata;
60 60
@@ -90,7 +90,7 @@ ebony_calibrate_decr(void)
90 * on Rev. C silicon then errata forces us to 90 * on Rev. C silicon then errata forces us to
91 * use the internal clock. 91 * use the internal clock.
92 */ 92 */
93 if (strcmp(cur_cpu_spec[0]->cpu_name, "440GP Rev. B") == 0) 93 if (strcmp(cur_cpu_spec->cpu_name, "440GP Rev. B") == 0)
94 freq = EBONY_440GP_RB_SYSCLK; 94 freq = EBONY_440GP_RB_SYSCLK;
95 else 95 else
96 freq = EBONY_440GP_RC_SYSCLK; 96 freq = EBONY_440GP_RC_SYSCLK;
@@ -317,16 +317,7 @@ ebony_setup_arch(void)
317void __init platform_init(unsigned long r3, unsigned long r4, 317void __init platform_init(unsigned long r3, unsigned long r4,
318 unsigned long r5, unsigned long r6, unsigned long r7) 318 unsigned long r5, unsigned long r6, unsigned long r7)
319{ 319{
320 parse_bootinfo(find_bootinfo()); 320 ibm44x_platform_init(r3, r4, r5, r6, r7);
321
322 /*
323 * If we were passed in a board information, copy it into the
324 * residual data area.
325 */
326 if (r3)
327 __res = *(bd_t *)(r3 + KERNELBASE);
328
329 ibm44x_platform_init();
330 321
331 ppc_md.setup_arch = ebony_setup_arch; 322 ppc_md.setup_arch = ebony_setup_arch;
332 ppc_md.show_cpuinfo = ebony_show_cpuinfo; 323 ppc_md.show_cpuinfo = ebony_show_cpuinfo;
diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c
index 16d953bda22c..d810b736d9bf 100644
--- a/arch/ppc/platforms/4xx/luan.c
+++ b/arch/ppc/platforms/4xx/luan.c
@@ -52,7 +52,7 @@
52#include <syslib/ibm440gx_common.h> 52#include <syslib/ibm440gx_common.h>
53#include <syslib/ibm440sp_common.h> 53#include <syslib/ibm440sp_common.h>
54 54
55bd_t __res; 55extern bd_t __res;
56 56
57static struct ibm44x_clocks clocks __initdata; 57static struct ibm44x_clocks clocks __initdata;
58 58
@@ -355,16 +355,7 @@ luan_setup_arch(void)
355void __init platform_init(unsigned long r3, unsigned long r4, 355void __init platform_init(unsigned long r3, unsigned long r4,
356 unsigned long r5, unsigned long r6, unsigned long r7) 356 unsigned long r5, unsigned long r6, unsigned long r7)
357{ 357{
358 parse_bootinfo(find_bootinfo()); 358 ibm44x_platform_init(r3, r4, r5, r6, r7);
359
360 /*
361 * If we were passed in a board information, copy it into the
362 * residual data area.
363 */
364 if (r3)
365 __res = *(bd_t *)(r3 + KERNELBASE);
366
367 ibm44x_platform_init();
368 359
369 ppc_md.setup_arch = luan_setup_arch; 360 ppc_md.setup_arch = luan_setup_arch;
370 ppc_md.show_cpuinfo = luan_show_cpuinfo; 361 ppc_md.show_cpuinfo = luan_show_cpuinfo;
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c
index 506949c5dd29..73b2c98158f6 100644
--- a/arch/ppc/platforms/4xx/ocotea.c
+++ b/arch/ppc/platforms/4xx/ocotea.c
@@ -52,7 +52,7 @@
52#include <syslib/gen550.h> 52#include <syslib/gen550.h>
53#include <syslib/ibm440gx_common.h> 53#include <syslib/ibm440gx_common.h>
54 54
55bd_t __res; 55extern bd_t __res;
56 56
57static struct ibm44x_clocks clocks __initdata; 57static struct ibm44x_clocks clocks __initdata;
58 58
@@ -286,6 +286,15 @@ ocotea_setup_arch(void)
286 286
287 ibm440gx_tah_enable(); 287 ibm440gx_tah_enable();
288 288
289 /*
290 * Determine various clocks.
291 * To be completely correct we should get SysClk
292 * from FPGA, because it can be changed by on-board switches
293 * --ebs
294 */
295 ibm440gx_get_clocks(&clocks, 33333333, 6 * 1843200);
296 ocp_sys_info.opb_bus_freq = clocks.opb;
297
289 /* Setup TODC access */ 298 /* Setup TODC access */
290 TODC_INIT(TODC_TYPE_DS1743, 299 TODC_INIT(TODC_TYPE_DS1743,
291 0, 300 0,
@@ -324,25 +333,7 @@ static void __init ocotea_init(void)
324void __init platform_init(unsigned long r3, unsigned long r4, 333void __init platform_init(unsigned long r3, unsigned long r4,
325 unsigned long r5, unsigned long r6, unsigned long r7) 334 unsigned long r5, unsigned long r6, unsigned long r7)
326{ 335{
327 parse_bootinfo(find_bootinfo()); 336 ibm44x_platform_init(r3, r4, r5, r6, r7);
328
329 /*
330 * If we were passed in a board information, copy it into the
331 * residual data area.
332 */
333 if (r3)
334 __res = *(bd_t *)(r3 + KERNELBASE);
335
336 /*
337 * Determine various clocks.
338 * To be completely correct we should get SysClk
339 * from FPGA, because it can be changed by on-board switches
340 * --ebs
341 */
342 ibm440gx_get_clocks(&clocks, 33333333, 6 * 1843200);
343 ocp_sys_info.opb_bus_freq = clocks.opb;
344
345 ibm44x_platform_init();
346 337
347 ppc_md.setup_arch = ocotea_setup_arch; 338 ppc_md.setup_arch = ocotea_setup_arch;
348 ppc_md.show_cpuinfo = ocotea_show_cpuinfo; 339 ppc_md.show_cpuinfo = ocotea_show_cpuinfo;
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.h b/arch/ppc/platforms/83xx/mpc834x_sys.h
index 1584cd77a9ef..58e44c042535 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.h
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.h
@@ -19,7 +19,6 @@
19 19
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/seq_file.h>
23#include <syslib/ppc83xx_setup.h> 22#include <syslib/ppc83xx_setup.h>
24#include <asm/ppcboot.h> 23#include <asm/ppcboot.h>
25 24
diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c
index 7dc8a68acfd0..7e952c1228cb 100644
--- a/arch/ppc/platforms/85xx/mpc8540_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8540_ads.c
@@ -52,6 +52,10 @@
52 52
53#include <syslib/ppc85xx_setup.h> 53#include <syslib/ppc85xx_setup.h>
54 54
55static const char *GFAR_PHY_0 = "phy0:0";
56static const char *GFAR_PHY_1 = "phy0:1";
57static const char *GFAR_PHY_3 = "phy0:3";
58
55/* ************************************************************************ 59/* ************************************************************************
56 * 60 *
57 * Setup the architecture 61 * Setup the architecture
@@ -63,6 +67,7 @@ mpc8540ads_setup_arch(void)
63 bd_t *binfo = (bd_t *) __res; 67 bd_t *binfo = (bd_t *) __res;
64 unsigned int freq; 68 unsigned int freq;
65 struct gianfar_platform_data *pdata; 69 struct gianfar_platform_data *pdata;
70 struct gianfar_mdio_data *mdata;
66 71
67 /* get the core frequency */ 72 /* get the core frequency */
68 freq = binfo->bi_intfreq; 73 freq = binfo->bi_intfreq;
@@ -89,34 +94,35 @@ mpc8540ads_setup_arch(void)
89 invalidate_tlbcam_entry(num_tlbcam_entries - 1); 94 invalidate_tlbcam_entry(num_tlbcam_entries - 1);
90#endif 95#endif
91 96
97 /* setup the board related info for the MDIO bus */
98 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
99
100 mdata->irq[0] = MPC85xx_IRQ_EXT5;
101 mdata->irq[1] = MPC85xx_IRQ_EXT5;
102 mdata->irq[2] = -1;
103 mdata->irq[3] = MPC85xx_IRQ_EXT5;
104 mdata->irq[31] = -1;
105 mdata->paddr += binfo->bi_immr_base;
106
92 /* setup the board related information for the enet controllers */ 107 /* setup the board related information for the enet controllers */
93 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 108 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
94 if (pdata) { 109 if (pdata) {
95 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 110 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
96 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 111 pdata->bus_id = GFAR_PHY_0;
97 pdata->phyid = 0;
98 /* fixup phy address */
99 pdata->phy_reg_addr += binfo->bi_immr_base;
100 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 112 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
101 } 113 }
102 114
103 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 115 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
104 if (pdata) { 116 if (pdata) {
105 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 117 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
106 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 118 pdata->bus_id = GFAR_PHY_1;
107 pdata->phyid = 1;
108 /* fixup phy address */
109 pdata->phy_reg_addr += binfo->bi_immr_base;
110 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 119 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
111 } 120 }
112 121
113 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC); 122 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC);
114 if (pdata) { 123 if (pdata) {
115 pdata->board_flags = 0; 124 pdata->board_flags = 0;
116 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 125 pdata->bus_id = GFAR_PHY_3;
117 pdata->phyid = 3;
118 /* fixup phy address */
119 pdata->phy_reg_addr += binfo->bi_immr_base;
120 memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6); 126 memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6);
121 } 127 }
122 128
diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
index 8841fd7da6ee..208433f1e93a 100644
--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8560_ads.c
@@ -56,6 +56,10 @@
56#include <syslib/ppc85xx_setup.h> 56#include <syslib/ppc85xx_setup.h>
57 57
58 58
59static const char *GFAR_PHY_0 = "phy0:0";
60static const char *GFAR_PHY_1 = "phy0:1";
61static const char *GFAR_PHY_3 = "phy0:3";
62
59/* ************************************************************************ 63/* ************************************************************************
60 * 64 *
61 * Setup the architecture 65 * Setup the architecture
@@ -68,6 +72,7 @@ mpc8560ads_setup_arch(void)
68 bd_t *binfo = (bd_t *) __res; 72 bd_t *binfo = (bd_t *) __res;
69 unsigned int freq; 73 unsigned int freq;
70 struct gianfar_platform_data *pdata; 74 struct gianfar_platform_data *pdata;
75 struct gianfar_mdio_data *mdata;
71 76
72 cpm2_reset(); 77 cpm2_reset();
73 78
@@ -86,24 +91,28 @@ mpc8560ads_setup_arch(void)
86 mpc85xx_setup_hose(); 91 mpc85xx_setup_hose();
87#endif 92#endif
88 93
94 /* setup the board related info for the MDIO bus */
95 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
96
97 mdata->irq[0] = MPC85xx_IRQ_EXT5;
98 mdata->irq[1] = MPC85xx_IRQ_EXT5;
99 mdata->irq[2] = -1;
100 mdata->irq[3] = MPC85xx_IRQ_EXT5;
101 mdata->irq[31] = -1;
102 mdata->paddr += binfo->bi_immr_base;
103
89 /* setup the board related information for the enet controllers */ 104 /* setup the board related information for the enet controllers */
90 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 105 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
91 if (pdata) { 106 if (pdata) {
92 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 107 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
93 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 108 pdata->bus_id = GFAR_PHY_0;
94 pdata->phyid = 0;
95 /* fixup phy address */
96 pdata->phy_reg_addr += binfo->bi_immr_base;
97 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 109 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
98 } 110 }
99 111
100 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 112 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
101 if (pdata) { 113 if (pdata) {
102 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 114 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
103 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 115 pdata->bus_id = GFAR_PHY_1;
104 pdata->phyid = 1;
105 /* fixup phy address */
106 pdata->phy_reg_addr += binfo->bi_immr_base;
107 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 116 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
108 } 117 }
109 118
diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
index 3875e839cff7..84acf6e8d45e 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
+++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
@@ -19,7 +19,6 @@
19 19
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/seq_file.h>
23#include <asm/ppcboot.h> 22#include <asm/ppcboot.h>
24 23
25#define BOARD_CCSRBAR ((uint)0xe0000000) 24#define BOARD_CCSRBAR ((uint)0xe0000000)
diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
index 9f9039498ae5..a21156967a5e 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
@@ -173,10 +173,7 @@ mpc85xx_cds_init_IRQ(void)
173#ifdef CONFIG_PCI 173#ifdef CONFIG_PCI
174 openpic_hookup_cascade(PIRQ0A, "82c59 cascade", i8259_irq); 174 openpic_hookup_cascade(PIRQ0A, "82c59 cascade", i8259_irq);
175 175
176 for (i = 0; i < NUM_8259_INTERRUPTS; i++) 176 i8259_init(0, 0);
177 irq_desc[i].handler = &i8259_pic;
178
179 i8259_init(0);
180#endif 177#endif
181 178
182#ifdef CONFIG_CPM2 179#ifdef CONFIG_CPM2
@@ -394,6 +391,9 @@ mpc85xx_cds_pcibios_fixup(void)
394 391
395TODC_ALLOC(); 392TODC_ALLOC();
396 393
394static const char *GFAR_PHY_0 = "phy0:0";
395static const char *GFAR_PHY_1 = "phy0:1";
396
397/* ************************************************************************ 397/* ************************************************************************
398 * 398 *
399 * Setup the architecture 399 * Setup the architecture
@@ -405,6 +405,7 @@ mpc85xx_cds_setup_arch(void)
405 bd_t *binfo = (bd_t *) __res; 405 bd_t *binfo = (bd_t *) __res;
406 unsigned int freq; 406 unsigned int freq;
407 struct gianfar_platform_data *pdata; 407 struct gianfar_platform_data *pdata;
408 struct gianfar_mdio_data *mdata;
408 409
409 /* get the core frequency */ 410 /* get the core frequency */
410 freq = binfo->bi_intfreq; 411 freq = binfo->bi_intfreq;
@@ -448,44 +449,42 @@ mpc85xx_cds_setup_arch(void)
448 invalidate_tlbcam_entry(num_tlbcam_entries - 1); 449 invalidate_tlbcam_entry(num_tlbcam_entries - 1);
449#endif 450#endif
450 451
452 /* setup the board related info for the MDIO bus */
453 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
454
455 mdata->irq[0] = MPC85xx_IRQ_EXT5;
456 mdata->irq[1] = MPC85xx_IRQ_EXT5;
457 mdata->irq[2] = -1;
458 mdata->irq[3] = -1;
459 mdata->irq[31] = -1;
460 mdata->paddr += binfo->bi_immr_base;
461
451 /* setup the board related information for the enet controllers */ 462 /* setup the board related information for the enet controllers */
452 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 463 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
453 if (pdata) { 464 if (pdata) {
454 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 465 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
455 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 466 pdata->bus_id = GFAR_PHY_0;
456 pdata->phyid = 0;
457 /* fixup phy address */
458 pdata->phy_reg_addr += binfo->bi_immr_base;
459 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 467 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
460 } 468 }
461 469
462 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 470 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
463 if (pdata) { 471 if (pdata) {
464 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 472 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
465 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 473 pdata->bus_id = GFAR_PHY_1;
466 pdata->phyid = 1;
467 /* fixup phy address */
468 pdata->phy_reg_addr += binfo->bi_immr_base;
469 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 474 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
470 } 475 }
471 476
472 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC1); 477 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC1);
473 if (pdata) { 478 if (pdata) {
474 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 479 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
475 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 480 pdata->bus_id = GFAR_PHY_0;
476 pdata->phyid = 0;
477 /* fixup phy address */
478 pdata->phy_reg_addr += binfo->bi_immr_base;
479 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 481 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
480 } 482 }
481 483
482 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC2); 484 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC2);
483 if (pdata) { 485 if (pdata) {
484 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 486 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
485 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 487 pdata->bus_id = GFAR_PHY_1;
486 pdata->phyid = 1;
487 /* fixup phy address */
488 pdata->phy_reg_addr += binfo->bi_immr_base;
489 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 488 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
490 } 489 }
491 490
diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
index c76760a781c1..b4ee1707a836 100644
--- a/arch/ppc/platforms/85xx/sbc8560.c
+++ b/arch/ppc/platforms/85xx/sbc8560.c
@@ -91,6 +91,9 @@ sbc8560_early_serial_map(void)
91} 91}
92#endif 92#endif
93 93
94static const char *GFAR_PHY_25 = "phy0:25";
95static const char *GFAR_PHY_26 = "phy0:26";
96
94/* ************************************************************************ 97/* ************************************************************************
95 * 98 *
96 * Setup the architecture 99 * Setup the architecture
@@ -102,6 +105,7 @@ sbc8560_setup_arch(void)
102 bd_t *binfo = (bd_t *) __res; 105 bd_t *binfo = (bd_t *) __res;
103 unsigned int freq; 106 unsigned int freq;
104 struct gianfar_platform_data *pdata; 107 struct gianfar_platform_data *pdata;
108 struct gianfar_mdio_data *mdata;
105 109
106 /* get the core frequency */ 110 /* get the core frequency */
107 freq = binfo->bi_intfreq; 111 freq = binfo->bi_intfreq;
@@ -126,24 +130,26 @@ sbc8560_setup_arch(void)
126 invalidate_tlbcam_entry(num_tlbcam_entries - 1); 130 invalidate_tlbcam_entry(num_tlbcam_entries - 1);
127#endif 131#endif
128 132
133 /* setup the board related info for the MDIO bus */
134 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
135
136 mdata->irq[25] = MPC85xx_IRQ_EXT6;
137 mdata->irq[26] = MPC85xx_IRQ_EXT7;
138 mdata->irq[31] = -1;
139 mdata->paddr += binfo->bi_immr_base;
140
129 /* setup the board related information for the enet controllers */ 141 /* setup the board related information for the enet controllers */
130 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 142 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
131 if (pdata) { 143 if (pdata) {
132 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 144 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
133 pdata->interruptPHY = MPC85xx_IRQ_EXT6; 145 pdata->bus_id = GFAR_PHY_25;
134 pdata->phyid = 25;
135 /* fixup phy address */
136 pdata->phy_reg_addr += binfo->bi_immr_base;
137 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 146 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
138 } 147 }
139 148
140 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 149 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
141 if (pdata) { 150 if (pdata) {
142 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 151 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
143 pdata->interruptPHY = MPC85xx_IRQ_EXT7; 152 pdata->bus_id = GFAR_PHY_26;
144 pdata->phyid = 26;
145 /* fixup phy address */
146 pdata->phy_reg_addr += binfo->bi_immr_base;
147 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 153 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
148 } 154 }
149 155
diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
index 20940f4044f4..1e1b85f8193a 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.c
+++ b/arch/ppc/platforms/85xx/stx_gp3.c
@@ -91,6 +91,9 @@ static u8 gp3_openpic_initsenses[] __initdata = {
91 0x0, /* External 11: */ 91 0x0, /* External 11: */
92}; 92};
93 93
94static const char *GFAR_PHY_2 = "phy0:2";
95static const char *GFAR_PHY_4 = "phy0:4";
96
94/* 97/*
95 * Setup the architecture 98 * Setup the architecture
96 */ 99 */
@@ -100,6 +103,7 @@ gp3_setup_arch(void)
100 bd_t *binfo = (bd_t *) __res; 103 bd_t *binfo = (bd_t *) __res;
101 unsigned int freq; 104 unsigned int freq;
102 struct gianfar_platform_data *pdata; 105 struct gianfar_platform_data *pdata;
106 struct gianfar_mdio_data *mdata;
103 107
104 cpm2_reset(); 108 cpm2_reset();
105 109
@@ -118,23 +122,26 @@ gp3_setup_arch(void)
118 mpc85xx_setup_hose(); 122 mpc85xx_setup_hose();
119#endif 123#endif
120 124
125 /* setup the board related info for the MDIO bus */
126 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
127
128 mdata->irq[2] = MPC85xx_IRQ_EXT5;
129 mdata->irq[4] = MPC85xx_IRQ_EXT5;
130 mdata->irq[31] = -1;
131 mdata->paddr += binfo->bi_immr_base;
132
121 /* setup the board related information for the enet controllers */ 133 /* setup the board related information for the enet controllers */
122 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 134 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
123 if (pdata) { 135 if (pdata) {
124 /* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */ 136 /* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
125 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 137 pdata->bus_id = GFAR_PHY_2;
126 pdata->phyid = 2;
127 pdata->phy_reg_addr += binfo->bi_immr_base;
128 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 138 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
129 } 139 }
130 140
131 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 141 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
132 if (pdata) { 142 if (pdata) {
133 /* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */ 143 /* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
134 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 144 pdata->bus_id = GFAR_PHY_4;
135 pdata->phyid = 4;
136 /* fixup phy address */
137 pdata->phy_reg_addr += binfo->bi_immr_base;
138 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 145 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
139 } 146 }
140 147
diff --git a/arch/ppc/platforms/85xx/stx_gp3.h b/arch/ppc/platforms/85xx/stx_gp3.h
index 7bcc6c35a417..95fdf4b0680b 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.h
+++ b/arch/ppc/platforms/85xx/stx_gp3.h
@@ -21,7 +21,6 @@
21 21
22#include <linux/config.h> 22#include <linux/config.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/seq_file.h>
25#include <asm/ppcboot.h> 24#include <asm/ppcboot.h>
26 25
27#define BOARD_CCSRBAR ((uint)0xe0000000) 26#define BOARD_CCSRBAR ((uint)0xe0000000)
diff --git a/arch/ppc/platforms/Makefile b/arch/ppc/platforms/Makefile
index ff7452e5d8e5..7c5cdabf6f3c 100644
--- a/arch/ppc/platforms/Makefile
+++ b/arch/ppc/platforms/Makefile
@@ -14,6 +14,9 @@ obj-$(CONFIG_PPC_PMAC) += pmac_pic.o pmac_setup.o pmac_time.o \
14 pmac_low_i2c.o pmac_cache.o 14 pmac_low_i2c.o pmac_cache.o
15obj-$(CONFIG_PPC_CHRP) += chrp_setup.o chrp_time.o chrp_pci.o \ 15obj-$(CONFIG_PPC_CHRP) += chrp_setup.o chrp_time.o chrp_pci.o \
16 chrp_pegasos_eth.o 16 chrp_pegasos_eth.o
17ifeq ($(CONFIG_PPC_CHRP),y)
18obj-$(CONFIG_NVRAM) += chrp_nvram.o
19endif
17obj-$(CONFIG_PPC_PREP) += prep_pci.o prep_setup.o 20obj-$(CONFIG_PPC_PREP) += prep_pci.o prep_setup.o
18ifeq ($(CONFIG_PPC_PMAC),y) 21ifeq ($(CONFIG_PPC_PMAC),y)
19obj-$(CONFIG_NVRAM) += pmac_nvram.o 22obj-$(CONFIG_NVRAM) += pmac_nvram.o
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c
index df6ff98c023a..48a4a510d598 100644
--- a/arch/ppc/platforms/chestnut.c
+++ b/arch/ppc/platforms/chestnut.c
@@ -541,7 +541,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
541 541
542 ppc_md.setup_arch = chestnut_setup_arch; 542 ppc_md.setup_arch = chestnut_setup_arch;
543 ppc_md.show_cpuinfo = chestnut_show_cpuinfo; 543 ppc_md.show_cpuinfo = chestnut_show_cpuinfo;
544 ppc_md.irq_canonicalize = NULL;
545 ppc_md.init_IRQ = mv64360_init_irq; 544 ppc_md.init_IRQ = mv64360_init_irq;
546 ppc_md.get_irq = mv64360_get_irq; 545 ppc_md.get_irq = mv64360_get_irq;
547 ppc_md.init = NULL; 546 ppc_md.init = NULL;
diff --git a/arch/ppc/platforms/chrp_nvram.c b/arch/ppc/platforms/chrp_nvram.c
new file mode 100644
index 000000000000..465ba9b090ef
--- /dev/null
+++ b/arch/ppc/platforms/chrp_nvram.c
@@ -0,0 +1,83 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * /dev/nvram driver for PPC
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/spinlock.h>
17#include <asm/uaccess.h>
18#include <asm/prom.h>
19#include <asm/machdep.h>
20
21static unsigned int nvram_size;
22static unsigned char nvram_buf[4];
23static DEFINE_SPINLOCK(nvram_lock);
24
25static unsigned char chrp_nvram_read(int addr)
26{
27 unsigned long done, flags;
28 unsigned char ret;
29
30 if (addr >= nvram_size) {
31 printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n",
32 current->comm, addr, nvram_size);
33 return 0xff;
34 }
35 spin_lock_irqsave(&nvram_lock, flags);
36 if ((call_rtas("nvram-fetch", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done)
37 ret = 0xff;
38 else
39 ret = nvram_buf[0];
40 spin_unlock_irqrestore(&nvram_lock, flags);
41
42 return ret;
43}
44
45static void chrp_nvram_write(int addr, unsigned char val)
46{
47 unsigned long done, flags;
48
49 if (addr >= nvram_size) {
50 printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n",
51 current->comm, addr, nvram_size);
52 return;
53 }
54 spin_lock_irqsave(&nvram_lock, flags);
55 nvram_buf[0] = val;
56 if ((call_rtas("nvram-store", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done)
57 printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
58 spin_unlock_irqrestore(&nvram_lock, flags);
59}
60
61void __init chrp_nvram_init(void)
62{
63 struct device_node *nvram;
64 unsigned int *nbytes_p, proplen;
65
66 nvram = of_find_node_by_type(NULL, "nvram");
67 if (nvram == NULL)
68 return;
69
70 nbytes_p = (unsigned int *)get_property(nvram, "#bytes", &proplen);
71 if (nbytes_p == NULL || proplen != sizeof(unsigned int))
72 return;
73
74 nvram_size = *nbytes_p;
75
76 printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
77 of_node_put(nvram);
78
79 ppc_md.nvram_read_val = chrp_nvram_read;
80 ppc_md.nvram_write_val = chrp_nvram_write;
81
82 return;
83}
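
(Editorial aside: a minimal sketch, my assumption rather than part of the patch, of how the byte-granular hooks registered by chrp_nvram_init() above would typically be consumed by a /dev/nvram style reader:)

	/* Hypothetical consumer of the hooks installed above.
	 * ppc_md.nvram_read_val() works one byte at a time, so reading a
	 * range is just a loop. */
	static void nvram_read_range(unsigned char *buf, int offset, int count)
	{
		int i;

		for (i = 0; i < count; i++)
			buf[i] = ppc_md.nvram_read_val(offset + i);
	}
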
diff --git a/arch/ppc/platforms/chrp_pci.c b/arch/ppc/platforms/chrp_pci.c
index 7d3fbb5c5db2..bd047aac01b1 100644
--- a/arch/ppc/platforms/chrp_pci.c
+++ b/arch/ppc/platforms/chrp_pci.c
@@ -29,7 +29,7 @@ void __iomem *gg2_pci_config_base;
29 * limit the bus number to 3 bits 29 * limit the bus number to 3 bits
30 */ 30 */
31 31
32int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off, 32int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
33 int len, u32 *val) 33 int len, u32 *val)
34{ 34{
35 volatile void __iomem *cfg_data; 35 volatile void __iomem *cfg_data;
@@ -56,7 +56,7 @@ int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
56 return PCIBIOS_SUCCESSFUL; 56 return PCIBIOS_SUCCESSFUL;
57} 57}
58 58
59int __chrp gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off, 59int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
60 int len, u32 val) 60 int len, u32 val)
61{ 61{
62 volatile void __iomem *cfg_data; 62 volatile void __iomem *cfg_data;
@@ -92,7 +92,7 @@ static struct pci_ops gg2_pci_ops =
92/* 92/*
93 * Access functions for PCI config space using RTAS calls. 93 * Access functions for PCI config space using RTAS calls.
94 */ 94 */
95int __chrp 95int
96rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 96rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
97 int len, u32 *val) 97 int len, u32 *val)
98{ 98{
@@ -108,7 +108,7 @@ rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
108 return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; 108 return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
109} 109}
110 110
111int __chrp 111int
112rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 112rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
113 int len, u32 val) 113 int len, u32 val)
114{ 114{
@@ -203,7 +203,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
203 printk ("RTAS supporting Pegasos OF not found, please upgrade" 203 printk ("RTAS supporting Pegasos OF not found, please upgrade"
204 " your firmware\n"); 204 " your firmware\n");
205 } 205 }
206 pci_assign_all_busses = 1; 206 pci_assign_all_buses = 1;
207} 207}
208 208
209void __init 209void __init
diff --git a/arch/ppc/platforms/chrp_pegasos_eth.c b/arch/ppc/platforms/chrp_pegasos_eth.c
index cad5bfa153b2..a9052305c35d 100644
--- a/arch/ppc/platforms/chrp_pegasos_eth.c
+++ b/arch/ppc/platforms/chrp_pegasos_eth.c
@@ -17,7 +17,20 @@
17#include <linux/mv643xx.h> 17#include <linux/mv643xx.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19 19
20/* Pegasos 2 specific Marvell MV 64361 gigabit ethernet port setup */ 20#define PEGASOS2_MARVELL_REGBASE (0xf1000000)
21#define PEGASOS2_MARVELL_REGSIZE (0x00004000)
22#define PEGASOS2_SRAM_BASE (0xf2000000)
23#define PEGASOS2_SRAM_SIZE (256*1024)
24
25#define PEGASOS2_SRAM_BASE_ETH0 (PEGASOS2_SRAM_BASE)
26#define PEGASOS2_SRAM_BASE_ETH1 (PEGASOS2_SRAM_BASE_ETH0 + (PEGASOS2_SRAM_SIZE / 2) )
27
28
29#define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
30#define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
31
32#undef BE_VERBOSE
33
21static struct resource mv643xx_eth_shared_resources[] = { 34static struct resource mv643xx_eth_shared_resources[] = {
22 [0] = { 35 [0] = {
23 .name = "ethernet shared base", 36 .name = "ethernet shared base",
@@ -44,7 +57,16 @@ static struct resource mv643xx_eth0_resources[] = {
44 }, 57 },
45}; 58};
46 59
47static struct mv643xx_eth_platform_data eth0_pd; 60
61static struct mv643xx_eth_platform_data eth0_pd = {
62 .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0,
63 .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
64 .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
65
66 .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH0 + PEGASOS2_SRAM_TXRING_SIZE,
67 .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
68 .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
69};
48 70
49static struct platform_device eth0_device = { 71static struct platform_device eth0_device = {
50 .name = MV643XX_ETH_NAME, 72 .name = MV643XX_ETH_NAME,
@@ -65,7 +87,15 @@ static struct resource mv643xx_eth1_resources[] = {
65 }, 87 },
66}; 88};
67 89
68static struct mv643xx_eth_platform_data eth1_pd; 90static struct mv643xx_eth_platform_data eth1_pd = {
91 .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1,
92 .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
93 .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
94
95 .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH1 + PEGASOS2_SRAM_TXRING_SIZE,
96 .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
97 .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
98};
69 99
70static struct platform_device eth1_device = { 100static struct platform_device eth1_device = {
71 .name = MV643XX_ETH_NAME, 101 .name = MV643XX_ETH_NAME,
@@ -83,9 +113,62 @@ static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
83 &eth1_device, 113 &eth1_device,
84}; 114};
85 115
116/***********/
117/***********/
118#define MV_READ(offset,val) { val = readl(mv643xx_reg_base + offset); }
119#define MV_WRITE(offset,data) writel(data, mv643xx_reg_base + offset)
120
121static void __iomem *mv643xx_reg_base;
122
123static int Enable_SRAM(void)
124{
125 u32 ALong;
126
127 if (mv643xx_reg_base == NULL)
128 mv643xx_reg_base = ioremap(PEGASOS2_MARVELL_REGBASE,
129 PEGASOS2_MARVELL_REGSIZE);
130
131 if (mv643xx_reg_base == NULL)
132 return -ENOMEM;
133
134#ifdef BE_VERBOSE
135 printk("Pegasos II/Marvell MV64361: register remapped from %p to %p\n",
136 (void *)PEGASOS2_MARVELL_REGBASE, (void *)mv643xx_reg_base);
137#endif
138
139 MV_WRITE(MV64340_SRAM_CONFIG, 0);
86 140
87int 141 MV_WRITE(MV64340_INTEGRATED_SRAM_BASE_ADDR, PEGASOS2_SRAM_BASE >> 16);
88mv643xx_eth_add_pds(void) 142
143 MV_READ(MV64340_BASE_ADDR_ENABLE, ALong);
144 ALong &= ~(1 << 19);
145 MV_WRITE(MV64340_BASE_ADDR_ENABLE, ALong);
146
147 ALong = 0x02;
148 ALong |= PEGASOS2_SRAM_BASE & 0xffff0000;
149 MV_WRITE(MV643XX_ETH_BAR_4, ALong);
150
151 MV_WRITE(MV643XX_ETH_SIZE_REG_4, (PEGASOS2_SRAM_SIZE-1) & 0xffff0000);
152
153 MV_READ(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
154 ALong &= ~(1 << 4);
155 MV_WRITE(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
156
157#ifdef BE_VERBOSE
158 printk("Pegasos II/Marvell MV64361: register unmapped\n");
159 printk("Pegasos II/Marvell MV64361: SRAM at %p, size=%x\n", (void*) PEGASOS2_SRAM_BASE, PEGASOS2_SRAM_SIZE);
160#endif
161
162 iounmap(mv643xx_reg_base);
163 mv643xx_reg_base = NULL;
164
165 return 1;
166}
167
168
169/***********/
170/***********/
171int mv643xx_eth_add_pds(void)
89{ 172{
90 int ret = 0; 173 int ret = 0;
91 static struct pci_device_id pci_marvell_mv64360[] = { 174 static struct pci_device_id pci_marvell_mv64360[] = {
@@ -93,9 +176,38 @@ mv643xx_eth_add_pds(void)
93 { } 176 { }
94 }; 177 };
95 178
179#ifdef BE_VERBOSE
180 printk("Pegasos II/Marvell MV64361: init\n");
181#endif
182
96 if (pci_dev_present(pci_marvell_mv64360)) { 183 if (pci_dev_present(pci_marvell_mv64360)) {
97 ret = platform_add_devices(mv643xx_eth_pd_devs, ARRAY_SIZE(mv643xx_eth_pd_devs)); 184 ret = platform_add_devices(mv643xx_eth_pd_devs,
185 ARRAY_SIZE(mv643xx_eth_pd_devs));
186
187 if ( Enable_SRAM() < 0)
188 {
189 eth0_pd.tx_sram_addr = 0;
190 eth0_pd.tx_sram_size = 0;
191 eth0_pd.rx_sram_addr = 0;
192 eth0_pd.rx_sram_size = 0;
193
194 eth1_pd.tx_sram_addr = 0;
195 eth1_pd.tx_sram_size = 0;
196 eth1_pd.rx_sram_addr = 0;
197 eth1_pd.rx_sram_size = 0;
198
199#ifdef BE_VERBOSE
200 printk("Pegasos II/Marvell MV64361: Can't enable the "
201 "SRAM\n");
202#endif
203 }
98 } 204 }
205
206#ifdef BE_VERBOSE
207 printk("Pegasos II/Marvell MV64361: init is over\n");
208#endif
209
99 return ret; 210 return ret;
100} 211}
212
101device_initcall(mv643xx_eth_add_pds); 213device_initcall(mv643xx_eth_add_pds);
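
The constants added at the top of this file carve the MV64361's 256 KiB of on-chip SRAM into two halves, one per ethernet port, with each half split again into a TX ring followed by an RX ring. A self-contained sketch that recomputes the resulting layout from those same values (only the printf scaffolding is added; the addresses and sizes come from the #defines in the hunk above):

/* Editorial sketch: recomputes the SRAM carve-up from the constants above. */
#include <stdio.h>

#define SRAM_BASE   0xf2000000u
#define SRAM_SIZE   (256u * 1024u)        /* PEGASOS2_SRAM_SIZE             */
#define RING_SIZE   (SRAM_SIZE / 4u)      /* PEGASOS2_SRAM_{RX,TX}RING_SIZE */

int main(void)
{
    unsigned int eth0_base = SRAM_BASE;                   /* first half  */
    unsigned int eth1_base = SRAM_BASE + SRAM_SIZE / 2u;  /* second half */

    /* Per port: TX ring first, RX ring immediately after it. */
    printf("eth0 tx @ %#x (%u bytes), rx @ %#x (%u bytes)\n",
           eth0_base, RING_SIZE, eth0_base + RING_SIZE, RING_SIZE);
    printf("eth1 tx @ %#x (%u bytes), rx @ %#x (%u bytes)\n",
           eth1_base, RING_SIZE, eth1_base + RING_SIZE, RING_SIZE);

    /* RING_SIZE/16 = 4096, matching the .tx_queue_size/.rx_queue_size
     * values in the platform data initialised above. */
    return 0;
}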
diff --git a/arch/ppc/platforms/chrp_setup.c b/arch/ppc/platforms/chrp_setup.c
index 66346f0de7ec..f1b70ab3c6fd 100644
--- a/arch/ppc/platforms/chrp_setup.c
+++ b/arch/ppc/platforms/chrp_setup.c
@@ -104,7 +104,7 @@ static const char *gg2_cachemodes[4] = {
104 "Disabled", "Write-Through", "Copy-Back", "Transparent Mode" 104 "Disabled", "Write-Through", "Copy-Back", "Transparent Mode"
105}; 105};
106 106
107int __chrp 107int
108chrp_show_cpuinfo(struct seq_file *m) 108chrp_show_cpuinfo(struct seq_file *m)
109{ 109{
110 int i, sdramen; 110 int i, sdramen;
@@ -302,7 +302,7 @@ void __init chrp_setup_arch(void)
302 pci_create_OF_bus_map(); 302 pci_create_OF_bus_map();
303} 303}
304 304
305void __chrp 305void
306chrp_event_scan(void) 306chrp_event_scan(void)
307{ 307{
308 unsigned char log[1024]; 308 unsigned char log[1024];
@@ -313,7 +313,7 @@ chrp_event_scan(void)
313 ppc_md.heartbeat_count = ppc_md.heartbeat_reset; 313 ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
314} 314}
315 315
316void __chrp 316void
317chrp_restart(char *cmd) 317chrp_restart(char *cmd)
318{ 318{
319 printk("RTAS system-reboot returned %d\n", 319 printk("RTAS system-reboot returned %d\n",
@@ -321,7 +321,7 @@ chrp_restart(char *cmd)
321 for (;;); 321 for (;;);
322} 322}
323 323
324void __chrp 324void
325chrp_power_off(void) 325chrp_power_off(void)
326{ 326{
327 /* allow power on only with power button press */ 327 /* allow power on only with power button press */
@@ -330,20 +330,12 @@ chrp_power_off(void)
330 for (;;); 330 for (;;);
331} 331}
332 332
333void __chrp 333void
334chrp_halt(void) 334chrp_halt(void)
335{ 335{
336 chrp_power_off(); 336 chrp_power_off();
337} 337}
338 338
339u_int __chrp
340chrp_irq_canonicalize(u_int irq)
341{
342 if (irq == 2)
343 return 9;
344 return irq;
345}
346
347/* 339/*
348 * Finds the open-pic node and sets OpenPIC_Addr based on its reg property. 340 * Finds the open-pic node and sets OpenPIC_Addr based on its reg property.
349 * Then checks if it has an interrupt-ranges property. If it does then 341 * Then checks if it has an interrupt-ranges property. If it does then
@@ -444,9 +436,7 @@ void __init chrp_init_IRQ(void)
444 i8259_irq); 436 i8259_irq);
445 437
446 } 438 }
447 for (i = 0; i < NUM_8259_INTERRUPTS; i++) 439 i8259_init(chrp_int_ack, 0);
448 irq_desc[i].handler = &i8259_pic;
449 i8259_init(chrp_int_ack);
450 440
451#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) 441#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
452 /* see if there is a keyboard in the device tree 442 /* see if there is a keyboard in the device tree
@@ -464,8 +454,7 @@ void __init
464chrp_init2(void) 454chrp_init2(void)
465{ 455{
466#ifdef CONFIG_NVRAM 456#ifdef CONFIG_NVRAM
467// XX replace this in a more saner way 457 chrp_nvram_init();
468// pmac_nvram_init();
469#endif 458#endif
470 459
471 request_region(0x20,0x20,"pic1"); 460 request_region(0x20,0x20,"pic1");
@@ -499,6 +488,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
499 DMA_MODE_READ = 0x44; 488 DMA_MODE_READ = 0x44;
500 DMA_MODE_WRITE = 0x48; 489 DMA_MODE_WRITE = 0x48;
501 isa_io_base = CHRP_ISA_IO_BASE; /* default value */ 490 isa_io_base = CHRP_ISA_IO_BASE; /* default value */
491 ppc_do_canonicalize_irqs = 1;
502 492
503 if (root) 493 if (root)
504 machine = get_property(root, "model", NULL); 494 machine = get_property(root, "model", NULL);
@@ -517,7 +507,6 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
517 ppc_md.show_percpuinfo = of_show_percpuinfo; 507 ppc_md.show_percpuinfo = of_show_percpuinfo;
518 ppc_md.show_cpuinfo = chrp_show_cpuinfo; 508 ppc_md.show_cpuinfo = chrp_show_cpuinfo;
519 509
520 ppc_md.irq_canonicalize = chrp_irq_canonicalize;
521 ppc_md.init_IRQ = chrp_init_IRQ; 510 ppc_md.init_IRQ = chrp_init_IRQ;
522 if (_chrp_type == _CHRP_Pegasos) 511 if (_chrp_type == _CHRP_Pegasos)
523 ppc_md.get_irq = i8259_irq; 512 ppc_md.get_irq = i8259_irq;
@@ -561,7 +550,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
561#endif 550#endif
562 551
563#ifdef CONFIG_SMP 552#ifdef CONFIG_SMP
564 ppc_md.smp_ops = &chrp_smp_ops; 553 smp_ops = &chrp_smp_ops;
565#endif /* CONFIG_SMP */ 554#endif /* CONFIG_SMP */
566 555
567 /* 556 /*
@@ -571,7 +560,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
571 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); 560 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
572} 561}
573 562
574void __chrp 563void
575rtas_display_progress(char *s, unsigned short hex) 564rtas_display_progress(char *s, unsigned short hex)
576{ 565{
577 int width; 566 int width;
@@ -598,7 +587,7 @@ rtas_display_progress(char *s, unsigned short hex)
598 call_rtas( "display-character", 1, 1, NULL, ' ' ); 587 call_rtas( "display-character", 1, 1, NULL, ' ' );
599} 588}
600 589
601void __chrp 590void
602rtas_indicator_progress(char *s, unsigned short hex) 591rtas_indicator_progress(char *s, unsigned short hex)
603{ 592{
604 call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex); 593 call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex);
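
The removed chrp_irq_canonicalize() only remapped the 8259 cascade (IRQ 2) to IRQ 9; setting ppc_do_canonicalize_irqs = 1 hands that job to generic code, which is assumed here to apply the same mapping. A stand-alone sketch of the remap for reference:

/* Editorial sketch: the mapping the removed chrp_irq_canonicalize() applied.
 * The generic path enabled by ppc_do_canonicalize_irqs is assumed to do the
 * equivalent remap; only this stand-alone function is shown here. */
#include <assert.h>

static unsigned int canonicalize_irq(unsigned int irq)
{
    /* IRQ 2 is the 8259 cascade input; interrupts routed through it are
     * conventionally reported on IRQ 9 instead. */
    return (irq == 2) ? 9 : irq;
}

int main(void)
{
    assert(canonicalize_irq(2) == 9);   /* cascade gets remapped   */
    assert(canonicalize_irq(7) == 7);   /* everything else is kept */
    return 0;
}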
diff --git a/arch/ppc/platforms/chrp_smp.c b/arch/ppc/platforms/chrp_smp.c
index 0ea1f7d9e46a..97e539557ecb 100644
--- a/arch/ppc/platforms/chrp_smp.c
+++ b/arch/ppc/platforms/chrp_smp.c
@@ -31,6 +31,7 @@
31#include <asm/residual.h> 31#include <asm/residual.h>
32#include <asm/time.h> 32#include <asm/time.h>
33#include <asm/open_pic.h> 33#include <asm/open_pic.h>
34#include <asm/machdep.h>
34 35
35extern unsigned long smp_chrp_cpu_nr; 36extern unsigned long smp_chrp_cpu_nr;
36 37
@@ -88,7 +89,7 @@ smp_chrp_take_timebase(void)
88} 89}
89 90
90/* CHRP with openpic */ 91/* CHRP with openpic */
91struct smp_ops_t chrp_smp_ops __chrpdata = { 92struct smp_ops_t chrp_smp_ops = {
92 .message_pass = smp_openpic_message_pass, 93 .message_pass = smp_openpic_message_pass,
93 .probe = smp_chrp_probe, 94 .probe = smp_chrp_probe,
94 .kick_cpu = smp_chrp_kick_cpu, 95 .kick_cpu = smp_chrp_kick_cpu,
diff --git a/arch/ppc/platforms/chrp_time.c b/arch/ppc/platforms/chrp_time.c
index 6037ce7796f5..29d074c305f0 100644
--- a/arch/ppc/platforms/chrp_time.c
+++ b/arch/ppc/platforms/chrp_time.c
@@ -52,7 +52,7 @@ long __init chrp_time_init(void)
52 return 0; 52 return 0;
53} 53}
54 54
55int __chrp chrp_cmos_clock_read(int addr) 55int chrp_cmos_clock_read(int addr)
56{ 56{
57 if (nvram_as1 != 0) 57 if (nvram_as1 != 0)
58 outb(addr>>8, nvram_as1); 58 outb(addr>>8, nvram_as1);
@@ -60,7 +60,7 @@ int __chrp chrp_cmos_clock_read(int addr)
60 return (inb(nvram_data)); 60 return (inb(nvram_data));
61} 61}
62 62
63void __chrp chrp_cmos_clock_write(unsigned long val, int addr) 63void chrp_cmos_clock_write(unsigned long val, int addr)
64{ 64{
65 if (nvram_as1 != 0) 65 if (nvram_as1 != 0)
66 outb(addr>>8, nvram_as1); 66 outb(addr>>8, nvram_as1);
@@ -72,7 +72,7 @@ void __chrp chrp_cmos_clock_write(unsigned long val, int addr)
72/* 72/*
73 * Set the hardware clock. -- Cort 73 * Set the hardware clock. -- Cort
74 */ 74 */
75int __chrp chrp_set_rtc_time(unsigned long nowtime) 75int chrp_set_rtc_time(unsigned long nowtime)
76{ 76{
77 unsigned char save_control, save_freq_select; 77 unsigned char save_control, save_freq_select;
78 struct rtc_time tm; 78 struct rtc_time tm;
@@ -118,7 +118,7 @@ int __chrp chrp_set_rtc_time(unsigned long nowtime)
118 return 0; 118 return 0;
119} 119}
120 120
121unsigned long __chrp chrp_get_rtc_time(void) 121unsigned long chrp_get_rtc_time(void)
122{ 122{
123 unsigned int year, mon, day, hour, min, sec; 123 unsigned int year, mon, day, hour, min, sec;
124 int uip, i; 124 int uip, i;
diff --git a/arch/ppc/platforms/ev64360.c b/arch/ppc/platforms/ev64360.c
index 9811a8a52c25..53388a1c334f 100644
--- a/arch/ppc/platforms/ev64360.c
+++ b/arch/ppc/platforms/ev64360.c
@@ -35,6 +35,7 @@
35#include <asm/bootinfo.h> 35#include <asm/bootinfo.h>
36#include <asm/ppcboot.h> 36#include <asm/ppcboot.h>
37#include <asm/mv64x60.h> 37#include <asm/mv64x60.h>
38#include <asm/machdep.h>
38#include <platforms/ev64360.h> 39#include <platforms/ev64360.h>
39 40
40#define BOARD_VENDOR "Marvell" 41#define BOARD_VENDOR "Marvell"
diff --git a/arch/ppc/platforms/fads.h b/arch/ppc/platforms/fads.h
index b60c56450b67..a48fb8d723e4 100644
--- a/arch/ppc/platforms/fads.h
+++ b/arch/ppc/platforms/fads.h
@@ -25,6 +25,8 @@
25 25
26#if defined(CONFIG_MPC86XADS) 26#if defined(CONFIG_MPC86XADS)
27 27
28#define BOARD_CHIP_NAME "MPC86X"
29
28/* U-Boot maps BCSR to 0xff080000 */ 30/* U-Boot maps BCSR to 0xff080000 */
29#define BCSR_ADDR ((uint)0xff080000) 31#define BCSR_ADDR ((uint)0xff080000)
30 32
diff --git a/arch/ppc/platforms/gemini_setup.c b/arch/ppc/platforms/gemini_setup.c
index 3a5ff9fb71d6..729897c59033 100644
--- a/arch/ppc/platforms/gemini_setup.c
+++ b/arch/ppc/platforms/gemini_setup.c
@@ -35,6 +35,7 @@
35#include <asm/time.h> 35#include <asm/time.h>
36#include <asm/open_pic.h> 36#include <asm/open_pic.h>
37#include <asm/bootinfo.h> 37#include <asm/bootinfo.h>
38#include <asm/machdep.h>
38 39
39void gemini_find_bridges(void); 40void gemini_find_bridges(void);
40static int gemini_get_clock_speed(void); 41static int gemini_get_clock_speed(void);
@@ -555,7 +556,6 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
555 556
556 ppc_md.setup_arch = gemini_setup_arch; 557 ppc_md.setup_arch = gemini_setup_arch;
557 ppc_md.show_cpuinfo = gemini_show_cpuinfo; 558 ppc_md.show_cpuinfo = gemini_show_cpuinfo;
558 ppc_md.irq_canonicalize = NULL;
559 ppc_md.init_IRQ = gemini_init_IRQ; 559 ppc_md.init_IRQ = gemini_init_IRQ;
560 ppc_md.get_irq = openpic_get_irq; 560 ppc_md.get_irq = openpic_get_irq;
561 ppc_md.init = NULL; 561 ppc_md.init = NULL;
@@ -575,6 +575,6 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
575 ppc_md.pcibios_fixup_bus = gemini_pcibios_fixup; 575 ppc_md.pcibios_fixup_bus = gemini_pcibios_fixup;
576 576
577#ifdef CONFIG_SMP 577#ifdef CONFIG_SMP
578 ppc_md.smp_ops = &gemini_smp_ops; 578 smp_ops = &gemini_smp_ops;
579#endif /* CONFIG_SMP */ 579#endif /* CONFIG_SMP */
580} 580}
diff --git a/arch/ppc/platforms/hdpu.c b/arch/ppc/platforms/hdpu.c
index ff3796860123..b6a66d5e9d83 100644
--- a/arch/ppc/platforms/hdpu.c
+++ b/arch/ppc/platforms/hdpu.c
@@ -609,11 +609,6 @@ static void parse_bootinfo(unsigned long r3,
609} 609}
610 610
611#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) 611#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
612static int hdpu_ide_check_region(ide_ioreg_t from, unsigned int extent)
613{
614 return check_region(from, extent);
615}
616
617static void 612static void
618hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name) 613hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
619{ 614{
@@ -753,7 +748,7 @@ static int smp_hdpu_probe(void)
753} 748}
754 749
755static void 750static void
756smp_hdpu_message_pass(int target, int msg, unsigned long data, int wait) 751smp_hdpu_message_pass(int target, int msg)
757{ 752{
758 if (msg > 0x3) { 753 if (msg > 0x3) {
759 printk("SMP %d: smp_message_pass: unknown msg %d\n", 754 printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@ -949,7 +944,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
949#endif /* CONFIG_SERIAL_TEXT_DEBUG */ 944#endif /* CONFIG_SERIAL_TEXT_DEBUG */
950 945
951#ifdef CONFIG_SMP 946#ifdef CONFIG_SMP
952 ppc_md.smp_ops = &hdpu_smp_ops; 947 smp_ops = &hdpu_smp_ops;
953#endif /* CONFIG_SMP */ 948#endif /* CONFIG_SMP */
954 949
955#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH) 950#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
diff --git a/arch/ppc/platforms/katana.c b/arch/ppc/platforms/katana.c
index 2b53afae0e9c..a301c5ac58dd 100644
--- a/arch/ppc/platforms/katana.c
+++ b/arch/ppc/platforms/katana.c
@@ -42,6 +42,7 @@
42#include <asm/ppcboot.h> 42#include <asm/ppcboot.h>
43#include <asm/mv64x60.h> 43#include <asm/mv64x60.h>
44#include <platforms/katana.h> 44#include <platforms/katana.h>
45#include <asm/machdep.h>
45 46
46static struct mv64x60_handle bh; 47static struct mv64x60_handle bh;
47static katana_id_t katana_id; 48static katana_id_t katana_id;
@@ -520,7 +521,7 @@ katana_fixup_resources(struct pci_dev *dev)
520{ 521{
521 u16 v16; 522 u16 v16;
522 523
523 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_LINE_SIZE>>2); 524 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES>>2);
524 525
525 pci_read_config_word(dev, PCI_COMMAND, &v16); 526 pci_read_config_word(dev, PCI_COMMAND, &v16);
526 v16 |= PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK; 527 v16 |= PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK;
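
The katana fixup now derives the value written to PCI_CACHE_LINE_SIZE from L1_CACHE_BYTES; the shift by two is because the PCI register counts 32-bit words rather than bytes. A small sketch of the arithmetic (the 32-byte line size is an illustrative assumption, not taken from the tree):

/* Editorial sketch: why the fixup writes L1_CACHE_BYTES >> 2.
 * The PCI cache line size register is programmed in 32-bit words. */
#include <stdio.h>

#define L1_CACHE_BYTES 32            /* assumed example line size */

int main(void)
{
    unsigned int reg_val = L1_CACHE_BYTES >> 2;    /* bytes -> dwords */
    printf("PCI_CACHE_LINE_SIZE register value: %u dwords (%u bytes)\n",
           reg_val, reg_val * 4u);
    return 0;
}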
diff --git a/arch/ppc/platforms/lite5200.c b/arch/ppc/platforms/lite5200.c
index b604cf8b3cae..d44cc991179f 100644
--- a/arch/ppc/platforms/lite5200.c
+++ b/arch/ppc/platforms/lite5200.c
@@ -35,6 +35,7 @@
35#include <asm/io.h> 35#include <asm/io.h>
36#include <asm/mpc52xx.h> 36#include <asm/mpc52xx.h>
37#include <asm/ppc_sys.h> 37#include <asm/ppc_sys.h>
38#include <asm/machdep.h>
38 39
39#include <syslib/mpc52xx_pci.h> 40#include <syslib/mpc52xx_pci.h>
40 41
diff --git a/arch/ppc/platforms/lopec.c b/arch/ppc/platforms/lopec.c
index a5569525e0af..06d247c23b82 100644
--- a/arch/ppc/platforms/lopec.c
+++ b/arch/ppc/platforms/lopec.c
@@ -144,15 +144,6 @@ lopec_show_cpuinfo(struct seq_file *m)
144 return 0; 144 return 0;
145} 145}
146 146
147static u32
148lopec_irq_canonicalize(u32 irq)
149{
150 if (irq == 2)
151 return 9;
152 else
153 return irq;
154}
155
156static void 147static void
157lopec_restart(char *cmd) 148lopec_restart(char *cmd)
158{ 149{
@@ -276,15 +267,11 @@ lopec_init_IRQ(void)
276 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade", 267 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade",
277 &i8259_irq); 268 &i8259_irq);
278 269
279 /* Map i8259 interrupts */
280 for(i = 0; i < NUM_8259_INTERRUPTS; i++)
281 irq_desc[i].handler = &i8259_pic;
282
283 /* 270 /*
284 * The EPIC allows for a read in the range of 0xFEF00000 -> 271 * The EPIC allows for a read in the range of 0xFEF00000 ->
285 * 0xFEFFFFFF to generate a PCI interrupt-acknowledge transaction. 272 * 0xFEFFFFFF to generate a PCI interrupt-acknowledge transaction.
286 */ 273 */
287 i8259_init(0xfef00000); 274 i8259_init(0xfef00000, 0);
288} 275}
289 276
290static int __init 277static int __init
@@ -379,10 +366,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
379 ISA_DMA_THRESHOLD = 0x00ffffff; 366 ISA_DMA_THRESHOLD = 0x00ffffff;
380 DMA_MODE_READ = 0x44; 367 DMA_MODE_READ = 0x44;
381 DMA_MODE_WRITE = 0x48; 368 DMA_MODE_WRITE = 0x48;
369 ppc_do_canonicalize_irqs = 1;
382 370
383 ppc_md.setup_arch = lopec_setup_arch; 371 ppc_md.setup_arch = lopec_setup_arch;
384 ppc_md.show_cpuinfo = lopec_show_cpuinfo; 372 ppc_md.show_cpuinfo = lopec_show_cpuinfo;
385 ppc_md.irq_canonicalize = lopec_irq_canonicalize;
386 ppc_md.init_IRQ = lopec_init_IRQ; 373 ppc_md.init_IRQ = lopec_init_IRQ;
387 ppc_md.get_irq = openpic_get_irq; 374 ppc_md.get_irq = openpic_get_irq;
388 375
diff --git a/arch/ppc/platforms/mpc885ads.h b/arch/ppc/platforms/mpc885ads.h
index eb386635b0fd..a80b7d116b49 100644
--- a/arch/ppc/platforms/mpc885ads.h
+++ b/arch/ppc/platforms/mpc885ads.h
@@ -88,5 +88,7 @@
88#define SICR_ENET_MASK ((uint)0x00ff0000) 88#define SICR_ENET_MASK ((uint)0x00ff0000)
89#define SICR_ENET_CLKRT ((uint)0x002c0000) 89#define SICR_ENET_CLKRT ((uint)0x002c0000)
90 90
91#define BOARD_CHIP_NAME "MPC885"
92
91#endif /* __ASM_MPC885ADS_H__ */ 93#endif /* __ASM_MPC885ADS_H__ */
92#endif /* __KERNEL__ */ 94#endif /* __KERNEL__ */
diff --git a/arch/ppc/platforms/mvme5100.c b/arch/ppc/platforms/mvme5100.c
index ce2ce88c8033..108eb182dddc 100644
--- a/arch/ppc/platforms/mvme5100.c
+++ b/arch/ppc/platforms/mvme5100.c
@@ -223,11 +223,7 @@ mvme5100_init_IRQ(void)
223 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade", 223 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade",
224 &i8259_irq); 224 &i8259_irq);
225 225
226 /* Map i8259 interrupts. */ 226 i8259_init(0, 0);
227 for (i = 0; i < NUM_8259_INTERRUPTS; i++)
228 irq_desc[i].handler = &i8259_pic;
229
230 i8259_init(0);
231#else 227#else
232 openpic_init(0); 228 openpic_init(0);
233#endif 229#endif
diff --git a/arch/ppc/platforms/pal4_setup.c b/arch/ppc/platforms/pal4_setup.c
index 12446b93e38c..f93a3f871932 100644
--- a/arch/ppc/platforms/pal4_setup.c
+++ b/arch/ppc/platforms/pal4_setup.c
@@ -28,6 +28,7 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/todc.h> 29#include <asm/todc.h>
30#include <asm/bootinfo.h> 30#include <asm/bootinfo.h>
31#include <asm/machdep.h>
31 32
32#include <syslib/cpc700.h> 33#include <syslib/cpc700.h>
33 34
diff --git a/arch/ppc/platforms/pmac_backlight.c b/arch/ppc/platforms/pmac_backlight.c
index ed2b1cebc19a..8be2f7d071f0 100644
--- a/arch/ppc/platforms/pmac_backlight.c
+++ b/arch/ppc/platforms/pmac_backlight.c
@@ -37,7 +37,7 @@ static int backlight_req_enable = -1;
37static void backlight_callback(void *); 37static void backlight_callback(void *);
38static DECLARE_WORK(backlight_work, backlight_callback, NULL); 38static DECLARE_WORK(backlight_work, backlight_callback, NULL);
39 39
40void __pmac register_backlight_controller(struct backlight_controller *ctrler, 40void register_backlight_controller(struct backlight_controller *ctrler,
41 void *data, char *type) 41 void *data, char *type)
42{ 42{
43 struct device_node* bk_node; 43 struct device_node* bk_node;
@@ -99,7 +99,7 @@ void __pmac register_backlight_controller(struct backlight_controller *ctrler,
99} 99}
100EXPORT_SYMBOL(register_backlight_controller); 100EXPORT_SYMBOL(register_backlight_controller);
101 101
102void __pmac unregister_backlight_controller(struct backlight_controller 102void unregister_backlight_controller(struct backlight_controller
103 *ctrler, void *data) 103 *ctrler, void *data)
104{ 104{
105 /* We keep the current backlight level (for now) */ 105 /* We keep the current backlight level (for now) */
@@ -108,7 +108,7 @@ void __pmac unregister_backlight_controller(struct backlight_controller
108} 108}
109EXPORT_SYMBOL(unregister_backlight_controller); 109EXPORT_SYMBOL(unregister_backlight_controller);
110 110
111static int __pmac __set_backlight_enable(int enable) 111static int __set_backlight_enable(int enable)
112{ 112{
113 int rc; 113 int rc;
114 114
@@ -122,7 +122,7 @@ static int __pmac __set_backlight_enable(int enable)
122 release_console_sem(); 122 release_console_sem();
123 return rc; 123 return rc;
124} 124}
125int __pmac set_backlight_enable(int enable) 125int set_backlight_enable(int enable)
126{ 126{
127 if (!backlighter) 127 if (!backlighter)
128 return -ENODEV; 128 return -ENODEV;
@@ -133,7 +133,7 @@ int __pmac set_backlight_enable(int enable)
133 133
134EXPORT_SYMBOL(set_backlight_enable); 134EXPORT_SYMBOL(set_backlight_enable);
135 135
136int __pmac get_backlight_enable(void) 136int get_backlight_enable(void)
137{ 137{
138 if (!backlighter) 138 if (!backlighter)
139 return -ENODEV; 139 return -ENODEV;
@@ -141,7 +141,7 @@ int __pmac get_backlight_enable(void)
141} 141}
142EXPORT_SYMBOL(get_backlight_enable); 142EXPORT_SYMBOL(get_backlight_enable);
143 143
144static int __pmac __set_backlight_level(int level) 144static int __set_backlight_level(int level)
145{ 145{
146 int rc = 0; 146 int rc = 0;
147 147
@@ -165,7 +165,7 @@ static int __pmac __set_backlight_level(int level)
165 } 165 }
166 return rc; 166 return rc;
167} 167}
168int __pmac set_backlight_level(int level) 168int set_backlight_level(int level)
169{ 169{
170 if (!backlighter) 170 if (!backlighter)
171 return -ENODEV; 171 return -ENODEV;
@@ -176,7 +176,7 @@ int __pmac set_backlight_level(int level)
176 176
177EXPORT_SYMBOL(set_backlight_level); 177EXPORT_SYMBOL(set_backlight_level);
178 178
179int __pmac get_backlight_level(void) 179int get_backlight_level(void)
180{ 180{
181 if (!backlighter) 181 if (!backlighter)
182 return -ENODEV; 182 return -ENODEV;
diff --git a/arch/ppc/platforms/pmac_cpufreq.c b/arch/ppc/platforms/pmac_cpufreq.c
index d4bc5f67ec53..fba7e4d7c0bf 100644
--- a/arch/ppc/platforms/pmac_cpufreq.c
+++ b/arch/ppc/platforms/pmac_cpufreq.c
@@ -136,7 +136,7 @@ static inline void debug_calc_bogomips(void)
136 136
137/* Switch CPU speed under 750FX CPU control 137/* Switch CPU speed under 750FX CPU control
138 */ 138 */
139static int __pmac cpu_750fx_cpu_speed(int low_speed) 139static int cpu_750fx_cpu_speed(int low_speed)
140{ 140{
141 u32 hid2; 141 u32 hid2;
142 142
@@ -172,7 +172,7 @@ static int __pmac cpu_750fx_cpu_speed(int low_speed)
172 return 0; 172 return 0;
173} 173}
174 174
175static unsigned int __pmac cpu_750fx_get_cpu_speed(void) 175static unsigned int cpu_750fx_get_cpu_speed(void)
176{ 176{
177 if (mfspr(SPRN_HID1) & HID1_PS) 177 if (mfspr(SPRN_HID1) & HID1_PS)
178 return low_freq; 178 return low_freq;
@@ -181,7 +181,7 @@ static unsigned int __pmac cpu_750fx_get_cpu_speed(void)
181} 181}
182 182
183/* Switch CPU speed using DFS */ 183/* Switch CPU speed using DFS */
184static int __pmac dfs_set_cpu_speed(int low_speed) 184static int dfs_set_cpu_speed(int low_speed)
185{ 185{
186 if (low_speed == 0) { 186 if (low_speed == 0) {
187 /* ramping up, set voltage first */ 187 /* ramping up, set voltage first */
@@ -205,7 +205,7 @@ static int __pmac dfs_set_cpu_speed(int low_speed)
205 return 0; 205 return 0;
206} 206}
207 207
208static unsigned int __pmac dfs_get_cpu_speed(void) 208static unsigned int dfs_get_cpu_speed(void)
209{ 209{
210 if (mfspr(SPRN_HID1) & HID1_DFS) 210 if (mfspr(SPRN_HID1) & HID1_DFS)
211 return low_freq; 211 return low_freq;
@@ -216,7 +216,7 @@ static unsigned int __pmac dfs_get_cpu_speed(void)
216 216
217/* Switch CPU speed using slewing GPIOs 217/* Switch CPU speed using slewing GPIOs
218 */ 218 */
219static int __pmac gpios_set_cpu_speed(int low_speed) 219static int gpios_set_cpu_speed(int low_speed)
220{ 220{
221 int gpio, timeout = 0; 221 int gpio, timeout = 0;
222 222
@@ -258,7 +258,7 @@ static int __pmac gpios_set_cpu_speed(int low_speed)
258 258
259/* Switch CPU speed under PMU control 259/* Switch CPU speed under PMU control
260 */ 260 */
261static int __pmac pmu_set_cpu_speed(int low_speed) 261static int pmu_set_cpu_speed(int low_speed)
262{ 262{
263 struct adb_request req; 263 struct adb_request req;
264 unsigned long save_l2cr; 264 unsigned long save_l2cr;
@@ -354,7 +354,7 @@ static int __pmac pmu_set_cpu_speed(int low_speed)
354 return 0; 354 return 0;
355} 355}
356 356
357static int __pmac do_set_cpu_speed(int speed_mode, int notify) 357static int do_set_cpu_speed(int speed_mode, int notify)
358{ 358{
359 struct cpufreq_freqs freqs; 359 struct cpufreq_freqs freqs;
360 unsigned long l3cr; 360 unsigned long l3cr;
@@ -391,17 +391,17 @@ static int __pmac do_set_cpu_speed(int speed_mode, int notify)
391 return 0; 391 return 0;
392} 392}
393 393
394static unsigned int __pmac pmac_cpufreq_get_speed(unsigned int cpu) 394static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
395{ 395{
396 return cur_freq; 396 return cur_freq;
397} 397}
398 398
399static int __pmac pmac_cpufreq_verify(struct cpufreq_policy *policy) 399static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
400{ 400{
401 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); 401 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
402} 402}
403 403
404static int __pmac pmac_cpufreq_target( struct cpufreq_policy *policy, 404static int pmac_cpufreq_target( struct cpufreq_policy *policy,
405 unsigned int target_freq, 405 unsigned int target_freq,
406 unsigned int relation) 406 unsigned int relation)
407{ 407{
@@ -414,13 +414,13 @@ static int __pmac pmac_cpufreq_target( struct cpufreq_policy *policy,
414 return do_set_cpu_speed(newstate, 1); 414 return do_set_cpu_speed(newstate, 1);
415} 415}
416 416
417unsigned int __pmac pmac_get_one_cpufreq(int i) 417unsigned int pmac_get_one_cpufreq(int i)
418{ 418{
419 /* Supports only one CPU for now */ 419 /* Supports only one CPU for now */
420 return (i == 0) ? cur_freq : 0; 420 return (i == 0) ? cur_freq : 0;
421} 421}
422 422
423static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) 423static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
424{ 424{
425 if (policy->cpu != 0) 425 if (policy->cpu != 0)
426 return -ENODEV; 426 return -ENODEV;
@@ -433,7 +433,7 @@ static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
433 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); 433 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
434} 434}
435 435
436static u32 __pmac read_gpio(struct device_node *np) 436static u32 read_gpio(struct device_node *np)
437{ 437{
438 u32 *reg = (u32 *)get_property(np, "reg", NULL); 438 u32 *reg = (u32 *)get_property(np, "reg", NULL);
439 u32 offset; 439 u32 offset;
@@ -452,7 +452,7 @@ static u32 __pmac read_gpio(struct device_node *np)
452 return offset; 452 return offset;
453} 453}
454 454
455static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) 455static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
456{ 456{
457 /* Ok, this could be made a bit smarter, but let's be robust for now. We 457 /* Ok, this could be made a bit smarter, but let's be robust for now. We
458 * always force a speed change to high speed before sleep, to make sure 458 * always force a speed change to high speed before sleep, to make sure
@@ -468,7 +468,7 @@ static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message
468 return 0; 468 return 0;
469} 469}
470 470
471static int __pmac pmac_cpufreq_resume(struct cpufreq_policy *policy) 471static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
472{ 472{
473 /* If we resume, first check if we have a get() function */ 473 /* If we resume, first check if we have a get() function */
474 if (get_speed_proc) 474 if (get_speed_proc)
@@ -501,7 +501,7 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
501}; 501};
502 502
503 503
504static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) 504static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
505{ 505{
506 struct device_node *volt_gpio_np = of_find_node_by_name(NULL, 506 struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
507 "voltage-gpio"); 507 "voltage-gpio");
@@ -593,7 +593,7 @@ static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
593 return 0; 593 return 0;
594} 594}
595 595
596static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode) 596static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
597{ 597{
598 struct device_node *volt_gpio_np; 598 struct device_node *volt_gpio_np;
599 599
@@ -620,7 +620,7 @@ static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
620 return 0; 620 return 0;
621} 621}
622 622
623static int __pmac pmac_cpufreq_init_750FX(struct device_node *cpunode) 623static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
624{ 624{
625 struct device_node *volt_gpio_np; 625 struct device_node *volt_gpio_np;
626 u32 pvr, *value; 626 u32 pvr, *value;
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c
index dd6d45ae0501..58884a63ebdb 100644
--- a/arch/ppc/platforms/pmac_feature.c
+++ b/arch/ppc/platforms/pmac_feature.c
@@ -63,7 +63,7 @@ extern struct device_node *k2_skiplist[2];
63 * We use a single global lock to protect accesses. Each driver has 63 * We use a single global lock to protect accesses. Each driver has
64 * to take care of its own locking 64 * to take care of its own locking
65 */ 65 */
66static DEFINE_SPINLOCK(feature_lock __pmacdata); 66static DEFINE_SPINLOCK(feature_lock);
67 67
68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); 68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); 69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
@@ -72,9 +72,9 @@ static DEFINE_SPINLOCK(feature_lock __pmacdata);
72/* 72/*
73 * Instance of some macio stuffs 73 * Instance of some macio stuffs
74 */ 74 */
75struct macio_chip macio_chips[MAX_MACIO_CHIPS] __pmacdata; 75struct macio_chip macio_chips[MAX_MACIO_CHIPS];
76 76
77struct macio_chip* __pmac macio_find(struct device_node* child, int type) 77struct macio_chip* macio_find(struct device_node* child, int type)
78{ 78{
79 while(child) { 79 while(child) {
80 int i; 80 int i;
@@ -89,7 +89,7 @@ struct macio_chip* __pmac macio_find(struct device_node* child, int type)
89} 89}
90EXPORT_SYMBOL_GPL(macio_find); 90EXPORT_SYMBOL_GPL(macio_find);
91 91
92static const char* macio_names[] __pmacdata = 92static const char* macio_names[] =
93{ 93{
94 "Unknown", 94 "Unknown",
95 "Grand Central", 95 "Grand Central",
@@ -116,10 +116,10 @@ static const char* macio_names[] __pmacdata =
116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v))) 116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v))) 117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
118 118
119static struct device_node* uninorth_node __pmacdata; 119static struct device_node* uninorth_node;
120static u32 __iomem * uninorth_base __pmacdata; 120static u32 __iomem * uninorth_base;
121static u32 uninorth_rev __pmacdata; 121static u32 uninorth_rev;
122static int uninorth_u3 __pmacdata; 122static int uninorth_u3;
123static void __iomem *u3_ht; 123static void __iomem *u3_ht;
124 124
125/* 125/*
@@ -142,13 +142,13 @@ struct pmac_mb_def
142 struct feature_table_entry* features; 142 struct feature_table_entry* features;
143 unsigned long board_flags; 143 unsigned long board_flags;
144}; 144};
145static struct pmac_mb_def pmac_mb __pmacdata; 145static struct pmac_mb_def pmac_mb;
146 146
147/* 147/*
148 * Here are the chip specific feature functions 148 * Here are the chip specific feature functions
149 */ 149 */
150 150
151static inline int __pmac 151static inline int
152simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int value) 152simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int value)
153{ 153{
154 struct macio_chip* macio; 154 struct macio_chip* macio;
@@ -170,7 +170,7 @@ simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int
170 170
171#ifndef CONFIG_POWER4 171#ifndef CONFIG_POWER4
172 172
173static long __pmac 173static long
174ohare_htw_scc_enable(struct device_node* node, long param, long value) 174ohare_htw_scc_enable(struct device_node* node, long param, long value)
175{ 175{
176 struct macio_chip* macio; 176 struct macio_chip* macio;
@@ -263,21 +263,21 @@ ohare_htw_scc_enable(struct device_node* node, long param, long value)
263 return 0; 263 return 0;
264} 264}
265 265
266static long __pmac 266static long
267ohare_floppy_enable(struct device_node* node, long param, long value) 267ohare_floppy_enable(struct device_node* node, long param, long value)
268{ 268{
269 return simple_feature_tweak(node, macio_ohare, 269 return simple_feature_tweak(node, macio_ohare,
270 OHARE_FCR, OH_FLOPPY_ENABLE, value); 270 OHARE_FCR, OH_FLOPPY_ENABLE, value);
271} 271}
272 272
273static long __pmac 273static long
274ohare_mesh_enable(struct device_node* node, long param, long value) 274ohare_mesh_enable(struct device_node* node, long param, long value)
275{ 275{
276 return simple_feature_tweak(node, macio_ohare, 276 return simple_feature_tweak(node, macio_ohare,
277 OHARE_FCR, OH_MESH_ENABLE, value); 277 OHARE_FCR, OH_MESH_ENABLE, value);
278} 278}
279 279
280static long __pmac 280static long
281ohare_ide_enable(struct device_node* node, long param, long value) 281ohare_ide_enable(struct device_node* node, long param, long value)
282{ 282{
283 switch(param) { 283 switch(param) {
@@ -298,7 +298,7 @@ ohare_ide_enable(struct device_node* node, long param, long value)
298 } 298 }
299} 299}
300 300
301static long __pmac 301static long
302ohare_ide_reset(struct device_node* node, long param, long value) 302ohare_ide_reset(struct device_node* node, long param, long value)
303{ 303{
304 switch(param) { 304 switch(param) {
@@ -313,7 +313,7 @@ ohare_ide_reset(struct device_node* node, long param, long value)
313 } 313 }
314} 314}
315 315
316static long __pmac 316static long
317ohare_sleep_state(struct device_node* node, long param, long value) 317ohare_sleep_state(struct device_node* node, long param, long value)
318{ 318{
319 struct macio_chip* macio = &macio_chips[0]; 319 struct macio_chip* macio = &macio_chips[0];
@@ -329,7 +329,7 @@ ohare_sleep_state(struct device_node* node, long param, long value)
329 return 0; 329 return 0;
330} 330}
331 331
332static long __pmac 332static long
333heathrow_modem_enable(struct device_node* node, long param, long value) 333heathrow_modem_enable(struct device_node* node, long param, long value)
334{ 334{
335 struct macio_chip* macio; 335 struct macio_chip* macio;
@@ -373,7 +373,7 @@ heathrow_modem_enable(struct device_node* node, long param, long value)
373 return 0; 373 return 0;
374} 374}
375 375
376static long __pmac 376static long
377heathrow_floppy_enable(struct device_node* node, long param, long value) 377heathrow_floppy_enable(struct device_node* node, long param, long value)
378{ 378{
379 return simple_feature_tweak(node, macio_unknown, 379 return simple_feature_tweak(node, macio_unknown,
@@ -382,7 +382,7 @@ heathrow_floppy_enable(struct device_node* node, long param, long value)
382 value); 382 value);
383} 383}
384 384
385static long __pmac 385static long
386heathrow_mesh_enable(struct device_node* node, long param, long value) 386heathrow_mesh_enable(struct device_node* node, long param, long value)
387{ 387{
388 struct macio_chip* macio; 388 struct macio_chip* macio;
@@ -411,7 +411,7 @@ heathrow_mesh_enable(struct device_node* node, long param, long value)
411 return 0; 411 return 0;
412} 412}
413 413
414static long __pmac 414static long
415heathrow_ide_enable(struct device_node* node, long param, long value) 415heathrow_ide_enable(struct device_node* node, long param, long value)
416{ 416{
417 switch(param) { 417 switch(param) {
@@ -426,7 +426,7 @@ heathrow_ide_enable(struct device_node* node, long param, long value)
426 } 426 }
427} 427}
428 428
429static long __pmac 429static long
430heathrow_ide_reset(struct device_node* node, long param, long value) 430heathrow_ide_reset(struct device_node* node, long param, long value)
431{ 431{
432 switch(param) { 432 switch(param) {
@@ -441,7 +441,7 @@ heathrow_ide_reset(struct device_node* node, long param, long value)
441 } 441 }
442} 442}
443 443
444static long __pmac 444static long
445heathrow_bmac_enable(struct device_node* node, long param, long value) 445heathrow_bmac_enable(struct device_node* node, long param, long value)
446{ 446{
447 struct macio_chip* macio; 447 struct macio_chip* macio;
@@ -470,7 +470,7 @@ heathrow_bmac_enable(struct device_node* node, long param, long value)
470 return 0; 470 return 0;
471} 471}
472 472
473static long __pmac 473static long
474heathrow_sound_enable(struct device_node* node, long param, long value) 474heathrow_sound_enable(struct device_node* node, long param, long value)
475{ 475{
476 struct macio_chip* macio; 476 struct macio_chip* macio;
@@ -501,16 +501,16 @@ heathrow_sound_enable(struct device_node* node, long param, long value)
501 return 0; 501 return 0;
502} 502}
503 503
504static u32 save_fcr[6] __pmacdata; 504static u32 save_fcr[6];
505static u32 save_mbcr __pmacdata; 505static u32 save_mbcr;
506static u32 save_gpio_levels[2] __pmacdata; 506static u32 save_gpio_levels[2];
507static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT] __pmacdata; 507static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
508static u8 save_gpio_normal[KEYLARGO_GPIO_CNT] __pmacdata; 508static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
509static u32 save_unin_clock_ctl __pmacdata; 509static u32 save_unin_clock_ctl;
510static struct dbdma_regs save_dbdma[13] __pmacdata; 510static struct dbdma_regs save_dbdma[13];
511static struct dbdma_regs save_alt_dbdma[13] __pmacdata; 511static struct dbdma_regs save_alt_dbdma[13];
512 512
513static void __pmac 513static void
514dbdma_save(struct macio_chip* macio, struct dbdma_regs* save) 514dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
515{ 515{
516 int i; 516 int i;
@@ -527,7 +527,7 @@ dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
527 } 527 }
528} 528}
529 529
530static void __pmac 530static void
531dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save) 531dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
532{ 532{
533 int i; 533 int i;
@@ -547,7 +547,7 @@ dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
547 } 547 }
548} 548}
549 549
550static void __pmac 550static void
551heathrow_sleep(struct macio_chip* macio, int secondary) 551heathrow_sleep(struct macio_chip* macio, int secondary)
552{ 552{
553 if (secondary) { 553 if (secondary) {
@@ -580,7 +580,7 @@ heathrow_sleep(struct macio_chip* macio, int secondary)
580 (void)MACIO_IN32(HEATHROW_FCR); 580 (void)MACIO_IN32(HEATHROW_FCR);
581} 581}
582 582
583static void __pmac 583static void
584heathrow_wakeup(struct macio_chip* macio, int secondary) 584heathrow_wakeup(struct macio_chip* macio, int secondary)
585{ 585{
586 if (secondary) { 586 if (secondary) {
@@ -605,7 +605,7 @@ heathrow_wakeup(struct macio_chip* macio, int secondary)
605 } 605 }
606} 606}
607 607
608static long __pmac 608static long
609heathrow_sleep_state(struct device_node* node, long param, long value) 609heathrow_sleep_state(struct device_node* node, long param, long value)
610{ 610{
611 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) 611 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
@@ -622,7 +622,7 @@ heathrow_sleep_state(struct device_node* node, long param, long value)
622 return 0; 622 return 0;
623} 623}
624 624
625static long __pmac 625static long
626core99_scc_enable(struct device_node* node, long param, long value) 626core99_scc_enable(struct device_node* node, long param, long value)
627{ 627{
628 struct macio_chip* macio; 628 struct macio_chip* macio;
@@ -723,7 +723,7 @@ core99_scc_enable(struct device_node* node, long param, long value)
723 return 0; 723 return 0;
724} 724}
725 725
726static long __pmac 726static long
727core99_modem_enable(struct device_node* node, long param, long value) 727core99_modem_enable(struct device_node* node, long param, long value)
728{ 728{
729 struct macio_chip* macio; 729 struct macio_chip* macio;
@@ -775,7 +775,7 @@ core99_modem_enable(struct device_node* node, long param, long value)
775 return 0; 775 return 0;
776} 776}
777 777
778static long __pmac 778static long
779pangea_modem_enable(struct device_node* node, long param, long value) 779pangea_modem_enable(struct device_node* node, long param, long value)
780{ 780{
781 struct macio_chip* macio; 781 struct macio_chip* macio;
@@ -830,7 +830,7 @@ pangea_modem_enable(struct device_node* node, long param, long value)
830 return 0; 830 return 0;
831} 831}
832 832
833static long __pmac 833static long
834core99_ata100_enable(struct device_node* node, long value) 834core99_ata100_enable(struct device_node* node, long value)
835{ 835{
836 unsigned long flags; 836 unsigned long flags;
@@ -860,7 +860,7 @@ core99_ata100_enable(struct device_node* node, long value)
860 return 0; 860 return 0;
861} 861}
862 862
863static long __pmac 863static long
864core99_ide_enable(struct device_node* node, long param, long value) 864core99_ide_enable(struct device_node* node, long param, long value)
865{ 865{
866 /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2 866 /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
@@ -883,7 +883,7 @@ core99_ide_enable(struct device_node* node, long param, long value)
883 } 883 }
884} 884}
885 885
886static long __pmac 886static long
887core99_ide_reset(struct device_node* node, long param, long value) 887core99_ide_reset(struct device_node* node, long param, long value)
888{ 888{
889 switch(param) { 889 switch(param) {
@@ -901,7 +901,7 @@ core99_ide_reset(struct device_node* node, long param, long value)
901 } 901 }
902} 902}
903 903
904static long __pmac 904static long
905core99_gmac_enable(struct device_node* node, long param, long value) 905core99_gmac_enable(struct device_node* node, long param, long value)
906{ 906{
907 unsigned long flags; 907 unsigned long flags;
@@ -918,7 +918,7 @@ core99_gmac_enable(struct device_node* node, long param, long value)
918 return 0; 918 return 0;
919} 919}
920 920
921static long __pmac 921static long
922core99_gmac_phy_reset(struct device_node* node, long param, long value) 922core99_gmac_phy_reset(struct device_node* node, long param, long value)
923{ 923{
924 unsigned long flags; 924 unsigned long flags;
@@ -943,7 +943,7 @@ core99_gmac_phy_reset(struct device_node* node, long param, long value)
943 return 0; 943 return 0;
944} 944}
945 945
946static long __pmac 946static long
947core99_sound_chip_enable(struct device_node* node, long param, long value) 947core99_sound_chip_enable(struct device_node* node, long param, long value)
948{ 948{
949 struct macio_chip* macio; 949 struct macio_chip* macio;
@@ -973,7 +973,7 @@ core99_sound_chip_enable(struct device_node* node, long param, long value)
973 return 0; 973 return 0;
974} 974}
975 975
976static long __pmac 976static long
977core99_airport_enable(struct device_node* node, long param, long value) 977core99_airport_enable(struct device_node* node, long param, long value)
978{ 978{
979 struct macio_chip* macio; 979 struct macio_chip* macio;
@@ -1060,7 +1060,7 @@ core99_airport_enable(struct device_node* node, long param, long value)
1060} 1060}
1061 1061
1062#ifdef CONFIG_SMP 1062#ifdef CONFIG_SMP
1063static long __pmac 1063static long
1064core99_reset_cpu(struct device_node* node, long param, long value) 1064core99_reset_cpu(struct device_node* node, long param, long value)
1065{ 1065{
1066 unsigned int reset_io = 0; 1066 unsigned int reset_io = 0;
@@ -1104,7 +1104,7 @@ core99_reset_cpu(struct device_node* node, long param, long value)
1104} 1104}
1105#endif /* CONFIG_SMP */ 1105#endif /* CONFIG_SMP */
1106 1106
1107static long __pmac 1107static long
1108core99_usb_enable(struct device_node* node, long param, long value) 1108core99_usb_enable(struct device_node* node, long param, long value)
1109{ 1109{
1110 struct macio_chip* macio; 1110 struct macio_chip* macio;
@@ -1257,7 +1257,7 @@ core99_usb_enable(struct device_node* node, long param, long value)
1257 return 0; 1257 return 0;
1258} 1258}
1259 1259
1260static long __pmac 1260static long
1261core99_firewire_enable(struct device_node* node, long param, long value) 1261core99_firewire_enable(struct device_node* node, long param, long value)
1262{ 1262{
1263 unsigned long flags; 1263 unsigned long flags;
@@ -1284,7 +1284,7 @@ core99_firewire_enable(struct device_node* node, long param, long value)
1284 return 0; 1284 return 0;
1285} 1285}
1286 1286
1287static long __pmac 1287static long
1288core99_firewire_cable_power(struct device_node* node, long param, long value) 1288core99_firewire_cable_power(struct device_node* node, long param, long value)
1289{ 1289{
1290 unsigned long flags; 1290 unsigned long flags;
@@ -1315,7 +1315,7 @@ core99_firewire_cable_power(struct device_node* node, long param, long value)
1315 return 0; 1315 return 0;
1316} 1316}
1317 1317
1318static long __pmac 1318static long
1319intrepid_aack_delay_enable(struct device_node* node, long param, long value) 1319intrepid_aack_delay_enable(struct device_node* node, long param, long value)
1320{ 1320{
1321 unsigned long flags; 1321 unsigned long flags;
@@ -1336,7 +1336,7 @@ intrepid_aack_delay_enable(struct device_node* node, long param, long value)
1336 1336
1337#endif /* CONFIG_POWER4 */ 1337#endif /* CONFIG_POWER4 */
1338 1338
1339static long __pmac 1339static long
1340core99_read_gpio(struct device_node* node, long param, long value) 1340core99_read_gpio(struct device_node* node, long param, long value)
1341{ 1341{
1342 struct macio_chip* macio = &macio_chips[0]; 1342 struct macio_chip* macio = &macio_chips[0];
@@ -1345,7 +1345,7 @@ core99_read_gpio(struct device_node* node, long param, long value)
1345} 1345}
1346 1346
1347 1347
1348static long __pmac 1348static long
1349core99_write_gpio(struct device_node* node, long param, long value) 1349core99_write_gpio(struct device_node* node, long param, long value)
1350{ 1350{
1351 struct macio_chip* macio = &macio_chips[0]; 1351 struct macio_chip* macio = &macio_chips[0];
@@ -1356,7 +1356,7 @@ core99_write_gpio(struct device_node* node, long param, long value)
1356 1356
1357#ifdef CONFIG_POWER4 1357#ifdef CONFIG_POWER4
1358 1358
1359static long __pmac 1359static long
1360g5_gmac_enable(struct device_node* node, long param, long value) 1360g5_gmac_enable(struct device_node* node, long param, long value)
1361{ 1361{
1362 struct macio_chip* macio = &macio_chips[0]; 1362 struct macio_chip* macio = &macio_chips[0];
@@ -1380,7 +1380,7 @@ g5_gmac_enable(struct device_node* node, long param, long value)
1380 return 0; 1380 return 0;
1381} 1381}
1382 1382
1383static long __pmac 1383static long
1384g5_fw_enable(struct device_node* node, long param, long value) 1384g5_fw_enable(struct device_node* node, long param, long value)
1385{ 1385{
1386 struct macio_chip* macio = &macio_chips[0]; 1386 struct macio_chip* macio = &macio_chips[0];
@@ -1403,7 +1403,7 @@ g5_fw_enable(struct device_node* node, long param, long value)
1403 return 0; 1403 return 0;
1404} 1404}
1405 1405
1406static long __pmac 1406static long
1407g5_mpic_enable(struct device_node* node, long param, long value) 1407g5_mpic_enable(struct device_node* node, long param, long value)
1408{ 1408{
1409 unsigned long flags; 1409 unsigned long flags;
@@ -1419,7 +1419,7 @@ g5_mpic_enable(struct device_node* node, long param, long value)
1419} 1419}
1420 1420
1421#ifdef CONFIG_SMP 1421#ifdef CONFIG_SMP
1422static long __pmac 1422static long
1423g5_reset_cpu(struct device_node* node, long param, long value) 1423g5_reset_cpu(struct device_node* node, long param, long value)
1424{ 1424{
1425 unsigned int reset_io = 0; 1425 unsigned int reset_io = 0;
@@ -1465,7 +1465,7 @@ g5_reset_cpu(struct device_node* node, long param, long value)
1465 * This takes the second CPU off the bus on dual CPU machines 1465 * This takes the second CPU off the bus on dual CPU machines
1466 * running UP 1466 * running UP
1467 */ 1467 */
1468void __pmac g5_phy_disable_cpu1(void) 1468void g5_phy_disable_cpu1(void)
1469{ 1469{
1470 UN_OUT(U3_API_PHY_CONFIG_1, 0); 1470 UN_OUT(U3_API_PHY_CONFIG_1, 0);
1471} 1471}
@@ -1474,7 +1474,7 @@ void __pmac g5_phy_disable_cpu1(void)
1474 1474
1475#ifndef CONFIG_POWER4 1475#ifndef CONFIG_POWER4
1476 1476
1477static void __pmac 1477static void
1478keylargo_shutdown(struct macio_chip* macio, int sleep_mode) 1478keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
1479{ 1479{
1480 u32 temp; 1480 u32 temp;
@@ -1528,7 +1528,7 @@ keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
1528 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); 1528 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1529} 1529}
1530 1530
1531static void __pmac 1531static void
1532pangea_shutdown(struct macio_chip* macio, int sleep_mode) 1532pangea_shutdown(struct macio_chip* macio, int sleep_mode)
1533{ 1533{
1534 u32 temp; 1534 u32 temp;
@@ -1562,7 +1562,7 @@ pangea_shutdown(struct macio_chip* macio, int sleep_mode)
1562 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); 1562 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1563} 1563}
1564 1564
1565static void __pmac 1565static void
1566intrepid_shutdown(struct macio_chip* macio, int sleep_mode) 1566intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
1567{ 1567{
1568 u32 temp; 1568 u32 temp;
@@ -1591,7 +1591,7 @@ intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
1591} 1591}
1592 1592
1593 1593
1594void __pmac pmac_tweak_clock_spreading(int enable) 1594void pmac_tweak_clock_spreading(int enable)
1595{ 1595{
1596 struct macio_chip* macio = &macio_chips[0]; 1596 struct macio_chip* macio = &macio_chips[0];
1597 1597
@@ -1698,7 +1698,7 @@ void __pmac pmac_tweak_clock_spreading(int enable)
1698} 1698}
1699 1699
1700 1700
1701static int __pmac 1701static int
1702core99_sleep(void) 1702core99_sleep(void)
1703{ 1703{
1704 struct macio_chip* macio; 1704 struct macio_chip* macio;
@@ -1791,7 +1791,7 @@ core99_sleep(void)
1791 return 0; 1791 return 0;
1792} 1792}
1793 1793
1794static int __pmac 1794static int
1795core99_wake_up(void) 1795core99_wake_up(void)
1796{ 1796{
1797 struct macio_chip* macio; 1797 struct macio_chip* macio;
@@ -1854,7 +1854,7 @@ core99_wake_up(void)
1854 return 0; 1854 return 0;
1855} 1855}
1856 1856
1857static long __pmac 1857static long
1858core99_sleep_state(struct device_node* node, long param, long value) 1858core99_sleep_state(struct device_node* node, long param, long value)
1859{ 1859{
1860 /* Param == 1 means to enter the "fake sleep" mode that is 1860 /* Param == 1 means to enter the "fake sleep" mode that is
@@ -1884,7 +1884,7 @@ core99_sleep_state(struct device_node* node, long param, long value)
1884 1884
1885#endif /* CONFIG_POWER4 */ 1885#endif /* CONFIG_POWER4 */
1886 1886
1887static long __pmac 1887static long
1888generic_dev_can_wake(struct device_node* node, long param, long value) 1888generic_dev_can_wake(struct device_node* node, long param, long value)
1889{ 1889{
1890 /* Todo: eventually check we are really dealing with on-board 1890 /* Todo: eventually check we are really dealing with on-board
@@ -1896,7 +1896,7 @@ generic_dev_can_wake(struct device_node* node, long param, long value)
1896 return 0; 1896 return 0;
1897} 1897}
1898 1898
1899static long __pmac 1899static long
1900generic_get_mb_info(struct device_node* node, long param, long value) 1900generic_get_mb_info(struct device_node* node, long param, long value)
1901{ 1901{
1902 switch(param) { 1902 switch(param) {
@@ -1919,7 +1919,7 @@ generic_get_mb_info(struct device_node* node, long param, long value)
1919 1919
1920/* Used on any machine 1920/* Used on any machine
1921 */ 1921 */
1922static struct feature_table_entry any_features[] __pmacdata = { 1922static struct feature_table_entry any_features[] = {
1923 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info }, 1923 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
1924 { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake }, 1924 { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake },
1925 { 0, NULL } 1925 { 0, NULL }
@@ -1931,7 +1931,7 @@ static struct feature_table_entry any_features[] __pmacdata = {
1931 * 2400,3400 and 3500 series powerbooks. Some older desktops seem 1931 * 2400,3400 and 3500 series powerbooks. Some older desktops seem
1932 * to have issues with turning on/off those asic cells 1932 * to have issues with turning on/off those asic cells
1933 */ 1933 */
1934static struct feature_table_entry ohare_features[] __pmacdata = { 1934static struct feature_table_entry ohare_features[] = {
1935 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1935 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1936 { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable }, 1936 { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable },
1937 { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable }, 1937 { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable },
@@ -1945,7 +1945,7 @@ static struct feature_table_entry ohare_features[] __pmacdata = {
1945 * Separated as some features couldn't be properly tested 1945 * Separated as some features couldn't be properly tested
1946 * and the serial port control bits appear to confuse it. 1946 * and the serial port control bits appear to confuse it.
1947 */ 1947 */
1948static struct feature_table_entry heathrow_desktop_features[] __pmacdata = { 1948static struct feature_table_entry heathrow_desktop_features[] = {
1949 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1949 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
1950 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable }, 1950 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
1951 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable }, 1951 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
@@ -1957,7 +1957,7 @@ static struct feature_table_entry heathrow_desktop_features[] __pmacdata = {
1957/* Heathrow based laptop, that is the Wallstreet and mainstreet 1957/* Heathrow based laptop, that is the Wallstreet and mainstreet
1958 * powerbooks. 1958 * powerbooks.
1959 */ 1959 */
1960static struct feature_table_entry heathrow_laptop_features[] __pmacdata = { 1960static struct feature_table_entry heathrow_laptop_features[] = {
1961 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1961 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1962 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, 1962 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
1963 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1963 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
@@ -1973,7 +1973,7 @@ static struct feature_table_entry heathrow_laptop_features[] __pmacdata = {
1973/* Paddington based machines 1973/* Paddington based machines
1974 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4. 1974 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
1975 */ 1975 */
1976static struct feature_table_entry paddington_features[] __pmacdata = { 1976static struct feature_table_entry paddington_features[] = {
1977 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1977 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1978 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, 1978 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
1979 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1979 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
@@ -1991,7 +1991,7 @@ static struct feature_table_entry paddington_features[] __pmacdata = {
1991 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo 1991 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
1992 * used on iBook2 & iMac "flow power". 1992 * used on iBook2 & iMac "flow power".
1993 */ 1993 */
1994static struct feature_table_entry core99_features[] __pmacdata = { 1994static struct feature_table_entry core99_features[] = {
1995 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 1995 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
1996 { PMAC_FTR_MODEM_ENABLE, core99_modem_enable }, 1996 { PMAC_FTR_MODEM_ENABLE, core99_modem_enable },
1997 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 1997 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2014,7 +2014,7 @@ static struct feature_table_entry core99_features[] __pmacdata = {
2014 2014
2015/* RackMac 2015/* RackMac
2016 */ 2016 */
2017static struct feature_table_entry rackmac_features[] __pmacdata = { 2017static struct feature_table_entry rackmac_features[] = {
2018 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2018 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2019 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2019 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
2020 { PMAC_FTR_IDE_RESET, core99_ide_reset }, 2020 { PMAC_FTR_IDE_RESET, core99_ide_reset },
@@ -2034,7 +2034,7 @@ static struct feature_table_entry rackmac_features[] __pmacdata = {
2034 2034
2035/* Pangea features 2035/* Pangea features
2036 */ 2036 */
2037static struct feature_table_entry pangea_features[] __pmacdata = { 2037static struct feature_table_entry pangea_features[] = {
2038 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2038 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2039 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, 2039 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
2040 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2040 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2054,7 +2054,7 @@ static struct feature_table_entry pangea_features[] __pmacdata = {
2054 2054
2055/* Intrepid features 2055/* Intrepid features
2056 */ 2056 */
2057static struct feature_table_entry intrepid_features[] __pmacdata = { 2057static struct feature_table_entry intrepid_features[] = {
2058 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2058 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2059 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, 2059 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
2060 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2060 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2077,7 +2077,7 @@ static struct feature_table_entry intrepid_features[] __pmacdata = {
2077 2077
2078/* G5 features 2078/* G5 features
2079 */ 2079 */
2080static struct feature_table_entry g5_features[] __pmacdata = { 2080static struct feature_table_entry g5_features[] = {
2081 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable }, 2081 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
2082 { PMAC_FTR_1394_ENABLE, g5_fw_enable }, 2082 { PMAC_FTR_1394_ENABLE, g5_fw_enable },
2083 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable }, 2083 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
@@ -2091,7 +2091,7 @@ static struct feature_table_entry g5_features[] __pmacdata = {
2091 2091
2092#endif /* CONFIG_POWER4 */ 2092#endif /* CONFIG_POWER4 */
2093 2093
2094static struct pmac_mb_def pmac_mb_defs[] __pmacdata = { 2094static struct pmac_mb_def pmac_mb_defs[] = {
2095#ifndef CONFIG_POWER4 2095#ifndef CONFIG_POWER4
2096 /* 2096 /*
2097 * Desktops 2097 * Desktops
@@ -2356,7 +2356,7 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
2356/* 2356/*
2357 * The toplevel feature_call callback 2357 * The toplevel feature_call callback
2358 */ 2358 */
2359long __pmac 2359long
2360pmac_do_feature_call(unsigned int selector, ...) 2360pmac_do_feature_call(unsigned int selector, ...)
2361{ 2361{
2362 struct device_node* node; 2362 struct device_node* node;
@@ -2939,8 +2939,8 @@ void __init pmac_check_ht_link(void)
2939 * Early video resume hook 2939 * Early video resume hook
2940 */ 2940 */
2941 2941
2942static void (*pmac_early_vresume_proc)(void *data) __pmacdata; 2942static void (*pmac_early_vresume_proc)(void *data);
2943static void *pmac_early_vresume_data __pmacdata; 2943static void *pmac_early_vresume_data;
2944 2944
2945void pmac_set_early_video_resume(void (*proc)(void *data), void *data) 2945void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
2946{ 2946{
@@ -2953,7 +2953,7 @@ void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
2953} 2953}
2954EXPORT_SYMBOL(pmac_set_early_video_resume); 2954EXPORT_SYMBOL(pmac_set_early_video_resume);
2955 2955
2956void __pmac pmac_call_early_video_resume(void) 2956void pmac_call_early_video_resume(void)
2957{ 2957{
2958 if (pmac_early_vresume_proc) 2958 if (pmac_early_vresume_proc)
2959 pmac_early_vresume_proc(pmac_early_vresume_data); 2959 pmac_early_vresume_proc(pmac_early_vresume_data);
@@ -2963,11 +2963,11 @@ void __pmac pmac_call_early_video_resume(void)
2963 * AGP related suspend/resume code 2963 * AGP related suspend/resume code
2964 */ 2964 */
2965 2965
2966static struct pci_dev *pmac_agp_bridge __pmacdata; 2966static struct pci_dev *pmac_agp_bridge;
2967static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata; 2967static int (*pmac_agp_suspend)(struct pci_dev *bridge);
2968static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata; 2968static int (*pmac_agp_resume)(struct pci_dev *bridge);
2969 2969
2970void __pmac pmac_register_agp_pm(struct pci_dev *bridge, 2970void pmac_register_agp_pm(struct pci_dev *bridge,
2971 int (*suspend)(struct pci_dev *bridge), 2971 int (*suspend)(struct pci_dev *bridge),
2972 int (*resume)(struct pci_dev *bridge)) 2972 int (*resume)(struct pci_dev *bridge))
2973{ 2973{
@@ -2984,7 +2984,7 @@ void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
2984} 2984}
2985EXPORT_SYMBOL(pmac_register_agp_pm); 2985EXPORT_SYMBOL(pmac_register_agp_pm);
2986 2986
2987void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev) 2987void pmac_suspend_agp_for_card(struct pci_dev *dev)
2988{ 2988{
2989 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL) 2989 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
2990 return; 2990 return;
@@ -2994,7 +2994,7 @@ void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
2994} 2994}
2995EXPORT_SYMBOL(pmac_suspend_agp_for_card); 2995EXPORT_SYMBOL(pmac_suspend_agp_for_card);
2996 2996
2997void __pmac pmac_resume_agp_for_card(struct pci_dev *dev) 2997void pmac_resume_agp_for_card(struct pci_dev *dev)
2998{ 2998{
2999 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL) 2999 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
3000 return; 3000 return;
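Note on the pattern above: the dominant change in the pmac_feature.c hunks (and in most of the hunks that follow) is the removal of the __pmac and __pmacdata markers from function and data definitions, leaving ordinary static symbols in the default .text/.data sections. The following is a minimal before/after sketch, not part of the patch; the names are hypothetical and the empty defines merely stand in for whatever asm-ppc header provided the markers (assumed here to have expanded to platform-specific section attributes).

#define __pmac      /* formerly tagged code for a PowerMac-specific section */
#define __pmacdata  /* formerly tagged data for a PowerMac-specific section */

/* old style: marker placed between the type and the name */
static int __pmac demo_wake_up_old(void) { return 0; }
static const int demo_table_old[2] __pmacdata = { 1, 2 };

/* new style after this patch: plain definitions */
static int demo_wake_up_new(void) { return 0; }
static const int demo_table_new[2] = { 1, 2 };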
diff --git a/arch/ppc/platforms/pmac_nvram.c b/arch/ppc/platforms/pmac_nvram.c
index c9de64205996..8c9b008c7226 100644
--- a/arch/ppc/platforms/pmac_nvram.c
+++ b/arch/ppc/platforms/pmac_nvram.c
@@ -88,17 +88,17 @@ extern int system_running;
88static int (*core99_write_bank)(int bank, u8* datas); 88static int (*core99_write_bank)(int bank, u8* datas);
89static int (*core99_erase_bank)(int bank); 89static int (*core99_erase_bank)(int bank);
90 90
91static char *nvram_image __pmacdata; 91static char *nvram_image;
92 92
93 93
94static unsigned char __pmac core99_nvram_read_byte(int addr) 94static unsigned char core99_nvram_read_byte(int addr)
95{ 95{
96 if (nvram_image == NULL) 96 if (nvram_image == NULL)
97 return 0xff; 97 return 0xff;
98 return nvram_image[addr]; 98 return nvram_image[addr];
99} 99}
100 100
101static void __pmac core99_nvram_write_byte(int addr, unsigned char val) 101static void core99_nvram_write_byte(int addr, unsigned char val)
102{ 102{
103 if (nvram_image == NULL) 103 if (nvram_image == NULL)
104 return; 104 return;
@@ -106,18 +106,18 @@ static void __pmac core99_nvram_write_byte(int addr, unsigned char val)
106} 106}
107 107
108 108
109static unsigned char __openfirmware direct_nvram_read_byte(int addr) 109static unsigned char direct_nvram_read_byte(int addr)
110{ 110{
111 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]); 111 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
112} 112}
113 113
114static void __openfirmware direct_nvram_write_byte(int addr, unsigned char val) 114static void direct_nvram_write_byte(int addr, unsigned char val)
115{ 115{
116 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val); 116 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
117} 117}
118 118
119 119
120static unsigned char __pmac indirect_nvram_read_byte(int addr) 120static unsigned char indirect_nvram_read_byte(int addr)
121{ 121{
122 unsigned char val; 122 unsigned char val;
123 unsigned long flags; 123 unsigned long flags;
@@ -130,7 +130,7 @@ static unsigned char __pmac indirect_nvram_read_byte(int addr)
130 return val; 130 return val;
131} 131}
132 132
133static void __pmac indirect_nvram_write_byte(int addr, unsigned char val) 133static void indirect_nvram_write_byte(int addr, unsigned char val)
134{ 134{
135 unsigned long flags; 135 unsigned long flags;
136 136
@@ -143,13 +143,13 @@ static void __pmac indirect_nvram_write_byte(int addr, unsigned char val)
143 143
144#ifdef CONFIG_ADB_PMU 144#ifdef CONFIG_ADB_PMU
145 145
146static void __pmac pmu_nvram_complete(struct adb_request *req) 146static void pmu_nvram_complete(struct adb_request *req)
147{ 147{
148 if (req->arg) 148 if (req->arg)
149 complete((struct completion *)req->arg); 149 complete((struct completion *)req->arg);
150} 150}
151 151
152static unsigned char __pmac pmu_nvram_read_byte(int addr) 152static unsigned char pmu_nvram_read_byte(int addr)
153{ 153{
154 struct adb_request req; 154 struct adb_request req;
155 DECLARE_COMPLETION(req_complete); 155 DECLARE_COMPLETION(req_complete);
@@ -165,7 +165,7 @@ static unsigned char __pmac pmu_nvram_read_byte(int addr)
165 return req.reply[0]; 165 return req.reply[0];
166} 166}
167 167
168static void __pmac pmu_nvram_write_byte(int addr, unsigned char val) 168static void pmu_nvram_write_byte(int addr, unsigned char val)
169{ 169{
170 struct adb_request req; 170 struct adb_request req;
171 DECLARE_COMPLETION(req_complete); 171 DECLARE_COMPLETION(req_complete);
@@ -183,7 +183,7 @@ static void __pmac pmu_nvram_write_byte(int addr, unsigned char val)
183#endif /* CONFIG_ADB_PMU */ 183#endif /* CONFIG_ADB_PMU */
184 184
185 185
186static u8 __pmac chrp_checksum(struct chrp_header* hdr) 186static u8 chrp_checksum(struct chrp_header* hdr)
187{ 187{
188 u8 *ptr; 188 u8 *ptr;
189 u16 sum = hdr->signature; 189 u16 sum = hdr->signature;
@@ -194,7 +194,7 @@ static u8 __pmac chrp_checksum(struct chrp_header* hdr)
194 return sum; 194 return sum;
195} 195}
196 196
197static u32 __pmac core99_calc_adler(u8 *buffer) 197static u32 core99_calc_adler(u8 *buffer)
198{ 198{
199 int cnt; 199 int cnt;
200 u32 low, high; 200 u32 low, high;
@@ -216,7 +216,7 @@ static u32 __pmac core99_calc_adler(u8 *buffer)
216 return (high << 16) | low; 216 return (high << 16) | low;
217} 217}
218 218
219static u32 __pmac core99_check(u8* datas) 219static u32 core99_check(u8* datas)
220{ 220{
221 struct core99_header* hdr99 = (struct core99_header*)datas; 221 struct core99_header* hdr99 = (struct core99_header*)datas;
222 222
@@ -235,7 +235,7 @@ static u32 __pmac core99_check(u8* datas)
235 return hdr99->generation; 235 return hdr99->generation;
236} 236}
237 237
238static int __pmac sm_erase_bank(int bank) 238static int sm_erase_bank(int bank)
239{ 239{
240 int stat, i; 240 int stat, i;
241 unsigned long timeout; 241 unsigned long timeout;
@@ -267,7 +267,7 @@ static int __pmac sm_erase_bank(int bank)
267 return 0; 267 return 0;
268} 268}
269 269
270static int __pmac sm_write_bank(int bank, u8* datas) 270static int sm_write_bank(int bank, u8* datas)
271{ 271{
272 int i, stat = 0; 272 int i, stat = 0;
273 unsigned long timeout; 273 unsigned long timeout;
@@ -302,7 +302,7 @@ static int __pmac sm_write_bank(int bank, u8* datas)
302 return 0; 302 return 0;
303} 303}
304 304
305static int __pmac amd_erase_bank(int bank) 305static int amd_erase_bank(int bank)
306{ 306{
307 int i, stat = 0; 307 int i, stat = 0;
308 unsigned long timeout; 308 unsigned long timeout;
@@ -349,7 +349,7 @@ static int __pmac amd_erase_bank(int bank)
349 return 0; 349 return 0;
350} 350}
351 351
352static int __pmac amd_write_bank(int bank, u8* datas) 352static int amd_write_bank(int bank, u8* datas)
353{ 353{
354 int i, stat = 0; 354 int i, stat = 0;
355 unsigned long timeout; 355 unsigned long timeout;
@@ -430,7 +430,7 @@ static void __init lookup_partitions(void)
430 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]); 430 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
431} 431}
432 432
433static void __pmac core99_nvram_sync(void) 433static void core99_nvram_sync(void)
434{ 434{
435 struct core99_header* hdr99; 435 struct core99_header* hdr99;
436 unsigned long flags; 436 unsigned long flags;
@@ -554,12 +554,12 @@ void __init pmac_nvram_init(void)
554 lookup_partitions(); 554 lookup_partitions();
555} 555}
556 556
557int __pmac pmac_get_partition(int partition) 557int pmac_get_partition(int partition)
558{ 558{
559 return nvram_partitions[partition]; 559 return nvram_partitions[partition];
560} 560}
561 561
562u8 __pmac pmac_xpram_read(int xpaddr) 562u8 pmac_xpram_read(int xpaddr)
563{ 563{
564 int offset = nvram_partitions[pmac_nvram_XPRAM]; 564 int offset = nvram_partitions[pmac_nvram_XPRAM];
565 565
@@ -569,7 +569,7 @@ u8 __pmac pmac_xpram_read(int xpaddr)
569 return ppc_md.nvram_read_val(xpaddr + offset); 569 return ppc_md.nvram_read_val(xpaddr + offset);
570} 570}
571 571
572void __pmac pmac_xpram_write(int xpaddr, u8 data) 572void pmac_xpram_write(int xpaddr, u8 data)
573{ 573{
574 int offset = nvram_partitions[pmac_nvram_XPRAM]; 574 int offset = nvram_partitions[pmac_nvram_XPRAM];
575 575
diff --git a/arch/ppc/platforms/pmac_pci.c b/arch/ppc/platforms/pmac_pci.c
index 719fb49fe2bc..786295b6ddd0 100644
--- a/arch/ppc/platforms/pmac_pci.c
+++ b/arch/ppc/platforms/pmac_pci.c
@@ -141,7 +141,7 @@ fixup_bus_range(struct device_node *bridge)
141 |(((unsigned long)(off)) & 0xFCUL) \ 141 |(((unsigned long)(off)) & 0xFCUL) \
142 |1UL) 142 |1UL)
143 143
144static void volatile __iomem * __pmac 144static void volatile __iomem *
145macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset) 145macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
146{ 146{
147 unsigned int caddr; 147 unsigned int caddr;
@@ -162,7 +162,7 @@ macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
162 return hose->cfg_data + offset; 162 return hose->cfg_data + offset;
163} 163}
164 164
165static int __pmac 165static int
166macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 166macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
167 int len, u32 *val) 167 int len, u32 *val)
168{ 168{
@@ -190,7 +190,7 @@ macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
190 return PCIBIOS_SUCCESSFUL; 190 return PCIBIOS_SUCCESSFUL;
191} 191}
192 192
193static int __pmac 193static int
194macrisc_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 194macrisc_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
195 int len, u32 val) 195 int len, u32 val)
196{ 196{
@@ -230,7 +230,7 @@ static struct pci_ops macrisc_pci_ops =
230/* 230/*
231 * Verifiy that a specific (bus, dev_fn) exists on chaos 231 * Verifiy that a specific (bus, dev_fn) exists on chaos
232 */ 232 */
233static int __pmac 233static int
234chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) 234chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
235{ 235{
236 struct device_node *np; 236 struct device_node *np;
@@ -252,7 +252,7 @@ chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
252 return PCIBIOS_SUCCESSFUL; 252 return PCIBIOS_SUCCESSFUL;
253} 253}
254 254
255static int __pmac 255static int
256chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 256chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
257 int len, u32 *val) 257 int len, u32 *val)
258{ 258{
@@ -264,7 +264,7 @@ chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
264 return macrisc_read_config(bus, devfn, offset, len, val); 264 return macrisc_read_config(bus, devfn, offset, len, val);
265} 265}
266 266
267static int __pmac 267static int
268chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 268chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
269 int len, u32 val) 269 int len, u32 val)
270{ 270{
@@ -294,7 +294,7 @@ static struct pci_ops chaos_pci_ops =
294 + (((unsigned long)bus) << 16) \ 294 + (((unsigned long)bus) << 16) \
295 + 0x01000000UL) 295 + 0x01000000UL)
296 296
297static void volatile __iomem * __pmac 297static void volatile __iomem *
298u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset) 298u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
299{ 299{
300 if (bus == hose->first_busno) { 300 if (bus == hose->first_busno) {
@@ -307,7 +307,7 @@ u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
307 return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset); 307 return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
308} 308}
309 309
310static int __pmac 310static int
311u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 311u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
312 int len, u32 *val) 312 int len, u32 *val)
313{ 313{
@@ -357,7 +357,7 @@ u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
357 return PCIBIOS_SUCCESSFUL; 357 return PCIBIOS_SUCCESSFUL;
358} 358}
359 359
360static int __pmac 360static int
361u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 361u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
362 int len, u32 val) 362 int len, u32 val)
363{ 363{
@@ -575,7 +575,7 @@ pmac_find_bridges(void)
575 * some offset between bus number and domains for now when we 575 * some offset between bus number and domains for now when we
576 * assign all busses should help for now 576 * assign all busses should help for now
577 */ 577 */
578 if (pci_assign_all_busses) 578 if (pci_assign_all_buses)
579 pcibios_assign_bus_offset = 0x10; 579 pcibios_assign_bus_offset = 0x10;
580 580
581#ifdef CONFIG_POWER4 581#ifdef CONFIG_POWER4
@@ -643,7 +643,7 @@ static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
643static int __init 643static int __init
644setup_uninorth(struct pci_controller* hose, struct reg_property* addr) 644setup_uninorth(struct pci_controller* hose, struct reg_property* addr)
645{ 645{
646 pci_assign_all_busses = 1; 646 pci_assign_all_buses = 1;
647 has_uninorth = 1; 647 has_uninorth = 1;
648 hose->ops = &macrisc_pci_ops; 648 hose->ops = &macrisc_pci_ops;
649 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); 649 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
@@ -677,7 +677,7 @@ setup_u3_agp(struct pci_controller* hose, struct reg_property* addr)
677{ 677{
678 /* On G5, we move AGP up to high bus number so we don't need 678 /* On G5, we move AGP up to high bus number so we don't need
679 * to reassign bus numbers for HT. If we ever have P2P bridges 679 * to reassign bus numbers for HT. If we ever have P2P bridges
680 * on AGP, we'll have to move pci_assign_all_busses to the 680 * on AGP, we'll have to move pci_assign_all_buses to the
681 * pci_controller structure so we enable it for AGP and not for 681 * pci_controller structure so we enable it for AGP and not for
682 * HT childs. 682 * HT childs.
683 * We hard code the address because of the different size of 683 * We hard code the address because of the different size of
@@ -899,7 +899,7 @@ pmac_pcibios_fixup(void)
899 pcibios_fixup_OF_interrupts(); 899 pcibios_fixup_OF_interrupts();
900} 900}
901 901
902int __pmac 902int
903pmac_pci_enable_device_hook(struct pci_dev *dev, int initial) 903pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
904{ 904{
905 struct device_node* node; 905 struct device_node* node;
@@ -1096,7 +1096,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
1096 * Disable second function on K2-SATA, it's broken 1096 * Disable second function on K2-SATA, it's broken
1097 * and disable IO BARs on first one 1097 * and disable IO BARs on first one
1098 */ 1098 */
1099void __pmac pmac_pci_fixup_k2_sata(struct pci_dev* dev) 1099void pmac_pci_fixup_k2_sata(struct pci_dev* dev)
1100{ 1100{
1101 int i; 1101 int i;
1102 u16 cmd; 1102 u16 cmd;
diff --git a/arch/ppc/platforms/pmac_pic.c b/arch/ppc/platforms/pmac_pic.c
index 2ce058895e03..9f2d95ea8564 100644
--- a/arch/ppc/platforms/pmac_pic.c
+++ b/arch/ppc/platforms/pmac_pic.c
@@ -35,6 +35,7 @@
35#include <asm/open_pic.h> 35#include <asm/open_pic.h>
36#include <asm/xmon.h> 36#include <asm/xmon.h>
37#include <asm/pmac_feature.h> 37#include <asm/pmac_feature.h>
38#include <asm/machdep.h>
38 39
39#include "pmac_pic.h" 40#include "pmac_pic.h"
40 41
@@ -53,7 +54,7 @@ struct pmac_irq_hw {
53}; 54};
54 55
55/* Default addresses */ 56/* Default addresses */
56static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = { 57static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
57 (struct pmac_irq_hw *) 0xf3000020, 58 (struct pmac_irq_hw *) 0xf3000020,
58 (struct pmac_irq_hw *) 0xf3000010, 59 (struct pmac_irq_hw *) 0xf3000010,
59 (struct pmac_irq_hw *) 0xf4000020, 60 (struct pmac_irq_hw *) 0xf4000020,
@@ -64,22 +65,22 @@ static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = {
64#define OHARE_LEVEL_MASK 0x1ff00000 65#define OHARE_LEVEL_MASK 0x1ff00000
65#define HEATHROW_LEVEL_MASK 0x1ff00000 66#define HEATHROW_LEVEL_MASK 0x1ff00000
66 67
67static int max_irqs __pmacdata; 68static int max_irqs;
68static int max_real_irqs __pmacdata; 69static int max_real_irqs;
69static u32 level_mask[4] __pmacdata; 70static u32 level_mask[4];
70 71
71static DEFINE_SPINLOCK(pmac_pic_lock __pmacdata); 72static DEFINE_SPINLOCK(pmac_pic_lock);
72 73
73 74
74#define GATWICK_IRQ_POOL_SIZE 10 75#define GATWICK_IRQ_POOL_SIZE 10
75static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE] __pmacdata; 76static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
76 77
77/* 78/*
78 * Mark an irq as "lost". This is only used on the pmac 79 * Mark an irq as "lost". This is only used on the pmac
79 * since it can lose interrupts (see pmac_set_irq_mask). 80 * since it can lose interrupts (see pmac_set_irq_mask).
80 * -- Cort 81 * -- Cort
81 */ 82 */
82void __pmac 83void
83__set_lost(unsigned long irq_nr, int nokick) 84__set_lost(unsigned long irq_nr, int nokick)
84{ 85{
85 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) { 86 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
@@ -89,7 +90,7 @@ __set_lost(unsigned long irq_nr, int nokick)
89 } 90 }
90} 91}
91 92
92static void __pmac 93static void
93pmac_mask_and_ack_irq(unsigned int irq_nr) 94pmac_mask_and_ack_irq(unsigned int irq_nr)
94{ 95{
95 unsigned long bit = 1UL << (irq_nr & 0x1f); 96 unsigned long bit = 1UL << (irq_nr & 0x1f);
@@ -114,7 +115,7 @@ pmac_mask_and_ack_irq(unsigned int irq_nr)
114 spin_unlock_irqrestore(&pmac_pic_lock, flags); 115 spin_unlock_irqrestore(&pmac_pic_lock, flags);
115} 116}
116 117
117static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) 118static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
118{ 119{
119 unsigned long bit = 1UL << (irq_nr & 0x1f); 120 unsigned long bit = 1UL << (irq_nr & 0x1f);
120 int i = irq_nr >> 5; 121 int i = irq_nr >> 5;
@@ -147,7 +148,7 @@ static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
147/* When an irq gets requested for the first client, if it's an 148/* When an irq gets requested for the first client, if it's an
148 * edge interrupt, we clear any previous one on the controller 149 * edge interrupt, we clear any previous one on the controller
149 */ 150 */
150static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr) 151static unsigned int pmac_startup_irq(unsigned int irq_nr)
151{ 152{
152 unsigned long bit = 1UL << (irq_nr & 0x1f); 153 unsigned long bit = 1UL << (irq_nr & 0x1f);
153 int i = irq_nr >> 5; 154 int i = irq_nr >> 5;
@@ -160,20 +161,20 @@ static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
160 return 0; 161 return 0;
161} 162}
162 163
163static void __pmac pmac_mask_irq(unsigned int irq_nr) 164static void pmac_mask_irq(unsigned int irq_nr)
164{ 165{
165 clear_bit(irq_nr, ppc_cached_irq_mask); 166 clear_bit(irq_nr, ppc_cached_irq_mask);
166 pmac_set_irq_mask(irq_nr, 0); 167 pmac_set_irq_mask(irq_nr, 0);
167 mb(); 168 mb();
168} 169}
169 170
170static void __pmac pmac_unmask_irq(unsigned int irq_nr) 171static void pmac_unmask_irq(unsigned int irq_nr)
171{ 172{
172 set_bit(irq_nr, ppc_cached_irq_mask); 173 set_bit(irq_nr, ppc_cached_irq_mask);
173 pmac_set_irq_mask(irq_nr, 0); 174 pmac_set_irq_mask(irq_nr, 0);
174} 175}
175 176
176static void __pmac pmac_end_irq(unsigned int irq_nr) 177static void pmac_end_irq(unsigned int irq_nr)
177{ 178{
178 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)) 179 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
179 && irq_desc[irq_nr].action) { 180 && irq_desc[irq_nr].action) {
diff --git a/arch/ppc/platforms/pmac_setup.c b/arch/ppc/platforms/pmac_setup.c
index d6356f480d90..55d2beffe560 100644
--- a/arch/ppc/platforms/pmac_setup.c
+++ b/arch/ppc/platforms/pmac_setup.c
@@ -122,7 +122,7 @@ extern struct smp_ops_t psurge_smp_ops;
122extern struct smp_ops_t core99_smp_ops; 122extern struct smp_ops_t core99_smp_ops;
123#endif /* CONFIG_SMP */ 123#endif /* CONFIG_SMP */
124 124
125static int __pmac 125static int
126pmac_show_cpuinfo(struct seq_file *m) 126pmac_show_cpuinfo(struct seq_file *m)
127{ 127{
128 struct device_node *np; 128 struct device_node *np;
@@ -226,7 +226,7 @@ pmac_show_cpuinfo(struct seq_file *m)
226 return 0; 226 return 0;
227} 227}
228 228
229static int __openfirmware 229static int
230pmac_show_percpuinfo(struct seq_file *m, int i) 230pmac_show_percpuinfo(struct seq_file *m, int i)
231{ 231{
232#ifdef CONFIG_CPU_FREQ_PMAC 232#ifdef CONFIG_CPU_FREQ_PMAC
@@ -330,9 +330,9 @@ pmac_setup_arch(void)
330#ifdef CONFIG_SMP 330#ifdef CONFIG_SMP
331 /* Check for Core99 */ 331 /* Check for Core99 */
332 if (find_devices("uni-n") || find_devices("u3")) 332 if (find_devices("uni-n") || find_devices("u3"))
333 ppc_md.smp_ops = &core99_smp_ops; 333 smp_ops = &core99_smp_ops;
334 else 334 else
335 ppc_md.smp_ops = &psurge_smp_ops; 335 smp_ops = &psurge_smp_ops;
336#endif /* CONFIG_SMP */ 336#endif /* CONFIG_SMP */
337 337
338 pci_create_OF_bus_map(); 338 pci_create_OF_bus_map();
@@ -447,7 +447,7 @@ static int pmac_pm_enter(suspend_state_t state)
447 enable_kernel_fp(); 447 enable_kernel_fp();
448 448
449#ifdef CONFIG_ALTIVEC 449#ifdef CONFIG_ALTIVEC
450 if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC) 450 if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
451 enable_kernel_altivec(); 451 enable_kernel_altivec();
452#endif /* CONFIG_ALTIVEC */ 452#endif /* CONFIG_ALTIVEC */
453 453
@@ -485,7 +485,7 @@ static int pmac_late_init(void)
485late_initcall(pmac_late_init); 485late_initcall(pmac_late_init);
486 486
487/* can't be __init - can be called whenever a disk is first accessed */ 487/* can't be __init - can be called whenever a disk is first accessed */
488void __pmac 488void
489note_bootable_part(dev_t dev, int part, int goodness) 489note_bootable_part(dev_t dev, int part, int goodness)
490{ 490{
491 static int found_boot = 0; 491 static int found_boot = 0;
@@ -511,7 +511,7 @@ note_bootable_part(dev_t dev, int part, int goodness)
511 } 511 }
512} 512}
513 513
514static void __pmac 514static void
515pmac_restart(char *cmd) 515pmac_restart(char *cmd)
516{ 516{
517#ifdef CONFIG_ADB_CUDA 517#ifdef CONFIG_ADB_CUDA
@@ -536,7 +536,7 @@ pmac_restart(char *cmd)
536 } 536 }
537} 537}
538 538
539static void __pmac 539static void
540pmac_power_off(void) 540pmac_power_off(void)
541{ 541{
542#ifdef CONFIG_ADB_CUDA 542#ifdef CONFIG_ADB_CUDA
@@ -561,7 +561,7 @@ pmac_power_off(void)
561 } 561 }
562} 562}
563 563
564static void __pmac 564static void
565pmac_halt(void) 565pmac_halt(void)
566{ 566{
567 pmac_power_off(); 567 pmac_power_off();
@@ -661,7 +661,6 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
661 ppc_md.setup_arch = pmac_setup_arch; 661 ppc_md.setup_arch = pmac_setup_arch;
662 ppc_md.show_cpuinfo = pmac_show_cpuinfo; 662 ppc_md.show_cpuinfo = pmac_show_cpuinfo;
663 ppc_md.show_percpuinfo = pmac_show_percpuinfo; 663 ppc_md.show_percpuinfo = pmac_show_percpuinfo;
664 ppc_md.irq_canonicalize = NULL;
665 ppc_md.init_IRQ = pmac_pic_init; 664 ppc_md.init_IRQ = pmac_pic_init;
666 ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */ 665 ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */
667 666
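Besides the marker removal, the pmac_setup.c hunks show three interface changes visible in the diff itself: the SMP operations pointer is assigned through a standalone smp_ops global rather than a ppc_md field, cur_cpu_spec is dereferenced directly instead of as a one-element array, and the per-platform irq_canonicalize hook is dropped (generic code keys off a flag instead, as seen in the pplus.c hunk further down). A hedged sketch of the new-style setup, assuming the declarations from the kernel headers and using a hypothetical function name:

/* Sketch only: mirrors the new-style assignments shown in the hunks above. */
static void __init demo_setup_arch(void)
{
#ifdef CONFIG_SMP
	/* smp_ops is now a global pointer, no longer a ppc_md member */
	smp_ops = &core99_smp_ops;
#endif
#ifdef CONFIG_ALTIVEC
	/* cur_cpu_spec is used as a plain pointer, not indexed as an array */
	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
		enable_kernel_altivec();
#endif
}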
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S
index 88419c77ac43..22b113d19b24 100644
--- a/arch/ppc/platforms/pmac_sleep.S
+++ b/arch/ppc/platforms/pmac_sleep.S
@@ -387,10 +387,10 @@ turn_on_mmu:
387#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */ 387#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
388 388
389 .section .data 389 .section .data
390 .balign L1_CACHE_LINE_SIZE 390 .balign L1_CACHE_BYTES
391sleep_storage: 391sleep_storage:
392 .long 0 392 .long 0
393 .balign L1_CACHE_LINE_SIZE, 0 393 .balign L1_CACHE_BYTES, 0
394 394
395#endif /* CONFIG_6xx */ 395#endif /* CONFIG_6xx */
396 .section .text 396 .section .text
diff --git a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c
index 794a23994b82..26ff26238f03 100644
--- a/arch/ppc/platforms/pmac_smp.c
+++ b/arch/ppc/platforms/pmac_smp.c
@@ -186,7 +186,7 @@ static inline void psurge_clr_ipi(int cpu)
186 */ 186 */
187static unsigned long psurge_smp_message[NR_CPUS]; 187static unsigned long psurge_smp_message[NR_CPUS];
188 188
189void __pmac psurge_smp_message_recv(struct pt_regs *regs) 189void psurge_smp_message_recv(struct pt_regs *regs)
190{ 190{
191 int cpu = smp_processor_id(); 191 int cpu = smp_processor_id();
192 int msg; 192 int msg;
@@ -203,14 +203,13 @@ void __pmac psurge_smp_message_recv(struct pt_regs *regs)
203 smp_message_recv(msg, regs); 203 smp_message_recv(msg, regs);
204} 204}
205 205
206irqreturn_t __pmac psurge_primary_intr(int irq, void *d, struct pt_regs *regs) 206irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
207{ 207{
208 psurge_smp_message_recv(regs); 208 psurge_smp_message_recv(regs);
209 return IRQ_HANDLED; 209 return IRQ_HANDLED;
210} 210}
211 211
212static void __pmac smp_psurge_message_pass(int target, int msg, unsigned long data, 212static void smp_psurge_message_pass(int target, int msg)
213 int wait)
214{ 213{
215 int i; 214 int i;
216 215
@@ -629,7 +628,7 @@ void smp_core99_give_timebase(void)
629 628
630 629
631/* PowerSurge-style Macs */ 630/* PowerSurge-style Macs */
632struct smp_ops_t psurge_smp_ops __pmacdata = { 631struct smp_ops_t psurge_smp_ops = {
633 .message_pass = smp_psurge_message_pass, 632 .message_pass = smp_psurge_message_pass,
634 .probe = smp_psurge_probe, 633 .probe = smp_psurge_probe,
635 .kick_cpu = smp_psurge_kick_cpu, 634 .kick_cpu = smp_psurge_kick_cpu,
@@ -639,7 +638,7 @@ struct smp_ops_t psurge_smp_ops __pmacdata = {
639}; 638};
640 639
641/* Core99 Macs (dual G4s) */ 640/* Core99 Macs (dual G4s) */
642struct smp_ops_t core99_smp_ops __pmacdata = { 641struct smp_ops_t core99_smp_ops = {
643 .message_pass = smp_openpic_message_pass, 642 .message_pass = smp_openpic_message_pass,
644 .probe = smp_core99_probe, 643 .probe = smp_core99_probe,
645 .kick_cpu = smp_core99_kick_cpu, 644 .kick_cpu = smp_core99_kick_cpu,
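The pmac_smp.c hunks also narrow the message_pass callback from four parameters to two; the unused data/wait arguments disappear, so the corresponding function-pointer slot presumably carries the shorter signature as well. A small sketch of the assumed shape (every name except message_pass is hypothetical, and this is not a copy of struct smp_ops_t):

/* Sketch of the assumed callback type after this change. */
struct demo_smp_ops {
	void (*message_pass)(int target, int msg); /* was: (target, msg, data, wait) */
};

static void demo_message_pass(int target, int msg)
{
	/* deliver msg to the target cpu(s); data/wait are no longer passed */
	(void)target;
	(void)msg;
}

static struct demo_smp_ops demo_ops = {
	.message_pass = demo_message_pass,
};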
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c
index efb819f9490d..edb9fcc64790 100644
--- a/arch/ppc/platforms/pmac_time.c
+++ b/arch/ppc/platforms/pmac_time.c
@@ -77,7 +77,7 @@ pmac_time_init(void)
77#endif 77#endif
78} 78}
79 79
80unsigned long __pmac 80unsigned long
81pmac_get_rtc_time(void) 81pmac_get_rtc_time(void)
82{ 82{
83#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) 83#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -118,7 +118,7 @@ pmac_get_rtc_time(void)
118 return 0; 118 return 0;
119} 119}
120 120
121int __pmac 121int
122pmac_set_rtc_time(unsigned long nowtime) 122pmac_set_rtc_time(unsigned long nowtime)
123{ 123{
124#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) 124#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -210,7 +210,7 @@ via_calibrate_decr(void)
210/* 210/*
211 * Reset the time after a sleep. 211 * Reset the time after a sleep.
212 */ 212 */
213static int __pmac 213static int
214time_sleep_notify(struct pmu_sleep_notifier *self, int when) 214time_sleep_notify(struct pmu_sleep_notifier *self, int when)
215{ 215{
216 static unsigned long time_diff; 216 static unsigned long time_diff;
@@ -235,7 +235,7 @@ time_sleep_notify(struct pmu_sleep_notifier *self, int when)
235 return PBOOK_SLEEP_OK; 235 return PBOOK_SLEEP_OK;
236} 236}
237 237
238static struct pmu_sleep_notifier time_sleep_notifier __pmacdata = { 238static struct pmu_sleep_notifier time_sleep_notifier = {
239 time_sleep_notify, SLEEP_LEVEL_MISC, 239 time_sleep_notify, SLEEP_LEVEL_MISC,
240}; 240};
241#endif /* CONFIG_PM */ 241#endif /* CONFIG_PM */
diff --git a/arch/ppc/platforms/pplus.c b/arch/ppc/platforms/pplus.c
index e70aae20d6f9..22bd40cfb092 100644
--- a/arch/ppc/platforms/pplus.c
+++ b/arch/ppc/platforms/pplus.c
@@ -646,14 +646,6 @@ static void pplus_power_off(void)
646 pplus_halt(); 646 pplus_halt();
647} 647}
648 648
649static unsigned int pplus_irq_canonicalize(u_int irq)
650{
651 if (irq == 2)
652 return 9;
653 else
654 return irq;
655}
656
657static void __init pplus_init_IRQ(void) 649static void __init pplus_init_IRQ(void)
658{ 650{
659 int i; 651 int i;
@@ -673,10 +665,7 @@ static void __init pplus_init_IRQ(void)
673 ppc_md.get_irq = openpic_get_irq; 665 ppc_md.get_irq = openpic_get_irq;
674 } 666 }
675 667
676 for (i = 0; i < NUM_8259_INTERRUPTS; i++) 668 i8259_init(0, 0);
677 irq_desc[i].handler = &i8259_pic;
678
679 i8259_init(0);
680 669
681 if (ppc_md.progress) 670 if (ppc_md.progress)
682 ppc_md.progress("init_irq: exit", 0); 671 ppc_md.progress("init_irq: exit", 0);
@@ -872,10 +861,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
872 ISA_DMA_THRESHOLD = 0x00ffffff; 861 ISA_DMA_THRESHOLD = 0x00ffffff;
873 DMA_MODE_READ = 0x44; 862 DMA_MODE_READ = 0x44;
874 DMA_MODE_WRITE = 0x48; 863 DMA_MODE_WRITE = 0x48;
864 ppc_do_canonicalize_irqs = 1;
875 865
876 ppc_md.setup_arch = pplus_setup_arch; 866 ppc_md.setup_arch = pplus_setup_arch;
877 ppc_md.show_cpuinfo = pplus_show_cpuinfo; 867 ppc_md.show_cpuinfo = pplus_show_cpuinfo;
878 ppc_md.irq_canonicalize = pplus_irq_canonicalize;
879 ppc_md.init_IRQ = pplus_init_IRQ; 868 ppc_md.init_IRQ = pplus_init_IRQ;
880 /* this gets changed later on if we have an OpenPIC -- Cort */ 869 /* this gets changed later on if we have an OpenPIC -- Cort */
881 ppc_md.get_irq = i8259_irq; 870 ppc_md.get_irq = i8259_irq;
@@ -911,6 +900,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
911 ppc_md.kgdb_map_scc = gen550_kgdb_map_scc; 900 ppc_md.kgdb_map_scc = gen550_kgdb_map_scc;
912#endif 901#endif
913#ifdef CONFIG_SMP 902#ifdef CONFIG_SMP
914 ppc_md.smp_ops = &pplus_smp_ops; 903 smp_ops = &pplus_smp_ops;
915#endif /* CONFIG_SMP */ 904#endif /* CONFIG_SMP */
916} 905}
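Two related changes recur in the pplus.c hunk above and in the prep_setup.c hunks below: i8259_init() gains a second argument (taken here to be the offset at which the cascaded 8259's interrupt numbers are registered), and the board-specific irq_canonicalize hook that remapped IRQ 2 to IRQ 9 is replaced by setting ppc_do_canonicalize_irqs so the generic code performs the remapping. A hedged sketch of the new-style init path, assuming the signatures implied by the call sites in the diff and using hypothetical function names:

/* Sketch only, not a verbatim copy of any platform file. */
static void __init demo_init_IRQ(void)
{
	/* second argument: interrupt-number offset for the 8259 inputs;
	 * i8259_init() is assumed to install its own irq_desc handlers,
	 * so the old "irq_desc[i].handler = &i8259_pic" loop goes away
	 */
	i8259_init(0, 0);
}

static void __init demo_platform_init(void)
{
	/* replaces a board-specific irq_canonicalize() that returned 9 for 2 */
	ppc_do_canonicalize_irqs = 1;
	ppc_md.init_IRQ = demo_init_IRQ;
}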
diff --git a/arch/ppc/platforms/prep_pci.c b/arch/ppc/platforms/prep_pci.c
index 4760cb64251d..e50b9996848c 100644
--- a/arch/ppc/platforms/prep_pci.c
+++ b/arch/ppc/platforms/prep_pci.c
@@ -43,7 +43,7 @@ static unsigned long *ProcInfo;
43/* Tables for known hardware */ 43/* Tables for known hardware */
44 44
45/* Motorola PowerStackII - Utah */ 45/* Motorola PowerStackII - Utah */
46static char Utah_pci_IRQ_map[23] __prepdata = 46static char Utah_pci_IRQ_map[23] =
47{ 47{
48 0, /* Slot 0 - unused */ 48 0, /* Slot 0 - unused */
49 0, /* Slot 1 - unused */ 49 0, /* Slot 1 - unused */
@@ -72,7 +72,7 @@ static char Utah_pci_IRQ_map[23] __prepdata =
72 0, /* Slot 22 - unused */ 72 0, /* Slot 22 - unused */
73}; 73};
74 74
75static char Utah_pci_IRQ_routes[] __prepdata = 75static char Utah_pci_IRQ_routes[] =
76{ 76{
77 0, /* Line 0 - Unused */ 77 0, /* Line 0 - Unused */
78 9, /* Line 1 */ 78 9, /* Line 1 */
@@ -84,7 +84,7 @@ static char Utah_pci_IRQ_routes[] __prepdata =
84 84
85/* Motorola PowerStackII - Omaha */ 85/* Motorola PowerStackII - Omaha */
86/* no integrated SCSI or ethernet */ 86/* no integrated SCSI or ethernet */
87static char Omaha_pci_IRQ_map[23] __prepdata = 87static char Omaha_pci_IRQ_map[23] =
88{ 88{
89 0, /* Slot 0 - unused */ 89 0, /* Slot 0 - unused */
90 0, /* Slot 1 - unused */ 90 0, /* Slot 1 - unused */
@@ -111,7 +111,7 @@ static char Omaha_pci_IRQ_map[23] __prepdata =
111 0, 111 0,
112}; 112};
113 113
114static char Omaha_pci_IRQ_routes[] __prepdata = 114static char Omaha_pci_IRQ_routes[] =
115{ 115{
116 0, /* Line 0 - Unused */ 116 0, /* Line 0 - Unused */
117 9, /* Line 1 */ 117 9, /* Line 1 */
@@ -121,7 +121,7 @@ static char Omaha_pci_IRQ_routes[] __prepdata =
121}; 121};
122 122
123/* Motorola PowerStack */ 123/* Motorola PowerStack */
124static char Blackhawk_pci_IRQ_map[19] __prepdata = 124static char Blackhawk_pci_IRQ_map[19] =
125{ 125{
126 0, /* Slot 0 - unused */ 126 0, /* Slot 0 - unused */
127 0, /* Slot 1 - unused */ 127 0, /* Slot 1 - unused */
@@ -144,7 +144,7 @@ static char Blackhawk_pci_IRQ_map[19] __prepdata =
144 3, /* Slot P5 */ 144 3, /* Slot P5 */
145}; 145};
146 146
147static char Blackhawk_pci_IRQ_routes[] __prepdata = 147static char Blackhawk_pci_IRQ_routes[] =
148{ 148{
149 0, /* Line 0 - Unused */ 149 0, /* Line 0 - Unused */
150 9, /* Line 1 */ 150 9, /* Line 1 */
@@ -154,7 +154,7 @@ static char Blackhawk_pci_IRQ_routes[] __prepdata =
154}; 154};
155 155
156/* Motorola Mesquite */ 156/* Motorola Mesquite */
157static char Mesquite_pci_IRQ_map[23] __prepdata = 157static char Mesquite_pci_IRQ_map[23] =
158{ 158{
159 0, /* Slot 0 - unused */ 159 0, /* Slot 0 - unused */
160 0, /* Slot 1 - unused */ 160 0, /* Slot 1 - unused */
@@ -182,7 +182,7 @@ static char Mesquite_pci_IRQ_map[23] __prepdata =
182}; 182};
183 183
184/* Motorola Sitka */ 184/* Motorola Sitka */
185static char Sitka_pci_IRQ_map[21] __prepdata = 185static char Sitka_pci_IRQ_map[21] =
186{ 186{
187 0, /* Slot 0 - unused */ 187 0, /* Slot 0 - unused */
188 0, /* Slot 1 - unused */ 188 0, /* Slot 1 - unused */
@@ -208,7 +208,7 @@ static char Sitka_pci_IRQ_map[21] __prepdata =
208}; 208};
209 209
210/* Motorola MTX */ 210/* Motorola MTX */
211static char MTX_pci_IRQ_map[23] __prepdata = 211static char MTX_pci_IRQ_map[23] =
212{ 212{
213 0, /* Slot 0 - unused */ 213 0, /* Slot 0 - unused */
214 0, /* Slot 1 - unused */ 214 0, /* Slot 1 - unused */
@@ -237,7 +237,7 @@ static char MTX_pci_IRQ_map[23] __prepdata =
237 237
238/* Motorola MTX Plus */ 238/* Motorola MTX Plus */
239/* Secondary bus interrupt routing is not supported yet */ 239/* Secondary bus interrupt routing is not supported yet */
240static char MTXplus_pci_IRQ_map[23] __prepdata = 240static char MTXplus_pci_IRQ_map[23] =
241{ 241{
242 0, /* Slot 0 - unused */ 242 0, /* Slot 0 - unused */
243 0, /* Slot 1 - unused */ 243 0, /* Slot 1 - unused */
@@ -264,13 +264,13 @@ static char MTXplus_pci_IRQ_map[23] __prepdata =
264 0, /* Slot 22 - unused */ 264 0, /* Slot 22 - unused */
265}; 265};
266 266
267static char Raven_pci_IRQ_routes[] __prepdata = 267static char Raven_pci_IRQ_routes[] =
268{ 268{
269 0, /* This is a dummy structure */ 269 0, /* This is a dummy structure */
270}; 270};
271 271
272/* Motorola MVME16xx */ 272/* Motorola MVME16xx */
273static char Genesis_pci_IRQ_map[16] __prepdata = 273static char Genesis_pci_IRQ_map[16] =
274{ 274{
275 0, /* Slot 0 - unused */ 275 0, /* Slot 0 - unused */
276 0, /* Slot 1 - unused */ 276 0, /* Slot 1 - unused */
@@ -290,7 +290,7 @@ static char Genesis_pci_IRQ_map[16] __prepdata =
290 0, /* Slot 15 - unused */ 290 0, /* Slot 15 - unused */
291}; 291};
292 292
293static char Genesis_pci_IRQ_routes[] __prepdata = 293static char Genesis_pci_IRQ_routes[] =
294{ 294{
295 0, /* Line 0 - Unused */ 295 0, /* Line 0 - Unused */
296 10, /* Line 1 */ 296 10, /* Line 1 */
@@ -299,7 +299,7 @@ static char Genesis_pci_IRQ_routes[] __prepdata =
299 15 /* Line 4 */ 299 15 /* Line 4 */
300}; 300};
301 301
302static char Genesis2_pci_IRQ_map[23] __prepdata = 302static char Genesis2_pci_IRQ_map[23] =
303{ 303{
304 0, /* Slot 0 - unused */ 304 0, /* Slot 0 - unused */
305 0, /* Slot 1 - unused */ 305 0, /* Slot 1 - unused */
@@ -327,7 +327,7 @@ static char Genesis2_pci_IRQ_map[23] __prepdata =
327}; 327};
328 328
329/* Motorola Series-E */ 329/* Motorola Series-E */
330static char Comet_pci_IRQ_map[23] __prepdata = 330static char Comet_pci_IRQ_map[23] =
331{ 331{
332 0, /* Slot 0 - unused */ 332 0, /* Slot 0 - unused */
333 0, /* Slot 1 - unused */ 333 0, /* Slot 1 - unused */
@@ -354,7 +354,7 @@ static char Comet_pci_IRQ_map[23] __prepdata =
354 0, 354 0,
355}; 355};
356 356
357static char Comet_pci_IRQ_routes[] __prepdata = 357static char Comet_pci_IRQ_routes[] =
358{ 358{
359 0, /* Line 0 - Unused */ 359 0, /* Line 0 - Unused */
360 10, /* Line 1 */ 360 10, /* Line 1 */
@@ -364,7 +364,7 @@ static char Comet_pci_IRQ_routes[] __prepdata =
364}; 364};
365 365
366/* Motorola Series-EX */ 366/* Motorola Series-EX */
367static char Comet2_pci_IRQ_map[23] __prepdata = 367static char Comet2_pci_IRQ_map[23] =
368{ 368{
369 0, /* Slot 0 - unused */ 369 0, /* Slot 0 - unused */
370 0, /* Slot 1 - unused */ 370 0, /* Slot 1 - unused */
@@ -391,7 +391,7 @@ static char Comet2_pci_IRQ_map[23] __prepdata =
391 0, 391 0,
392}; 392};
393 393
394static char Comet2_pci_IRQ_routes[] __prepdata = 394static char Comet2_pci_IRQ_routes[] =
395{ 395{
396 0, /* Line 0 - Unused */ 396 0, /* Line 0 - Unused */
397 10, /* Line 1 */ 397 10, /* Line 1 */
@@ -405,7 +405,7 @@ static char Comet2_pci_IRQ_routes[] __prepdata =
405 * This is actually based on the Carolina motherboard 405 * This is actually based on the Carolina motherboard
406 * -- Cort 406 * -- Cort
407 */ 407 */
408static char ibm8xx_pci_IRQ_map[23] __prepdata = { 408static char ibm8xx_pci_IRQ_map[23] = {
409 0, /* Slot 0 - unused */ 409 0, /* Slot 0 - unused */
410 0, /* Slot 1 - unused */ 410 0, /* Slot 1 - unused */
411 0, /* Slot 2 - unused */ 411 0, /* Slot 2 - unused */
@@ -431,7 +431,7 @@ static char ibm8xx_pci_IRQ_map[23] __prepdata = {
431 2, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */ 431 2, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
432}; 432};
433 433
434static char ibm8xx_pci_IRQ_routes[] __prepdata = { 434static char ibm8xx_pci_IRQ_routes[] = {
435 0, /* Line 0 - unused */ 435 0, /* Line 0 - unused */
436 15, /* Line 1 */ 436 15, /* Line 1 */
437 15, /* Line 2 */ 437 15, /* Line 2 */
@@ -443,7 +443,7 @@ static char ibm8xx_pci_IRQ_routes[] __prepdata = {
443 * a 6015 ibm board 443 * a 6015 ibm board
444 * -- Cort 444 * -- Cort
445 */ 445 */
446static char ibm6015_pci_IRQ_map[23] __prepdata = { 446static char ibm6015_pci_IRQ_map[23] = {
447 0, /* Slot 0 - unused */ 447 0, /* Slot 0 - unused */
448 0, /* Slot 1 - unused */ 448 0, /* Slot 1 - unused */
449 0, /* Slot 2 - unused */ 449 0, /* Slot 2 - unused */
@@ -469,7 +469,7 @@ static char ibm6015_pci_IRQ_map[23] __prepdata = {
469 2, /* Slot 22 - */ 469 2, /* Slot 22 - */
470}; 470};
471 471
472static char ibm6015_pci_IRQ_routes[] __prepdata = { 472static char ibm6015_pci_IRQ_routes[] = {
473 0, /* Line 0 - unused */ 473 0, /* Line 0 - unused */
474 13, /* Line 1 */ 474 13, /* Line 1 */
475 15, /* Line 2 */ 475 15, /* Line 2 */
@@ -479,7 +479,7 @@ static char ibm6015_pci_IRQ_routes[] __prepdata = {
479 479
480 480
481/* IBM Nobis and Thinkpad 850 */ 481/* IBM Nobis and Thinkpad 850 */
482static char Nobis_pci_IRQ_map[23] __prepdata ={ 482static char Nobis_pci_IRQ_map[23] ={
483 0, /* Slot 0 - unused */ 483 0, /* Slot 0 - unused */
484 0, /* Slot 1 - unused */ 484 0, /* Slot 1 - unused */
485 0, /* Slot 2 - unused */ 485 0, /* Slot 2 - unused */
@@ -498,7 +498,7 @@ static char Nobis_pci_IRQ_map[23] __prepdata ={
498 0, /* Slot 15 - unused */ 498 0, /* Slot 15 - unused */
499}; 499};
500 500
501static char Nobis_pci_IRQ_routes[] __prepdata = { 501static char Nobis_pci_IRQ_routes[] = {
502 0, /* Line 0 - Unused */ 502 0, /* Line 0 - Unused */
503 13, /* Line 1 */ 503 13, /* Line 1 */
504 13, /* Line 2 */ 504 13, /* Line 2 */
@@ -510,7 +510,7 @@ static char Nobis_pci_IRQ_routes[] __prepdata = {
510 * IBM RS/6000 43p/140 -- paulus 510 * IBM RS/6000 43p/140 -- paulus
511 * XXX we should get all this from the residual data 511 * XXX we should get all this from the residual data
512 */ 512 */
513static char ibm43p_pci_IRQ_map[23] __prepdata = { 513static char ibm43p_pci_IRQ_map[23] = {
514 0, /* Slot 0 - unused */ 514 0, /* Slot 0 - unused */
515 0, /* Slot 1 - unused */ 515 0, /* Slot 1 - unused */
516 0, /* Slot 2 - unused */ 516 0, /* Slot 2 - unused */
@@ -536,7 +536,7 @@ static char ibm43p_pci_IRQ_map[23] __prepdata = {
536 1, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */ 536 1, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
537}; 537};
538 538
539static char ibm43p_pci_IRQ_routes[] __prepdata = { 539static char ibm43p_pci_IRQ_routes[] = {
540 0, /* Line 0 - unused */ 540 0, /* Line 0 - unused */
541 15, /* Line 1 */ 541 15, /* Line 1 */
542 15, /* Line 2 */ 542 15, /* Line 2 */
@@ -559,7 +559,7 @@ struct powerplus_irq_list
559 * are routed to OpenPIC inputs 5-8. These values are offset by 559 * are routed to OpenPIC inputs 5-8. These values are offset by
560 * 16 in the table to reflect the Linux kernel interrupt value. 560 * 16 in the table to reflect the Linux kernel interrupt value.
561 */ 561 */
562struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata = 562struct powerplus_irq_list Powerplus_pci_IRQ_list =
563{ 563{
564 {25, 26, 27, 28}, 564 {25, 26, 27, 28},
565 {21, 22, 23, 24} 565 {21, 22, 23, 24}
@@ -572,7 +572,7 @@ struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata =
572 * are routed to OpenPIC inputs 12-15. These values are offset by 572 * are routed to OpenPIC inputs 12-15. These values are offset by
573 * 16 in the table to reflect the Linux kernel interrupt value. 573 * 16 in the table to reflect the Linux kernel interrupt value.
574 */ 574 */
575struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata = 575struct powerplus_irq_list Mesquite_pci_IRQ_list =
576{ 576{
577 {24, 25, 26, 27}, 577 {24, 25, 26, 27},
578 {28, 29, 30, 31} 578 {28, 29, 30, 31}
@@ -582,7 +582,7 @@ struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata =
582 * This table represents the standard PCI swizzle defined in the 582 * This table represents the standard PCI swizzle defined in the
583 * PCI bus specification. 583 * PCI bus specification.
584 */ 584 */
585static unsigned char prep_pci_intpins[4][4] __prepdata = 585static unsigned char prep_pci_intpins[4][4] =
586{ 586{
587 { 1, 2, 3, 4}, /* Buses 0, 4, 8, ... */ 587 { 1, 2, 3, 4}, /* Buses 0, 4, 8, ... */
588 { 2, 3, 4, 1}, /* Buses 1, 5, 9, ... */ 588 { 2, 3, 4, 1}, /* Buses 1, 5, 9, ... */
@@ -622,7 +622,7 @@ static unsigned char prep_pci_intpins[4][4] __prepdata =
622#define MIN_DEVNR 11 622#define MIN_DEVNR 11
623#define MAX_DEVNR 22 623#define MAX_DEVNR 22
624 624
625static int __prep 625static int
626prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 626prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
627 int len, u32 *val) 627 int len, u32 *val)
628{ 628{
@@ -652,7 +652,7 @@ prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
652 return PCIBIOS_SUCCESSFUL; 652 return PCIBIOS_SUCCESSFUL;
653} 653}
654 654
655static int __prep 655static int
656prep_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 656prep_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
657 int len, u32 val) 657 int len, u32 val)
658{ 658{
@@ -804,7 +804,7 @@ struct mot_info {
804 void (*map_non0_bus)(struct pci_dev *); /* For boards with more than bus 0 devices. */ 804 void (*map_non0_bus)(struct pci_dev *); /* For boards with more than bus 0 devices. */
805 struct powerplus_irq_list *pci_irq_list; /* List of PCI MPIC inputs */ 805 struct powerplus_irq_list *pci_irq_list; /* List of PCI MPIC inputs */
806 unsigned char secondary_bridge_devfn; /* devfn of secondary bus transparent bridge */ 806 unsigned char secondary_bridge_devfn; /* devfn of secondary bus transparent bridge */
807} mot_info[] __prepdata = { 807} mot_info[] = {
808 {0x300, 0x00, 0x00, "MVME 2400", Genesis2_pci_IRQ_map, Raven_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0xFF}, 808 {0x300, 0x00, 0x00, "MVME 2400", Genesis2_pci_IRQ_map, Raven_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0xFF},
809 {0x010, 0x00, 0x00, "Genesis", Genesis_pci_IRQ_map, Genesis_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0x00}, 809 {0x010, 0x00, 0x00, "Genesis", Genesis_pci_IRQ_map, Genesis_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0x00},
810 {0x020, 0x00, 0x00, "Powerstack (Series E)", Comet_pci_IRQ_map, Comet_pci_IRQ_routes, NULL, NULL, 0x00}, 810 {0x020, 0x00, 0x00, "Powerstack (Series E)", Comet_pci_IRQ_map, Comet_pci_IRQ_routes, NULL, NULL, 0x00},
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index bc926be95472..067d7d53b81e 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -89,9 +89,6 @@ extern void prep_tiger1_setup_pci(char *irq_edge_mask_lo, char *irq_edge_mask_hi
89#define cached_21 (((char *)(ppc_cached_irq_mask))[3]) 89#define cached_21 (((char *)(ppc_cached_irq_mask))[3])
90#define cached_A1 (((char *)(ppc_cached_irq_mask))[2]) 90#define cached_A1 (((char *)(ppc_cached_irq_mask))[2])
91 91
92/* for the mac fs */
93dev_t boot_dev;
94
95#ifdef CONFIG_SOUND_CS4232 92#ifdef CONFIG_SOUND_CS4232
96long ppc_cs4232_dma, ppc_cs4232_dma2; 93long ppc_cs4232_dma, ppc_cs4232_dma2;
97#endif 94#endif
@@ -173,7 +170,7 @@ prep_carolina_enable_l2(void)
173} 170}
174 171
175/* cpuinfo code common to all IBM PReP */ 172/* cpuinfo code common to all IBM PReP */
176static void __prep 173static void
177prep_ibm_cpuinfo(struct seq_file *m) 174prep_ibm_cpuinfo(struct seq_file *m)
178{ 175{
179 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 176 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -209,14 +206,14 @@ prep_ibm_cpuinfo(struct seq_file *m)
209 } 206 }
210} 207}
211 208
212static int __prep 209static int
213prep_gen_cpuinfo(struct seq_file *m) 210prep_gen_cpuinfo(struct seq_file *m)
214{ 211{
215 prep_ibm_cpuinfo(m); 212 prep_ibm_cpuinfo(m);
216 return 0; 213 return 0;
217} 214}
218 215
219static int __prep 216static int
220prep_sandalfoot_cpuinfo(struct seq_file *m) 217prep_sandalfoot_cpuinfo(struct seq_file *m)
221{ 218{
222 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 219 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -243,7 +240,7 @@ prep_sandalfoot_cpuinfo(struct seq_file *m)
243 return 0; 240 return 0;
244} 241}
245 242
246static int __prep 243static int
247prep_thinkpad_cpuinfo(struct seq_file *m) 244prep_thinkpad_cpuinfo(struct seq_file *m)
248{ 245{
249 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 246 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -314,7 +311,7 @@ prep_thinkpad_cpuinfo(struct seq_file *m)
314 return 0; 311 return 0;
315} 312}
316 313
317static int __prep 314static int
318prep_carolina_cpuinfo(struct seq_file *m) 315prep_carolina_cpuinfo(struct seq_file *m)
319{ 316{
320 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 317 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -350,7 +347,7 @@ prep_carolina_cpuinfo(struct seq_file *m)
350 return 0; 347 return 0;
351} 348}
352 349
353static int __prep 350static int
354prep_tiger1_cpuinfo(struct seq_file *m) 351prep_tiger1_cpuinfo(struct seq_file *m)
355{ 352{
356 unsigned int l2_reg = inb(PREP_IBM_L2INFO); 353 unsigned int l2_reg = inb(PREP_IBM_L2INFO);
@@ -393,7 +390,7 @@ prep_tiger1_cpuinfo(struct seq_file *m)
393 390
394 391
395/* Used by all Motorola PReP */ 392/* Used by all Motorola PReP */
396static int __prep 393static int
397prep_mot_cpuinfo(struct seq_file *m) 394prep_mot_cpuinfo(struct seq_file *m)
398{ 395{
399 unsigned int cachew = *((unsigned char *)CACHECRBA); 396 unsigned int cachew = *((unsigned char *)CACHECRBA);
@@ -454,7 +451,7 @@ no_l2:
454 return 0; 451 return 0;
455} 452}
456 453
457static void __prep 454static void
458prep_restart(char *cmd) 455prep_restart(char *cmd)
459{ 456{
460#define PREP_SP92 0x92 /* Special Port 92 */ 457#define PREP_SP92 0x92 /* Special Port 92 */
@@ -473,7 +470,7 @@ prep_restart(char *cmd)
473#undef PREP_SP92 470#undef PREP_SP92
474} 471}
475 472
476static void __prep 473static void
477prep_halt(void) 474prep_halt(void)
478{ 475{
479 local_irq_disable(); /* no interrupts */ 476 local_irq_disable(); /* no interrupts */
@@ -488,7 +485,7 @@ prep_halt(void)
488/* Carrera is the power manager in the Thinkpads. Unfortunately not much is 485/* Carrera is the power manager in the Thinkpads. Unfortunately not much is
489 * known about it, so we can't power down. 486 * known about it, so we can't power down.
490 */ 487 */
491static void __prep 488static void
492prep_carrera_poweroff(void) 489prep_carrera_poweroff(void)
493{ 490{
494 prep_halt(); 491 prep_halt();
@@ -501,7 +498,7 @@ prep_carrera_poweroff(void)
501 * somewhat in the IBM Carolina Technical Specification. 498 * somewhat in the IBM Carolina Technical Specification.
502 * -Hollis 499 * -Hollis
503 */ 500 */
504static void __prep 501static void
505utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value) 502utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
506{ 503{
507 /* 504 /*
@@ -539,7 +536,7 @@ utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
539 udelay(100); /* important: let controller recover */ 536 udelay(100); /* important: let controller recover */
540} 537}
541 538
542static void __prep 539static void
543prep_sig750_poweroff(void) 540prep_sig750_poweroff(void)
544{ 541{
545 /* tweak the power manager found in most IBM PRePs (except Thinkpads) */ 542 /* tweak the power manager found in most IBM PRePs (except Thinkpads) */
@@ -554,7 +551,7 @@ prep_sig750_poweroff(void)
554 /* not reached */ 551 /* not reached */
555} 552}
556 553
557static int __prep 554static int
558prep_show_percpuinfo(struct seq_file *m, int i) 555prep_show_percpuinfo(struct seq_file *m, int i)
559{ 556{
560 /* PREP's without residual data will give incorrect values here */ 557 /* PREP's without residual data will give incorrect values here */
@@ -700,12 +697,12 @@ prep_set_bat(void)
700/* 697/*
701 * IBM 3-digit status LED 698 * IBM 3-digit status LED
702 */ 699 */
703static unsigned int ibm_statusled_base __prepdata; 700static unsigned int ibm_statusled_base;
704 701
705static void __prep 702static void
706ibm_statusled_progress(char *s, unsigned short hex); 703ibm_statusled_progress(char *s, unsigned short hex);
707 704
708static int __prep 705static int
709ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2, 706ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
710 void * dummy3) 707 void * dummy3)
711{ 708{
@@ -713,13 +710,13 @@ ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
713 return NOTIFY_DONE; 710 return NOTIFY_DONE;
714} 711}
715 712
716static struct notifier_block ibm_statusled_block __prepdata = { 713static struct notifier_block ibm_statusled_block = {
717 ibm_statusled_panic, 714 ibm_statusled_panic,
718 NULL, 715 NULL,
719 INT_MAX /* try to do it first */ 716 INT_MAX /* try to do it first */
720}; 717};
721 718
722static void __prep 719static void
723ibm_statusled_progress(char *s, unsigned short hex) 720ibm_statusled_progress(char *s, unsigned short hex)
724{ 721{
725 static int notifier_installed; 722 static int notifier_installed;
@@ -945,19 +942,6 @@ prep_calibrate_decr(void)
945 todc_calibrate_decr(); 942 todc_calibrate_decr();
946} 943}
947 944
948static unsigned int __prep
949prep_irq_canonicalize(u_int irq)
950{
951 if (irq == 2)
952 {
953 return 9;
954 }
955 else
956 {
957 return irq;
958 }
959}
960
961static void __init 945static void __init
962prep_init_IRQ(void) 946prep_init_IRQ(void)
963{ 947{
@@ -970,11 +954,9 @@ prep_init_IRQ(void)
970 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade", 954 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade",
971 i8259_irq); 955 i8259_irq);
972 } 956 }
973 for ( i = 0 ; i < NUM_8259_INTERRUPTS ; i++ )
974 irq_desc[i].handler = &i8259_pic;
975 957
976 if (have_residual_data) { 958 if (have_residual_data) {
977 i8259_init(residual_isapic_addr()); 959 i8259_init(residual_isapic_addr(), 0);
978 return; 960 return;
979 } 961 }
980 962
@@ -985,18 +967,18 @@ prep_init_IRQ(void)
985 if (((pci_viddid & 0xffff) == PCI_VENDOR_ID_MOTOROLA) 967 if (((pci_viddid & 0xffff) == PCI_VENDOR_ID_MOTOROLA)
986 && ((pci_did == PCI_DEVICE_ID_MOTOROLA_RAVEN) 968 && ((pci_did == PCI_DEVICE_ID_MOTOROLA_RAVEN)
987 || (pci_did == PCI_DEVICE_ID_MOTOROLA_HAWK))) 969 || (pci_did == PCI_DEVICE_ID_MOTOROLA_HAWK)))
988 i8259_init(0); 970 i8259_init(0, 0);
989 else 971 else
990 /* PCI interrupt ack address given in section 6.1.8 of the 972 /* PCI interrupt ack address given in section 6.1.8 of the
991 * PReP specification. */ 973 * PReP specification. */
992 i8259_init(MPC10X_MAPA_PCI_INTACK_ADDR); 974 i8259_init(MPC10X_MAPA_PCI_INTACK_ADDR, 0);
993} 975}
994 976
995#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) 977#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
996/* 978/*
997 * IDE stuff. 979 * IDE stuff.
998 */ 980 */
999static int __prep 981static int
1000prep_ide_default_irq(unsigned long base) 982prep_ide_default_irq(unsigned long base)
1001{ 983{
1002 switch (base) { 984 switch (base) {
@@ -1010,7 +992,7 @@ prep_ide_default_irq(unsigned long base)
1010 } 992 }
1011} 993}
1012 994
1013static unsigned long __prep 995static unsigned long
1014prep_ide_default_io_base(int index) 996prep_ide_default_io_base(int index)
1015{ 997{
1016 switch (index) { 998 switch (index) {
@@ -1055,7 +1037,7 @@ smp_prep_setup_cpu(int cpu_nr)
1055 do_openpic_setup_cpu(); 1037 do_openpic_setup_cpu();
1056} 1038}
1057 1039
1058static struct smp_ops_t prep_smp_ops __prepdata = { 1040static struct smp_ops_t prep_smp_ops = {
1059 smp_openpic_message_pass, 1041 smp_openpic_message_pass,
1060 smp_prep_probe, 1042 smp_prep_probe,
1061 smp_prep_kick_cpu, 1043 smp_prep_kick_cpu,
@@ -1113,6 +1095,7 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1113 ISA_DMA_THRESHOLD = 0x00ffffff; 1095 ISA_DMA_THRESHOLD = 0x00ffffff;
1114 DMA_MODE_READ = 0x44; 1096 DMA_MODE_READ = 0x44;
1115 DMA_MODE_WRITE = 0x48; 1097 DMA_MODE_WRITE = 0x48;
1098 ppc_do_canonicalize_irqs = 1;
1116 1099
1117 /* figure out what kind of prep workstation we are */ 1100 /* figure out what kind of prep workstation we are */
1118 if (have_residual_data) { 1101 if (have_residual_data) {
@@ -1139,7 +1122,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1139 ppc_md.setup_arch = prep_setup_arch; 1122 ppc_md.setup_arch = prep_setup_arch;
1140 ppc_md.show_percpuinfo = prep_show_percpuinfo; 1123 ppc_md.show_percpuinfo = prep_show_percpuinfo;
1141 ppc_md.show_cpuinfo = NULL; /* set in prep_setup_arch() */ 1124 ppc_md.show_cpuinfo = NULL; /* set in prep_setup_arch() */
1142 ppc_md.irq_canonicalize = prep_irq_canonicalize;
1143 ppc_md.init_IRQ = prep_init_IRQ; 1125 ppc_md.init_IRQ = prep_init_IRQ;
1144 /* this gets changed later on if we have an OpenPIC -- Cort */ 1126 /* this gets changed later on if we have an OpenPIC -- Cort */
1145 ppc_md.get_irq = i8259_irq; 1127 ppc_md.get_irq = i8259_irq;
@@ -1176,6 +1158,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1176#endif 1158#endif
1177 1159
1178#ifdef CONFIG_SMP 1160#ifdef CONFIG_SMP
1179 ppc_md.smp_ops = &prep_smp_ops; 1161 smp_ops = &prep_smp_ops;
1180#endif /* CONFIG_SMP */ 1162#endif /* CONFIG_SMP */
1181} 1163}
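
The PReP hunk above drops the board-private prep_irq_canonicalize() hook and instead sets ppc_do_canonicalize_irqs = 1 in prep_init(). The fragment below is an illustrative sketch only, not the kernel's actual helper: it shows the remapping the removed hook performed and that generic code is now assumed to apply it for every board that opts in.

/*
 * Illustrative sketch, not the kernel's actual implementation: the removed
 * prep_irq_canonicalize() simply remapped the 8259 cascade (IRQ 2) to IRQ 9.
 * With ppc_do_canonicalize_irqs = 1 set in prep_init() above, a single
 * generic helper is assumed to apply the same mapping for opted-in boards.
 */
extern int ppc_do_canonicalize_irqs;	/* set by PReP (and Sandpoint, below) */

static inline unsigned int irq_canonicalize(unsigned int irq)
{
	if (ppc_do_canonicalize_irqs && irq == 2)
		return 9;	/* ISA convention: cascade IRQ 2 appears as IRQ 9 */
	return irq;
}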
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c
index 0376c8cff5d1..6f97911c330d 100644
--- a/arch/ppc/platforms/radstone_ppc7d.c
+++ b/arch/ppc/platforms/radstone_ppc7d.c
@@ -514,13 +514,9 @@ static void __init ppc7d_init_irq(void)
514 int irq; 514 int irq;
515 515
516 pr_debug("%s\n", __FUNCTION__); 516 pr_debug("%s\n", __FUNCTION__);
517 i8259_init(0); 517 i8259_init(0, 0);
518 mv64360_init_irq(); 518 mv64360_init_irq();
519 519
520 /* IRQ 0..15 are handled by the cascaded 8259's of the Ali1535 */
521 for (irq = 0; irq < 16; irq++) {
522 irq_desc[irq].handler = &i8259_pic;
523 }
524 /* IRQs 5,6,9,10,11,14,15 are level sensitive */ 520 /* IRQs 5,6,9,10,11,14,15 are level sensitive */
525 irq_desc[5].status |= IRQ_LEVEL; 521 irq_desc[5].status |= IRQ_LEVEL;
526 irq_desc[6].status |= IRQ_LEVEL; 522 irq_desc[6].status |= IRQ_LEVEL;
@@ -1183,18 +1179,18 @@ static void __init ppc7d_setup_arch(void)
1183 ROOT_DEV = Root_HDA1; 1179 ROOT_DEV = Root_HDA1;
1184#endif 1180#endif
1185 1181
1186 if ((cur_cpu_spec[0]->cpu_features & CPU_FTR_SPEC7450) || 1182 if ((cur_cpu_spec->cpu_features & CPU_FTR_SPEC7450) ||
1187 (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR)) 1183 (cur_cpu_spec->cpu_features & CPU_FTR_L3CR))
1188 /* 745x is different. We only want to pass along enable. */ 1184 /* 745x is different. We only want to pass along enable. */
1189 _set_L2CR(L2CR_L2E); 1185 _set_L2CR(L2CR_L2E);
1190 else if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR) 1186 else if (cur_cpu_spec->cpu_features & CPU_FTR_L2CR)
1191 /* All modules have 1MB of L2. We also assume that an 1187 /* All modules have 1MB of L2. We also assume that an
1192 * L2 divisor of 3 will work. 1188 * L2 divisor of 3 will work.
1193 */ 1189 */
1194 _set_L2CR(L2CR_L2E | L2CR_L2SIZ_1MB | L2CR_L2CLK_DIV3 1190 _set_L2CR(L2CR_L2E | L2CR_L2SIZ_1MB | L2CR_L2CLK_DIV3
1195 | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF); 1191 | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF);
1196 1192
1197 if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR) 1193 if (cur_cpu_spec->cpu_features & CPU_FTR_L3CR)
1198 /* No L3 cache */ 1194 /* No L3 cache */
1199 _set_L3CR(0); 1195 _set_L3CR(0);
1200 1196
@@ -1424,6 +1420,7 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
1424 ppc_md.setup_arch = ppc7d_setup_arch; 1420 ppc_md.setup_arch = ppc7d_setup_arch;
1425 ppc_md.init = ppc7d_init2; 1421 ppc_md.init = ppc7d_init2;
1426 ppc_md.show_cpuinfo = ppc7d_show_cpuinfo; 1422 ppc_md.show_cpuinfo = ppc7d_show_cpuinfo;
1423 /* XXX this is broken... */
1427 ppc_md.irq_canonicalize = ppc7d_irq_canonicalize; 1424 ppc_md.irq_canonicalize = ppc7d_irq_canonicalize;
1428 ppc_md.init_IRQ = ppc7d_init_irq; 1425 ppc_md.init_IRQ = ppc7d_init_irq;
1429 ppc_md.get_irq = ppc7d_get_irq; 1426 ppc_md.get_irq = ppc7d_get_irq;
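
Both the PReP and Radstone hunks switch every call site to a two-argument i8259_init() and delete the per-board loops that pointed irq_desc[0..15] at &i8259_pic. The sketch below is hedged: the prototype and the board hook name are inferred purely from those call sites, not taken from the i8259 code itself.

#include <linux/init.h>

/*
 * Hedged sketch of the new call pattern: i8259_init() now takes the PCI
 * interrupt-acknowledge address plus an IRQ-number offset, and is assumed to
 * claim its sixteen irq_desc slots itself -- which is why the per-board
 * "irq_desc[i].handler = &i8259_pic" loops are removed in the hunks above.
 */
extern void i8259_init(unsigned long intack_addr, int offset);	/* assumed prototype */

static void __init example_board_init_IRQ(void)	/* hypothetical board hook */
{
	i8259_init(0xfef00000, 0);	/* intack window (as on Sandpoint below), offset 0 */
}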
diff --git a/arch/ppc/platforms/residual.c b/arch/ppc/platforms/residual.c
index 0f84ca603612..c9911601cfdf 100644
--- a/arch/ppc/platforms/residual.c
+++ b/arch/ppc/platforms/residual.c
@@ -47,7 +47,7 @@
47#include <asm/ide.h> 47#include <asm/ide.h>
48 48
49 49
50unsigned char __res[sizeof(RESIDUAL)] __prepdata = {0,}; 50unsigned char __res[sizeof(RESIDUAL)] = {0,};
51RESIDUAL *res = (RESIDUAL *)&__res; 51RESIDUAL *res = (RESIDUAL *)&__res;
52 52
53char * PnP_BASE_TYPES[] __initdata = { 53char * PnP_BASE_TYPES[] __initdata = {
diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c
index 5232283c1974..9eeed3572309 100644
--- a/arch/ppc/platforms/sandpoint.c
+++ b/arch/ppc/platforms/sandpoint.c
@@ -494,27 +494,10 @@ sandpoint_init_IRQ(void)
494 i8259_irq); 494 i8259_irq);
495 495
496 /* 496 /*
497 * openpic_init() has set up irq_desc[16-31] to be openpic
498 * interrupts. We need to set irq_desc[0-15] to be i8259
499 * interrupts.
500 */
501 for(i=0; i < NUM_8259_INTERRUPTS; i++)
502 irq_desc[i].handler = &i8259_pic;
503
504 /*
505 * The EPIC allows for a read in the range of 0xFEF00000 -> 497 * The EPIC allows for a read in the range of 0xFEF00000 ->
506 * 0xFEFFFFFF to generate a PCI interrupt-acknowledge transaction. 498 * 0xFEFFFFFF to generate a PCI interrupt-acknowledge transaction.
507 */ 499 */
508 i8259_init(0xfef00000); 500 i8259_init(0xfef00000, 0);
509}
510
511static u32
512sandpoint_irq_canonicalize(u32 irq)
513{
514 if (irq == 2)
515 return 9;
516 else
517 return irq;
518} 501}
519 502
520static unsigned long __init 503static unsigned long __init
@@ -727,10 +710,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
727 ISA_DMA_THRESHOLD = 0x00ffffff; 710 ISA_DMA_THRESHOLD = 0x00ffffff;
728 DMA_MODE_READ = 0x44; 711 DMA_MODE_READ = 0x44;
729 DMA_MODE_WRITE = 0x48; 712 DMA_MODE_WRITE = 0x48;
713 ppc_do_canonicalize_irqs = 1;
730 714
731 ppc_md.setup_arch = sandpoint_setup_arch; 715 ppc_md.setup_arch = sandpoint_setup_arch;
732 ppc_md.show_cpuinfo = sandpoint_show_cpuinfo; 716 ppc_md.show_cpuinfo = sandpoint_show_cpuinfo;
733 ppc_md.irq_canonicalize = sandpoint_irq_canonicalize;
734 ppc_md.init_IRQ = sandpoint_init_IRQ; 717 ppc_md.init_IRQ = sandpoint_init_IRQ;
735 ppc_md.get_irq = openpic_get_irq; 718 ppc_md.get_irq = openpic_get_irq;
736 719
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index b8d08f33f7ee..b4ef15b45c4a 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -31,52 +31,49 @@ obj-$(CONFIG_GEN_RTC) += todc_time.o
31obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o 31obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o
32obj-$(CONFIG_PPC4xx_EDMA) += ppc4xx_sgdma.o 32obj-$(CONFIG_PPC4xx_EDMA) += ppc4xx_sgdma.o
33ifeq ($(CONFIG_40x),y) 33ifeq ($(CONFIG_40x),y)
34obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o ppc405_pci.o 34obj-$(CONFIG_PCI) += pci_auto.o ppc405_pci.o
35endif 35endif
36endif 36endif
37obj-$(CONFIG_8xx) += m8xx_setup.o ppc8xx_pic.o $(wdt-mpc8xx-y) \ 37obj-$(CONFIG_8xx) += m8xx_setup.o ppc8xx_pic.o $(wdt-mpc8xx-y) \
38 ppc_sys.o mpc8xx_devices.o mpc8xx_sys.o 38 ppc_sys.o mpc8xx_devices.o mpc8xx_sys.o
39ifeq ($(CONFIG_8xx),y) 39obj-$(CONFIG_PCI_QSPAN) += qspan_pci.o
40obj-$(CONFIG_PCI) += qspan_pci.o i8259.o 40obj-$(CONFIG_PPC_OF) += prom_init.o prom.o
41endif 41obj-$(CONFIG_PPC_PMAC) += open_pic.o
42obj-$(CONFIG_PPC_OF) += prom_init.o prom.o of_device.o
43obj-$(CONFIG_PPC_PMAC) += open_pic.o indirect_pci.o
44obj-$(CONFIG_POWER4) += open_pic2.o 42obj-$(CONFIG_POWER4) += open_pic2.o
45obj-$(CONFIG_PPC_CHRP) += open_pic.o indirect_pci.o i8259.o 43obj-$(CONFIG_PPC_CHRP) += open_pic.o
46obj-$(CONFIG_PPC_PREP) += open_pic.o indirect_pci.o i8259.o todc_time.o 44obj-$(CONFIG_PPC_PREP) += open_pic.o todc_time.o
47obj-$(CONFIG_BAMBOO) += indirect_pci.o pci_auto.o todc_time.o 45obj-$(CONFIG_BAMBOO) += pci_auto.o todc_time.o
48obj-$(CONFIG_CPCI690) += todc_time.o pci_auto.o 46obj-$(CONFIG_CPCI690) += todc_time.o pci_auto.o
49obj-$(CONFIG_EBONY) += indirect_pci.o pci_auto.o todc_time.o 47obj-$(CONFIG_EBONY) += pci_auto.o todc_time.o
50obj-$(CONFIG_EV64260) += todc_time.o pci_auto.o 48obj-$(CONFIG_EV64260) += todc_time.o pci_auto.o
51obj-$(CONFIG_CHESTNUT) += mv64360_pic.o pci_auto.o 49obj-$(CONFIG_CHESTNUT) += mv64360_pic.o pci_auto.o
52obj-$(CONFIG_GEMINI) += open_pic.o indirect_pci.o 50obj-$(CONFIG_GEMINI) += open_pic.o
53obj-$(CONFIG_GT64260) += gt64260_pic.o 51obj-$(CONFIG_GT64260) += gt64260_pic.o
54obj-$(CONFIG_LOPEC) += i8259.o pci_auto.o todc_time.o 52obj-$(CONFIG_LOPEC) += pci_auto.o todc_time.o
55obj-$(CONFIG_HDPU) += pci_auto.o 53obj-$(CONFIG_HDPU) += pci_auto.o
56obj-$(CONFIG_LUAN) += indirect_pci.o pci_auto.o todc_time.o 54obj-$(CONFIG_LUAN) += pci_auto.o todc_time.o
57obj-$(CONFIG_KATANA) += pci_auto.o 55obj-$(CONFIG_KATANA) += pci_auto.o
58obj-$(CONFIG_MV64360) += mv64360_pic.o 56obj-$(CONFIG_MV64360) += mv64360_pic.o
59obj-$(CONFIG_MV64X60) += mv64x60.o mv64x60_win.o indirect_pci.o 57obj-$(CONFIG_MV64X60) += mv64x60.o mv64x60_win.o
60obj-$(CONFIG_MVME5100) += open_pic.o todc_time.o indirect_pci.o \ 58obj-$(CONFIG_MVME5100) += open_pic.o todc_time.o \
61 pci_auto.o hawk_common.o 59 pci_auto.o hawk_common.o
62obj-$(CONFIG_MVME5100_IPMC761_PRESENT) += i8259.o 60obj-$(CONFIG_OCOTEA) += pci_auto.o todc_time.o
63obj-$(CONFIG_OCOTEA) += indirect_pci.o pci_auto.o todc_time.o
64obj-$(CONFIG_PAL4) += cpc700_pic.o 61obj-$(CONFIG_PAL4) += cpc700_pic.o
65obj-$(CONFIG_POWERPMC250) += pci_auto.o 62obj-$(CONFIG_POWERPMC250) += pci_auto.o
66obj-$(CONFIG_PPLUS) += hawk_common.o open_pic.o i8259.o \ 63obj-$(CONFIG_PPLUS) += hawk_common.o open_pic.o \
67 indirect_pci.o todc_time.o pci_auto.o 64 todc_time.o pci_auto.o
68obj-$(CONFIG_PRPMC750) += open_pic.o indirect_pci.o pci_auto.o \ 65obj-$(CONFIG_PRPMC750) += open_pic.o pci_auto.o \
69 hawk_common.o 66 hawk_common.o
70obj-$(CONFIG_HARRIER) += harrier.o 67obj-$(CONFIG_HARRIER) += harrier.o
71obj-$(CONFIG_PRPMC800) += open_pic.o indirect_pci.o pci_auto.o 68obj-$(CONFIG_PRPMC800) += open_pic.o pci_auto.o
72obj-$(CONFIG_RADSTONE_PPC7D) += i8259.o pci_auto.o 69obj-$(CONFIG_RADSTONE_PPC7D) += pci_auto.o
73obj-$(CONFIG_SANDPOINT) += i8259.o pci_auto.o todc_time.o 70obj-$(CONFIG_SANDPOINT) += pci_auto.o todc_time.o
74obj-$(CONFIG_SBC82xx) += todc_time.o 71obj-$(CONFIG_SBC82xx) += todc_time.o
75obj-$(CONFIG_SPRUCE) += cpc700_pic.o indirect_pci.o pci_auto.o \ 72obj-$(CONFIG_SPRUCE) += cpc700_pic.o pci_auto.o \
76 todc_time.o 73 todc_time.o
77obj-$(CONFIG_8260) += m8260_setup.o pq2_devices.o pq2_sys.o \ 74obj-$(CONFIG_8260) += m8260_setup.o pq2_devices.o pq2_sys.o \
78 ppc_sys.o 75 ppc_sys.o
79obj-$(CONFIG_PCI_8260) += m82xx_pci.o indirect_pci.o pci_auto.o 76obj-$(CONFIG_PCI_8260) += m82xx_pci.o pci_auto.o
80obj-$(CONFIG_8260_PCI9) += m8260_pci_erratum9.o 77obj-$(CONFIG_8260_PCI9) += m8260_pci_erratum9.o
81obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o 78obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o
82ifeq ($(CONFIG_PPC_GEN550),y) 79ifeq ($(CONFIG_PPC_GEN550),y)
@@ -87,20 +84,18 @@ ifeq ($(CONFIG_SERIAL_MPSC_CONSOLE),y)
87obj-$(CONFIG_SERIAL_TEXT_DEBUG) += mv64x60_dbg.o 84obj-$(CONFIG_SERIAL_TEXT_DEBUG) += mv64x60_dbg.o
88endif 85endif
89obj-$(CONFIG_BOOTX_TEXT) += btext.o 86obj-$(CONFIG_BOOTX_TEXT) += btext.o
90obj-$(CONFIG_MPC10X_BRIDGE) += mpc10x_common.o indirect_pci.o ppc_sys.o 87obj-$(CONFIG_MPC10X_BRIDGE) += mpc10x_common.o ppc_sys.o
91obj-$(CONFIG_MPC10X_OPENPIC) += open_pic.o 88obj-$(CONFIG_MPC10X_OPENPIC) += open_pic.o
92obj-$(CONFIG_40x) += dcr.o
93obj-$(CONFIG_BOOKE) += dcr.o
94obj-$(CONFIG_85xx) += open_pic.o ppc85xx_common.o ppc85xx_setup.o \ 89obj-$(CONFIG_85xx) += open_pic.o ppc85xx_common.o ppc85xx_setup.o \
95 ppc_sys.o i8259.o mpc85xx_sys.o \ 90 ppc_sys.o mpc85xx_sys.o \
96 mpc85xx_devices.o 91 mpc85xx_devices.o
97ifeq ($(CONFIG_85xx),y) 92ifeq ($(CONFIG_85xx),y)
98obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o 93obj-$(CONFIG_PCI) += pci_auto.o
99endif 94endif
100obj-$(CONFIG_83xx) += ipic.o ppc83xx_setup.o ppc_sys.o \ 95obj-$(CONFIG_83xx) += ipic.o ppc83xx_setup.o ppc_sys.o \
101 mpc83xx_sys.o mpc83xx_devices.o 96 mpc83xx_sys.o mpc83xx_devices.o
102ifeq ($(CONFIG_83xx),y) 97ifeq ($(CONFIG_83xx),y)
103obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o 98obj-$(CONFIG_PCI) += pci_auto.o
104endif 99endif
105obj-$(CONFIG_MPC8548_CDS) += todc_time.o 100obj-$(CONFIG_MPC8548_CDS) += todc_time.o
106obj-$(CONFIG_MPC8555_CDS) += todc_time.o 101obj-$(CONFIG_MPC8555_CDS) += todc_time.o
diff --git a/arch/ppc/syslib/btext.c b/arch/ppc/syslib/btext.c
index 7734f6836174..12fa83e6774a 100644
--- a/arch/ppc/syslib/btext.c
+++ b/arch/ppc/syslib/btext.c
@@ -53,8 +53,8 @@ extern char *klimit;
53 * chrp only uses it during early boot. 53 * chrp only uses it during early boot.
54 */ 54 */
55#ifdef CONFIG_XMON 55#ifdef CONFIG_XMON
56#define BTEXT __pmac 56#define BTEXT
57#define BTDATA __pmacdata 57#define BTDATA
58#else 58#else
59#define BTEXT __init 59#define BTEXT __init
60#define BTDATA __initdata 60#define BTDATA __initdata
@@ -187,7 +187,7 @@ btext_setup_display(int width, int height, int depth, int pitch,
187 * changes. 187 * changes.
188 */ 188 */
189 189
190void __openfirmware 190void
191map_boot_text(void) 191map_boot_text(void)
192{ 192{
193 unsigned long base, offset, size; 193 unsigned long base, offset, size;
diff --git a/arch/ppc/syslib/gt64260_pic.c b/arch/ppc/syslib/gt64260_pic.c
index 44aa87385451..f97b3a9abd1e 100644
--- a/arch/ppc/syslib/gt64260_pic.c
+++ b/arch/ppc/syslib/gt64260_pic.c
@@ -45,6 +45,7 @@
45#include <asm/system.h> 45#include <asm/system.h>
46#include <asm/irq.h> 46#include <asm/irq.h>
47#include <asm/mv64x60.h> 47#include <asm/mv64x60.h>
48#include <asm/machdep.h>
48 49
49#define CPU_INTR_STR "gt64260 cpu interface error" 50#define CPU_INTR_STR "gt64260 cpu interface error"
50#define PCI0_INTR_STR "gt64260 pci 0 error" 51#define PCI0_INTR_STR "gt64260 pci 0 error"
diff --git a/arch/ppc/syslib/ibm440gx_common.c b/arch/ppc/syslib/ibm440gx_common.c
index 0bb919859b8b..c36db279b43d 100644
--- a/arch/ppc/syslib/ibm440gx_common.c
+++ b/arch/ppc/syslib/ibm440gx_common.c
@@ -236,9 +236,9 @@ void __init ibm440gx_l2c_setup(struct ibm44x_clocks* p)
236 /* Disable L2C on rev.A, rev.B and 800MHz version of rev.C, 236 /* Disable L2C on rev.A, rev.B and 800MHz version of rev.C,
237 enable it on all other revisions 237 enable it on all other revisions
238 */ 238 */
239 if (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. A") == 0 || 239 if (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. A") == 0 ||
240 strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. B") == 0 240 strcmp(cur_cpu_spec->cpu_name, "440GX Rev. B") == 0
241 || (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. C") 241 || (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. C")
242 == 0 && p->cpu > 667000000)) 242 == 0 && p->cpu > 667000000))
243 ibm440gx_l2c_disable(); 243 ibm440gx_l2c_disable();
244 else 244 else
diff --git a/arch/ppc/syslib/ibm44x_common.c b/arch/ppc/syslib/ibm44x_common.c
index 7612e0623f99..5152c8e41340 100644
--- a/arch/ppc/syslib/ibm44x_common.c
+++ b/arch/ppc/syslib/ibm44x_common.c
@@ -27,9 +27,14 @@
27#include <asm/time.h> 27#include <asm/time.h>
28#include <asm/ppc4xx_pic.h> 28#include <asm/ppc4xx_pic.h>
29#include <asm/param.h> 29#include <asm/param.h>
30#include <asm/bootinfo.h>
31#include <asm/ppcboot.h>
30 32
31#include <syslib/gen550.h> 33#include <syslib/gen550.h>
32 34
35/* Global Variables */
36bd_t __res;
37
33phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size) 38phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
34{ 39{
35 phys_addr_t page_4gb = 0; 40 phys_addr_t page_4gb = 0;
@@ -150,8 +155,36 @@ static unsigned long __init ibm44x_find_end_of_memory(void)
150 return mem_size; 155 return mem_size;
151} 156}
152 157
153void __init ibm44x_platform_init(void) 158void __init ibm44x_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
159 unsigned long r6, unsigned long r7)
154{ 160{
161 parse_bootinfo(find_bootinfo());
162
163 /*
164 * If we were passed in a board information, copy it into the
165 * residual data area.
166 */
167 if (r3)
168 __res = *(bd_t *)(r3 + KERNELBASE);
169
170#if defined(CONFIG_BLK_DEV_INITRD)
171 /*
172 * If the init RAM disk has been configured in, and there's a valid
173 * starting address for it, set it up.
174 */
175 if (r4) {
176 initrd_start = r4 + KERNELBASE;
177 initrd_end = r5 + KERNELBASE;
178 }
179#endif /* CONFIG_BLK_DEV_INITRD */
180
181 /* Copy the kernel command line arguments to a safe place. */
182
183 if (r6) {
184 *(char *) (r7 + KERNELBASE) = 0;
185 strcpy(cmd_line, (char *) (r6 + KERNELBASE));
186 }
187
155 ppc_md.init_IRQ = ppc4xx_pic_init; 188 ppc_md.init_IRQ = ppc4xx_pic_init;
156 ppc_md.find_end_of_memory = ibm44x_find_end_of_memory; 189 ppc_md.find_end_of_memory = ibm44x_find_end_of_memory;
157 ppc_md.restart = ibm44x_restart; 190 ppc_md.restart = ibm44x_restart;
@@ -178,7 +211,7 @@ void __init ibm44x_platform_init(void)
178#endif 211#endif
179} 212}
180 213
181/* Called from MachineCheckException */ 214/* Called from machine_check_exception */
182void platform_machine_check(struct pt_regs *regs) 215void platform_machine_check(struct pt_regs *regs)
183{ 216{
184 printk("PLB0: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x\n", 217 printk("PLB0: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x\n",
diff --git a/arch/ppc/syslib/ibm44x_common.h b/arch/ppc/syslib/ibm44x_common.h
index c16b6a5ac6ab..b25a8995e4e9 100644
--- a/arch/ppc/syslib/ibm44x_common.h
+++ b/arch/ppc/syslib/ibm44x_common.h
@@ -36,7 +36,8 @@ struct ibm44x_clocks {
36}; 36};
37 37
38/* common 44x platform init */ 38/* common 44x platform init */
39void ibm44x_platform_init(void) __init; 39void ibm44x_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
40 unsigned long r6, unsigned long r7) __init;
40 41
41/* initialize decrementer and tick-related variables */ 42/* initialize decrementer and tick-related variables */
42void ibm44x_calibrate_decr(unsigned int freq) __init; 43void ibm44x_calibrate_decr(unsigned int freq) __init;
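
ibm44x_platform_init() now receives the firmware boot registers directly and does the bootinfo, initrd and command-line handling itself. The caller-side consequence, sketched here for a hypothetical 44x board file, is that the board's platform_init() simply forwards r3..r7 instead of duplicating that parsing.

#include <linux/init.h>
#include "ibm44x_common.h"	/* declares ibm44x_platform_init(), per the header hunk above */

/* Hypothetical 44x board file, shown only to illustrate the implied call. */
void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
			  unsigned long r6, unsigned long r7)
{
	ibm44x_platform_init(r3, r4, r5, r6, r7);
	/* board-specific ppc_md hooks would be assigned here */
}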
diff --git a/arch/ppc/syslib/m8260_setup.c b/arch/ppc/syslib/m8260_setup.c
index 8f80a42dfdb7..76a2aa4ce65e 100644
--- a/arch/ppc/syslib/m8260_setup.c
+++ b/arch/ppc/syslib/m8260_setup.c
@@ -62,6 +62,10 @@ m8260_setup_arch(void)
62 if (initrd_start) 62 if (initrd_start)
63 ROOT_DEV = Root_RAM0; 63 ROOT_DEV = Root_RAM0;
64#endif 64#endif
65
66 identify_ppc_sys_by_name_and_id(BOARD_CHIP_NAME,
67 in_be32(CPM_MAP_ADDR + CPM_IMMR_OFFSET));
68
65 m82xx_board_setup(); 69 m82xx_board_setup();
66} 70}
67 71
diff --git a/arch/ppc/syslib/m82xx_pci.c b/arch/ppc/syslib/m82xx_pci.c
index 9db58c587b46..1d1c3956c1ae 100644
--- a/arch/ppc/syslib/m82xx_pci.c
+++ b/arch/ppc/syslib/m82xx_pci.c
@@ -302,11 +302,11 @@ pq2ads_setup_pci(struct pci_controller *hose)
302 302
303void __init pq2_find_bridges(void) 303void __init pq2_find_bridges(void)
304{ 304{
305 extern int pci_assign_all_busses; 305 extern int pci_assign_all_buses;
306 struct pci_controller * hose; 306 struct pci_controller * hose;
307 int host_bridge; 307 int host_bridge;
308 308
309 pci_assign_all_busses = 1; 309 pci_assign_all_buses = 1;
310 310
311 hose = pcibios_alloc_controller(); 311 hose = pcibios_alloc_controller();
312 312
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c
index 4c888da89b3c..97ffbc70574f 100644
--- a/arch/ppc/syslib/m8xx_setup.c
+++ b/arch/ppc/syslib/m8xx_setup.c
@@ -144,12 +144,12 @@ void __init m8xx_calibrate_decr(void)
144 int freq, fp, divisor; 144 int freq, fp, divisor;
145 145
146 /* Unlock the SCCR. */ 146 /* Unlock the SCCR. */
147 ((volatile immap_t *)IMAP_ADDR)->im_clkrstk.cark_sccrk = ~KAPWR_KEY; 147 out_be32(&((immap_t *)IMAP_ADDR)->im_clkrstk.cark_sccrk, ~KAPWR_KEY);
148 ((volatile immap_t *)IMAP_ADDR)->im_clkrstk.cark_sccrk = KAPWR_KEY; 148 out_be32(&((immap_t *)IMAP_ADDR)->im_clkrstk.cark_sccrk, KAPWR_KEY);
149 149
150 /* Force all 8xx processors to use divide by 16 processor clock. */ 150 /* Force all 8xx processors to use divide by 16 processor clock. */
151 ((volatile immap_t *)IMAP_ADDR)->im_clkrst.car_sccr |= 0x02000000; 151 out_be32(&((immap_t *)IMAP_ADDR)->im_clkrst.car_sccr,
152 152 in_be32(&((immap_t *)IMAP_ADDR)->im_clkrst.car_sccr)|0x02000000);
153 /* Processor frequency is MHz. 153 /* Processor frequency is MHz.
154 * The value 'fp' is the number of decrementer ticks per second. 154 * The value 'fp' is the number of decrementer ticks per second.
155 */ 155 */
@@ -175,28 +175,24 @@ void __init m8xx_calibrate_decr(void)
175 * we guarantee the registers are locked, then we unlock them 175 * we guarantee the registers are locked, then we unlock them
176 * for our use. 176 * for our use.
177 */ 177 */
178 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_tbscrk = ~KAPWR_KEY; 178 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbscrk, ~KAPWR_KEY);
179 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck = ~KAPWR_KEY; 179 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck, ~KAPWR_KEY);
180 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk = ~KAPWR_KEY; 180 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk, ~KAPWR_KEY);
181 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_tbscrk = KAPWR_KEY; 181 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbscrk, KAPWR_KEY);
182 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck = KAPWR_KEY; 182 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck, KAPWR_KEY);
183 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk = KAPWR_KEY; 183 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk, KAPWR_KEY);
184 184
185 /* Disable the RTC one second and alarm interrupts. */ 185 /* Disable the RTC one second and alarm interrupts. */
186 ((volatile immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc &= 186 out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc, in_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc) & ~(RTCSC_SIE | RTCSC_ALE));
187 ~(RTCSC_SIE | RTCSC_ALE);
188 /* Enable the RTC */ 187 /* Enable the RTC */
189 ((volatile immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc |= 188 out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc, in_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc) | (RTCSC_RTF | RTCSC_RTE));
190 (RTCSC_RTF | RTCSC_RTE);
191 189
192 /* Enabling the decrementer also enables the timebase interrupts 190 /* Enabling the decrementer also enables the timebase interrupts
193 * (or from the other point of view, to get decrementer interrupts 191 * (or from the other point of view, to get decrementer interrupts
194 * we have to enable the timebase). The decrementer interrupt 192 * we have to enable the timebase). The decrementer interrupt
195 * is wired into the vector table, nothing to do here for that. 193 * is wired into the vector table, nothing to do here for that.
196 */ 194 */
197 ((volatile immap_t *)IMAP_ADDR)->im_sit.sit_tbscr = 195 out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_tbscr, (mk_int_int_mask(DEC_INTERRUPT) << 8) | (TBSCR_TBF | TBSCR_TBE));
198 ((mk_int_int_mask(DEC_INTERRUPT) << 8) |
199 (TBSCR_TBF | TBSCR_TBE));
200 196
201 if (setup_irq(DEC_INTERRUPT, &tbint_irqaction)) 197 if (setup_irq(DEC_INTERRUPT, &tbint_irqaction))
202 panic("Could not allocate timer IRQ!"); 198 panic("Could not allocate timer IRQ!");
@@ -216,9 +212,9 @@ void __init m8xx_calibrate_decr(void)
216static int 212static int
217m8xx_set_rtc_time(unsigned long time) 213m8xx_set_rtc_time(unsigned long time)
218{ 214{
219 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_rtck = KAPWR_KEY; 215 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtck, KAPWR_KEY);
220 ((volatile immap_t *)IMAP_ADDR)->im_sit.sit_rtc = time; 216 out_be32(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtc, time);
221 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_rtck = ~KAPWR_KEY; 217 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtck, ~KAPWR_KEY);
222 return(0); 218 return(0);
223} 219}
224 220
@@ -226,7 +222,7 @@ static unsigned long
226m8xx_get_rtc_time(void) 222m8xx_get_rtc_time(void)
227{ 223{
228 /* Get time from the RTC. */ 224 /* Get time from the RTC. */
229 return((unsigned long)(((immap_t *)IMAP_ADDR)->im_sit.sit_rtc)); 225 return (unsigned long) in_be32(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtc);
230} 226}
231 227
232static void 228static void
@@ -235,13 +231,13 @@ m8xx_restart(char *cmd)
235 __volatile__ unsigned char dummy; 231 __volatile__ unsigned char dummy;
236 232
237 local_irq_disable(); 233 local_irq_disable();
238 ((immap_t *)IMAP_ADDR)->im_clkrst.car_plprcr |= 0x00000080; 234 out_be32(&((immap_t *)IMAP_ADDR)->im_clkrst.car_plprcr, in_be32(&((immap_t *)IMAP_ADDR)->im_clkrst.car_plprcr) | 0x00000080);
239 235
240 /* Clear the ME bit in MSR to cause checkstop on machine check 236 /* Clear the ME bit in MSR to cause checkstop on machine check
241 */ 237 */
242 mtmsr(mfmsr() & ~0x1000); 238 mtmsr(mfmsr() & ~0x1000);
243 239
244 dummy = ((immap_t *)IMAP_ADDR)->im_clkrst.res[0]; 240 dummy = in_8(&((immap_t *)IMAP_ADDR)->im_clkrst.res[0]);
245 printk("Restart failed\n"); 241 printk("Restart failed\n");
246 while(1); 242 while(1);
247} 243}
@@ -306,8 +302,7 @@ m8xx_init_IRQ(void)
306 i8259_init(0); 302 i8259_init(0);
307 303
308 /* The i8259 cascade interrupt must be level sensitive. */ 304 /* The i8259 cascade interrupt must be level sensitive. */
309 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel &= 305 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel, in_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel & ~(0x80000000 >> ISA_BRIDGE_INT)));
310 ~(0x80000000 >> ISA_BRIDGE_INT);
311 306
312 if (setup_irq(ISA_BRIDGE_INT, &mbx_i8259_irqaction)) 307 if (setup_irq(ISA_BRIDGE_INT, &mbx_i8259_irqaction))
313 enable_irq(ISA_BRIDGE_INT); 308 enable_irq(ISA_BRIDGE_INT);
@@ -404,9 +399,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
404 strcpy(cmd_line, (char *)(r6+KERNELBASE)); 399 strcpy(cmd_line, (char *)(r6+KERNELBASE));
405 } 400 }
406 401
402 identify_ppc_sys_by_name(BOARD_CHIP_NAME);
403
407 ppc_md.setup_arch = m8xx_setup_arch; 404 ppc_md.setup_arch = m8xx_setup_arch;
408 ppc_md.show_percpuinfo = m8xx_show_percpuinfo; 405 ppc_md.show_percpuinfo = m8xx_show_percpuinfo;
409 ppc_md.irq_canonicalize = NULL;
410 ppc_md.init_IRQ = m8xx_init_IRQ; 406 ppc_md.init_IRQ = m8xx_init_IRQ;
411 ppc_md.get_irq = m8xx_get_irq; 407 ppc_md.get_irq = m8xx_get_irq;
412 ppc_md.init = NULL; 408 ppc_md.init = NULL;
diff --git a/arch/ppc/syslib/m8xx_wdt.c b/arch/ppc/syslib/m8xx_wdt.c
index 2ddc857e7fc7..c5ac5ce5d7d2 100644
--- a/arch/ppc/syslib/m8xx_wdt.c
+++ b/arch/ppc/syslib/m8xx_wdt.c
@@ -29,8 +29,8 @@ void m8xx_wdt_reset(void)
29{ 29{
30 volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR; 30 volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR;
31 31
32 imap->im_siu_conf.sc_swsr = 0x556c; /* write magic1 */ 32 out_be16(imap->im_siu_conf.sc_swsr, 0x556c); /* write magic1 */
33 imap->im_siu_conf.sc_swsr = 0xaa39; /* write magic2 */ 33 out_be16(imap->im_siu_conf.sc_swsr, 0xaa39); /* write magic2 */
34} 34}
35 35
36static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs) 36static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs)
@@ -39,7 +39,7 @@ static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs)
39 39
40 m8xx_wdt_reset(); 40 m8xx_wdt_reset();
41 41
42 imap->im_sit.sit_piscr |= PISCR_PS; /* clear irq */ 42 out_be16(imap->im_sit.sit_piscr, in_be16(imap->im_sit.sit_piscr | PISCR_PS)); /* clear irq */
43 43
44 return IRQ_HANDLED; 44 return IRQ_HANDLED;
45} 45}
@@ -51,7 +51,7 @@ void __init m8xx_wdt_handler_install(bd_t * binfo)
51 u32 sypcr; 51 u32 sypcr;
52 u32 pitrtclk; 52 u32 pitrtclk;
53 53
54 sypcr = imap->im_siu_conf.sc_sypcr; 54 sypcr = in_be32(imap->im_siu_conf.sc_sypcr);
55 55
56 if (!(sypcr & 0x04)) { 56 if (!(sypcr & 0x04)) {
57 printk(KERN_NOTICE "m8xx_wdt: wdt disabled (SYPCR: 0x%08X)\n", 57 printk(KERN_NOTICE "m8xx_wdt: wdt disabled (SYPCR: 0x%08X)\n",
@@ -87,9 +87,9 @@ void __init m8xx_wdt_handler_install(bd_t * binfo)
87 else 87 else
88 pitc = pitrtclk * wdt_timeout / binfo->bi_intfreq / 2; 88 pitc = pitrtclk * wdt_timeout / binfo->bi_intfreq / 2;
89 89
90 imap->im_sit.sit_pitc = pitc << 16; 90 out_be32(imap->im_sit.sit_pitc, pitc << 16);
91 imap->im_sit.sit_piscr = 91
92 (mk_int_int_mask(PIT_INTERRUPT) << 8) | PISCR_PIE | PISCR_PTE; 92 out_be16(imap->im_sit.sit_piscr, (mk_int_int_mask(PIT_INTERRUPT) << 8) | PISCR_PIE | PISCR_PTE);
93 93
94 if (setup_irq(PIT_INTERRUPT, &m8xx_wdt_irqaction)) 94 if (setup_irq(PIT_INTERRUPT, &m8xx_wdt_irqaction))
95 panic("m8xx_wdt: error setting up the watchdog irq!"); 95 panic("m8xx_wdt: error setting up the watchdog irq!");
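
The m8xx_setup.c and m8xx_wdt.c hunks above convert direct dereferences of the volatile immap_t registers into in_be16/in_be32/out_be16/out_be32 accessors. A minimal sketch of the read-modify-write pattern they use follows; the function and parameter names are illustrative, not actual kernel symbols.

#include <linux/types.h>
#include <asm/io.h>

/*
 * Minimal sketch of the accessor conversion above: an open-coded
 * "*(volatile u32 *)reg |= mask" becomes an explicit big-endian MMIO
 * read-modify-write, keeping byte order and access semantics in one place.
 */
static void example_reg_set_bits(u32 __iomem *reg, u32 mask)
{
	out_be32(reg, in_be32(reg) | mask);
}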
diff --git a/arch/ppc/syslib/mpc52xx_pci.c b/arch/ppc/syslib/mpc52xx_pci.c
index 59cf3e8bd1a0..4ac19080eb85 100644
--- a/arch/ppc/syslib/mpc52xx_pci.c
+++ b/arch/ppc/syslib/mpc52xx_pci.c
@@ -21,6 +21,7 @@
21#include "mpc52xx_pci.h" 21#include "mpc52xx_pci.h"
22 22
23#include <asm/delay.h> 23#include <asm/delay.h>
24#include <asm/machdep.h>
24 25
25 26
26static int 27static int
@@ -181,7 +182,7 @@ mpc52xx_find_bridges(void)
181 struct mpc52xx_pci __iomem *pci_regs; 182 struct mpc52xx_pci __iomem *pci_regs;
182 struct pci_controller *hose; 183 struct pci_controller *hose;
183 184
184 pci_assign_all_busses = 1; 185 pci_assign_all_buses = 1;
185 186
186 pci_regs = ioremap(MPC52xx_PA(MPC52xx_PCI_OFFSET), MPC52xx_PCI_SIZE); 187 pci_regs = ioremap(MPC52xx_PA(MPC52xx_PCI_OFFSET), MPC52xx_PCI_SIZE);
187 if (!pci_regs) 188 if (!pci_regs)
diff --git a/arch/ppc/syslib/mpc83xx_devices.c b/arch/ppc/syslib/mpc83xx_devices.c
index 95b3b8a7f0ba..dbf8acac507f 100644
--- a/arch/ppc/syslib/mpc83xx_devices.c
+++ b/arch/ppc/syslib/mpc83xx_devices.c
@@ -21,6 +21,7 @@
21#include <asm/mpc83xx.h> 21#include <asm/mpc83xx.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/ppc_sys.h> 23#include <asm/ppc_sys.h>
24#include <asm/machdep.h>
24 25
25/* We use offsets for IORESOURCE_MEM since we do not know at compile time 26/* We use offsets for IORESOURCE_MEM since we do not know at compile time
26 * what IMMRBAR is, will get fixed up by mach_mpc83xx_fixup 27 * what IMMRBAR is, will get fixed up by mach_mpc83xx_fixup
diff --git a/arch/ppc/syslib/mpc85xx_devices.c b/arch/ppc/syslib/mpc85xx_devices.c
index bbc5ac0de878..2ede677a0a53 100644
--- a/arch/ppc/syslib/mpc85xx_devices.c
+++ b/arch/ppc/syslib/mpc85xx_devices.c
@@ -25,19 +25,20 @@
25/* We use offsets for IORESOURCE_MEM since we do not know at compile time 25/* We use offsets for IORESOURCE_MEM since we do not know at compile time
26 * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup 26 * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup
27 */ 27 */
28struct gianfar_mdio_data mpc85xx_mdio_pdata = {
29 .paddr = MPC85xx_MIIM_OFFSET,
30};
28 31
29static struct gianfar_platform_data mpc85xx_tsec1_pdata = { 32static struct gianfar_platform_data mpc85xx_tsec1_pdata = {
30 .device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | 33 .device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
31 FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON | 34 FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
32 FSL_GIANFAR_DEV_HAS_MULTI_INTR, 35 FSL_GIANFAR_DEV_HAS_MULTI_INTR,
33 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
34}; 36};
35 37
36static struct gianfar_platform_data mpc85xx_tsec2_pdata = { 38static struct gianfar_platform_data mpc85xx_tsec2_pdata = {
37 .device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | 39 .device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
38 FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON | 40 FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
39 FSL_GIANFAR_DEV_HAS_MULTI_INTR, 41 FSL_GIANFAR_DEV_HAS_MULTI_INTR,
40 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
41}; 42};
42 43
43static struct gianfar_platform_data mpc85xx_etsec1_pdata = { 44static struct gianfar_platform_data mpc85xx_etsec1_pdata = {
@@ -46,7 +47,6 @@ static struct gianfar_platform_data mpc85xx_etsec1_pdata = {
46 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 47 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
47 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | 48 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
48 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH, 49 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
49 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
50}; 50};
51 51
52static struct gianfar_platform_data mpc85xx_etsec2_pdata = { 52static struct gianfar_platform_data mpc85xx_etsec2_pdata = {
@@ -55,7 +55,6 @@ static struct gianfar_platform_data mpc85xx_etsec2_pdata = {
55 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 55 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
56 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | 56 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
57 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH, 57 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
58 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
59}; 58};
60 59
61static struct gianfar_platform_data mpc85xx_etsec3_pdata = { 60static struct gianfar_platform_data mpc85xx_etsec3_pdata = {
@@ -64,7 +63,6 @@ static struct gianfar_platform_data mpc85xx_etsec3_pdata = {
64 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 63 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
65 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | 64 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
66 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH, 65 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
67 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
68}; 66};
69 67
70static struct gianfar_platform_data mpc85xx_etsec4_pdata = { 68static struct gianfar_platform_data mpc85xx_etsec4_pdata = {
@@ -73,11 +71,10 @@ static struct gianfar_platform_data mpc85xx_etsec4_pdata = {
73 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 71 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
74 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | 72 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
75 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH, 73 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
76 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
77}; 74};
78 75
79static struct gianfar_platform_data mpc85xx_fec_pdata = { 76static struct gianfar_platform_data mpc85xx_fec_pdata = {
80 .phy_reg_addr = MPC85xx_ENET1_OFFSET, 77 .device_flags = 0,
81}; 78};
82 79
83static struct fsl_i2c_platform_data mpc85xx_fsl_i2c_pdata = { 80static struct fsl_i2c_platform_data mpc85xx_fsl_i2c_pdata = {
@@ -719,6 +716,12 @@ struct platform_device ppc_sys_platform_devices[] = {
719 }, 716 },
720 }, 717 },
721 }, 718 },
719 [MPC85xx_MDIO] = {
720 .name = "fsl-gianfar_mdio",
721 .id = 0,
722 .dev.platform_data = &mpc85xx_mdio_pdata,
723 .num_resources = 0,
724 },
722}; 725};
723 726
724static int __init mach_mpc85xx_fixup(struct platform_device *pdev) 727static int __init mach_mpc85xx_fixup(struct platform_device *pdev)
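
With the per-MAC .phy_reg_addr fields dropped, PHY access on 85xx is described by the single new fsl-gianfar_mdio platform device, whose gianfar_mdio_data carries the MIIM register offset. The probe below is purely illustrative of how a driver would pick that up; the function name is hypothetical and the header carrying gianfar_mdio_data is an assumption.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fsl_devices.h>	/* assumed location of struct gianfar_mdio_data */

/* Illustrative only: read the MDIO register base from the platform data
 * registered as MPC85xx_MDIO in the table above. */
static int example_mdio_probe(struct platform_device *pdev)
{
	struct gianfar_mdio_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -ENODEV;
	/* pdata->paddr holds MPC85xx_MIIM_OFFSET, fixed up by mach_mpc85xx_fixup */
	return 0;
}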
diff --git a/arch/ppc/syslib/mpc85xx_sys.c b/arch/ppc/syslib/mpc85xx_sys.c
index 6e3184ab354f..cb68d8c58348 100644
--- a/arch/ppc/syslib/mpc85xx_sys.c
+++ b/arch/ppc/syslib/mpc85xx_sys.c
@@ -24,19 +24,19 @@ struct ppc_sys_spec ppc_sys_specs[] = {
24 .ppc_sys_name = "8540", 24 .ppc_sys_name = "8540",
25 .mask = 0xFFFF0000, 25 .mask = 0xFFFF0000,
26 .value = 0x80300000, 26 .value = 0x80300000,
27 .num_devices = 10, 27 .num_devices = 11,
28 .device_list = (enum ppc_sys_devices[]) 28 .device_list = (enum ppc_sys_devices[])
29 { 29 {
30 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_FEC, MPC85xx_IIC1, 30 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_FEC, MPC85xx_IIC1,
31 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 31 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
32 MPC85xx_PERFMON, MPC85xx_DUART, 32 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_MDIO,
33 }, 33 },
34 }, 34 },
35 { 35 {
36 .ppc_sys_name = "8560", 36 .ppc_sys_name = "8560",
37 .mask = 0xFFFF0000, 37 .mask = 0xFFFF0000,
38 .value = 0x80700000, 38 .value = 0x80700000,
39 .num_devices = 19, 39 .num_devices = 20,
40 .device_list = (enum ppc_sys_devices[]) 40 .device_list = (enum ppc_sys_devices[])
41 { 41 {
42 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 42 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -45,14 +45,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
45 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, MPC85xx_CPM_SCC1, 45 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, MPC85xx_CPM_SCC1,
46 MPC85xx_CPM_SCC2, MPC85xx_CPM_SCC3, MPC85xx_CPM_SCC4, 46 MPC85xx_CPM_SCC2, MPC85xx_CPM_SCC3, MPC85xx_CPM_SCC4,
47 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, MPC85xx_CPM_FCC3, 47 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, MPC85xx_CPM_FCC3,
48 MPC85xx_CPM_MCC1, MPC85xx_CPM_MCC2, 48 MPC85xx_CPM_MCC1, MPC85xx_CPM_MCC2, MPC85xx_MDIO,
49 }, 49 },
50 }, 50 },
51 { 51 {
52 .ppc_sys_name = "8541", 52 .ppc_sys_name = "8541",
53 .mask = 0xFFFF0000, 53 .mask = 0xFFFF0000,
54 .value = 0x80720000, 54 .value = 0x80720000,
55 .num_devices = 13, 55 .num_devices = 14,
56 .device_list = (enum ppc_sys_devices[]) 56 .device_list = (enum ppc_sys_devices[])
57 { 57 {
58 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 58 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -60,13 +60,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
60 MPC85xx_PERFMON, MPC85xx_DUART, 60 MPC85xx_PERFMON, MPC85xx_DUART,
61 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, 61 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C,
62 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, 62 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
63 MPC85xx_MDIO,
63 }, 64 },
64 }, 65 },
65 { 66 {
66 .ppc_sys_name = "8541E", 67 .ppc_sys_name = "8541E",
67 .mask = 0xFFFF0000, 68 .mask = 0xFFFF0000,
68 .value = 0x807A0000, 69 .value = 0x807A0000,
69 .num_devices = 14, 70 .num_devices = 15,
70 .device_list = (enum ppc_sys_devices[]) 71 .device_list = (enum ppc_sys_devices[])
71 { 72 {
72 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 73 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -74,13 +75,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
74 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 75 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
75 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, 76 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C,
76 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, 77 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
78 MPC85xx_MDIO,
77 }, 79 },
78 }, 80 },
79 { 81 {
80 .ppc_sys_name = "8555", 82 .ppc_sys_name = "8555",
81 .mask = 0xFFFF0000, 83 .mask = 0xFFFF0000,
82 .value = 0x80710000, 84 .value = 0x80710000,
83 .num_devices = 19, 85 .num_devices = 20,
84 .device_list = (enum ppc_sys_devices[]) 86 .device_list = (enum ppc_sys_devices[])
85 { 87 {
86 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 88 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -91,13 +93,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
91 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, 93 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
92 MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2, 94 MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2,
93 MPC85xx_CPM_USB, 95 MPC85xx_CPM_USB,
96 MPC85xx_MDIO,
94 }, 97 },
95 }, 98 },
96 { 99 {
97 .ppc_sys_name = "8555E", 100 .ppc_sys_name = "8555E",
98 .mask = 0xFFFF0000, 101 .mask = 0xFFFF0000,
99 .value = 0x80790000, 102 .value = 0x80790000,
100 .num_devices = 20, 103 .num_devices = 21,
101 .device_list = (enum ppc_sys_devices[]) 104 .device_list = (enum ppc_sys_devices[])
102 { 105 {
103 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 106 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -108,6 +111,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
108 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, 111 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
109 MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2, 112 MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2,
110 MPC85xx_CPM_USB, 113 MPC85xx_CPM_USB,
114 MPC85xx_MDIO,
111 }, 115 },
112 }, 116 },
113 /* SVRs on 8548 rev1.0 matches for 8548/8547/8545 */ 117 /* SVRs on 8548 rev1.0 matches for 8548/8547/8545 */
@@ -115,104 +119,112 @@ struct ppc_sys_spec ppc_sys_specs[] = {
115 .ppc_sys_name = "8548E", 119 .ppc_sys_name = "8548E",
116 .mask = 0xFFFF00F0, 120 .mask = 0xFFFF00F0,
117 .value = 0x80390010, 121 .value = 0x80390010,
118 .num_devices = 13, 122 .num_devices = 14,
119 .device_list = (enum ppc_sys_devices[]) 123 .device_list = (enum ppc_sys_devices[])
120 { 124 {
121 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3, 125 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
122 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2, 126 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
123 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 127 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
124 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 128 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
129 MPC85xx_MDIO,
125 }, 130 },
126 }, 131 },
127 { 132 {
128 .ppc_sys_name = "8548", 133 .ppc_sys_name = "8548",
129 .mask = 0xFFFF00F0, 134 .mask = 0xFFFF00F0,
130 .value = 0x80310010, 135 .value = 0x80310010,
131 .num_devices = 12, 136 .num_devices = 13,
132 .device_list = (enum ppc_sys_devices[]) 137 .device_list = (enum ppc_sys_devices[])
133 { 138 {
134 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3, 139 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
135 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2, 140 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
136 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 141 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
137 MPC85xx_PERFMON, MPC85xx_DUART, 142 MPC85xx_PERFMON, MPC85xx_DUART,
143 MPC85xx_MDIO,
138 }, 144 },
139 }, 145 },
140 { 146 {
141 .ppc_sys_name = "8547E", 147 .ppc_sys_name = "8547E",
142 .mask = 0xFFFF00F0, 148 .mask = 0xFFFF00F0,
143 .value = 0x80390010, 149 .value = 0x80390010,
144 .num_devices = 13, 150 .num_devices = 14,
145 .device_list = (enum ppc_sys_devices[]) 151 .device_list = (enum ppc_sys_devices[])
146 { 152 {
147 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3, 153 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
148 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2, 154 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
149 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 155 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
150 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 156 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
157 MPC85xx_MDIO,
151 }, 158 },
152 }, 159 },
153 { 160 {
154 .ppc_sys_name = "8547", 161 .ppc_sys_name = "8547",
155 .mask = 0xFFFF00F0, 162 .mask = 0xFFFF00F0,
156 .value = 0x80310010, 163 .value = 0x80310010,
157 .num_devices = 12, 164 .num_devices = 13,
158 .device_list = (enum ppc_sys_devices[]) 165 .device_list = (enum ppc_sys_devices[])
159 { 166 {
160 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3, 167 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
161 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2, 168 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
162 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 169 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
163 MPC85xx_PERFMON, MPC85xx_DUART, 170 MPC85xx_PERFMON, MPC85xx_DUART,
171 MPC85xx_MDIO,
164 }, 172 },
165 }, 173 },
166 { 174 {
167 .ppc_sys_name = "8545E", 175 .ppc_sys_name = "8545E",
168 .mask = 0xFFFF00F0, 176 .mask = 0xFFFF00F0,
169 .value = 0x80390010, 177 .value = 0x80390010,
170 .num_devices = 11, 178 .num_devices = 12,
171 .device_list = (enum ppc_sys_devices[]) 179 .device_list = (enum ppc_sys_devices[])
172 { 180 {
173 MPC85xx_eTSEC1, MPC85xx_eTSEC2, 181 MPC85xx_eTSEC1, MPC85xx_eTSEC2,
174 MPC85xx_IIC1, MPC85xx_IIC2, 182 MPC85xx_IIC1, MPC85xx_IIC2,
175 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 183 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
176 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 184 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
185 MPC85xx_MDIO,
177 }, 186 },
178 }, 187 },
179 { 188 {
180 .ppc_sys_name = "8545", 189 .ppc_sys_name = "8545",
181 .mask = 0xFFFF00F0, 190 .mask = 0xFFFF00F0,
182 .value = 0x80310010, 191 .value = 0x80310010,
183 .num_devices = 10, 192 .num_devices = 11,
184 .device_list = (enum ppc_sys_devices[]) 193 .device_list = (enum ppc_sys_devices[])
185 { 194 {
186 MPC85xx_eTSEC1, MPC85xx_eTSEC2, 195 MPC85xx_eTSEC1, MPC85xx_eTSEC2,
187 MPC85xx_IIC1, MPC85xx_IIC2, 196 MPC85xx_IIC1, MPC85xx_IIC2,
188 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 197 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
189 MPC85xx_PERFMON, MPC85xx_DUART, 198 MPC85xx_PERFMON, MPC85xx_DUART,
199 MPC85xx_MDIO,
190 }, 200 },
191 }, 201 },
192 { 202 {
193 .ppc_sys_name = "8543E", 203 .ppc_sys_name = "8543E",
194 .mask = 0xFFFF00F0, 204 .mask = 0xFFFF00F0,
195 .value = 0x803A0010, 205 .value = 0x803A0010,
196 .num_devices = 11, 206 .num_devices = 12,
197 .device_list = (enum ppc_sys_devices[]) 207 .device_list = (enum ppc_sys_devices[])
198 { 208 {
199 MPC85xx_eTSEC1, MPC85xx_eTSEC2, 209 MPC85xx_eTSEC1, MPC85xx_eTSEC2,
200 MPC85xx_IIC1, MPC85xx_IIC2, 210 MPC85xx_IIC1, MPC85xx_IIC2,
201 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 211 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
202 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 212 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
213 MPC85xx_MDIO,
203 }, 214 },
204 }, 215 },
205 { 216 {
206 .ppc_sys_name = "8543", 217 .ppc_sys_name = "8543",
207 .mask = 0xFFFF00F0, 218 .mask = 0xFFFF00F0,
208 .value = 0x80320010, 219 .value = 0x80320010,
209 .num_devices = 10, 220 .num_devices = 11,
210 .device_list = (enum ppc_sys_devices[]) 221 .device_list = (enum ppc_sys_devices[])
211 { 222 {
212 MPC85xx_eTSEC1, MPC85xx_eTSEC2, 223 MPC85xx_eTSEC1, MPC85xx_eTSEC2,
213 MPC85xx_IIC1, MPC85xx_IIC2, 224 MPC85xx_IIC1, MPC85xx_IIC2,
214 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 225 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
215 MPC85xx_PERFMON, MPC85xx_DUART, 226 MPC85xx_PERFMON, MPC85xx_DUART,
227 MPC85xx_MDIO,
216 }, 228 },
217 }, 229 },
218 { /* default match */ 230 { /* default match */
diff --git a/arch/ppc/syslib/mpc8xx_sys.c b/arch/ppc/syslib/mpc8xx_sys.c
index a532ccc861c0..3cc27d29e3af 100644
--- a/arch/ppc/syslib/mpc8xx_sys.c
+++ b/arch/ppc/syslib/mpc8xx_sys.c
@@ -24,7 +24,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
24 .ppc_sys_name = "MPC86X", 24 .ppc_sys_name = "MPC86X",
25 .mask = 0xFFFFFFFF, 25 .mask = 0xFFFFFFFF,
26 .value = 0x00000000, 26 .value = 0x00000000,
27 .num_devices = 2, 27 .num_devices = 7,
28 .device_list = (enum ppc_sys_devices[]) 28 .device_list = (enum ppc_sys_devices[])
29 { 29 {
30 MPC8xx_CPM_FEC1, 30 MPC8xx_CPM_FEC1,
@@ -40,7 +40,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
40 .ppc_sys_name = "MPC885", 40 .ppc_sys_name = "MPC885",
41 .mask = 0xFFFFFFFF, 41 .mask = 0xFFFFFFFF,
42 .value = 0x00000000, 42 .value = 0x00000000,
43 .num_devices = 3, 43 .num_devices = 8,
44 .device_list = (enum ppc_sys_devices[]) 44 .device_list = (enum ppc_sys_devices[])
45 { 45 {
46 MPC8xx_CPM_FEC1, 46 MPC8xx_CPM_FEC1,
diff --git a/arch/ppc/syslib/mv64360_pic.c b/arch/ppc/syslib/mv64360_pic.c
index 8356da4678a2..58b0aa813e85 100644
--- a/arch/ppc/syslib/mv64360_pic.c
+++ b/arch/ppc/syslib/mv64360_pic.c
@@ -48,6 +48,7 @@
48#include <asm/system.h> 48#include <asm/system.h>
49#include <asm/irq.h> 49#include <asm/irq.h>
50#include <asm/mv64x60.h> 50#include <asm/mv64x60.h>
51#include <asm/machdep.h>
51 52
52#ifdef CONFIG_IRQ_ALL_CPUS 53#ifdef CONFIG_IRQ_ALL_CPUS
53#error "The mv64360 does not support distribution of IRQs on all CPUs" 54#error "The mv64360 does not support distribution of IRQs on all CPUs"
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
index 4849850a59ed..a781c50d2f4c 100644
--- a/arch/ppc/syslib/mv64x60.c
+++ b/arch/ppc/syslib/mv64x60.c
@@ -1304,7 +1304,7 @@ mv64x60_config_pci_params(struct pci_controller *hose,
1304 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val); 1304 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val);
1305 1305
1306 /* Set latency timer, cache line size, clear BIST */ 1306 /* Set latency timer, cache line size, clear BIST */
1307 u16_val = (pi->latency_timer << 8) | (L1_CACHE_LINE_SIZE >> 2); 1307 u16_val = (pi->latency_timer << 8) | (L1_CACHE_BYTES >> 2);
1308 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val); 1308 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val);
1309 1309
1310 mv64x60_pci_exclude_bridge = save_exclude; 1310 mv64x60_pci_exclude_bridge = save_exclude;
diff --git a/arch/ppc/syslib/mv64x60_dbg.c b/arch/ppc/syslib/mv64x60_dbg.c
index 2927c7adf5e5..fa5b2e45e0ca 100644
--- a/arch/ppc/syslib/mv64x60_dbg.c
+++ b/arch/ppc/syslib/mv64x60_dbg.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <asm/delay.h> 25#include <asm/delay.h>
26#include <asm/mv64x60.h> 26#include <asm/mv64x60.h>
27#include <asm/machdep.h>
27 28
28 29
29#if defined(CONFIG_SERIAL_TEXT_DEBUG) 30#if defined(CONFIG_SERIAL_TEXT_DEBUG)
diff --git a/arch/ppc/syslib/of_device.c b/arch/ppc/syslib/of_device.c
deleted file mode 100644
index 93c7231ea709..000000000000
--- a/arch/ppc/syslib/of_device.c
+++ /dev/null
@@ -1,276 +0,0 @@
1#include <linux/config.h>
2#include <linux/string.h>
3#include <linux/kernel.h>
4#include <linux/init.h>
5#include <linux/module.h>
6#include <linux/mod_devicetable.h>
7#include <asm/errno.h>
8#include <asm/of_device.h>
9
10/**
11 * of_match_device - Tell if an of_device structure has a matching
12 * of_match structure
13 * @ids: array of of device match structures to search in
14 * @dev: the of device structure to match against
15 *
16 * Used by a driver to check whether an of_device present in the
17 * system is in its list of supported devices.
18 */
19const struct of_device_id * of_match_device(const struct of_device_id *matches,
20 const struct of_device *dev)
21{
22 if (!dev->node)
23 return NULL;
24 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
25 int match = 1;
26 if (matches->name[0])
27 match &= dev->node->name
28 && !strcmp(matches->name, dev->node->name);
29 if (matches->type[0])
30 match &= dev->node->type
31 && !strcmp(matches->type, dev->node->type);
32 if (matches->compatible[0])
33 match &= device_is_compatible(dev->node,
34 matches->compatible);
35 if (match)
36 return matches;
37 matches++;
38 }
39 return NULL;
40}
41
42static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
43{
44 struct of_device * of_dev = to_of_device(dev);
45 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
46 const struct of_device_id * matches = of_drv->match_table;
47
48 if (!matches)
49 return 0;
50
51 return of_match_device(matches, of_dev) != NULL;
52}
53
54struct of_device *of_dev_get(struct of_device *dev)
55{
56 struct device *tmp;
57
58 if (!dev)
59 return NULL;
60 tmp = get_device(&dev->dev);
61 if (tmp)
62 return to_of_device(tmp);
63 else
64 return NULL;
65}
66
67void of_dev_put(struct of_device *dev)
68{
69 if (dev)
70 put_device(&dev->dev);
71}
72
73
74static int of_device_probe(struct device *dev)
75{
76 int error = -ENODEV;
77 struct of_platform_driver *drv;
78 struct of_device *of_dev;
79 const struct of_device_id *match;
80
81 drv = to_of_platform_driver(dev->driver);
82 of_dev = to_of_device(dev);
83
84 if (!drv->probe)
85 return error;
86
87 of_dev_get(of_dev);
88
89 match = of_match_device(drv->match_table, of_dev);
90 if (match)
91 error = drv->probe(of_dev, match);
92 if (error)
93 of_dev_put(of_dev);
94
95 return error;
96}
97
98static int of_device_remove(struct device *dev)
99{
100 struct of_device * of_dev = to_of_device(dev);
101 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
102
103 if (dev->driver && drv->remove)
104 drv->remove(of_dev);
105 return 0;
106}
107
108static int of_device_suspend(struct device *dev, pm_message_t state)
109{
110 struct of_device * of_dev = to_of_device(dev);
111 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
112 int error = 0;
113
114 if (dev->driver && drv->suspend)
115 error = drv->suspend(of_dev, state);
116 return error;
117}
118
119static int of_device_resume(struct device * dev)
120{
121 struct of_device * of_dev = to_of_device(dev);
122 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
123 int error = 0;
124
125 if (dev->driver && drv->resume)
126 error = drv->resume(of_dev);
127 return error;
128}
129
130struct bus_type of_platform_bus_type = {
131 .name = "of_platform",
132 .match = of_platform_bus_match,
133 .suspend = of_device_suspend,
134 .resume = of_device_resume,
135};
136
137static int __init of_bus_driver_init(void)
138{
139 return bus_register(&of_platform_bus_type);
140}
141
142postcore_initcall(of_bus_driver_init);
143
144int of_register_driver(struct of_platform_driver *drv)
145{
146 int count = 0;
147
148 /* initialize common driver fields */
149 drv->driver.name = drv->name;
150 drv->driver.bus = &of_platform_bus_type;
151 drv->driver.probe = of_device_probe;
152 drv->driver.remove = of_device_remove;
153
154 /* register with core */
155 count = driver_register(&drv->driver);
156 return count ? count : 1;
157}
158
159void of_unregister_driver(struct of_platform_driver *drv)
160{
161 driver_unregister(&drv->driver);
162}
163
164
165static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
166{
167 struct of_device *ofdev;
168
169 ofdev = to_of_device(dev);
170 return sprintf(buf, "%s", ofdev->node->full_name);
171}
172
173static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
174
175/**
176 * of_release_dev - free an of device structure when all users of it are finished.
177 * @dev: device that's been disconnected
178 *
179 * Will be called only by the device core when all users of this of device are
180 * done.
181 */
182void of_release_dev(struct device *dev)
183{
184 struct of_device *ofdev;
185
186 ofdev = to_of_device(dev);
187 of_node_put(ofdev->node);
188 kfree(ofdev);
189}
190
191int of_device_register(struct of_device *ofdev)
192{
193 int rc;
194 struct of_device **odprop;
195
196 BUG_ON(ofdev->node == NULL);
197
198 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
199 if (!odprop) {
200 struct property *new_prop;
201
202 new_prop = kmalloc(sizeof(struct property) + sizeof(struct of_device *),
203 GFP_KERNEL);
204 if (new_prop == NULL)
205 return -ENOMEM;
206 new_prop->name = "linux,device";
207	new_prop->length = sizeof(struct of_device *);
208 new_prop->value = (unsigned char *)&new_prop[1];
209 odprop = (struct of_device **)new_prop->value;
210 *odprop = NULL;
211 prom_add_property(ofdev->node, new_prop);
212 }
213 *odprop = ofdev;
214
215 rc = device_register(&ofdev->dev);
216 if (rc)
217 return rc;
218
219 device_create_file(&ofdev->dev, &dev_attr_devspec);
220
221 return 0;
222}
223
224void of_device_unregister(struct of_device *ofdev)
225{
226 struct of_device **odprop;
227
228 device_remove_file(&ofdev->dev, &dev_attr_devspec);
229
230 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
231 if (odprop)
232 *odprop = NULL;
233
234 device_unregister(&ofdev->dev);
235}
236
237struct of_device* of_platform_device_create(struct device_node *np,
238 const char *bus_id,
239 struct device *parent)
240{
241 struct of_device *dev;
242 u32 *reg;
243
244 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
245 if (!dev)
246 return NULL;
247 memset(dev, 0, sizeof(*dev));
248
249 dev->node = of_node_get(np);
250 dev->dma_mask = 0xffffffffUL;
251 dev->dev.dma_mask = &dev->dma_mask;
252 dev->dev.parent = parent;
253 dev->dev.bus = &of_platform_bus_type;
254 dev->dev.release = of_release_dev;
255
256 reg = (u32 *)get_property(np, "reg", NULL);
257 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
258
259 if (of_device_register(dev) != 0) {
260 kfree(dev);
261 return NULL;
262 }
263
264 return dev;
265}
266
267EXPORT_SYMBOL(of_match_device);
268EXPORT_SYMBOL(of_platform_bus_type);
269EXPORT_SYMBOL(of_register_driver);
270EXPORT_SYMBOL(of_unregister_driver);
271EXPORT_SYMBOL(of_device_register);
272EXPORT_SYMBOL(of_device_unregister);
273EXPORT_SYMBOL(of_dev_get);
274EXPORT_SYMBOL(of_dev_put);
275EXPORT_SYMBOL(of_platform_device_create);
276EXPORT_SYMBOL(of_release_dev);
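
For readers of the new of_platform bus code above, here is a minimal, illustrative sketch of a client driver for this bus. It uses only interfaces that appear in the listing (struct of_device_id, struct of_platform_driver with .name/.match_table/.probe/.remove, and of_register_driver()); the driver name and the "example,widget" compatible string are hypothetical, the header providing the declarations is assumed, and the remove() return type is assumed to be int.

static struct of_device_id example_match_table[] = {
	{ .compatible = "example,widget" },	/* hypothetical compatible string */
	{ /* terminating entry */ },
};

static int example_probe(struct of_device *dev, const struct of_device_id *match)
{
	/* dev->node is the matching device_node; map registers, request IRQs, ... */
	return 0;
}

static int example_remove(struct of_device *dev)
{
	return 0;
}

static struct of_platform_driver example_driver = {
	.name		= "example-widget",
	.match_table	= example_match_table,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	/* of_register_driver() returns the driver_register() result, or 1 on success */
	return of_register_driver(&example_driver);
}

Matching is driven entirely by of_match_device() above: a table entry matches when each of its non-empty fields (name, type, compatible) agrees with the device node.
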
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index 1cf5de21a3fd..894779712b46 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -23,6 +23,7 @@
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <asm/open_pic.h> 24#include <asm/open_pic.h>
25#include <asm/i8259.h> 25#include <asm/i8259.h>
26#include <asm/machdep.h>
26 27
27#include "open_pic_defs.h" 28#include "open_pic_defs.h"
28 29
@@ -889,7 +890,7 @@ openpic_get_irq(struct pt_regs *regs)
889 890
890#ifdef CONFIG_SMP 891#ifdef CONFIG_SMP
891void 892void
892smp_openpic_message_pass(int target, int msg, unsigned long data, int wait) 893smp_openpic_message_pass(int target, int msg)
893{ 894{
894 cpumask_t mask = CPU_MASK_ALL; 895 cpumask_t mask = CPU_MASK_ALL;
895 /* make sure we're sending something that translates to an IPI */ 896 /* make sure we're sending something that translates to an IPI */
diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c
index 16cff91d9f41..1c40049b9a45 100644
--- a/arch/ppc/syslib/open_pic2.c
+++ b/arch/ppc/syslib/open_pic2.c
@@ -27,6 +27,7 @@
27#include <asm/sections.h> 27#include <asm/sections.h>
28#include <asm/open_pic.h> 28#include <asm/open_pic.h>
29#include <asm/i8259.h> 29#include <asm/i8259.h>
30#include <asm/machdep.h>
30 31
31#include "open_pic_defs.h" 32#include "open_pic_defs.h"
32 33
diff --git a/arch/ppc/syslib/ppc403_pic.c b/arch/ppc/syslib/ppc403_pic.c
index ce4d1deb86e9..c46043c47225 100644
--- a/arch/ppc/syslib/ppc403_pic.c
+++ b/arch/ppc/syslib/ppc403_pic.c
@@ -26,6 +26,7 @@
26#include <asm/system.h> 26#include <asm/system.h>
27#include <asm/irq.h> 27#include <asm/irq.h>
28#include <asm/ppc4xx_pic.h> 28#include <asm/ppc4xx_pic.h>
29#include <asm/machdep.h>
29 30
30/* Function Prototypes */ 31/* Function Prototypes */
31 32
diff --git a/arch/ppc/syslib/ppc4xx_pic.c b/arch/ppc/syslib/ppc4xx_pic.c
index 40086212b9c3..0b435633a0d1 100644
--- a/arch/ppc/syslib/ppc4xx_pic.c
+++ b/arch/ppc/syslib/ppc4xx_pic.c
@@ -25,6 +25,7 @@
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/ppc4xx_pic.h> 27#include <asm/ppc4xx_pic.h>
28#include <asm/machdep.h>
28 29
29/* See comment in include/arch-ppc/ppc4xx_pic.h 30/* See comment in include/arch-ppc/ppc4xx_pic.h
30 * for more info about these two variables 31 * for more info about these two variables
diff --git a/arch/ppc/syslib/ppc4xx_setup.c b/arch/ppc/syslib/ppc4xx_setup.c
index bf83240689dc..e83a83fd95e1 100644
--- a/arch/ppc/syslib/ppc4xx_setup.c
+++ b/arch/ppc/syslib/ppc4xx_setup.c
@@ -278,7 +278,7 @@ ppc4xx_init(unsigned long r3, unsigned long r4, unsigned long r5,
278#endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */ 278#endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */
279} 279}
280 280
281/* Called from MachineCheckException */ 281/* Called from machine_check_exception */
282void platform_machine_check(struct pt_regs *regs) 282void platform_machine_check(struct pt_regs *regs)
283{ 283{
284#if defined(DCRN_PLB0_BEAR) 284#if defined(DCRN_PLB0_BEAR)
diff --git a/arch/ppc/syslib/ppc83xx_setup.c b/arch/ppc/syslib/ppc83xx_setup.c
index 890484e576e7..4da168a6ad03 100644
--- a/arch/ppc/syslib/ppc83xx_setup.c
+++ b/arch/ppc/syslib/ppc83xx_setup.c
@@ -40,6 +40,7 @@
40#include <asm/ppc_sys.h> 40#include <asm/ppc_sys.h>
41#include <asm/kgdb.h> 41#include <asm/kgdb.h>
42#include <asm/delay.h> 42#include <asm/delay.h>
43#include <asm/machdep.h>
43 44
44#include <syslib/ppc83xx_setup.h> 45#include <syslib/ppc83xx_setup.h>
45#if defined(CONFIG_PCI) 46#if defined(CONFIG_PCI)
diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c
index 832b8bf99ae7..de2f90576577 100644
--- a/arch/ppc/syslib/ppc85xx_setup.c
+++ b/arch/ppc/syslib/ppc85xx_setup.c
@@ -29,6 +29,7 @@
29#include <asm/mmu.h> 29#include <asm/mmu.h>
30#include <asm/ppc_sys.h> 30#include <asm/ppc_sys.h>
31#include <asm/kgdb.h> 31#include <asm/kgdb.h>
32#include <asm/machdep.h>
32 33
33#include <syslib/ppc85xx_setup.h> 34#include <syslib/ppc85xx_setup.h>
34 35
diff --git a/arch/ppc/syslib/ppc8xx_pic.c b/arch/ppc/syslib/ppc8xx_pic.c
index d3b01c6c97de..3e6f51a61d46 100644
--- a/arch/ppc/syslib/ppc8xx_pic.c
+++ b/arch/ppc/syslib/ppc8xx_pic.c
@@ -6,6 +6,7 @@
6#include <linux/signal.h> 6#include <linux/signal.h>
7#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <asm/irq.h> 8#include <asm/irq.h>
9#include <asm/io.h>
9#include <asm/8xx_immap.h> 10#include <asm/8xx_immap.h>
10#include <asm/mpc8xx.h> 11#include <asm/mpc8xx.h>
11#include "ppc8xx_pic.h" 12#include "ppc8xx_pic.h"
@@ -29,8 +30,7 @@ static void m8xx_mask_irq(unsigned int irq_nr)
29 word = irq_nr >> 5; 30 word = irq_nr >> 5;
30 31
31 ppc_cached_irq_mask[word] &= ~(1 << (31-bit)); 32 ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
32 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = 33 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
33 ppc_cached_irq_mask[word];
34} 34}
35 35
36static void m8xx_unmask_irq(unsigned int irq_nr) 36static void m8xx_unmask_irq(unsigned int irq_nr)
@@ -41,8 +41,7 @@ static void m8xx_unmask_irq(unsigned int irq_nr)
41 word = irq_nr >> 5; 41 word = irq_nr >> 5;
42 42
43 ppc_cached_irq_mask[word] |= (1 << (31-bit)); 43 ppc_cached_irq_mask[word] |= (1 << (31-bit));
44 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = 44 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
45 ppc_cached_irq_mask[word];
46} 45}
47 46
48static void m8xx_end_irq(unsigned int irq_nr) 47static void m8xx_end_irq(unsigned int irq_nr)
@@ -55,8 +54,7 @@ static void m8xx_end_irq(unsigned int irq_nr)
55 word = irq_nr >> 5; 54 word = irq_nr >> 5;
56 55
57 ppc_cached_irq_mask[word] |= (1 << (31-bit)); 56 ppc_cached_irq_mask[word] |= (1 << (31-bit));
58 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = 57 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
59 ppc_cached_irq_mask[word];
60 } 58 }
61} 59}
62 60
@@ -69,9 +67,8 @@ static void m8xx_mask_and_ack(unsigned int irq_nr)
69 word = irq_nr >> 5; 67 word = irq_nr >> 5;
70 68
71 ppc_cached_irq_mask[word] &= ~(1 << (31-bit)); 69 ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
72 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = 70 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
73 ppc_cached_irq_mask[word]; 71 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sipend, 1 << (31-bit));
74 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sipend = 1 << (31-bit);
75} 72}
76 73
77struct hw_interrupt_type ppc8xx_pic = { 74struct hw_interrupt_type ppc8xx_pic = {
@@ -93,7 +90,7 @@ m8xx_get_irq(struct pt_regs *regs)
93 /* For MPC8xx, read the SIVEC register and shift the bits down 90 /* For MPC8xx, read the SIVEC register and shift the bits down
94 * to get the irq number. 91 * to get the irq number.
95 */ 92 */
96 irq = ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sivec >> 26; 93 irq = in_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sivec) >> 26;
97 94
98 /* 95 /*
99 * When we read the sivec without an interrupt to process, we will 96 * When we read the sivec without an interrupt to process, we will
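
The ppc8xx_pic.c hunks above replace direct assignments through the immap_t pointer with the in_be32()/out_be32() accessors from the newly included asm/io.h. The before/after pattern, condensed here for clarity (the motivation of using explicit MMIO accessors rather than plain stores is implied by the conversion, not stated in the commit):

	/* before: plain store through the SIU register map */
	((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = ppc_cached_irq_mask[word];

	/* after: explicit big-endian MMIO accessor */
	out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
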
diff --git a/arch/ppc/syslib/ppc_sys.c b/arch/ppc/syslib/ppc_sys.c
index 52ba0c68078d..62ee86e80711 100644
--- a/arch/ppc/syslib/ppc_sys.c
+++ b/arch/ppc/syslib/ppc_sys.c
@@ -69,6 +69,9 @@ static int __init find_chip_by_name_and_id(char *name, u32 id)
69 matched[j++] = i; 69 matched[j++] = i;
70 i++; 70 i++;
71 } 71 }
72
73 ret = i;
74
72 if (j != 0) { 75 if (j != 0) {
73 for (i = 0; i < j; i++) { 76 for (i = 0; i < j; i++) {
74 if ((ppc_sys_specs[matched[i]].mask & id) == 77 if ((ppc_sys_specs[matched[i]].mask & id) ==
diff --git a/arch/ppc/syslib/pq2_devices.c b/arch/ppc/syslib/pq2_devices.c
index 1d3869768f96..6f88ba93412b 100644
--- a/arch/ppc/syslib/pq2_devices.c
+++ b/arch/ppc/syslib/pq2_devices.c
@@ -18,6 +18,7 @@
18#include <asm/cpm2.h> 18#include <asm/cpm2.h>
19#include <asm/irq.h> 19#include <asm/irq.h>
20#include <asm/ppc_sys.h> 20#include <asm/ppc_sys.h>
21#include <asm/machdep.h>
21 22
22struct platform_device ppc_sys_platform_devices[] = { 23struct platform_device ppc_sys_platform_devices[] = {
23 [MPC82xx_CPM_FCC1] = { 24 [MPC82xx_CPM_FCC1] = {
diff --git a/arch/ppc/syslib/prep_nvram.c b/arch/ppc/syslib/prep_nvram.c
index 8599850ca772..2c6364d9641f 100644
--- a/arch/ppc/syslib/prep_nvram.c
+++ b/arch/ppc/syslib/prep_nvram.c
@@ -22,14 +22,14 @@
22static char nvramData[MAX_PREP_NVRAM]; 22static char nvramData[MAX_PREP_NVRAM];
23static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0]; 23static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0];
24 24
25unsigned char __prep prep_nvram_read_val(int addr) 25unsigned char prep_nvram_read_val(int addr)
26{ 26{
27 outb(addr, PREP_NVRAM_AS0); 27 outb(addr, PREP_NVRAM_AS0);
28 outb(addr>>8, PREP_NVRAM_AS1); 28 outb(addr>>8, PREP_NVRAM_AS1);
29 return inb(PREP_NVRAM_DATA); 29 return inb(PREP_NVRAM_DATA);
30} 30}
31 31
32void __prep prep_nvram_write_val(int addr, 32void prep_nvram_write_val(int addr,
33 unsigned char val) 33 unsigned char val)
34{ 34{
35 outb(addr, PREP_NVRAM_AS0); 35 outb(addr, PREP_NVRAM_AS0);
@@ -81,8 +81,7 @@ void __init init_prep_nvram(void)
81 } 81 }
82} 82}
83 83
84__prep 84char *prep_nvram_get_var(const char *name)
85char __prep *prep_nvram_get_var(const char *name)
86{ 85{
87 char *cp; 86 char *cp;
88 int namelen; 87 int namelen;
@@ -101,8 +100,7 @@ char __prep *prep_nvram_get_var(const char *name)
101 return NULL; 100 return NULL;
102} 101}
103 102
104__prep 103char *prep_nvram_first_var(void)
105char __prep *prep_nvram_first_var(void)
106{ 104{
107 if (nvram->Header.GELength == 0) { 105 if (nvram->Header.GELength == 0) {
108 return NULL; 106 return NULL;
@@ -112,8 +110,7 @@ char __prep *prep_nvram_first_var(void)
112 } 110 }
113} 111}
114 112
115__prep 113char *prep_nvram_next_var(char *name)
116char __prep *prep_nvram_next_var(char *name)
117{ 114{
118 char *cp; 115 char *cp;
119 116
diff --git a/arch/ppc/syslib/prom.c b/arch/ppc/syslib/prom.c
index 2c64ed627475..278da6ee62ea 100644
--- a/arch/ppc/syslib/prom.c
+++ b/arch/ppc/syslib/prom.c
@@ -89,7 +89,7 @@ extern char cmd_line[512]; /* XXX */
89extern boot_infos_t *boot_infos; 89extern boot_infos_t *boot_infos;
90unsigned long dev_tree_size; 90unsigned long dev_tree_size;
91 91
92void __openfirmware 92void
93phys_call_rtas(int service, int nargs, int nret, ...) 93phys_call_rtas(int service, int nargs, int nret, ...)
94{ 94{
95 va_list list; 95 va_list list;
@@ -862,7 +862,7 @@ find_type_devices(const char *type)
862/* 862/*
863 * Returns all nodes linked together 863 * Returns all nodes linked together
864 */ 864 */
865struct device_node * __openfirmware 865struct device_node *
866find_all_nodes(void) 866find_all_nodes(void)
867{ 867{
868 struct device_node *head, **prevp, *np; 868 struct device_node *head, **prevp, *np;
@@ -1165,7 +1165,7 @@ get_property(struct device_node *np, const char *name, int *lenp)
1165/* 1165/*
1166 * Add a property to a node 1166 * Add a property to a node
1167 */ 1167 */
1168void __openfirmware 1168void
1169prom_add_property(struct device_node* np, struct property* prop) 1169prom_add_property(struct device_node* np, struct property* prop)
1170{ 1170{
1171 struct property **next = &np->properties; 1171 struct property **next = &np->properties;
@@ -1177,7 +1177,7 @@ prom_add_property(struct device_node* np, struct property* prop)
1177} 1177}
1178 1178
1179/* I quickly hacked that one, check against spec ! */ 1179/* I quickly hacked that one, check against spec ! */
1180static inline unsigned long __openfirmware 1180static inline unsigned long
1181bus_space_to_resource_flags(unsigned int bus_space) 1181bus_space_to_resource_flags(unsigned int bus_space)
1182{ 1182{
1183 u8 space = (bus_space >> 24) & 0xf; 1183 u8 space = (bus_space >> 24) & 0xf;
@@ -1194,7 +1194,7 @@ bus_space_to_resource_flags(unsigned int bus_space)
1194 } 1194 }
1195} 1195}
1196 1196
1197static struct resource* __openfirmware 1197static struct resource*
1198find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range) 1198find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
1199{ 1199{
1200 unsigned long mask; 1200 unsigned long mask;
@@ -1224,7 +1224,7 @@ find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
1224 * or other nodes attached to the root node. Ultimately, put some 1224 * or other nodes attached to the root node. Ultimately, put some
1225 * link to resources in the OF node. 1225 * link to resources in the OF node.
1226 */ 1226 */
1227struct resource* __openfirmware 1227struct resource*
1228request_OF_resource(struct device_node* node, int index, const char* name_postfix) 1228request_OF_resource(struct device_node* node, int index, const char* name_postfix)
1229{ 1229{
1230 struct pci_dev* pcidev; 1230 struct pci_dev* pcidev;
@@ -1280,7 +1280,7 @@ fail:
1280 return NULL; 1280 return NULL;
1281} 1281}
1282 1282
1283int __openfirmware 1283int
1284release_OF_resource(struct device_node* node, int index) 1284release_OF_resource(struct device_node* node, int index)
1285{ 1285{
1286 struct pci_dev* pcidev; 1286 struct pci_dev* pcidev;
@@ -1346,7 +1346,7 @@ release_OF_resource(struct device_node* node, int index)
1346} 1346}
1347 1347
1348#if 0 1348#if 0
1349void __openfirmware 1349void
1350print_properties(struct device_node *np) 1350print_properties(struct device_node *np)
1351{ 1351{
1352 struct property *pp; 1352 struct property *pp;
@@ -1400,7 +1400,7 @@ print_properties(struct device_node *np)
1400static DEFINE_SPINLOCK(rtas_lock); 1400static DEFINE_SPINLOCK(rtas_lock);
1401 1401
1402/* this can be called after setup -- Cort */ 1402/* this can be called after setup -- Cort */
1403int __openfirmware 1403int
1404call_rtas(const char *service, int nargs, int nret, 1404call_rtas(const char *service, int nargs, int nret,
1405 unsigned long *outputs, ...) 1405 unsigned long *outputs, ...)
1406{ 1406{
diff --git a/arch/ppc/syslib/xilinx_pic.c b/arch/ppc/syslib/xilinx_pic.c
index 2cbcad278cef..47f04c71fe9c 100644
--- a/arch/ppc/syslib/xilinx_pic.c
+++ b/arch/ppc/syslib/xilinx_pic.c
@@ -17,6 +17,7 @@
17#include <asm/io.h> 17#include <asm/io.h>
18#include <asm/xparameters.h> 18#include <asm/xparameters.h>
19#include <asm/ibm4xx.h> 19#include <asm/ibm4xx.h>
20#include <asm/machdep.h>
20 21
21/* No one else should require these constants, so define them locally here. */ 22/* No one else should require these constants, so define them locally here. */
22#define ISR 0 /* Interrupt Status Register */ 23#define ISR 0 /* Interrupt Status Register */
diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c
index 507d4eeffe07..98612d420346 100644
--- a/arch/ppc/xmon/start.c
+++ b/arch/ppc/xmon/start.c
@@ -478,8 +478,9 @@ void *xmon_stdout;
478void *xmon_stderr; 478void *xmon_stderr;
479 479
480void 480void
481xmon_init(void) 481xmon_init(int arg)
482{ 482{
483 xmon_map_scc();
483} 484}
484 485
485int 486int
diff --git a/arch/ppc/xmon/xmon.c b/arch/ppc/xmon/xmon.c
index be7869e39465..66bfaa3211a2 100644
--- a/arch/ppc/xmon/xmon.c
+++ b/arch/ppc/xmon/xmon.c
@@ -148,9 +148,14 @@ Commands:\n\
148 r print registers\n\ 148 r print registers\n\
149 S print special registers\n\ 149 S print special registers\n\
150 t print backtrace\n\ 150 t print backtrace\n\
151 la lookup address in system.map\n\ 151 la lookup address\n\
152 ls lookup symbol in system.map\n\ 152 ls lookup symbol\n\
153 C checksum\n\
154 p call function with arguments\n\
155 T print time\n\
153 x exit monitor\n\ 156 x exit monitor\n\
157 zr reboot\n\
158 zh halt\n\
154"; 159";
155 160
156static int xmon_trace[NR_CPUS]; 161static int xmon_trace[NR_CPUS];
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index c658650af429..42677cc96508 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -10,6 +10,9 @@ config MMU
10 bool 10 bool
11 default y 11 default y
12 12
13config PPC_STD_MMU
14 def_bool y
15
13config UID16 16config UID16
14 bool 17 bool
15 18
@@ -120,6 +123,11 @@ config MPIC
120 bool 123 bool
121 default y 124 default y
122 125
126config PPC_I8259
127 depends on PPC_PSERIES
128 bool
129 default y
130
123config BPA_IIC 131config BPA_IIC
124 depends on PPC_BPA 132 depends on PPC_BPA
125 bool 133 bool
@@ -186,6 +194,12 @@ config BOOTX_TEXT
186 Say Y here to see progress messages from the boot firmware in text 194 Say Y here to see progress messages from the boot firmware in text
187 mode. Requires an Open Firmware compatible video card. 195 mode. Requires an Open Firmware compatible video card.
188 196
197config POWER4
198 def_bool y
199
200config PPC_FPU
201 def_bool y
202
189config POWER4_ONLY 203config POWER4_ONLY
190 bool "Optimize for POWER4" 204 bool "Optimize for POWER4"
191 default n 205 default n
@@ -234,6 +248,10 @@ config HMT
234 This option enables hardware multithreading on RS64 cpus. 248 This option enables hardware multithreading on RS64 cpus.
235 pSeries systems p620 and p660 have such a cpu type. 249 pSeries systems p620 and p660 have such a cpu type.
236 250
251config NUMA
252 bool "NUMA support"
253 default y if SMP && PPC_PSERIES
254
237config ARCH_SELECT_MEMORY_MODEL 255config ARCH_SELECT_MEMORY_MODEL
238 def_bool y 256 def_bool y
239 257
@@ -249,9 +267,6 @@ config ARCH_DISCONTIGMEM_DEFAULT
249 def_bool y 267 def_bool y
250 depends on ARCH_DISCONTIGMEM_ENABLE 268 depends on ARCH_DISCONTIGMEM_ENABLE
251 269
252config ARCH_FLATMEM_ENABLE
253 def_bool y
254
255config ARCH_SPARSEMEM_ENABLE 270config ARCH_SPARSEMEM_ENABLE
256 def_bool y 271 def_bool y
257 depends on ARCH_DISCONTIGMEM_ENABLE 272 depends on ARCH_DISCONTIGMEM_ENABLE
@@ -274,10 +289,6 @@ config NODES_SPAN_OTHER_NODES
274 def_bool y 289 def_bool y
275 depends on NEED_MULTIPLE_NODES 290 depends on NEED_MULTIPLE_NODES
276 291
277config NUMA
278 bool "NUMA support"
279 default y if DISCONTIGMEM || SPARSEMEM
280
281config SCHED_SMT 292config SCHED_SMT
282 bool "SMT (Hyperthreading) scheduler support" 293 bool "SMT (Hyperthreading) scheduler support"
283 depends on SMP 294 depends on SMP
@@ -307,6 +318,11 @@ config PPC_RTAS
307 depends on PPC_PSERIES || PPC_BPA 318 depends on PPC_PSERIES || PPC_BPA
308 default y 319 default y
309 320
321config RTAS_ERROR_LOGGING
322 bool
323 depends on PPC_RTAS
324 default y
325
310config RTAS_PROC 326config RTAS_PROC
311 bool "Proc interface to RTAS" 327 bool "Proc interface to RTAS"
312 depends on PPC_RTAS 328 depends on PPC_RTAS
@@ -357,7 +373,6 @@ config HOTPLUG_CPU
357 373
358config PROC_DEVICETREE 374config PROC_DEVICETREE
359 bool "Support for Open Firmware device tree in /proc" 375 bool "Support for Open Firmware device tree in /proc"
360 depends on !PPC_ISERIES
361 help 376 help
362 This option adds a device-tree directory under /proc which contains 377 This option adds a device-tree directory under /proc which contains
363 an image of the device tree that the kernel copies from Open 378 an image of the device tree that the kernel copies from Open
@@ -461,7 +476,7 @@ config VIOPATH
461 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH 476 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
462 default y 477 default y
463 478
464source "arch/ppc64/oprofile/Kconfig" 479source "arch/powerpc/oprofile/Kconfig"
465 480
466source "arch/ppc64/Kconfig.debug" 481source "arch/ppc64/Kconfig.debug"
467 482
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 521c2a5a2862..fdbd6f44adc0 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -75,17 +75,25 @@ else
75 CFLAGS += $(call cc-option,-mtune=power4) 75 CFLAGS += $(call cc-option,-mtune=power4)
76endif 76endif
77 77
78# No AltiVec instruction when building kernel
79CFLAGS += $(call cc-option, -mno-altivec)
80
78# Enable unit-at-a-time mode when possible. It shrinks the 81# Enable unit-at-a-time mode when possible. It shrinks the
79# kernel considerably. 82# kernel considerably.
80CFLAGS += $(call cc-option,-funit-at-a-time) 83CFLAGS += $(call cc-option,-funit-at-a-time)
81 84
82head-y := arch/ppc64/kernel/head.o 85head-y := arch/ppc64/kernel/head.o
86head-y += arch/powerpc/kernel/fpu.o
87head-y += arch/powerpc/kernel/entry_64.o
83 88
84libs-y += arch/ppc64/lib/ 89libs-y += arch/ppc64/lib/
85core-y += arch/ppc64/kernel/ 90core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
86core-y += arch/ppc64/mm/ 91core-y += arch/powerpc/mm/
87core-$(CONFIG_XMON) += arch/ppc64/xmon/ 92core-y += arch/powerpc/sysdev/
88drivers-$(CONFIG_OPROFILE) += arch/ppc64/oprofile/ 93core-y += arch/powerpc/platforms/
94core-y += arch/powerpc/lib/
95core-$(CONFIG_XMON) += arch/powerpc/xmon/
96drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
89 97
90boot := arch/ppc64/boot 98boot := arch/ppc64/boot
91 99
@@ -100,7 +108,7 @@ $(boottargets-y): vmlinux
100bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage 108bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage
101bootimage-$(CONFIG_PPC_PMAC) := vmlinux 109bootimage-$(CONFIG_PPC_PMAC) := vmlinux
102bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage 110bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage
103bootimage-$(CONFIG_PPC_BPA) := zImage 111bootimage-$(CONFIG_PPC_BPA) := $(boot)/zImage
104bootimage-$(CONFIG_PPC_ISERIES) := vmlinux 112bootimage-$(CONFIG_PPC_ISERIES) := vmlinux
105BOOTIMAGE := $(bootimage-y) 113BOOTIMAGE := $(bootimage-y)
106install: vmlinux 114install: vmlinux
diff --git a/arch/ppc64/boot/Makefile b/arch/ppc64/boot/Makefile
index 33fdc8710891..301bc1536c49 100644
--- a/arch/ppc64/boot/Makefile
+++ b/arch/ppc64/boot/Makefile
@@ -22,15 +22,46 @@
22 22
23 23
24HOSTCC := gcc 24HOSTCC := gcc
25BOOTCFLAGS := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include) 25BOOTCFLAGS := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include) -fPIC
26BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc 26BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
27BOOTLFLAGS := -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds 27BOOTLFLAGS := -T $(srctree)/$(src)/zImage.lds
28OBJCOPYFLAGS := contents,alloc,load,readonly,data 28OBJCOPYFLAGS := contents,alloc,load,readonly,data
29 29
30src-boot := crt0.S string.S prom.c main.c zlib.c imagesize.c div64.S 30zlib := infblock.c infcodes.c inffast.c inflate.c inftrees.c infutil.c
31zlibheader := infblock.h infcodes.h inffast.h inftrees.h infutil.h
32zliblinuxheader := zlib.h zconf.h zutil.h
33
34$(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
35#$(addprefix $(obj)/,main.o): $(addprefix $(obj)/,zlib.h)
36
37src-boot := string.S prom.c main.c div64.S crt0.S
38src-boot += $(zlib)
31src-boot := $(addprefix $(obj)/, $(src-boot)) 39src-boot := $(addprefix $(obj)/, $(src-boot))
32obj-boot := $(addsuffix .o, $(basename $(src-boot))) 40obj-boot := $(addsuffix .o, $(basename $(src-boot)))
33 41
42BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj)
43
44quiet_cmd_copy_zlib = COPY $@
45 cmd_copy_zlib = sed "s@__attribute_used__@@;s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
46
47quiet_cmd_copy_zlibheader = COPY $@
48 cmd_copy_zlibheader = sed "s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
49# stddef.h for NULL
50quiet_cmd_copy_zliblinuxheader = COPY $@
51 cmd_copy_zliblinuxheader = sed "s@<linux/string.h>@\"string.h\"@;s@<linux/kernel.h>@<stddef.h>@;s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
52
53$(addprefix $(obj)/,$(zlib)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
54 $(call cmd,copy_zlib)
55
56$(addprefix $(obj)/,$(zlibheader)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
57 $(call cmd,copy_zlibheader)
58
59$(addprefix $(obj)/,$(zliblinuxheader)): $(obj)/%: $(srctree)/include/linux/%
60 $(call cmd,copy_zliblinuxheader)
61
62clean-files := $(zlib) $(zlibheader) $(zliblinuxheader)
63
64
34quiet_cmd_bootcc = BOOTCC $@ 65quiet_cmd_bootcc = BOOTCC $@
35 cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $< 66 cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
36 67
@@ -56,7 +87,7 @@ src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section)))
56gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section))) 87gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section)))
57 88
58hostprogs-y := addnote addRamDisk 89hostprogs-y := addnote addRamDisk
59targets += zImage.vmode zImage.initrd.vmode zImage zImage.initrd imagesize.c \ 90targets += zImage.vmode zImage.initrd.vmode zImage zImage.initrd \
60 $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \ 91 $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
61 $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \ 92 $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
62 $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \ 93 $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \
@@ -69,9 +100,9 @@ quiet_cmd_ramdisk = RAMDISK $@
69quiet_cmd_stripvm = STRIP $@ 100quiet_cmd_stripvm = STRIP $@
70 cmd_stripvm = $(STRIP) -s $< -o $@ 101 cmd_stripvm = $(STRIP) -s $< -o $@
71 102
72vmlinux.strip: vmlinux FORCE 103vmlinux.strip: vmlinux
73 $(call if_changed,stripvm) 104 $(call if_changed,stripvm)
74$(obj)/vmlinux.initrd: vmlinux.strip $(obj)/addRamDisk $(obj)/ramdisk.image.gz FORCE 105$(obj)/vmlinux.initrd: vmlinux.strip $(obj)/addRamDisk $(obj)/ramdisk.image.gz
75 $(call if_changed,ramdisk) 106 $(call if_changed,ramdisk)
76 107
77quiet_cmd_addsection = ADDSEC $@ 108quiet_cmd_addsection = ADDSEC $@
@@ -79,48 +110,38 @@ quiet_cmd_addsection = ADDSEC $@
79 --add-section=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(patsubst %.o,%.gz, $@) \ 110 --add-section=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(patsubst %.o,%.gz, $@) \
80 --set-section-flags=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(OBJCOPYFLAGS) 111 --set-section-flags=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(OBJCOPYFLAGS)
81 112
82quiet_cmd_imagesize = GENSIZE $@
83 cmd_imagesize = ls -l vmlinux.strip | \
84 awk '{printf "/* generated -- do not edit! */\n" "unsigned long vmlinux_filesize = %d;\n", $$5}' \
85 > $(obj)/imagesize.c && \
86 $(CROSS_COMPILE)nm -n vmlinux | tail -n 1 | \
87 awk '{printf "unsigned long vmlinux_memsize = 0x%s;\n", substr($$1,8)}' >> $(obj)/imagesize.c
88
89quiet_cmd_addnote = ADDNOTE $@ 113quiet_cmd_addnote = ADDNOTE $@
90 cmd_addnote = $(obj)/addnote $@ 114 cmd_addnote = $(obj)/addnote $@
91 115
92$(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE 116$(call gz-sec, $(required)): $(obj)/kernel-%.gz: %
93 $(call if_changed,gzip) 117 $(call if_changed,gzip)
94 118
95$(obj)/kernel-initrd.gz: $(obj)/ramdisk.image.gz 119$(obj)/kernel-initrd.gz: $(obj)/ramdisk.image.gz
96 cp -f $(obj)/ramdisk.image.gz $@ 120 cp -f $(obj)/ramdisk.image.gz $@
97 121
98$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz FORCE 122$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz
99 @touch $@ 123 @touch $@
100 124
101$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c FORCE 125$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c
102 $(call if_changed_dep,bootcc) 126 $(call if_changed_dep,bootcc)
103 $(call cmd,addsection) 127 $(call cmd,addsection)
104 128
105$(obj)/zImage.vmode: obj-boot += $(call obj-sec, $(required)) 129$(obj)/zImage.vmode: obj-boot += $(call obj-sec, $(required))
106$(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) FORCE 130$(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) $(srctree)/$(src)/zImage.lds
107 $(call cmd,bootld,$(obj-boot)) 131 $(call cmd,bootld,$(obj-boot))
108 132
109$(obj)/zImage.initrd.vmode: obj-boot += $(call obj-sec, $(required) $(initrd)) 133$(obj)/zImage.initrd.vmode: obj-boot += $(call obj-sec, $(required) $(initrd))
110$(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) FORCE 134$(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) $(srctree)/$(src)/zImage.lds
111 $(call cmd,bootld,$(obj-boot)) 135 $(call cmd,bootld,$(obj-boot))
112 136
113$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote FORCE 137$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote
114 @cp -f $< $@ 138 @cp -f $< $@
115 $(call if_changed,addnote) 139 $(call if_changed,addnote)
116 140
117$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote FORCE 141$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote
118 @cp -f $< $@ 142 @cp -f $< $@
119 $(call if_changed,addnote) 143 $(call if_changed,addnote)
120 144
121$(obj)/imagesize.c: vmlinux.strip
122 $(call cmd,imagesize)
123
124install: $(CONFIGURE) $(BOOTIMAGE) 145install: $(CONFIGURE) $(BOOTIMAGE)
125 sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)" 146 sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)"
126 147
diff --git a/arch/ppc64/boot/crt0.S b/arch/ppc64/boot/crt0.S
index 3861e7f9cf19..9cc442263939 100644
--- a/arch/ppc64/boot/crt0.S
+++ b/arch/ppc64/boot/crt0.S
@@ -12,11 +12,40 @@
12#include "ppc_asm.h" 12#include "ppc_asm.h"
13 13
14 .text 14 .text
15 .globl _start 15 .globl _zimage_start
16_start: 16_zimage_start:
17 bl reloc_offset
18
19reloc_offset:
20 mflr r0
21 lis r9,reloc_offset@ha
22 addi r9,r9,reloc_offset@l
23 subf. r0,r9,r0
24 beq clear_caches
25
26reloc_got2:
27 lis r9,__got2_start@ha
28 addi r9,r9,__got2_start@l
29 lis r8,__got2_end@ha
30 addi r8,r8,__got2_end@l
31 subf. r8,r9,r8
32 beq clear_caches
33 srwi. r8,r8,2
34 mtctr r8
35 add r9,r0,r9
36reloc_got2_loop:
37 lwz r8,0(r9)
38 add r8,r8,r0
39 stw r8,0(r9)
40 addi r9,r9,4
41 bdnz reloc_got2_loop
42
43clear_caches:
17 lis r9,_start@h 44 lis r9,_start@h
45 add r9,r0,r9
18 lis r8,_etext@ha 46 lis r8,_etext@ha
19 addi r8,r8,_etext@l 47 addi r8,r8,_etext@l
48 add r8,r0,r8
201: dcbf r0,r9 491: dcbf r0,r9
21 icbi r0,r9 50 icbi r0,r9
22 addi r9,r9,0x20 51 addi r9,r9,0x20
@@ -25,24 +54,6 @@ _start:
25 sync 54 sync
26 isync 55 isync
27 56
28 ## Clear out the BSS as per ANSI C requirements 57 mr r6,r1
29
30 lis r7,_end@ha
31 addi r7,r7,_end@l # r7 = &_end
32 lis r8,__bss_start@ha #
33 addi r8,r8,__bss_start@l # r8 = &_bss_start
34
35 ## Determine how large an area, in number of words, to clear
36
37 subf r7,r8,r7 # r7 = &_end - &_bss_start + 1
38 addi r7,r7,3 # r7 += 3
39 srwi. r7,r7,2 # r7 = size in words.
40 beq 3f # If the size is zero, don't bother
41 addi r8,r8,-4 # r8 -= 4
42 mtctr r7 # SPRN_CTR = number of words to clear
43 li r0,0 # r0 = 0
442: stwu r0,4(r8) # Clear out a word
45 bdnz 2b # Keep clearing until done
463:
47 b start 58 b start
48 59
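
The crt0.S rewrite above makes the 32-bit boot wrapper relocatable (matching the -fPIC added to BOOTCFLAGS earlier in this series): it computes the difference between the link-time and load-time address of reloc_offset, adds that offset to every 32-bit entry between __got2_start and __got2_end (the .got2 window defined in the new zImage.lds later in this commit), and drops the BSS-clearing loop, which now happens in C via the memset(__bss_start, 0, _end - __bss_start) added to start() in the main.c diff below. A rough C rendering of the relocation loop, with hypothetical extern declarations for the linker-provided symbols:

	extern unsigned int __got2_start[], __got2_end[];	/* provided by zImage.lds */

	static void reloc_got2(unsigned long offset)
	{
		unsigned int *p;

		/* add the load offset to every 32-bit .got2 entry */
		for (p = __got2_start; p < __got2_end; p++)
			*p += offset;
	}
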
diff --git a/arch/ppc64/boot/install.sh b/arch/ppc64/boot/install.sh
index cb2d6626b555..eacce9590816 100644
--- a/arch/ppc64/boot/install.sh
+++ b/arch/ppc64/boot/install.sh
@@ -28,7 +28,7 @@ if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}i
28# Default install 28# Default install
29 29
30# this should work for both the pSeries zImage and the iSeries vmlinux.sm 30# this should work for both the pSeries zImage and the iSeries vmlinux.sm
31image_name=`basename $5` 31image_name=`basename $2`
32 32
33if [ -f $4/$image_name ]; then 33if [ -f $4/$image_name ]; then
34 mv $4/$image_name $4/$image_name.old 34 mv $4/$image_name $4/$image_name.old
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c
index f7ec19a2d0b0..c1dc876bccab 100644
--- a/arch/ppc64/boot/main.c
+++ b/arch/ppc64/boot/main.c
@@ -17,7 +17,6 @@
17#include "prom.h" 17#include "prom.h"
18#include "zlib.h" 18#include "zlib.h"
19 19
20static void gunzip(void *, int, unsigned char *, int *);
21extern void flush_cache(void *, unsigned long); 20extern void flush_cache(void *, unsigned long);
22 21
23 22
@@ -26,31 +25,26 @@ extern void flush_cache(void *, unsigned long);
26#define RAM_END (512<<20) // Fixme: use OF */ 25#define RAM_END (512<<20) // Fixme: use OF */
27#define ONE_MB 0x100000 26#define ONE_MB 0x100000
28 27
29static char *avail_ram;
30static char *begin_avail, *end_avail;
31static char *avail_high;
32static unsigned int heap_use;
33static unsigned int heap_max;
34
35extern char _start[]; 28extern char _start[];
29extern char __bss_start[];
36extern char _end[]; 30extern char _end[];
37extern char _vmlinux_start[]; 31extern char _vmlinux_start[];
38extern char _vmlinux_end[]; 32extern char _vmlinux_end[];
39extern char _initrd_start[]; 33extern char _initrd_start[];
40extern char _initrd_end[]; 34extern char _initrd_end[];
41extern unsigned long vmlinux_filesize;
42extern unsigned long vmlinux_memsize;
43 35
44struct addr_range { 36struct addr_range {
45 unsigned long addr; 37 unsigned long addr;
46 unsigned long size; 38 unsigned long size;
47 unsigned long memsize; 39 unsigned long memsize;
48}; 40};
49static struct addr_range vmlinux = {0, 0, 0}; 41static struct addr_range vmlinux;
50static struct addr_range vmlinuz = {0, 0, 0}; 42static struct addr_range vmlinuz;
51static struct addr_range initrd = {0, 0, 0}; 43static struct addr_range initrd;
44
45static char scratch[46912]; /* scratch space for gunzip, from zlib_inflate_workspacesize() */
46static char elfheader[256];
52 47
53static char scratch[128<<10]; /* 128kB of scratch space for gunzip */
54 48
55typedef void (*kernel_entry_t)( unsigned long, 49typedef void (*kernel_entry_t)( unsigned long,
56 unsigned long, 50 unsigned long,
@@ -62,6 +56,63 @@ typedef void (*kernel_entry_t)( unsigned long,
62 56
63static unsigned long claim_base; 57static unsigned long claim_base;
64 58
59#define HEAD_CRC 2
60#define EXTRA_FIELD 4
61#define ORIG_NAME 8
62#define COMMENT 0x10
63#define RESERVED 0xe0
64
65static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
66{
67 z_stream s;
68 int r, i, flags;
69
70 /* skip header */
71 i = 10;
72 flags = src[3];
73 if (src[2] != Z_DEFLATED || (flags & RESERVED) != 0) {
74 printf("bad gzipped data\n\r");
75 exit();
76 }
77 if ((flags & EXTRA_FIELD) != 0)
78 i = 12 + src[10] + (src[11] << 8);
79 if ((flags & ORIG_NAME) != 0)
80 while (src[i++] != 0)
81 ;
82 if ((flags & COMMENT) != 0)
83 while (src[i++] != 0)
84 ;
85 if ((flags & HEAD_CRC) != 0)
86 i += 2;
87 if (i >= *lenp) {
88 printf("gunzip: ran out of data in header\n\r");
89 exit();
90 }
91
92 if (zlib_inflate_workspacesize() > sizeof(scratch)) {
93 printf("gunzip needs more mem\n");
94 exit();
95 }
96 memset(&s, 0, sizeof(s));
97 s.workspace = scratch;
98 r = zlib_inflateInit2(&s, -MAX_WBITS);
99 if (r != Z_OK) {
100 printf("inflateInit2 returned %d\n\r", r);
101 exit();
102 }
103 s.next_in = src + i;
104 s.avail_in = *lenp - i;
105 s.next_out = dst;
106 s.avail_out = dstlen;
107 r = zlib_inflate(&s, Z_FULL_FLUSH);
108 if (r != Z_OK && r != Z_STREAM_END) {
109 printf("inflate returned %d msg: %s\n\r", r, s.msg);
110 exit();
111 }
112 *lenp = s.next_out - (unsigned char *) dst;
113 zlib_inflateEnd(&s);
114}
115
65static unsigned long try_claim(unsigned long size) 116static unsigned long try_claim(unsigned long size)
66{ 117{
67 unsigned long addr = 0; 118 unsigned long addr = 0;
@@ -80,13 +131,16 @@ static unsigned long try_claim(unsigned long size)
80 return addr; 131 return addr;
81} 132}
82 133
83void start(unsigned long a1, unsigned long a2, void *promptr) 134void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
84{ 135{
85 unsigned long i; 136 unsigned long i;
137 int len;
86 kernel_entry_t kernel_entry; 138 kernel_entry_t kernel_entry;
87 Elf64_Ehdr *elf64; 139 Elf64_Ehdr *elf64;
88 Elf64_Phdr *elf64ph; 140 Elf64_Phdr *elf64ph;
89 141
142 memset(__bss_start, 0, _end - __bss_start);
143
90 prom = (int (*)(void *)) promptr; 144 prom = (int (*)(void *)) promptr;
91 chosen_handle = finddevice("/chosen"); 145 chosen_handle = finddevice("/chosen");
92 if (chosen_handle == (void *) -1) 146 if (chosen_handle == (void *) -1)
@@ -97,7 +151,7 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
97 if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4) 151 if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
98 exit(); 152 exit();
99 153
100 printf("\n\rzImage starting: loaded at 0x%lx\n\r", (unsigned long) _start); 154 printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);
101 155
102 /* 156 /*
103 * The first available claim_base must be above the end of the 157 * The first available claim_base must be above the end of the
@@ -118,25 +172,45 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
118 claim_base = PROG_START; 172 claim_base = PROG_START;
119#endif 173#endif
120 174
121 /* 175 vmlinuz.addr = (unsigned long)_vmlinux_start;
122 * Now we try to claim some memory for the kernel itself 176 vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
123 * our "vmlinux_memsize" is the memory footprint in RAM, _HOWEVER_, what 177
124 * our Makefile stuffs in is an image containing all sort of junk including 178 /* gunzip the ELF header of the kernel */
125 * an ELF header. We need to do some calculations here to find the right 179 if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
126 * size... In practice we add 1Mb, that is enough, but we should really 180 len = vmlinuz.size;
127 * consider fixing the Makefile to put a _raw_ kernel in there ! 181 gunzip(elfheader, sizeof(elfheader),
128 */ 182 (unsigned char *)vmlinuz.addr, &len);
129 vmlinux_memsize += ONE_MB; 183 } else
130 printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux_memsize); 184 memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader));
131 vmlinux.addr = try_claim(vmlinux_memsize); 185
186 elf64 = (Elf64_Ehdr *)elfheader;
187 if ( elf64->e_ident[EI_MAG0] != ELFMAG0 ||
188 elf64->e_ident[EI_MAG1] != ELFMAG1 ||
189 elf64->e_ident[EI_MAG2] != ELFMAG2 ||
190 elf64->e_ident[EI_MAG3] != ELFMAG3 ||
191 elf64->e_ident[EI_CLASS] != ELFCLASS64 ||
192 elf64->e_ident[EI_DATA] != ELFDATA2MSB ||
193 elf64->e_type != ET_EXEC ||
194 elf64->e_machine != EM_PPC64 )
195 {
196 printf("Error: not a valid PPC64 ELF file!\n\r");
197 exit();
198 }
199
200 elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
201 (unsigned long)elf64->e_phoff);
202 for(i=0; i < (unsigned int)elf64->e_phnum ;i++,elf64ph++) {
203 if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
204 break;
205 }
206 vmlinux.size = (unsigned long)elf64ph->p_filesz;
207 vmlinux.memsize = (unsigned long)elf64ph->p_memsz;
208 printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
209 vmlinux.addr = try_claim(vmlinux.memsize);
132 if (vmlinux.addr == 0) { 210 if (vmlinux.addr == 0) {
133 printf("Can't allocate memory for kernel image !\n\r"); 211 printf("Can't allocate memory for kernel image !\n\r");
134 exit(); 212 exit();
135 } 213 }
136 vmlinuz.addr = (unsigned long)_vmlinux_start;
137 vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
138 vmlinux.size = PAGE_ALIGN(vmlinux_filesize);
139 vmlinux.memsize = vmlinux_memsize;
140 214
141 /* 215 /*
142 * Now we try to claim memory for the initrd (and copy it there) 216 * Now we try to claim memory for the initrd (and copy it there)
@@ -160,49 +234,22 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
160 234
161 /* Eventually gunzip the kernel */ 235 /* Eventually gunzip the kernel */
162 if (*(unsigned short *)vmlinuz.addr == 0x1f8b) { 236 if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
163 int len;
164 avail_ram = scratch;
165 begin_avail = avail_high = avail_ram;
166 end_avail = scratch + sizeof(scratch);
167 printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...", 237 printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
168 vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size); 238 vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
169 len = vmlinuz.size; 239 len = vmlinuz.size;
170 gunzip((void *)vmlinux.addr, vmlinux.size, 240 gunzip((void *)vmlinux.addr, vmlinux.memsize,
171 (unsigned char *)vmlinuz.addr, &len); 241 (unsigned char *)vmlinuz.addr, &len);
172 printf("done 0x%lx bytes\n\r", len); 242 printf("done 0x%lx bytes\n\r", len);
173 printf("0x%x bytes of heap consumed, max in use 0x%x\n\r",
174 (unsigned)(avail_high - begin_avail), heap_max);
175 } else { 243 } else {
176 memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size); 244 memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
177 } 245 }
178 246
179 /* Skip over the ELF header */ 247 /* Skip over the ELF header */
180 elf64 = (Elf64_Ehdr *)vmlinux.addr;
181 if ( elf64->e_ident[EI_MAG0] != ELFMAG0 ||
182 elf64->e_ident[EI_MAG1] != ELFMAG1 ||
183 elf64->e_ident[EI_MAG2] != ELFMAG2 ||
184 elf64->e_ident[EI_MAG3] != ELFMAG3 ||
185 elf64->e_ident[EI_CLASS] != ELFCLASS64 ||
186 elf64->e_ident[EI_DATA] != ELFDATA2MSB ||
187 elf64->e_type != ET_EXEC ||
188 elf64->e_machine != EM_PPC64 )
189 {
190 printf("Error: not a valid PPC64 ELF file!\n\r");
191 exit();
192 }
193
194 elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
195 (unsigned long)elf64->e_phoff);
196 for(i=0; i < (unsigned int)elf64->e_phnum ;i++,elf64ph++) {
197 if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
198 break;
199 }
200#ifdef DEBUG 248#ifdef DEBUG
201 printf("... skipping 0x%lx bytes of ELF header\n\r", 249 printf("... skipping 0x%lx bytes of ELF header\n\r",
202 (unsigned long)elf64ph->p_offset); 250 (unsigned long)elf64ph->p_offset);
203#endif 251#endif
204 vmlinux.addr += (unsigned long)elf64ph->p_offset; 252 vmlinux.addr += (unsigned long)elf64ph->p_offset;
205 vmlinux.size -= (unsigned long)elf64ph->p_offset;
206 253
207 flush_cache((void *)vmlinux.addr, vmlinux.size); 254 flush_cache((void *)vmlinux.addr, vmlinux.size);
208 255
@@ -225,108 +272,3 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
225 exit(); 272 exit();
226} 273}
227 274
228struct memchunk {
229 unsigned int size;
230 unsigned int pad;
231 struct memchunk *next;
232};
233
234static struct memchunk *freechunks;
235
236void *zalloc(void *x, unsigned items, unsigned size)
237{
238 void *p;
239 struct memchunk **mpp, *mp;
240
241 size *= items;
242 size = _ALIGN(size, sizeof(struct memchunk));
243 heap_use += size;
244 if (heap_use > heap_max)
245 heap_max = heap_use;
246 for (mpp = &freechunks; (mp = *mpp) != 0; mpp = &mp->next) {
247 if (mp->size == size) {
248 *mpp = mp->next;
249 return mp;
250 }
251 }
252 p = avail_ram;
253 avail_ram += size;
254 if (avail_ram > avail_high)
255 avail_high = avail_ram;
256 if (avail_ram > end_avail) {
257 printf("oops... out of memory\n\r");
258 pause();
259 }
260 return p;
261}
262
263void zfree(void *x, void *addr, unsigned nb)
264{
265 struct memchunk *mp = addr;
266
267 nb = _ALIGN(nb, sizeof(struct memchunk));
268 heap_use -= nb;
269 if (avail_ram == addr + nb) {
270 avail_ram = addr;
271 return;
272 }
273 mp->size = nb;
274 mp->next = freechunks;
275 freechunks = mp;
276}
277
278#define HEAD_CRC 2
279#define EXTRA_FIELD 4
280#define ORIG_NAME 8
281#define COMMENT 0x10
282#define RESERVED 0xe0
283
284#define DEFLATED 8
285
286static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
287{
288 z_stream s;
289 int r, i, flags;
290
291 /* skip header */
292 i = 10;
293 flags = src[3];
294 if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
295 printf("bad gzipped data\n\r");
296 exit();
297 }
298 if ((flags & EXTRA_FIELD) != 0)
299 i = 12 + src[10] + (src[11] << 8);
300 if ((flags & ORIG_NAME) != 0)
301 while (src[i++] != 0)
302 ;
303 if ((flags & COMMENT) != 0)
304 while (src[i++] != 0)
305 ;
306 if ((flags & HEAD_CRC) != 0)
307 i += 2;
308 if (i >= *lenp) {
309 printf("gunzip: ran out of data in header\n\r");
310 exit();
311 }
312
313 s.zalloc = zalloc;
314 s.zfree = zfree;
315 r = inflateInit2(&s, -MAX_WBITS);
316 if (r != Z_OK) {
317 printf("inflateInit2 returned %d\n\r", r);
318 exit();
319 }
320 s.next_in = src + i;
321 s.avail_in = *lenp - i;
322 s.next_out = dst;
323 s.avail_out = dstlen;
324 r = inflate(&s, Z_FINISH);
325 if (r != Z_OK && r != Z_STREAM_END) {
326 printf("inflate returned %d msg: %s\n\r", r, s.msg);
327 exit();
328 }
329 *lenp = s.next_out - (unsigned char *) dst;
330 inflateEnd(&s);
331}
332
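
The main.c changes above drop the boot wrapper's private heap (zalloc()/zfree(), the memchunk free list, and the 128kB scratch heap) along with the bundled zlib.c, and instead call the kernel's lib/zlib_inflate code, whose sources the new boot Makefile rules copy into the build directory. The caller now hands zlib a single static work area sized by zlib_inflate_workspacesize(), and the ELF header is gunzipped separately up front so the kernel's real p_filesz/p_memsz values can be used for the memory claim. Condensed from the new gunzip() above, with error handling omitted and the function name chosen purely for illustration:

	/* scratch[] is the static work area from main.c; it must be at least
	 * zlib_inflate_workspacesize() bytes for this configuration. */
	static void inflate_raw(void *dst, int dstlen, unsigned char *src, int len, int skip)
	{
		z_stream s;

		memset(&s, 0, sizeof(s));
		s.workspace = scratch;			/* no zalloc()/zfree() hooks any more */
		zlib_inflateInit2(&s, -MAX_WBITS);	/* negative wbits: raw deflate, gzip header skipped by hand */
		s.next_in = src + skip;
		s.avail_in = len - skip;
		s.next_out = dst;
		s.avail_out = dstlen;
		zlib_inflate(&s, Z_FULL_FLUSH);
		zlib_inflateEnd(&s);
	}
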
diff --git a/arch/ppc64/boot/string.S b/arch/ppc64/boot/string.S
index 7ade87ae7718..b1eeaed7db17 100644
--- a/arch/ppc64/boot/string.S
+++ b/arch/ppc64/boot/string.S
@@ -104,7 +104,7 @@ memmove:
104 104
105 .globl memcpy 105 .globl memcpy
106memcpy: 106memcpy:
107 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ 107 rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
108 addi r6,r3,-4 108 addi r6,r3,-4
109 addi r4,r4,-4 109 addi r4,r4,-4
110 beq 2f /* if less than 8 bytes to do */ 110 beq 2f /* if less than 8 bytes to do */
@@ -146,7 +146,7 @@ memcpy:
146 146
147 .globl backwards_memcpy 147 .globl backwards_memcpy
148backwards_memcpy: 148backwards_memcpy:
149 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ 149 rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
150 add r6,r3,r5 150 add r6,r3,r5
151 add r4,r4,r5 151 add r4,r4,r5
152 beq 2f 152 beq 2f
diff --git a/arch/ppc64/boot/string.h b/arch/ppc64/boot/string.h
index 9289258bcbd6..9fdff1cc0d70 100644
--- a/arch/ppc64/boot/string.h
+++ b/arch/ppc64/boot/string.h
@@ -1,5 +1,6 @@
1#ifndef _PPC_BOOT_STRING_H_ 1#ifndef _PPC_BOOT_STRING_H_
2#define _PPC_BOOT_STRING_H_ 2#define _PPC_BOOT_STRING_H_
3#include <stddef.h>
3 4
4extern char *strcpy(char *dest, const char *src); 5extern char *strcpy(char *dest, const char *src);
5extern char *strncpy(char *dest, const char *src, size_t n); 6extern char *strncpy(char *dest, const char *src, size_t n);
diff --git a/arch/ppc64/boot/zImage.lds b/arch/ppc64/boot/zImage.lds
index 8fe5e7071f54..4b6bb3ffe3dc 100644
--- a/arch/ppc64/boot/zImage.lds
+++ b/arch/ppc64/boot/zImage.lds
@@ -1,62 +1,24 @@
1OUTPUT_ARCH(powerpc:common) 1OUTPUT_ARCH(powerpc:common)
2SEARCH_DIR(/lib); SEARCH_DIR(/usr/lib); SEARCH_DIR(/usr/local/lib); SEARCH_DIR(/usr/local/powerpc-any-elf/lib); 2ENTRY(_zimage_start)
3/* Do we need any of these for elf?
4 __DYNAMIC = 0; */
5SECTIONS 3SECTIONS
6{ 4{
7 /* Read-only sections, merged into text segment: */ 5 . = (4*1024*1024);
8 . = + SIZEOF_HEADERS; 6 _start = .;
9 .interp : { *(.interp) }
10 .hash : { *(.hash) }
11 .dynsym : { *(.dynsym) }
12 .dynstr : { *(.dynstr) }
13 .rel.text : { *(.rel.text) }
14 .rela.text : { *(.rela.text) }
15 .rel.data : { *(.rel.data) }
16 .rela.data : { *(.rela.data) }
17 .rel.rodata : { *(.rel.rodata) }
18 .rela.rodata : { *(.rela.rodata) }
19 .rel.got : { *(.rel.got) }
20 .rela.got : { *(.rela.got) }
21 .rel.ctors : { *(.rel.ctors) }
22 .rela.ctors : { *(.rela.ctors) }
23 .rel.dtors : { *(.rel.dtors) }
24 .rela.dtors : { *(.rela.dtors) }
25 .rel.bss : { *(.rel.bss) }
26 .rela.bss : { *(.rela.bss) }
27 .rel.plt : { *(.rel.plt) }
28 .rela.plt : { *(.rela.plt) }
29 .plt : { *(.plt) }
30 .text : 7 .text :
31 { 8 {
32 *(.text) 9 *(.text)
33 *(.fixup) 10 *(.fixup)
34 *(.got1)
35 } 11 }
36 . = ALIGN(4096);
37 _etext = .; 12 _etext = .;
38 PROVIDE (etext = .);
39 .rodata :
40 {
41 *(.rodata)
42 *(.rodata1)
43 }
44 .kstrtab : { *(.kstrtab) }
45 __vermagic : { *(__vermagic) }
46 .fini : { *(.fini) } =0
47 .ctors : { *(.ctors) }
48 .dtors : { *(.dtors) }
49 /* Read-write section, merged into data segment: */
50 . = ALIGN(4096); 13 . = ALIGN(4096);
51 .data : 14 .data :
52 { 15 {
53 *(.data) 16 *(.rodata*)
54 *(.data1) 17 *(.data*)
55 *(.sdata) 18 *(.sdata*)
56 *(.sdata2) 19 __got2_start = .;
57 *(.got.plt) *(.got) 20 *(.got2)
58 *(.dynamic) 21 __got2_end = .;
59 CONSTRUCTORS
60 } 22 }
61 23
62 . = ALIGN(4096); 24 . = ALIGN(4096);
@@ -71,20 +33,14 @@ SECTIONS
71 33
72 . = ALIGN(4096); 34 . = ALIGN(4096);
73 _edata = .; 35 _edata = .;
74 PROVIDE (edata = .);
75
76 .fixup : { *(.fixup) }
77 36
78 . = ALIGN(4096); 37 . = ALIGN(4096);
79 __bss_start = .; 38 __bss_start = .;
80 .bss : 39 .bss :
81 { 40 {
82 *(.sbss) *(.scommon) 41 *(.sbss)
83 *(.dynbss)
84 *(.bss) 42 *(.bss)
85 *(COMMON)
86 } 43 }
87 . = ALIGN(4096); 44 . = ALIGN(4096);
88 _end = . ; 45 _end = . ;
89 PROVIDE (end = .);
90} 46}
diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
deleted file mode 100644
index 0d910cd2079d..000000000000
--- a/arch/ppc64/boot/zlib.c
+++ /dev/null
@@ -1,2195 +0,0 @@
1/*
2 * This file is derived from various .h and .c files from the zlib-0.95
3 * distribution by Jean-loup Gailly and Mark Adler, with some additions
4 * by Paul Mackerras to aid in implementing Deflate compression and
5 * decompression for PPP packets. See zlib.h for conditions of
6 * distribution and use.
7 *
8 * Changes that have been made include:
9 * - changed functions not used outside this file to "local"
10 * - added minCompression parameter to deflateInit2
11 * - added Z_PACKET_FLUSH (see zlib.h for details)
12 * - added inflateIncomp
13 *
14 Copyright (C) 1995 Jean-loup Gailly and Mark Adler
15
16 This software is provided 'as-is', without any express or implied
17 warranty. In no event will the authors be held liable for any damages
18 arising from the use of this software.
19
20 Permission is granted to anyone to use this software for any purpose,
21 including commercial applications, and to alter it and redistribute it
22 freely, subject to the following restrictions:
23
24 1. The origin of this software must not be misrepresented; you must not
25 claim that you wrote the original software. If you use this software
26 in a product, an acknowledgment in the product documentation would be
27 appreciated but is not required.
28 2. Altered source versions must be plainly marked as such, and must not be
29 misrepresented as being the original software.
30 3. This notice may not be removed or altered from any source distribution.
31
32 Jean-loup Gailly Mark Adler
33 gzip@prep.ai.mit.edu madler@alumni.caltech.edu
34
35 *
36 *
37 */
38
39/*+++++*/
40/* zutil.h -- internal interface and configuration of the compression library
41 * Copyright (C) 1995 Jean-loup Gailly.
42 * For conditions of distribution and use, see copyright notice in zlib.h
43 */
44
45/* WARNING: this file should *not* be used by applications. It is
46 part of the implementation of the compression library and is
47 subject to change. Applications should only use zlib.h.
48 */
49
50/* From: zutil.h,v 1.9 1995/05/03 17:27:12 jloup Exp */
51
52#define _Z_UTIL_H
53
54#include "zlib.h"
55
56#ifndef local
57# define local static
58#endif
59/* compile with -Dlocal if your debugger can't find static symbols */
60
61#define FAR
62
63typedef unsigned char uch;
64typedef uch FAR uchf;
65typedef unsigned short ush;
66typedef ush FAR ushf;
67typedef unsigned long ulg;
68
69extern char *z_errmsg[]; /* indexed by 1-zlib_error */
70
71#define ERR_RETURN(strm,err) return (strm->msg=z_errmsg[1-err], err)
72/* To be used only when the state is known to be valid */
73
74#ifndef NULL
75#define NULL ((void *) 0)
76#endif
77
78 /* common constants */
79
80#define DEFLATED 8
81
82#ifndef DEF_WBITS
83# define DEF_WBITS MAX_WBITS
84#endif
85/* default windowBits for decompression. MAX_WBITS is for compression only */
86
87#if MAX_MEM_LEVEL >= 8
88# define DEF_MEM_LEVEL 8
89#else
90# define DEF_MEM_LEVEL MAX_MEM_LEVEL
91#endif
92/* default memLevel */
93
94#define STORED_BLOCK 0
95#define STATIC_TREES 1
96#define DYN_TREES 2
97/* The three kinds of block type */
98
99#define MIN_MATCH 3
100#define MAX_MATCH 258
101/* The minimum and maximum match lengths */
102
103 /* functions */
104
105extern void *memcpy(void *, const void *, unsigned long);
106#define zmemcpy memcpy
107
108/* Diagnostic functions */
109#ifdef DEBUG_ZLIB
110# include "stdio.h"
111# ifndef verbose
112# define verbose 0
113# endif
114# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
115# define Trace(x) fprintf x
116# define Tracev(x) {if (verbose) fprintf x ;}
117# define Tracevv(x) {if (verbose>1) fprintf x ;}
118# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
119# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
120#else
121# define Assert(cond,msg)
122# define Trace(x)
123# define Tracev(x)
124# define Tracevv(x)
125# define Tracec(c,x)
126# define Tracecv(c,x)
127#endif
128
129
130typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
131
132/* voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); */
133/* void zcfree OF((voidpf opaque, voidpf ptr)); */
134
135#define ZALLOC(strm, items, size) \
136 (*((strm)->zalloc))((strm)->opaque, (items), (size))
137#define ZFREE(strm, addr, size) \
138 (*((strm)->zfree))((strm)->opaque, (voidpf)(addr), (size))
139#define TRY_FREE(s, p, n) {if (p) ZFREE(s, p, n);}
140
141/* deflate.h -- internal compression state
142 * Copyright (C) 1995 Jean-loup Gailly
143 * For conditions of distribution and use, see copyright notice in zlib.h
144 */
145
146/* WARNING: this file should *not* be used by applications. It is
147 part of the implementation of the compression library and is
148 subject to change. Applications should only use zlib.h.
149 */
150
151/*+++++*/
152/* infblock.h -- header to use infblock.c
153 * Copyright (C) 1995 Mark Adler
154 * For conditions of distribution and use, see copyright notice in zlib.h
155 */
156
157/* WARNING: this file should *not* be used by applications. It is
158 part of the implementation of the compression library and is
159 subject to change. Applications should only use zlib.h.
160 */
161
162struct inflate_blocks_state;
163typedef struct inflate_blocks_state FAR inflate_blocks_statef;
164
165local inflate_blocks_statef * inflate_blocks_new OF((
166 z_stream *z,
167 check_func c, /* check function */
168 uInt w)); /* window size */
169
170local int inflate_blocks OF((
171 inflate_blocks_statef *,
172 z_stream *,
173 int)); /* initial return code */
174
175local void inflate_blocks_reset OF((
176 inflate_blocks_statef *,
177 z_stream *,
178 uLongf *)); /* check value on output */
179
180local int inflate_blocks_free OF((
181 inflate_blocks_statef *,
182 z_stream *,
183 uLongf *)); /* check value on output */
184
185local int inflate_addhistory OF((
186 inflate_blocks_statef *,
187 z_stream *));
188
189local int inflate_packet_flush OF((
190 inflate_blocks_statef *));
191
192/*+++++*/
193/* inftrees.h -- header to use inftrees.c
194 * Copyright (C) 1995 Mark Adler
195 * For conditions of distribution and use, see copyright notice in zlib.h
196 */
197
198/* WARNING: this file should *not* be used by applications. It is
199 part of the implementation of the compression library and is
200 subject to change. Applications should only use zlib.h.
201 */
202
203/* Huffman code lookup table entry--this entry is four bytes for machines
204 that have 16-bit pointers (e.g. PC's in the small or medium model). */
205
206typedef struct inflate_huft_s FAR inflate_huft;
207
208struct inflate_huft_s {
209 union {
210 struct {
211 Byte Exop; /* number of extra bits or operation */
212 Byte Bits; /* number of bits in this code or subcode */
213 } what;
214 uInt Nalloc; /* number of these allocated here */
215 Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
216 } word; /* 16-bit, 8 bytes for 32-bit machines) */
217 union {
218 uInt Base; /* literal, length base, or distance base */
219 inflate_huft *Next; /* pointer to next level of table */
220 } more;
221};
222
223#ifdef DEBUG_ZLIB
224 local uInt inflate_hufts;
225#endif
226
227local int inflate_trees_bits OF((
228 uIntf *, /* 19 code lengths */
229 uIntf *, /* bits tree desired/actual depth */
230 inflate_huft * FAR *, /* bits tree result */
231 z_stream *)); /* for zalloc, zfree functions */
232
233local int inflate_trees_dynamic OF((
234 uInt, /* number of literal/length codes */
235 uInt, /* number of distance codes */
236 uIntf *, /* that many (total) code lengths */
237 uIntf *, /* literal desired/actual bit depth */
238 uIntf *, /* distance desired/actual bit depth */
239 inflate_huft * FAR *, /* literal/length tree result */
240 inflate_huft * FAR *, /* distance tree result */
241 z_stream *)); /* for zalloc, zfree functions */
242
243local int inflate_trees_fixed OF((
244 uIntf *, /* literal desired/actual bit depth */
245 uIntf *, /* distance desired/actual bit depth */
246 inflate_huft * FAR *, /* literal/length tree result */
247 inflate_huft * FAR *)); /* distance tree result */
248
249local int inflate_trees_free OF((
250 inflate_huft *, /* tables to free */
251 z_stream *)); /* for zfree function */
252
253
254/*+++++*/
255/* infcodes.h -- header to use infcodes.c
256 * Copyright (C) 1995 Mark Adler
257 * For conditions of distribution and use, see copyright notice in zlib.h
258 */
259
260/* WARNING: this file should *not* be used by applications. It is
261 part of the implementation of the compression library and is
262 subject to change. Applications should only use zlib.h.
263 */
264
265struct inflate_codes_state;
266typedef struct inflate_codes_state FAR inflate_codes_statef;
267
268local inflate_codes_statef *inflate_codes_new OF((
269 uInt, uInt,
270 inflate_huft *, inflate_huft *,
271 z_stream *));
272
273local int inflate_codes OF((
274 inflate_blocks_statef *,
275 z_stream *,
276 int));
277
278local void inflate_codes_free OF((
279 inflate_codes_statef *,
280 z_stream *));
281
282
283/*+++++*/
284/* inflate.c -- zlib interface to inflate modules
285 * Copyright (C) 1995 Mark Adler
286 * For conditions of distribution and use, see copyright notice in zlib.h
287 */
288
289/* inflate private state */
290struct internal_state {
291
292 /* mode */
293 enum {
294 METHOD, /* waiting for method byte */
295 FLAG, /* waiting for flag byte */
296 BLOCKS, /* decompressing blocks */
297 CHECK4, /* four check bytes to go */
298 CHECK3, /* three check bytes to go */
299 CHECK2, /* two check bytes to go */
300 CHECK1, /* one check byte to go */
301 DONE, /* finished check, done */
302 BAD} /* got an error--stay here */
303 mode; /* current inflate mode */
304
305 /* mode dependent information */
306 union {
307 uInt method; /* if FLAGS, method byte */
308 struct {
309 uLong was; /* computed check value */
310 uLong need; /* stream check value */
311 } check; /* if CHECK, check values to compare */
312 uInt marker; /* if BAD, inflateSync's marker bytes count */
313 } sub; /* submode */
314
315 /* mode independent information */
316 int nowrap; /* flag for no wrapper */
317 uInt wbits; /* log2(window size) (8..15, defaults to 15) */
318 inflate_blocks_statef
319 *blocks; /* current inflate_blocks state */
320
321};
322
323
324int inflateReset(
325 z_stream *z
326)
327{
328 uLong c;
329
330 if (z == Z_NULL || z->state == Z_NULL)
331 return Z_STREAM_ERROR;
332 z->total_in = z->total_out = 0;
333 z->msg = Z_NULL;
334 z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
335 inflate_blocks_reset(z->state->blocks, z, &c);
336 Trace((stderr, "inflate: reset\n"));
337 return Z_OK;
338}
339
340
341int inflateEnd(
342 z_stream *z
343)
344{
345 uLong c;
346
347 if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
348 return Z_STREAM_ERROR;
349 if (z->state->blocks != Z_NULL)
350 inflate_blocks_free(z->state->blocks, z, &c);
351 ZFREE(z, z->state, sizeof(struct internal_state));
352 z->state = Z_NULL;
353 Trace((stderr, "inflate: end\n"));
354 return Z_OK;
355}
356
357
358int inflateInit2(
359 z_stream *z,
360 int w
361)
362{
363 /* initialize state */
364 if (z == Z_NULL)
365 return Z_STREAM_ERROR;
366/* if (z->zalloc == Z_NULL) z->zalloc = zcalloc; */
367/* if (z->zfree == Z_NULL) z->zfree = zcfree; */
368 if ((z->state = (struct internal_state FAR *)
369 ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
370 return Z_MEM_ERROR;
371 z->state->blocks = Z_NULL;
372
373 /* handle undocumented nowrap option (no zlib header or check) */
374 z->state->nowrap = 0;
375 if (w < 0)
376 {
377 w = - w;
378 z->state->nowrap = 1;
379 }
380
381 /* set window size */
382 if (w < 8 || w > 15)
383 {
384 inflateEnd(z);
385 return Z_STREAM_ERROR;
386 }
387 z->state->wbits = (uInt)w;
388
389 /* create inflate_blocks state */
390 if ((z->state->blocks =
391 inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, 1 << w))
392 == Z_NULL)
393 {
394 inflateEnd(z);
395 return Z_MEM_ERROR;
396 }
397 Trace((stderr, "inflate: allocated\n"));
398
399 /* reset state */
400 inflateReset(z);
401 return Z_OK;
402}
403
404
405int inflateInit(
406 z_stream *z
407)
408{
409 return inflateInit2(z, DEF_WBITS);
410}
411
412
413#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
414#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
415
416int inflate(
417 z_stream *z,
418 int f
419)
420{
421 int r;
422 uInt b;
423
424 if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL)
425 return Z_STREAM_ERROR;
426 r = Z_BUF_ERROR;
427 while (1) switch (z->state->mode)
428 {
429 case METHOD:
430 NEEDBYTE
431 if (((z->state->sub.method = NEXTBYTE) & 0xf) != DEFLATED)
432 {
433 z->state->mode = BAD;
434 z->msg = "unknown compression method";
435 z->state->sub.marker = 5; /* can't try inflateSync */
436 break;
437 }
438 if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
439 {
440 z->state->mode = BAD;
441 z->msg = "invalid window size";
442 z->state->sub.marker = 5; /* can't try inflateSync */
443 break;
444 }
445 z->state->mode = FLAG;
446 case FLAG:
447 NEEDBYTE
448 if ((b = NEXTBYTE) & 0x20)
449 {
450 z->state->mode = BAD;
451 z->msg = "invalid reserved bit";
452 z->state->sub.marker = 5; /* can't try inflateSync */
453 break;
454 }
455 if (((z->state->sub.method << 8) + b) % 31)
456 {
457 z->state->mode = BAD;
458 z->msg = "incorrect header check";
459 z->state->sub.marker = 5; /* can't try inflateSync */
460 break;
461 }
462 Trace((stderr, "inflate: zlib header ok\n"));
463 z->state->mode = BLOCKS;
464 case BLOCKS:
465 r = inflate_blocks(z->state->blocks, z, r);
466 if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
467 r = inflate_packet_flush(z->state->blocks);
468 if (r == Z_DATA_ERROR)
469 {
470 z->state->mode = BAD;
471 z->state->sub.marker = 0; /* can try inflateSync */
472 break;
473 }
474 if (r != Z_STREAM_END)
475 return r;
476 r = Z_OK;
477 inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
478 if (z->state->nowrap)
479 {
480 z->state->mode = DONE;
481 break;
482 }
483 z->state->mode = CHECK4;
484 case CHECK4:
485 NEEDBYTE
486 z->state->sub.check.need = (uLong)NEXTBYTE << 24;
487 z->state->mode = CHECK3;
488 case CHECK3:
489 NEEDBYTE
490 z->state->sub.check.need += (uLong)NEXTBYTE << 16;
491 z->state->mode = CHECK2;
492 case CHECK2:
493 NEEDBYTE
494 z->state->sub.check.need += (uLong)NEXTBYTE << 8;
495 z->state->mode = CHECK1;
496 case CHECK1:
497 NEEDBYTE
498 z->state->sub.check.need += (uLong)NEXTBYTE;
499
500 if (z->state->sub.check.was != z->state->sub.check.need)
501 {
502 z->state->mode = BAD;
503 z->msg = "incorrect data check";
504 z->state->sub.marker = 5; /* can't try inflateSync */
505 break;
506 }
507 Trace((stderr, "inflate: zlib check ok\n"));
508 z->state->mode = DONE;
509 case DONE:
510 return Z_STREAM_END;
511 case BAD:
512 return Z_DATA_ERROR;
513 default:
514 return Z_STREAM_ERROR;
515 }
516
517 empty:
518 if (f != Z_PACKET_FLUSH)
519 return r;
520 z->state->mode = BAD;
521 z->state->sub.marker = 0; /* can try inflateSync */
522 return Z_DATA_ERROR;
523}
524
525/*
526 * This subroutine adds the data at next_in/avail_in to the output history
527 * without performing any output. The output buffer must be "caught up";
528 * i.e. no pending output (hence s->read equals s->write), and the state must
529 * be BLOCKS (i.e. we should be willing to see the start of a series of
530 * BLOCKS). On exit, the output will also be caught up, and the checksum
531 * will have been updated if need be.
532 */
533
534int inflateIncomp(
535 z_stream *z
536)
537{
538 if (z->state->mode != BLOCKS)
539 return Z_DATA_ERROR;
540 return inflate_addhistory(z->state->blocks, z);
541}
542
543
544int inflateSync(
545 z_stream *z
546)
547{
548 uInt n; /* number of bytes to look at */
549 Bytef *p; /* pointer to bytes */
550 uInt m; /* number of marker bytes found in a row */
551 uLong r, w; /* temporaries to save total_in and total_out */
552
553 /* set up */
554 if (z == Z_NULL || z->state == Z_NULL)
555 return Z_STREAM_ERROR;
556 if (z->state->mode != BAD)
557 {
558 z->state->mode = BAD;
559 z->state->sub.marker = 0;
560 }
561 if ((n = z->avail_in) == 0)
562 return Z_BUF_ERROR;
563 p = z->next_in;
564 m = z->state->sub.marker;
565
566 /* search */
567 while (n && m < 4)
568 {
569 if (*p == (Byte)(m < 2 ? 0 : 0xff))
570 m++;
571 else if (*p)
572 m = 0;
573 else
574 m = 4 - m;
575 p++, n--;
576 }
577
578 /* restore */
579 z->total_in += p - z->next_in;
580 z->next_in = p;
581 z->avail_in = n;
582 z->state->sub.marker = m;
583
584 /* return no joy or set up to restart on a new block */
585 if (m != 4)
586 return Z_DATA_ERROR;
587 r = z->total_in; w = z->total_out;
588 inflateReset(z);
589 z->total_in = r; z->total_out = w;
590 z->state->mode = BLOCKS;
591 return Z_OK;
592}
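/* Editor's note -- illustrative sketch, not part of the original file:
 * the search loop in inflateSync() above scans for the four-byte marker
 * 00 00 FF FF (the header of an empty stored block), with `m` counting how
 * many marker bytes currently match.  A minimal standalone rendering of the
 * same state machine over a plain byte buffer (names are hypothetical):
 */
#include <stdio.h>

static long find_sync_marker(const unsigned char *buf, long len)
{
    unsigned m = 0;     /* marker bytes matched so far, as in inflateSync() */
    long i;

    for (i = 0; i < len; i++) {
        if (buf[i] == (m < 2 ? 0x00 : 0xff))
            m++;                /* next marker byte matched */
        else if (buf[i] != 0)
            m = 0;              /* mismatch on a nonzero byte: start over */
        else
            m = 4 - m;          /* a zero byte keeps a shorter partial match */
        if (m == 4)
            return i - 3;       /* offset of the 00 00 FF FF sequence */
    }
    return -1;                  /* not found */
}

int main(void)
{
    const unsigned char data[] = { 0x12, 0x00, 0x00, 0x00, 0xff, 0xff, 0x34 };

    /* prints "marker at offset 2" for this input */
    printf("marker at offset %ld\n", find_sync_marker(data, (long)sizeof data));
    return 0;
}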
593
594#undef NEEDBYTE
595#undef NEXTBYTE
596
597/*+++++*/
598/* infutil.h -- types and macros common to blocks and codes
599 * Copyright (C) 1995 Mark Adler
600 * For conditions of distribution and use, see copyright notice in zlib.h
601 */
602
603/* WARNING: this file should *not* be used by applications. It is
604 part of the implementation of the compression library and is
605 subject to change. Applications should only use zlib.h.
606 */
607
608/* inflate blocks semi-private state */
609struct inflate_blocks_state {
610
611 /* mode */
612 enum {
613 TYPE, /* get type bits (3, including end bit) */
614 LENS, /* get lengths for stored */
615 STORED, /* processing stored block */
616 TABLE, /* get table lengths */
617 BTREE, /* get bit lengths tree for a dynamic block */
618 DTREE, /* get length, distance trees for a dynamic block */
619 CODES, /* processing fixed or dynamic block */
620 DRY, /* output remaining window bytes */
621 DONEB, /* finished last block, done */
622 BADB} /* got a data error--stuck here */
623 mode; /* current inflate_block mode */
624
625 /* mode dependent information */
626 union {
627 uInt left; /* if STORED, bytes left to copy */
628 struct {
629 uInt table; /* table lengths (14 bits) */
630 uInt index; /* index into blens (or border) */
631 uIntf *blens; /* bit lengths of codes */
632 uInt bb; /* bit length tree depth */
633 inflate_huft *tb; /* bit length decoding tree */
634 int nblens; /* # elements allocated at blens */
635 } trees; /* if DTREE, decoding info for trees */
636 struct {
637 inflate_huft *tl, *td; /* trees to free */
638 inflate_codes_statef
639 *codes;
640 } decode; /* if CODES, current state */
641 } sub; /* submode */
642 uInt last; /* true if this block is the last block */
643
644 /* mode independent information */
645 uInt bitk; /* bits in bit buffer */
646 uLong bitb; /* bit buffer */
647 Bytef *window; /* sliding window */
648 Bytef *end; /* one byte after sliding window */
649 Bytef *read; /* window read pointer */
650 Bytef *write; /* window write pointer */
651 check_func checkfn; /* check function */
652 uLong check; /* check on output */
653
654};
655
656
657/* defines for inflate input/output */
658/* update pointers and return */
659#define UPDBITS {s->bitb=b;s->bitk=k;}
660#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
661#define UPDOUT {s->write=q;}
662#define UPDATE {UPDBITS UPDIN UPDOUT}
663#define LEAVE {UPDATE return inflate_flush(s,z,r);}
664/* get bytes and bits */
665#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
666#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
667#define NEXTBYTE (n--,*p++)
668#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
669#define DUMPBITS(j) {b>>=(j);k-=(j);}
670/* output bytes */
671#define WAVAIL (q<s->read?s->read-q-1:s->end-q)
672#define LOADOUT {q=s->write;m=WAVAIL;}
673#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
674#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
675#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
676#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
677/* load local pointers */
678#define LOAD {LOADIN LOADOUT}
679
680/* And'ing with mask[n] masks the lower n bits */
681local uInt inflate_mask[] = {
682 0x0000,
683 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
684 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
685};
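/* Editor's note -- illustrative sketch, not part of the original file:
 * the bit buffer is filled LSB-first, so inflate_mask[n] is simply
 * (1 << n) - 1, and the NEEDBITS/DUMPBITS macros above amount to "or the
 * next byte in above the k bits already held, then keep the low j bits".
 * A standalone rendering of that pattern (the input bytes are arbitrary):
 */
#include <stdio.h>

int main(void)
{
    const unsigned char in[] = { 0xa5, 0x3c };  /* arbitrary input bytes */
    unsigned long b = 0;                        /* bit buffer, LSB first */
    unsigned k = 0;                             /* bits held in b */
    unsigned n = 0;                             /* next input byte index */
    unsigned widths[2] = { 3, 5 };              /* two fields: 3 then 5 bits */
    unsigned i;

    for (i = 0; i < 2; i++) {
        unsigned j = widths[i];
        while (k < j) {                         /* NEEDBITS(j) */
            b |= (unsigned long)in[n++] << k;
            k += 8;
        }
        /* (1 << j) - 1 plays the role of inflate_mask[j] */
        printf("field %u = %lu\n", i, b & ((1ul << j) - 1));
        b >>= j;                                /* DUMPBITS(j) */
        k -= j;
    }
    /* for input byte 0xa5 this prints 5 (low 3 bits) and 20 (next 5 bits) */
    return 0;
}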
686
687/* copy as much as possible from the sliding window to the output area */
688local int inflate_flush OF((
689 inflate_blocks_statef *,
690 z_stream *,
691 int));
692
693/*+++++*/
694/* inffast.h -- header to use inffast.c
695 * Copyright (C) 1995 Mark Adler
696 * For conditions of distribution and use, see copyright notice in zlib.h
697 */
698
699/* WARNING: this file should *not* be used by applications. It is
700 part of the implementation of the compression library and is
701 subject to change. Applications should only use zlib.h.
702 */
703
704local int inflate_fast OF((
705 uInt,
706 uInt,
707 inflate_huft *,
708 inflate_huft *,
709 inflate_blocks_statef *,
710 z_stream *));
711
712
713/*+++++*/
714/* infblock.c -- interpret and process block types to last block
715 * Copyright (C) 1995 Mark Adler
716 * For conditions of distribution and use, see copyright notice in zlib.h
717 */
718
719/* Table for deflate from PKZIP's appnote.txt. */
720local uInt border[] = { /* Order of the bit length code lengths */
721 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
722
723/*
724 Notes beyond the 1.93a appnote.txt:
725
726 1. Distance pointers never point before the beginning of the output
727 stream.
728 2. Distance pointers can point back across blocks, up to 32k away.
729 3. There is an implied maximum of 7 bits for the bit length table and
730 15 bits for the actual data.
731 4. If only one code exists, then it is encoded using one bit. (Zero
732 would be more efficient, but perhaps a little confusing.) If two
733 codes exist, they are coded using one bit each (0 and 1).
734 5. There is no way of sending zero distance codes--a dummy must be
735 sent if there are none. (History: a pre 2.0 version of PKZIP would
736 store blocks with no distance codes, but this was discovered to be
737 too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
738 zero distance codes, which is sent as one code of zero bits in
739 length.
740 6. There are up to 286 literal/length codes. Code 256 represents the
741 end-of-block. Note however that the static length tree defines
742 288 codes just to fill out the Huffman codes. Codes 286 and 287
743 cannot be used though, since there is no length base or extra bits
744 defined for them. Similarily, there are up to 30 distance codes.
745 However, static trees define 32 codes (all 5 bits) to fill out the
746 Huffman codes, but the last two had better not show up in the data.
747 7. Unzip can check dynamic Huffman blocks for complete code sets.
748 The exception is that a single code would not be complete (see #4).
 749 8. The five bits following the block type are really the number of
750 literal codes sent minus 257.
751 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
752 (1+6+6). Therefore, to output three times the length, you output
753 three codes (1+1+1), whereas to output four times the same length,
754 you only need two codes (1+3). Hmm.
755 10. In the tree reconstruction algorithm, Code = Code + Increment
756 only if BitLength(i) is not zero. (Pretty obvious.)
757 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
758 12. Note: length code 284 can represent 227-258, but length code 285
759 really is 258. The last length deserves its own, short code
760 since it gets used a lot in very redundant files. The length
761 258 is special since 258 - 3 (the min match length) is 255.
762 13. The literal/length and distance code bit lengths are read as a
763 single stream of lengths. It is possible (and advantageous) for
764 a repeat code (16, 17, or 18) to go across the boundary between
765 the two sets of lengths.
766 */
767
768
769local void inflate_blocks_reset(
770 inflate_blocks_statef *s,
771 z_stream *z,
772 uLongf *c
773)
774{
775 if (s->checkfn != Z_NULL)
776 *c = s->check;
777 if (s->mode == BTREE || s->mode == DTREE)
778 ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
779 if (s->mode == CODES)
780 {
781 inflate_codes_free(s->sub.decode.codes, z);
782 inflate_trees_free(s->sub.decode.td, z);
783 inflate_trees_free(s->sub.decode.tl, z);
784 }
785 s->mode = TYPE;
786 s->bitk = 0;
787 s->bitb = 0;
788 s->read = s->write = s->window;
789 if (s->checkfn != Z_NULL)
790 s->check = (*s->checkfn)(0L, Z_NULL, 0);
791 Trace((stderr, "inflate: blocks reset\n"));
792}
793
794
795local inflate_blocks_statef *inflate_blocks_new(
796 z_stream *z,
797 check_func c,
798 uInt w
799)
800{
801 inflate_blocks_statef *s;
802
803 if ((s = (inflate_blocks_statef *)ZALLOC
804 (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
805 return s;
806 if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
807 {
808 ZFREE(z, s, sizeof(struct inflate_blocks_state));
809 return Z_NULL;
810 }
811 s->end = s->window + w;
812 s->checkfn = c;
813 s->mode = TYPE;
814 Trace((stderr, "inflate: blocks allocated\n"));
815 inflate_blocks_reset(s, z, &s->check);
816 return s;
817}
818
819
820local int inflate_blocks(
821 inflate_blocks_statef *s,
822 z_stream *z,
823 int r
824)
825{
826 uInt t; /* temporary storage */
827 uLong b; /* bit buffer */
828 uInt k; /* bits in bit buffer */
829 Bytef *p; /* input data pointer */
830 uInt n; /* bytes available there */
831 Bytef *q; /* output window write pointer */
832 uInt m; /* bytes to end of window or read pointer */
833
834 /* copy input/output information to locals (UPDATE macro restores) */
835 LOAD
836
837 /* process input based on current state */
838 while (1) switch (s->mode)
839 {
840 case TYPE:
841 NEEDBITS(3)
842 t = (uInt)b & 7;
843 s->last = t & 1;
844 switch (t >> 1)
845 {
846 case 0: /* stored */
847 Trace((stderr, "inflate: stored block%s\n",
848 s->last ? " (last)" : ""));
849 DUMPBITS(3)
850 t = k & 7; /* go to byte boundary */
851 DUMPBITS(t)
852 s->mode = LENS; /* get length of stored block */
853 break;
854 case 1: /* fixed */
855 Trace((stderr, "inflate: fixed codes block%s\n",
856 s->last ? " (last)" : ""));
857 {
858 uInt bl, bd;
859 inflate_huft *tl, *td;
860
861 inflate_trees_fixed(&bl, &bd, &tl, &td);
862 s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
863 if (s->sub.decode.codes == Z_NULL)
864 {
865 r = Z_MEM_ERROR;
866 LEAVE
867 }
868 s->sub.decode.tl = Z_NULL; /* don't try to free these */
869 s->sub.decode.td = Z_NULL;
870 }
871 DUMPBITS(3)
872 s->mode = CODES;
873 break;
874 case 2: /* dynamic */
875 Trace((stderr, "inflate: dynamic codes block%s\n",
876 s->last ? " (last)" : ""));
877 DUMPBITS(3)
878 s->mode = TABLE;
879 break;
880 case 3: /* illegal */
881 DUMPBITS(3)
882 s->mode = BADB;
883 z->msg = "invalid block type";
884 r = Z_DATA_ERROR;
885 LEAVE
886 }
887 break;
888 case LENS:
889 NEEDBITS(32)
890 if (((~b) >> 16) != (b & 0xffff))
891 {
892 s->mode = BADB;
893 z->msg = "invalid stored block lengths";
894 r = Z_DATA_ERROR;
895 LEAVE
896 }
897 s->sub.left = (uInt)b & 0xffff;
898 b = k = 0; /* dump bits */
899 Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
900 s->mode = s->sub.left ? STORED : TYPE;
901 break;
902 case STORED:
903 if (n == 0)
904 LEAVE
905 NEEDOUT
906 t = s->sub.left;
907 if (t > n) t = n;
908 if (t > m) t = m;
909 zmemcpy(q, p, t);
910 p += t; n -= t;
911 q += t; m -= t;
912 if ((s->sub.left -= t) != 0)
913 break;
914 Tracev((stderr, "inflate: stored end, %lu total out\n",
915 z->total_out + (q >= s->read ? q - s->read :
916 (s->end - s->read) + (q - s->window))));
917 s->mode = s->last ? DRY : TYPE;
918 break;
919 case TABLE:
920 NEEDBITS(14)
921 s->sub.trees.table = t = (uInt)b & 0x3fff;
922#ifndef PKZIP_BUG_WORKAROUND
923 if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
924 {
925 s->mode = BADB;
926 z->msg = "too many length or distance symbols";
927 r = Z_DATA_ERROR;
928 LEAVE
929 }
930#endif
931 t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
932 if (t < 19)
933 t = 19;
934 if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
935 {
936 r = Z_MEM_ERROR;
937 LEAVE
938 }
939 s->sub.trees.nblens = t;
940 DUMPBITS(14)
941 s->sub.trees.index = 0;
942 Tracev((stderr, "inflate: table sizes ok\n"));
943 s->mode = BTREE;
944 case BTREE:
945 while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
946 {
947 NEEDBITS(3)
948 s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
949 DUMPBITS(3)
950 }
951 while (s->sub.trees.index < 19)
952 s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
953 s->sub.trees.bb = 7;
954 t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
955 &s->sub.trees.tb, z);
956 if (t != Z_OK)
957 {
958 r = t;
959 if (r == Z_DATA_ERROR)
960 s->mode = BADB;
961 LEAVE
962 }
963 s->sub.trees.index = 0;
964 Tracev((stderr, "inflate: bits tree ok\n"));
965 s->mode = DTREE;
966 case DTREE:
967 while (t = s->sub.trees.table,
968 s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
969 {
970 inflate_huft *h;
971 uInt i, j, c;
972
973 t = s->sub.trees.bb;
974 NEEDBITS(t)
975 h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
976 t = h->word.what.Bits;
977 c = h->more.Base;
978 if (c < 16)
979 {
980 DUMPBITS(t)
981 s->sub.trees.blens[s->sub.trees.index++] = c;
982 }
983 else /* c == 16..18 */
984 {
985 i = c == 18 ? 7 : c - 14;
986 j = c == 18 ? 11 : 3;
987 NEEDBITS(t + i)
988 DUMPBITS(t)
989 j += (uInt)b & inflate_mask[i];
990 DUMPBITS(i)
991 i = s->sub.trees.index;
992 t = s->sub.trees.table;
993 if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
994 (c == 16 && i < 1))
995 {
996 s->mode = BADB;
997 z->msg = "invalid bit length repeat";
998 r = Z_DATA_ERROR;
999 LEAVE
1000 }
1001 c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
1002 do {
1003 s->sub.trees.blens[i++] = c;
1004 } while (--j);
1005 s->sub.trees.index = i;
1006 }
1007 }
1008 inflate_trees_free(s->sub.trees.tb, z);
1009 s->sub.trees.tb = Z_NULL;
1010 {
1011 uInt bl, bd;
1012 inflate_huft *tl, *td;
1013 inflate_codes_statef *c;
1014
1015 bl = 9; /* must be <= 9 for lookahead assumptions */
1016 bd = 6; /* must be <= 9 for lookahead assumptions */
1017 t = s->sub.trees.table;
1018 t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
1019 s->sub.trees.blens, &bl, &bd, &tl, &td, z);
1020 if (t != Z_OK)
1021 {
1022 if (t == (uInt)Z_DATA_ERROR)
1023 s->mode = BADB;
1024 r = t;
1025 LEAVE
1026 }
1027 Tracev((stderr, "inflate: trees ok\n"));
1028 if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
1029 {
1030 inflate_trees_free(td, z);
1031 inflate_trees_free(tl, z);
1032 r = Z_MEM_ERROR;
1033 LEAVE
1034 }
1035 ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
1036 s->sub.decode.codes = c;
1037 s->sub.decode.tl = tl;
1038 s->sub.decode.td = td;
1039 }
1040 s->mode = CODES;
1041 case CODES:
1042 UPDATE
1043 if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
1044 return inflate_flush(s, z, r);
1045 r = Z_OK;
1046 inflate_codes_free(s->sub.decode.codes, z);
1047 inflate_trees_free(s->sub.decode.td, z);
1048 inflate_trees_free(s->sub.decode.tl, z);
1049 LOAD
1050 Tracev((stderr, "inflate: codes end, %lu total out\n",
1051 z->total_out + (q >= s->read ? q - s->read :
1052 (s->end - s->read) + (q - s->window))));
1053 if (!s->last)
1054 {
1055 s->mode = TYPE;
1056 break;
1057 }
1058 if (k > 7) /* return unused byte, if any */
1059 {
1060 Assert(k < 16, "inflate_codes grabbed too many bytes")
1061 k -= 8;
1062 n++;
1063 p--; /* can always return one */
1064 }
1065 s->mode = DRY;
1066 case DRY:
1067 FLUSH
1068 if (s->read != s->write)
1069 LEAVE
1070 s->mode = DONEB;
1071 case DONEB:
1072 r = Z_STREAM_END;
1073 LEAVE
1074 case BADB:
1075 r = Z_DATA_ERROR;
1076 LEAVE
1077 default:
1078 r = Z_STREAM_ERROR;
1079 LEAVE
1080 }
1081}
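/* Editor's note -- illustrative sketch, not part of the original file:
 * the LENS case above requires the 16-bit stored-block length (the low half
 * of the 32 loaded bits) to be followed by its one's complement (the high
 * half).  A standalone rendering of that check; the explicit & 0xffff guards
 * against unsigned long being wider than 32 bits on the host:
 */
#include <stdio.h>

int main(void)
{
    unsigned len  = 0x0123;                       /* stored block length */
    unsigned nlen = (~len) & 0xffff;              /* its one's complement */
    unsigned long b = ((unsigned long)nlen << 16) | len;  /* bit buffer view */

    if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
        printf("invalid stored block lengths\n");
    else
        printf("stored length ok: %lu bytes\n", b & 0xffff);  /* 291 bytes */
    return 0;
}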
1082
1083
1084local int inflate_blocks_free(
1085 inflate_blocks_statef *s,
1086 z_stream *z,
1087 uLongf *c
1088)
1089{
1090 inflate_blocks_reset(s, z, c);
1091 ZFREE(z, s->window, s->end - s->window);
1092 ZFREE(z, s, sizeof(struct inflate_blocks_state));
1093 Trace((stderr, "inflate: blocks freed\n"));
1094 return Z_OK;
1095}
1096
1097/*
1098 * This subroutine adds the data at next_in/avail_in to the output history
1099 * without performing any output. The output buffer must be "caught up";
1100 * i.e. no pending output (hence s->read equals s->write), and the state must
1101 * be BLOCKS (i.e. we should be willing to see the start of a series of
1102 * BLOCKS). On exit, the output will also be caught up, and the checksum
1103 * will have been updated if need be.
1104 */
1105local int inflate_addhistory(
1106 inflate_blocks_statef *s,
1107 z_stream *z
1108)
1109{
1110 uLong b; /* bit buffer */ /* NOT USED HERE */
1111 uInt k; /* bits in bit buffer */ /* NOT USED HERE */
1112 uInt t; /* temporary storage */
1113 Bytef *p; /* input data pointer */
1114 uInt n; /* bytes available there */
1115 Bytef *q; /* output window write pointer */
1116 uInt m; /* bytes to end of window or read pointer */
1117
1118 if (s->read != s->write)
1119 return Z_STREAM_ERROR;
1120 if (s->mode != TYPE)
1121 return Z_DATA_ERROR;
1122
1123 /* we're ready to rock */
1124 LOAD
1125 /* while there is input ready, copy to output buffer, moving
1126 * pointers as needed.
1127 */
1128 while (n) {
1129 t = n; /* how many to do */
1130 /* is there room until end of buffer? */
1131 if (t > m) t = m;
1132 /* update check information */
1133 if (s->checkfn != Z_NULL)
1134 s->check = (*s->checkfn)(s->check, q, t);
1135 zmemcpy(q, p, t);
1136 q += t;
1137 p += t;
1138 n -= t;
1139 z->total_out += t;
1140 s->read = q; /* drag read pointer forward */
1141/* WRAP */ /* expand WRAP macro by hand to handle s->read */
1142 if (q == s->end) {
1143 s->read = q = s->window;
1144 m = WAVAIL;
1145 }
1146 }
1147 UPDATE
1148 return Z_OK;
1149}
1150
1151
1152/*
1153 * At the end of a Deflate-compressed PPP packet, we expect to have seen
1154 * a `stored' block type value but not the (zero) length bytes.
1155 */
1156local int inflate_packet_flush(
1157 inflate_blocks_statef *s
1158)
1159{
1160 if (s->mode != LENS)
1161 return Z_DATA_ERROR;
1162 s->mode = TYPE;
1163 return Z_OK;
1164}
1165
1166
1167/*+++++*/
1168/* inftrees.c -- generate Huffman trees for efficient decoding
1169 * Copyright (C) 1995 Mark Adler
1170 * For conditions of distribution and use, see copyright notice in zlib.h
1171 */
1172
1173/* simplify the use of the inflate_huft type with some defines */
1174#define base more.Base
1175#define next more.Next
1176#define exop word.what.Exop
1177#define bits word.what.Bits
1178
1179
1180local int huft_build OF((
1181 uIntf *, /* code lengths in bits */
1182 uInt, /* number of codes */
1183 uInt, /* number of "simple" codes */
1184 uIntf *, /* list of base values for non-simple codes */
1185 uIntf *, /* list of extra bits for non-simple codes */
1186 inflate_huft * FAR*,/* result: starting table */
1187 uIntf *, /* maximum lookup bits (returns actual) */
1188 z_stream *)); /* for zalloc function */
1189
1190local voidpf falloc OF((
1191 voidpf, /* opaque pointer (not used) */
1192 uInt, /* number of items */
1193 uInt)); /* size of item */
1194
1195local void ffree OF((
1196 voidpf q, /* opaque pointer (not used) */
1197 voidpf p, /* what to free (not used) */
1198 uInt n)); /* number of bytes (not used) */
1199
1200/* Tables for deflate from PKZIP's appnote.txt. */
1201local uInt cplens[] = { /* Copy lengths for literal codes 257..285 */
1202 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
1203 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
1204 /* actually lengths - 2; also see note #13 above about 258 */
1205local uInt cplext[] = { /* Extra bits for literal codes 257..285 */
1206 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
1207 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 192, 192}; /* 192==invalid */
1208local uInt cpdist[] = { /* Copy offsets for distance codes 0..29 */
1209 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
1210 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
1211 8193, 12289, 16385, 24577};
1212local uInt cpdext[] = { /* Extra bits for distance codes */
1213 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
1214 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
1215 12, 12, 13, 13};
1216
1217/*
1218 Huffman code decoding is performed using a multi-level table lookup.
1219 The fastest way to decode is to simply build a lookup table whose
1220 size is determined by the longest code. However, the time it takes
1221 to build this table can also be a factor if the data being decoded
1222 is not very long. The most common codes are necessarily the
1223 shortest codes, so those codes dominate the decoding time, and hence
1224 the speed. The idea is you can have a shorter table that decodes the
1225 shorter, more probable codes, and then point to subsidiary tables for
1226 the longer codes. The time it costs to decode the longer codes is
1227 then traded against the time it takes to make longer tables.
1228
 1229 The results of this trade are in the variables lbits and dbits
1230 below. lbits is the number of bits the first level table for literal/
1231 length codes can decode in one step, and dbits is the same thing for
1232 the distance codes. Subsequent tables are also less than or equal to
1233 those sizes. These values may be adjusted either when all of the
1234 codes are shorter than that, in which case the longest code length in
1235 bits is used, or when the shortest code is *longer* than the requested
1236 table size, in which case the length of the shortest code in bits is
1237 used.
1238
1239 There are two different values for the two tables, since they code a
1240 different number of possibilities each. The literal/length table
1241 codes 286 possible values, or in a flat code, a little over eight
1242 bits. The distance table codes 30 possible values, or a little less
1243 than five bits, flat. The optimum values for speed end up being
1244 about one bit more than those, so lbits is 8+1 and dbits is 5+1.
1245 The optimum values may differ though from machine to machine, and
1246 possibly even between compilers. Your mileage may vary.
1247 */
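/* Editor's note -- illustrative sketch, not part of the original file:
 * huft_build() below begins the same way RFC 1951 builds canonical Huffman
 * codes: count how many codes exist per bit length (c[]), then turn those
 * counts into starting offsets (x[]).  The sketch finishes the assignment
 * using RFC 1951's convention of consecutive code values within each
 * length; the symbol lengths here are made up for illustration:
 */
#include <stdio.h>

#define MAXBITS 15

int main(void)
{
    unsigned lens[] = { 2, 1, 3, 3 };         /* bit lengths for symbols A..D */
    unsigned nsyms = 4;
    unsigned count[MAXBITS + 1] = { 0 };      /* codes per bit length */
    unsigned next_code[MAXBITS + 1] = { 0 };  /* first code value of each length */
    unsigned code = 0, bits, i;

    for (i = 0; i < nsyms; i++)
        count[lens[i]]++;

    for (bits = 1; bits <= MAXBITS; bits++) {
        code = (code + count[bits - 1]) << 1;
        next_code[bits] = code;
    }

    /* A gets code 2 (binary 10), B gets 0 (0), C gets 6 (110), D gets 7 (111) */
    for (i = 0; i < nsyms; i++) {
        unsigned c = next_code[lens[i]]++;
        printf("symbol %c: length %u, code value %u\n", 'A' + (int)i, lens[i], c);
    }
    return 0;
}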
1248
1249
1250/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
1251#define BMAX 15 /* maximum bit length of any code */
1252#define N_MAX 288 /* maximum number of codes in any set */
1253
1254#ifdef DEBUG_ZLIB
1255 uInt inflate_hufts;
1256#endif
1257
1258local int huft_build(
1259 uIntf *b, /* code lengths in bits (all assumed <= BMAX) */
1260 uInt n, /* number of codes (assumed <= N_MAX) */
1261 uInt s, /* number of simple-valued codes (0..s-1) */
1262 uIntf *d, /* list of base values for non-simple codes */
1263 uIntf *e, /* list of extra bits for non-simple codes */
1264 inflate_huft * FAR *t, /* result: starting table */
1265 uIntf *m, /* maximum lookup bits, returns actual */
1266 z_stream *zs /* for zalloc function */
1267)
1268/* Given a list of code lengths and a maximum table size, make a set of
1269 tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
1270 if the given code set is incomplete (the tables are still built in this
1271 case), Z_DATA_ERROR if the input is invalid (all zero length codes or an
1272 over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory. */
1273{
1274
1275 uInt a; /* counter for codes of length k */
1276 uInt c[BMAX+1]; /* bit length count table */
1277 uInt f; /* i repeats in table every f entries */
1278 int g; /* maximum code length */
1279 int h; /* table level */
1280 register uInt i; /* counter, current code */
1281 register uInt j; /* counter */
1282 register int k; /* number of bits in current code */
1283 int l; /* bits per table (returned in m) */
1284 register uIntf *p; /* pointer into c[], b[], or v[] */
1285 inflate_huft *q; /* points to current table */
1286 struct inflate_huft_s r; /* table entry for structure assignment */
1287 inflate_huft *u[BMAX]; /* table stack */
1288 uInt v[N_MAX]; /* values in order of bit length */
1289 register int w; /* bits before this table == (l * h) */
1290 uInt x[BMAX+1]; /* bit offsets, then code stack */
1291 uIntf *xp; /* pointer into x */
1292 int y; /* number of dummy codes added */
1293 uInt z; /* number of entries in current table */
1294
1295
1296 /* Generate counts for each bit length */
1297 p = c;
1298#define C0 *p++ = 0;
1299#define C2 C0 C0 C0 C0
1300#define C4 C2 C2 C2 C2
1301 C4 /* clear c[]--assume BMAX+1 is 16 */
1302 p = b; i = n;
1303 do {
1304 c[*p++]++; /* assume all entries <= BMAX */
1305 } while (--i);
1306 if (c[0] == n) /* null input--all zero length codes */
1307 {
1308 *t = (inflate_huft *)Z_NULL;
1309 *m = 0;
1310 return Z_DATA_ERROR;
1311 }
1312
1313
1314 /* Find minimum and maximum length, bound *m by those */
1315 l = *m;
1316 for (j = 1; j <= BMAX; j++)
1317 if (c[j])
1318 break;
1319 k = j; /* minimum code length */
1320 if ((uInt)l < j)
1321 l = j;
1322 for (i = BMAX; i; i--)
1323 if (c[i])
1324 break;
1325 g = i; /* maximum code length */
1326 if ((uInt)l > i)
1327 l = i;
1328 *m = l;
1329
1330
1331 /* Adjust last length count to fill out codes, if needed */
1332 for (y = 1 << j; j < i; j++, y <<= 1)
1333 if ((y -= c[j]) < 0)
1334 return Z_DATA_ERROR;
1335 if ((y -= c[i]) < 0)
1336 return Z_DATA_ERROR;
1337 c[i] += y;
1338
1339
1340 /* Generate starting offsets into the value table for each length */
1341 x[1] = j = 0;
1342 p = c + 1; xp = x + 2;
1343 while (--i) { /* note that i == g from above */
1344 *xp++ = (j += *p++);
1345 }
1346
1347
1348 /* Make a table of values in order of bit lengths */
1349 p = b; i = 0;
1350 do {
1351 if ((j = *p++) != 0)
1352 v[x[j]++] = i;
1353 } while (++i < n);
1354 n = x[g]; /* set n to length of v */
1355
1356
1357 /* Generate the Huffman codes and for each, make the table entries */
1358 x[0] = i = 0; /* first Huffman code is zero */
1359 p = v; /* grab values in bit order */
1360 h = -1; /* no tables yet--level -1 */
1361 w = -l; /* bits decoded == (l * h) */
1362 u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
1363 q = (inflate_huft *)Z_NULL; /* ditto */
1364 z = 0; /* ditto */
1365
1366 /* go through the bit lengths (k already is bits in shortest code) */
1367 for (; k <= g; k++)
1368 {
1369 a = c[k];
1370 while (a--)
1371 {
1372 /* here i is the Huffman code of length k bits for value *p */
1373 /* make tables up to required level */
1374 while (k > w + l)
1375 {
1376 h++;
1377 w += l; /* previous table always l bits */
1378
1379 /* compute minimum size table less than or equal to l bits */
1380 z = (z = g - w) > (uInt)l ? l : z; /* table size upper limit */
1381 if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
1382 { /* too few codes for k-w bit table */
1383 f -= a + 1; /* deduct codes from patterns left */
1384 xp = c + k;
1385 if (j < z)
1386 while (++j < z) /* try smaller tables up to z bits */
1387 {
1388 if ((f <<= 1) <= *++xp)
1389 break; /* enough codes to use up j bits */
1390 f -= *xp; /* else deduct codes from patterns */
1391 }
1392 }
1393 z = 1 << j; /* table entries for j-bit table */
1394
1395 /* allocate and link in new table */
1396 if ((q = (inflate_huft *)ZALLOC
1397 (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
1398 {
1399 if (h)
1400 inflate_trees_free(u[0], zs);
1401 return Z_MEM_ERROR; /* not enough memory */
1402 }
1403 q->word.Nalloc = z + 1;
1404#ifdef DEBUG_ZLIB
1405 inflate_hufts += z + 1;
1406#endif
1407 *t = q + 1; /* link to list for huft_free() */
1408 *(t = &(q->next)) = Z_NULL;
1409 u[h] = ++q; /* table starts after link */
1410
1411 /* connect to last table, if there is one */
1412 if (h)
1413 {
1414 x[h] = i; /* save pattern for backing up */
1415 r.bits = (Byte)l; /* bits to dump before this table */
1416 r.exop = (Byte)j; /* bits in this table */
1417 r.next = q; /* pointer to this table */
1418 j = i >> (w - l); /* (get around Turbo C bug) */
1419 u[h-1][j] = r; /* connect to last table */
1420 }
1421 }
1422
1423 /* set up table entry in r */
1424 r.bits = (Byte)(k - w);
1425 if (p >= v + n)
1426 r.exop = 128 + 64; /* out of values--invalid code */
1427 else if (*p < s)
1428 {
1429 r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
1430 r.base = *p++; /* simple code is just the value */
1431 }
1432 else
1433 {
1434 r.exop = (Byte)e[*p - s] + 16 + 64; /* non-simple--look up in lists */
1435 r.base = d[*p++ - s];
1436 }
1437
1438 /* fill code-like entries with r */
1439 f = 1 << (k - w);
1440 for (j = i >> w; j < z; j += f)
1441 q[j] = r;
1442
1443 /* backwards increment the k-bit code i */
1444 for (j = 1 << (k - 1); i & j; j >>= 1)
1445 i ^= j;
1446 i ^= j;
1447
1448 /* backup over finished tables */
1449 while ((i & ((1 << w) - 1)) != x[h])
1450 {
1451 h--; /* don't need to update q */
1452 w -= l;
1453 }
1454 }
1455 }
1456
1457
1458 /* Return Z_BUF_ERROR if we were given an incomplete table */
1459 return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
1460}
1461
1462
1463local int inflate_trees_bits(
1464 uIntf *c, /* 19 code lengths */
1465 uIntf *bb, /* bits tree desired/actual depth */
1466 inflate_huft * FAR *tb, /* bits tree result */
1467 z_stream *z /* for zfree function */
1468)
1469{
1470 int r;
1471
1472 r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
1473 if (r == Z_DATA_ERROR)
1474 z->msg = "oversubscribed dynamic bit lengths tree";
1475 else if (r == Z_BUF_ERROR)
1476 {
1477 inflate_trees_free(*tb, z);
1478 z->msg = "incomplete dynamic bit lengths tree";
1479 r = Z_DATA_ERROR;
1480 }
1481 return r;
1482}
1483
1484
1485local int inflate_trees_dynamic(
1486 uInt nl, /* number of literal/length codes */
1487 uInt nd, /* number of distance codes */
1488 uIntf *c, /* that many (total) code lengths */
1489 uIntf *bl, /* literal desired/actual bit depth */
1490 uIntf *bd, /* distance desired/actual bit depth */
1491 inflate_huft * FAR *tl, /* literal/length tree result */
1492 inflate_huft * FAR *td, /* distance tree result */
1493 z_stream *z /* for zfree function */
1494)
1495{
1496 int r;
1497
1498 /* build literal/length tree */
1499 if ((r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z)) != Z_OK)
1500 {
1501 if (r == Z_DATA_ERROR)
1502 z->msg = "oversubscribed literal/length tree";
1503 else if (r == Z_BUF_ERROR)
1504 {
1505 inflate_trees_free(*tl, z);
1506 z->msg = "incomplete literal/length tree";
1507 r = Z_DATA_ERROR;
1508 }
1509 return r;
1510 }
1511
1512 /* build distance tree */
1513 if ((r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z)) != Z_OK)
1514 {
1515 if (r == Z_DATA_ERROR)
 1516 z->msg = "oversubscribed distance tree";
1517 else if (r == Z_BUF_ERROR) {
1518#ifdef PKZIP_BUG_WORKAROUND
1519 r = Z_OK;
1520 }
1521#else
1522 inflate_trees_free(*td, z);
 1523 z->msg = "incomplete distance tree";
1524 r = Z_DATA_ERROR;
1525 }
1526 inflate_trees_free(*tl, z);
1527 return r;
1528#endif
1529 }
1530
1531 /* done */
1532 return Z_OK;
1533}
1534
1535
1536/* build fixed tables only once--keep them here */
1537local int fixed_lock = 0;
1538local int fixed_built = 0;
1539#define FIXEDH 530 /* number of hufts used by fixed tables */
1540local uInt fixed_left = FIXEDH;
1541local inflate_huft fixed_mem[FIXEDH];
1542local uInt fixed_bl;
1543local uInt fixed_bd;
1544local inflate_huft *fixed_tl;
1545local inflate_huft *fixed_td;
1546
1547
1548local voidpf falloc(
1549 voidpf q, /* opaque pointer (not used) */
1550 uInt n, /* number of items */
1551 uInt s /* size of item */
1552)
1553{
1554 Assert(s == sizeof(inflate_huft) && n <= fixed_left,
1555 "inflate_trees falloc overflow");
1556 if (q) s++; /* to make some compilers happy */
1557 fixed_left -= n;
1558 return (voidpf)(fixed_mem + fixed_left);
1559}
1560
1561
1562local void ffree(
1563 voidpf q,
1564 voidpf p,
1565 uInt n
1566)
1567{
1568 Assert(0, "inflate_trees ffree called!");
1569 if (q) q = p; /* to make some compilers happy */
1570}
1571
1572
1573local int inflate_trees_fixed(
1574 uIntf *bl, /* literal desired/actual bit depth */
1575 uIntf *bd, /* distance desired/actual bit depth */
1576 inflate_huft * FAR *tl, /* literal/length tree result */
1577 inflate_huft * FAR *td /* distance tree result */
1578)
1579{
1580 /* build fixed tables if not built already--lock out other instances */
1581 while (++fixed_lock > 1)
1582 fixed_lock--;
1583 if (!fixed_built)
1584 {
1585 int k; /* temporary variable */
1586 unsigned c[288]; /* length list for huft_build */
1587 z_stream z; /* for falloc function */
1588
1589 /* set up fake z_stream for memory routines */
1590 z.zalloc = falloc;
1591 z.zfree = ffree;
1592 z.opaque = Z_NULL;
1593
1594 /* literal table */
1595 for (k = 0; k < 144; k++)
1596 c[k] = 8;
1597 for (; k < 256; k++)
1598 c[k] = 9;
1599 for (; k < 280; k++)
1600 c[k] = 7;
1601 for (; k < 288; k++)
1602 c[k] = 8;
1603 fixed_bl = 7;
1604 huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
1605
1606 /* distance table */
1607 for (k = 0; k < 30; k++)
1608 c[k] = 5;
1609 fixed_bd = 5;
1610 huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
1611
1612 /* done */
1613 fixed_built = 1;
1614 }
1615 fixed_lock--;
1616 *bl = fixed_bl;
1617 *bd = fixed_bd;
1618 *tl = fixed_tl;
1619 *td = fixed_td;
1620 return Z_OK;
1621}
1622
1623
1624local int inflate_trees_free(
1625 inflate_huft *t, /* table to free */
1626 z_stream *z /* for zfree function */
1627)
1628/* Free the malloc'ed tables built by huft_build(), which makes a linked
1629 list of the tables it made, with the links in a dummy first entry of
1630 each table. */
1631{
1632 register inflate_huft *p, *q;
1633
1634 /* Go through linked list, freeing from the malloced (t[-1]) address. */
1635 p = t;
1636 while (p != Z_NULL)
1637 {
1638 q = (--p)->next;
1639 ZFREE(z, p, p->word.Nalloc * sizeof(inflate_huft));
1640 p = q;
1641 }
1642 return Z_OK;
1643}
1644
1645/*+++++*/
1646/* infcodes.c -- process literals and length/distance pairs
1647 * Copyright (C) 1995 Mark Adler
1648 * For conditions of distribution and use, see copyright notice in zlib.h
1649 */
1650
1651/* simplify the use of the inflate_huft type with some defines */
1652#define base more.Base
1653#define next more.Next
1654#define exop word.what.Exop
1655#define bits word.what.Bits
1656
1657/* inflate codes private state */
1658struct inflate_codes_state {
1659
1660 /* mode */
1661 enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
1662 START, /* x: set up for LEN */
1663 LEN, /* i: get length/literal/eob next */
1664 LENEXT, /* i: getting length extra (have base) */
1665 DIST, /* i: get distance next */
1666 DISTEXT, /* i: getting distance extra */
1667 COPY, /* o: copying bytes in window, waiting for space */
1668 LIT, /* o: got literal, waiting for output space */
1669 WASH, /* o: got eob, possibly still output waiting */
1670 END, /* x: got eob and all data flushed */
1671 BADCODE} /* x: got error */
1672 mode; /* current inflate_codes mode */
1673
1674 /* mode dependent information */
1675 uInt len;
1676 union {
1677 struct {
1678 inflate_huft *tree; /* pointer into tree */
1679 uInt need; /* bits needed */
1680 } code; /* if LEN or DIST, where in tree */
1681 uInt lit; /* if LIT, literal */
1682 struct {
1683 uInt get; /* bits to get for extra */
1684 uInt dist; /* distance back to copy from */
1685 } copy; /* if EXT or COPY, where and how much */
1686 } sub; /* submode */
1687
1688 /* mode independent information */
1689 Byte lbits; /* ltree bits decoded per branch */
 1690 Byte dbits; /* dtree bits decoded per branch */
1691 inflate_huft *ltree; /* literal/length/eob tree */
1692 inflate_huft *dtree; /* distance tree */
1693
1694};
1695
1696
1697local inflate_codes_statef *inflate_codes_new(
1698 uInt bl,
1699 uInt bd,
1700 inflate_huft *tl,
1701 inflate_huft *td,
1702 z_stream *z
1703)
1704{
1705 inflate_codes_statef *c;
1706
1707 if ((c = (inflate_codes_statef *)
1708 ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
1709 {
1710 c->mode = START;
1711 c->lbits = (Byte)bl;
1712 c->dbits = (Byte)bd;
1713 c->ltree = tl;
1714 c->dtree = td;
1715 Tracev((stderr, "inflate: codes new\n"));
1716 }
1717 return c;
1718}
1719
1720
1721local int inflate_codes(
1722 inflate_blocks_statef *s,
1723 z_stream *z,
1724 int r
1725)
1726{
1727 uInt j; /* temporary storage */
1728 inflate_huft *t; /* temporary pointer */
1729 uInt e; /* extra bits or operation */
1730 uLong b; /* bit buffer */
1731 uInt k; /* bits in bit buffer */
1732 Bytef *p; /* input data pointer */
1733 uInt n; /* bytes available there */
1734 Bytef *q; /* output window write pointer */
1735 uInt m; /* bytes to end of window or read pointer */
1736 Bytef *f; /* pointer to copy strings from */
1737 inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
1738
1739 /* copy input/output information to locals (UPDATE macro restores) */
1740 LOAD
1741
1742 /* process input and output based on current state */
1743 while (1) switch (c->mode)
1744 { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
1745 case START: /* x: set up for LEN */
1746#ifndef SLOW
1747 if (m >= 258 && n >= 10)
1748 {
1749 UPDATE
1750 r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
1751 LOAD
1752 if (r != Z_OK)
1753 {
1754 c->mode = r == Z_STREAM_END ? WASH : BADCODE;
1755 break;
1756 }
1757 }
1758#endif /* !SLOW */
1759 c->sub.code.need = c->lbits;
1760 c->sub.code.tree = c->ltree;
1761 c->mode = LEN;
1762 case LEN: /* i: get length/literal/eob next */
1763 j = c->sub.code.need;
1764 NEEDBITS(j)
1765 t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
1766 DUMPBITS(t->bits)
1767 e = (uInt)(t->exop);
1768 if (e == 0) /* literal */
1769 {
1770 c->sub.lit = t->base;
1771 Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
1772 "inflate: literal '%c'\n" :
1773 "inflate: literal 0x%02x\n", t->base));
1774 c->mode = LIT;
1775 break;
1776 }
1777 if (e & 16) /* length */
1778 {
1779 c->sub.copy.get = e & 15;
1780 c->len = t->base;
1781 c->mode = LENEXT;
1782 break;
1783 }
1784 if ((e & 64) == 0) /* next table */
1785 {
1786 c->sub.code.need = e;
1787 c->sub.code.tree = t->next;
1788 break;
1789 }
1790 if (e & 32) /* end of block */
1791 {
1792 Tracevv((stderr, "inflate: end of block\n"));
1793 c->mode = WASH;
1794 break;
1795 }
1796 c->mode = BADCODE; /* invalid code */
1797 z->msg = "invalid literal/length code";
1798 r = Z_DATA_ERROR;
1799 LEAVE
1800 case LENEXT: /* i: getting length extra (have base) */
1801 j = c->sub.copy.get;
1802 NEEDBITS(j)
1803 c->len += (uInt)b & inflate_mask[j];
1804 DUMPBITS(j)
1805 c->sub.code.need = c->dbits;
1806 c->sub.code.tree = c->dtree;
1807 Tracevv((stderr, "inflate: length %u\n", c->len));
1808 c->mode = DIST;
1809 case DIST: /* i: get distance next */
1810 j = c->sub.code.need;
1811 NEEDBITS(j)
1812 t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
1813 DUMPBITS(t->bits)
1814 e = (uInt)(t->exop);
1815 if (e & 16) /* distance */
1816 {
1817 c->sub.copy.get = e & 15;
1818 c->sub.copy.dist = t->base;
1819 c->mode = DISTEXT;
1820 break;
1821 }
1822 if ((e & 64) == 0) /* next table */
1823 {
1824 c->sub.code.need = e;
1825 c->sub.code.tree = t->next;
1826 break;
1827 }
1828 c->mode = BADCODE; /* invalid code */
1829 z->msg = "invalid distance code";
1830 r = Z_DATA_ERROR;
1831 LEAVE
1832 case DISTEXT: /* i: getting distance extra */
1833 j = c->sub.copy.get;
1834 NEEDBITS(j)
1835 c->sub.copy.dist += (uInt)b & inflate_mask[j];
1836 DUMPBITS(j)
1837 Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
1838 c->mode = COPY;
1839 case COPY: /* o: copying bytes in window, waiting for space */
1840#ifndef __TURBOC__ /* Turbo C bug for following expression */
1841 f = (uInt)(q - s->window) < c->sub.copy.dist ?
1842 s->end - (c->sub.copy.dist - (q - s->window)) :
1843 q - c->sub.copy.dist;
1844#else
1845 f = q - c->sub.copy.dist;
1846 if ((uInt)(q - s->window) < c->sub.copy.dist)
1847 f = s->end - (c->sub.copy.dist - (q - s->window));
1848#endif
1849 while (c->len)
1850 {
1851 NEEDOUT
1852 OUTBYTE(*f++)
1853 if (f == s->end)
1854 f = s->window;
1855 c->len--;
1856 }
1857 c->mode = START;
1858 break;
1859 case LIT: /* o: got literal, waiting for output space */
1860 NEEDOUT
1861 OUTBYTE(c->sub.lit)
1862 c->mode = START;
1863 break;
1864 case WASH: /* o: got eob, possibly more output */
1865 FLUSH
1866 if (s->read != s->write)
1867 LEAVE
1868 c->mode = END;
1869 case END:
1870 r = Z_STREAM_END;
1871 LEAVE
1872 case BADCODE: /* x: got error */
1873 r = Z_DATA_ERROR;
1874 LEAVE
1875 default:
1876 r = Z_STREAM_ERROR;
1877 LEAVE
1878 }
1879}
1880
1881
1882local void inflate_codes_free(
1883 inflate_codes_statef *c,
1884 z_stream *z
1885)
1886{
1887 ZFREE(z, c, sizeof(struct inflate_codes_state));
1888 Tracev((stderr, "inflate: codes free\n"));
1889}
1890
1891/*+++++*/
1892/* inflate_util.c -- data and routines common to blocks and codes
1893 * Copyright (C) 1995 Mark Adler
1894 * For conditions of distribution and use, see copyright notice in zlib.h
1895 */
1896
1897/* copy as much as possible from the sliding window to the output area */
1898local int inflate_flush(
1899 inflate_blocks_statef *s,
1900 z_stream *z,
1901 int r
1902)
1903{
1904 uInt n;
1905 Bytef *p, *q;
1906
1907 /* local copies of source and destination pointers */
1908 p = z->next_out;
1909 q = s->read;
1910
1911 /* compute number of bytes to copy as far as end of window */
1912 n = (uInt)((q <= s->write ? s->write : s->end) - q);
1913 if (n > z->avail_out) n = z->avail_out;
1914 if (n && r == Z_BUF_ERROR) r = Z_OK;
1915
1916 /* update counters */
1917 z->avail_out -= n;
1918 z->total_out += n;
1919
1920 /* update check information */
1921 if (s->checkfn != Z_NULL)
1922 s->check = (*s->checkfn)(s->check, q, n);
1923
1924 /* copy as far as end of window */
1925 zmemcpy(p, q, n);
1926 p += n;
1927 q += n;
1928
1929 /* see if more to copy at beginning of window */
1930 if (q == s->end)
1931 {
1932 /* wrap pointers */
1933 q = s->window;
1934 if (s->write == s->end)
1935 s->write = s->window;
1936
1937 /* compute bytes to copy */
1938 n = (uInt)(s->write - q);
1939 if (n > z->avail_out) n = z->avail_out;
1940 if (n && r == Z_BUF_ERROR) r = Z_OK;
1941
1942 /* update counters */
1943 z->avail_out -= n;
1944 z->total_out += n;
1945
1946 /* update check information */
1947 if (s->checkfn != Z_NULL)
1948 s->check = (*s->checkfn)(s->check, q, n);
1949
1950 /* copy */
1951 zmemcpy(p, q, n);
1952 p += n;
1953 q += n;
1954 }
1955
1956 /* update pointers */
1957 z->next_out = p;
1958 s->read = q;
1959
1960 /* done */
1961 return r;
1962}
1963
1964
1965/*+++++*/
1966/* inffast.c -- process literals and length/distance pairs fast
1967 * Copyright (C) 1995 Mark Adler
1968 * For conditions of distribution and use, see copyright notice in zlib.h
1969 */
1970
1971/* simplify the use of the inflate_huft type with some defines */
1972#define base more.Base
1973#define next more.Next
1974#define exop word.what.Exop
1975#define bits word.what.Bits
1976
1977/* macros for bit input with no checking and for returning unused bytes */
1978#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
1979#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
1980
1981/* Called with number of bytes left to write in window at least 258
1982 (the maximum string length) and number of input bytes available
1983 at least ten. The ten bytes are six bytes for the longest length/
1984 distance pair plus four bytes for overloading the bit buffer. */
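/* Editor's note (not part of the original file): the "six bytes" above is
 * the worst-case bit cost of one length/distance pair as read by the fast
 * loop below: GRABBITS(20) for the literal/length code plus its extra bits,
 * GRABBITS(15) for the distance code, and up to GRABBITS(13) for the
 * distance extra bits -- 20 + 15 + 13 = 48 bits = 6 bytes.  The remaining
 * four bytes allow for read-ahead that GRABBITS may leave sitting in the
 * 32-bit bit buffer beyond what the pair actually consumes.
 */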
1985
1986local int inflate_fast(
1987 uInt bl,
1988 uInt bd,
1989 inflate_huft *tl,
1990 inflate_huft *td,
1991 inflate_blocks_statef *s,
1992 z_stream *z
1993)
1994{
1995 inflate_huft *t; /* temporary pointer */
1996 uInt e; /* extra bits or operation */
1997 uLong b; /* bit buffer */
1998 uInt k; /* bits in bit buffer */
1999 Bytef *p; /* input data pointer */
2000 uInt n; /* bytes available there */
2001 Bytef *q; /* output window write pointer */
2002 uInt m; /* bytes to end of window or read pointer */
2003 uInt ml; /* mask for literal/length tree */
2004 uInt md; /* mask for distance tree */
2005 uInt c; /* bytes to copy */
2006 uInt d; /* distance back to copy from */
2007 Bytef *r; /* copy source pointer */
2008
2009 /* load input, output, bit values */
2010 LOAD
2011
2012 /* initialize masks */
2013 ml = inflate_mask[bl];
2014 md = inflate_mask[bd];
2015
2016 /* do until not enough input or output space for fast loop */
2017 do { /* assume called with m >= 258 && n >= 10 */
2018 /* get literal/length code */
2019 GRABBITS(20) /* max bits for literal/length code */
2020 if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
2021 {
2022 DUMPBITS(t->bits)
2023 Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
2024 "inflate: * literal '%c'\n" :
2025 "inflate: * literal 0x%02x\n", t->base));
2026 *q++ = (Byte)t->base;
2027 m--;
2028 continue;
2029 }
2030 do {
2031 DUMPBITS(t->bits)
2032 if (e & 16)
2033 {
2034 /* get extra bits for length */
2035 e &= 15;
2036 c = t->base + ((uInt)b & inflate_mask[e]);
2037 DUMPBITS(e)
2038 Tracevv((stderr, "inflate: * length %u\n", c));
2039
2040 /* decode distance base of block to copy */
2041 GRABBITS(15); /* max bits for distance code */
2042 e = (t = td + ((uInt)b & md))->exop;
2043 do {
2044 DUMPBITS(t->bits)
2045 if (e & 16)
2046 {
2047 /* get extra bits to add to distance base */
2048 e &= 15;
2049 GRABBITS(e) /* get extra bits (up to 13) */
2050 d = t->base + ((uInt)b & inflate_mask[e]);
2051 DUMPBITS(e)
2052 Tracevv((stderr, "inflate: * distance %u\n", d));
2053
2054 /* do the copy */
2055 m -= c;
2056 if ((uInt)(q - s->window) >= d) /* offset before dest */
2057 { /* just copy */
2058 r = q - d;
2059 *q++ = *r++; c--; /* minimum count is three, */
2060 *q++ = *r++; c--; /* so unroll loop a little */
2061 }
2062 else /* else offset after destination */
2063 {
2064 e = d - (q - s->window); /* bytes from offset to end */
2065 r = s->end - e; /* pointer to offset */
2066 if (c > e) /* if source crosses, */
2067 {
2068 c -= e; /* copy to end of window */
2069 do {
2070 *q++ = *r++;
2071 } while (--e);
2072 r = s->window; /* copy rest from start of window */
2073 }
2074 }
2075 do { /* copy all or what's left */
2076 *q++ = *r++;
2077 } while (--c);
2078 break;
2079 }
2080 else if ((e & 64) == 0)
2081 e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
2082 else
2083 {
2084 z->msg = "invalid distance code";
2085 UNGRAB
2086 UPDATE
2087 return Z_DATA_ERROR;
2088 }
2089 } while (1);
2090 break;
2091 }
2092 if ((e & 64) == 0)
2093 {
2094 if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
2095 {
2096 DUMPBITS(t->bits)
2097 Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
2098 "inflate: * literal '%c'\n" :
2099 "inflate: * literal 0x%02x\n", t->base));
2100 *q++ = (Byte)t->base;
2101 m--;
2102 break;
2103 }
2104 }
2105 else if (e & 32)
2106 {
2107 Tracevv((stderr, "inflate: * end of block\n"));
2108 UNGRAB
2109 UPDATE
2110 return Z_STREAM_END;
2111 }
2112 else
2113 {
2114 z->msg = "invalid literal/length code";
2115 UNGRAB
2116 UPDATE
2117 return Z_DATA_ERROR;
2118 }
2119 } while (1);
2120 } while (m >= 258 && n >= 10);
2121
2122 /* not enough input or output--restore pointers and return */
2123 UNGRAB
2124 UPDATE
2125 return Z_OK;
2126}
2127
2128
2129/*+++++*/
2130/* zutil.c -- target dependent utility functions for the compression library
2131 * Copyright (C) 1995 Jean-loup Gailly.
2132 * For conditions of distribution and use, see copyright notice in zlib.h
2133 */
2134
2135/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
2136
2137char *zlib_version = ZLIB_VERSION;
2138
2139char *z_errmsg[] = {
2140"stream end", /* Z_STREAM_END 1 */
2141"", /* Z_OK 0 */
2142"file error", /* Z_ERRNO (-1) */
2143"stream error", /* Z_STREAM_ERROR (-2) */
2144"data error", /* Z_DATA_ERROR (-3) */
2145"insufficient memory", /* Z_MEM_ERROR (-4) */
2146"buffer error", /* Z_BUF_ERROR (-5) */
2147""};
2148
2149
2150/*+++++*/
2151/* adler32.c -- compute the Adler-32 checksum of a data stream
2152 * Copyright (C) 1995 Mark Adler
2153 * For conditions of distribution and use, see copyright notice in zlib.h
2154 */
2155
2156/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
2157
2158#define BASE 65521L /* largest prime smaller than 65536 */
2159#define NMAX 5552
2160/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
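To see where 5552 comes from: in the worst case s1 starts at BASE-1 and every byte is 255, so after n bytes s2 can grow to 255*n*(n+1)/2 + (n+1)*(BASE-1). With n = 5552 that is 4,294,690,200, still below 2^32-1 = 4,294,967,295; with n = 5553 it would reach 4,296,171,735 and overflow, so the sums are reduced modulo BASE at least every NMAX bytes.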
2161
2162#define DO1(buf) {s1 += *buf++; s2 += s1;}
2163#define DO2(buf) DO1(buf); DO1(buf);
2164#define DO4(buf) DO2(buf); DO2(buf);
2165#define DO8(buf) DO4(buf); DO4(buf);
2166#define DO16(buf) DO8(buf); DO8(buf);
2167
2168/* ========================================================================= */
2169uLong adler32(
2170 uLong adler,
2171 Bytef *buf,
2172 uInt len
2173)
2174{
2175 unsigned long s1 = adler & 0xffff;
2176 unsigned long s2 = (adler >> 16) & 0xffff;
2177 int k;
2178
2179 if (buf == Z_NULL) return 1L;
2180
2181 while (len > 0) {
2182 k = len < NMAX ? len : NMAX;
2183 len -= k;
2184 while (k >= 16) {
2185 DO16(buf);
2186 k -= 16;
2187 }
2188 if (k != 0) do {
2189 DO1(buf);
2190 } while (--k);
2191 s1 %= BASE;
2192 s2 %= BASE;
2193 }
2194 return (s2 << 16) | s1;
2195}
diff --git a/arch/ppc64/boot/zlib.h b/arch/ppc64/boot/zlib.h
deleted file mode 100644
index f0b996c6864f..000000000000
--- a/arch/ppc64/boot/zlib.h
+++ /dev/null
@@ -1,432 +0,0 @@
1/* */
2
3/*
4 * This file is derived from zlib.h and zconf.h from the zlib-0.95
5 * distribution by Jean-loup Gailly and Mark Adler, with some additions
6 * by Paul Mackerras to aid in implementing Deflate compression and
7 * decompression for PPP packets.
8 */
9
10/*
11 * ==FILEVERSION 960122==
12 *
13 * This marker is used by the Linux installation script to determine
14 * whether an up-to-date version of this file is already installed.
15 */
16
17/* zlib.h -- interface of the 'zlib' general purpose compression library
18 version 0.95, Aug 16th, 1995.
19
20 Copyright (C) 1995 Jean-loup Gailly and Mark Adler
21
22 This software is provided 'as-is', without any express or implied
23 warranty. In no event will the authors be held liable for any damages
24 arising from the use of this software.
25
26 Permission is granted to anyone to use this software for any purpose,
27 including commercial applications, and to alter it and redistribute it
28 freely, subject to the following restrictions:
29
30 1. The origin of this software must not be misrepresented; you must not
31 claim that you wrote the original software. If you use this software
32 in a product, an acknowledgment in the product documentation would be
33 appreciated but is not required.
34 2. Altered source versions must be plainly marked as such, and must not be
35 misrepresented as being the original software.
36 3. This notice may not be removed or altered from any source distribution.
37
38 Jean-loup Gailly Mark Adler
39 gzip@prep.ai.mit.edu madler@alumni.caltech.edu
40 */
41
42#ifndef _ZLIB_H
43#define _ZLIB_H
44
45/* #include "zconf.h" */ /* included directly here */
46
47/* zconf.h -- configuration of the zlib compression library
48 * Copyright (C) 1995 Jean-loup Gailly.
49 * For conditions of distribution and use, see copyright notice in zlib.h
50 */
51
52/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
53
54/*
55 The library does not install any signal handler. It is recommended to
56 add at least a handler for SIGSEGV when decompressing; the library checks
57 the consistency of the input data whenever possible but may go nuts
58 for some forms of corrupted input.
59 */
60
61/*
62 * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
63 * than 64k bytes at a time (needed on systems with 16-bit int).
64 * Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
65 * at addresses which are not a multiple of their size.
66 * Under DOS, -DFAR=far or -DFAR=__far may be needed.
67 */
68
69#ifndef STDC
70# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
71# define STDC
72# endif
73#endif
74
75#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
76# include <unix.h>
77#endif
78
79/* Maximum value for memLevel in deflateInit2 */
80#ifndef MAX_MEM_LEVEL
81# ifdef MAXSEG_64K
82# define MAX_MEM_LEVEL 8
83# else
84# define MAX_MEM_LEVEL 9
85# endif
86#endif
87
88#ifndef FAR
89# define FAR
90#endif
91
92/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
93#ifndef MAX_WBITS
94# define MAX_WBITS 15 /* 32K LZ77 window */
95#endif
96
97/* The memory requirements for deflate are (in bytes):
98 1 << (windowBits+2) + 1 << (memLevel+9)
99 that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
100 plus a few kilobytes for small objects. For example, if you want to reduce
101 the default memory requirements from 256K to 128K, compile with
102 make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
103 Of course this will generally degrade compression (there's no free lunch).
104
105 The memory requirements for inflate are (in bytes) 1 << windowBits
106 that is, 32K for windowBits=15 (default value) plus a few kilobytes
107 for small objects.
108*/
109
110 /* Type declarations */
111
112#ifndef OF /* function prototypes */
113# ifdef STDC
114# define OF(args) args
115# else
116# define OF(args) ()
117# endif
118#endif
119
120typedef unsigned char Byte; /* 8 bits */
121typedef unsigned int uInt; /* 16 bits or more */
122typedef unsigned long uLong; /* 32 bits or more */
123
124typedef Byte FAR Bytef;
125typedef char FAR charf;
126typedef int FAR intf;
127typedef uInt FAR uIntf;
128typedef uLong FAR uLongf;
129
130#ifdef STDC
131 typedef void FAR *voidpf;
132 typedef void *voidp;
133#else
134 typedef Byte FAR *voidpf;
135 typedef Byte *voidp;
136#endif
137
138/* end of original zconf.h */
139
140#define ZLIB_VERSION "0.95P"
141
142/*
143 The 'zlib' compression library provides in-memory compression and
144 decompression functions, including integrity checks of the uncompressed
145 data. This version of the library supports only one compression method
146 (deflation) but other algorithms may be added later and will have the same
147 stream interface.
148
149 For compression the application must provide the output buffer and
150 may optionally provide the input buffer for optimization. For decompression,
151 the application must provide the input buffer and may optionally provide
152 the output buffer for optimization.
153
154 Compression can be done in a single step if the buffers are large
155 enough (for example if an input file is mmap'ed), or can be done by
156 repeated calls of the compression function. In the latter case, the
157 application must provide more input and/or consume the output
158 (providing more output space) before each call.
159*/
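The same single-step idea applies to decompression with the functions declared below. A rough sketch only; the buffer names are illustrative, and out_buf is assumed large enough to hold the whole uncompressed result:

    z_stream strm;
    int rc;

    strm.zalloc = Z_NULL;            /* let inflateInit install defaults */
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    strm.next_in   = compressed_buf; /* all of the compressed input */
    strm.avail_in  = compressed_len;
    strm.next_out  = out_buf;        /* large enough for all the output */
    strm.avail_out = out_len;

    rc = inflateInit(&strm);
    if (rc == Z_OK) {
        rc = inflate(&strm, Z_FINISH); /* Z_STREAM_END expected on success */
        inflateEnd(&strm);
    }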
160
161typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
162typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
163
164struct internal_state;
165
166typedef struct z_stream_s {
167 Bytef *next_in; /* next input byte */
168 uInt avail_in; /* number of bytes available at next_in */
169 uLong total_in; /* total nb of input bytes read so far */
170
171 Bytef *next_out; /* next output byte should be put there */
172 uInt avail_out; /* remaining free space at next_out */
173 uLong total_out; /* total nb of bytes output so far */
174
175 char *msg; /* last error message, NULL if no error */
176 struct internal_state FAR *state; /* not visible by applications */
177
178 alloc_func zalloc; /* used to allocate the internal state */
179 free_func zfree; /* used to free the internal state */
180 voidp opaque; /* private data object passed to zalloc and zfree */
181
182 Byte data_type; /* best guess about the data type: ascii or binary */
183
184} z_stream;
185
186/*
187 The application must update next_in and avail_in when avail_in has
188 dropped to zero. It must update next_out and avail_out when avail_out
189 has dropped to zero. The application must initialize zalloc, zfree and
190 opaque before calling the init function. All other fields are set by the
191 compression library and must not be updated by the application.
192
193 The opaque value provided by the application will be passed as the first
194 parameter for calls of zalloc and zfree. This can be useful for custom
195 memory management. The compression library attaches no meaning to the
196 opaque value.
197
198 zalloc must return Z_NULL if there is not enough memory for the object.
199 On 16-bit systems, the functions zalloc and zfree must be able to allocate
200 exactly 65536 bytes, but will not be required to allocate more than this
201 if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
202 pointers returned by zalloc for objects of exactly 65536 bytes *must*
203 have their offset normalized to zero. The default allocation function
204 provided by this library ensures this (see zutil.c). To reduce memory
205 requirements and avoid any allocation of 64K objects, at the expense of
206 compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
207
208 The fields total_in and total_out can be used for statistics or
209 progress reports. After compression, total_in holds the total size of
210 the uncompressed data and may be saved for use in the decompressor
211 (particularly if the decompressor wants to decompress everything in
212 a single step).
213*/
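A sketch of what a caller-supplied allocator pair can look like, matching the alloc_func and free_func typedefs above; the arena bookkeeping is purely illustrative of how the opaque pointer is threaded through:

    struct arena { char *base; uInt used; uInt size; };

    static voidpf my_zalloc(voidpf opaque, uInt items, uInt size)
    {
        struct arena *a = (struct arena *)opaque;
        uInt want = items * size;

        if (a->used + want > a->size)
            return Z_NULL;                    /* report allocation failure */
        a->used += want;
        return (voidpf)(a->base + a->used - want);
    }

    static void my_zfree(voidpf opaque, voidpf address, uInt nbytes)
    {
        /* a bump allocator never frees individual blocks */
        (void)opaque; (void)address; (void)nbytes;
    }

    /* later: strm.zalloc = my_zalloc; strm.zfree = my_zfree;
     *        strm.opaque = &my_arena;                        */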
214
215 /* constants */
216
217#define Z_NO_FLUSH 0
218#define Z_PARTIAL_FLUSH 1
219#define Z_FULL_FLUSH 2
220#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
221#define Z_FINISH 4
222#define Z_PACKET_FLUSH 5
223/* See deflate() below for the usage of these constants */
224
225#define Z_OK 0
226#define Z_STREAM_END 1
227#define Z_ERRNO (-1)
228#define Z_STREAM_ERROR (-2)
229#define Z_DATA_ERROR (-3)
230#define Z_MEM_ERROR (-4)
231#define Z_BUF_ERROR (-5)
232/* error codes for the compression/decompression functions */
233
234#define Z_BEST_SPEED 1
235#define Z_BEST_COMPRESSION 9
236#define Z_DEFAULT_COMPRESSION (-1)
237/* compression levels */
238
239#define Z_FILTERED 1
240#define Z_HUFFMAN_ONLY 2
241#define Z_DEFAULT_STRATEGY 0
242
243#define Z_BINARY 0
244#define Z_ASCII 1
245#define Z_UNKNOWN 2
246/* Used to set the data_type field */
247
248#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
249
250extern char *zlib_version;
251/* The application can compare zlib_version and ZLIB_VERSION for consistency.
252 If the first character differs, the library code actually used is
253 not compatible with the zlib.h header file used by the application.
254 */
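A minimal form of that consistency check, as a sketch (how the application reacts to a mismatch is up to it):

    static int check_zlib_version(void)
    {
        /* compare the library actually linked against the header this code saw */
        return zlib_version[0] == ZLIB_VERSION[0] ? Z_OK : Z_STREAM_ERROR;
    }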
255
256 /* basic functions */
257
258extern int inflateInit OF((z_stream *strm));
259/*
260 Initializes the internal stream state for decompression. The fields
261 zalloc and zfree must be initialized before by the caller. If zalloc and
262 zfree are set to Z_NULL, inflateInit updates them to use default allocation
263 functions.
264
265 inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
266 enough memory. msg is set to null if there is no error message.
267 inflateInit does not perform any decompression: this will be done by
268 inflate().
269*/
270
271
272extern int inflate OF((z_stream *strm, int flush));
273/*
274 Performs one or both of the following actions:
275
276 - Decompress more input starting at next_in and update next_in and avail_in
277 accordingly. If not all input can be processed (because there is not
278 enough room in the output buffer), next_in is updated and processing
279 will resume at this point for the next call of inflate().
280
281 - Provide more output starting at next_out and update next_out and avail_out
282 accordingly. inflate() always provides as much output as possible
283 (until there is no more input data or no more space in the output buffer).
284
285 Before the call of inflate(), the application should ensure that at least
286 one of the actions is possible, by providing more input and/or consuming
287 more output, and updating the next_* and avail_* values accordingly.
288 The application can consume the uncompressed output when it wants, for
289 example when the output buffer is full (avail_out == 0), or after each
290 call of inflate().
291
292 If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
293 inflate flushes as much output as possible to the output buffer. The
294 flushing behavior of inflate is not specified for values of the flush
295 parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
296 current implementation actually flushes as much output as possible
297 anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
298 has been consumed, it is expecting to see the length field of a stored
299 block; if not, it returns Z_DATA_ERROR.
300
301 inflate() should normally be called until it returns Z_STREAM_END or an
302 error. However if all decompression is to be performed in a single step
303 (a single call of inflate), the parameter flush should be set to
304 Z_FINISH. In this case all pending input is processed and all pending
305 output is flushed; avail_out must be large enough to hold all the
306 uncompressed data. (The size of the uncompressed data may have been saved
307 by the compressor for this purpose.) The next operation on this stream must
308 be inflateEnd to deallocate the decompression state. The use of Z_FINISH
309 is never required, but can be used to inform inflate that a faster routine
310 may be used for the single inflate() call.
311
312 inflate() returns Z_OK if some progress has been made (more input
313 processed or more output produced), Z_STREAM_END if the end of the
314 compressed data has been reached and all uncompressed output has been
315 produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
316 the stream structure was inconsistent (for example if next_in or next_out
317 was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
318 progress is possible or if there was not enough room in the output buffer
319 when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
320 call inflateSync to look for a good compression block. */
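For the usual incremental case, a hedged sketch of the calling loop this implies; refill_input() and consume_output() stand in for whatever I/O the application actually does, and out_size is assumed to be at least 32K (1 << MAX_WBITS) so the output buffer can double as the history window:

    static int decompress_stream(z_stream *strm, Bytef *out_buf, uInt out_size)
    {
        int rc;

        strm->next_out  = out_buf;      /* also serves as the history window */
        strm->avail_out = out_size;
        rc = inflateInit(strm);         /* zalloc/zfree/opaque set up by the caller */
        while (rc == Z_OK) {
            if (strm->avail_in == 0)    /* out of input: ask for more */
                strm->avail_in = refill_input(&strm->next_in);
            rc = inflate(strm, Z_NO_FLUSH);
            if (strm->avail_out == 0) { /* window full: hand it to the consumer */
                consume_output(out_buf, out_size);
                strm->next_out  = out_buf;
                strm->avail_out = out_size;
            }
        }
        if (rc == Z_STREAM_END)
            consume_output(out_buf, out_size - strm->avail_out);
        inflateEnd(strm);
        return rc;                      /* Z_STREAM_END on success */
    }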
321
322
323extern int inflateEnd OF((z_stream *strm));
324/*
325 All dynamically allocated data structures for this stream are freed.
326 This function discards any unprocessed input and does not flush any
327 pending output.
328
329 inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
330 was inconsistent. In the error case, msg may be set but then points to a
331 static string (which must not be deallocated).
332*/
333
334 /* advanced functions */
335
336extern int inflateInit2 OF((z_stream *strm,
337 int windowBits));
338/*
339 This is another version of inflateInit with more compression options. The
340 fields next_out, zalloc and zfree must be initialized before by the caller.
341
342 The windowBits parameter is the base two logarithm of the maximum window
343 size (the size of the history buffer). It should be in the range 8..15 for
344 this version of the library (the value 16 will be allowed soon). The
345 default value is 15 if inflateInit is used instead. If a compressed stream
346 with a larger window size is given as input, inflate() will return with
347 the error code Z_DATA_ERROR instead of trying to allocate a larger window.
348
349 If next_out is not null, the library will use this buffer for the history
350 buffer; the buffer must either be large enough to hold the entire output
351 data, or have at least 1<<windowBits bytes. If next_out is null, the
352 library will allocate its own buffer (and leave next_out null). next_in
353 need not be provided here but must be provided by the application for the
354 next call of inflate().
355
356 If the history buffer is provided by the application, next_out must
357 never be changed by the application since the decompressor maintains
358 history information inside this buffer from call to call; the application
359 can only reset next_out to the beginning of the history buffer when
360 avail_out is zero and all output has been consumed.
361
362 inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
363 not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
364 windowBits < 8). msg is set to null if there is no error message.
365 inflateInit2 does not perform any decompression: this will be done by
366 inflate().
367*/
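A sketch of the caller-provided history buffer arrangement described above; 'window' and its size are assumed names, and 13 is just an example of a reduced windowBits:

    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.next_out  = window;         /* caller-owned, >= (1 << 13) bytes here */
    strm.avail_out = window_size;
    rc = inflateInit2(&strm, 13);    /* smaller window than the default 15 */
    /* From now on the application must not move next_out; it may only be
       reset to 'window' once avail_out is zero and the output consumed. */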
368
369extern int inflateSync OF((z_stream *strm));
370/*
371 Skips invalid compressed data until the special marker (see deflate()
372 above) can be found, or until all available input is skipped. No output
373 is provided.
374
375 inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
376 if no more input was provided, Z_DATA_ERROR if no marker has been found,
377 or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
378 case, the application may save the current value of total_in which
379 indicates where valid compressed data was found. In the error case, the
380 application may repeatedly call inflateSync, providing more input each time,
381 until success or end of the input data.
382*/
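The recovery pattern this suggests, sketched with an assumed read_more_input() helper:

    /* inflate() just returned Z_DATA_ERROR: hunt for the next sync marker */
    do {
        rc = inflateSync(&strm);
        if (rc == Z_BUF_ERROR)       /* input exhausted, try to get more */
            strm.avail_in = read_more_input(&strm.next_in);
    } while (rc == Z_BUF_ERROR && strm.avail_in > 0);
    /* on Z_OK, total_in records where valid compressed data resumes */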
383
384extern int inflateReset OF((z_stream *strm));
385/*
386 This function is equivalent to inflateEnd followed by inflateInit,
387 but does not free and reallocate all the internal decompression state.
388 The stream will keep attributes that may have been set by inflateInit2.
389
390 inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
391 stream state was inconsistent (such as zalloc or state being NULL).
392*/
393
394extern int inflateIncomp OF((z_stream *strm));
395/*
396 This function adds the data at next_in (avail_in bytes) to the output
397 history without performing any output. There must be no pending output,
398 and the decompressor must be expecting to see the start of a block.
399 Calling this function is equivalent to decompressing a stored block
400 containing the data at next_in (except that the data is not output).
401*/
402
403 /* checksum functions */
404
405/*
406 This function is not related to compression but is exported
407 anyway because it might be useful in applications using the
408 compression library.
409*/
410
411extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
412
413/*
414 Update a running Adler-32 checksum with the bytes buf[0..len-1] and
415 return the updated checksum. If buf is NULL, this function returns
416 the required initial value for the checksum.
417 An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
418 much faster. Usage example:
419
420 uLong adler = adler32(0L, Z_NULL, 0);
421
422 while (read_buffer(buffer, length) != EOF) {
423 adler = adler32(adler, buffer, length);
424 }
425 if (adler != original_adler) error();
426*/
427
428#ifndef _Z_UTIL_H
429 struct internal_state {int dummy;}; /* hack for buggy compilers */
430#endif
431
432#endif /* _ZLIB_H */
diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig
index 37c157c93cef..e79fd60bc122 100644
--- a/arch/ppc64/defconfig
+++ b/arch/ppc64/defconfig
@@ -1318,7 +1318,7 @@ CONFIG_MSDOS_PARTITION=y
1318# 1318#
1319CONFIG_NLS=y 1319CONFIG_NLS=y
1320CONFIG_NLS_DEFAULT="iso8859-1" 1320CONFIG_NLS_DEFAULT="iso8859-1"
1321CONFIG_NLS_CODEPAGE_437=m 1321CONFIG_NLS_CODEPAGE_437=y
1322CONFIG_NLS_CODEPAGE_737=m 1322CONFIG_NLS_CODEPAGE_737=m
1323CONFIG_NLS_CODEPAGE_775=m 1323CONFIG_NLS_CODEPAGE_775=m
1324CONFIG_NLS_CODEPAGE_850=m 1324CONFIG_NLS_CODEPAGE_850=m
@@ -1342,7 +1342,7 @@ CONFIG_NLS_ISO8859_8=m
1342CONFIG_NLS_CODEPAGE_1250=m 1342CONFIG_NLS_CODEPAGE_1250=m
1343CONFIG_NLS_CODEPAGE_1251=m 1343CONFIG_NLS_CODEPAGE_1251=m
1344CONFIG_NLS_ASCII=m 1344CONFIG_NLS_ASCII=m
1345CONFIG_NLS_ISO8859_1=m 1345CONFIG_NLS_ISO8859_1=y
1346CONFIG_NLS_ISO8859_2=m 1346CONFIG_NLS_ISO8859_2=m
1347CONFIG_NLS_ISO8859_3=m 1347CONFIG_NLS_ISO8859_3=m
1348CONFIG_NLS_ISO8859_4=m 1348CONFIG_NLS_ISO8859_4=m
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
deleted file mode 100644
index 90032b138902..000000000000
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/stddef.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <asm/system.h>
13#include <asm/iSeries/HvLpEvent.h>
14#include <asm/iSeries/HvCallEvent.h>
15#include <asm/iSeries/ItLpNaca.h>
16
17/* Array of LpEvent handler functions */
18LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
19unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
20
21/* Register a handler for an LpEvent type */
22
23int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler handler )
24{
25 int rc = 1;
26 if ( eventType < HvLpEvent_Type_NumTypes ) {
27 lpEventHandler[eventType] = handler;
28 rc = 0;
29 }
30 return rc;
31
32}
33
34int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
35{
36 int rc = 1;
37
38 might_sleep();
39
40 if ( eventType < HvLpEvent_Type_NumTypes ) {
41 if ( !lpEventHandlerPaths[eventType] ) {
42 lpEventHandler[eventType] = NULL;
43 rc = 0;
44
45 /* We now sleep until all other CPUs have scheduled. This ensures that
46 * the deletion is seen by all other CPUs, and that the deleted handler
47 * isn't still running on another CPU when we return. */
48 synchronize_rcu();
49 }
50 }
51 return rc;
52}
53EXPORT_SYMBOL(HvLpEvent_registerHandler);
54EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
55
56/* lpIndex is the partition index of the target partition.  It is only
57 * meaningful for VirtualIo, VirtualLan and SessionMgr; zero means use
58 * our own partition index (pass zero for the other event types).
59 */
60int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
61{
62 int rc = 1;
63 if ( eventType < HvLpEvent_Type_NumTypes &&
64 lpEventHandler[eventType] ) {
65 if ( lpIndex == 0 )
66 lpIndex = itLpNaca.xLpIndex;
67 HvCallEvent_openLpEventPath( lpIndex, eventType );
68 ++lpEventHandlerPaths[eventType];
69 rc = 0;
70 }
71 return rc;
72}
73
74int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
75{
76 int rc = 1;
77 if ( eventType < HvLpEvent_Type_NumTypes &&
78 lpEventHandler[eventType] &&
79 lpEventHandlerPaths[eventType] ) {
80 if ( lpIndex == 0 )
81 lpIndex = itLpNaca.xLpIndex;
82 HvCallEvent_closeLpEventPath( lpIndex, eventType );
83 --lpEventHandlerPaths[eventType];
84 rc = 0;
85 }
86 return rc;
87}
88
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index ae60eb1193c6..327c08ce4291 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -2,36 +2,34 @@
2# Makefile for the linux ppc64 kernel. 2# Makefile for the linux ppc64 kernel.
3# 3#
4 4
5ifneq ($(CONFIG_PPC_MERGE),y)
6
5EXTRA_CFLAGS += -mno-minimal-toc 7EXTRA_CFLAGS += -mno-minimal-toc
6extra-y := head.o vmlinux.lds 8extra-y := head.o vmlinux.lds
7 9
8obj-y := setup.o entry.o traps.o irq.o idle.o dma.o \ 10obj-y := misc.o prom.o
9 time.o process.o signal.o syscalls.o misc.o ptrace.o \ 11
10 align.o semaphore.o bitops.o pacaData.o \ 12endif
11 udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
12 ptrace32.o signal32.o rtc.o init_task.o \
13 lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
14 iommu.o sysfs.o vdso.o pmc.o firmware.o
15obj-y += vdso32/ vdso64/
16 13
17obj-$(CONFIG_PPC_OF) += of_device.o 14obj-y += irq.o idle.o dma.o \
15 signal.o \
16 align.o bitops.o pacaData.o \
17 udbg.o ioctl32.o \
18 rtc.o \
19 cpu_setup_power4.o \
20 iommu.o sysfs.o vdso.o firmware.o
21obj-y += vdso32/ vdso64/
18 22
19pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_irq.o \
20 iSeries_VpdInfo.o
21pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o 23pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
22 24
23obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y) 25obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
24 26
25obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \ 27obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
26 iSeries_setup.o ItLpQueue.o hvCall.o \ 28ifneq ($(CONFIG_PPC_MERGE),y)
27 mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \ 29obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
28 iSeries_iommu.o 30endif
29
30obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
31 31
32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ 32obj-$(CONFIG_PPC_PSERIES) += rtasd.o udbg_16550.o
33 pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
34 pSeries_setup.o pSeries_iommu.o udbg_16550.o
35 33
36obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \ 34obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
37 bpa_iic.o spider-pic.o 35 bpa_iic.o spider-pic.o
@@ -41,45 +39,36 @@ obj-$(CONFIG_EEH) += eeh.o
41obj-$(CONFIG_PROC_FS) += proc_ppc64.o 39obj-$(CONFIG_PROC_FS) += proc_ppc64.o
42obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o 40obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
43obj-$(CONFIG_SMP) += smp.o 41obj-$(CONFIG_SMP) += smp.o
44obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o 42obj-$(CONFIG_MODULES) += module.o
45obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o 43ifneq ($(CONFIG_PPC_MERGE),y)
44obj-$(CONFIG_MODULES) += ppc_ksyms.o
45endif
46obj-$(CONFIG_PPC_RTAS) += rtas_pci.o
46obj-$(CONFIG_RTAS_PROC) += rtas-proc.o 47obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
47obj-$(CONFIG_SCANLOG) += scanlog.o 48obj-$(CONFIG_SCANLOG) += scanlog.o
48obj-$(CONFIG_VIOPATH) += viopath.o
49obj-$(CONFIG_LPARCFG) += lparcfg.o 49obj-$(CONFIG_LPARCFG) += lparcfg.o
50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
51ifneq ($(CONFIG_PPC_MERGE),y)
51obj-$(CONFIG_BOOTX_TEXT) += btext.o 52obj-$(CONFIG_BOOTX_TEXT) += btext.o
53endif
52obj-$(CONFIG_HVCS) += hvcserver.o 54obj-$(CONFIG_HVCS) += hvcserver.o
53 55
54vio-obj-$(CONFIG_PPC_PSERIES) += pSeries_vio.o 56obj-$(CONFIG_PPC_PMAC) += udbg_scc.o
55vio-obj-$(CONFIG_PPC_ISERIES) += iSeries_vio.o
56obj-$(CONFIG_IBMVIO) += vio.o $(vio-obj-y)
57obj-$(CONFIG_XICS) += xics.o
58obj-$(CONFIG_MPIC) += mpic.o
59 57
60obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ 58obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o
61 pmac_time.o pmac_nvram.o pmac_low_i2c.o \
62 udbg_scc.o
63
64obj-$(CONFIG_PPC_MAPLE) += maple_setup.o maple_pci.o maple_time.o \
65 udbg_16550.o
66
67obj-$(CONFIG_U3_DART) += u3_iommu.o
68 59
69ifdef CONFIG_SMP 60ifdef CONFIG_SMP
70obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o 61obj-$(CONFIG_PPC_PMAC) += smp-tbsync.o
71obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o
72obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o
73obj-$(CONFIG_PPC_BPA) += pSeries_smp.o
74obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o 62obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o
75endif 63endif
76 64
77obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
78obj-$(CONFIG_KPROBES) += kprobes.o 65obj-$(CONFIG_KPROBES) += kprobes.o
79 66
80CFLAGS_ioctl32.o += -Ifs/ 67CFLAGS_ioctl32.o += -Ifs/
81 68
69ifneq ($(CONFIG_PPC_MERGE),y)
82ifeq ($(CONFIG_PPC_ISERIES),y) 70ifeq ($(CONFIG_PPC_ISERIES),y)
83arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s 71arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
84AFLAGS_head.o += -Iarch/ppc64/kernel 72AFLAGS_head.o += -Iarch/powerpc/kernel
73endif
85endif 74endif
diff --git a/arch/ppc64/kernel/align.c b/arch/ppc64/kernel/align.c
index 330e7ef81427..256d5b592aa1 100644
--- a/arch/ppc64/kernel/align.c
+++ b/arch/ppc64/kernel/align.c
@@ -313,7 +313,7 @@ fix_alignment(struct pt_regs *regs)
313 /* Doing stfs, have to convert to single */ 313 /* Doing stfs, have to convert to single */
314 preempt_disable(); 314 preempt_disable();
315 enable_kernel_fp(); 315 enable_kernel_fp();
316 cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr); 316 cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread);
317 disable_kernel_fp(); 317 disable_kernel_fp();
318 preempt_enable(); 318 preempt_enable();
319 } 319 }
@@ -349,7 +349,7 @@ fix_alignment(struct pt_regs *regs)
349 /* Doing lfs, have to convert to double */ 349 /* Doing lfs, have to convert to double */
350 preempt_disable(); 350 preempt_disable();
351 enable_kernel_fp(); 351 enable_kernel_fp();
352 cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr); 352 cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread);
353 disable_kernel_fp(); 353 disable_kernel_fp();
354 preempt_enable(); 354 preempt_enable();
355 } 355 }
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 1ff4fa05a973..5e6046cb414e 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -46,8 +46,6 @@
46int main(void) 46int main(void)
47{ 47{
48 /* thread struct on stack */ 48 /* thread struct on stack */
49 DEFINE(THREAD_SHIFT, THREAD_SHIFT);
50 DEFINE(THREAD_SIZE, THREAD_SIZE);
51 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 49 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
52 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 50 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
53 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); 51 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
@@ -77,6 +75,7 @@ int main(void)
77 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); 75 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
78 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 76 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
79 DEFINE(PLATFORM, offsetof(struct systemcfg, platform)); 77 DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
78 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
80 79
81 /* paca */ 80 /* paca */
82 DEFINE(PACA_SIZE, sizeof(struct paca_struct)); 81 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index 5f2460090e03..da1b4b7a3269 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -39,8 +39,8 @@
39#include <asm/pmac_feature.h> 39#include <asm/pmac_feature.h>
40#include <asm/abs_addr.h> 40#include <asm/abs_addr.h>
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/ppc-pci.h>
42 43
43#include "pci.h"
44#include "bpa_iommu.h" 44#include "bpa_iommu.h"
45 45
46static inline unsigned long 46static inline unsigned long
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
index 57b3db66f458..c2dc8f282eb8 100644
--- a/arch/ppc64/kernel/bpa_setup.c
+++ b/arch/ppc64/kernel/bpa_setup.c
@@ -43,8 +43,9 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/nvram.h> 44#include <asm/nvram.h>
45#include <asm/cputable.h> 45#include <asm/cputable.h>
46#include <asm/ppc-pci.h>
47#include <asm/irq.h>
46 48
47#include "pci.h"
48#include "bpa_iic.h" 49#include "bpa_iic.h"
49#include "bpa_iommu.h" 50#include "bpa_iommu.h"
50 51
@@ -54,7 +55,7 @@
54#define DBG(fmt...) 55#define DBG(fmt...)
55#endif 56#endif
56 57
57void bpa_get_cpuinfo(struct seq_file *m) 58void bpa_show_cpuinfo(struct seq_file *m)
58{ 59{
59 struct device_node *root; 60 struct device_node *root;
60 const char *model = ""; 61 const char *model = "";
@@ -128,7 +129,7 @@ struct machdep_calls __initdata bpa_md = {
128 .probe = bpa_probe, 129 .probe = bpa_probe,
129 .setup_arch = bpa_setup_arch, 130 .setup_arch = bpa_setup_arch,
130 .init_early = bpa_init_early, 131 .init_early = bpa_init_early,
131 .get_cpuinfo = bpa_get_cpuinfo, 132 .show_cpuinfo = bpa_show_cpuinfo,
132 .restart = rtas_restart, 133 .restart = rtas_restart,
133 .power_off = rtas_power_off, 134 .power_off = rtas_power_off,
134 .halt = rtas_halt, 135 .halt = rtas_halt,
diff --git a/arch/ppc64/kernel/btext.c b/arch/ppc64/kernel/btext.c
index b6fbfbe9032d..506a37885c5c 100644
--- a/arch/ppc64/kernel/btext.c
+++ b/arch/ppc64/kernel/btext.c
@@ -18,6 +18,7 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/lmb.h> 19#include <asm/lmb.h>
20#include <asm/processor.h> 20#include <asm/processor.h>
21#include <asm/udbg.h>
21 22
22#undef NO_SCROLL 23#undef NO_SCROLL
23 24
@@ -131,6 +132,47 @@ int btext_initialize(struct device_node *np)
131 return 0; 132 return 0;
132} 133}
133 134
135static void btext_putc(unsigned char c)
136{
137 btext_drawchar(c);
138}
139
140void __init init_boot_display(void)
141{
142 char *name;
143 struct device_node *np = NULL;
144 int rc = -ENODEV;
145
146 printk("trying to initialize btext ...\n");
147
148 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
149 if (name != NULL) {
150 np = of_find_node_by_path(name);
151 if (np != NULL) {
152 if (strcmp(np->type, "display") != 0) {
153 printk("boot stdout isn't a display !\n");
154 of_node_put(np);
155 np = NULL;
156 }
157 }
158 }
159 if (np)
160 rc = btext_initialize(np);
161 if (rc) {
162 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
163 if (get_property(np, "linux,opened", NULL)) {
164 printk("trying %s ...\n", np->full_name);
165 rc = btext_initialize(np);
166 printk("result: %d\n", rc);
167 }
168 if (rc == 0)
169 break;
170 }
171 }
172 if (rc == 0 && udbg_putc == NULL)
173 udbg_putc = btext_putc;
174}
175
134 176
135/* Calc the base address of a given point (x,y) */ 177/* Calc the base address of a given point (x,y) */
136static unsigned char * calc_base(int x, int y) 178static unsigned char * calc_base(int x, int y)
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
deleted file mode 100644
index 8831a28c3c4e..000000000000
--- a/arch/ppc64/kernel/cputable.c
+++ /dev/null
@@ -1,308 +0,0 @@
1/*
2 * arch/ppc64/kernel/cputable.c
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/string.h>
17#include <linux/sched.h>
18#include <linux/threads.h>
19#include <linux/init.h>
20#include <linux/module.h>
21
22#include <asm/oprofile_impl.h>
23#include <asm/cputable.h>
24
25struct cpu_spec* cur_cpu_spec = NULL;
26EXPORT_SYMBOL(cur_cpu_spec);
27
28/* NOTE:
29 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
30 * the responsibility of the appropriate CPU save/restore functions to
31 * eventually copy these settings over. Those save/restore aren't yet
32 * part of the cputable though. That has to be fixed for both ppc32
33 * and ppc64
34 */
35extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
37extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
38extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
39
40
41/* We only set the altivec features if the kernel was compiled with altivec
42 * support
43 */
44#ifdef CONFIG_ALTIVEC
45#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
46#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
47#else
48#define CPU_FTR_ALTIVEC_COMP 0
49#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
50#endif
51
52struct cpu_spec cpu_specs[] = {
53 { /* Power3 */
54 .pvr_mask = 0xffff0000,
55 .pvr_value = 0x00400000,
56 .cpu_name = "POWER3 (630)",
57 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
58 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
59 .cpu_user_features = COMMON_USER_PPC64,
60 .icache_bsize = 128,
61 .dcache_bsize = 128,
62 .num_pmcs = 8,
63 .cpu_setup = __setup_cpu_power3,
64#ifdef CONFIG_OPROFILE
65 .oprofile_cpu_type = "ppc64/power3",
66 .oprofile_model = &op_model_rs64,
67#endif
68 },
69 { /* Power3+ */
70 .pvr_mask = 0xffff0000,
71 .pvr_value = 0x00410000,
72 .cpu_name = "POWER3 (630+)",
73 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
74 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
75 .cpu_user_features = COMMON_USER_PPC64,
76 .icache_bsize = 128,
77 .dcache_bsize = 128,
78 .num_pmcs = 8,
79 .cpu_setup = __setup_cpu_power3,
80#ifdef CONFIG_OPROFILE
81 .oprofile_cpu_type = "ppc64/power3",
82 .oprofile_model = &op_model_rs64,
83#endif
84 },
85 { /* Northstar */
86 .pvr_mask = 0xffff0000,
87 .pvr_value = 0x00330000,
88 .cpu_name = "RS64-II (northstar)",
89 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
90 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
91 CPU_FTR_MMCRA | CPU_FTR_CTRL,
92 .cpu_user_features = COMMON_USER_PPC64,
93 .icache_bsize = 128,
94 .dcache_bsize = 128,
95 .num_pmcs = 8,
96 .cpu_setup = __setup_cpu_power3,
97#ifdef CONFIG_OPROFILE
98 .oprofile_cpu_type = "ppc64/rs64",
99 .oprofile_model = &op_model_rs64,
100#endif
101 },
102 { /* Pulsar */
103 .pvr_mask = 0xffff0000,
104 .pvr_value = 0x00340000,
105 .cpu_name = "RS64-III (pulsar)",
106 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
107 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
108 CPU_FTR_MMCRA | CPU_FTR_CTRL,
109 .cpu_user_features = COMMON_USER_PPC64,
110 .icache_bsize = 128,
111 .dcache_bsize = 128,
112 .num_pmcs = 8,
113 .cpu_setup = __setup_cpu_power3,
114#ifdef CONFIG_OPROFILE
115 .oprofile_cpu_type = "ppc64/rs64",
116 .oprofile_model = &op_model_rs64,
117#endif
118 },
119 { /* I-star */
120 .pvr_mask = 0xffff0000,
121 .pvr_value = 0x00360000,
122 .cpu_name = "RS64-III (icestar)",
123 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
124 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
125 CPU_FTR_MMCRA | CPU_FTR_CTRL,
126 .cpu_user_features = COMMON_USER_PPC64,
127 .icache_bsize = 128,
128 .dcache_bsize = 128,
129 .num_pmcs = 8,
130 .cpu_setup = __setup_cpu_power3,
131#ifdef CONFIG_OPROFILE
132 .oprofile_cpu_type = "ppc64/rs64",
133 .oprofile_model = &op_model_rs64,
134#endif
135 },
136 { /* S-star */
137 .pvr_mask = 0xffff0000,
138 .pvr_value = 0x00370000,
139 .cpu_name = "RS64-IV (sstar)",
140 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
141 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
142 CPU_FTR_MMCRA | CPU_FTR_CTRL,
143 .cpu_user_features = COMMON_USER_PPC64,
144 .icache_bsize = 128,
145 .dcache_bsize = 128,
146 .num_pmcs = 8,
147 .cpu_setup = __setup_cpu_power3,
148#ifdef CONFIG_OPROFILE
149 .oprofile_cpu_type = "ppc64/rs64",
150 .oprofile_model = &op_model_rs64,
151#endif
152 },
153 { /* Power4 */
154 .pvr_mask = 0xffff0000,
155 .pvr_value = 0x00350000,
156 .cpu_name = "POWER4 (gp)",
157 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
158 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
159 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
160 .cpu_user_features = COMMON_USER_PPC64,
161 .icache_bsize = 128,
162 .dcache_bsize = 128,
163 .num_pmcs = 8,
164 .cpu_setup = __setup_cpu_power4,
165#ifdef CONFIG_OPROFILE
166 .oprofile_cpu_type = "ppc64/power4",
167 .oprofile_model = &op_model_rs64,
168#endif
169 },
170 { /* Power4+ */
171 .pvr_mask = 0xffff0000,
172 .pvr_value = 0x00380000,
173 .cpu_name = "POWER4+ (gq)",
174 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
175 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
176 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
177 .cpu_user_features = COMMON_USER_PPC64,
178 .icache_bsize = 128,
179 .dcache_bsize = 128,
180 .num_pmcs = 8,
181 .cpu_setup = __setup_cpu_power4,
182#ifdef CONFIG_OPROFILE
183 .oprofile_cpu_type = "ppc64/power4",
184 .oprofile_model = &op_model_power4,
185#endif
186 },
187 { /* PPC970 */
188 .pvr_mask = 0xffff0000,
189 .pvr_value = 0x00390000,
190 .cpu_name = "PPC970",
191 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
192 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
193 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
194 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
195 .cpu_user_features = COMMON_USER_PPC64 |
196 PPC_FEATURE_HAS_ALTIVEC_COMP,
197 .icache_bsize = 128,
198 .dcache_bsize = 128,
199 .num_pmcs = 8,
200 .cpu_setup = __setup_cpu_ppc970,
201#ifdef CONFIG_OPROFILE
202 .oprofile_cpu_type = "ppc64/970",
203 .oprofile_model = &op_model_power4,
204#endif
205 },
206 { /* PPC970FX */
207 .pvr_mask = 0xffff0000,
208 .pvr_value = 0x003c0000,
209 .cpu_name = "PPC970FX",
210 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
211 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
212 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
213 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
214 .cpu_user_features = COMMON_USER_PPC64 |
215 PPC_FEATURE_HAS_ALTIVEC_COMP,
216 .icache_bsize = 128,
217 .dcache_bsize = 128,
218 .num_pmcs = 8,
219 .cpu_setup = __setup_cpu_ppc970,
220#ifdef CONFIG_OPROFILE
221 .oprofile_cpu_type = "ppc64/970",
222 .oprofile_model = &op_model_power4,
223#endif
224 },
225 { /* PPC970MP */
226 .pvr_mask = 0xffff0000,
227 .pvr_value = 0x00440000,
228 .cpu_name = "PPC970MP",
229 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
230 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
231 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
232 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
233 .cpu_user_features = COMMON_USER_PPC64 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .icache_bsize = 128,
236 .dcache_bsize = 128,
237 .cpu_setup = __setup_cpu_ppc970,
238#ifdef CONFIG_OPROFILE
239 .oprofile_cpu_type = "ppc64/970",
240 .oprofile_model = &op_model_power4,
241#endif
242 },
243 { /* Power5 */
244 .pvr_mask = 0xffff0000,
245 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)",
247 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
248 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
249 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
250 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
251 CPU_FTR_MMCRA_SIHV,
252 .cpu_user_features = COMMON_USER_PPC64,
253 .icache_bsize = 128,
254 .dcache_bsize = 128,
255 .num_pmcs = 6,
256 .cpu_setup = __setup_cpu_power4,
257#ifdef CONFIG_OPROFILE
258 .oprofile_cpu_type = "ppc64/power5",
259 .oprofile_model = &op_model_power4,
260#endif
261 },
262 { /* Power5 */
263 .pvr_mask = 0xffff0000,
264 .pvr_value = 0x003b0000,
265 .cpu_name = "POWER5 (gs)",
266 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
267 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
268 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
269 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
270 CPU_FTR_MMCRA_SIHV,
271 .cpu_user_features = COMMON_USER_PPC64,
272 .icache_bsize = 128,
273 .dcache_bsize = 128,
274 .num_pmcs = 6,
275 .cpu_setup = __setup_cpu_power4,
276#ifdef CONFIG_OPROFILE
277 .oprofile_cpu_type = "ppc64/power5",
278 .oprofile_model = &op_model_power4,
279#endif
280 },
281 { /* BE DD1.x */
282 .pvr_mask = 0xffff0000,
283 .pvr_value = 0x00700000,
284 .cpu_name = "Broadband Engine",
285 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
286 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
287 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
288 CPU_FTR_SMT,
289 .cpu_user_features = COMMON_USER_PPC64 |
290 PPC_FEATURE_HAS_ALTIVEC_COMP,
291 .icache_bsize = 128,
292 .dcache_bsize = 128,
293 .cpu_setup = __setup_cpu_be,
294 },
295 { /* default match */
296 .pvr_mask = 0x00000000,
297 .pvr_value = 0x00000000,
298 .cpu_name = "POWER4 (compatible)",
299 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
300 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
301 CPU_FTR_PPCAS_ARCH_V2,
302 .cpu_user_features = COMMON_USER_PPC64,
303 .icache_bsize = 128,
304 .dcache_bsize = 128,
305 .num_pmcs = 6,
306 .cpu_setup = __setup_cpu_power4,
307 }
308};
diff --git a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c
index ba93fd731222..035d1b14a207 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/ppc64/kernel/eeh.c
@@ -33,7 +33,7 @@
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include <asm/atomic.h> 34#include <asm/atomic.h>
35#include <asm/systemcfg.h> 35#include <asm/systemcfg.h>
36#include "pci.h" 36#include <asm/ppc-pci.h>
37 37
38#undef DEBUG 38#undef DEBUG
39 39
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 72c61041151a..929f9f42cf7a 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -36,6 +36,7 @@
36#include <asm/setup.h> 36#include <asm/setup.h>
37#include <asm/hvcall.h> 37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h> 38#include <asm/iSeries/LparMap.h>
39#include <asm/thread_info.h>
39 40
40#ifdef CONFIG_PPC_ISERIES 41#ifdef CONFIG_PPC_ISERIES
41#define DO_SOFT_DISABLE 42#define DO_SOFT_DISABLE
@@ -80,7 +81,7 @@ _stext:
80_GLOBAL(__start) 81_GLOBAL(__start)
81 /* NOP this out unconditionally */ 82 /* NOP this out unconditionally */
82BEGIN_FTR_SECTION 83BEGIN_FTR_SECTION
83 b .__start_initialization_multiplatform 84 b .__start_initialization_multiplatform
84END_FTR_SECTION(0, 1) 85END_FTR_SECTION(0, 1)
85#endif /* CONFIG_PPC_MULTIPLATFORM */ 86#endif /* CONFIG_PPC_MULTIPLATFORM */
86 87
@@ -201,22 +202,22 @@ exception_marker:
201#define EX_CCR 60 202#define EX_CCR 60
202 203
203#define EXCEPTION_PROLOG_PSERIES(area, label) \ 204#define EXCEPTION_PROLOG_PSERIES(area, label) \
204 mfspr r13,SPRG3; /* get paca address into r13 */ \ 205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
205 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 206 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
206 std r10,area+EX_R10(r13); \ 207 std r10,area+EX_R10(r13); \
207 std r11,area+EX_R11(r13); \ 208 std r11,area+EX_R11(r13); \
208 std r12,area+EX_R12(r13); \ 209 std r12,area+EX_R12(r13); \
209 mfspr r9,SPRG1; \ 210 mfspr r9,SPRN_SPRG1; \
210 std r9,area+EX_R13(r13); \ 211 std r9,area+EX_R13(r13); \
211 mfcr r9; \ 212 mfcr r9; \
212 clrrdi r12,r13,32; /* get high part of &label */ \ 213 clrrdi r12,r13,32; /* get high part of &label */ \
213 mfmsr r10; \ 214 mfmsr r10; \
214 mfspr r11,SRR0; /* save SRR0 */ \ 215 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
215 ori r12,r12,(label)@l; /* virt addr of handler */ \ 216 ori r12,r12,(label)@l; /* virt addr of handler */ \
216 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ 217 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
217 mtspr SRR0,r12; \ 218 mtspr SPRN_SRR0,r12; \
218 mfspr r12,SRR1; /* and SRR1 */ \ 219 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
219 mtspr SRR1,r10; \ 220 mtspr SPRN_SRR1,r10; \
220 rfid; \ 221 rfid; \
221 b . /* prevent speculative execution */ 222 b . /* prevent speculative execution */
222 223
@@ -225,12 +226,12 @@ exception_marker:
225 * This code runs with relocation on. 226 * This code runs with relocation on.
226 */ 227 */
227#define EXCEPTION_PROLOG_ISERIES_1(area) \ 228#define EXCEPTION_PROLOG_ISERIES_1(area) \
228 mfspr r13,SPRG3; /* get paca address into r13 */ \ 229 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
229 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 230 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
230 std r10,area+EX_R10(r13); \ 231 std r10,area+EX_R10(r13); \
231 std r11,area+EX_R11(r13); \ 232 std r11,area+EX_R11(r13); \
232 std r12,area+EX_R12(r13); \ 233 std r12,area+EX_R12(r13); \
233 mfspr r9,SPRG1; \ 234 mfspr r9,SPRN_SPRG1; \
234 std r9,area+EX_R13(r13); \ 235 std r9,area+EX_R13(r13); \
235 mfcr r9 236 mfcr r9
236 237
@@ -283,7 +284,7 @@ exception_marker:
283 std r9,_LINK(r1); \ 284 std r9,_LINK(r1); \
284 mfctr r10; /* save CTR in stackframe */ \ 285 mfctr r10; /* save CTR in stackframe */ \
285 std r10,_CTR(r1); \ 286 std r10,_CTR(r1); \
286 mfspr r11,XER; /* save XER in stackframe */ \ 287 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
287 std r11,_XER(r1); \ 288 std r11,_XER(r1); \
288 li r9,(n)+1; \ 289 li r9,(n)+1; \
289 std r9,_TRAP(r1); /* set trap number */ \ 290 std r9,_TRAP(r1); /* set trap number */ \
@@ -300,7 +301,7 @@ exception_marker:
300 .globl label##_pSeries; \ 301 .globl label##_pSeries; \
301label##_pSeries: \ 302label##_pSeries: \
302 HMT_MEDIUM; \ 303 HMT_MEDIUM; \
303 mtspr SPRG1,r13; /* save r13 */ \ 304 mtspr SPRN_SPRG1,r13; /* save r13 */ \
304 RUNLATCH_ON(r13); \ 305 RUNLATCH_ON(r13); \
305 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 306 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
306 307
@@ -308,7 +309,7 @@ label##_pSeries: \
308 .globl label##_iSeries; \ 309 .globl label##_iSeries; \
309label##_iSeries: \ 310label##_iSeries: \
310 HMT_MEDIUM; \ 311 HMT_MEDIUM; \
311 mtspr SPRG1,r13; /* save r13 */ \ 312 mtspr SPRN_SPRG1,r13; /* save r13 */ \
312 RUNLATCH_ON(r13); \ 313 RUNLATCH_ON(r13); \
313 EXCEPTION_PROLOG_ISERIES_1(area); \ 314 EXCEPTION_PROLOG_ISERIES_1(area); \
314 EXCEPTION_PROLOG_ISERIES_2; \ 315 EXCEPTION_PROLOG_ISERIES_2; \
@@ -318,7 +319,7 @@ label##_iSeries: \
318 .globl label##_iSeries; \ 319 .globl label##_iSeries; \
319label##_iSeries: \ 320label##_iSeries: \
320 HMT_MEDIUM; \ 321 HMT_MEDIUM; \
321 mtspr SPRG1,r13; /* save r13 */ \ 322 mtspr SPRN_SPRG1,r13; /* save r13 */ \
322 RUNLATCH_ON(r13); \ 323 RUNLATCH_ON(r13); \
323 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ 324 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
324 lbz r10,PACAPROCENABLED(r13); \ 325 lbz r10,PACAPROCENABLED(r13); \
@@ -388,7 +389,7 @@ __start_interrupts:
388 . = 0x200 389 . = 0x200
389_machine_check_pSeries: 390_machine_check_pSeries:
390 HMT_MEDIUM 391 HMT_MEDIUM
391 mtspr SPRG1,r13 /* save r13 */ 392 mtspr SPRN_SPRG1,r13 /* save r13 */
392 RUNLATCH_ON(r13) 393 RUNLATCH_ON(r13)
393 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 394 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
394 395
@@ -396,18 +397,18 @@ _machine_check_pSeries:
396 .globl data_access_pSeries 397 .globl data_access_pSeries
397data_access_pSeries: 398data_access_pSeries:
398 HMT_MEDIUM 399 HMT_MEDIUM
399 mtspr SPRG1,r13 400 mtspr SPRN_SPRG1,r13
400BEGIN_FTR_SECTION 401BEGIN_FTR_SECTION
401 mtspr SPRG2,r12 402 mtspr SPRN_SPRG2,r12
402 mfspr r13,DAR 403 mfspr r13,SPRN_DAR
403 mfspr r12,DSISR 404 mfspr r12,SPRN_DSISR
404 srdi r13,r13,60 405 srdi r13,r13,60
405 rlwimi r13,r12,16,0x20 406 rlwimi r13,r12,16,0x20
406 mfcr r12 407 mfcr r12
407 cmpwi r13,0x2c 408 cmpwi r13,0x2c
408 beq .do_stab_bolted_pSeries 409 beq .do_stab_bolted_pSeries
409 mtcrf 0x80,r12 410 mtcrf 0x80,r12
410 mfspr r12,SPRG2 411 mfspr r12,SPRN_SPRG2
411END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 412END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
412 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) 413 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
413 414
@@ -415,19 +416,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
415 .globl data_access_slb_pSeries 416 .globl data_access_slb_pSeries
416data_access_slb_pSeries: 417data_access_slb_pSeries:
417 HMT_MEDIUM 418 HMT_MEDIUM
418 mtspr SPRG1,r13 419 mtspr SPRN_SPRG1,r13
419 RUNLATCH_ON(r13) 420 RUNLATCH_ON(r13)
420 mfspr r13,SPRG3 /* get paca address into r13 */ 421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
421 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 422 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
422 std r10,PACA_EXSLB+EX_R10(r13) 423 std r10,PACA_EXSLB+EX_R10(r13)
423 std r11,PACA_EXSLB+EX_R11(r13) 424 std r11,PACA_EXSLB+EX_R11(r13)
424 std r12,PACA_EXSLB+EX_R12(r13) 425 std r12,PACA_EXSLB+EX_R12(r13)
425 std r3,PACA_EXSLB+EX_R3(r13) 426 std r3,PACA_EXSLB+EX_R3(r13)
426 mfspr r9,SPRG1 427 mfspr r9,SPRN_SPRG1
427 std r9,PACA_EXSLB+EX_R13(r13) 428 std r9,PACA_EXSLB+EX_R13(r13)
428 mfcr r9 429 mfcr r9
429 mfspr r12,SRR1 /* and SRR1 */ 430 mfspr r12,SPRN_SRR1 /* and SRR1 */
430 mfspr r3,DAR 431 mfspr r3,SPRN_DAR
431 b .do_slb_miss /* Rel. branch works in real mode */ 432 b .do_slb_miss /* Rel. branch works in real mode */
432 433
433 STD_EXCEPTION_PSERIES(0x400, instruction_access) 434 STD_EXCEPTION_PSERIES(0x400, instruction_access)
@@ -436,19 +437,19 @@ data_access_slb_pSeries:
436 .globl instruction_access_slb_pSeries 437 .globl instruction_access_slb_pSeries
437instruction_access_slb_pSeries: 438instruction_access_slb_pSeries:
438 HMT_MEDIUM 439 HMT_MEDIUM
439 mtspr SPRG1,r13 440 mtspr SPRN_SPRG1,r13
440 RUNLATCH_ON(r13) 441 RUNLATCH_ON(r13)
441 mfspr r13,SPRG3 /* get paca address into r13 */ 442 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
442 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
443 std r10,PACA_EXSLB+EX_R10(r13) 444 std r10,PACA_EXSLB+EX_R10(r13)
444 std r11,PACA_EXSLB+EX_R11(r13) 445 std r11,PACA_EXSLB+EX_R11(r13)
445 std r12,PACA_EXSLB+EX_R12(r13) 446 std r12,PACA_EXSLB+EX_R12(r13)
446 std r3,PACA_EXSLB+EX_R3(r13) 447 std r3,PACA_EXSLB+EX_R3(r13)
447 mfspr r9,SPRG1 448 mfspr r9,SPRN_SPRG1
448 std r9,PACA_EXSLB+EX_R13(r13) 449 std r9,PACA_EXSLB+EX_R13(r13)
449 mfcr r9 450 mfcr r9
450 mfspr r12,SRR1 /* and SRR1 */ 451 mfspr r12,SPRN_SRR1 /* and SRR1 */
451 mfspr r3,SRR0 /* SRR0 is faulting address */ 452 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
452 b .do_slb_miss /* Rel. branch works in real mode */ 453 b .do_slb_miss /* Rel. branch works in real mode */
453 454
454 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) 455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
@@ -466,15 +467,15 @@ system_call_pSeries:
466 RUNLATCH_ON(r9) 467 RUNLATCH_ON(r9)
467 mr r9,r13 468 mr r9,r13
468 mfmsr r10 469 mfmsr r10
469 mfspr r13,SPRG3 470 mfspr r13,SPRN_SPRG3
470 mfspr r11,SRR0 471 mfspr r11,SPRN_SRR0
471 clrrdi r12,r13,32 472 clrrdi r12,r13,32
472 oris r12,r12,system_call_common@h 473 oris r12,r12,system_call_common@h
473 ori r12,r12,system_call_common@l 474 ori r12,r12,system_call_common@l
474 mtspr SRR0,r12 475 mtspr SPRN_SRR0,r12
475 ori r10,r10,MSR_IR|MSR_DR|MSR_RI 476 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
476 mfspr r12,SRR1 477 mfspr r12,SPRN_SRR1
477 mtspr SRR1,r10 478 mtspr SPRN_SRR1,r10
478 rfid 479 rfid
479 b . /* prevent speculative execution */ 480 b . /* prevent speculative execution */
480 481
@@ -504,25 +505,25 @@ system_call_pSeries:
504 .align 7 505 .align 7
505_GLOBAL(do_stab_bolted_pSeries) 506_GLOBAL(do_stab_bolted_pSeries)
506 mtcrf 0x80,r12 507 mtcrf 0x80,r12
507 mfspr r12,SPRG2 508 mfspr r12,SPRN_SPRG2
508 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 509 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
509 510
510/* 511/*
511 * Vectors for the FWNMI option. Share common code. 512 * Vectors for the FWNMI option. Share common code.
512 */ 513 */
513 .globl system_reset_fwnmi 514 .globl system_reset_fwnmi
514system_reset_fwnmi: 515system_reset_fwnmi:
515 HMT_MEDIUM 516 HMT_MEDIUM
516 mtspr SPRG1,r13 /* save r13 */ 517 mtspr SPRN_SPRG1,r13 /* save r13 */
517 RUNLATCH_ON(r13) 518 RUNLATCH_ON(r13)
518 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) 519 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
519 520
520 .globl machine_check_fwnmi 521 .globl machine_check_fwnmi
521machine_check_fwnmi: 522machine_check_fwnmi:
522 HMT_MEDIUM 523 HMT_MEDIUM
523 mtspr SPRG1,r13 /* save r13 */ 524 mtspr SPRN_SPRG1,r13 /* save r13 */
524 RUNLATCH_ON(r13) 525 RUNLATCH_ON(r13)
525 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 526 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
526 527
527#ifdef CONFIG_PPC_ISERIES 528#ifdef CONFIG_PPC_ISERIES
528/*** ISeries-LPAR interrupt handlers ***/ 529/*** ISeries-LPAR interrupt handlers ***/
@@ -531,18 +532,18 @@ machine_check_fwnmi:
531 532
532 .globl data_access_iSeries 533 .globl data_access_iSeries
533data_access_iSeries: 534data_access_iSeries:
534 mtspr SPRG1,r13 535 mtspr SPRN_SPRG1,r13
535BEGIN_FTR_SECTION 536BEGIN_FTR_SECTION
536 mtspr SPRG2,r12 537 mtspr SPRN_SPRG2,r12
537 mfspr r13,DAR 538 mfspr r13,SPRN_DAR
538 mfspr r12,DSISR 539 mfspr r12,SPRN_DSISR
539 srdi r13,r13,60 540 srdi r13,r13,60
540 rlwimi r13,r12,16,0x20 541 rlwimi r13,r12,16,0x20
541 mfcr r12 542 mfcr r12
542 cmpwi r13,0x2c 543 cmpwi r13,0x2c
543 beq .do_stab_bolted_iSeries 544 beq .do_stab_bolted_iSeries
544 mtcrf 0x80,r12 545 mtcrf 0x80,r12
545 mfspr r12,SPRG2 546 mfspr r12,SPRN_SPRG2
546END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 547END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
547 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) 548 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
548 EXCEPTION_PROLOG_ISERIES_2 549 EXCEPTION_PROLOG_ISERIES_2
@@ -550,25 +551,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
550 551
551.do_stab_bolted_iSeries: 552.do_stab_bolted_iSeries:
552 mtcrf 0x80,r12 553 mtcrf 0x80,r12
553 mfspr r12,SPRG2 554 mfspr r12,SPRN_SPRG2
554 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 555 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
555 EXCEPTION_PROLOG_ISERIES_2 556 EXCEPTION_PROLOG_ISERIES_2
556 b .do_stab_bolted 557 b .do_stab_bolted
557 558
558 .globl data_access_slb_iSeries 559 .globl data_access_slb_iSeries
559data_access_slb_iSeries: 560data_access_slb_iSeries:
560 mtspr SPRG1,r13 /* save r13 */ 561 mtspr SPRN_SPRG1,r13 /* save r13 */
561 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 562 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
562 std r3,PACA_EXSLB+EX_R3(r13) 563 std r3,PACA_EXSLB+EX_R3(r13)
563 ld r12,PACALPPACA+LPPACASRR1(r13) 564 ld r12,PACALPPACA+LPPACASRR1(r13)
564 mfspr r3,DAR 565 mfspr r3,SPRN_DAR
565 b .do_slb_miss 566 b .do_slb_miss
566 567
567 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) 568 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
568 569
569 .globl instruction_access_slb_iSeries 570 .globl instruction_access_slb_iSeries
570instruction_access_slb_iSeries: 571instruction_access_slb_iSeries:
571 mtspr SPRG1,r13 /* save r13 */ 572 mtspr SPRN_SPRG1,r13 /* save r13 */
572 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 573 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
573 std r3,PACA_EXSLB+EX_R3(r13) 574 std r3,PACA_EXSLB+EX_R3(r13)
574 ld r12,PACALPPACA+LPPACASRR1(r13) 575 ld r12,PACALPPACA+LPPACASRR1(r13)
@@ -586,7 +587,7 @@ instruction_access_slb_iSeries:
586 .globl system_call_iSeries 587 .globl system_call_iSeries
587system_call_iSeries: 588system_call_iSeries:
588 mr r9,r13 589 mr r9,r13
589 mfspr r13,SPRG3 590 mfspr r13,SPRN_SPRG3
590 EXCEPTION_PROLOG_ISERIES_2 591 EXCEPTION_PROLOG_ISERIES_2
591 b system_call_common 592 b system_call_common
592 593
@@ -596,7 +597,7 @@ system_call_iSeries:
596 597
597 .globl system_reset_iSeries 598 .globl system_reset_iSeries
598system_reset_iSeries: 599system_reset_iSeries:
599 mfspr r13,SPRG3 /* Get paca address */ 600 mfspr r13,SPRN_SPRG3 /* Get paca address */
600 mfmsr r24 601 mfmsr r24
601 ori r24,r24,MSR_RI 602 ori r24,r24,MSR_RI
602 mtmsrd r24 /* RI on */ 603 mtmsrd r24 /* RI on */
@@ -639,7 +640,7 @@ iSeries_secondary_smp_loop:
639#endif /* CONFIG_SMP */ 640#endif /* CONFIG_SMP */
640 li r0,-1 /* r0=-1 indicates a Hypervisor call */ 641 li r0,-1 /* r0=-1 indicates a Hypervisor call */
641 sc /* Invoke the hypervisor via a system call */ 642 sc /* Invoke the hypervisor via a system call */
642 mfspr r13,SPRG3 /* Put r13 back ???? */ 643 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
643 b 1b /* If SMP not configured, secondaries 644 b 1b /* If SMP not configured, secondaries
644 * loop forever */ 645 * loop forever */
645 646
@@ -656,8 +657,8 @@ hardware_interrupt_iSeries_masked:
656 mtcrf 0x80,r9 /* Restore regs */ 657 mtcrf 0x80,r9 /* Restore regs */
657 ld r11,PACALPPACA+LPPACASRR0(r13) 658 ld r11,PACALPPACA+LPPACASRR0(r13)
658 ld r12,PACALPPACA+LPPACASRR1(r13) 659 ld r12,PACALPPACA+LPPACASRR1(r13)
659 mtspr SRR0,r11 660 mtspr SPRN_SRR0,r11
660 mtspr SRR1,r12 661 mtspr SPRN_SRR1,r12
661 ld r9,PACA_EXGEN+EX_R9(r13) 662 ld r9,PACA_EXGEN+EX_R9(r13)
662 ld r10,PACA_EXGEN+EX_R10(r13) 663 ld r10,PACA_EXGEN+EX_R10(r13)
663 ld r11,PACA_EXGEN+EX_R11(r13) 664 ld r11,PACA_EXGEN+EX_R11(r13)
@@ -713,8 +714,8 @@ bad_stack:
713 std r10,GPR1(r1) 714 std r10,GPR1(r1)
714 std r11,_NIP(r1) 715 std r11,_NIP(r1)
715 std r12,_MSR(r1) 716 std r12,_MSR(r1)
716 mfspr r11,DAR 717 mfspr r11,SPRN_DAR
717 mfspr r12,DSISR 718 mfspr r12,SPRN_DSISR
718 std r11,_DAR(r1) 719 std r11,_DAR(r1)
719 std r12,_DSISR(r1) 720 std r12,_DSISR(r1)
720 mflr r10 721 mflr r10
@@ -746,6 +747,7 @@ bad_stack:
746 * any task or sent any task a signal, you should use 747 * any task or sent any task a signal, you should use
747 * ret_from_except or ret_from_except_lite instead of this. 748 * ret_from_except or ret_from_except_lite instead of this.
748 */ 749 */
750 .globl fast_exception_return
749fast_exception_return: 751fast_exception_return:
750 ld r12,_MSR(r1) 752 ld r12,_MSR(r1)
751 ld r11,_NIP(r1) 753 ld r11,_NIP(r1)
@@ -766,8 +768,8 @@ fast_exception_return:
766 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ 768 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
767 mtmsrd r10,1 769 mtmsrd r10,1
768 770
769 mtspr SRR1,r12 771 mtspr SPRN_SRR1,r12
770 mtspr SRR0,r11 772 mtspr SPRN_SRR0,r11
771 REST_4GPRS(10, r1) 773 REST_4GPRS(10, r1)
772 ld r1,GPR1(r1) 774 ld r1,GPR1(r1)
773 rfid 775 rfid
@@ -788,9 +790,9 @@ unrecov_fer:
788 .globl data_access_common 790 .globl data_access_common
789data_access_common: 791data_access_common:
790 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ 792 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
791 mfspr r10,DAR 793 mfspr r10,SPRN_DAR
792 std r10,PACA_EXGEN+EX_DAR(r13) 794 std r10,PACA_EXGEN+EX_DAR(r13)
793 mfspr r10,DSISR 795 mfspr r10,SPRN_DSISR
794 stw r10,PACA_EXGEN+EX_DSISR(r13) 796 stw r10,PACA_EXGEN+EX_DSISR(r13)
795 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) 797 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
796 ld r3,PACA_EXGEN+EX_DAR(r13) 798 ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -821,9 +823,9 @@ hardware_interrupt_entry:
821 .align 7 823 .align 7
822 .globl alignment_common 824 .globl alignment_common
823alignment_common: 825alignment_common:
824 mfspr r10,DAR 826 mfspr r10,SPRN_DAR
825 std r10,PACA_EXGEN+EX_DAR(r13) 827 std r10,PACA_EXGEN+EX_DAR(r13)
826 mfspr r10,DSISR 828 mfspr r10,SPRN_DSISR
827 stw r10,PACA_EXGEN+EX_DSISR(r13) 829 stw r10,PACA_EXGEN+EX_DSISR(r13)
828 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) 830 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
829 ld r3,PACA_EXGEN+EX_DAR(r13) 831 ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -857,62 +859,6 @@ fp_unavailable_common:
857 bl .kernel_fp_unavailable_exception 859 bl .kernel_fp_unavailable_exception
858 BUG_OPCODE 860 BUG_OPCODE
859 861
860/*
861 * load_up_fpu(unused, unused, tsk)
862 * Disable FP for the task which had the FPU previously,
863 * and save its floating-point registers in its thread_struct.
864 * Enables the FPU for use in the kernel on return.
865 * On SMP we know the fpu is free, since we give it up every
866 * switch (ie, no lazy save of the FP registers).
867 * On entry: r13 == 'current' && last_task_used_math != 'current'
868 */
869_STATIC(load_up_fpu)
870 mfmsr r5 /* grab the current MSR */
871 ori r5,r5,MSR_FP
872 mtmsrd r5 /* enable use of fpu now */
873 isync
874/*
875 * For SMP, we don't do lazy FPU switching because it just gets too
876 * horrendously complex, especially when a task switches from one CPU
877 * to another. Instead we call giveup_fpu in switch_to.
878 *
879 */
880#ifndef CONFIG_SMP
881 ld r3,last_task_used_math@got(r2)
882 ld r4,0(r3)
883 cmpdi 0,r4,0
884 beq 1f
885 /* Save FP state to last_task_used_math's THREAD struct */
886 addi r4,r4,THREAD
887 SAVE_32FPRS(0, r4)
888 mffs fr0
889 stfd fr0,THREAD_FPSCR(r4)
890 /* Disable FP for last_task_used_math */
891 ld r5,PT_REGS(r4)
892 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
893 li r6,MSR_FP|MSR_FE0|MSR_FE1
894 andc r4,r4,r6
895 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8961:
897#endif /* CONFIG_SMP */
898 /* enable use of FP after return */
899 ld r4,PACACURRENT(r13)
900 addi r5,r4,THREAD /* Get THREAD */
901 ld r4,THREAD_FPEXC_MODE(r5)
902 ori r12,r12,MSR_FP
903 or r12,r12,r4
904 std r12,_MSR(r1)
905 lfd fr0,THREAD_FPSCR(r5)
906 mtfsf 0xff,fr0
907 REST_32FPRS(0, r5)
908#ifndef CONFIG_SMP
909 /* Update last_task_used_math to 'current' */
910 subi r4,r5,THREAD /* Back to 'current' */
911 std r4,0(r3)
912#endif /* CONFIG_SMP */
913 /* restore registers and return */
914 b fast_exception_return
915
916 .align 7 862 .align 7
917 .globl altivec_unavailable_common 863 .globl altivec_unavailable_common
918altivec_unavailable_common: 864altivec_unavailable_common:
@@ -1120,7 +1066,7 @@ _GLOBAL(do_stab_bolted)
1120 1066
1121 /* Hash to the primary group */ 1067 /* Hash to the primary group */
1122 ld r10,PACASTABVIRT(r13) 1068 ld r10,PACASTABVIRT(r13)
1123 mfspr r11,DAR 1069 mfspr r11,SPRN_DAR
1124 srdi r11,r11,28 1070 srdi r11,r11,28
1125 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1071 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1126 1072
@@ -1162,7 +1108,7 @@ _GLOBAL(do_stab_bolted)
11622: std r9,8(r10) /* Store the vsid part of the ste */ 11082: std r9,8(r10) /* Store the vsid part of the ste */
1163 eieio 1109 eieio
1164 1110
1165 mfspr r11,DAR /* Get the new esid */ 1111 mfspr r11,SPRN_DAR /* Get the new esid */
1166 clrrdi r11,r11,28 /* Permits a full 32b of ESID */ 1112 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1167 ori r11,r11,0x90 /* Turn on valid and kp */ 1113 ori r11,r11,0x90 /* Turn on valid and kp */
1168 std r11,0(r10) /* Put new entry back into the stab */ 1114 std r11,0(r10) /* Put new entry back into the stab */
@@ -1182,8 +1128,8 @@ _GLOBAL(do_stab_bolted)
1182 clrrdi r10,r10,2 1128 clrrdi r10,r10,2
1183 mtmsrd r10,1 1129 mtmsrd r10,1
1184 1130
1185 mtspr SRR0,r11 1131 mtspr SPRN_SRR0,r11
1186 mtspr SRR1,r12 1132 mtspr SPRN_SRR1,r12
1187 ld r9,PACA_EXSLB+EX_R9(r13) 1133 ld r9,PACA_EXSLB+EX_R9(r13)
1188 ld r10,PACA_EXSLB+EX_R10(r13) 1134 ld r10,PACA_EXSLB+EX_R10(r13)
1189 ld r11,PACA_EXSLB+EX_R11(r13) 1135 ld r11,PACA_EXSLB+EX_R11(r13)
@@ -1229,8 +1175,8 @@ _GLOBAL(do_slb_miss)
1229.machine pop 1175.machine pop
1230 1176
1231#ifdef CONFIG_PPC_ISERIES 1177#ifdef CONFIG_PPC_ISERIES
1232 mtspr SRR0,r11 1178 mtspr SPRN_SRR0,r11
1233 mtspr SRR1,r12 1179 mtspr SPRN_SRR1,r12
1234#endif /* CONFIG_PPC_ISERIES */ 1180#endif /* CONFIG_PPC_ISERIES */
1235 ld r9,PACA_EXSLB+EX_R9(r13) 1181 ld r9,PACA_EXSLB+EX_R9(r13)
1236 ld r10,PACA_EXSLB+EX_R10(r13) 1182 ld r10,PACA_EXSLB+EX_R10(r13)
@@ -1253,7 +1199,7 @@ unrecov_slb:
1253 * 1199 *
1254 * On iSeries, the hypervisor must fill in at least one entry before 1200 * On iSeries, the hypervisor must fill in at least one entry before
1255 * we get control (with relocate on). The address is give to the hv 1201 * we get control (with relocate on). The address is give to the hv
1256 * as a page number (see xLparMap in LparData.c), so this must be at a 1202 * as a page number (see xLparMap in lpardata.c), so this must be at a
1257 * fixed address (the linker can't compute (u64)&initial_stab >> 1203 * fixed address (the linker can't compute (u64)&initial_stab >>
1258 * PAGE_SHIFT). 1204 * PAGE_SHIFT).
1259 */ 1205 */
@@ -1316,7 +1262,7 @@ _GLOBAL(pSeries_secondary_smp_init)
1316 mr r3,r24 /* not found, copy phys to r3 */ 1262 mr r3,r24 /* not found, copy phys to r3 */
1317 b .kexec_wait /* next kernel might do better */ 1263 b .kexec_wait /* next kernel might do better */
1318 1264
13192: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 12652: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1320 /* From now on, r24 is expected to be logical cpuid */ 1266 /* From now on, r24 is expected to be logical cpuid */
1321 mr r24,r5 1267 mr r24,r5
13223: HMT_LOW 12683: HMT_LOW
@@ -1364,6 +1310,7 @@ _STATIC(__start_initialization_iSeries)
1364 addi r2,r2,0x4000 1310 addi r2,r2,0x4000
1365 1311
1366 bl .iSeries_early_setup 1312 bl .iSeries_early_setup
1313 bl .early_setup
1367 1314
1368 /* relocation is on at this point */ 1315 /* relocation is on at this point */
1369 1316
@@ -1554,20 +1501,17 @@ copy_to_here:
1554 .section ".text"; 1501 .section ".text";
1555 .align 2 ; 1502 .align 2 ;
1556 1503
1557 .globl pmac_secondary_start_1 1504 .globl __secondary_start_pmac_0
1558pmac_secondary_start_1: 1505__secondary_start_pmac_0:
1559 li r24, 1 1506 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1560 b .pmac_secondary_start 1507 li r24,0
1561 1508 b 1f
1562 .globl pmac_secondary_start_2 1509 li r24,1
1563pmac_secondary_start_2: 1510 b 1f
1564 li r24, 2 1511 li r24,2
1565 b .pmac_secondary_start 1512 b 1f
1566 1513 li r24,3
1567 .globl pmac_secondary_start_3 15141:
1568pmac_secondary_start_3:
1569 li r24, 3
1570 b .pmac_secondary_start
1571 1515
1572_GLOBAL(pmac_secondary_start) 1516_GLOBAL(pmac_secondary_start)
1573 /* turn on 64-bit mode */ 1517 /* turn on 64-bit mode */
@@ -1586,7 +1530,7 @@ _GLOBAL(pmac_secondary_start)
1586 LOADADDR(r4, paca) /* Get base vaddr of paca array */ 1530 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1587 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 1531 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1588 add r13,r13,r4 /* for this processor. */ 1532 add r13,r13,r4 /* for this processor. */
1589 mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1533 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1590 1534
1591 /* Create a temp kernel stack for use before relocation is on. */ 1535 /* Create a temp kernel stack for use before relocation is on. */
1592 ld r1,PACAEMERGSP(r13) 1536 ld r1,PACAEMERGSP(r13)
@@ -1621,7 +1565,7 @@ _GLOBAL(__secondary_start)
1621 /* Initialize the page table pointer register. */ 1565 /* Initialize the page table pointer register. */
1622 LOADADDR(r6,_SDR1) 1566 LOADADDR(r6,_SDR1)
1623 ld r6,0(r6) /* get the value of _SDR1 */ 1567 ld r6,0(r6) /* get the value of _SDR1 */
1624 mtspr SDR1,r6 /* set the htab location */ 1568 mtspr SPRN_SDR1,r6 /* set the htab location */
1625#endif 1569#endif
1626 /* Initialize the first segment table (or SLB) entry */ 1570 /* Initialize the first segment table (or SLB) entry */
1627 ld r3,PACASTABVIRT(r13) /* get addr of segment table */ 1571 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
@@ -1650,7 +1594,7 @@ _GLOBAL(__secondary_start)
1650 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1594 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1651 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ 1595 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1652 beq 98f /* branch if result is 0 */ 1596 beq 98f /* branch if result is 0 */
1653 mfspr r3,PVR 1597 mfspr r3,SPRN_PVR
1654 srwi r3,r3,16 1598 srwi r3,r3,16
1655 cmpwi r3,0x37 /* SStar */ 1599 cmpwi r3,0x37 /* SStar */
1656 beq 97f 1600 beq 97f
@@ -1674,8 +1618,8 @@ _GLOBAL(__secondary_start)
1674#ifdef DO_SOFT_DISABLE 1618#ifdef DO_SOFT_DISABLE
1675 ori r4,r4,MSR_EE 1619 ori r4,r4,MSR_EE
1676#endif 1620#endif
1677 mtspr SRR0,r3 1621 mtspr SPRN_SRR0,r3
1678 mtspr SRR1,r4 1622 mtspr SPRN_SRR1,r4
1679 rfid 1623 rfid
1680 b . /* prevent speculative execution */ 1624 b . /* prevent speculative execution */
1681 1625
@@ -1737,7 +1681,7 @@ _STATIC(start_here_multiplatform)
1737 1681
1738#ifdef CONFIG_HMT 1682#ifdef CONFIG_HMT
1739 /* Start up the second thread on cpu 0 */ 1683 /* Start up the second thread on cpu 0 */
1740 mfspr r3,PVR 1684 mfspr r3,SPRN_PVR
1741 srwi r3,r3,16 1685 srwi r3,r3,16
1742 cmpwi r3,0x34 /* Pulsar */ 1686 cmpwi r3,0x34 /* Pulsar */
1743 beq 90f 1687 beq 90f
@@ -1797,7 +1741,7 @@ _STATIC(start_here_multiplatform)
1797 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ 1741 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1798 add r13,r13,r24 /* for this processor. */ 1742 add r13,r13,r24 /* for this processor. */
1799 sub r13,r13,r26 /* convert to physical addr */ 1743 sub r13,r13,r26 /* convert to physical addr */
1800 mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */ 1744 mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
1801 1745
1802 /* Do very early kernel initializations, including initial hash table, 1746 /* Do very early kernel initializations, including initial hash table,
1803 * stab and slb setup before we turn on relocation. */ 1747 * stab and slb setup before we turn on relocation. */
@@ -1814,7 +1758,7 @@ _STATIC(start_here_multiplatform)
1814 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1758 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1815 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ 1759 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1816 beq 98f /* branch if result is 0 */ 1760 beq 98f /* branch if result is 0 */
1817 mfspr r3,PVR 1761 mfspr r3,SPRN_PVR
1818 srwi r3,r3,16 1762 srwi r3,r3,16
1819 cmpwi r3,0x37 /* SStar */ 1763 cmpwi r3,0x37 /* SStar */
1820 beq 97f 1764 beq 97f
@@ -1838,12 +1782,12 @@ _STATIC(start_here_multiplatform)
1838 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ 1782 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1839 sub r6,r6,r26 1783 sub r6,r6,r26
1840 ld r6,0(r6) /* get the value of _SDR1 */ 1784 ld r6,0(r6) /* get the value of _SDR1 */
1841 mtspr SDR1,r6 /* set the htab location */ 1785 mtspr SPRN_SDR1,r6 /* set the htab location */
184298: 178698:
1843 LOADADDR(r3,.start_here_common) 1787 LOADADDR(r3,.start_here_common)
1844 SET_REG_TO_CONST(r4, MSR_KERNEL) 1788 SET_REG_TO_CONST(r4, MSR_KERNEL)
1845 mtspr SRR0,r3 1789 mtspr SPRN_SRR0,r3
1846 mtspr SRR1,r4 1790 mtspr SPRN_SRR1,r4
1847 rfid 1791 rfid
1848 b . /* prevent speculative execution */ 1792 b . /* prevent speculative execution */
1849#endif /* CONFIG_PPC_MULTIPLATFORM */ 1793#endif /* CONFIG_PPC_MULTIPLATFORM */
@@ -1874,7 +1818,7 @@ _STATIC(start_here_common)
1874 LOADADDR(r24, paca) /* Get base vaddr of paca array */ 1818 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1875 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ 1819 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1876 add r13,r13,r24 /* for this processor. */ 1820 add r13,r13,r24 /* for this processor. */
1877 mtspr SPRG3,r13 1821 mtspr SPRN_SPRG3,r13
1878 1822
1879 /* ptr to current */ 1823 /* ptr to current */
1880 LOADADDR(r4,init_task) 1824 LOADADDR(r4,init_task)
@@ -1901,7 +1845,7 @@ _STATIC(start_here_common)
1901_GLOBAL(hmt_init) 1845_GLOBAL(hmt_init)
1902#ifdef CONFIG_HMT 1846#ifdef CONFIG_HMT
1903 LOADADDR(r5, hmt_thread_data) 1847 LOADADDR(r5, hmt_thread_data)
1904 mfspr r7,PVR 1848 mfspr r7,SPRN_PVR
1905 srwi r7,r7,16 1849 srwi r7,r7,16
1906 cmpwi r7,0x34 /* Pulsar */ 1850 cmpwi r7,0x34 /* Pulsar */
1907 beq 90f 1851 beq 90f
@@ -1910,10 +1854,10 @@ _GLOBAL(hmt_init)
1910 cmpwi r7,0x37 /* SStar */ 1854 cmpwi r7,0x37 /* SStar */
1911 beq 91f 1855 beq 91f
1912 b 101f 1856 b 101f
191390: mfspr r6,PIR 185790: mfspr r6,SPRN_PIR
1914 andi. r6,r6,0x1f 1858 andi. r6,r6,0x1f
1915 b 92f 1859 b 92f
191691: mfspr r6,PIR 186091: mfspr r6,SPRN_PIR
1917 andi. r6,r6,0x3ff 1861 andi. r6,r6,0x3ff
191892: sldi r4,r24,3 186292: sldi r4,r24,3
1919 stwx r6,r5,r4 1863 stwx r6,r5,r4
@@ -1924,8 +1868,8 @@ __hmt_secondary_hold:
1924 LOADADDR(r5, hmt_thread_data) 1868 LOADADDR(r5, hmt_thread_data)
1925 clrldi r5,r5,4 1869 clrldi r5,r5,4
1926 li r7,0 1870 li r7,0
1927 mfspr r6,PIR 1871 mfspr r6,SPRN_PIR
1928 mfspr r8,PVR 1872 mfspr r8,SPRN_PVR
1929 srwi r8,r8,16 1873 srwi r8,r8,16
1930 cmpwi r8,0x34 1874 cmpwi r8,0x34
1931 bne 93f 1875 bne 93f
@@ -1951,39 +1895,41 @@ __hmt_secondary_hold:
1951_GLOBAL(hmt_start_secondary) 1895_GLOBAL(hmt_start_secondary)
1952 LOADADDR(r4,__hmt_secondary_hold) 1896 LOADADDR(r4,__hmt_secondary_hold)
1953 clrldi r4,r4,4 1897 clrldi r4,r4,4
1954 mtspr NIADORM, r4 1898 mtspr SPRN_NIADORM, r4
1955 mfspr r4, MSRDORM 1899 mfspr r4, SPRN_MSRDORM
1956 li r5, -65 1900 li r5, -65
1957 and r4, r4, r5 1901 and r4, r4, r5
1958 mtspr MSRDORM, r4 1902 mtspr SPRN_MSRDORM, r4
1959 lis r4,0xffef 1903 lis r4,0xffef
1960 ori r4,r4,0x7403 1904 ori r4,r4,0x7403
1961 mtspr TSC, r4 1905 mtspr SPRN_TSC, r4
1962 li r4,0x1f4 1906 li r4,0x1f4
1963 mtspr TST, r4 1907 mtspr SPRN_TST, r4
1964 mfspr r4, HID0 1908 mfspr r4, SPRN_HID0
1965 ori r4, r4, 0x1 1909 ori r4, r4, 0x1
1966 mtspr HID0, r4 1910 mtspr SPRN_HID0, r4
1967 mfspr r4, SPRN_CTRLF 1911 mfspr r4, SPRN_CTRLF
1968 oris r4, r4, 0x40 1912 oris r4, r4, 0x40
1969 mtspr SPRN_CTRLT, r4 1913 mtspr SPRN_CTRLT, r4
1970 blr 1914 blr
1971#endif 1915#endif
1972 1916
1973#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)) 1917#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
1974_GLOBAL(smp_release_cpus) 1918_GLOBAL(smp_release_cpus)
1975 /* All secondary cpus are spinning on a common 1919 /* All secondary cpus are spinning on a common
1976 * spinloop, release them all now so they can start 1920 * spinloop, release them all now so they can start
1977 * to spin on their individual paca spinloops. 1921 * to spin on their individual paca spinloops.
1978 * For non SMP kernels, the secondary cpus never 1922 * For non SMP kernels, the secondary cpus never
1979 * get out of the common spinloop. 1923 * get out of the common spinloop.
1924 * XXX This does nothing useful on iSeries, secondaries are
1925 * already waiting on their paca.
1980 */ 1926 */
1981 li r3,1 1927 li r3,1
1982 LOADADDR(r5,__secondary_hold_spinloop) 1928 LOADADDR(r5,__secondary_hold_spinloop)
1983 std r3,0(r5) 1929 std r3,0(r5)
1984 sync 1930 sync
1985 blr 1931 blr
1986#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */ 1932#endif /* CONFIG_SMP */
1987 1933
1988 1934
1989/* 1935/*
@@ -1992,7 +1938,7 @@ _GLOBAL(smp_release_cpus)
1992 */ 1938 */
1993 .section ".bss" 1939 .section ".bss"
1994 1940
1995 .align 12 1941 .align PAGE_SHIFT
1996 1942
1997 .globl empty_zero_page 1943 .globl empty_zero_page
1998empty_zero_page: 1944empty_zero_page:
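
[Editor's sketch] The smp_release_cpus hunk earlier in this head.S diff widens the guard to any SMP or kexec kernel; as its comment says, every secondary CPU spins on one shared flag until the boot CPU releases it, after which each moves to its own paca spinloop. A minimal C rendering of the release side shown in assembly (li r3,1 / std r3,0(r5) / sync); the function name, the C form, and the mb() barrier choice are illustrative assumptions, not code from this patch:

	/* Illustrative sketch only -- the patch implements this in assembly.
	 * __secondary_hold_spinloop is the shared flag the secondaries poll. */
	extern unsigned long __secondary_hold_spinloop;

	static void smp_release_cpus_sketch(void)
	{
		__secondary_hold_spinloop = 1;	/* li r3,1 ; std r3,0(r5) */
		mb();				/* sync: make the store visible */
	}
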
diff --git a/arch/ppc64/kernel/hvcserver.c b/arch/ppc64/kernel/hvcserver.c
index bde8f42da854..4d584172055a 100644
--- a/arch/ppc64/kernel/hvcserver.c
+++ b/arch/ppc64/kernel/hvcserver.c
@@ -22,6 +22,8 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h>
26
25#include <asm/hvcall.h> 27#include <asm/hvcall.h>
26#include <asm/hvcserver.h> 28#include <asm/hvcserver.h>
27#include <asm/io.h> 29#include <asm/io.h>
diff --git a/arch/ppc64/kernel/i8259.c b/arch/ppc64/kernel/i8259.c
deleted file mode 100644
index 74dcfd68fc75..000000000000
--- a/arch/ppc64/kernel/i8259.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * c 2001 PPC64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/stddef.h>
10#include <linux/init.h>
11#include <linux/sched.h>
12#include <linux/signal.h>
13#include <linux/cache.h>
14#include <linux/irq.h>
15#include <linux/interrupt.h>
16#include <asm/io.h>
17#include <asm/ppcdebug.h>
18#include "i8259.h"
19
20unsigned char cached_8259[2] = { 0xff, 0xff };
21#define cached_A1 (cached_8259[0])
22#define cached_21 (cached_8259[1])
23
24static __cacheline_aligned_in_smp DEFINE_SPINLOCK(i8259_lock);
25
26static int i8259_pic_irq_offset;
27static int i8259_present;
28
29int i8259_irq(int cpu)
30{
31 int irq;
32
33 spin_lock/*_irqsave*/(&i8259_lock/*, flags*/);
34 /*
35 * Perform an interrupt acknowledge cycle on controller 1
36 */
37 outb(0x0C, 0x20);
38 irq = inb(0x20) & 7;
39 if (irq == 2)
40 {
41 /*
42 * Interrupt is cascaded so perform interrupt
43 * acknowledge on controller 2
44 */
45 outb(0x0C, 0xA0);
46 irq = (inb(0xA0) & 7) + 8;
47 }
48 else if (irq==7)
49 {
50 /*
51 * This may be a spurious interrupt
52 *
53 * Read the interrupt status register. If the most
54 * significant bit is not set then there is no valid
55 * interrupt
56 */
57 outb(0x0b, 0x20);
58 if(~inb(0x20)&0x80) {
59 spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
60 return -1;
61 }
62 }
63 spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
64 return irq;
65}
66
67static void i8259_mask_and_ack_irq(unsigned int irq_nr)
68{
69 unsigned long flags;
70
71 spin_lock_irqsave(&i8259_lock, flags);
72 if ( irq_nr >= i8259_pic_irq_offset )
73 irq_nr -= i8259_pic_irq_offset;
74
75 if (irq_nr > 7) {
76 cached_A1 |= 1 << (irq_nr-8);
77 inb(0xA1); /* DUMMY */
78 outb(cached_A1,0xA1);
79 outb(0x20,0xA0); /* Non-specific EOI */
80 outb(0x20,0x20); /* Non-specific EOI to cascade */
81 } else {
82 cached_21 |= 1 << irq_nr;
83 inb(0x21); /* DUMMY */
84 outb(cached_21,0x21);
85 outb(0x20,0x20); /* Non-specific EOI */
86 }
87 spin_unlock_irqrestore(&i8259_lock, flags);
88}
89
90static void i8259_set_irq_mask(int irq_nr)
91{
92 outb(cached_A1,0xA1);
93 outb(cached_21,0x21);
94}
95
96static void i8259_mask_irq(unsigned int irq_nr)
97{
98 unsigned long flags;
99
100 spin_lock_irqsave(&i8259_lock, flags);
101 if ( irq_nr >= i8259_pic_irq_offset )
102 irq_nr -= i8259_pic_irq_offset;
103 if ( irq_nr < 8 )
104 cached_21 |= 1 << irq_nr;
105 else
106 cached_A1 |= 1 << (irq_nr-8);
107 i8259_set_irq_mask(irq_nr);
108 spin_unlock_irqrestore(&i8259_lock, flags);
109}
110
111static void i8259_unmask_irq(unsigned int irq_nr)
112{
113 unsigned long flags;
114
115 spin_lock_irqsave(&i8259_lock, flags);
116 if ( irq_nr >= i8259_pic_irq_offset )
117 irq_nr -= i8259_pic_irq_offset;
118 if ( irq_nr < 8 )
119 cached_21 &= ~(1 << irq_nr);
120 else
121 cached_A1 &= ~(1 << (irq_nr-8));
122 i8259_set_irq_mask(irq_nr);
123 spin_unlock_irqrestore(&i8259_lock, flags);
124}
125
126static void i8259_end_irq(unsigned int irq)
127{
128 if (!(get_irq_desc(irq)->status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
129 get_irq_desc(irq)->action)
130 i8259_unmask_irq(irq);
131}
132
133struct hw_interrupt_type i8259_pic = {
134 .typename = " i8259 ",
135 .enable = i8259_unmask_irq,
136 .disable = i8259_mask_irq,
137 .ack = i8259_mask_and_ack_irq,
138 .end = i8259_end_irq,
139};
140
141void __init i8259_init(int offset)
142{
143 unsigned long flags;
144
145 spin_lock_irqsave(&i8259_lock, flags);
146 i8259_pic_irq_offset = offset;
147 i8259_present = 1;
148 /* init master interrupt controller */
149 outb(0x11, 0x20); /* Start init sequence */
150 outb(0x00, 0x21); /* Vector base */
151 outb(0x04, 0x21); /* edge tiggered, Cascade (slave) on IRQ2 */
152 outb(0x01, 0x21); /* Select 8086 mode */
153 outb(0xFF, 0x21); /* Mask all */
154 /* init slave interrupt controller */
155 outb(0x11, 0xA0); /* Start init sequence */
156 outb(0x08, 0xA1); /* Vector base */
157 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
158 outb(0x01, 0xA1); /* Select 8086 mode */
159 outb(0xFF, 0xA1); /* Mask all */
160 outb(cached_A1, 0xA1);
161 outb(cached_21, 0x21);
162 spin_unlock_irqrestore(&i8259_lock, flags);
163
164}
165
166static int i8259_request_cascade(void)
167{
168 if (!i8259_present)
169 return -ENODEV;
170
171 request_irq( i8259_pic_irq_offset + 2, no_action, SA_INTERRUPT,
172 "82c59 secondary cascade", NULL );
173
174 return 0;
175}
176
177arch_initcall(i8259_request_cascade);
diff --git a/arch/ppc64/kernel/i8259.h b/arch/ppc64/kernel/i8259.h
deleted file mode 100644
index f74764ba0bfa..000000000000
--- a/arch/ppc64/kernel/i8259.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef _PPC_KERNEL_i8259_H
10#define _PPC_KERNEL_i8259_H
11
12extern struct hw_interrupt_type i8259_pic;
13
14extern void i8259_init(int offset);
15extern int i8259_irq(int);
16
17#endif /* _PPC_KERNEL_i8259_H */
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 954395d42636..8abd2ad92832 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -31,7 +31,7 @@
31 31
32extern void power4_idle(void); 32extern void power4_idle(void);
33 33
34int default_idle(void) 34void default_idle(void)
35{ 35{
36 long oldval; 36 long oldval;
37 unsigned int cpu = smp_processor_id(); 37 unsigned int cpu = smp_processor_id();
@@ -64,11 +64,9 @@ int default_idle(void)
64 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) 64 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
65 cpu_die(); 65 cpu_die();
66 } 66 }
67
68 return 0;
69} 67}
70 68
71int native_idle(void) 69void native_idle(void)
72{ 70{
73 while (1) { 71 while (1) {
74 ppc64_runlatch_off(); 72 ppc64_runlatch_off();
@@ -85,8 +83,6 @@ int native_idle(void)
85 system_state == SYSTEM_RUNNING) 83 system_state == SYSTEM_RUNNING)
86 cpu_die(); 84 cpu_die();
87 } 85 }
88
89 return 0;
90} 86}
91 87
92void cpu_idle(void) 88void cpu_idle(void)
diff --git a/arch/ppc64/kernel/ioctl32.c b/arch/ppc64/kernel/ioctl32.c
index a8005db23ec5..ba4a899045c2 100644
--- a/arch/ppc64/kernel/ioctl32.c
+++ b/arch/ppc64/kernel/ioctl32.c
@@ -39,9 +39,7 @@ IOCTL_TABLE_START
39#include <linux/compat_ioctl.h> 39#include <linux/compat_ioctl.h>
40#define DECLARES 40#define DECLARES
41#include "compat_ioctl.c" 41#include "compat_ioctl.c"
42COMPATIBLE_IOCTL(TIOCSTART) 42
43COMPATIBLE_IOCTL(TIOCSTOP)
44COMPATIBLE_IOCTL(TIOCSLTC)
45/* Little p (/dev/rtc, /dev/envctrl, etc.) */ 43/* Little p (/dev/rtc, /dev/envctrl, etc.) */
46COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */ 44COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
47COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */ 45COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 9c6facc24f70..ed876a5178ae 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -395,7 +395,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
395 if (post_kprobe_handler(args->regs)) 395 if (post_kprobe_handler(args->regs))
396 ret = NOTIFY_STOP; 396 ret = NOTIFY_STOP;
397 break; 397 break;
398 case DIE_GPF:
399 case DIE_PAGE_FAULT: 398 case DIE_PAGE_FAULT:
400 if (kprobe_running() && 399 if (kprobe_running() &&
401 kprobe_fault_handler(args->regs, args->trapnr)) 400 kprobe_fault_handler(args->regs, args->trapnr))
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index e7241ad80a08..077507ffbab8 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -28,6 +28,7 @@
28#include <asm/ppc_asm.h> 28#include <asm/ppc_asm.h>
29#include <asm/asm-offsets.h> 29#include <asm/asm-offsets.h>
30#include <asm/cputable.h> 30#include <asm/cputable.h>
31#include <asm/thread_info.h>
31 32
32 .text 33 .text
33 34
@@ -64,44 +65,6 @@ _GLOBAL(get_srr1)
64_GLOBAL(get_sp) 65_GLOBAL(get_sp)
65 mr r3,r1 66 mr r3,r1
66 blr 67 blr
67
68#ifdef CONFIG_PPC_ISERIES
69/* unsigned long local_save_flags(void) */
70_GLOBAL(local_get_flags)
71 lbz r3,PACAPROCENABLED(r13)
72 blr
73
74/* unsigned long local_irq_disable(void) */
75_GLOBAL(local_irq_disable)
76 lbz r3,PACAPROCENABLED(r13)
77 li r4,0
78 stb r4,PACAPROCENABLED(r13)
79 blr /* Done */
80
81/* void local_irq_restore(unsigned long flags) */
82_GLOBAL(local_irq_restore)
83 lbz r5,PACAPROCENABLED(r13)
84 /* Check if things are setup the way we want _already_. */
85 cmpw 0,r3,r5
86 beqlr
87 /* are we enabling interrupts? */
88 cmpdi 0,r3,0
89 stb r3,PACAPROCENABLED(r13)
90 beqlr
91 /* Check pending interrupts */
92 /* A decrementer, IPI or PMC interrupt may have occurred
93 * while we were in the hypervisor (which enables) */
94 ld r4,PACALPPACA+LPPACAANYINT(r13)
95 cmpdi r4,0
96 beqlr
97
98 /*
99 * Handle pending interrupts in interrupt context
100 */
101 li r0,0x5555
102 sc
103 blr
104#endif /* CONFIG_PPC_ISERIES */
105 68
106#ifdef CONFIG_IRQSTACKS 69#ifdef CONFIG_IRQSTACKS
107_GLOBAL(call_do_softirq) 70_GLOBAL(call_do_softirq)
@@ -329,7 +292,7 @@ _GLOBAL(__flush_dcache_icache)
329 292
330/* Flush the dcache */ 293/* Flush the dcache */
331 ld r7,PPC64_CACHES@toc(r2) 294 ld r7,PPC64_CACHES@toc(r2)
332 clrrdi r3,r3,12 /* Page align */ 295 clrrdi r3,r3,PAGE_SHIFT /* Page align */
333 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ 296 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
334 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ 297 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
335 mr r6,r3 298 mr r6,r3
@@ -488,25 +451,6 @@ _GLOBAL(_outsl_ns)
488 sync 451 sync
489 blr 452 blr
490 453
491
492_GLOBAL(cvt_fd)
493 lfd 0,0(r5) /* load up fpscr value */
494 mtfsf 0xff,0
495 lfs 0,0(r3)
496 stfd 0,0(r4)
497 mffs 0 /* save new fpscr value */
498 stfd 0,0(r5)
499 blr
500
501_GLOBAL(cvt_df)
502 lfd 0,0(r5) /* load up fpscr value */
503 mtfsf 0xff,0
504 lfd 0,0(r3)
505 stfs 0,0(r4)
506 mffs 0 /* save new fpscr value */
507 stfd 0,0(r5)
508 blr
509
510/* 454/*
511 * identify_cpu and calls setup_cpu 455 * identify_cpu and calls setup_cpu
512 * In: r3 = base of the cpu_specs array 456 * In: r3 = base of the cpu_specs array
@@ -692,38 +636,6 @@ _GLOBAL(disable_kernel_fp)
692 isync 636 isync
693 blr 637 blr
694 638
695/*
696 * giveup_fpu(tsk)
697 * Disable FP for the task given as the argument,
698 * and save the floating-point registers in its thread_struct.
699 * Enables the FPU for use in the kernel on return.
700 */
701_GLOBAL(giveup_fpu)
702 mfmsr r5
703 ori r5,r5,MSR_FP
704 mtmsrd r5 /* enable use of fpu now */
705 isync
706 cmpdi 0,r3,0
707 beqlr- /* if no previous owner, done */
708 addi r3,r3,THREAD /* want THREAD of task */
709 ld r5,PT_REGS(r3)
710 cmpdi 0,r5,0
711 SAVE_32FPRS(0, r3)
712 mffs fr0
713 stfd fr0,THREAD_FPSCR(r3)
714 beq 1f
715 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
716 li r3,MSR_FP|MSR_FE0|MSR_FE1
717 andc r4,r4,r3 /* disable FP for previous task */
718 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7191:
720#ifndef CONFIG_SMP
721 li r5,0
722 ld r4,last_task_used_math@got(r2)
723 std r5,0(r4)
724#endif /* CONFIG_SMP */
725 blr
726
727#ifdef CONFIG_ALTIVEC 639#ifdef CONFIG_ALTIVEC
728 640
729#if 0 /* this has no callers for now */ 641#if 0 /* this has no callers for now */
@@ -778,6 +690,13 @@ _GLOBAL(giveup_altivec)
778_GLOBAL(__setup_cpu_power3) 690_GLOBAL(__setup_cpu_power3)
779 blr 691 blr
780 692
693_GLOBAL(execve)
694 li r0,__NR_execve
695 sc
696 bnslr
697 neg r3,r3
698 blr
699
781/* kexec_wait(phys_cpu) 700/* kexec_wait(phys_cpu)
782 * 701 *
783 * wait for the flag to change, indicating this kernel is going away but 702 * wait for the flag to change, indicating this kernel is going away but
@@ -948,566 +867,3 @@ _GLOBAL(kexec_sequence)
948 li r5,0 867 li r5,0
949 blr /* image->start(physid, image->start, 0); */ 868 blr /* image->start(physid, image->start, 0); */
950#endif /* CONFIG_KEXEC */ 869#endif /* CONFIG_KEXEC */
951
952/* Why isn't this a) automatic, b) written in 'C'? */
953 .balign 8
954_GLOBAL(sys_call_table32)
955 .llong .sys_restart_syscall /* 0 */
956 .llong .sys_exit
957 .llong .ppc_fork
958 .llong .sys_read
959 .llong .sys_write
960 .llong .compat_sys_open /* 5 */
961 .llong .sys_close
962 .llong .sys32_waitpid
963 .llong .sys32_creat
964 .llong .sys_link
965 .llong .sys_unlink /* 10 */
966 .llong .sys32_execve
967 .llong .sys_chdir
968 .llong .compat_sys_time
969 .llong .sys_mknod
970 .llong .sys_chmod /* 15 */
971 .llong .sys_lchown
972 .llong .sys_ni_syscall /* old break syscall */
973 .llong .sys_ni_syscall /* old stat syscall */
974 .llong .ppc32_lseek
975 .llong .sys_getpid /* 20 */
976 .llong .compat_sys_mount
977 .llong .sys_oldumount
978 .llong .sys_setuid
979 .llong .sys_getuid
980 .llong .compat_sys_stime /* 25 */
981 .llong .sys32_ptrace
982 .llong .sys_alarm
983 .llong .sys_ni_syscall /* old fstat syscall */
984 .llong .sys32_pause
985 .llong .compat_sys_utime /* 30 */
986 .llong .sys_ni_syscall /* old stty syscall */
987 .llong .sys_ni_syscall /* old gtty syscall */
988 .llong .sys32_access
989 .llong .sys32_nice
990 .llong .sys_ni_syscall /* 35 - old ftime syscall */
991 .llong .sys_sync
992 .llong .sys32_kill
993 .llong .sys_rename
994 .llong .sys32_mkdir
995 .llong .sys_rmdir /* 40 */
996 .llong .sys_dup
997 .llong .sys_pipe
998 .llong .compat_sys_times
999 .llong .sys_ni_syscall /* old prof syscall */
1000 .llong .sys_brk /* 45 */
1001 .llong .sys_setgid
1002 .llong .sys_getgid
1003 .llong .sys_signal
1004 .llong .sys_geteuid
1005 .llong .sys_getegid /* 50 */
1006 .llong .sys_acct
1007 .llong .sys_umount
1008 .llong .sys_ni_syscall /* old lock syscall */
1009 .llong .compat_sys_ioctl
1010 .llong .compat_sys_fcntl /* 55 */
1011 .llong .sys_ni_syscall /* old mpx syscall */
1012 .llong .sys32_setpgid
1013 .llong .sys_ni_syscall /* old ulimit syscall */
1014 .llong .sys32_olduname
1015 .llong .sys32_umask /* 60 */
1016 .llong .sys_chroot
1017 .llong .sys_ustat
1018 .llong .sys_dup2
1019 .llong .sys_getppid
1020 .llong .sys_getpgrp /* 65 */
1021 .llong .sys_setsid
1022 .llong .sys32_sigaction
1023 .llong .sys_sgetmask
1024 .llong .sys32_ssetmask
1025 .llong .sys_setreuid /* 70 */
1026 .llong .sys_setregid
1027 .llong .ppc32_sigsuspend
1028 .llong .compat_sys_sigpending
1029 .llong .sys32_sethostname
1030 .llong .compat_sys_setrlimit /* 75 */
1031 .llong .compat_sys_old_getrlimit
1032 .llong .compat_sys_getrusage
1033 .llong .sys32_gettimeofday
1034 .llong .sys32_settimeofday
1035 .llong .sys32_getgroups /* 80 */
1036 .llong .sys32_setgroups
1037 .llong .sys_ni_syscall /* old select syscall */
1038 .llong .sys_symlink
1039 .llong .sys_ni_syscall /* old lstat syscall */
1040 .llong .sys32_readlink /* 85 */
1041 .llong .sys_uselib
1042 .llong .sys_swapon
1043 .llong .sys_reboot
1044 .llong .old32_readdir
1045 .llong .sys_mmap /* 90 */
1046 .llong .sys_munmap
1047 .llong .sys_truncate
1048 .llong .sys_ftruncate
1049 .llong .sys_fchmod
1050 .llong .sys_fchown /* 95 */
1051 .llong .sys32_getpriority
1052 .llong .sys32_setpriority
1053 .llong .sys_ni_syscall /* old profil syscall */
1054 .llong .compat_sys_statfs
1055 .llong .compat_sys_fstatfs /* 100 */
1056 .llong .sys_ni_syscall /* old ioperm syscall */
1057 .llong .compat_sys_socketcall
1058 .llong .sys32_syslog
1059 .llong .compat_sys_setitimer
1060 .llong .compat_sys_getitimer /* 105 */
1061 .llong .compat_sys_newstat
1062 .llong .compat_sys_newlstat
1063 .llong .compat_sys_newfstat
1064 .llong .sys32_uname
1065 .llong .sys_ni_syscall /* 110 old iopl syscall */
1066 .llong .sys_vhangup
1067 .llong .sys_ni_syscall /* old idle syscall */
1068 .llong .sys_ni_syscall /* old vm86 syscall */
1069 .llong .compat_sys_wait4
1070 .llong .sys_swapoff /* 115 */
1071 .llong .sys32_sysinfo
1072 .llong .sys32_ipc
1073 .llong .sys_fsync
1074 .llong .ppc32_sigreturn
1075 .llong .ppc_clone /* 120 */
1076 .llong .sys32_setdomainname
1077 .llong .ppc64_newuname
1078 .llong .sys_ni_syscall /* old modify_ldt syscall */
1079 .llong .sys32_adjtimex
1080 .llong .sys_mprotect /* 125 */
1081 .llong .compat_sys_sigprocmask
1082 .llong .sys_ni_syscall /* old create_module syscall */
1083 .llong .sys_init_module
1084 .llong .sys_delete_module
1085 .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
1086 .llong .sys_quotactl
1087 .llong .sys32_getpgid
1088 .llong .sys_fchdir
1089 .llong .sys_bdflush
1090 .llong .sys32_sysfs /* 135 */
1091 .llong .ppc64_personality
1092 .llong .sys_ni_syscall /* for afs_syscall */
1093 .llong .sys_setfsuid
1094 .llong .sys_setfsgid
1095 .llong .sys_llseek /* 140 */
1096 .llong .sys32_getdents
1097 .llong .ppc32_select
1098 .llong .sys_flock
1099 .llong .sys_msync
1100 .llong .compat_sys_readv /* 145 */
1101 .llong .compat_sys_writev
1102 .llong .sys32_getsid
1103 .llong .sys_fdatasync
1104 .llong .sys32_sysctl
1105 .llong .sys_mlock /* 150 */
1106 .llong .sys_munlock
1107 .llong .sys_mlockall
1108 .llong .sys_munlockall
1109 .llong .sys32_sched_setparam
1110 .llong .sys32_sched_getparam /* 155 */
1111 .llong .sys32_sched_setscheduler
1112 .llong .sys32_sched_getscheduler
1113 .llong .sys_sched_yield
1114 .llong .sys32_sched_get_priority_max
1115 .llong .sys32_sched_get_priority_min /* 160 */
1116 .llong .sys32_sched_rr_get_interval
1117 .llong .compat_sys_nanosleep
1118 .llong .sys_mremap
1119 .llong .sys_setresuid
1120 .llong .sys_getresuid /* 165 */
1121 .llong .sys_ni_syscall /* old query_module syscall */
1122 .llong .sys_poll
1123 .llong .compat_sys_nfsservctl
1124 .llong .sys_setresgid
1125 .llong .sys_getresgid /* 170 */
1126 .llong .sys32_prctl
1127 .llong .ppc32_rt_sigreturn
1128 .llong .sys32_rt_sigaction
1129 .llong .sys32_rt_sigprocmask
1130 .llong .sys32_rt_sigpending /* 175 */
1131 .llong .compat_sys_rt_sigtimedwait
1132 .llong .sys32_rt_sigqueueinfo
1133 .llong .ppc32_rt_sigsuspend
1134 .llong .sys32_pread64
1135 .llong .sys32_pwrite64 /* 180 */
1136 .llong .sys_chown
1137 .llong .sys_getcwd
1138 .llong .sys_capget
1139 .llong .sys_capset
1140 .llong .sys32_sigaltstack /* 185 */
1141 .llong .sys32_sendfile
1142 .llong .sys_ni_syscall /* reserved for streams1 */
1143 .llong .sys_ni_syscall /* reserved for streams2 */
1144 .llong .ppc_vfork
1145 .llong .compat_sys_getrlimit /* 190 */
1146 .llong .sys32_readahead
1147 .llong .sys32_mmap2
1148 .llong .sys32_truncate64
1149 .llong .sys32_ftruncate64
1150 .llong .sys_stat64 /* 195 */
1151 .llong .sys_lstat64
1152 .llong .sys_fstat64
1153 .llong .sys32_pciconfig_read
1154 .llong .sys32_pciconfig_write
1155 .llong .sys32_pciconfig_iobase /* 200 - pciconfig_iobase */
1156 .llong .sys_ni_syscall /* reserved for MacOnLinux */
1157 .llong .sys_getdents64
1158 .llong .sys_pivot_root
1159 .llong .compat_sys_fcntl64
1160 .llong .sys_madvise /* 205 */
1161 .llong .sys_mincore
1162 .llong .sys_gettid
1163 .llong .sys_tkill
1164 .llong .sys_setxattr
1165 .llong .sys_lsetxattr /* 210 */
1166 .llong .sys_fsetxattr
1167 .llong .sys_getxattr
1168 .llong .sys_lgetxattr
1169 .llong .sys_fgetxattr
1170 .llong .sys_listxattr /* 215 */
1171 .llong .sys_llistxattr
1172 .llong .sys_flistxattr
1173 .llong .sys_removexattr
1174 .llong .sys_lremovexattr
1175 .llong .sys_fremovexattr /* 220 */
1176 .llong .compat_sys_futex
1177 .llong .compat_sys_sched_setaffinity
1178 .llong .compat_sys_sched_getaffinity
1179 .llong .sys_ni_syscall
1180 .llong .sys_ni_syscall /* 225 - reserved for tux */
1181 .llong .sys32_sendfile64
1182 .llong .compat_sys_io_setup
1183 .llong .sys_io_destroy
1184 .llong .compat_sys_io_getevents
1185 .llong .compat_sys_io_submit
1186 .llong .sys_io_cancel
1187 .llong .sys_set_tid_address
1188 .llong .ppc32_fadvise64
1189 .llong .sys_exit_group
1190 .llong .ppc32_lookup_dcookie /* 235 */
1191 .llong .sys_epoll_create
1192 .llong .sys_epoll_ctl
1193 .llong .sys_epoll_wait
1194 .llong .sys_remap_file_pages
1195 .llong .ppc32_timer_create /* 240 */
1196 .llong .compat_sys_timer_settime
1197 .llong .compat_sys_timer_gettime
1198 .llong .sys_timer_getoverrun
1199 .llong .sys_timer_delete
1200 .llong .compat_sys_clock_settime /* 245 */
1201 .llong .compat_sys_clock_gettime
1202 .llong .compat_sys_clock_getres
1203 .llong .compat_sys_clock_nanosleep
1204 .llong .ppc32_swapcontext
1205 .llong .sys32_tgkill /* 250 */
1206 .llong .sys32_utimes
1207 .llong .compat_sys_statfs64
1208 .llong .compat_sys_fstatfs64
1209 .llong .ppc32_fadvise64_64 /* 32bit only fadvise64_64 */
1210 .llong .ppc_rtas /* 255 */
1211 .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
1212 .llong .sys_ni_syscall /* 257 reserved for vserver */
1213 .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
1214 .llong .compat_sys_mbind
1215 .llong .compat_sys_get_mempolicy /* 260 */
1216 .llong .compat_sys_set_mempolicy
1217 .llong .compat_sys_mq_open
1218 .llong .sys_mq_unlink
1219 .llong .compat_sys_mq_timedsend
1220 .llong .compat_sys_mq_timedreceive /* 265 */
1221 .llong .compat_sys_mq_notify
1222 .llong .compat_sys_mq_getsetattr
1223 .llong .compat_sys_kexec_load
1224 .llong .sys32_add_key
1225 .llong .sys32_request_key /* 270 */
1226 .llong .compat_sys_keyctl
1227 .llong .compat_sys_waitid
1228 .llong .sys32_ioprio_set
1229 .llong .sys32_ioprio_get
1230 .llong .sys_inotify_init /* 275 */
1231 .llong .sys_inotify_add_watch
1232 .llong .sys_inotify_rm_watch
1233
1234 .balign 8
1235_GLOBAL(sys_call_table)
1236 .llong .sys_restart_syscall /* 0 */
1237 .llong .sys_exit
1238 .llong .ppc_fork
1239 .llong .sys_read
1240 .llong .sys_write
1241 .llong .sys_open /* 5 */
1242 .llong .sys_close
1243 .llong .sys_waitpid
1244 .llong .sys_creat
1245 .llong .sys_link
1246 .llong .sys_unlink /* 10 */
1247 .llong .sys_execve
1248 .llong .sys_chdir
1249 .llong .sys64_time
1250 .llong .sys_mknod
1251 .llong .sys_chmod /* 15 */
1252 .llong .sys_lchown
1253 .llong .sys_ni_syscall /* old break syscall */
1254 .llong .sys_ni_syscall /* old stat syscall */
1255 .llong .sys_lseek
1256 .llong .sys_getpid /* 20 */
1257 .llong .sys_mount
1258 .llong .sys_ni_syscall /* old umount syscall */
1259 .llong .sys_setuid
1260 .llong .sys_getuid
1261 .llong .sys_stime /* 25 */
1262 .llong .sys_ptrace
1263 .llong .sys_alarm
1264 .llong .sys_ni_syscall /* old fstat syscall */
1265 .llong .sys_pause
1266 .llong .sys_utime /* 30 */
1267 .llong .sys_ni_syscall /* old stty syscall */
1268 .llong .sys_ni_syscall /* old gtty syscall */
1269 .llong .sys_access
1270 .llong .sys_nice
1271 .llong .sys_ni_syscall /* 35 - old ftime syscall */
1272 .llong .sys_sync
1273 .llong .sys_kill
1274 .llong .sys_rename
1275 .llong .sys_mkdir
1276 .llong .sys_rmdir /* 40 */
1277 .llong .sys_dup
1278 .llong .sys_pipe
1279 .llong .sys_times
1280 .llong .sys_ni_syscall /* old prof syscall */
1281 .llong .sys_brk /* 45 */
1282 .llong .sys_setgid
1283 .llong .sys_getgid
1284 .llong .sys_signal
1285 .llong .sys_geteuid
1286 .llong .sys_getegid /* 50 */
1287 .llong .sys_acct
1288 .llong .sys_umount
1289 .llong .sys_ni_syscall /* old lock syscall */
1290 .llong .sys_ioctl
1291 .llong .sys_fcntl /* 55 */
1292 .llong .sys_ni_syscall /* old mpx syscall */
1293 .llong .sys_setpgid
1294 .llong .sys_ni_syscall /* old ulimit syscall */
1295 .llong .sys_ni_syscall /* old uname syscall */
1296 .llong .sys_umask /* 60 */
1297 .llong .sys_chroot
1298 .llong .sys_ustat
1299 .llong .sys_dup2
1300 .llong .sys_getppid
1301 .llong .sys_getpgrp /* 65 */
1302 .llong .sys_setsid
1303 .llong .sys_ni_syscall
1304 .llong .sys_sgetmask
1305 .llong .sys_ssetmask
1306 .llong .sys_setreuid /* 70 */
1307 .llong .sys_setregid
1308 .llong .sys_ni_syscall
1309 .llong .sys_ni_syscall
1310 .llong .sys_sethostname
1311 .llong .sys_setrlimit /* 75 */
1312 .llong .sys_ni_syscall /* old getrlimit syscall */
1313 .llong .sys_getrusage
1314 .llong .sys_gettimeofday
1315 .llong .sys_settimeofday
1316 .llong .sys_getgroups /* 80 */
1317 .llong .sys_setgroups
1318 .llong .sys_ni_syscall /* old select syscall */
1319 .llong .sys_symlink
1320 .llong .sys_ni_syscall /* old lstat syscall */
1321 .llong .sys_readlink /* 85 */
1322 .llong .sys_uselib
1323 .llong .sys_swapon
1324 .llong .sys_reboot
1325 .llong .sys_ni_syscall /* old readdir syscall */
1326 .llong .sys_mmap /* 90 */
1327 .llong .sys_munmap
1328 .llong .sys_truncate
1329 .llong .sys_ftruncate
1330 .llong .sys_fchmod
1331 .llong .sys_fchown /* 95 */
1332 .llong .sys_getpriority
1333 .llong .sys_setpriority
1334 .llong .sys_ni_syscall /* old profil syscall holder */
1335 .llong .sys_statfs
1336 .llong .sys_fstatfs /* 100 */
1337 .llong .sys_ni_syscall /* old ioperm syscall */
1338 .llong .sys_socketcall
1339 .llong .sys_syslog
1340 .llong .sys_setitimer
1341 .llong .sys_getitimer /* 105 */
1342 .llong .sys_newstat
1343 .llong .sys_newlstat
1344 .llong .sys_newfstat
1345 .llong .sys_ni_syscall /* old uname syscall */
1346 .llong .sys_ni_syscall /* 110 old iopl syscall */
1347 .llong .sys_vhangup
1348 .llong .sys_ni_syscall /* old idle syscall */
1349 .llong .sys_ni_syscall /* old vm86 syscall */
1350 .llong .sys_wait4
1351 .llong .sys_swapoff /* 115 */
1352 .llong .sys_sysinfo
1353 .llong .sys_ipc
1354 .llong .sys_fsync
1355 .llong .sys_ni_syscall
1356 .llong .ppc_clone /* 120 */
1357 .llong .sys_setdomainname
1358 .llong .ppc64_newuname
1359 .llong .sys_ni_syscall /* old modify_ldt syscall */
1360 .llong .sys_adjtimex
1361 .llong .sys_mprotect /* 125 */
1362 .llong .sys_ni_syscall
1363 .llong .sys_ni_syscall /* old create_module syscall */
1364 .llong .sys_init_module
1365 .llong .sys_delete_module
1366 .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
1367 .llong .sys_quotactl
1368 .llong .sys_getpgid
1369 .llong .sys_fchdir
1370 .llong .sys_bdflush
1371 .llong .sys_sysfs /* 135 */
1372 .llong .ppc64_personality
1373 .llong .sys_ni_syscall /* for afs_syscall */
1374 .llong .sys_setfsuid
1375 .llong .sys_setfsgid
1376 .llong .sys_llseek /* 140 */
1377 .llong .sys_getdents
1378 .llong .sys_select
1379 .llong .sys_flock
1380 .llong .sys_msync
1381 .llong .sys_readv /* 145 */
1382 .llong .sys_writev
1383 .llong .sys_getsid
1384 .llong .sys_fdatasync
1385 .llong .sys_sysctl
1386 .llong .sys_mlock /* 150 */
1387 .llong .sys_munlock
1388 .llong .sys_mlockall
1389 .llong .sys_munlockall
1390 .llong .sys_sched_setparam
1391 .llong .sys_sched_getparam /* 155 */
1392 .llong .sys_sched_setscheduler
1393 .llong .sys_sched_getscheduler
1394 .llong .sys_sched_yield
1395 .llong .sys_sched_get_priority_max
1396 .llong .sys_sched_get_priority_min /* 160 */
1397 .llong .sys_sched_rr_get_interval
1398 .llong .sys_nanosleep
1399 .llong .sys_mremap
1400 .llong .sys_setresuid
1401 .llong .sys_getresuid /* 165 */
1402 .llong .sys_ni_syscall /* old query_module syscall */
1403 .llong .sys_poll
1404 .llong .sys_nfsservctl
1405 .llong .sys_setresgid
1406 .llong .sys_getresgid /* 170 */
1407 .llong .sys_prctl
1408 .llong .ppc64_rt_sigreturn
1409 .llong .sys_rt_sigaction
1410 .llong .sys_rt_sigprocmask
1411 .llong .sys_rt_sigpending /* 175 */
1412 .llong .sys_rt_sigtimedwait
1413 .llong .sys_rt_sigqueueinfo
1414 .llong .ppc64_rt_sigsuspend
1415 .llong .sys_pread64
1416 .llong .sys_pwrite64 /* 180 */
1417 .llong .sys_chown
1418 .llong .sys_getcwd
1419 .llong .sys_capget
1420 .llong .sys_capset
1421 .llong .sys_sigaltstack /* 185 */
1422 .llong .sys_sendfile64
1423 .llong .sys_ni_syscall /* reserved for streams1 */
1424 .llong .sys_ni_syscall /* reserved for streams2 */
1425 .llong .ppc_vfork
1426 .llong .sys_getrlimit /* 190 */
1427 .llong .sys_readahead
1428 .llong .sys_ni_syscall /* 32bit only mmap2 */
1429 .llong .sys_ni_syscall /* 32bit only truncate64 */
1430 .llong .sys_ni_syscall /* 32bit only ftruncate64 */
1431 .llong .sys_ni_syscall /* 195 - 32bit only stat64 */
1432 .llong .sys_ni_syscall /* 32bit only lstat64 */
1433 .llong .sys_ni_syscall /* 32bit only fstat64 */
1434 .llong .sys_pciconfig_read
1435 .llong .sys_pciconfig_write
1436 .llong .sys_pciconfig_iobase /* 200 - pciconfig_iobase */
1437 .llong .sys_ni_syscall /* reserved for MacOnLinux */
1438 .llong .sys_getdents64
1439 .llong .sys_pivot_root
1440 .llong .sys_ni_syscall /* 32bit only fcntl64 */
1441 .llong .sys_madvise /* 205 */
1442 .llong .sys_mincore
1443 .llong .sys_gettid
1444 .llong .sys_tkill
1445 .llong .sys_setxattr
1446 .llong .sys_lsetxattr /* 210 */
1447 .llong .sys_fsetxattr
1448 .llong .sys_getxattr
1449 .llong .sys_lgetxattr
1450 .llong .sys_fgetxattr
1451 .llong .sys_listxattr /* 215 */
1452 .llong .sys_llistxattr
1453 .llong .sys_flistxattr
1454 .llong .sys_removexattr
1455 .llong .sys_lremovexattr
1456 .llong .sys_fremovexattr /* 220 */
1457 .llong .sys_futex
1458 .llong .sys_sched_setaffinity
1459 .llong .sys_sched_getaffinity
1460 .llong .sys_ni_syscall
1461 .llong .sys_ni_syscall /* 225 - reserved for tux */
1462 .llong .sys_ni_syscall /* 32bit only sendfile64 */
1463 .llong .sys_io_setup
1464 .llong .sys_io_destroy
1465 .llong .sys_io_getevents
1466 .llong .sys_io_submit /* 230 */
1467 .llong .sys_io_cancel
1468 .llong .sys_set_tid_address
1469 .llong .sys_fadvise64
1470 .llong .sys_exit_group
1471 .llong .sys_lookup_dcookie /* 235 */
1472 .llong .sys_epoll_create
1473 .llong .sys_epoll_ctl
1474 .llong .sys_epoll_wait
1475 .llong .sys_remap_file_pages
1476 .llong .sys_timer_create /* 240 */
1477 .llong .sys_timer_settime
1478 .llong .sys_timer_gettime
1479 .llong .sys_timer_getoverrun
1480 .llong .sys_timer_delete
1481 .llong .sys_clock_settime /* 245 */
1482 .llong .sys_clock_gettime
1483 .llong .sys_clock_getres
1484 .llong .sys_clock_nanosleep
1485 .llong .ppc64_swapcontext
1486 .llong .sys_tgkill /* 250 */
1487 .llong .sys_utimes
1488 .llong .sys_statfs64
1489 .llong .sys_fstatfs64
1490 .llong .sys_ni_syscall /* 32bit only fadvise64_64 */
1491 .llong .ppc_rtas /* 255 */
1492 .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
1493 .llong .sys_ni_syscall /* 257 reserved for vserver */
1494 .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
1495 .llong .sys_mbind
1496 .llong .sys_get_mempolicy /* 260 */
1497 .llong .sys_set_mempolicy
1498 .llong .sys_mq_open
1499 .llong .sys_mq_unlink
1500 .llong .sys_mq_timedsend
1501 .llong .sys_mq_timedreceive /* 265 */
1502 .llong .sys_mq_notify
1503 .llong .sys_mq_getsetattr
1504 .llong .sys_kexec_load
1505 .llong .sys_add_key
1506 .llong .sys_request_key /* 270 */
1507 .llong .sys_keyctl
1508 .llong .sys_waitid
1509 .llong .sys_ioprio_set
1510 .llong .sys_ioprio_get
1511 .llong .sys_inotify_init /* 275 */
1512 .llong .sys_inotify_add_watch
1513 .llong .sys_inotify_rm_watch
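
[Editor's sketch] The two tables removed from misc.S above (sys_call_table32 and sys_call_table) are plain arrays of 64-bit function descriptors indexed by syscall number, which is what the in-file comment "Why isn't this a) automatic, b) written in 'C'?" alludes to. A purely illustrative C equivalent of that dispatch idea; the dispatch() helper and the uniform six-long signature are assumptions for the sketch, not the kernel's actual entry path:

	/* Illustrative only: the tables above are the assembly equivalent of
	 * an array of handler pointers indexed by the syscall number in r0. */
	typedef long (*syscall_fn)(long, long, long, long, long, long);
	extern syscall_fn sys_call_table[];

	static long dispatch(long nr, long a0, long a1, long a2,
			     long a3, long a4, long a5)
	{
		return sys_call_table[nr](a0, a1, a2, a3, a4, a5);
	}
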
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index ff4be1da69d5..3d2106b022a1 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -31,8 +31,7 @@
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/udbg.h> 33#include <asm/udbg.h>
34 34#include <asm/ppc-pci.h>
35#include "pci.h"
36 35
37#ifdef DEBUG 36#ifdef DEBUG
38#define DBG(fmt...) udbg_printf(fmt) 37#define DBG(fmt...) udbg_printf(fmt)
@@ -727,16 +726,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
727 * above routine 726 * above routine
728 */ 727 */
729pgprot_t pci_phys_mem_access_prot(struct file *file, 728pgprot_t pci_phys_mem_access_prot(struct file *file,
730 unsigned long offset, 729 unsigned long pfn,
731 unsigned long size, 730 unsigned long size,
732 pgprot_t protection) 731 pgprot_t protection)
733{ 732{
734 struct pci_dev *pdev = NULL; 733 struct pci_dev *pdev = NULL;
735 struct resource *found = NULL; 734 struct resource *found = NULL;
736 unsigned long prot = pgprot_val(protection); 735 unsigned long prot = pgprot_val(protection);
736 unsigned long offset = pfn << PAGE_SHIFT;
737 int i; 737 int i;
738 738
739 if (page_is_ram(offset >> PAGE_SHIFT)) 739 if (page_is_ram(pfn))
740 return __pgprot(prot); 740 return __pgprot(prot);
741 741
742 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; 742 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
@@ -881,9 +881,9 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
881} 881}
882 882
883void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 883void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
884 struct device_node *dev) 884 struct device_node *dev, int prim)
885{ 885{
886 unsigned int *ranges; 886 unsigned int *ranges, pci_space;
887 unsigned long size; 887 unsigned long size;
888 int rlen = 0; 888 int rlen = 0;
889 int memno = 0; 889 int memno = 0;
@@ -906,16 +906,39 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
906 ranges = (unsigned int *) get_property(dev, "ranges", &rlen); 906 ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
907 while ((rlen -= np * sizeof(unsigned int)) >= 0) { 907 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
908 res = NULL; 908 res = NULL;
909 pci_addr = (unsigned long)ranges[1] << 32 | ranges[2]; 909 pci_space = ranges[0];
910 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
910 911
911 cpu_phys_addr = ranges[3]; 912 cpu_phys_addr = ranges[3];
912 if (na == 2) 913 if (na >= 2)
913 cpu_phys_addr = cpu_phys_addr << 32 | ranges[4]; 914 cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
914 915
915 size = (unsigned long)ranges[na+3] << 32 | ranges[na+4]; 916 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
917 ranges += np;
916 if (size == 0) 918 if (size == 0)
917 continue; 919 continue;
918 switch ((ranges[0] >> 24) & 0x3) { 920
921 /* Now consume following elements while they are contiguous */
922 while (rlen >= np * sizeof(unsigned int)) {
923 unsigned long addr, phys;
924
925 if (ranges[0] != pci_space)
926 break;
927 addr = ((unsigned long)ranges[1] << 32) | ranges[2];
928 phys = ranges[3];
929 if (na >= 2)
930 phys = (phys << 32) | ranges[4];
931 if (addr != pci_addr + size ||
932 phys != cpu_phys_addr + size)
933 break;
934
935 size += ((unsigned long)ranges[na+3] << 32)
936 | ranges[na+4];
937 ranges += np;
938 rlen -= np * sizeof(unsigned int);
939 }
940
941 switch ((pci_space >> 24) & 0x3) {
919 case 1: /* I/O space */ 942 case 1: /* I/O space */
920 hose->io_base_phys = cpu_phys_addr; 943 hose->io_base_phys = cpu_phys_addr;
921 hose->pci_io_size = size; 944 hose->pci_io_size = size;
@@ -949,7 +972,6 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
949 res->sibling = NULL; 972 res->sibling = NULL;
950 res->child = NULL; 973 res->child = NULL;
951 } 974 }
952 ranges += np;
953 } 975 }
954} 976}
955 977
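
[Editor's sketch] The pci_process_bridge_OF_ranges() hunk above changes the parsing loop so that consecutive "ranges" property entries are folded into one resource when they stay in the same PCI address space and both their PCI and CPU addresses continue exactly where the previous entry ended. A stripped-down sketch of just that merge test, with a hypothetical struct and names (not from the patch), assuming the 64-bit address assembly already done in the loop:

	/* Sketch of the merge test only; the struct and names are illustrative. */
	struct of_range {
		unsigned int  space;	/* ranges[0]: PCI address space bits */
		unsigned long pci_addr;	/* 64-bit PCI address                */
		unsigned long cpu_addr;	/* 64-bit CPU physical address       */
		unsigned long size;
	};

	static int can_merge(const struct of_range *cur, const struct of_range *next)
	{
		return next->space == cur->space &&
		       next->pci_addr == cur->pci_addr + cur->size &&
		       next->cpu_addr == cur->cpu_addr + cur->size;
	}

When the test holds, the following entry's size is simply added to the current range before a struct resource is set up for it, mirroring the inner while loop in the hunk.
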
diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
index 54055c81017a..e1a32f802c0b 100644
--- a/arch/ppc64/kernel/pci_direct_iommu.c
+++ b/arch/ppc64/kernel/pci_direct_iommu.c
@@ -27,8 +27,7 @@
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/pmac_feature.h> 28#include <asm/pmac_feature.h>
29#include <asm/abs_addr.h> 29#include <asm/abs_addr.h>
30 30#include <asm/ppc-pci.h>
31#include "pci.h"
32 31
33static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 32static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
34 dma_addr_t *dma_handle, gfp_t flag) 33 dma_addr_t *dma_handle, gfp_t flag)
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index a86389d07d57..493bbe43f5b4 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -30,8 +30,7 @@
30#include <asm/prom.h> 30#include <asm/prom.h>
31#include <asm/pci-bridge.h> 31#include <asm/pci-bridge.h>
32#include <asm/pSeries_reconfig.h> 32#include <asm/pSeries_reconfig.h>
33 33#include <asm/ppc-pci.h>
34#include "pci.h"
35 34
36/* 35/*
37 * Traverse_func that inits the PCI fields of the device node. 36 * Traverse_func that inits the PCI fields of the device node.
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
index d9e33b7d4203..bdf15dbbf4f0 100644
--- a/arch/ppc64/kernel/pci_iommu.c
+++ b/arch/ppc64/kernel/pci_iommu.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * arch/ppc64/kernel/pci_iommu.c 2 * arch/ppc64/kernel/pci_iommu.c
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 * 4 *
5 * Rewrite, cleanup, new allocation schemes: 5 * Rewrite, cleanup, new allocation schemes:
6 * Copyright (C) 2004 Olof Johansson, IBM Corporation 6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
7 * 7 *
8 * Dynamic DMA mapping support, platform-independent parts. 8 * Dynamic DMA mapping support, platform-independent parts.
@@ -11,19 +11,18 @@
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25 25
26#include <linux/config.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/slab.h> 28#include <linux/slab.h>
@@ -37,11 +36,7 @@
37#include <asm/iommu.h> 36#include <asm/iommu.h>
38#include <asm/pci-bridge.h> 37#include <asm/pci-bridge.h>
39#include <asm/machdep.h> 38#include <asm/machdep.h>
40#include "pci.h" 39#include <asm/ppc-pci.h>
41
42#ifdef CONFIG_PPC_ISERIES
43#include <asm/iSeries/iSeries_pci.h>
44#endif /* CONFIG_PPC_ISERIES */
45 40
46/* 41/*
47 * We can use ->sysdata directly and avoid the extra work in 42 * We can use ->sysdata directly and avoid the extra work in
@@ -61,13 +56,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
61 } else 56 } else
62 pdev = to_pci_dev(dev); 57 pdev = to_pci_dev(dev);
63 58
64#ifdef CONFIG_PPC_ISERIES
65 return ISERIES_DEVNODE(pdev)->iommu_table;
66#endif /* CONFIG_PPC_ISERIES */
67
68#ifdef CONFIG_PPC_MULTIPLATFORM
69 return PCI_DN(PCI_GET_DN(pdev))->iommu_table; 59 return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
70#endif /* CONFIG_PPC_MULTIPLATFORM */
71} 60}
72 61
73 62
diff --git a/arch/ppc64/kernel/pmac.h b/arch/ppc64/kernel/pmac.h
deleted file mode 100644
index 40e1c5030f74..000000000000
--- a/arch/ppc64/kernel/pmac.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef __PMAC_H__
2#define __PMAC_H__
3
4#include <linux/pci.h>
5#include <linux/ide.h>
6
7/*
8 * Declaration for the various functions exported by the
9 * pmac_* files. Mostly for use by pmac_setup
10 */
11
12extern void pmac_get_boot_time(struct rtc_time *tm);
13extern void pmac_get_rtc_time(struct rtc_time *tm);
14extern int pmac_set_rtc_time(struct rtc_time *tm);
15extern void pmac_read_rtc_time(void);
16extern void pmac_calibrate_decr(void);
17
18extern void pmac_pcibios_fixup(void);
19extern void pmac_pci_init(void);
20extern void pmac_setup_pci_dma(void);
21extern void pmac_check_ht_link(void);
22
23extern void pmac_setup_smp(void);
24
25extern unsigned long pmac_ide_get_base(int index);
26extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
27 unsigned long data_port, unsigned long ctrl_port, int *irq);
28
29extern void pmac_nvram_init(void);
30
31#endif /* __PMAC_H__ */
diff --git a/arch/ppc64/kernel/pmac_feature.c b/arch/ppc64/kernel/pmac_feature.c
deleted file mode 100644
index eb4e6c3f694d..000000000000
--- a/arch/ppc64/kernel/pmac_feature.c
+++ /dev/null
@@ -1,767 +0,0 @@
1/*
2 * arch/ppc/platforms/pmac_feature.c
3 *
4 * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
5 * Ben. Herrenschmidt (benh@kernel.crashing.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * TODO:
13 *
14 * - Replace mdelay with some schedule loop if possible
15 * - Shorten some obfuscated delays on some routines (like modem
16 * power)
17 * - Refcount some clocks (see darwin)
18 * - Split split split...
19 *
20 */
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28#include <linux/adb.h>
29#include <linux/pmu.h>
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <asm/sections.h>
33#include <asm/errno.h>
34#include <asm/keylargo.h>
35#include <asm/uninorth.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/machdep.h>
39#include <asm/pmac_feature.h>
40#include <asm/dbdma.h>
41#include <asm/pci-bridge.h>
42#include <asm/pmac_low_i2c.h>
43
44#undef DEBUG_FEATURE
45
46#ifdef DEBUG_FEATURE
47#define DBG(fmt...) printk(KERN_DEBUG fmt)
48#else
49#define DBG(fmt...)
50#endif
51
52/*
53 * We use a single global lock to protect accesses. Each driver has
54 * to take care of its own locking
55 */
56static DEFINE_SPINLOCK(feature_lock __pmacdata);
57
58#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
59#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
60
61
62/*
63 * Instance of some macio stuffs
64 */
65struct macio_chip macio_chips[MAX_MACIO_CHIPS] __pmacdata;
66
67struct macio_chip* __pmac macio_find(struct device_node* child, int type)
68{
69 while(child) {
70 int i;
71
72 for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
73 if (child == macio_chips[i].of_node &&
74 (!type || macio_chips[i].type == type))
75 return &macio_chips[i];
76 child = child->parent;
77 }
78 return NULL;
79}
80EXPORT_SYMBOL_GPL(macio_find);
81
82static const char* macio_names[] __pmacdata =
83{
84 "Unknown",
85 "Grand Central",
86 "OHare",
87 "OHareII",
88 "Heathrow",
89 "Gatwick",
90 "Paddington",
91 "Keylargo",
92 "Pangea",
93 "Intrepid",
94 "K2"
95};
96
97
98
99/*
100 * Uninorth reg. access. Note that Uni-N regs are big endian
101 */
102
103#define UN_REG(r) (uninorth_base + ((r) >> 2))
104#define UN_IN(r) (in_be32(UN_REG(r)))
105#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
106#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
107#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
108
109static struct device_node* uninorth_node __pmacdata;
110static u32* uninorth_base __pmacdata;
111static u32 uninorth_rev __pmacdata;
112static void *u3_ht;
113
114extern struct device_node *k2_skiplist[2];
115
116/*
117 * For each motherboard family, we have a table of functions pointers
118 * that handle the various features.
119 */
120
121typedef long (*feature_call)(struct device_node* node, long param, long value);
122
123struct feature_table_entry {
124 unsigned int selector;
125 feature_call function;
126};
127
128struct pmac_mb_def
129{
130 const char* model_string;
131 const char* model_name;
132 int model_id;
133 struct feature_table_entry* features;
134 unsigned long board_flags;
135};
136static struct pmac_mb_def pmac_mb __pmacdata;
137
138/*
139 * Here are the chip specific feature functions
140 */
141
142
143static long __pmac g5_read_gpio(struct device_node* node, long param, long value)
144{
145 struct macio_chip* macio = &macio_chips[0];
146
147 return MACIO_IN8(param);
148}
149
150
151static long __pmac g5_write_gpio(struct device_node* node, long param, long value)
152{
153 struct macio_chip* macio = &macio_chips[0];
154
155 MACIO_OUT8(param, (u8)(value & 0xff));
156 return 0;
157}
158
159static long __pmac g5_gmac_enable(struct device_node* node, long param, long value)
160{
161 struct macio_chip* macio = &macio_chips[0];
162 unsigned long flags;
163
164 if (node == NULL)
165 return -ENODEV;
166
167 LOCK(flags);
168 if (value) {
169 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
170 mb();
171 k2_skiplist[0] = NULL;
172 } else {
173 k2_skiplist[0] = node;
174 mb();
175 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
176 }
177
178 UNLOCK(flags);
179 mdelay(1);
180
181 return 0;
182}
183
184static long __pmac g5_fw_enable(struct device_node* node, long param, long value)
185{
186 struct macio_chip* macio = &macio_chips[0];
187 unsigned long flags;
188
189 if (node == NULL)
190 return -ENODEV;
191
192 LOCK(flags);
193 if (value) {
194 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
195 mb();
196 k2_skiplist[1] = NULL;
197 } else {
198 k2_skiplist[1] = node;
199 mb();
200 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
201 }
202
203 UNLOCK(flags);
204 mdelay(1);
205
206 return 0;
207}
208
209static long __pmac g5_mpic_enable(struct device_node* node, long param, long value)
210{
211 unsigned long flags;
212
213 if (node->parent == NULL || strcmp(node->parent->name, "u3"))
214 return 0;
215
216 LOCK(flags);
217 UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
218 UNLOCK(flags);
219
220 return 0;
221}
222
223static long __pmac g5_eth_phy_reset(struct device_node* node, long param, long value)
224{
225 struct macio_chip* macio = &macio_chips[0];
226 struct device_node *phy;
227 int need_reset;
228
229 /*
230 * We must not reset the combo PHYs, only the BCM5221 found in
231 * the iMac G5.
232 */
233 phy = of_get_next_child(node, NULL);
234 if (!phy)
235 return -ENODEV;
236 need_reset = device_is_compatible(phy, "B5221");
237 of_node_put(phy);
238 if (!need_reset)
239 return 0;
240
241 /* PHY reset is GPIO 29, not in device-tree unfortunately */
242 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
243 KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
244 /* Thankfully, this is now always called at a time when we can
245 * schedule by sungem.
246 */
247 msleep(10);
248 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
249
250 return 0;
251}
252
253static long __pmac g5_i2s_enable(struct device_node *node, long param, long value)
254{
255 /* Very crude implementation for now */
256 struct macio_chip* macio = &macio_chips[0];
257 unsigned long flags;
258
259 if (value == 0)
260 return 0; /* don't disable yet */
261
262 LOCK(flags);
263 MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
264 KL3_I2S0_CLK18_ENABLE);
265 udelay(10);
266 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
267 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
268 udelay(10);
269 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
270 UNLOCK(flags);
271 udelay(10);
272
273 return 0;
274}
275
276
277#ifdef CONFIG_SMP
278static long __pmac g5_reset_cpu(struct device_node* node, long param, long value)
279{
280 unsigned int reset_io = 0;
281 unsigned long flags;
282 struct macio_chip* macio;
283 struct device_node* np;
284
285 macio = &macio_chips[0];
286 if (macio->type != macio_keylargo2)
287 return -ENODEV;
288
289 np = find_path_device("/cpus");
290 if (np == NULL)
291 return -ENODEV;
292 for (np = np->child; np != NULL; np = np->sibling) {
293 u32* num = (u32 *)get_property(np, "reg", NULL);
294 u32* rst = (u32 *)get_property(np, "soft-reset", NULL);
295 if (num == NULL || rst == NULL)
296 continue;
297 if (param == *num) {
298 reset_io = *rst;
299 break;
300 }
301 }
302 if (np == NULL || reset_io == 0)
303 return -ENODEV;
304
305 LOCK(flags);
306 MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
307 (void)MACIO_IN8(reset_io);
308 udelay(1);
309 MACIO_OUT8(reset_io, 0);
310 (void)MACIO_IN8(reset_io);
311 UNLOCK(flags);
312
313 return 0;
314}
315#endif /* CONFIG_SMP */
316
317/*
318 * This can be called from pmac_smp so isn't static
319 *
320 * This takes the second CPU off the bus on dual CPU machines
321 * running UP
322 */
323void __pmac g5_phy_disable_cpu1(void)
324{
325 UN_OUT(U3_API_PHY_CONFIG_1, 0);
326}
327
328static long __pmac generic_get_mb_info(struct device_node* node, long param, long value)
329{
330 switch(param) {
331 case PMAC_MB_INFO_MODEL:
332 return pmac_mb.model_id;
333 case PMAC_MB_INFO_FLAGS:
334 return pmac_mb.board_flags;
335 case PMAC_MB_INFO_NAME:
336 /* hack hack hack... but should work */
337 *((const char **)value) = pmac_mb.model_name;
338 return 0;
339 }
340 return -EINVAL;
341}
342
343
344/*
345 * Table definitions
346 */
347
348/* Used on any machine
349 */
350static struct feature_table_entry any_features[] __pmacdata = {
351 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
352 { 0, NULL }
353};
354
355/* G5 features
356 */
357static struct feature_table_entry g5_features[] __pmacdata = {
358 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
359 { PMAC_FTR_1394_ENABLE, g5_fw_enable },
360 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
361 { PMAC_FTR_READ_GPIO, g5_read_gpio },
362 { PMAC_FTR_WRITE_GPIO, g5_write_gpio },
363 { PMAC_FTR_GMAC_PHY_RESET, g5_eth_phy_reset },
364 { PMAC_FTR_SOUND_CHIP_ENABLE, g5_i2s_enable },
365#ifdef CONFIG_SMP
366 { PMAC_FTR_RESET_CPU, g5_reset_cpu },
367#endif /* CONFIG_SMP */
368 { 0, NULL }
369};
370
371static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
372 { "PowerMac7,2", "PowerMac G5",
373 PMAC_TYPE_POWERMAC_G5, g5_features,
374 0,
375 },
376 { "PowerMac7,3", "PowerMac G5",
377 PMAC_TYPE_POWERMAC_G5, g5_features,
378 0,
379 },
380 { "PowerMac8,1", "iMac G5",
381 PMAC_TYPE_IMAC_G5, g5_features,
382 0,
383 },
384 { "PowerMac9,1", "PowerMac G5",
385 PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
386 0,
387 },
388 { "RackMac3,1", "XServe G5",
389 PMAC_TYPE_XSERVE_G5, g5_features,
390 0,
391 },
392};
393
394/*
395 * The toplevel feature_call callback
396 */
397long __pmac pmac_do_feature_call(unsigned int selector, ...)
398{
399 struct device_node* node;
400 long param, value;
401 int i;
402 feature_call func = NULL;
403 va_list args;
404
405 if (pmac_mb.features)
406 for (i=0; pmac_mb.features[i].function; i++)
407 if (pmac_mb.features[i].selector == selector) {
408 func = pmac_mb.features[i].function;
409 break;
410 }
411 if (!func)
412 for (i=0; any_features[i].function; i++)
413 if (any_features[i].selector == selector) {
414 func = any_features[i].function;
415 break;
416 }
417 if (!func)
418 return -ENODEV;
419
420 va_start(args, selector);
421 node = (struct device_node*)va_arg(args, void*);
422 param = va_arg(args, long);
423 value = va_arg(args, long);
424 va_end(args);
425
426 return func(node, param, value);
427}
428
429static int __init probe_motherboard(void)
430{
431 int i;
432 struct macio_chip* macio = &macio_chips[0];
433 const char* model = NULL;
434 struct device_node *dt;
435
436 /* Lookup known motherboard type in device-tree. First try an
437 * exact match on the "model" property, then try a "compatible"
438 * match is none is found.
439 */
440 dt = find_devices("device-tree");
441 if (dt != NULL)
442 model = (const char *) get_property(dt, "model", NULL);
443 for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
444 if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
445 pmac_mb = pmac_mb_defs[i];
446 goto found;
447 }
448 }
449 for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
450 if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
451 pmac_mb = pmac_mb_defs[i];
452 goto found;
453 }
454 }
455
456 /* Fallback to selection depending on mac-io chip type */
457 switch(macio->type) {
458 case macio_keylargo2:
459 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
460 pmac_mb.model_name = "Unknown K2-based";
461 pmac_mb.features = g5_features;
462
463 default:
464 return -ENODEV;
465 }
466found:
467 /* Check for "mobile" machine */
468 if (model && (strncmp(model, "PowerBook", 9) == 0
469 || strncmp(model, "iBook", 5) == 0))
470 pmac_mb.board_flags |= PMAC_MB_MOBILE;
471
472
473 printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
474 return 0;
475}
476
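Note: probe_motherboard() above does a two-pass lookup, an exact match on the "model" property first, then a "compatible" fallback. A rough standalone sketch of that lookup order, with an invented table and an is_compatible() stub standing in for the Open Firmware matching:

#include <stdio.h>
#include <string.h>

struct mb_def { const char *model; const char *name; };

static const struct mb_def defs[] = {
	{ "PowerMac7,2", "PowerMac G5" },
	{ "PowerMac8,1", "iMac G5" },
	{ "RackMac3,1",  "XServe G5" },
};

static int is_compatible(const char *model, const char *entry)
{
	return strncmp(model, entry, 8) == 0;	/* stand-in for OF matching */
}

static const char *probe(const char *model)
{
	unsigned int i;

	for (i = 0; i < sizeof(defs) / sizeof(defs[0]); i++)
		if (strcmp(model, defs[i].model) == 0)
			return defs[i].name;
	for (i = 0; i < sizeof(defs) / sizeof(defs[0]); i++)
		if (is_compatible(model, defs[i].model))
			return defs[i].name;
	return "Unknown";
}

int main(void)
{
	printf("%s\n", probe("PowerMac7,3"));	/* falls through to compatible */
	return 0;
}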
477/* Initialize the Core99 UniNorth host bridge and memory controller
478 */
479static void __init probe_uninorth(void)
480{
481 uninorth_node = of_find_node_by_name(NULL, "u3");
482 if (uninorth_node && uninorth_node->n_addrs > 0) {
483 /* Small hack until I figure out if parsing in prom.c is correct. I should
484 * get rid of that pre-parsed junk anyway
485 */
486 unsigned long address = uninorth_node->addrs[0].address;
487 uninorth_base = ioremap(address, 0x40000);
488 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
489 u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
490 } else
491 uninorth_node = NULL;
492
493 if (!uninorth_node)
494 return;
495
496 printk(KERN_INFO "Found U3 memory controller & host bridge, revision: %d\n",
497 uninorth_rev);
498 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
499
500}
501
502static void __init probe_one_macio(const char* name, const char* compat, int type)
503{
504 struct device_node* node;
505 int i;
506 volatile u32* base;
507 u32* revp;
508
509 node = find_devices(name);
510 if (!node || !node->n_addrs)
511 return;
512 if (compat)
513 do {
514 if (device_is_compatible(node, compat))
515 break;
516 node = node->next;
517 } while (node);
518 if (!node)
519 return;
520 for(i=0; i<MAX_MACIO_CHIPS; i++) {
521 if (!macio_chips[i].of_node)
522 break;
523 if (macio_chips[i].of_node == node)
524 return;
525 }
526 if (i >= MAX_MACIO_CHIPS) {
527 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
528 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
529 return;
530 }
531 base = (volatile u32*)ioremap(node->addrs[0].address, node->addrs[0].size);
532 if (!base) {
533 printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
534 return;
535 }
536 if (type == macio_keylargo) {
537 u32* did = (u32 *)get_property(node, "device-id", NULL);
538 if (*did == 0x00000025)
539 type = macio_pangea;
540 if (*did == 0x0000003e)
541 type = macio_intrepid;
542 }
543 macio_chips[i].of_node = node;
544 macio_chips[i].type = type;
545 macio_chips[i].base = base;
546 macio_chips[i].flags = MACIO_FLAG_SCCA_ON | MACIO_FLAG_SCCB_ON;
547 macio_chips[i].name = macio_names[type];
548 revp = (u32 *)get_property(node, "revision-id", NULL);
549 if (revp)
550 macio_chips[i].rev = *revp;
551 printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
552 macio_names[type], macio_chips[i].rev, macio_chips[i].base);
553}
554
555static int __init
556probe_macios(void)
557{
558 probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
559
560 macio_chips[0].lbus.index = 0;
561 macio_chips[1].lbus.index = 1;
562
563 return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
564}
565
566static void __init
567set_initial_features(void)
568{
569 struct device_node *np;
570
571 if (macio_chips[0].type == macio_keylargo2) {
572#ifndef CONFIG_SMP
573 /* On SMP machines running UP, we have the second CPU eating
574 * bus cycles. We need to take it off the bus. This is done
575 * from pmac_smp for SMP kernels running on one CPU
576 */
577 np = of_find_node_by_type(NULL, "cpu");
578 if (np != NULL)
579 np = of_find_node_by_type(np, "cpu");
580 if (np != NULL) {
581 g5_phy_disable_cpu1();
582 of_node_put(np);
583 }
584#endif /* CONFIG_SMP */
585 /* Enable GMAC for now for PCI probing. It will be disabled
586 * later on after PCI probe
587 */
588 np = of_find_node_by_name(NULL, "ethernet");
589 while(np) {
590 if (device_is_compatible(np, "K2-GMAC"))
591 g5_gmac_enable(np, 0, 1);
592 np = of_find_node_by_name(np, "ethernet");
593 }
594
595 /* Enable FW before PCI probe. Will be disabled later on
596 * Note: We should have a better way to check that we are
597 * dealing with the uninorth internal cell and not a PCI cell
598 * on the external PCI. The code below works though.
599 */
600 np = of_find_node_by_name(NULL, "firewire");
601 while(np) {
602 if (device_is_compatible(np, "pci106b,5811")) {
603 macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
604 g5_fw_enable(np, 0, 1);
605 }
606 np = of_find_node_by_name(np, "firewire");
607 }
608 }
609}
610
611void __init
612pmac_feature_init(void)
613{
614 /* Detect the UniNorth memory controller */
615 probe_uninorth();
616
617 /* Probe mac-io controllers */
618 if (probe_macios()) {
619 printk(KERN_WARNING "No mac-io chip found\n");
620 return;
621 }
622
623 /* Setup low-level i2c stuffs */
624 pmac_init_low_i2c();
625
626 /* Probe machine type */
627 if (probe_motherboard())
628 printk(KERN_WARNING "Unknown PowerMac !\n");
629
630 /* Set some initial features (turn off some chips that will
631 * be later turned on)
632 */
633 set_initial_features();
634}
635
636int __init pmac_feature_late_init(void)
637{
638#if 0
639 struct device_node* np;
640
641 /* Request some resources late */
642 if (uninorth_node)
643 request_OF_resource(uninorth_node, 0, NULL);
644 np = find_devices("hammerhead");
645 if (np)
646 request_OF_resource(np, 0, NULL);
647 np = find_devices("interrupt-controller");
648 if (np)
649 request_OF_resource(np, 0, NULL);
650#endif
651 return 0;
652}
653
654device_initcall(pmac_feature_late_init);
655
656#if 0
657static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
658{
659 int freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
660 int bits[8] = { 8,16,0,32,2,4,0,0 };
661 int freq = (frq >> 8) & 0xf;
662
663 if (freqs[freq] == 0)
664 printk("%s: Unknown HT link frequency %x\n", name, freq);
665 else
666 printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
667 name, freqs[freq],
668 bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
669}
670#endif
671
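Note: the (currently #if 0'd) dump_HT_speeds() above decodes a 4-bit frequency index from the link frequency register and two 3-bit width fields from the link config register. A minimal standalone decode using the same tables, with invented register values:

#include <stdio.h>

int main(void)
{
	static const int freqs[16] = { 200, 300, 400, 500, 600, 800, 1000 };
	static const int bits[8]   = { 8, 16, 0, 32, 2, 4, 0, 0 };
	unsigned int cfg = 0x11000000;	/* hypothetical: 16-bit in, 16-bit out */
	unsigned int frq = 0x00000500;	/* hypothetical: index 5 -> 800 MHz    */
	int freq = (frq >> 8) & 0xf;

	printf("%d MHz, %d in / %d out bits\n",
	       freqs[freq], bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
	return 0;
}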
672void __init pmac_check_ht_link(void)
673{
674#if 0 /* Disabled for now */
675 u32 ufreq, freq, ucfg, cfg;
676 struct device_node *pcix_node;
677 struct pci_dn *pdn;
678 u8 px_bus, px_devfn;
679 struct pci_controller *px_hose;
680
681 (void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
682 ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
683 ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
684 dump_HT_speeds("U3 HyperTransport", cfg, freq);
685
686 pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
687 if (pcix_node == NULL) {
688 printk("No PCI-X bridge found\n");
689 return;
690 }
691 pdn = pcix_node->data;
692 px_hose = pdn->phb;
693 px_bus = pdn->busno;
694 px_devfn = pdn->devfn;
695
696 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
697 early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
698 dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
699 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
700 early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
701 dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
702#endif
703}
704
705/*
706 * Early video resume hook
707 */
708
709static void (*pmac_early_vresume_proc)(void *data) __pmacdata;
710static void *pmac_early_vresume_data __pmacdata;
711
712void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
713{
714 if (_machine != _MACH_Pmac)
715 return;
716 preempt_disable();
717 pmac_early_vresume_proc = proc;
718 pmac_early_vresume_data = data;
719 preempt_enable();
720}
721EXPORT_SYMBOL(pmac_set_early_video_resume);
722
723
724/*
725 * AGP related suspend/resume code
726 */
727
728static struct pci_dev *pmac_agp_bridge __pmacdata;
729static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata;
730static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata;
731
732void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
733 int (*suspend)(struct pci_dev *bridge),
734 int (*resume)(struct pci_dev *bridge))
735{
736 if (suspend || resume) {
737 pmac_agp_bridge = bridge;
738 pmac_agp_suspend = suspend;
739 pmac_agp_resume = resume;
740 return;
741 }
742 if (bridge != pmac_agp_bridge)
743 return;
744 pmac_agp_suspend = pmac_agp_resume = NULL;
745 return;
746}
747EXPORT_SYMBOL(pmac_register_agp_pm);
748
749void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
750{
751 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
752 return;
753 if (pmac_agp_bridge->bus != dev->bus)
754 return;
755 pmac_agp_suspend(pmac_agp_bridge);
756}
757EXPORT_SYMBOL(pmac_suspend_agp_for_card);
758
759void __pmac pmac_resume_agp_for_card(struct pci_dev *dev)
760{
761 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
762 return;
763 if (pmac_agp_bridge->bus != dev->bus)
764 return;
765 pmac_agp_resume(pmac_agp_bridge);
766}
767EXPORT_SYMBOL(pmac_resume_agp_for_card);
diff --git a/arch/ppc64/kernel/pmac_pci.c b/arch/ppc64/kernel/pmac_pci.c
deleted file mode 100644
index dc40a0cad0b4..000000000000
--- a/arch/ppc64/kernel/pmac_pci.c
+++ /dev/null
@@ -1,793 +0,0 @@
1/*
2 * Support for PCI bridges found on Power Macintoshes.
3 * At present the "bandit" and "chaos" bridges are supported.
4 * Fortunately you access configuration space in the same
5 * way with either bridge.
6 *
7 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
8 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/kernel.h>
17#include <linux/pci.h>
18#include <linux/delay.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/bootmem.h>
22
23#include <asm/sections.h>
24#include <asm/io.h>
25#include <asm/prom.h>
26#include <asm/pci-bridge.h>
27#include <asm/machdep.h>
28#include <asm/pmac_feature.h>
29#include <asm/iommu.h>
30
31#include "pci.h"
32#include "pmac.h"
33
34#define DEBUG
35
36#ifdef DEBUG
37#define DBG(x...) printk(x)
38#else
39#define DBG(x...)
40#endif
41
42/* XXX Could be per-controller, but I don't think we risk anything by
43 * assuming we won't have both UniNorth and Bandit */
44static int has_uninorth;
45static struct pci_controller *u3_agp;
46struct device_node *k2_skiplist[2];
47
48static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
49{
50 for (; node != 0;node = node->sibling) {
51 int * bus_range;
52 unsigned int *class_code;
53 int len;
54
55 /* For PCI<->PCI bridges or CardBus bridges, we go down */
56 class_code = (unsigned int *) get_property(node, "class-code", NULL);
57 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
58 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
59 continue;
60 bus_range = (int *) get_property(node, "bus-range", &len);
61 if (bus_range != NULL && len > 2 * sizeof(int)) {
62 if (bus_range[1] > higher)
63 higher = bus_range[1];
64 }
65 higher = fixup_one_level_bus_range(node->child, higher);
66 }
67 return higher;
68}
69
70/* This routine fixes the "bus-range" property of all bridges in the
71 * system since they tend to have their "last" member wrong on macs
72 *
73 * Note that the bus numbers manipulated here are OF bus numbers, they
74 * are not Linux bus numbers.
75 */
76static void __init fixup_bus_range(struct device_node *bridge)
77{
78 int * bus_range;
79 int len;
80
81 /* Lookup the "bus-range" property for the hose */
82 bus_range = (int *) get_property(bridge, "bus-range", &len);
83 if (bus_range == NULL || len < 2 * sizeof(int)) {
84 printk(KERN_WARNING "Can't get bus-range for %s\n",
85 bridge->full_name);
86 return;
87 }
88 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
89}
90
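Note: the two functions above walk the device tree below a bridge and raise the bridge's "bus-range" end to the highest value seen anywhere underneath. A simplified standalone sketch of that recursion (made-up tree type, and the class-code filtering of non-bridge nodes is omitted):

#include <stdio.h>

struct node {
	int has_range;
	int bus_range[2];
	struct node *child, *sibling;
};

static int fixup_one_level(struct node *n, int higher)
{
	for (; n; n = n->sibling) {
		if (n->has_range && n->bus_range[1] > higher)
			higher = n->bus_range[1];
		higher = fixup_one_level(n->child, higher);
	}
	return higher;
}

int main(void)
{
	struct node leaf = { 1, { 3, 5 }, NULL, NULL };
	struct node b2   = { 1, { 2, 2 }, &leaf, NULL };
	struct node b1   = { 1, { 1, 1 }, NULL, &b2 };
	struct node root = { 1, { 0, 1 }, &b1, NULL };	/* "last" member is wrong */

	root.bus_range[1] = fixup_one_level(root.child, root.bus_range[1]);
	printf("fixed bus-range: %d..%d\n", root.bus_range[0], root.bus_range[1]);
	return 0;
}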
91/*
92 * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
93 *
94 * The "Bandit" version is present in all early PCI PowerMacs,
95 * and up to the first ones using Grackle. Some machines may
96 * have 2 bandit controllers (2 PCI busses).
97 *
98 * "Chaos" is used in some "Bandit"-type machines as a bridge
99 * for the separate display bus. It is accessed the same
100 * way as bandit, but cannot be probed for devices. It therefore
101 * has its own config access functions.
102 *
103 * The "UniNorth" version is present in all Core99 machines
104 * (iBook, G4, new IMacs, and all the recent Apple machines).
105 * It contains 3 controllers in one ASIC.
106 *
107 * The U3 is the bridge used on G5 machines. It contains an
108 * AGP bus which is dealt with by the old UniNorth access routines
109 * and a HyperTransport bus which uses its own set of access
110 * functions.
111 */
112
113#define MACRISC_CFA0(devfn, off) \
114 ((1 << (unsigned long)PCI_SLOT(dev_fn)) \
115 | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
116 | (((unsigned long)(off)) & 0xFCUL))
117
118#define MACRISC_CFA1(bus, devfn, off) \
119 ((((unsigned long)(bus)) << 16) \
120 |(((unsigned long)(devfn)) << 8) \
121 |(((unsigned long)(off)) & 0xFCUL) \
122 |1UL)
123
124static unsigned long __pmac macrisc_cfg_access(struct pci_controller* hose,
125 u8 bus, u8 dev_fn, u8 offset)
126{
127 unsigned int caddr;
128
129 if (bus == hose->first_busno) {
130 if (dev_fn < (11 << 3))
131 return 0;
132 caddr = MACRISC_CFA0(dev_fn, offset);
133 } else
134 caddr = MACRISC_CFA1(bus, dev_fn, offset);
135
136 /* Uninorth will return garbage if we don't read back the value ! */
137 do {
138 out_le32(hose->cfg_addr, caddr);
139 } while (in_le32(hose->cfg_addr) != caddr);
140
141 offset &= has_uninorth ? 0x07 : 0x03;
142 return ((unsigned long)hose->cfg_data) + offset;
143}
144
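Note: the MACRISC_CFA0/CFA1 macros above encode type 0 (root bus, one-hot slot select) and type 1 (subordinate bus) configuration addresses. A small standalone sketch of the same bit layout, with invented example values; this is only an illustration, not the kernel macros, which additionally write the value to hose->cfg_addr:

#include <stdio.h>

#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)	((devfn) & 0x07)

static unsigned long macrisc_cfa0(unsigned int devfn, unsigned int off)
{
	return (1UL << PCI_SLOT(devfn))
	     | ((unsigned long)PCI_FUNC(devfn) << 8)
	     | ((unsigned long)off & 0xFCUL);
}

static unsigned long macrisc_cfa1(unsigned int bus, unsigned int devfn,
				  unsigned int off)
{
	return ((unsigned long)bus << 16)
	     | ((unsigned long)devfn << 8)
	     | ((unsigned long)off & 0xFCUL)
	     | 1UL;
}

int main(void)
{
	unsigned int devfn = (13 << 3) | 2;	/* slot 13, function 2 */

	printf("CFA0(slot 13, fn 2, off 0x40) = %#lx\n",
	       macrisc_cfa0(devfn, 0x40));
	printf("CFA1(bus 1, same devfn, off 0x40) = %#lx\n",
	       macrisc_cfa1(1, devfn, 0x40));
	return 0;
}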
145static int __pmac macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
146 int offset, int len, u32 *val)
147{
148 struct pci_controller *hose;
149 unsigned long addr;
150
151 hose = pci_bus_to_host(bus);
152 if (hose == NULL)
153 return PCIBIOS_DEVICE_NOT_FOUND;
154
155 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
156 if (!addr)
157 return PCIBIOS_DEVICE_NOT_FOUND;
158 /*
159 * Note: the caller has already checked that offset is
160 * suitably aligned and that len is 1, 2 or 4.
161 */
162 switch (len) {
163 case 1:
164 *val = in_8((u8 *)addr);
165 break;
166 case 2:
167 *val = in_le16((u16 *)addr);
168 break;
169 default:
170 *val = in_le32((u32 *)addr);
171 break;
172 }
173 return PCIBIOS_SUCCESSFUL;
174}
175
176static int __pmac macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
177 int offset, int len, u32 val)
178{
179 struct pci_controller *hose;
180 unsigned long addr;
181
182 hose = pci_bus_to_host(bus);
183 if (hose == NULL)
184 return PCIBIOS_DEVICE_NOT_FOUND;
185
186 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
187 if (!addr)
188 return PCIBIOS_DEVICE_NOT_FOUND;
189 /*
190 * Note: the caller has already checked that offset is
191 * suitably aligned and that len is 1, 2 or 4.
192 */
193 switch (len) {
194 case 1:
195 out_8((u8 *)addr, val);
196 (void) in_8((u8 *)addr);
197 break;
198 case 2:
199 out_le16((u16 *)addr, val);
200 (void) in_le16((u16 *)addr);
201 break;
202 default:
203 out_le32((u32 *)addr, val);
204 (void) in_le32((u32 *)addr);
205 break;
206 }
207 return PCIBIOS_SUCCESSFUL;
208}
209
210static struct pci_ops macrisc_pci_ops =
211{
212 macrisc_read_config,
213 macrisc_write_config
214};
215
216/*
217 * These versions of U3 HyperTransport config space access ops do not
218 * implement self-view of the HT host yet
219 */
220
221/*
222 * This function deals with some "special cases" devices.
223 *
224 * 0 -> No special case
225 * 1 -> Skip the device but act as if the access was successful
226 * (return 0xff's on reads, eventually, cache config space
227 * accesses in a later version)
228 * -1 -> Hide the device (unsuccessful access)
229 */
230static int u3_ht_skip_device(struct pci_controller *hose,
231 struct pci_bus *bus, unsigned int devfn)
232{
233 struct device_node *busdn, *dn;
234 int i;
235
236 /* We only allow config cycles to devices that are in OF device-tree
237 * as we are apparently having some weird things going on with some
238 * revs of K2 on recent G5s
239 */
240 if (bus->self)
241 busdn = pci_device_to_OF_node(bus->self);
242 else
243 busdn = hose->arch_data;
244 for (dn = busdn->child; dn; dn = dn->sibling)
245 if (dn->data && PCI_DN(dn)->devfn == devfn)
246 break;
247 if (dn == NULL)
248 return -1;
249
250 /*
251 * When a device in K2 is powered down, we die on config
252 * cycle accesses. Fix that here.
253 */
254 for (i=0; i<2; i++)
255 if (k2_skiplist[i] == dn)
256 return 1;
257
258 return 0;
259}
260
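Note: u3_ht_skip_device() above returns the three-way convention documented in its comment. A tiny standalone sketch of how a caller acts on it, pretending success with all-ones of the access width when a device is skipped (fake_read() and its values are invented, not the kernel API):

#include <stdio.h>
#include <stdint.h>

static int fake_read(int skip, int len, uint32_t *val)
{
	switch (skip) {
	case 0:
		*val = 0x12345678;	/* pretend we really read hardware */
		return 0;		/* PCIBIOS_SUCCESSFUL */
	case 1:
		*val = (len == 1) ? 0xff : (len == 2) ? 0xffff : 0xffffffffu;
		return 0;		/* device powered down: fake success */
	default:
		return -1;		/* PCIBIOS_DEVICE_NOT_FOUND */
	}
}

int main(void)
{
	int codes[3] = { 0, 1, -1 };
	int i;

	for (i = 0; i < 3; i++) {
		uint32_t v = 0;
		int rc = fake_read(codes[i], 2, &v);

		printf("skip=%d -> rc=%d val=%#x\n", codes[i], rc, v);
	}
	return 0;
}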
261#define U3_HT_CFA0(devfn, off) \
262 ((((unsigned long)devfn) << 8) | offset)
263#define U3_HT_CFA1(bus, devfn, off) \
264 (U3_HT_CFA0(devfn, off) \
265 + (((unsigned long)bus) << 16) \
266 + 0x01000000UL)
267
268static unsigned long __pmac u3_ht_cfg_access(struct pci_controller* hose,
269 u8 bus, u8 devfn, u8 offset)
270{
271 if (bus == hose->first_busno) {
272 /* For now, we don't self probe U3 HT bridge */
273 if (PCI_SLOT(devfn) == 0)
274 return 0;
275 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
276 } else
277 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
278}
279
280static int __pmac u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
281 int offset, int len, u32 *val)
282{
283 struct pci_controller *hose;
284 unsigned long addr;
285
286
287 hose = pci_bus_to_host(bus);
288 if (hose == NULL)
289 return PCIBIOS_DEVICE_NOT_FOUND;
290
291 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
292 if (!addr)
293 return PCIBIOS_DEVICE_NOT_FOUND;
294
295 switch (u3_ht_skip_device(hose, bus, devfn)) {
296 case 0:
297 break;
298 case 1:
299 switch (len) {
300 case 1:
301 *val = 0xff; break;
302 case 2:
303 *val = 0xffff; break;
304 default:
305 *val = 0xfffffffful; break;
306 }
307 return PCIBIOS_SUCCESSFUL;
308 default:
309 return PCIBIOS_DEVICE_NOT_FOUND;
310 }
311
312 /*
313 * Note: the caller has already checked that offset is
314 * suitably aligned and that len is 1, 2 or 4.
315 */
316 switch (len) {
317 case 1:
318 *val = in_8((u8 *)addr);
319 break;
320 case 2:
321 *val = in_le16((u16 *)addr);
322 break;
323 default:
324 *val = in_le32((u32 *)addr);
325 break;
326 }
327 return PCIBIOS_SUCCESSFUL;
328}
329
330static int __pmac u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
331 int offset, int len, u32 val)
332{
333 struct pci_controller *hose;
334 unsigned long addr;
335
336 hose = pci_bus_to_host(bus);
337 if (hose == NULL)
338 return PCIBIOS_DEVICE_NOT_FOUND;
339
340 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
341 if (!addr)
342 return PCIBIOS_DEVICE_NOT_FOUND;
343
344 switch (u3_ht_skip_device(hose, bus, devfn)) {
345 case 0:
346 break;
347 case 1:
348 return PCIBIOS_SUCCESSFUL;
349 default:
350 return PCIBIOS_DEVICE_NOT_FOUND;
351 }
352
353 /*
354 * Note: the caller has already checked that offset is
355 * suitably aligned and that len is 1, 2 or 4.
356 */
357 switch (len) {
358 case 1:
359 out_8((u8 *)addr, val);
360 (void) in_8((u8 *)addr);
361 break;
362 case 2:
363 out_le16((u16 *)addr, val);
364 (void) in_le16((u16 *)addr);
365 break;
366 default:
367 out_le32((u32 *)addr, val);
368 (void) in_le32((u32 *)addr);
369 break;
370 }
371 return PCIBIOS_SUCCESSFUL;
372}
373
374static struct pci_ops u3_ht_pci_ops =
375{
376 u3_ht_read_config,
377 u3_ht_write_config
378};
379
380static void __init setup_u3_agp(struct pci_controller* hose)
381{
382 /* On G5, we move AGP up to high bus number so we don't need
383 * to reassign bus numbers for HT. If we ever have P2P bridges
384 * on AGP, we'll have to move pci_assign_all_busses to the
385 * pci_controller structure so we enable it for AGP and not for
386 * HT children.
387 * We hard code the address because of the different size of
388 * the reg address cell, we shall fix that by killing struct
389 * reg_property and using some accessor functions instead
390 */
391 hose->first_busno = 0xf0;
392 hose->last_busno = 0xff;
393 has_uninorth = 1;
394 hose->ops = &macrisc_pci_ops;
395 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
396 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
397
398 u3_agp = hose;
399}
400
401static void __init setup_u3_ht(struct pci_controller* hose)
402{
403 struct device_node *np = (struct device_node *)hose->arch_data;
404 int i, cur;
405
406 hose->ops = &u3_ht_pci_ops;
407
408 /* We hard code the address because of the different size of
409 * the reg address cell, we shall fix that by killing struct
410 * reg_property and using some accessor functions instead
411 */
412 hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
413
414 /*
415 * /ht node doesn't expose a "ranges" property, so we "remove" regions that
416 * have been allocated to AGP. So far, this version of the code doesn't assign
417 * any of the 0xfxxxxxxx "fine" memory regions to /ht.
418 * We need to fix that sooner or later by either parsing all child "ranges"
419 * properties or figuring out the U3 address space decoding logic and
420 * then reading its configuration register (if any).
421 */
422 hose->io_base_phys = 0xf4000000;
423 hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
424 isa_io_base = pci_io_base = (unsigned long) hose->io_base_virt;
425 hose->io_resource.name = np->full_name;
426 hose->io_resource.start = 0;
427 hose->io_resource.end = 0x003fffff;
428 hose->io_resource.flags = IORESOURCE_IO;
429 hose->pci_mem_offset = 0;
430 hose->first_busno = 0;
431 hose->last_busno = 0xef;
432 hose->mem_resources[0].name = np->full_name;
433 hose->mem_resources[0].start = 0x80000000;
434 hose->mem_resources[0].end = 0xefffffff;
435 hose->mem_resources[0].flags = IORESOURCE_MEM;
436
437 if (u3_agp == NULL) {
438 DBG("U3 has no AGP, using full resource range\n");
439 return;
440 }
441
442 /* We "remove" the AGP resources from the resources allocated to HT, that
443 * is we create "holes". However, that code makes assumptions that so far
444 * happen to be true (cross fingers...), typically that resources in the
445 * AGP node are properly ordered
446 */
447 cur = 0;
448 for (i=0; i<3; i++) {
449 struct resource *res = &u3_agp->mem_resources[i];
450 if (res->flags != IORESOURCE_MEM)
451 continue;
452 /* We don't care about "fine" resources */
453 if (res->start >= 0xf0000000)
454 continue;
455 /* Check if it's just a matter of "shrinking" us in one direction */
456 if (hose->mem_resources[cur].start == res->start) {
457 DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
458 cur, hose->mem_resources[cur].start, res->end + 1);
459 hose->mem_resources[cur].start = res->end + 1;
460 continue;
461 }
462 if (hose->mem_resources[cur].end == res->end) {
463 DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
464 cur, hose->mem_resources[cur].end, res->start - 1);
465 hose->mem_resources[cur].end = res->start - 1;
466 continue;
467 }
468 /* No, it's not the case, we need a hole */
469 if (cur == 2) {
470 /* not enough resources for a hole, we drop part of the range */
471 printk(KERN_WARNING "Running out of resources for /ht host !\n");
472 hose->mem_resources[cur].end = res->start - 1;
473 continue;
474 }
475 cur++;
476 DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
477 cur-1, res->start - 1, cur, res->end + 1);
478 hose->mem_resources[cur].name = np->full_name;
479 hose->mem_resources[cur].flags = IORESOURCE_MEM;
480 hose->mem_resources[cur].start = res->end + 1;
481 hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
482 hose->mem_resources[cur-1].end = res->start - 1;
483 }
484}
485
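Note: the loop at the end of setup_u3_ht() above removes the AGP windows from the HT memory resource either by shrinking it from one end or by splitting it, falling back to dropping the tail once all three slots are used. A hedged standalone sketch of the same logic on plain integer intervals (window values invented, struct resource replaced by a toy struct):

#include <stdio.h>

struct win { unsigned long start, end; };

int main(void)
{
	struct win ht[3] = { { 0x80000000UL, 0xefffffffUL } };
	struct win agp[] = { { 0x80000000UL, 0x8fffffffUL },   /* shrink start */
			     { 0xa0000000UL, 0xafffffffUL } };  /* real hole   */
	int cur = 0, i;

	for (i = 0; i < 2; i++) {
		struct win *res = &agp[i];

		if (ht[cur].start == res->start) {
			ht[cur].start = res->end + 1;		/* shrink from below */
			continue;
		}
		if (ht[cur].end == res->end) {
			ht[cur].end = res->start - 1;		/* shrink from above */
			continue;
		}
		if (cur == 2) {					/* out of slots: drop tail */
			ht[cur].end = res->start - 1;
			continue;
		}
		cur++;						/* punch a hole */
		ht[cur].start = res->end + 1;
		ht[cur].end = ht[cur - 1].end;
		ht[cur - 1].end = res->start - 1;
	}
	for (i = 0; i <= cur; i++)
		printf("HT window %d: %#lx-%#lx\n", i, ht[i].start, ht[i].end);
	return 0;
}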
486static void __init pmac_process_bridge_OF_ranges(struct pci_controller *hose,
487 struct device_node *dev, int primary)
488{
489 static unsigned int static_lc_ranges[2024];
490 unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
491 unsigned int size;
492 int rlen = 0, orig_rlen;
493 int memno = 0;
494 struct resource *res;
495 int np, na = prom_n_addr_cells(dev);
496
497 np = na + 5;
498
499 /* First we try to merge ranges to fix a problem with some pmacs
500 * that can have more than 3 ranges, fortunately using contiguous
501 * addresses -- BenH
502 */
503 dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
504 if (!dt_ranges)
505 return;
506 /* lc_ranges = alloc_bootmem(rlen);*/
507 lc_ranges = static_lc_ranges;
508 if (!lc_ranges)
509 return; /* what can we do here ? */
510 memcpy(lc_ranges, dt_ranges, rlen);
511 orig_rlen = rlen;
512
513 /* Let's work on a copy of the "ranges" property instead of damaging
514 * the device-tree image in memory
515 */
516 ranges = lc_ranges;
517 prev = NULL;
518 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
519 if (prev) {
520 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
521 (prev[2] + prev[na+4]) == ranges[2] &&
522 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
523 prev[na+4] += ranges[na+4];
524 ranges[0] = 0;
525 ranges += np;
526 continue;
527 }
528 }
529 prev = ranges;
530 ranges += np;
531 }
532
533 /*
534 * The ranges property is laid out as an array of elements,
535 * each of which comprises:
536 * cells 0 - 2: a PCI address
537 * cells 3 or 3+4: a CPU physical address
538 * (size depending on dev->n_addr_cells)
539 * cells 4+5 or 5+6: the size of the range
540 */
541 ranges = lc_ranges;
542 rlen = orig_rlen;
543 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
544 res = NULL;
545 size = ranges[na+4];
546 switch (ranges[0] >> 24) {
547 case 1: /* I/O space */
548 if (ranges[2] != 0)
549 break;
550 hose->io_base_phys = ranges[na+2];
551 /* limit I/O space to 16MB */
552 if (size > 0x01000000)
553 size = 0x01000000;
554 hose->io_base_virt = ioremap(ranges[na+2], size);
555 if (primary)
556 isa_io_base = (unsigned long) hose->io_base_virt;
557 res = &hose->io_resource;
558 res->flags = IORESOURCE_IO;
559 res->start = ranges[2];
560 break;
561 case 2: /* memory space */
562 memno = 0;
563 if (ranges[1] == 0 && ranges[2] == 0
564 && ranges[na+4] <= (16 << 20)) {
565 /* 1st 16MB, i.e. ISA memory area */
566#if 0
567 if (primary)
568 isa_mem_base = ranges[na+2];
569#endif
570 memno = 1;
571 }
572 while (memno < 3 && hose->mem_resources[memno].flags)
573 ++memno;
574 if (memno == 0)
575 hose->pci_mem_offset = ranges[na+2] - ranges[2];
576 if (memno < 3) {
577 res = &hose->mem_resources[memno];
578 res->flags = IORESOURCE_MEM;
579 res->start = ranges[na+2];
580 }
581 break;
582 }
583 if (res != NULL) {
584 res->name = dev->full_name;
585 res->end = res->start + size - 1;
586 res->parent = NULL;
587 res->sibling = NULL;
588 res->child = NULL;
589 }
590 ranges += np;
591 }
592}
593
594/*
595 * We assume that if we have a G3 powermac, we have one bridge called
596 * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
597 * if we have one or more bandit or chaos bridges, we don't have a MPC106.
598 */
599static int __init add_bridge(struct device_node *dev)
600{
601 int len;
602 struct pci_controller *hose;
603 char* disp_name;
604 int *bus_range;
605 int primary = 1;
606 struct property *of_prop;
607
608 DBG("Adding PCI host bridge %s\n", dev->full_name);
609
610 bus_range = (int *) get_property(dev, "bus-range", &len);
611 if (bus_range == NULL || len < 2 * sizeof(int)) {
612 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
613 dev->full_name);
614 }
615
616 hose = alloc_bootmem(sizeof(struct pci_controller));
617 if (hose == NULL)
618 return -ENOMEM;
619 pci_setup_pci_controller(hose);
620
621 hose->arch_data = dev;
622 hose->first_busno = bus_range ? bus_range[0] : 0;
623 hose->last_busno = bus_range ? bus_range[1] : 0xff;
624
625 of_prop = alloc_bootmem(sizeof(struct property) +
626 sizeof(hose->global_number));
627 if (of_prop) {
628 memset(of_prop, 0, sizeof(struct property));
629 of_prop->name = "linux,pci-domain";
630 of_prop->length = sizeof(hose->global_number);
631 of_prop->value = (unsigned char *)&of_prop[1];
632 memcpy(of_prop->value, &hose->global_number, sizeof(hose->global_number));
633 prom_add_property(dev, of_prop);
634 }
635
636 disp_name = NULL;
637 if (device_is_compatible(dev, "u3-agp")) {
638 setup_u3_agp(hose);
639 disp_name = "U3-AGP";
640 primary = 0;
641 } else if (device_is_compatible(dev, "u3-ht")) {
642 setup_u3_ht(hose);
643 disp_name = "U3-HT";
644 primary = 1;
645 }
646 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
647 disp_name, hose->first_busno, hose->last_busno);
648
649 /* Interpret the "ranges" property */
650 /* This also maps the I/O region and sets isa_io/mem_base */
651 pmac_process_bridge_OF_ranges(hose, dev, primary);
652
653 /* Fixup "bus-range" OF property */
654 fixup_bus_range(dev);
655
656 return 0;
657}
658
659/*
660 * We use our own read_irq_line here because PCI_INTERRUPT_PIN is
661 * crap on some Apple ASICs. We unconditionally use the Open Firmware
662 * interrupt number as this is always right.
663 */
664static int pmac_pci_read_irq_line(struct pci_dev *pci_dev)
665{
666 struct device_node *node;
667
668 node = pci_device_to_OF_node(pci_dev);
669 if (node == NULL)
670 return -1;
671 if (node->n_intrs == 0)
672 return -1;
673 pci_dev->irq = node->intrs[0].line;
674 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
675
676 return 0;
677}
678
679void __init pmac_pcibios_fixup(void)
680{
681 struct pci_dev *dev = NULL;
682
683 for_each_pci_dev(dev)
684 pmac_pci_read_irq_line(dev);
685}
686
687static void __init pmac_fixup_phb_resources(void)
688{
689 struct pci_controller *hose, *tmp;
690
691 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
692 unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
693 hose->io_resource.start += offset;
694 hose->io_resource.end += offset;
695 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
696 hose->global_number,
697 hose->io_resource.start, hose->io_resource.end);
698 }
699}
700
701void __init pmac_pci_init(void)
702{
703 struct device_node *np, *root;
704 struct device_node *ht = NULL;
705
706 /* Probe root PCI hosts, that is on U3 the AGP host and the
707 * HyperTransport host. That one is actually "kept" around
708 * and added last as its resource management relies
709 * on the AGP resources to have been set up first
710 */
711 root = of_find_node_by_path("/");
712 if (root == NULL) {
713 printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
714 return;
715 }
716 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
717 if (np->name == NULL)
718 continue;
719 if (strcmp(np->name, "pci") == 0) {
720 if (add_bridge(np) == 0)
721 of_node_get(np);
722 }
723 if (strcmp(np->name, "ht") == 0) {
724 of_node_get(np);
725 ht = np;
726 }
727 }
728 of_node_put(root);
729
730 /* Now setup the HyperTransport host if we found any
731 */
732 if (ht && add_bridge(ht) != 0)
733 of_node_put(ht);
734
735 /* Fixup the IO resources on our host bridges as the common code
736 * does it only for children of the host bridges
737 */
738 pmac_fixup_phb_resources();
739
740 /* Setup the linkage between OF nodes and PHBs */
741 pci_devs_phb_init();
742
743 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
744 * assume there is no P2P bridge on the AGP bus, which should be a
745 * safe assumption, hopefully.
746 */
747 if (u3_agp) {
748 struct device_node *np = u3_agp->arch_data;
749 PCI_DN(np)->busno = 0xf0;
750 for (np = np->child; np; np = np->sibling)
751 PCI_DN(np)->busno = 0xf0;
752 }
753
754 pmac_check_ht_link();
755
756 /* Tell pci.c to not use the common resource allocation mechanism */
757 pci_probe_only = 1;
758
759 /* Allow all IO */
760 io_page_mask = -1;
761}
762
763/*
764 * Disable second function on K2-SATA, it's broken
765 * and disable IO BARs on first one
766 */
767static void fixup_k2_sata(struct pci_dev* dev)
768{
769 int i;
770 u16 cmd;
771
772 if (PCI_FUNC(dev->devfn) > 0) {
773 pci_read_config_word(dev, PCI_COMMAND, &cmd);
774 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
775 pci_write_config_word(dev, PCI_COMMAND, cmd);
776 for (i = 0; i < 6; i++) {
777 dev->resource[i].start = dev->resource[i].end = 0;
778 dev->resource[i].flags = 0;
779 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
780 }
781 } else {
782 pci_read_config_word(dev, PCI_COMMAND, &cmd);
783 cmd &= ~PCI_COMMAND_IO;
784 pci_write_config_word(dev, PCI_COMMAND, cmd);
785 for (i = 0; i < 5; i++) {
786 dev->resource[i].start = dev->resource[i].end = 0;
787 dev->resource[i].flags = 0;
788 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
789 }
790 }
791}
792DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
793
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
deleted file mode 100644
index fa8121d53b89..000000000000
--- a/arch/ppc64/kernel/pmac_setup.c
+++ /dev/null
@@ -1,525 +0,0 @@
1/*
2 * arch/ppc/platforms/setup.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Adapted for Power Macintosh by Paul Mackerras
8 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
9 *
10 * Derived from "arch/alpha/kernel/setup.c"
11 * Copyright (C) 1995 Linus Torvalds
12 *
13 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22/*
23 * bootup setup stuff..
24 */
25
26#undef DEBUG
27
28#include <linux/config.h>
29#include <linux/init.h>
30#include <linux/errno.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/mm.h>
34#include <linux/stddef.h>
35#include <linux/unistd.h>
36#include <linux/ptrace.h>
37#include <linux/slab.h>
38#include <linux/user.h>
39#include <linux/a.out.h>
40#include <linux/tty.h>
41#include <linux/string.h>
42#include <linux/delay.h>
43#include <linux/ioport.h>
44#include <linux/major.h>
45#include <linux/initrd.h>
46#include <linux/vt_kern.h>
47#include <linux/console.h>
48#include <linux/ide.h>
49#include <linux/pci.h>
50#include <linux/adb.h>
51#include <linux/cuda.h>
52#include <linux/pmu.h>
53#include <linux/irq.h>
54#include <linux/seq_file.h>
55#include <linux/root_dev.h>
56#include <linux/bitops.h>
57
58#include <asm/processor.h>
59#include <asm/sections.h>
60#include <asm/prom.h>
61#include <asm/system.h>
62#include <asm/io.h>
63#include <asm/pci-bridge.h>
64#include <asm/iommu.h>
65#include <asm/machdep.h>
66#include <asm/dma.h>
67#include <asm/btext.h>
68#include <asm/cputable.h>
69#include <asm/pmac_feature.h>
70#include <asm/time.h>
71#include <asm/of_device.h>
72#include <asm/lmb.h>
73#include <asm/smu.h>
74#include <asm/pmc.h>
75
76#include "pmac.h"
77#include "mpic.h"
78
79#ifdef DEBUG
80#define DBG(fmt...) udbg_printf(fmt)
81#else
82#define DBG(fmt...)
83#endif
84
85static int current_root_goodness = -1;
86#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
87
88extern int powersave_nap;
89int sccdbg;
90
91sys_ctrler_t sys_ctrler;
92EXPORT_SYMBOL(sys_ctrler);
93
94#ifdef CONFIG_PMAC_SMU
95unsigned long smu_cmdbuf_abs;
96EXPORT_SYMBOL(smu_cmdbuf_abs);
97#endif
98
99extern void udbg_init_scc(struct device_node *np);
100
101static void __pmac pmac_show_cpuinfo(struct seq_file *m)
102{
103 struct device_node *np;
104 char *pp;
105 int plen;
106 char* mbname;
107 int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
108 PMAC_MB_INFO_MODEL, 0);
109 unsigned int mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
110 PMAC_MB_INFO_FLAGS, 0);
111
112 if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
113 (long)&mbname) != 0)
114 mbname = "Unknown";
115
116 /* find motherboard type */
117 seq_printf(m, "machine\t\t: ");
118 np = of_find_node_by_path("/");
119 if (np != NULL) {
120 pp = (char *) get_property(np, "model", NULL);
121 if (pp != NULL)
122 seq_printf(m, "%s\n", pp);
123 else
124 seq_printf(m, "PowerMac\n");
125 pp = (char *) get_property(np, "compatible", &plen);
126 if (pp != NULL) {
127 seq_printf(m, "motherboard\t:");
128 while (plen > 0) {
129 int l = strlen(pp) + 1;
130 seq_printf(m, " %s", pp);
131 plen -= l;
132 pp += l;
133 }
134 seq_printf(m, "\n");
135 }
136 of_node_put(np);
137 } else
138 seq_printf(m, "PowerMac\n");
139
140 /* print parsed model */
141 seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
142 seq_printf(m, "pmac flags\t: %08x\n", mbflags);
143
144 /* Indicate newworld */
145 seq_printf(m, "pmac-generation\t: NewWorld\n");
146}
147
148
149static void __init pmac_setup_arch(void)
150{
151 /* init to some ~sane value until calibrate_delay() runs */
152 loops_per_jiffy = 50000000;
153
154 /* Probe motherboard chipset */
155 pmac_feature_init();
156#if 0
157 /* Lock-enable the SCC channel used for debug */
158 if (sccdbg) {
159 np = of_find_node_by_name(NULL, "escc");
160 if (np)
161 pmac_call_feature(PMAC_FTR_SCC_ENABLE, np,
162 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
163 }
164#endif
165 /* We can NAP */
166 powersave_nap = 1;
167
168#ifdef CONFIG_ADB_PMU
169 /* Initialize the PMU if any */
170 find_via_pmu();
171#endif
172#ifdef CONFIG_PMAC_SMU
173 /* Initialize the SMU if any */
174 smu_init();
175#endif
176
177 /* Init NVRAM access */
178 pmac_nvram_init();
179
180 /* Setup SMP callback */
181#ifdef CONFIG_SMP
182 pmac_setup_smp();
183#endif
184
185 /* Lookup PCI hosts */
186 pmac_pci_init();
187
188#ifdef CONFIG_DUMMY_CONSOLE
189 conswitchp = &dummy_con;
190#endif
191
192 printk(KERN_INFO "Using native/NAP idle loop\n");
193}
194
195#ifdef CONFIG_SCSI
196void note_scsi_host(struct device_node *node, void *host)
197{
198 /* Obsolete */
199}
200#endif
201
202
203static int initializing = 1;
204
205static int pmac_late_init(void)
206{
207 initializing = 0;
208 return 0;
209}
210
211late_initcall(pmac_late_init);
212
213/* can't be __init - can be called whenever a disk is first accessed */
214void __pmac note_bootable_part(dev_t dev, int part, int goodness)
215{
216 extern dev_t boot_dev;
217 char *p;
218
219 if (!initializing)
220 return;
221 if ((goodness <= current_root_goodness) &&
222 ROOT_DEV != DEFAULT_ROOT_DEVICE)
223 return;
224 p = strstr(saved_command_line, "root=");
225 if (p != NULL && (p == saved_command_line || p[-1] == ' '))
226 return;
227
228 if (!boot_dev || dev == boot_dev) {
229 ROOT_DEV = dev + part;
230 boot_dev = 0;
231 current_root_goodness = goodness;
232 }
233}
234
235static void __pmac pmac_restart(char *cmd)
236{
237 switch(sys_ctrler) {
238#ifdef CONFIG_ADB_PMU
239 case SYS_CTRLER_PMU:
240 pmu_restart();
241 break;
242#endif
243
244#ifdef CONFIG_PMAC_SMU
245 case SYS_CTRLER_SMU:
246 smu_restart();
247 break;
248#endif
249 default:
250 ;
251 }
252}
253
254static void __pmac pmac_power_off(void)
255{
256 switch(sys_ctrler) {
257#ifdef CONFIG_ADB_PMU
258 case SYS_CTRLER_PMU:
259 pmu_shutdown();
260 break;
261#endif
262#ifdef CONFIG_PMAC_SMU
263 case SYS_CTRLER_SMU:
264 smu_shutdown();
265 break;
266#endif
267 default:
268 ;
269 }
270}
271
272static void __pmac pmac_halt(void)
273{
274 pmac_power_off();
275}
276
277#ifdef CONFIG_BOOTX_TEXT
278static void btext_putc(unsigned char c)
279{
280 btext_drawchar(c);
281}
282
283static void __init init_boot_display(void)
284{
285 char *name;
286 struct device_node *np = NULL;
287 int rc = -ENODEV;
288
289 printk("trying to initialize btext ...\n");
290
291 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
292 if (name != NULL) {
293 np = of_find_node_by_path(name);
294 if (np != NULL) {
295 if (strcmp(np->type, "display") != 0) {
296 printk("boot stdout isn't a display !\n");
297 of_node_put(np);
298 np = NULL;
299 }
300 }
301 }
302 if (np)
303 rc = btext_initialize(np);
304 if (rc == 0)
305 return;
306
307 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
308 if (get_property(np, "linux,opened", NULL)) {
309 printk("trying %s ...\n", np->full_name);
310 rc = btext_initialize(np);
311 printk("result: %d\n", rc);
312 }
313 if (rc == 0)
314 return;
315 }
316}
317#endif /* CONFIG_BOOTX_TEXT */
318
319/*
320 * Early initialization.
321 */
322static void __init pmac_init_early(void)
323{
324 DBG(" -> pmac_init_early\n");
325
326 /* Initialize hash table, from now on, we can take hash faults
327 * and call ioremap
328 */
329 hpte_init_native();
330
331 /* Init SCC */
332 if (strstr(cmd_line, "sccdbg")) {
333 sccdbg = 1;
334 udbg_init_scc(NULL);
335 }
336#ifdef CONFIG_BOOTX_TEXT
337 else {
338 init_boot_display();
339
340 udbg_putc = btext_putc;
341 }
342#endif /* CONFIG_BOOTX_TEXT */
343
344 /* Setup interrupt mapping options */
345 ppc64_interrupt_controller = IC_OPEN_PIC;
346
347 iommu_init_early_u3();
348
349 DBG(" <- pmac_init_early\n");
350}
351
352static int pmac_u3_cascade(struct pt_regs *regs, void *data)
353{
354 return mpic_get_one_irq((struct mpic *)data, regs);
355}
356
357static __init void pmac_init_IRQ(void)
358{
359 struct device_node *irqctrler = NULL;
360 struct device_node *irqctrler2 = NULL;
361 struct device_node *np = NULL;
362 struct mpic *mpic1, *mpic2;
363
364 /* We first try to detect Apple's new Core99 chipset, since mac-io
365 * is quite different on those machines and contains an IBM MPIC2.
366 */
367 while ((np = of_find_node_by_type(np, "open-pic")) != NULL) {
368 struct device_node *parent = of_get_parent(np);
369 if (parent && !strcmp(parent->name, "u3"))
370 irqctrler2 = of_node_get(np);
371 else
372 irqctrler = of_node_get(np);
373 of_node_put(parent);
374 }
375 if (irqctrler != NULL && irqctrler->n_addrs > 0) {
376 unsigned char senses[128];
377
378 printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
379 (unsigned int)irqctrler->addrs[0].address);
380
381 prom_get_irq_senses(senses, 0, 128);
382 mpic1 = mpic_alloc(irqctrler->addrs[0].address,
383 MPIC_PRIMARY | MPIC_WANTS_RESET,
384 0, 0, 128, 256, senses, 128, " K2-MPIC ");
385 BUG_ON(mpic1 == NULL);
386 mpic_init(mpic1);
387
388 if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
389 irqctrler2->n_addrs > 0) {
390 printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
391 (u32)irqctrler2->addrs[0].address,
392 irqctrler2->intrs[0].line);
393
394 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
395 prom_get_irq_senses(senses, 128, 128 + 128);
396
397 /* We don't need to set MPIC_BROKEN_U3 here since we don't have
398 * hypertransport interrupts routed to it
399 */
400 mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
401 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
402 0, 128, 128, 0, senses, 128, " U3-MPIC ");
403 BUG_ON(mpic2 == NULL);
404 mpic_init(mpic2);
405 mpic_setup_cascade(irqctrler2->intrs[0].line,
406 pmac_u3_cascade, mpic2);
407 }
408 }
409 of_node_put(irqctrler);
410 of_node_put(irqctrler2);
411}
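/* Layout set up above: the primary (K2) MPIC owns interrupt sources
 * 0-127.  On U3-based machines a second MPIC appears to own sources
 * 128-255 (offset 128, 128 sources) and is cascaded into the primary
 * through the slave controller's own interrupt line via
 * mpic_setup_cascade(), with pmac_u3_cascade() fetching the pending
 * source number from the slave. */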
412
413static void __init pmac_progress(char *s, unsigned short hex)
414{
415 if (sccdbg) {
416 udbg_puts(s);
417 udbg_puts("\n");
418 }
419#ifdef CONFIG_BOOTX_TEXT
420 else if (boot_text_mapped) {
421 btext_drawstring(s);
422 btext_drawstring("\n");
423 }
424#endif /* CONFIG_BOOTX_TEXT */
425}
426
427/*
428 * pmac has no legacy IO, anything calling this function has to
429 * fail or bad things will happen
430 */
431static int pmac_check_legacy_ioport(unsigned int baseport)
432{
433 return -ENODEV;
434}
435
436static int __init pmac_declare_of_platform_devices(void)
437{
438 struct device_node *np, *npp;
439
440 npp = of_find_node_by_name(NULL, "u3");
441 if (npp) {
442 for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
443 if (strncmp(np->name, "i2c", 3) == 0) {
444 of_platform_device_create(np, "u3-i2c", NULL);
445 of_node_put(np);
446 break;
447 }
448 }
449 of_node_put(npp);
450 }
451 npp = of_find_node_by_type(NULL, "smu");
452 if (npp) {
453 of_platform_device_create(npp, "smu", NULL);
454 of_node_put(npp);
455 }
456
457 return 0;
458}
459
460device_initcall(pmac_declare_of_platform_devices);
461
462/*
463 * Called very early, MMU is off, device-tree isn't unflattened
464 */
465static int __init pmac_probe(int platform)
466{
467 if (platform != PLATFORM_POWERMAC)
468 return 0;
469 /*
470 * On U3, the DART (iommu) must be allocated now since it
471 * has an impact on htab_initialize (due to the large page it
472 * occupies having to be broken up so the DART itself is not
 473 * part of the cacheable linear mapping)
474 */
475 alloc_u3_dart_table();
476
477#ifdef CONFIG_PMAC_SMU
478 /*
479 * SMU based G5s need some memory below 2Gb, at least the current
480 * driver needs that. We have to allocate it now. We allocate 4k
481 * (1 small page) for now.
482 */
483 smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
484#endif /* CONFIG_PMAC_SMU */
485
486 return 1;
487}
488
489static int pmac_probe_mode(struct pci_bus *bus)
490{
491 struct device_node *node = bus->sysdata;
492
493 /* We need to use normal PCI probing for the AGP bus,
494 since the device for the AGP bridge isn't in the tree. */
495 if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
496 return PCI_PROBE_NORMAL;
497
498 return PCI_PROBE_DEVTREE;
499}
500
501struct machdep_calls __initdata pmac_md = {
502#ifdef CONFIG_HOTPLUG_CPU
503 .cpu_die = generic_mach_cpu_die,
504#endif
505 .probe = pmac_probe,
506 .setup_arch = pmac_setup_arch,
507 .init_early = pmac_init_early,
508 .get_cpuinfo = pmac_show_cpuinfo,
509 .init_IRQ = pmac_init_IRQ,
510 .get_irq = mpic_get_irq,
511 .pcibios_fixup = pmac_pcibios_fixup,
512 .pci_probe_mode = pmac_probe_mode,
513 .restart = pmac_restart,
514 .power_off = pmac_power_off,
515 .halt = pmac_halt,
516 .get_boot_time = pmac_get_boot_time,
517 .set_rtc_time = pmac_set_rtc_time,
518 .get_rtc_time = pmac_get_rtc_time,
519 .calibrate_decr = pmac_calibrate_decr,
520 .feature_call = pmac_do_feature_call,
521 .progress = pmac_progress,
522 .check_legacy_ioport = pmac_check_legacy_ioport,
523 .idle_loop = native_idle,
524 .enable_pmcs = power4_enable_pmcs,
525};
diff --git a/arch/ppc64/kernel/pmac_smp.c b/arch/ppc64/kernel/pmac_smp.c
deleted file mode 100644
index a23de37227bf..000000000000
--- a/arch/ppc64/kernel/pmac_smp.c
+++ /dev/null
@@ -1,330 +0,0 @@
1/*
2 * SMP support for power macintosh.
3 *
4 * We support both the old "powersurge" SMP architecture
5 * and the current Core99 (G4 PowerMac) machines.
6 *
 7 * Note that we don't support the very first rev. of the
 8 * Apple/DayStar dual-CPU board, the one with the funky
9 * watchdog. Hopefully, none of these should be there except
10 * maybe internally to Apple. I should probably still add some
11 * code to detect this card though and disable SMP. --BenH.
12 *
13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15 *
16 * Support for DayStar quad CPU cards
17 * Copyright (C) XLR8, Inc. 1994-2000
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24
25#undef DEBUG
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/sched.h>
30#include <linux/smp.h>
31#include <linux/smp_lock.h>
32#include <linux/interrupt.h>
33#include <linux/kernel_stat.h>
34#include <linux/init.h>
35#include <linux/spinlock.h>
36#include <linux/errno.h>
37#include <linux/irq.h>
38
39#include <asm/ptrace.h>
40#include <asm/atomic.h>
41#include <asm/irq.h>
42#include <asm/page.h>
43#include <asm/pgtable.h>
44#include <asm/sections.h>
45#include <asm/io.h>
46#include <asm/prom.h>
47#include <asm/smp.h>
48#include <asm/machdep.h>
49#include <asm/pmac_feature.h>
50#include <asm/time.h>
51#include <asm/cacheflush.h>
52#include <asm/keylargo.h>
53#include <asm/pmac_low_i2c.h>
54
55#include "mpic.h"
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63extern void pmac_secondary_start_1(void);
64extern void pmac_secondary_start_2(void);
65extern void pmac_secondary_start_3(void);
66
67extern struct smp_ops_t *smp_ops;
68
69static void (*pmac_tb_freeze)(int freeze);
70static struct device_node *pmac_tb_clock_chip_host;
71static u8 pmac_tb_pulsar_addr;
72static DEFINE_SPINLOCK(timebase_lock);
73static unsigned long timebase;
74
75static void smp_core99_cypress_tb_freeze(int freeze)
76{
77 u8 data;
78 int rc;
79
80 /* Strangely, the device-tree says address is 0xd2, but darwin
81 * accesses 0xd0 ...
82 */
83 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
84 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
85 0xd0 | pmac_low_i2c_read,
86 0x81, &data, 1);
87 if (rc != 0)
88 goto bail;
89
90 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
91
92 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
93 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
94 0xd0 | pmac_low_i2c_write,
95 0x81, &data, 1);
96
97 bail:
98 if (rc != 0) {
99 printk("Cypress Timebase %s rc: %d\n",
100 freeze ? "freeze" : "unfreeze", rc);
101 panic("Timebase freeze failed !\n");
102 }
103}
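/* Both tb_freeze routines work the same way: the CPU timebases are driven
 * by an external clock chip on the i2c bus, and toggling a pair of bits in
 * one of its registers (bits 2-3 here, presumably the clock output
 * enables) stops or restarts the timebase on all CPUs at once so they can
 * be synchronized. */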
104
105static void smp_core99_pulsar_tb_freeze(int freeze)
106{
107 u8 data;
108 int rc;
109
110 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
111 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
112 pmac_tb_pulsar_addr | pmac_low_i2c_read,
113 0x2e, &data, 1);
114 if (rc != 0)
115 goto bail;
116
117 data = (data & 0x88) | (freeze ? 0x11 : 0x22);
118
119 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
120 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
121 pmac_tb_pulsar_addr | pmac_low_i2c_write,
122 0x2e, &data, 1);
123 bail:
124 if (rc != 0) {
125 printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
126 freeze ? "freeze" : "unfreeze", rc);
127 panic("Timebase freeze failed !\n");
128 }
129}
130
131
132static void smp_core99_give_timebase(void)
133{
134 /* Open i2c bus for synchronous access */
135 if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
136 panic("Can't open i2c for TB sync !\n");
137
138 spin_lock(&timebase_lock);
139 (*pmac_tb_freeze)(1);
140 mb();
141 timebase = get_tb();
142 spin_unlock(&timebase_lock);
143
144 while (timebase)
145 barrier();
146
147 spin_lock(&timebase_lock);
148 (*pmac_tb_freeze)(0);
149 spin_unlock(&timebase_lock);
150
151 /* Close i2c bus */
152 pmac_low_i2c_close(pmac_tb_clock_chip_host);
153}
154
155
156static void __devinit smp_core99_take_timebase(void)
157{
158 while (!timebase)
159 barrier();
160 spin_lock(&timebase_lock);
161 set_tb(timebase >> 32, timebase & 0xffffffff);
162 timebase = 0;
163 spin_unlock(&timebase_lock);
164}
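/* The handshake between the two routines above goes through the shared
 * 'timebase' variable: the boot CPU freezes the clock, samples its own
 * timebase into 'timebase', and spins until the value has been consumed;
 * the secondary spins until 'timebase' becomes non-zero, loads it into its
 * own TB registers and clears the variable, after which the boot CPU
 * unfreezes the clock and both timebases tick on in step. */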
165
166
167static int __init smp_core99_probe(void)
168{
169 struct device_node *cpus;
170 struct device_node *cc;
171 int ncpus = 0;
172
173 /* Maybe use systemconfiguration here ? */
174 if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
175
176 /* Count CPUs in the device-tree */
177 for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
178 ++ncpus;
179
180 printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
181
182 /* Nothing more to do if less than 2 of them */
183 if (ncpus <= 1)
184 return 1;
185
186 /* HW sync only on these platforms */
187 if (!machine_is_compatible("PowerMac7,2") &&
188 !machine_is_compatible("PowerMac7,3") &&
189 !machine_is_compatible("RackMac3,1"))
190 goto nohwsync;
191
192 /* Look for the clock chip */
193 for (cc = NULL; (cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL;) {
194 struct device_node *p = of_get_parent(cc);
195 u32 *reg;
196 int ok;
197 ok = p && device_is_compatible(p, "uni-n-i2c");
198 if (!ok)
199 goto next;
200 reg = (u32 *)get_property(cc, "reg", NULL);
201 if (reg == NULL)
202 goto next;
203 switch (*reg) {
204 case 0xd2:
205 if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
206 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
207 pmac_tb_pulsar_addr = 0xd2;
208 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
209 } else if (device_is_compatible(cc, "cy28508")) {
210 pmac_tb_freeze = smp_core99_cypress_tb_freeze;
211 printk(KERN_INFO "Timebase clock is Cypress chip\n");
212 }
213 break;
214 case 0xd4:
215 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
216 pmac_tb_pulsar_addr = 0xd4;
217 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
218 break;
219 }
220 if (pmac_tb_freeze != NULL) {
221 pmac_tb_clock_chip_host = p;
222 smp_ops->give_timebase = smp_core99_give_timebase;
223 smp_ops->take_timebase = smp_core99_take_timebase;
224 of_node_put(cc);
225 of_node_put(p);
226 break;
227 }
228 next:
229 of_node_put(p);
230 }
231
232 nohwsync:
233 mpic_request_ipis();
234
235 return ncpus;
236}
237
238static void __init smp_core99_kick_cpu(int nr)
239{
240 int save_vector, j;
241 unsigned long new_vector;
242 unsigned long flags;
243 volatile unsigned int *vector
244 = ((volatile unsigned int *)(KERNELBASE+0x100));
245
246 if (nr < 1 || nr > 3)
247 return;
248 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
249
250 local_irq_save(flags);
251 local_irq_disable();
252
253 /* Save reset vector */
254 save_vector = *vector;
255
256 /* Setup fake reset vector that does
257 * b .pmac_secondary_start - KERNELBASE
258 */
259 switch(nr) {
260 case 1:
261 new_vector = (unsigned long)pmac_secondary_start_1;
262 break;
263 case 2:
264 new_vector = (unsigned long)pmac_secondary_start_2;
265 break;
266 case 3:
267 default:
268 new_vector = (unsigned long)pmac_secondary_start_3;
269 break;
270 }
271 *vector = 0x48000002 + (new_vector - KERNELBASE);
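	/* 0x48000000 is the PowerPC unconditional branch opcode; adding 2
	 * sets the AA (absolute address) bit, so the word written at the
	 * 0x100 reset vector is "ba <secondary entry point>", with
	 * new_vector - KERNELBASE being that entry point's physical
	 * address. */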
272
273 /* flush data cache and inval instruction cache */
274 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
275
276 /* Put some life in our friend */
277 pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
278 paca[nr].cpu_start = 1;
279
280 /* FIXME: We wait a bit for the CPU to take the exception, I should
281 * instead wait for the entry code to set something for me. Well,
282 * ideally, all that crap will be done in prom.c and the CPU left
283 * in a RAM-based wait loop like CHRP.
284 */
285 for (j = 1; j < 1000000; j++)
286 mb();
287
288 /* Restore our exception vector */
289 *vector = save_vector;
290 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
291
292 local_irq_restore(flags);
293 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
294}
295
296static void __init smp_core99_setup_cpu(int cpu_nr)
297{
298 /* Setup MPIC */
299 mpic_setup_this_cpu();
300
301 if (cpu_nr == 0) {
302 extern void g5_phy_disable_cpu1(void);
303
304 /* If we didn't start the second CPU, we must take
305 * it off the bus
306 */
307 if (num_online_cpus() < 2)
308 g5_phy_disable_cpu1();
309 if (ppc_md.progress) ppc_md.progress("smp_core99_setup_cpu 0 done", 0x349);
310 }
311}
312
313struct smp_ops_t core99_smp_ops __pmacdata = {
314 .message_pass = smp_mpic_message_pass,
315 .probe = smp_core99_probe,
316 .kick_cpu = smp_core99_kick_cpu,
317 .setup_cpu = smp_core99_setup_cpu,
318 .give_timebase = smp_generic_give_timebase,
319 .take_timebase = smp_generic_take_timebase,
320};
321
322void __init pmac_setup_smp(void)
323{
324 smp_ops = &core99_smp_ops;
325#ifdef CONFIG_HOTPLUG_CPU
326 smp_ops->cpu_enable = generic_cpu_enable;
327 smp_ops->cpu_disable = generic_cpu_disable;
328 smp_ops->cpu_die = generic_cpu_die;
329#endif
330}
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
deleted file mode 100644
index 41bbb8c59697..000000000000
--- a/arch/ppc64/kernel/pmac_time.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * Support for periodic interrupts (100 per second) and for getting
3 * the current time from the RTC on Power Macintoshes.
4 *
5 * We use the decrementer register for our periodic interrupts.
6 *
7 * Paul Mackerras August 1996.
8 * Copyright (C) 1996 Paul Mackerras.
9 * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
10 *
11 */
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/init.h>
20#include <linux/time.h>
21#include <linux/adb.h>
22#include <linux/pmu.h>
23#include <linux/interrupt.h>
24
25#include <asm/sections.h>
26#include <asm/prom.h>
27#include <asm/system.h>
28#include <asm/io.h>
29#include <asm/pgtable.h>
30#include <asm/machdep.h>
31#include <asm/time.h>
32#include <asm/nvram.h>
33#include <asm/smu.h>
34
35#undef DEBUG
36
37#ifdef DEBUG
38#define DBG(x...) printk(x)
39#else
40#define DBG(x...)
41#endif
42
43/* Apparently the RTC stores seconds since 1 Jan 1904 */
44#define RTC_OFFSET 2082844800
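/* 2082844800 = 24107 days * 86400 s: 66 years (1904-1969) of 365 days plus
 * 17 leap days (1904, 1908, ..., 1968), i.e. the offset between the Mac
 * RTC epoch of 1 Jan 1904 and the Unix epoch of 1 Jan 1970. */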
45
46/*
47 * Calibrate the decrementer frequency with the VIA timer 1.
48 */
 49#define VIA_TIMER_FREQ_6 4700000 /* timer 1 frequency * 6 */
50
51extern struct timezone sys_tz;
52extern void to_tm(int tim, struct rtc_time * tm);
53
54void __pmac pmac_get_rtc_time(struct rtc_time *tm)
55{
56 switch(sys_ctrler) {
57#ifdef CONFIG_ADB_PMU
58 case SYS_CTRLER_PMU: {
59 /* TODO: Move that to a function in the PMU driver */
60 struct adb_request req;
61 unsigned int now;
62
63 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
64 return;
65 pmu_wait_complete(&req);
66 if (req.reply_len != 4)
 67 printk(KERN_ERR "pmac_get_rtc_time: PMU returned a %d"
 68 "-byte reply\n", req.reply_len);
69 now = (req.reply[0] << 24) + (req.reply[1] << 16)
70 + (req.reply[2] << 8) + req.reply[3];
71 DBG("get: %u -> %u\n", (int)now, (int)(now - RTC_OFFSET));
72 now -= RTC_OFFSET;
73
74 to_tm(now, tm);
75 tm->tm_year -= 1900;
76 tm->tm_mon -= 1;
77
78 DBG("-> tm_mday: %d, tm_mon: %d, tm_year: %d, %d:%02d:%02d\n",
79 tm->tm_mday, tm->tm_mon, tm->tm_year,
80 tm->tm_hour, tm->tm_min, tm->tm_sec);
81 break;
82 }
83#endif /* CONFIG_ADB_PMU */
84
85#ifdef CONFIG_PMAC_SMU
86 case SYS_CTRLER_SMU:
87 smu_get_rtc_time(tm, 1);
88 break;
89#endif /* CONFIG_PMAC_SMU */
90 default:
91 ;
92 }
93}
94
95int __pmac pmac_set_rtc_time(struct rtc_time *tm)
96{
97 switch(sys_ctrler) {
98#ifdef CONFIG_ADB_PMU
99 case SYS_CTRLER_PMU: {
100 /* TODO: Move that to a function in the PMU driver */
101 struct adb_request req;
102 unsigned int nowtime;
103
104 DBG("set: tm_mday: %d, tm_mon: %d, tm_year: %d,"
105 " %d:%02d:%02d\n",
106 tm->tm_mday, tm->tm_mon, tm->tm_year,
107 tm->tm_hour, tm->tm_min, tm->tm_sec);
108
109 nowtime = mktime(tm->tm_year + 1900, tm->tm_mon + 1,
110 tm->tm_mday, tm->tm_hour, tm->tm_min,
111 tm->tm_sec);
112
113 DBG("-> %u -> %u\n", (int)nowtime,
114 (int)(nowtime + RTC_OFFSET));
115 nowtime += RTC_OFFSET;
116
117 if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
118 nowtime >> 24, nowtime >> 16,
119 nowtime >> 8, nowtime) < 0)
120 return -ENXIO;
121 pmu_wait_complete(&req);
122 if (req.reply_len != 0)
 123 printk(KERN_ERR "pmac_set_rtc_time: PMU returned a %d"
 124 "-byte reply\n", req.reply_len);
125 return 0;
126 }
127#endif /* CONFIG_ADB_PMU */
128
129#ifdef CONFIG_PMAC_SMU
130 case SYS_CTRLER_SMU:
131 return smu_set_rtc_time(tm, 1);
132#endif /* CONFIG_PMAC_SMU */
133 default:
134 return -ENODEV;
135 }
136}
137
138void __init pmac_get_boot_time(struct rtc_time *tm)
139{
140 pmac_get_rtc_time(tm);
141
142#ifdef disabled__CONFIG_NVRAM
143 s32 delta = 0;
144 int dst;
145
146 delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
147 delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
148 delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
149 if (delta & 0x00800000UL)
150 delta |= 0xFF000000UL;
151 dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
152 printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
153 dst ? "on" : "off");
154#endif
155}
156
157/*
158 * Query the OF and get the decr frequency.
159 * FIXME: merge this with generic_calibrate_decr
160 */
161void __init pmac_calibrate_decr(void)
162{
163 struct device_node *cpu;
164 unsigned int freq, *fp;
165 struct div_result divres;
166
167 /*
168 * The cpu node should have a timebase-frequency property
169 * to tell us the rate at which the decrementer counts.
170 */
171 cpu = find_type_devices("cpu");
172 if (cpu == 0)
173 panic("can't find cpu node in time_init");
174 fp = (unsigned int *) get_property(cpu, "timebase-frequency", NULL);
175 if (fp == 0)
176 panic("can't get cpu timebase frequency");
177 freq = *fp;
178 printk("time_init: decrementer frequency = %u.%.6u MHz\n",
179 freq/1000000, freq%1000000);
180 tb_ticks_per_jiffy = freq / HZ;
181 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
182 tb_ticks_per_usec = freq / 1000000;
183 tb_to_us = mulhwu_scale_factor(freq, 1000000);
184 div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
185 tb_to_xs = divres.result_low;
186 ppc_tb_freq = freq;
187
188 fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL);
189 if (fp == 0)
190 panic("can't get cpu processor frequency");
191 ppc_proc_freq = *fp;
192
193 setup_default_decr();
194}
195
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
index 705742f4eec6..84006e26342c 100644
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ b/arch/ppc64/kernel/ppc_ksyms.c
@@ -19,7 +19,6 @@
19#include <asm/hw_irq.h> 19#include <asm/hw_irq.h>
20#include <asm/abs_addr.h> 20#include <asm/abs_addr.h>
21#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22#include <asm/iSeries/HvCallSc.h>
23 22
24EXPORT_SYMBOL(strcpy); 23EXPORT_SYMBOL(strcpy);
25EXPORT_SYMBOL(strncpy); 24EXPORT_SYMBOL(strncpy);
@@ -46,17 +45,6 @@ EXPORT_SYMBOL(__strnlen_user);
46 45
47EXPORT_SYMBOL(reloc_offset); 46EXPORT_SYMBOL(reloc_offset);
48 47
49#ifdef CONFIG_PPC_ISERIES
50EXPORT_SYMBOL(HvCall0);
51EXPORT_SYMBOL(HvCall1);
52EXPORT_SYMBOL(HvCall2);
53EXPORT_SYMBOL(HvCall3);
54EXPORT_SYMBOL(HvCall4);
55EXPORT_SYMBOL(HvCall5);
56EXPORT_SYMBOL(HvCall6);
57EXPORT_SYMBOL(HvCall7);
58#endif
59
60EXPORT_SYMBOL(_insb); 48EXPORT_SYMBOL(_insb);
61EXPORT_SYMBOL(_outsb); 49EXPORT_SYMBOL(_outsb);
62EXPORT_SYMBOL(_insw); 50EXPORT_SYMBOL(_insw);
@@ -77,14 +65,6 @@ EXPORT_SYMBOL(giveup_altivec);
77EXPORT_SYMBOL(__flush_icache_range); 65EXPORT_SYMBOL(__flush_icache_range);
78EXPORT_SYMBOL(flush_dcache_range); 66EXPORT_SYMBOL(flush_dcache_range);
79 67
80#ifdef CONFIG_SMP
81#ifdef CONFIG_PPC_ISERIES
82EXPORT_SYMBOL(local_get_flags);
83EXPORT_SYMBOL(local_irq_disable);
84EXPORT_SYMBOL(local_irq_restore);
85#endif
86#endif
87
88EXPORT_SYMBOL(memcpy); 68EXPORT_SYMBOL(memcpy);
89EXPORT_SYMBOL(memset); 69EXPORT_SYMBOL(memset);
90EXPORT_SYMBOL(memmove); 70EXPORT_SYMBOL(memmove);
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 7035deb6de92..97bfceb5353b 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -46,7 +46,6 @@
46#include <asm/pgtable.h> 46#include <asm/pgtable.h>
47#include <asm/pci.h> 47#include <asm/pci.h>
48#include <asm/iommu.h> 48#include <asm/iommu.h>
49#include <asm/bootinfo.h>
50#include <asm/ppcdebug.h> 49#include <asm/ppcdebug.h>
51#include <asm/btext.h> 50#include <asm/btext.h>
52#include <asm/sections.h> 51#include <asm/sections.h>
@@ -78,11 +77,14 @@ typedef int interpret_func(struct device_node *, unsigned long *,
78extern struct rtas_t rtas; 77extern struct rtas_t rtas;
79extern struct lmb lmb; 78extern struct lmb lmb;
80extern unsigned long klimit; 79extern unsigned long klimit;
80extern unsigned long memory_limit;
81 81
82static int __initdata dt_root_addr_cells; 82static int __initdata dt_root_addr_cells;
83static int __initdata dt_root_size_cells; 83static int __initdata dt_root_size_cells;
84static int __initdata iommu_is_off; 84static int __initdata iommu_is_off;
85int __initdata iommu_force_on; 85int __initdata iommu_force_on;
86unsigned long tce_alloc_start, tce_alloc_end;
87
86typedef u32 cell_t; 88typedef u32 cell_t;
87 89
88#if 0 90#if 0
@@ -1063,7 +1065,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1063{ 1065{
1064 u32 *prop; 1066 u32 *prop;
1065 u64 *prop64; 1067 u64 *prop64;
1066 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end;
1067 1068
1068 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 1069 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1069 1070
@@ -1237,7 +1238,7 @@ void __init early_init_devtree(void *params)
1237 lmb_init(); 1238 lmb_init();
1238 scan_flat_dt(early_init_dt_scan_root, NULL); 1239 scan_flat_dt(early_init_dt_scan_root, NULL);
1239 scan_flat_dt(early_init_dt_scan_memory, NULL); 1240 scan_flat_dt(early_init_dt_scan_memory, NULL);
1240 lmb_enforce_memory_limit(); 1241 lmb_enforce_memory_limit(memory_limit);
1241 lmb_analyze(); 1242 lmb_analyze();
1242 systemcfg->physicalMemorySize = lmb_phys_mem_size(); 1243 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1243 lmb_reserve(0, __pa(klimit)); 1244 lmb_reserve(0, __pa(klimit));
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index f252670874a4..69924ba4d7d9 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -44,7 +44,6 @@
44#include <asm/pgtable.h> 44#include <asm/pgtable.h>
45#include <asm/pci.h> 45#include <asm/pci.h>
46#include <asm/iommu.h> 46#include <asm/iommu.h>
47#include <asm/bootinfo.h>
48#include <asm/ppcdebug.h> 47#include <asm/ppcdebug.h>
49#include <asm/btext.h> 48#include <asm/btext.h>
50#include <asm/sections.h> 49#include <asm/sections.h>
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
deleted file mode 100644
index b1c044ca5756..000000000000
--- a/arch/ppc64/kernel/ptrace.c
+++ /dev/null
@@ -1,363 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/ptrace.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/m68k/kernel/ptrace.c"
8 * Copyright (C) 1994 by Hamish Macdonald
9 * Taken from linux/kernel/ptrace.c and modified for M680x0.
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au).
14 *
15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of
17 * this archive for more details.
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/errno.h>
27#include <linux/ptrace.h>
28#include <linux/user.h>
29#include <linux/security.h>
30#include <linux/audit.h>
31#include <linux/seccomp.h>
32#include <linux/signal.h>
33
34#include <asm/uaccess.h>
35#include <asm/page.h>
36#include <asm/pgtable.h>
37#include <asm/system.h>
38#include <asm/ptrace-common.h>
39
40/*
 41 * does not yet catch signals sent when the child dies
 42 * (in exit.c or in signal.c).
43 */
44
45/*
46 * Called by kernel/ptrace.c when detaching..
47 *
48 * Make sure single step bits etc are not set.
49 */
50void ptrace_disable(struct task_struct *child)
51{
52 /* make sure the single step bit is not set. */
53 clear_single_step(child);
54}
55
56int sys_ptrace(long request, long pid, long addr, long data)
57{
58 struct task_struct *child;
59 int ret = -EPERM;
60
61 lock_kernel();
62 if (request == PTRACE_TRACEME) {
63 /* are we already being traced? */
64 if (current->ptrace & PT_PTRACED)
65 goto out;
66 ret = security_ptrace(current->parent, current);
67 if (ret)
68 goto out;
69 /* set the ptrace bit in the process flags. */
70 current->ptrace |= PT_PTRACED;
71 ret = 0;
72 goto out;
73 }
74 ret = -ESRCH;
75 read_lock(&tasklist_lock);
76 child = find_task_by_pid(pid);
77 if (child)
78 get_task_struct(child);
79 read_unlock(&tasklist_lock);
80 if (!child)
81 goto out;
82
83 ret = -EPERM;
84 if (pid == 1) /* you may not mess with init */
85 goto out_tsk;
86
87 if (request == PTRACE_ATTACH) {
88 ret = ptrace_attach(child);
89 goto out_tsk;
90 }
91
92 ret = ptrace_check_attach(child, request == PTRACE_KILL);
93 if (ret < 0)
94 goto out_tsk;
95
96 switch (request) {
97 /* when I and D space are separate, these will need to be fixed. */
98 case PTRACE_PEEKTEXT: /* read word at location addr. */
99 case PTRACE_PEEKDATA: {
100 unsigned long tmp;
101 int copied;
102
103 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
104 ret = -EIO;
105 if (copied != sizeof(tmp))
106 break;
107 ret = put_user(tmp,(unsigned long __user *) data);
108 break;
109 }
110
111 /* read the word at location addr in the USER area. */
112 case PTRACE_PEEKUSR: {
113 unsigned long index;
114 unsigned long tmp;
115
116 ret = -EIO;
117 /* convert to index and check */
118 index = (unsigned long) addr >> 3;
119 if ((addr & 7) || (index > PT_FPSCR))
120 break;
121
122 if (index < PT_FPR0) {
123 tmp = get_reg(child, (int)index);
124 } else {
125 flush_fp_to_thread(child);
126 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
127 }
128 ret = put_user(tmp,(unsigned long __user *) data);
129 break;
130 }
131
132 /* If I and D space are separate, this will have to be fixed. */
133 case PTRACE_POKETEXT: /* write the word at location addr. */
134 case PTRACE_POKEDATA:
135 ret = 0;
136 if (access_process_vm(child, addr, &data, sizeof(data), 1)
137 == sizeof(data))
138 break;
139 ret = -EIO;
140 break;
141
142 /* write the word at location addr in the USER area */
143 case PTRACE_POKEUSR: {
144 unsigned long index;
145
146 ret = -EIO;
147 /* convert to index and check */
148 index = (unsigned long) addr >> 3;
149 if ((addr & 7) || (index > PT_FPSCR))
150 break;
151
152 if (index == PT_ORIG_R3)
153 break;
154 if (index < PT_FPR0) {
155 ret = put_reg(child, index, data);
156 } else {
157 flush_fp_to_thread(child);
158 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
159 ret = 0;
160 }
161 break;
162 }
163
164 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
165 case PTRACE_CONT: { /* restart after signal. */
166 ret = -EIO;
167 if (!valid_signal(data))
168 break;
169 if (request == PTRACE_SYSCALL)
170 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
171 else
172 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
173 child->exit_code = data;
174 /* make sure the single step bit is not set. */
175 clear_single_step(child);
176 wake_up_process(child);
177 ret = 0;
178 break;
179 }
180
181 /*
182 * make the child exit. Best I can do is send it a sigkill.
183 * perhaps it should be put in the status that it wants to
184 * exit.
185 */
186 case PTRACE_KILL: {
187 ret = 0;
188 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
189 break;
190 child->exit_code = SIGKILL;
191 /* make sure the single step bit is not set. */
192 clear_single_step(child);
193 wake_up_process(child);
194 break;
195 }
196
197 case PTRACE_SINGLESTEP: { /* set the trap flag. */
198 ret = -EIO;
199 if (!valid_signal(data))
200 break;
201 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
202 set_single_step(child);
203 child->exit_code = data;
204 /* give it a chance to run. */
205 wake_up_process(child);
206 ret = 0;
207 break;
208 }
209
210 case PTRACE_GET_DEBUGREG: {
211 ret = -EINVAL;
212 /* We only support one DABR and no IABRS at the moment */
213 if (addr > 0)
214 break;
215 ret = put_user(child->thread.dabr,
216 (unsigned long __user *)data);
217 break;
218 }
219
220 case PTRACE_SET_DEBUGREG:
221 ret = ptrace_set_debugreg(child, addr, data);
222 break;
223
224 case PTRACE_DETACH:
225 ret = ptrace_detach(child, data);
226 break;
227
228 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
229 int i;
230 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
231 unsigned long __user *tmp = (unsigned long __user *)addr;
232
233 for (i = 0; i < 32; i++) {
234 ret = put_user(*reg, tmp);
235 if (ret)
236 break;
237 reg++;
238 tmp++;
239 }
240 break;
241 }
242
243 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
244 int i;
245 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
246 unsigned long __user *tmp = (unsigned long __user *)addr;
247
248 for (i = 0; i < 32; i++) {
249 ret = get_user(*reg, tmp);
250 if (ret)
251 break;
252 reg++;
253 tmp++;
254 }
255 break;
256 }
257
258 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
259 int i;
260 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
261 unsigned long __user *tmp = (unsigned long __user *)addr;
262
263 flush_fp_to_thread(child);
264
265 for (i = 0; i < 32; i++) {
266 ret = put_user(*reg, tmp);
267 if (ret)
268 break;
269 reg++;
270 tmp++;
271 }
272 break;
273 }
274
 275 case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
276 int i;
277 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
278 unsigned long __user *tmp = (unsigned long __user *)addr;
279
280 flush_fp_to_thread(child);
281
282 for (i = 0; i < 32; i++) {
283 ret = get_user(*reg, tmp);
284 if (ret)
285 break;
286 reg++;
287 tmp++;
288 }
289 break;
290 }
291
292#ifdef CONFIG_ALTIVEC
293 case PTRACE_GETVRREGS:
294 /* Get the child altivec register state. */
295 flush_altivec_to_thread(child);
296 ret = get_vrregs((unsigned long __user *)data, child);
297 break;
298
299 case PTRACE_SETVRREGS:
300 /* Set the child altivec register state. */
301 flush_altivec_to_thread(child);
302 ret = set_vrregs(child, (unsigned long __user *)data);
303 break;
304#endif
305
306 default:
307 ret = ptrace_request(child, request, addr, data);
308 break;
309 }
310out_tsk:
311 put_task_struct(child);
312out:
313 unlock_kernel();
314 return ret;
315}
316
317static void do_syscall_trace(void)
318{
319 /* the 0x80 provides a way for the tracing parent to distinguish
320 between a syscall stop and SIGTRAP delivery */
321 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
322 ? 0x80 : 0));
323
324 /*
325 * this isn't the same as continuing with a signal, but it will do
326 * for normal use. strace only continues with a signal if the
327 * stopping signal is not SIGTRAP. -brl
328 */
329 if (current->exit_code) {
330 send_sig(current->exit_code, current, 1);
331 current->exit_code = 0;
332 }
333}
334
335void do_syscall_trace_enter(struct pt_regs *regs)
336{
337 secure_computing(regs->gpr[0]);
338
339 if (test_thread_flag(TIF_SYSCALL_TRACE)
340 && (current->ptrace & PT_PTRACED))
341 do_syscall_trace();
342
343 if (unlikely(current->audit_context))
344 audit_syscall_entry(current,
345 test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
346 regs->gpr[0],
347 regs->gpr[3], regs->gpr[4],
348 regs->gpr[5], regs->gpr[6]);
349
350}
351
352void do_syscall_trace_leave(struct pt_regs *regs)
353{
354 if (unlikely(current->audit_context))
355 audit_syscall_exit(current,
356 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
357 regs->result);
358
359 if ((test_thread_flag(TIF_SYSCALL_TRACE)
360 || test_thread_flag(TIF_SINGLESTEP))
361 && (current->ptrace & PT_PTRACED))
362 do_syscall_trace();
363}
diff --git a/arch/ppc64/kernel/rtas-proc.c b/arch/ppc64/kernel/rtas-proc.c
index 1f3ff860fdf0..5bdd5b079d96 100644
--- a/arch/ppc64/kernel/rtas-proc.c
+++ b/arch/ppc64/kernel/rtas-proc.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/rtc.h>
26 27
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 4a9719b48abe..3ad15c90fbbd 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -38,9 +38,8 @@
38#include <asm/pci-bridge.h> 38#include <asm/pci-bridge.h>
39#include <asm/iommu.h> 39#include <asm/iommu.h>
40#include <asm/rtas.h> 40#include <asm/rtas.h>
41 41#include <asm/mpic.h>
42#include "mpic.h" 42#include <asm/ppc-pci.h>
43#include "pci.h"
44 43
45/* RTAS tokens */ 44/* RTAS tokens */
46static int read_pci_config; 45static int read_pci_config;
@@ -401,7 +400,7 @@ unsigned long __init find_and_init_phbs(void)
401 if (!phb) 400 if (!phb)
402 continue; 401 continue;
403 402
404 pci_process_bridge_OF_ranges(phb, node); 403 pci_process_bridge_OF_ranges(phb, node, 0);
405 pci_setup_phb_io(phb, index == 0); 404 pci_setup_phb_io(phb, index == 0);
406#ifdef CONFIG_PPC_PSERIES 405#ifdef CONFIG_PPC_PSERIES
407 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { 406 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
@@ -451,7 +450,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
451 if (!phb) 450 if (!phb)
452 return NULL; 451 return NULL;
453 452
454 pci_process_bridge_OF_ranges(phb, dn); 453 pci_process_bridge_OF_ranges(phb, dn, primary);
455 454
456 pci_setup_phb_io_dynamic(phb, primary); 455 pci_setup_phb_io_dynamic(phb, primary);
457 of_node_put(root); 456 of_node_put(root);
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index 6ff52bc61325..79e7ed2858dd 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -43,11 +43,8 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/rtas.h> 44#include <asm/rtas.h>
45 45
46#include <asm/iSeries/mf.h>
47#include <asm/machdep.h> 46#include <asm/machdep.h>
48 47
49extern int piranha_simulator;
50
51/* 48/*
52 * We sponge a minor off of the misc major. No need slurping 49 * We sponge a minor off of the misc major. No need slurping
53 * up another valuable major dev number for this. If you add 50 * up another valuable major dev number for this. If you add
@@ -265,44 +262,10 @@ static int rtc_read_proc(char *page, char **start, off_t off,
265 return len; 262 return len;
266} 263}
267 264
268#ifdef CONFIG_PPC_ISERIES
269/*
270 * Get the RTC from the virtual service processor
271 * This requires flowing LpEvents to the primary partition
272 */
273void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
274{
275 if (piranha_simulator)
276 return;
277
278 mf_get_rtc(rtc_tm);
279 rtc_tm->tm_mon--;
280}
281
282/*
283 * Set the RTC in the virtual service processor
284 * This requires flowing LpEvents to the primary partition
285 */
286int iSeries_set_rtc_time(struct rtc_time *tm)
287{
288 mf_set_rtc(tm);
289 return 0;
290}
291
292void iSeries_get_boot_time(struct rtc_time *tm)
293{
294 if ( piranha_simulator )
295 return;
296
297 mf_get_boot_rtc(tm);
298 tm->tm_mon -= 1;
299}
300#endif
301
302#ifdef CONFIG_PPC_RTAS 265#ifdef CONFIG_PPC_RTAS
303#define MAX_RTC_WAIT 5000 /* 5 sec */ 266#define MAX_RTC_WAIT 5000 /* 5 sec */
304#define RTAS_CLOCK_BUSY (-2) 267#define RTAS_CLOCK_BUSY (-2)
305void rtas_get_boot_time(struct rtc_time *rtc_tm) 268unsigned long rtas_get_boot_time(void)
306{ 269{
307 int ret[8]; 270 int ret[8];
308 int error, wait_time; 271 int error, wait_time;
@@ -322,15 +285,10 @@ void rtas_get_boot_time(struct rtc_time *rtc_tm)
322 if (error != 0 && printk_ratelimit()) { 285 if (error != 0 && printk_ratelimit()) {
323 printk(KERN_WARNING "error: reading the clock failed (%d)\n", 286 printk(KERN_WARNING "error: reading the clock failed (%d)\n",
324 error); 287 error);
325 return; 288 return 0;
326 } 289 }
327 290
328 rtc_tm->tm_sec = ret[5]; 291 return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
329 rtc_tm->tm_min = ret[4];
330 rtc_tm->tm_hour = ret[3];
331 rtc_tm->tm_mday = ret[2];
332 rtc_tm->tm_mon = ret[1] - 1;
333 rtc_tm->tm_year = ret[0] - 1900;
334} 292}
335 293
336/* NOTE: get_rtc_time will get an error if executed in interrupt context 294/* NOTE: get_rtc_time will get an error if executed in interrupt context
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index 347112cca3c0..ec9d0984b6a0 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -133,7 +133,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
133 flush_fp_to_thread(current); 133 flush_fp_to_thread(current);
134 134
135 /* Make sure signal doesn't get spurrious FP exceptions */ 135 /* Make sure signal doesn't get spurrious FP exceptions */
136 current->thread.fpscr = 0; 136 current->thread.fpscr.val = 0;
137 137
138#ifdef CONFIG_ALTIVEC 138#ifdef CONFIG_ALTIVEC
139 err |= __put_user(v_regs, &sc->v_regs); 139 err |= __put_user(v_regs, &sc->v_regs);
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 793b562da653..017c12919832 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -45,8 +45,7 @@
45#include <asm/cputable.h> 45#include <asm/cputable.h>
46#include <asm/system.h> 46#include <asm/system.h>
47#include <asm/abs_addr.h> 47#include <asm/abs_addr.h>
48 48#include <asm/mpic.h>
49#include "mpic.h"
50 49
51#ifdef DEBUG 50#ifdef DEBUG
52#define DBG(fmt...) udbg_printf(fmt) 51#define DBG(fmt...) udbg_printf(fmt)
@@ -70,28 +69,6 @@ void smp_call_function_interrupt(void);
70int smt_enabled_at_boot = 1; 69int smt_enabled_at_boot = 1;
71 70
72#ifdef CONFIG_MPIC 71#ifdef CONFIG_MPIC
73void smp_mpic_message_pass(int target, int msg)
74{
75 /* make sure we're sending something that translates to an IPI */
76 if ( msg > 0x3 ){
77 printk("SMP %d: smp_message_pass: unknown msg %d\n",
78 smp_processor_id(), msg);
79 return;
80 }
81 switch ( target )
82 {
83 case MSG_ALL:
84 mpic_send_ipi(msg, 0xffffffff);
85 break;
86 case MSG_ALL_BUT_SELF:
87 mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
88 break;
89 default:
90 mpic_send_ipi(msg, 1 << target);
91 break;
92 }
93}
94
95int __init smp_mpic_probe(void) 72int __init smp_mpic_probe(void)
96{ 73{
97 int nr_cpus; 74 int nr_cpus;
@@ -128,21 +105,6 @@ void __devinit smp_generic_kick_cpu(int nr)
128 105
129#endif /* CONFIG_MPIC */ 106#endif /* CONFIG_MPIC */
130 107
131static void __init smp_space_timers(unsigned int max_cpus)
132{
133 int i;
134 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
135 unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;
136
137 for_each_cpu(i) {
138 if (i != boot_cpuid) {
139 paca[i].next_jiffy_update_tb =
140 previous_tb + offset;
141 previous_tb = paca[i].next_jiffy_update_tb;
142 }
143 }
144}
145
146void smp_message_recv(int msg, struct pt_regs *regs) 108void smp_message_recv(int msg, struct pt_regs *regs)
147{ 109{
148 switch(msg) { 110 switch(msg) {
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
deleted file mode 100644
index 7467ae508e6e..000000000000
--- a/arch/ppc64/kernel/traps.c
+++ /dev/null
@@ -1,568 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/traps.c
3 *
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Modified by Cort Dougan (cort@cs.nmt.edu)
12 * and Paul Mackerras (paulus@cs.anu.edu.au)
13 */
14
15/*
16 * This file handles the architecture-dependent parts of hardware exceptions
17 */
18
19#include <linux/config.h>
20#include <linux/errno.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/stddef.h>
25#include <linux/unistd.h>
26#include <linux/slab.h>
27#include <linux/user.h>
28#include <linux/a.out.h>
29#include <linux/interrupt.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include <linux/delay.h>
33#include <linux/kprobes.h>
34#include <asm/kdebug.h>
35
36#include <asm/pgtable.h>
37#include <asm/uaccess.h>
38#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/processor.h>
41#include <asm/ppcdebug.h>
42#include <asm/rtas.h>
43#include <asm/systemcfg.h>
44#include <asm/machdep.h>
45#include <asm/pmc.h>
46
47#ifdef CONFIG_DEBUGGER
48int (*__debugger)(struct pt_regs *regs);
49int (*__debugger_ipi)(struct pt_regs *regs);
50int (*__debugger_bpt)(struct pt_regs *regs);
51int (*__debugger_sstep)(struct pt_regs *regs);
52int (*__debugger_iabr_match)(struct pt_regs *regs);
53int (*__debugger_dabr_match)(struct pt_regs *regs);
54int (*__debugger_fault_handler)(struct pt_regs *regs);
55
56EXPORT_SYMBOL(__debugger);
57EXPORT_SYMBOL(__debugger_ipi);
58EXPORT_SYMBOL(__debugger_bpt);
59EXPORT_SYMBOL(__debugger_sstep);
60EXPORT_SYMBOL(__debugger_iabr_match);
61EXPORT_SYMBOL(__debugger_dabr_match);
62EXPORT_SYMBOL(__debugger_fault_handler);
63#endif
64
65struct notifier_block *ppc64_die_chain;
66static DEFINE_SPINLOCK(die_notifier_lock);
67
68int register_die_notifier(struct notifier_block *nb)
69{
70 int err = 0;
71 unsigned long flags;
72
73 spin_lock_irqsave(&die_notifier_lock, flags);
74 err = notifier_chain_register(&ppc64_die_chain, nb);
75 spin_unlock_irqrestore(&die_notifier_lock, flags);
76 return err;
77}
78
79/*
80 * Trap & Exception support
81 */
82
83static DEFINE_SPINLOCK(die_lock);
84
85int die(const char *str, struct pt_regs *regs, long err)
86{
87 static int die_counter;
88 int nl = 0;
89
90 if (debugger(regs))
91 return 1;
92
93 console_verbose();
94 spin_lock_irq(&die_lock);
95 bust_spinlocks(1);
96 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
97#ifdef CONFIG_PREEMPT
98 printk("PREEMPT ");
99 nl = 1;
100#endif
101#ifdef CONFIG_SMP
102 printk("SMP NR_CPUS=%d ", NR_CPUS);
103 nl = 1;
104#endif
105#ifdef CONFIG_DEBUG_PAGEALLOC
106 printk("DEBUG_PAGEALLOC ");
107 nl = 1;
108#endif
109#ifdef CONFIG_NUMA
110 printk("NUMA ");
111 nl = 1;
112#endif
113 switch(systemcfg->platform) {
114 case PLATFORM_PSERIES:
115 printk("PSERIES ");
116 nl = 1;
117 break;
118 case PLATFORM_PSERIES_LPAR:
119 printk("PSERIES LPAR ");
120 nl = 1;
121 break;
122 case PLATFORM_ISERIES_LPAR:
123 printk("ISERIES LPAR ");
124 nl = 1;
125 break;
126 case PLATFORM_POWERMAC:
127 printk("POWERMAC ");
128 nl = 1;
129 break;
130 case PLATFORM_BPA:
131 printk("BPA ");
132 nl = 1;
133 break;
134 }
135 if (nl)
136 printk("\n");
137 print_modules();
138 show_regs(regs);
139 bust_spinlocks(0);
140 spin_unlock_irq(&die_lock);
141
142 if (in_interrupt())
143 panic("Fatal exception in interrupt");
144
145 if (panic_on_oops) {
146 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
147 ssleep(5);
148 panic("Fatal exception");
149 }
150 do_exit(SIGSEGV);
151
152 return 0;
153}
154
155void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
156{
157 siginfo_t info;
158
159 if (!user_mode(regs)) {
160 if (die("Exception in kernel mode", regs, signr))
161 return;
162 }
163
164 memset(&info, 0, sizeof(info));
165 info.si_signo = signr;
166 info.si_code = code;
167 info.si_addr = (void __user *) addr;
168 force_sig_info(signr, &info, current);
169}
170
171void system_reset_exception(struct pt_regs *regs)
172{
173 /* See if any machine dependent calls */
174 if (ppc_md.system_reset_exception)
175 ppc_md.system_reset_exception(regs);
176
177 die("System Reset", regs, 0);
178
179 /* Must die if the interrupt is not recoverable */
180 if (!(regs->msr & MSR_RI))
181 panic("Unrecoverable System Reset");
182
183 /* What should we do here? We could issue a shutdown or hard reset. */
184}
185
186void machine_check_exception(struct pt_regs *regs)
187{
188 int recover = 0;
189
190 /* See if any machine dependent calls */
191 if (ppc_md.machine_check_exception)
192 recover = ppc_md.machine_check_exception(regs);
193
194 if (recover)
195 return;
196
197 if (debugger_fault_handler(regs))
198 return;
199 die("Machine check", regs, 0);
200
201 /* Must die if the interrupt is not recoverable */
202 if (!(regs->msr & MSR_RI))
203 panic("Unrecoverable Machine check");
204}
205
206void unknown_exception(struct pt_regs *regs)
207{
208 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
209 regs->nip, regs->msr, regs->trap);
210
211 _exception(SIGTRAP, regs, 0, 0);
212}
213
214void instruction_breakpoint_exception(struct pt_regs *regs)
215{
216 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
217 5, SIGTRAP) == NOTIFY_STOP)
218 return;
219 if (debugger_iabr_match(regs))
220 return;
221 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
222}
223
224void __kprobes single_step_exception(struct pt_regs *regs)
225{
226 regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
227
228 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
229 5, SIGTRAP) == NOTIFY_STOP)
230 return;
231 if (debugger_sstep(regs))
232 return;
233
234 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
235}
236
237/*
238 * After we have successfully emulated an instruction, we have to
239 * check if the instruction was being single-stepped, and if so,
240 * pretend we got a single-step exception. This was pointed out
241 * by Kumar Gala. -- paulus
242 */
243static inline void emulate_single_step(struct pt_regs *regs)
244{
245 if (regs->msr & MSR_SE)
246 single_step_exception(regs);
247}
248
249static void parse_fpe(struct pt_regs *regs)
250{
251 int code = 0;
252 unsigned long fpscr;
253
254 flush_fp_to_thread(current);
255
256 fpscr = current->thread.fpscr;
257
258 /* Invalid operation */
259 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
260 code = FPE_FLTINV;
261
262 /* Overflow */
263 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
264 code = FPE_FLTOVF;
265
266 /* Underflow */
267 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
268 code = FPE_FLTUND;
269
270 /* Divide by zero */
271 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
272 code = FPE_FLTDIV;
273
274 /* Inexact result */
275 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
276 code = FPE_FLTRES;
277
278 _exception(SIGFPE, regs, code, regs->nip);
279}
280
281/*
282 * Illegal instruction emulation support. Return non-zero if we can't
283 * emulate, or -EFAULT if the associated memory access caused an access
284 * fault. Return zero on success.
285 */
286
287#define INST_MFSPR_PVR 0x7c1f42a6
288#define INST_MFSPR_PVR_MASK 0xfc1fffff
289
290#define INST_DCBA 0x7c0005ec
291#define INST_DCBA_MASK 0x7c0007fe
292
293#define INST_MCRXR 0x7c000400
294#define INST_MCRXR_MASK 0x7c0007fe
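/* The *_MASK constants blank out the fields that vary between encodings
 * (the register fields) so that the (instword & MASK) == MATCH tests below
 * recognize an instruction no matter which registers it names; e.g.
 * 0xfc1fffff clears bits 21-25, the rD field of mfspr, which is then
 * extracted separately with (instword >> 21) & 0x1f. */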
295
296static int emulate_instruction(struct pt_regs *regs)
297{
298 unsigned int instword;
299
300 if (!user_mode(regs))
301 return -EINVAL;
302
303 CHECK_FULL_REGS(regs);
304
305 if (get_user(instword, (unsigned int __user *)(regs->nip)))
306 return -EFAULT;
307
308 /* Emulate the mfspr rD, PVR. */
309 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
310 unsigned int rd;
311
312 rd = (instword >> 21) & 0x1f;
313 regs->gpr[rd] = mfspr(SPRN_PVR);
314 return 0;
315 }
316
317 /* Emulating the dcba insn is just a no-op. */
318 if ((instword & INST_DCBA_MASK) == INST_DCBA) {
319 static int warned;
320
321 if (!warned) {
322 printk(KERN_WARNING
323 "process %d (%s) uses obsolete 'dcba' insn\n",
324 current->pid, current->comm);
325 warned = 1;
326 }
327 return 0;
328 }
329
330 /* Emulate the mcrxr insn. */
331 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
332 static int warned;
333 unsigned int shift;
334
335 if (!warned) {
336 printk(KERN_WARNING
337 "process %d (%s) uses obsolete 'mcrxr' insn\n",
338 current->pid, current->comm);
339 warned = 1;
340 }
341
342 shift = (instword >> 21) & 0x1c;
343 regs->ccr &= ~(0xf0000000 >> shift);
344 regs->ccr |= (regs->xer & 0xf0000000) >> shift;
345 regs->xer &= ~0xf0000000;
346 return 0;
347 }
348
349 return -EINVAL;
350}
351
352/*
353 * Look through the list of trap instructions that are used for BUG(),
354 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
355 * that the exception was caused by a trap instruction of some kind.
356 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
357 * otherwise.
358 */
359extern struct bug_entry __start___bug_table[], __stop___bug_table[];
360
361#ifndef CONFIG_MODULES
362#define module_find_bug(x) NULL
363#endif
364
365struct bug_entry *find_bug(unsigned long bugaddr)
366{
367 struct bug_entry *bug;
368
369 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
370 if (bugaddr == bug->bug_addr)
371 return bug;
372 return module_find_bug(bugaddr);
373}
374
375static int
376check_bug_trap(struct pt_regs *regs)
377{
378 struct bug_entry *bug;
379 unsigned long addr;
380
381 if (regs->msr & MSR_PR)
382 return 0; /* not in kernel */
383 addr = regs->nip; /* address of trap instruction */
384 if (addr < PAGE_OFFSET)
385 return 0;
386 bug = find_bug(regs->nip);
387 if (bug == NULL)
388 return 0;
389 if (bug->line & BUG_WARNING_TRAP) {
390 /* this is a WARN_ON rather than BUG/BUG_ON */
391 printk(KERN_ERR "Badness in %s at %s:%d\n",
392 bug->function, bug->file,
393 (unsigned int)bug->line & ~BUG_WARNING_TRAP);
394 show_stack(current, (void *)regs->gpr[1]);
395 return 1;
396 }
397 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
398 bug->function, bug->file, (unsigned int)bug->line);
399 return 0;
400}
401
402void __kprobes program_check_exception(struct pt_regs *regs)
403{
404 if (debugger_fault_handler(regs))
405 return;
406
407 if (regs->msr & 0x100000) {
408 /* IEEE FP exception */
409 parse_fpe(regs);
410 } else if (regs->msr & 0x20000) {
411 /* trap exception */
412
413 if (notify_die(DIE_BPT, "breakpoint", regs, 5,
414 5, SIGTRAP) == NOTIFY_STOP)
415 return;
416 if (debugger_bpt(regs))
417 return;
418
419 if (check_bug_trap(regs)) {
420 regs->nip += 4;
421 return;
422 }
423 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
424
425 } else {
426 /* Privileged or illegal instruction; try to emulate it. */
427 switch (emulate_instruction(regs)) {
428 case 0:
429 regs->nip += 4;
430 emulate_single_step(regs);
431 break;
432
433 case -EFAULT:
434 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
435 break;
436
437 default:
438 if (regs->msr & 0x40000)
 439 /* privileged */
440 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
441 else
442 /* illegal */
443 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
444 break;
445 }
446 }
447}
448
449void kernel_fp_unavailable_exception(struct pt_regs *regs)
450{
451 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
452 "%lx at %lx\n", regs->trap, regs->nip);
453 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
454}
455
456void altivec_unavailable_exception(struct pt_regs *regs)
457{
458 if (user_mode(regs)) {
459 /* A user program has executed an altivec instruction,
460 but this kernel doesn't support altivec. */
461 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
462 return;
463 }
464 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
465 "%lx at %lx\n", regs->trap, regs->nip);
466 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
467}
468
469extern perf_irq_t perf_irq;
470
471void performance_monitor_exception(struct pt_regs *regs)
472{
473 perf_irq(regs);
474}
475
476void alignment_exception(struct pt_regs *regs)
477{
478 int fixed;
479
480 fixed = fix_alignment(regs);
481
482 if (fixed == 1) {
483 regs->nip += 4; /* skip over emulated instruction */
484 emulate_single_step(regs);
485 return;
486 }
487
488 /* Operand address was bad */
489 if (fixed == -EFAULT) {
490 if (user_mode(regs)) {
491 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->dar);
492 } else {
493 /* Search exception table */
494 bad_page_fault(regs, regs->dar, SIGSEGV);
495 }
496
497 return;
498 }
499
500 _exception(SIGBUS, regs, BUS_ADRALN, regs->nip);
501}
502
503#ifdef CONFIG_ALTIVEC
504void altivec_assist_exception(struct pt_regs *regs)
505{
506 int err;
507 siginfo_t info;
508
509 if (!user_mode(regs)) {
510 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
511 " at %lx\n", regs->nip);
512 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
513 }
514
515 flush_altivec_to_thread(current);
516
517 err = emulate_altivec(regs);
518 if (err == 0) {
519 regs->nip += 4; /* skip emulated instruction */
520 emulate_single_step(regs);
521 return;
522 }
523
524 if (err == -EFAULT) {
525 /* got an error reading the instruction */
526 info.si_signo = SIGSEGV;
527 info.si_errno = 0;
528 info.si_code = SEGV_MAPERR;
529 info.si_addr = (void __user *) regs->nip;
530 force_sig_info(SIGSEGV, &info, current);
531 } else {
532 /* didn't recognize the instruction */
533 /* XXX quick hack for now: set the non-Java bit in the VSCR */
534 if (printk_ratelimit())
535 printk(KERN_ERR "Unrecognized altivec instruction "
536 "in %s at %lx\n", current->comm, regs->nip);
537 current->thread.vscr.u[3] |= 0x10000;
538 }
539}
540#endif /* CONFIG_ALTIVEC */
541
542/*
543 * We enter here if we get an unrecoverable exception, that is, one
544 * that happened at a point where the RI (recoverable interrupt) bit
545 * in the MSR is 0. This indicates that SRR0/1 are live, and that
546 * we therefore lost state by taking this exception.
547 */
548void unrecoverable_exception(struct pt_regs *regs)
549{
550 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
551 regs->trap, regs->nip);
552 die("Unrecoverable exception", regs, SIGABRT);
553}
554
555/*
556 * We enter here if we discover during exception entry that we are
557 * running in supervisor mode with a userspace value in the stack pointer.
558 */
559void kernel_bad_stack(struct pt_regs *regs)
560{
561 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
562 regs->gpr[1], regs->nip);
563 die("Bad kernel stack pointer", regs, SIGABRT);
564}
565
566void __init trap_init(void)
567{
568}
diff --git a/arch/ppc64/kernel/vdso64/sigtramp.S b/arch/ppc64/kernel/vdso64/sigtramp.S
index 8ae8f205e470..31b604ab56de 100644
--- a/arch/ppc64/kernel/vdso64/sigtramp.S
+++ b/arch/ppc64/kernel/vdso64/sigtramp.S
@@ -15,6 +15,7 @@
15#include <asm/ppc_asm.h> 15#include <asm/ppc_asm.h>
16#include <asm/unistd.h> 16#include <asm/unistd.h>
17#include <asm/vdso.h> 17#include <asm/vdso.h>
18#include <asm/ptrace.h> /* XXX for __SIGNAL_FRAMESIZE */
18 19
19 .text 20 .text
20 21
diff --git a/arch/ppc64/kernel/vecemu.c b/arch/ppc64/kernel/vecemu.c
deleted file mode 100644
index cb207629f21f..000000000000
--- a/arch/ppc64/kernel/vecemu.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Routines to emulate some Altivec/VMX instructions, specifically
3 * those that can trap when given denormalized operands in Java mode.
4 */
5#include <linux/kernel.h>
6#include <linux/errno.h>
7#include <linux/sched.h>
8#include <asm/ptrace.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11
12/* Functions in vector.S */
13extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
14extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
15extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
16extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
17extern void vrefp(vector128 *dst, vector128 *src);
18extern void vrsqrtefp(vector128 *dst, vector128 *src);
19extern void vexptep(vector128 *dst, vector128 *src);
20
21static unsigned int exp2s[8] = {
22 0x800000,
23 0x8b95c2,
24 0x9837f0,
25 0xa5fed7,
26 0xb504f3,
27 0xc5672a,
28 0xd744fd,
29 0xeac0c7
30};
31
32/*
33 * Computes an estimate of 2^x. The `s' argument is the 32-bit
34 * single-precision floating-point representation of x.
35 */
36static unsigned int eexp2(unsigned int s)
37{
38 int exp, pwr;
39 unsigned int mant, frac;
40
41 /* extract exponent field from input */
42 exp = ((s >> 23) & 0xff) - 127;
43 if (exp > 7) {
44 /* check for NaN input */
45 if (exp == 128 && (s & 0x7fffff) != 0)
46 return s | 0x400000; /* return QNaN */
47 /* 2^-big = 0, 2^+big = +Inf */
48 return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
49 }
50 if (exp < -23)
51 return 0x3f800000; /* 1.0 */
52
53 /* convert to fixed point integer in 9.23 representation */
54 pwr = (s & 0x7fffff) | 0x800000;
55 if (exp > 0)
56 pwr <<= exp;
57 else
58 pwr >>= -exp;
59 if (s & 0x80000000)
60 pwr = -pwr;
61
62 /* extract integer part, which becomes exponent part of result */
63 exp = (pwr >> 23) + 126;
64 if (exp >= 254)
65 return 0x7f800000;
66 if (exp < -23)
67 return 0;
68
69 /* table lookup on top 3 bits of fraction to get mantissa */
70 mant = exp2s[(pwr >> 20) & 7];
71
72 /* linear interpolation using remaining 20 bits of fraction */
73 asm("mulhwu %0,%1,%2" : "=r" (frac)
74 : "r" (pwr << 12), "r" (0x172b83ff));
75 asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
76 mant += frac;
77
78 if (exp >= 0)
79 return mant + (exp << 23);
80
81 /* denormalized result */
82 exp = -exp;
83 mant += 1 << (exp - 1);
84 return mant >> exp;
85}
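/*
 * Illustrative user-space sketch (not part of the kernel source): eexp2()
 * above treats its argument as raw IEEE-754 single-precision bits and
 * interpolates a 2^x estimate from the exp2s[] table.  The inline "mulhwu"
 * it uses returns the high 32 bits of an unsigned 32x32 multiply; a portable
 * equivalent and the 9.23 fixed-point conversion step are shown here with
 * made-up helper names (mulhwu_c, f2u).
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <math.h>

static uint32_t mulhwu_c(uint32_t a, uint32_t b)
{
        return (uint32_t)(((uint64_t)a * b) >> 32);   /* high word of product */
}

static uint32_t f2u(float f) { uint32_t u; memcpy(&u, &f, 4); return u; }

int main(void)
{
        float x = 1.5f;
        uint32_t bits = f2u(x);
        /* same 9.23 fixed-point form that eexp2() builds before the lookup */
        int exp = ((bits >> 23) & 0xff) - 127;
        uint32_t pwr = (bits & 0x7fffff) | 0x800000;
        pwr = exp > 0 ? pwr << exp : pwr >> -exp;
        printf("x=%g  bits=%08x  9.23 fixed=%08x  exp2f(x)=%g\n",
               x, bits, pwr, exp2f(x));
        printf("high-word demo: mulhwu(0x80000000,3)=%u\n",
               mulhwu_c(0x80000000u, 3));              /* prints 1 */
        return 0;
}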
86
87/*
88 * Computes an estimate of log_2(x). The `s' argument is the 32-bit
89 * single-precision floating-point representation of x.
90 */
91static unsigned int elog2(unsigned int s)
92{
93 int exp, mant, lz, frac;
94
95 exp = s & 0x7f800000;
96 mant = s & 0x7fffff;
97 if (exp == 0x7f800000) { /* Inf or NaN */
98 if (mant != 0)
99 s |= 0x400000; /* turn NaN into QNaN */
100 return s;
101 }
102 if ((exp | mant) == 0) /* +0 or -0 */
103 return 0xff800000; /* return -Inf */
104
105 if (exp == 0) {
106 /* denormalized */
107 asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
108 mant <<= lz - 8;
109 exp = (-118 - lz) << 23;
110 } else {
111 mant |= 0x800000;
112 exp -= 127 << 23;
113 }
114
115 if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */
116 exp |= 0x400000; /* 0.5 * 2^23 */
117 asm("mulhwu %0,%1,%2" : "=r" (mant)
118 : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */
119 }
120 if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */
121 exp |= 0x200000; /* 0.25 * 2^23 */
122 asm("mulhwu %0,%1,%2" : "=r" (mant)
123 : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */
124 }
125 if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */
126 exp |= 0x100000; /* 0.125 * 2^23 */
127 asm("mulhwu %0,%1,%2" : "=r" (mant)
128 : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */
129 }
130 if (mant > 0x800000) { /* 1.0 * 2^23 */
131 /* calculate (mant - 1) * 1.381097463 */
132 /* 1.381097463 == 0.125 / (2^0.125 - 1) */
133 asm("mulhwu %0,%1,%2" : "=r" (frac)
134 : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
135 exp += frac;
136 }
137 s = exp & 0x80000000;
138 if (exp != 0) {
139 if (s)
140 exp = -exp;
141 asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
142 lz = 8 - lz;
143 if (lz > 0)
144 exp >>= lz;
145 else if (lz < 0)
146 exp <<= -lz;
147 s += ((lz + 126) << 23) + exp;
148 }
149 return s;
150}
151
152#define VSCR_SAT 1
153
154static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
155{
156 int exp, mant;
157
158 exp = (x >> 23) & 0xff;
159 mant = x & 0x7fffff;
160 if (exp == 255 && mant != 0)
161 return 0; /* NaN -> 0 */
162 exp = exp - 127 + scale;
163 if (exp < 0)
164 return 0; /* round towards zero */
165 if (exp >= 31) {
166 /* saturate, unless the result would be -2^31 */
167 if (x + (scale << 23) != 0xcf000000)
168 *vscrp |= VSCR_SAT;
169 return (x & 0x80000000)? 0x80000000: 0x7fffffff;
170 }
171 mant |= 0x800000;
172 mant = (mant << 7) >> (30 - exp);
173 return (x & 0x80000000)? -mant: mant;
174}
175
176static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
177{
178 int exp;
179 unsigned int mant;
180
181 exp = (x >> 23) & 0xff;
182 mant = x & 0x7fffff;
183 if (exp == 255 && mant != 0)
184 return 0; /* NaN -> 0 */
185 exp = exp - 127 + scale;
186 if (exp < 0)
187 return 0; /* round towards zero */
188 if (x & 0x80000000) {
189 /* negative => saturate to 0 */
190 *vscrp |= VSCR_SAT;
191 return 0;
192 }
193 if (exp >= 32) {
194 /* saturate */
195 *vscrp |= VSCR_SAT;
196 return 0xffffffff;
197 }
198 mant |= 0x800000;
199 mant = (mant << 8) >> (31 - exp);
200 return mant;
201}
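/*
 * Illustrative user-space sketch of the saturation rules implemented by
 * ctuxs()/ctsxs() above: a vctuxs of a negative operand of magnitude >= 1
 * clamps to 0 and a value >= 2^32 clamps to 0xffffffff, setting the SAT
 * bit in the VSCR in either case.  ctuxs_ref() below is a hypothetical
 * reference model written with ordinary floating point, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <math.h>

#define VSCR_SAT 1

static uint32_t ctuxs_ref(float x, int scale, unsigned int *vscr)
{
        double v = (double)x * exp2(scale);       /* vctuxs scales by 2^UIMM */
        if (isnan(v))
                return 0;
        if (v < 0.0) {
                if (v <= -1.0)
                        *vscr |= VSCR_SAT;        /* negative => saturate to 0 */
                return 0;
        }
        if (v >= 4294967296.0) {
                *vscr |= VSCR_SAT;
                return 0xffffffffu;               /* saturate high */
        }
        return (uint32_t)v;                       /* truncation toward zero */
}

int main(void)
{
        unsigned int vscr = 0;
        printf("%u\n",  ctuxs_ref(-1.0f, 0, &vscr));   /* 0, SAT set */
        printf("%u\n",  ctuxs_ref(3.9f, 0, &vscr));    /* 3 */
        printf("%#x\n", ctuxs_ref(1e20f, 0, &vscr));   /* 0xffffffff, SAT */
        printf("vscr=%#x\n", vscr);
        return 0;
}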
202
203/* Round to floating integer, towards 0 */
204static unsigned int rfiz(unsigned int x)
205{
206 int exp;
207
208 exp = ((x >> 23) & 0xff) - 127;
209 if (exp == 128 && (x & 0x7fffff) != 0)
210 return x | 0x400000; /* NaN -> make it a QNaN */
211 if (exp >= 23)
212 return x; /* it's an integer already (or Inf) */
213 if (exp < 0)
214 return x & 0x80000000; /* |x| < 1.0 rounds to 0 */
215 return x & ~(0x7fffff >> exp);
216}
217
218/* Round to floating integer, towards +/- Inf */
219static unsigned int rfii(unsigned int x)
220{
221 int exp, mask;
222
223 exp = ((x >> 23) & 0xff) - 127;
224 if (exp == 128 && (x & 0x7fffff) != 0)
225 return x | 0x400000; /* NaN -> make it a QNaN */
226 if (exp >= 23)
227 return x; /* it's an integer already (or Inf) */
228 if ((x & 0x7fffffff) == 0)
229 return x; /* +/-0 -> +/-0 */
230 if (exp < 0)
231 /* 0 < |x| < 1.0 rounds to +/- 1.0 */
232 return (x & 0x80000000) | 0x3f800000;
233 mask = 0x7fffff >> exp;
234 /* mantissa overflows into exponent - that's OK,
235 it can't overflow into the sign bit */
236 return (x + mask) & ~mask;
237}
238
239/* Round to floating integer, to nearest */
240static unsigned int rfin(unsigned int x)
241{
242 int exp, half;
243
244 exp = ((x >> 23) & 0xff) - 127;
245 if (exp == 128 && (x & 0x7fffff) != 0)
246 return x | 0x400000; /* NaN -> make it a QNaN */
247 if (exp >= 23)
248 return x; /* it's an integer already (or Inf) */
249 if (exp < -1)
250 return x & 0x80000000; /* |x| < 0.5 -> +/-0 */
251 if (exp == -1)
252 /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
253 return (x & 0x80000000) | 0x3f800000;
254 half = 0x400000 >> exp;
255 /* add 0.5 to the magnitude and chop off the fraction bits */
256 return (x + half) & ~(0x7fffff >> exp);
257}
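/*
 * Worked example for rfin() above using 2.5f (bit pattern 0x40200000),
 * done in plain user-space C.  exp is 1, so half = 0x400000 >> 1 = 0x200000;
 * adding it gives 0x40400000 and masking the remaining fraction bits
 * (~(0x7fffff >> 1)) leaves 0x40400000, i.e. 3.0f.  The constants below
 * cover just this one case and are not kernel code.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        uint32_t x = 0x40200000;              /* 2.5f */
        int exp = ((x >> 23) & 0xff) - 127;   /* 1 */
        uint32_t half = 0x400000 >> exp;
        uint32_t r = (x + half) & ~(0x7fffffu >> exp);
        float out;
        memcpy(&out, &r, 4);
        printf("rfin(2.5f) -> %#x = %g\n", r, out);   /* 0x40400000 = 3 */
        return 0;
}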
258
259int
260emulate_altivec(struct pt_regs *regs)
261{
262 unsigned int instr, i;
263 unsigned int va, vb, vc, vd;
264 vector128 *vrs;
265
266 if (get_user(instr, (unsigned int __user *) regs->nip))
267 return -EFAULT;
268 if ((instr >> 26) != 4)
269 return -EINVAL; /* not an altivec instruction */
270 vd = (instr >> 21) & 0x1f;
271 va = (instr >> 16) & 0x1f;
272 vb = (instr >> 11) & 0x1f;
273 vc = (instr >> 6) & 0x1f;
274
275 vrs = current->thread.vr;
276 switch (instr & 0x3f) {
277 case 10:
278 switch (vc) {
279 case 0: /* vaddfp */
280 vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
281 break;
282 case 1: /* vsubfp */
283 vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
284 break;
285 case 4: /* vrefp */
286 vrefp(&vrs[vd], &vrs[vb]);
287 break;
288 case 5: /* vrsqrtefp */
289 vrsqrtefp(&vrs[vd], &vrs[vb]);
290 break;
291 case 6: /* vexptefp */
292 for (i = 0; i < 4; ++i)
293 vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
294 break;
295 case 7: /* vlogefp */
296 for (i = 0; i < 4; ++i)
297 vrs[vd].u[i] = elog2(vrs[vb].u[i]);
298 break;
299 case 8: /* vrfin */
300 for (i = 0; i < 4; ++i)
301 vrs[vd].u[i] = rfin(vrs[vb].u[i]);
302 break;
303 case 9: /* vrfiz */
304 for (i = 0; i < 4; ++i)
305 vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
306 break;
307 case 10: /* vrfip */
308 for (i = 0; i < 4; ++i) {
309 u32 x = vrs[vb].u[i];
310 x = (x & 0x80000000)? rfiz(x): rfii(x);
311 vrs[vd].u[i] = x;
312 }
313 break;
314 case 11: /* vrfim */
315 for (i = 0; i < 4; ++i) {
316 u32 x = vrs[vb].u[i];
317 x = (x & 0x80000000)? rfii(x): rfiz(x);
318 vrs[vd].u[i] = x;
319 }
320 break;
321 case 14: /* vctuxs */
322 for (i = 0; i < 4; ++i)
323 vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
324 &current->thread.vscr.u[3]);
325 break;
326 case 15: /* vctsxs */
327 for (i = 0; i < 4; ++i)
328 vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
329 &current->thread.vscr.u[3]);
330 break;
331 default:
332 return -EINVAL;
333 }
334 break;
335 case 46: /* vmaddfp */
336 vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
337 break;
338 case 47: /* vnmsubfp */
339 vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
340 break;
341 default:
342 return -EINVAL;
343 }
344
345 return 0;
346}
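/*
 * Illustrative user-space sketch of the field decode done by
 * emulate_altivec() above.  0x1022180a is the encoding of
 * "vaddfp v1,v2,v3" (primary opcode 4, 11-bit extended opcode 10);
 * the switch in the kernel keys on the low 6 bits and then on the VC
 * field for the VX-form sub-opcodes.  This is only a decode demo, no
 * emulation is performed.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t instr = 0x1022180a;          /* vaddfp v1,v2,v3 */
        unsigned vd = (instr >> 21) & 0x1f;
        unsigned va = (instr >> 16) & 0x1f;
        unsigned vb = (instr >> 11) & 0x1f;
        unsigned vc = (instr >> 6) & 0x1f;
        printf("opcode=%u vd=%u va=%u vb=%u vc=%u low6=%u\n",
               instr >> 26, vd, va, vb, vc, instr & 0x3f);
        /* prints: opcode=4 vd=1 va=2 vb=3 vc=0 low6=10 */
        return 0;
}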
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 0306510bc4ff..022f220e772f 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -1,3 +1,4 @@
1#include <asm/page.h>
1#include <asm-generic/vmlinux.lds.h> 2#include <asm-generic/vmlinux.lds.h>
2 3
3OUTPUT_ARCH(powerpc:common64) 4OUTPUT_ARCH(powerpc:common64)
@@ -17,7 +18,7 @@ SECTIONS
17 LOCK_TEXT 18 LOCK_TEXT
18 KPROBES_TEXT 19 KPROBES_TEXT
19 *(.fixup) 20 *(.fixup)
20 . = ALIGN(4096); 21 . = ALIGN(PAGE_SIZE);
21 _etext = .; 22 _etext = .;
22 } 23 }
23 24
@@ -43,7 +44,7 @@ SECTIONS
43 44
44 45
45 /* will be freed after init */ 46 /* will be freed after init */
46 . = ALIGN(4096); 47 . = ALIGN(PAGE_SIZE);
47 __init_begin = .; 48 __init_begin = .;
48 49
49 .init.text : { 50 .init.text : {
@@ -83,7 +84,7 @@ SECTIONS
83 84
84 SECURITY_INIT 85 SECURITY_INIT
85 86
86 . = ALIGN(4096); 87 . = ALIGN(PAGE_SIZE);
87 .init.ramfs : { 88 .init.ramfs : {
88 __initramfs_start = .; 89 __initramfs_start = .;
89 *(.init.ramfs) 90 *(.init.ramfs)
@@ -96,18 +97,22 @@ SECTIONS
96 __per_cpu_end = .; 97 __per_cpu_end = .;
97 } 98 }
98 99
100 . = ALIGN(PAGE_SIZE);
99 . = ALIGN(16384); 101 . = ALIGN(16384);
100 __init_end = .; 102 __init_end = .;
101 /* freed after init ends here */ 103 /* freed after init ends here */
102 104
103 105
104 /* Read/write sections */ 106 /* Read/write sections */
107 . = ALIGN(PAGE_SIZE);
105 . = ALIGN(16384); 108 . = ALIGN(16384);
109 _sdata = .;
106 /* The initial task and kernel stack */ 110 /* The initial task and kernel stack */
107 .data.init_task : { 111 .data.init_task : {
108 *(.data.init_task) 112 *(.data.init_task)
109 } 113 }
110 114
115 . = ALIGN(PAGE_SIZE);
111 .data.page_aligned : { 116 .data.page_aligned : {
112 *(.data.page_aligned) 117 *(.data.page_aligned)
113 } 118 }
@@ -129,18 +134,18 @@ SECTIONS
129 __toc_start = .; 134 __toc_start = .;
130 *(.got) 135 *(.got)
131 *(.toc) 136 *(.toc)
132 . = ALIGN(4096); 137 . = ALIGN(PAGE_SIZE);
133 _edata = .; 138 _edata = .;
134 } 139 }
135 140
136 141
137 . = ALIGN(4096); 142 . = ALIGN(PAGE_SIZE);
138 .bss : { 143 .bss : {
139 __bss_start = .; 144 __bss_start = .;
140 *(.bss) 145 *(.bss)
141 __bss_stop = .; 146 __bss_stop = .;
142 } 147 }
143 148
144 . = ALIGN(4096); 149 . = ALIGN(PAGE_SIZE);
145 _end = . ; 150 _end = . ;
146} 151}
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile
index 0b6e967de948..42d5295bf345 100644
--- a/arch/ppc64/lib/Makefile
+++ b/arch/ppc64/lib/Makefile
@@ -2,17 +2,4 @@
2# Makefile for ppc64-specific library files.. 2# Makefile for ppc64-specific library files..
3# 3#
4 4
5lib-y := checksum.o string.o strcase.o 5lib-y := string.o
6lib-y += copypage.o memcpy.o copyuser.o usercopy.o
7
8# Lock primitives are defined as no-ops in include/linux/spinlock.h
9# for non-SMP configs. Don't build the real versions.
10
11lib-$(CONFIG_SMP) += locks.o
12
13# e2a provides EBCDIC to ASCII conversions.
14ifdef CONFIG_PPC_ISERIES
15obj-y += e2a.o
16endif
17
18lib-$(CONFIG_DEBUG_KERNEL) += sstep.o
diff --git a/arch/ppc64/lib/string.S b/arch/ppc64/lib/string.S
index 813587e5c2ec..e21a0038a4d6 100644
--- a/arch/ppc64/lib/string.S
+++ b/arch/ppc64/lib/string.S
@@ -65,112 +65,6 @@ _GLOBAL(strlen)
65 subf r3,r3,r4 65 subf r3,r3,r4
66 blr 66 blr
67 67
68_GLOBAL(memset)
69 neg r0,r3
70 rlwimi r4,r4,8,16,23
71 andi. r0,r0,7 /* # bytes to be 8-byte aligned */
72 rlwimi r4,r4,16,0,15
73 cmplw cr1,r5,r0 /* do we get that far? */
74 rldimi r4,r4,32,0
75 mtcrf 1,r0
76 mr r6,r3
77 blt cr1,8f
78 beq+ 3f /* if already 8-byte aligned */
79 subf r5,r0,r5
80 bf 31,1f
81 stb r4,0(r6)
82 addi r6,r6,1
831: bf 30,2f
84 sth r4,0(r6)
85 addi r6,r6,2
862: bf 29,3f
87 stw r4,0(r6)
88 addi r6,r6,4
893: srdi. r0,r5,6
90 clrldi r5,r5,58
91 mtctr r0
92 beq 5f
934: std r4,0(r6)
94 std r4,8(r6)
95 std r4,16(r6)
96 std r4,24(r6)
97 std r4,32(r6)
98 std r4,40(r6)
99 std r4,48(r6)
100 std r4,56(r6)
101 addi r6,r6,64
102 bdnz 4b
1035: srwi. r0,r5,3
104 clrlwi r5,r5,29
105 mtcrf 1,r0
106 beq 8f
107 bf 29,6f
108 std r4,0(r6)
109 std r4,8(r6)
110 std r4,16(r6)
111 std r4,24(r6)
112 addi r6,r6,32
1136: bf 30,7f
114 std r4,0(r6)
115 std r4,8(r6)
116 addi r6,r6,16
1177: bf 31,8f
118 std r4,0(r6)
119 addi r6,r6,8
1208: cmpwi r5,0
121 mtcrf 1,r5
122 beqlr+
123 bf 29,9f
124 stw r4,0(r6)
125 addi r6,r6,4
1269: bf 30,10f
127 sth r4,0(r6)
128 addi r6,r6,2
12910: bflr 31
130 stb r4,0(r6)
131 blr
132
133_GLOBAL(memmove)
134 cmplw 0,r3,r4
135 bgt .backwards_memcpy
136 b .memcpy
137
138_GLOBAL(backwards_memcpy)
139 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
140 add r6,r3,r5
141 add r4,r4,r5
142 beq 2f
143 andi. r0,r6,3
144 mtctr r7
145 bne 5f
1461: lwz r7,-4(r4)
147 lwzu r8,-8(r4)
148 stw r7,-4(r6)
149 stwu r8,-8(r6)
150 bdnz 1b
151 andi. r5,r5,7
1522: cmplwi 0,r5,4
153 blt 3f
154 lwzu r0,-4(r4)
155 subi r5,r5,4
156 stwu r0,-4(r6)
1573: cmpwi 0,r5,0
158 beqlr
159 mtctr r5
1604: lbzu r0,-1(r4)
161 stbu r0,-1(r6)
162 bdnz 4b
163 blr
1645: mtctr r0
1656: lbzu r7,-1(r4)
166 stbu r7,-1(r6)
167 bdnz 6b
168 subf r5,r0,r5
169 rlwinm. r7,r5,32-3,3,31
170 beq 2b
171 mtctr r7
172 b 1b
173
174_GLOBAL(memcmp) 68_GLOBAL(memcmp)
175 cmpwi 0,r5,0 69 cmpwi 0,r5,0
176 ble- 2f 70 ble- 2f
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile
deleted file mode 100644
index 3695d00d347f..000000000000
--- a/arch/ppc64/mm/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Makefile for the linux ppc-specific parts of the memory manager.
3#
4
5EXTRA_CFLAGS += -mno-minimal-toc
6
7obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
8 slb_low.o slb.o stab.o mmap.o
9obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
11obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
deleted file mode 100644
index e2bd7776622f..000000000000
--- a/arch/ppc64/mm/init.c
+++ /dev/null
@@ -1,950 +0,0 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * Dave Engebretsen <engebret@us.ibm.com>
14 * Rework for PPC64 port.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/string.h>
29#include <linux/types.h>
30#include <linux/mman.h>
31#include <linux/mm.h>
32#include <linux/swap.h>
33#include <linux/stddef.h>
34#include <linux/vmalloc.h>
35#include <linux/init.h>
36#include <linux/delay.h>
37#include <linux/bootmem.h>
38#include <linux/highmem.h>
39#include <linux/idr.h>
40#include <linux/nodemask.h>
41#include <linux/module.h>
42
43#include <asm/pgalloc.h>
44#include <asm/page.h>
45#include <asm/prom.h>
46#include <asm/lmb.h>
47#include <asm/rtas.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/machdep.h>
55#include <asm/tlb.h>
56#include <asm/eeh.h>
57#include <asm/processor.h>
58#include <asm/mmzone.h>
59#include <asm/cputable.h>
60#include <asm/ppcdebug.h>
61#include <asm/sections.h>
62#include <asm/system.h>
63#include <asm/iommu.h>
64#include <asm/abs_addr.h>
65#include <asm/vdso.h>
66#include <asm/imalloc.h>
67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
76int mem_init_done;
77unsigned long ioremap_bot = IMALLOC_BASE;
78static unsigned long phbs_io_bot = PHBS_IO_BASE;
79
80extern pgd_t swapper_pg_dir[];
81extern struct task_struct *current_set[NR_CPUS];
82
83unsigned long klimit = (unsigned long)_end;
84
85unsigned long _SDR1=0;
86unsigned long _ASR=0;
87
88/* max amount of RAM to use */
89unsigned long __max_memory;
90
91/* info on what we think the IO hole is */
92unsigned long io_hole_start;
93unsigned long io_hole_size;
94
95void show_mem(void)
96{
97 unsigned long total = 0, reserved = 0;
98 unsigned long shared = 0, cached = 0;
99 struct page *page;
100 pg_data_t *pgdat;
101 unsigned long i;
102
103 printk("Mem-info:\n");
104 show_free_areas();
105 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
106 for_each_pgdat(pgdat) {
107 unsigned long flags;
108 pgdat_resize_lock(pgdat, &flags);
109 for (i = 0; i < pgdat->node_spanned_pages; i++) {
110 page = pgdat_page_nr(pgdat, i);
111 total++;
112 if (PageReserved(page))
113 reserved++;
114 else if (PageSwapCache(page))
115 cached++;
116 else if (page_count(page))
117 shared += page_count(page) - 1;
118 }
119 pgdat_resize_unlock(pgdat, &flags);
120 }
121 printk("%ld pages of RAM\n", total);
122 printk("%ld reserved pages\n", reserved);
123 printk("%ld pages shared\n", shared);
124 printk("%ld pages swap cached\n", cached);
125}
126
127#ifdef CONFIG_PPC_ISERIES
128
129void __iomem *ioremap(unsigned long addr, unsigned long size)
130{
131 return (void __iomem *)addr;
132}
133
134extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
135 unsigned long flags)
136{
137 return (void __iomem *)addr;
138}
139
140void iounmap(volatile void __iomem *addr)
141{
142 return;
143}
144
145#else
146
147/*
148 * map_io_page currently only called by __ioremap
149 * map_io_page adds an entry to the ioremap page table
150 * and adds an entry to the HPT, possibly bolting it
151 */
152static int map_io_page(unsigned long ea, unsigned long pa, int flags)
153{
154 pgd_t *pgdp;
155 pud_t *pudp;
156 pmd_t *pmdp;
157 pte_t *ptep;
158 unsigned long vsid;
159
160 if (mem_init_done) {
161 pgdp = pgd_offset_k(ea);
162 pudp = pud_alloc(&init_mm, pgdp, ea);
163 if (!pudp)
164 return -ENOMEM;
165 pmdp = pmd_alloc(&init_mm, pudp, ea);
166 if (!pmdp)
167 return -ENOMEM;
168 ptep = pte_alloc_kernel(pmdp, ea);
169 if (!ptep)
170 return -ENOMEM;
171 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
172 __pgprot(flags)));
173 } else {
174 unsigned long va, vpn, hash, hpteg;
175
176 /*
177 * If the mm subsystem is not fully up, we cannot create a
178 * linux page table entry for this mapping. Simply bolt an
179 * entry in the hardware page table.
180 */
181 vsid = get_kernel_vsid(ea);
182 va = (vsid << 28) | (ea & 0xFFFFFFF);
183 vpn = va >> PAGE_SHIFT;
184
185 hash = hpt_hash(vpn, 0);
186
187 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
188
189 /* Panic if a pte group is full */
190 if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
191 HPTE_V_BOLTED,
192 _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
193 == -1) {
194 panic("map_io_page: could not insert mapping");
195 }
196 }
197 return 0;
198}
199
200
201static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
202 unsigned long ea, unsigned long size,
203 unsigned long flags)
204{
205 unsigned long i;
206
207 if ((flags & _PAGE_PRESENT) == 0)
208 flags |= pgprot_val(PAGE_KERNEL);
209
210 for (i = 0; i < size; i += PAGE_SIZE)
211 if (map_io_page(ea+i, pa+i, flags))
212 return NULL;
213
214 return (void __iomem *) (ea + (addr & ~PAGE_MASK));
215}
216
217
218void __iomem *
219ioremap(unsigned long addr, unsigned long size)
220{
221 return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
222}
223
224void __iomem * __ioremap(unsigned long addr, unsigned long size,
225 unsigned long flags)
226{
227 unsigned long pa, ea;
228 void __iomem *ret;
229
230 /*
231 * Choose an address to map it to.
232 * Once the imalloc system is running, we use it.
233 * Before that, we map using addresses going
234 * up from ioremap_bot. imalloc will use
235 * the addresses from ioremap_bot through
236 * IMALLOC_END
237 *
238 */
239 pa = addr & PAGE_MASK;
240 size = PAGE_ALIGN(addr + size) - pa;
241
242 if (size == 0)
243 return NULL;
244
245 if (mem_init_done) {
246 struct vm_struct *area;
247 area = im_get_free_area(size);
248 if (area == NULL)
249 return NULL;
250 ea = (unsigned long)(area->addr);
251 ret = __ioremap_com(addr, pa, ea, size, flags);
252 if (!ret)
253 im_free(area->addr);
254 } else {
255 ea = ioremap_bot;
256 ret = __ioremap_com(addr, pa, ea, size, flags);
257 if (ret)
258 ioremap_bot += size;
259 }
260 return ret;
261}
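/*
 * Minimal driver-style sketch (not from this patch) showing the usual way
 * the ioremap()/iounmap() pair above is consumed.  The physical address,
 * size and register offset are made-up example values.
 */
#include <linux/errno.h>
#include <asm/io.h>

static void __iomem *regs;

static int example_map_device(void)
{
        regs = ioremap(0xf8000000UL, 0x1000);   /* map 4K of MMIO space */
        if (!regs)
                return -ENOMEM;
        writel(0x1, regs + 0x10);               /* poke an example register */
        return 0;
}

static void example_unmap_device(void)
{
        iounmap(regs);                          /* releases the imalloc region */
}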
262
263#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
264
265int __ioremap_explicit(unsigned long pa, unsigned long ea,
266 unsigned long size, unsigned long flags)
267{
268 struct vm_struct *area;
269 void __iomem *ret;
270
271 /* For now, require page-aligned values for pa, ea, and size */
272 if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
273 !IS_PAGE_ALIGNED(size)) {
274 printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
275 return 1;
276 }
277
278 if (!mem_init_done) {
279 /* Two things to consider in this case:
280 * 1) No records will be kept (imalloc, etc) that the region
281 * has been remapped
282 * 2) It won't be easy to iounmap() the region later (because
283 * of 1)
284 */
285 ;
286 } else {
287 area = im_get_area(ea, size,
288 IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
289 if (area == NULL) {
290 /* Expected when PHB-dlpar is in play */
291 return 1;
292 }
293 if (ea != (unsigned long) area->addr) {
294 printk(KERN_ERR "unexpected addr return from "
295 "im_get_area\n");
296 return 1;
297 }
298 }
299
300 ret = __ioremap_com(pa, pa, ea, size, flags);
301 if (ret == NULL) {
302 printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
303 return 1;
304 }
305 if (ret != (void *) ea) {
306 printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
307 return 1;
308 }
309
310 return 0;
311}
312
313/*
314 * Unmap an IO region and remove it from imalloc'd list.
315 * Access to IO memory should be serialized by driver.
316 * This code is modeled after vmalloc code - unmap_vm_area()
317 *
318 * XXX what about calls before mem_init_done (ie python_countermeasures())
319 */
320void iounmap(volatile void __iomem *token)
321{
322 void *addr;
323
324 if (!mem_init_done)
325 return;
326
327 addr = (void *) ((unsigned long __force) token & PAGE_MASK);
328
329 im_free(addr);
330}
331
332static int iounmap_subset_regions(unsigned long addr, unsigned long size)
333{
334 struct vm_struct *area;
335
336 /* Check whether subsets of this region exist */
337 area = im_get_area(addr, size, IM_REGION_SUPERSET);
338 if (area == NULL)
339 return 1;
340
341 while (area) {
342 iounmap((void __iomem *) area->addr);
343 area = im_get_area(addr, size,
344 IM_REGION_SUPERSET);
345 }
346
347 return 0;
348}
349
350int iounmap_explicit(volatile void __iomem *start, unsigned long size)
351{
352 struct vm_struct *area;
353 unsigned long addr;
354 int rc;
355
356 addr = (unsigned long __force) start & PAGE_MASK;
357
358 /* Verify that the region either exists or is a subset of an existing
359 * region. In the latter case, split the parent region to create
360 * the exact region
361 */
362 area = im_get_area(addr, size,
363 IM_REGION_EXISTS | IM_REGION_SUBSET);
364 if (area == NULL) {
365 /* Determine whether subset regions exist. If so, unmap */
366 rc = iounmap_subset_regions(addr, size);
367 if (rc) {
368 printk(KERN_ERR
369 "%s() cannot unmap nonexistent range 0x%lx\n",
370 __FUNCTION__, addr);
371 return 1;
372 }
373 } else {
374 iounmap((void __iomem *) area->addr);
375 }
376 /*
377 * FIXME! This can't be right:
378 iounmap(area->addr);
379 * Maybe it should be "iounmap(area);"
380 */
381 return 0;
382}
383
384#endif
385
386EXPORT_SYMBOL(ioremap);
387EXPORT_SYMBOL(__ioremap);
388EXPORT_SYMBOL(iounmap);
389
390void free_initmem(void)
391{
392 unsigned long addr;
393
394 addr = (unsigned long)__init_begin;
395 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
396 memset((void *)addr, 0xcc, PAGE_SIZE);
397 ClearPageReserved(virt_to_page(addr));
398 set_page_count(virt_to_page(addr), 1);
399 free_page(addr);
400 totalram_pages++;
401 }
402 printk ("Freeing unused kernel memory: %luk freed\n",
403 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
404}
405
406#ifdef CONFIG_BLK_DEV_INITRD
407void free_initrd_mem(unsigned long start, unsigned long end)
408{
409 if (start < end)
410 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
411 for (; start < end; start += PAGE_SIZE) {
412 ClearPageReserved(virt_to_page(start));
413 set_page_count(virt_to_page(start), 1);
414 free_page(start);
415 totalram_pages++;
416 }
417}
418#endif
419
420static DEFINE_SPINLOCK(mmu_context_lock);
421static DEFINE_IDR(mmu_context_idr);
422
423int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
424{
425 int index;
426 int err;
427
428again:
429 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
430 return -ENOMEM;
431
432 spin_lock(&mmu_context_lock);
433 err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
434 spin_unlock(&mmu_context_lock);
435
436 if (err == -EAGAIN)
437 goto again;
438 else if (err)
439 return err;
440
441 if (index > MAX_CONTEXT) {
442 idr_remove(&mmu_context_idr, index);
443 return -ENOMEM;
444 }
445
446 mm->context.id = index;
447
448 return 0;
449}
450
451void destroy_context(struct mm_struct *mm)
452{
453 spin_lock(&mmu_context_lock);
454 idr_remove(&mmu_context_idr, mm->context.id);
455 spin_unlock(&mmu_context_lock);
456
457 mm->context.id = NO_CONTEXT;
458}
459
460/*
461 * Do very early mm setup.
462 */
463void __init mm_init_ppc64(void)
464{
465#ifndef CONFIG_PPC_ISERIES
466 unsigned long i;
467#endif
468
469 ppc64_boot_msg(0x100, "MM Init");
470
471 /* This is the story of the IO hole... please, keep seated,
472 * unfortunately, we are out of oxygen masks at the moment.
473 * So we need some rough way to tell where your big IO hole
474 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
475 * that area as well, on POWER4 we don't have one, etc...
476 * We need that as a "hint" when sizing the TCE table on POWER3
477 * So far, the simplest way that seems to work well enough for us is
478 * to just assume that the first discontinuity in our physical
479 * RAM layout is the IO hole. That may not be correct in the future
480 * (and isn't on iSeries but then we don't care ;)
481 */
482
483#ifndef CONFIG_PPC_ISERIES
484 for (i = 1; i < lmb.memory.cnt; i++) {
485 unsigned long base, prevbase, prevsize;
486
487 prevbase = lmb.memory.region[i-1].base;
488 prevsize = lmb.memory.region[i-1].size;
489 base = lmb.memory.region[i].base;
490 if (base > (prevbase + prevsize)) {
491 io_hole_start = prevbase + prevsize;
492 io_hole_size = base - (prevbase + prevsize);
493 break;
494 }
495 }
496#endif /* CONFIG_PPC_ISERIES */
497 if (io_hole_start)
498 printk("IO Hole assumed to be %lx -> %lx\n",
499 io_hole_start, io_hole_start + io_hole_size - 1);
500
501 ppc64_boot_msg(0x100, "MM Init Done");
502}
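/*
 * User-space sketch of the "first discontinuity is the IO hole" heuristic
 * used in mm_init_ppc64() above, run over a made-up memory layout.  The
 * struct below only mimics the fields of lmb.memory that the loop consults;
 * it is not the kernel's lmb structure.
 */
#include <stdio.h>

struct region { unsigned long long base, size; };

int main(void)
{
        struct region mem[] = {
                { 0x00000000ULL,  0x80000000ULL },   /* 0 - 2G */
                { 0x100000000ULL, 0x80000000ULL },   /* 4G - 6G */
        };
        unsigned long long hole_start = 0, hole_size = 0;
        int i;

        for (i = 1; i < 2; i++) {
                unsigned long long prevend = mem[i-1].base + mem[i-1].size;
                if (mem[i].base > prevend) {
                        hole_start = prevend;
                        hole_size = mem[i].base - prevend;
                        break;
                }
        }
        printf("IO hole assumed to be %llx -> %llx\n",
               hole_start, hole_start + hole_size - 1);   /* 80000000 -> ffffffff */
        return 0;
}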
503
504/*
505 * This is called by /dev/mem to know if a given address has to
506 * be mapped non-cacheable or not
507 */
508int page_is_ram(unsigned long pfn)
509{
510 int i;
511 unsigned long paddr = (pfn << PAGE_SHIFT);
512
513 for (i=0; i < lmb.memory.cnt; i++) {
514 unsigned long base;
515
516 base = lmb.memory.region[i].base;
517
518 if ((paddr >= base) &&
519 (paddr < (base + lmb.memory.region[i].size))) {
520 return 1;
521 }
522 }
523
524 return 0;
525}
526EXPORT_SYMBOL(page_is_ram);
527
528/*
529 * Initialize the bootmem system and give it all the memory we
530 * have available.
531 */
532#ifndef CONFIG_NEED_MULTIPLE_NODES
533void __init do_init_bootmem(void)
534{
535 unsigned long i;
536 unsigned long start, bootmap_pages;
537 unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
538 int boot_mapsize;
539
540 /*
541 * Find an area to use for the bootmem bitmap. Calculate the size of
542 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
543 * Add 1 additional page in case the address isn't page-aligned.
544 */
545 bootmap_pages = bootmem_bootmap_pages(total_pages);
546
547 start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
548 BUG_ON(!start);
549
550 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
551
552 max_pfn = max_low_pfn;
553
554 /* Add all physical memory to the bootmem map, mark each area
555 * present.
556 */
557 for (i=0; i < lmb.memory.cnt; i++)
558 free_bootmem(lmb.memory.region[i].base,
559 lmb_size_bytes(&lmb.memory, i));
560
561 /* reserve the sections we're already using */
562 for (i=0; i < lmb.reserved.cnt; i++)
563 reserve_bootmem(lmb.reserved.region[i].base,
564 lmb_size_bytes(&lmb.reserved, i));
565
566 for (i=0; i < lmb.memory.cnt; i++)
567 memory_present(0, lmb_start_pfn(&lmb.memory, i),
568 lmb_end_pfn(&lmb.memory, i));
569}
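/*
 * Worked example of the bootmem bitmap sizing described in the comment in
 * do_init_bootmem() above, using made-up numbers (2 GB of RAM and a 4 KB
 * page size) rather than real lmb data: one bit per page, rounded up to
 * whole pages for the bitmap itself.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long ram = 2ULL << 30;                      /* 2 GB */
        unsigned long page_size = 4096;
        unsigned long long total_pages = ram / page_size;         /* 524288 */
        unsigned long long bitmap_bytes = (total_pages + 7) / 8;  /* 65536 */
        unsigned long long bitmap_pages =
                (bitmap_bytes + page_size - 1) / page_size;       /* 16 pages */
        printf("%llu pages need a %llu byte bitmap (%llu pages)\n",
               total_pages, bitmap_bytes, bitmap_pages);
        return 0;
}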
570
571/*
572 * paging_init() sets up the page tables - in fact we've already done this.
573 */
574void __init paging_init(void)
575{
576 unsigned long zones_size[MAX_NR_ZONES];
577 unsigned long zholes_size[MAX_NR_ZONES];
578 unsigned long total_ram = lmb_phys_mem_size();
579 unsigned long top_of_ram = lmb_end_of_DRAM();
580
581 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
582 top_of_ram, total_ram);
583 printk(KERN_INFO "Memory hole size: %ldMB\n",
584 (top_of_ram - total_ram) >> 20);
585 /*
586 * All pages are DMA-able so we put them all in the DMA zone.
587 */
588 memset(zones_size, 0, sizeof(zones_size));
589 memset(zholes_size, 0, sizeof(zholes_size));
590
591 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
592 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
593
594 free_area_init_node(0, NODE_DATA(0), zones_size,
595 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
596}
597#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
598
599static struct kcore_list kcore_vmem;
600
601static int __init setup_kcore(void)
602{
603 int i;
604
605 for (i=0; i < lmb.memory.cnt; i++) {
606 unsigned long base, size;
607 struct kcore_list *kcore_mem;
608
609 base = lmb.memory.region[i].base;
610 size = lmb.memory.region[i].size;
611
612 /* GFP_ATOMIC to avoid might_sleep warnings during boot */
613 kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
614 if (!kcore_mem)
615 panic("mem_init: kmalloc failed\n");
616
617 kclist_add(kcore_mem, __va(base), size);
618 }
619
620 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
621
622 return 0;
623}
624module_init(setup_kcore);
625
626void __init mem_init(void)
627{
628#ifdef CONFIG_NEED_MULTIPLE_NODES
629 int nid;
630#endif
631 pg_data_t *pgdat;
632 unsigned long i;
633 struct page *page;
634 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
635
636 num_physpages = max_low_pfn; /* RAM is assumed contiguous */
637 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
638
639#ifdef CONFIG_NEED_MULTIPLE_NODES
640 for_each_online_node(nid) {
641 if (NODE_DATA(nid)->node_spanned_pages != 0) {
642 printk("freeing bootmem node %x\n", nid);
643 totalram_pages +=
644 free_all_bootmem_node(NODE_DATA(nid));
645 }
646 }
647#else
648 max_mapnr = num_physpages;
649 totalram_pages += free_all_bootmem();
650#endif
651
652 for_each_pgdat(pgdat) {
653 unsigned long flags;
654 pgdat_resize_lock(pgdat, &flags);
655 for (i = 0; i < pgdat->node_spanned_pages; i++) {
656 page = pgdat_page_nr(pgdat, i);
657 if (PageReserved(page))
658 reservedpages++;
659 }
660 pgdat_resize_unlock(pgdat, &flags);
661 }
662
663 codesize = (unsigned long)&_etext - (unsigned long)&_stext;
664 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
665 datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
666 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
667
668 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
669 "%luk reserved, %luk data, %luk bss, %luk init)\n",
670 (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
671 num_physpages << (PAGE_SHIFT-10),
672 codesize >> 10,
673 reservedpages << (PAGE_SHIFT-10),
674 datasize >> 10,
675 bsssize >> 10,
676 initsize >> 10);
677
678 mem_init_done = 1;
679
680 /* Initialize the vDSO */
681 vdso_init();
682}
683
684/*
685 * This is called when a page has been modified by the kernel.
686 * It just marks the page as not i-cache clean. We do the i-cache
687 * flush later when the page is given to a user process, if necessary.
688 */
689void flush_dcache_page(struct page *page)
690{
691 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
692 return;
693 /* avoid an atomic op if possible */
694 if (test_bit(PG_arch_1, &page->flags))
695 clear_bit(PG_arch_1, &page->flags);
696}
697EXPORT_SYMBOL(flush_dcache_page);
698
699void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
700{
701 clear_page(page);
702
703 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
704 return;
705 /*
706 * We shouldn't have to do this, but some versions of glibc
707 * require it (ld.so assumes zero filled pages are icache clean)
708 * - Anton
709 */
710
711 /* avoid an atomic op if possible */
712 if (test_bit(PG_arch_1, &pg->flags))
713 clear_bit(PG_arch_1, &pg->flags);
714}
715EXPORT_SYMBOL(clear_user_page);
716
717void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
718 struct page *pg)
719{
720 copy_page(vto, vfrom);
721
722 /*
723 * We should be able to use the following optimisation, however
724 * there are two problems.
725 * Firstly a bug in some versions of binutils meant PLT sections
726 * were not marked executable.
727 * Secondly the first word in the GOT section is blrl, used
728 * to establish the GOT address. Until recently the GOT was
729 * not marked executable.
730 * - Anton
731 */
732#if 0
733 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
734 return;
735#endif
736
737 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
738 return;
739
740 /* avoid an atomic op if possible */
741 if (test_bit(PG_arch_1, &pg->flags))
742 clear_bit(PG_arch_1, &pg->flags);
743}
744
745void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
746 unsigned long addr, int len)
747{
748 unsigned long maddr;
749
750 maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
751 flush_icache_range(maddr, maddr + len);
752}
753EXPORT_SYMBOL(flush_icache_user_range);
754
755/*
756 * This is called at the end of handling a user page fault, when the
757 * fault has been handled by updating a PTE in the linux page tables.
758 * We use it to preload an HPTE into the hash table corresponding to
759 * the updated linux PTE.
760 *
761 * This must always be called with the mm->page_table_lock held
762 */
763void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
764 pte_t pte)
765{
766 unsigned long vsid;
767 void *pgdir;
768 pte_t *ptep;
769 int local = 0;
770 cpumask_t tmp;
771 unsigned long flags;
772
773 /* handle i-cache coherency */
774 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
775 !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
776 unsigned long pfn = pte_pfn(pte);
777 if (pfn_valid(pfn)) {
778 struct page *page = pfn_to_page(pfn);
779 if (!PageReserved(page)
780 && !test_bit(PG_arch_1, &page->flags)) {
781 __flush_dcache_icache(page_address(page));
782 set_bit(PG_arch_1, &page->flags);
783 }
784 }
785 }
786
787 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
788 if (!pte_young(pte))
789 return;
790
791 pgdir = vma->vm_mm->pgd;
792 if (pgdir == NULL)
793 return;
794
795 ptep = find_linux_pte(pgdir, ea);
796 if (!ptep)
797 return;
798
799 vsid = get_vsid(vma->vm_mm->context.id, ea);
800
801 local_irq_save(flags);
802 tmp = cpumask_of_cpu(smp_processor_id());
803 if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
804 local = 1;
805
806 __hash_page(ea, 0, vsid, ptep, 0x300, local);
807 local_irq_restore(flags);
808}
809
810void __iomem * reserve_phb_iospace(unsigned long size)
811{
812 void __iomem *virt_addr;
813
814 if (phbs_io_bot >= IMALLOC_BASE)
815 panic("reserve_phb_iospace(): phb io space overflow\n");
816
817 virt_addr = (void __iomem *) phbs_io_bot;
818 phbs_io_bot += size;
819
820 return virt_addr;
821}
822
823static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
824{
825 memset(addr, 0, kmem_cache_size(cache));
826}
827
828static const int pgtable_cache_size[2] = {
829 PTE_TABLE_SIZE, PMD_TABLE_SIZE
830};
831static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
832 "pgd_pte_cache", "pud_pmd_cache",
833};
834
835kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
836
837void pgtable_cache_init(void)
838{
839 int i;
840
841 BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
842 BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
843 BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
844 BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
845
846 for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
847 int size = pgtable_cache_size[i];
848 const char *name = pgtable_cache_name[i];
849
850 pgtable_cache[i] = kmem_cache_create(name,
851 size, size,
852 SLAB_HWCACHE_ALIGN
853 | SLAB_MUST_HWCACHE_ALIGN,
854 zero_ctor,
855 NULL);
856 if (! pgtable_cache[i])
857 panic("pgtable_cache_init(): could not create %s!\n",
858 name);
859 }
860}
861
862pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
863 unsigned long size, pgprot_t vma_prot)
864{
865 if (ppc_md.phys_mem_access_prot)
866 return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
867
868 if (!page_is_ram(addr >> PAGE_SHIFT))
869 vma_prot = __pgprot(pgprot_val(vma_prot)
870 | _PAGE_GUARDED | _PAGE_NO_CACHE);
871 return vma_prot;
872}
873EXPORT_SYMBOL(phys_mem_access_prot);
874
875#ifdef CONFIG_MEMORY_HOTPLUG
876
877void online_page(struct page *page)
878{
879 ClearPageReserved(page);
880 free_cold_page(page);
881 totalram_pages++;
882 num_physpages++;
883}
884
885/*
886 * This works only for the non-NUMA case. Later, we'll need a lookup
887 * to convert from real physical addresses to nid, that doesn't use
888 * pfn_to_nid().
889 */
890int __devinit add_memory(u64 start, u64 size)
891{
892 struct pglist_data *pgdata = NODE_DATA(0);
893 struct zone *zone;
894 unsigned long start_pfn = start >> PAGE_SHIFT;
895 unsigned long nr_pages = size >> PAGE_SHIFT;
896
897 /* this should work for most non-highmem platforms */
898 zone = pgdata->node_zones;
899
900 return __add_pages(zone, start_pfn, nr_pages);
901
902 return 0;
903}
904
905/*
906 * First pass at this code will check to determine if the remove
907 * request is within the RMO. Do not allow removal within the RMO.
908 */
909int __devinit remove_memory(u64 start, u64 size)
910{
911 struct zone *zone;
912 unsigned long start_pfn, end_pfn, nr_pages;
913
914 start_pfn = start >> PAGE_SHIFT;
915 nr_pages = size >> PAGE_SHIFT;
916 end_pfn = start_pfn + nr_pages;
917
918 printk("%s(): Attempting to remove memory in range "
919 "%lx to %lx\n", __func__, start, start+size);
920 /*
921 * check for range within RMO
922 */
923 zone = page_zone(pfn_to_page(start_pfn));
924
925 printk("%s(): memory will be removed from "
926 "the %s zone\n", __func__, zone->name);
927
928 /*
929 * not handling removing memory ranges that
930 * overlap multiple zones yet
931 */
932 if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
933 goto overlap;
934
935 /* make sure it is NOT in RMO */
936 if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
937 printk("%s(): range to be removed must NOT be in RMO!\n",
938 __func__);
939 goto in_rmo;
940 }
941
942 return __remove_pages(zone, start_pfn, nr_pages);
943
944overlap:
945 printk("%s(): memory range to be removed overlaps "
946 "multiple zones!!!\n", __func__);
947in_rmo:
948 return -1;
949}
950#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/ppc64/oprofile/Kconfig b/arch/ppc64/oprofile/Kconfig
deleted file mode 100644
index 5ade19801b97..000000000000
--- a/arch/ppc64/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, including the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/ppc64/oprofile/Makefile b/arch/ppc64/oprofile/Makefile
deleted file mode 100644
index 162dbf06c142..000000000000
--- a/arch/ppc64/oprofile/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1obj-$(CONFIG_OPROFILE) += oprofile.o
2
3DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
4 oprof.o cpu_buffer.o buffer_sync.o \
5 event_buffer.o oprofile_files.o \
6 oprofilefs.o oprofile_stats.o \
7 timer_int.o )
8
9oprofile-y := $(DRIVER_OBJS) common.o op_model_rs64.o op_model_power4.o
diff --git a/arch/ppc64/xmon/Makefile b/arch/ppc64/xmon/Makefile
deleted file mode 100644
index fb21a7088d3e..000000000000
--- a/arch/ppc64/xmon/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1# Makefile for xmon
2
3EXTRA_CFLAGS += -mno-minimal-toc
4
5obj-y := start.o xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
diff --git a/arch/ppc64/xmon/setjmp.S b/arch/ppc64/xmon/setjmp.S
deleted file mode 100644
index 30ee643d557c..000000000000
--- a/arch/ppc64/xmon/setjmp.S
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * NOTE: assert(sizeof(buf) > 184)
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14_GLOBAL(xmon_setjmp)
15 mflr r0
16 std r0,0(r3)
17 std r1,8(r3)
18 std r2,16(r3)
19 mfcr r0
20 std r0,24(r3)
21 std r13,32(r3)
22 std r14,40(r3)
23 std r15,48(r3)
24 std r16,56(r3)
25 std r17,64(r3)
26 std r18,72(r3)
27 std r19,80(r3)
28 std r20,88(r3)
29 std r21,96(r3)
30 std r22,104(r3)
31 std r23,112(r3)
32 std r24,120(r3)
33 std r25,128(r3)
34 std r26,136(r3)
35 std r27,144(r3)
36 std r28,152(r3)
37 std r29,160(r3)
38 std r30,168(r3)
39 std r31,176(r3)
40 li r3,0
41 blr
42
43_GLOBAL(xmon_longjmp)
44 cmpdi r4,0
45 bne 1f
46 li r4,1
471: ld r13,32(r3)
48 ld r14,40(r3)
49 ld r15,48(r3)
50 ld r16,56(r3)
51 ld r17,64(r3)
52 ld r18,72(r3)
53 ld r19,80(r3)
54 ld r20,88(r3)
55 ld r21,96(r3)
56 ld r22,104(r3)
57 ld r23,112(r3)
58 ld r24,120(r3)
59 ld r25,128(r3)
60 ld r26,136(r3)
61 ld r27,144(r3)
62 ld r28,152(r3)
63 ld r29,160(r3)
64 ld r30,168(r3)
65 ld r31,176(r3)
66 ld r0,24(r3)
67 mtcrf 56,r0
68 ld r0,0(r3)
69 ld r1,8(r3)
70 ld r2,16(r3)
71 mtlr r0
72 mr r3,r4
73 blr
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c
index 24a1e9f069a7..6504c4e69986 100644
--- a/arch/s390/kernel/compat_ioctl.c
+++ b/arch/s390/kernel/compat_ioctl.c
@@ -18,6 +18,8 @@
18#include <asm/dasd.h> 18#include <asm/dasd.h>
19#include <asm/cmb.h> 19#include <asm/cmb.h>
20#include <asm/tape390.h> 20#include <asm/tape390.h>
21#include <asm/ccwdev.h>
22#include "../../../drivers/s390/char/raw3270.h"
21 23
22static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd, 24static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
23 unsigned long arg, struct file *f) 25 unsigned long arg, struct file *f)
@@ -62,6 +64,13 @@ COMPATIBLE_IOCTL(BIODASDCMFENABLE)
62COMPATIBLE_IOCTL(BIODASDCMFDISABLE) 64COMPATIBLE_IOCTL(BIODASDCMFDISABLE)
63COMPATIBLE_IOCTL(BIODASDREADALLCMB) 65COMPATIBLE_IOCTL(BIODASDREADALLCMB)
64 66
67COMPATIBLE_IOCTL(TUBICMD)
68COMPATIBLE_IOCTL(TUBOCMD)
69COMPATIBLE_IOCTL(TUBGETI)
70COMPATIBLE_IOCTL(TUBGETO)
71COMPATIBLE_IOCTL(TUBSETMOD)
72COMPATIBLE_IOCTL(TUBGETMOD)
73
65COMPATIBLE_IOCTL(TAPE390_DISPLAY) 74COMPATIBLE_IOCTL(TAPE390_DISPLAY)
66 75
67/* s390 doesn't need handlers here */ 76/* s390 doesn't need handlers here */
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 55654b6e16dc..039354d72348 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -485,7 +485,9 @@ start:
485# 485#
486 .org 0x10000 486 .org 0x10000
487startup:basr %r13,0 # get base 487startup:basr %r13,0 # get base
488.LPG1: lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 488.LPG1: l %r1, .Lget_ipl_device_addr-.LPG1(%r13)
489 basr %r14, %r1
490 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
489 la %r12,_pstart-.LPG1(%r13) # pointer to parameter area 491 la %r12,_pstart-.LPG1(%r13) # pointer to parameter area
490 # move IPL device to lowcore 492 # move IPL device to lowcore
491 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12) 493 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
@@ -560,6 +562,9 @@ startup:basr %r13,0 # get base
560 mr %r2,%r1 # mem size in bytes in %r3 562 mr %r2,%r1 # mem size in bytes in %r3
561 b .Lfchunk-.LPG1(%r13) 563 b .Lfchunk-.LPG1(%r13)
562 564
565 .align 4
566.Lget_ipl_device_addr:
567 .long .Lget_ipl_device
563.Lpmask: 568.Lpmask:
564 .byte 0 569 .byte 0
565.align 8 570.align 8
@@ -755,6 +760,63 @@ _pstart:
755 .global _pend 760 .global _pend
756_pend: 761_pend:
757 762
763.Lget_ipl_device:
764 basr %r12,0
765.LPG2: l %r1,0xb8 # get sid
766 sll %r1,15 # test if subchannel is enabled
767 srl %r1,31
768 ltr %r1,%r1
769 bz 0(%r14) # subchannel disabled
770 l %r1,0xb8
771 la %r5,.Lipl_schib-.LPG2(%r12)
772 stsch 0(%r5) # get schib of subchannel
773 bnz 0(%r14) # schib not available
774 tm 5(%r5),0x01 # devno valid?
775 bno 0(%r14)
776 la %r6,ipl_parameter_flags-.LPG2(%r12)
777 oi 3(%r6),0x01 # set flag
778 la %r2,ipl_devno-.LPG2(%r12)
779 mvc 0(2,%r2),6(%r5) # store devno
780 tm 4(%r5),0x80 # qdio capable device?
781 bno 0(%r14)
782 oi 3(%r6),0x02 # set flag
783
784 # copy ipl parameters
785
786 lhi %r0,4096
787 l %r2,20(%r0) # get address of parameter list
788 lhi %r3,IPL_PARMBLOCK_ORIGIN
789 st %r3,20(%r0)
790 lhi %r4,1
791 cr %r2,%r3 # start parameters < destination ?
792 jl 0f
793 lhi %r1,1 # copy direction is upwards
794 j 1f
7950: lhi %r1,-1 # copy direction is downwards
796 ar %r2,%r0
797 ar %r3,%r0
798 ar %r2,%r1
799 ar %r3,%r1
8001: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters
801 ar %r3,%r1
802 ar %r2,%r1
803 sr %r0,%r4
804 jne 1b
805 b 0(%r14)
806
807 .align 4
808.Lipl_schib:
809 .rept 13
810 .long 0
811 .endr
812
813 .globl ipl_parameter_flags
814ipl_parameter_flags:
815 .long 0
816 .globl ipl_devno
817ipl_devno:
818 .word 0
819
758#ifdef CONFIG_SHARED_KERNEL 820#ifdef CONFIG_SHARED_KERNEL
759 .org 0x100000 821 .org 0x100000
760#endif 822#endif
@@ -764,11 +826,11 @@ _pend:
764# 826#
765 .globl _stext 827 .globl _stext
766_stext: basr %r13,0 # get base 828_stext: basr %r13,0 # get base
767.LPG2: 829.LPG3:
768# 830#
769# Setup stack 831# Setup stack
770# 832#
771 l %r15,.Linittu-.LPG2(%r13) 833 l %r15,.Linittu-.LPG3(%r13)
772 mvc __LC_CURRENT(4),__TI_task(%r15) 834 mvc __LC_CURRENT(4),__TI_task(%r15)
773 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE 835 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
774 st %r15,__LC_KERNEL_STACK # set end of kernel stack 836 st %r15,__LC_KERNEL_STACK # set end of kernel stack
@@ -782,8 +844,8 @@ _stext: basr %r13,0 # get base
782 lctl %c0,%c15,0(%r15) 844 lctl %c0,%c15,0(%r15)
783 845
784# 846#
785 lam 0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess 847 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
786 l %r14,.Lstart-.LPG2(%r13) 848 l %r14,.Lstart-.LPG3(%r13)
787 basr %r14,%r14 # call start_kernel 849 basr %r14,%r14 # call start_kernel
788# 850#
789# We returned from start_kernel ?!? PANIK 851# We returned from start_kernel ?!? PANIK
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index c9ff0404c875..193aafa72f54 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -484,6 +484,8 @@ start:
484startup:basr %r13,0 # get base 484startup:basr %r13,0 # get base
485.LPG1: sll %r13,1 # remove high order bit 485.LPG1: sll %r13,1 # remove high order bit
486 srl %r13,1 486 srl %r13,1
487 l %r1,.Lget_ipl_device_addr-.LPG1(%r13)
488 basr %r14,%r1
487 lhi %r1,1 # mode 1 = esame 489 lhi %r1,1 # mode 1 = esame
488 slr %r0,%r0 # set cpuid to zero 490 slr %r0,%r0 # set cpuid to zero
489 sigp %r1,%r0,0x12 # switch to esame mode 491 sigp %r1,%r0,0x12 # switch to esame mode
@@ -556,6 +558,9 @@ startup:basr %r13,0 # get base
556 mlgr %r2,%r1 # mem size in bytes in %r3 558 mlgr %r2,%r1 # mem size in bytes in %r3
557 b .Lfchunk-.LPG1(%r13) 559 b .Lfchunk-.LPG1(%r13)
558 560
561 .align 4
562.Lget_ipl_device_addr:
563 .long .Lget_ipl_device
559.Lpmask: 564.Lpmask:
560 .byte 0 565 .byte 0
561 .align 8 566 .align 8
@@ -746,6 +751,63 @@ _pstart:
746 .global _pend 751 .global _pend
747_pend: 752_pend:
748 753
754.Lget_ipl_device:
755 basr %r12,0
756.LPG2: l %r1,0xb8 # get sid
757 sll %r1,15 # test if subchannel is enabled
758 srl %r1,31
759 ltr %r1,%r1
760 bz 0(%r14) # subchannel disabled
761 l %r1,0xb8
762 la %r5,.Lipl_schib-.LPG2(%r12)
763 stsch 0(%r5) # get schib of subchannel
764 bnz 0(%r14) # schib not available
765 tm 5(%r5),0x01 # devno valid?
766 bno 0(%r14)
767 la %r6,ipl_parameter_flags-.LPG2(%r12)
768 oi 3(%r6),0x01 # set flag
769 la %r2,ipl_devno-.LPG2(%r12)
770 mvc 0(2,%r2),6(%r5) # store devno
771 tm 4(%r5),0x80 # qdio capable device?
772 bno 0(%r14)
773 oi 3(%r6),0x02 # set flag
774
775 # copy ipl parameters
776
777 lhi %r0,4096
778 l %r2,20(%r0) # get address of parameter list
779 lhi %r3,IPL_PARMBLOCK_ORIGIN
780 st %r3,20(%r0)
781 lhi %r4,1
782 cr %r2,%r3 # start parameters < destination ?
783 jl 0f
784 lhi %r1,1 # copy direction is upwards
785 j 1f
7860: lhi %r1,-1 # copy direction is downwards
787 ar %r2,%r0
788 ar %r3,%r0
789 ar %r2,%r1
790 ar %r3,%r1
7911: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters
792 ar %r3,%r1
793 ar %r2,%r1
794 sr %r0,%r4
795 jne 1b
796 b 0(%r14)
797
798 .align 4
799.Lipl_schib:
800 .rept 13
801 .long 0
802 .endr
803
804 .globl ipl_parameter_flags
805ipl_parameter_flags:
806 .long 0
807 .globl ipl_devno
808ipl_devno:
809 .word 0
810
749#ifdef CONFIG_SHARED_KERNEL 811#ifdef CONFIG_SHARED_KERNEL
750 .org 0x100000 812 .org 0x100000
751#endif 813#endif
@@ -755,7 +817,7 @@ _pend:
755# 817#
756 .globl _stext 818 .globl _stext
757_stext: basr %r13,0 # get base 819_stext: basr %r13,0 # get base
758.LPG2: 820.LPG3:
759# 821#
760# Setup stack 822# Setup stack
761# 823#
@@ -774,7 +836,7 @@ _stext: basr %r13,0 # get base
774 lctlg %c0,%c15,0(%r15) 836 lctlg %c0,%c15,0(%r15)
775 837
776# 838#
777 lam 0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess 839 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
778 brasl %r14,start_kernel # go to C code 840 brasl %r14,start_kernel # go to C code
779# 841#
780# We returned from start_kernel ?!? PANIK 842# We returned from start_kernel ?!? PANIK
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5204778b8e5e..31e7b19348b7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -36,6 +36,7 @@
36#include <linux/console.h> 36#include <linux/console.h>
37#include <linux/seq_file.h> 37#include <linux/seq_file.h>
38#include <linux/kernel_stat.h> 38#include <linux/kernel_stat.h>
39#include <linux/device.h>
39 40
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
41#include <asm/system.h> 42#include <asm/system.h>
@@ -685,3 +686,188 @@ struct seq_operations cpuinfo_op = {
685 .show = show_cpuinfo, 686 .show = show_cpuinfo,
686}; 687};
687 688
689#define DEFINE_IPL_ATTR(_name, _format, _value) \
690static ssize_t ipl_##_name##_show(struct subsystem *subsys, \
691 char *page) \
692{ \
693 return sprintf(page, _format, _value); \
694} \
695static struct subsys_attribute ipl_##_name##_attr = \
696 __ATTR(_name, S_IRUGO, ipl_##_name##_show, NULL);
697
698DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", (unsigned long long)
699 IPL_PARMBLOCK_START->fcp.wwpn);
700DEFINE_IPL_ATTR(lun, "0x%016llx\n", (unsigned long long)
701 IPL_PARMBLOCK_START->fcp.lun);
702DEFINE_IPL_ATTR(bootprog, "%lld\n", (unsigned long long)
703 IPL_PARMBLOCK_START->fcp.bootprog);
704DEFINE_IPL_ATTR(br_lba, "%lld\n", (unsigned long long)
705 IPL_PARMBLOCK_START->fcp.br_lba);
706
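/*
 * For reference, a hand-expanded form of the DEFINE_IPL_ATTR() macro above
 * for the "wwpn" case looks roughly like the following (illustration only,
 * not additional code in the patch):
 *
 *   static ssize_t ipl_wwpn_show(struct subsystem *subsys, char *page)
 *   {
 *           return sprintf(page, "0x%016llx\n",
 *                          (unsigned long long) IPL_PARMBLOCK_START->fcp.wwpn);
 *   }
 *   static struct subsys_attribute ipl_wwpn_attr =
 *           __ATTR(wwpn, S_IRUGO, ipl_wwpn_show, NULL);
 *
 * which is what lets &ipl_wwpn_attr.attr be listed in ipl_fcp_attrs[] below.
 */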
707enum ipl_type_type {
708 ipl_type_unknown,
709 ipl_type_ccw,
710 ipl_type_fcp,
711};
712
713static enum ipl_type_type
714get_ipl_type(void)
715{
716 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
717
718 if (!IPL_DEVNO_VALID)
719 return ipl_type_unknown;
720 if (!IPL_PARMBLOCK_VALID)
721 return ipl_type_ccw;
722 if (ipl->hdr.header.version > IPL_MAX_SUPPORTED_VERSION)
723 return ipl_type_unknown;
724 if (ipl->fcp.pbt != IPL_TYPE_FCP)
725 return ipl_type_unknown;
726 return ipl_type_fcp;
727}
728
729static ssize_t
730ipl_type_show(struct subsystem *subsys, char *page)
731{
732 switch (get_ipl_type()) {
733 case ipl_type_ccw:
734 return sprintf(page, "ccw\n");
735 case ipl_type_fcp:
736 return sprintf(page, "fcp\n");
737 default:
738 return sprintf(page, "unknown\n");
739 }
740}
741
742static struct subsys_attribute ipl_type_attr = __ATTR_RO(ipl_type);
743
744static ssize_t
745ipl_device_show(struct subsystem *subsys, char *page)
746{
747 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
748
749 switch (get_ipl_type()) {
750 case ipl_type_ccw:
751 return sprintf(page, "0.0.%04x\n", ipl_devno);
752 case ipl_type_fcp:
753 return sprintf(page, "0.0.%04x\n", ipl->fcp.devno);
754 default:
755 return 0;
756 }
757}
758
759static struct subsys_attribute ipl_device_attr =
760 __ATTR(device, S_IRUGO, ipl_device_show, NULL);
761
762static struct attribute *ipl_fcp_attrs[] = {
763 &ipl_type_attr.attr,
764 &ipl_device_attr.attr,
765 &ipl_wwpn_attr.attr,
766 &ipl_lun_attr.attr,
767 &ipl_bootprog_attr.attr,
768 &ipl_br_lba_attr.attr,
769 NULL,
770};
771
772static struct attribute_group ipl_fcp_attr_group = {
773 .attrs = ipl_fcp_attrs,
774};
775
776static struct attribute *ipl_ccw_attrs[] = {
777 &ipl_type_attr.attr,
778 &ipl_device_attr.attr,
779 NULL,
780};
781
782static struct attribute_group ipl_ccw_attr_group = {
783 .attrs = ipl_ccw_attrs,
784};
785
786static struct attribute *ipl_unknown_attrs[] = {
787 &ipl_type_attr.attr,
788 NULL,
789};
790
791static struct attribute_group ipl_unknown_attr_group = {
792 .attrs = ipl_unknown_attrs,
793};
794
795static ssize_t
796ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
797{
798 unsigned int size = IPL_PARMBLOCK_SIZE;
799
800 if (off > size)
801 return 0;
802 if (off + count > size)
803 count = size - off;
804
805 memcpy(buf, (void *) IPL_PARMBLOCK_START + off, count);
806 return count;
807}
808
809static struct bin_attribute ipl_parameter_attr = {
810 .attr = {
811 .name = "binary_parameter",
812 .mode = S_IRUGO,
813 .owner = THIS_MODULE,
814 },
815 .size = PAGE_SIZE,
816 .read = &ipl_parameter_read,
817};
818
819static ssize_t
820ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
821{
822 unsigned int size = IPL_PARMBLOCK_START->fcp.scp_data_len;
823 void *scp_data = &IPL_PARMBLOCK_START->fcp.scp_data;
824
825 if (off > size)
826 return 0;
827 if (off + count > size)
828 count = size - off;
829
830 memcpy(buf, scp_data + off, count);
831 return count;
832}
833
834static struct bin_attribute ipl_scp_data_attr = {
835 .attr = {
836 .name = "scp_data",
837 .mode = S_IRUGO,
838 .owner = THIS_MODULE,
839 },
840 .size = PAGE_SIZE,
841 .read = &ipl_scp_data_read,
842};
843
844static decl_subsys(ipl, NULL, NULL);
845
846static int __init
847ipl_device_sysfs_register(void) {
848 int rc;
849
850 rc = firmware_register(&ipl_subsys);
851 if (rc)
852 return rc;
853
854 switch (get_ipl_type()) {
855 case ipl_type_ccw:
856 sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group);
857 break;
858 case ipl_type_fcp:
859 sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
860 sysfs_create_bin_file(&ipl_subsys.kset.kobj,
861 &ipl_parameter_attr);
862 sysfs_create_bin_file(&ipl_subsys.kset.kobj,
863 &ipl_scp_data_attr);
864 break;
865 default:
866 sysfs_create_group(&ipl_subsys.kset.kobj,
867 &ipl_unknown_attr_group);
868 break;
869 }
870 return 0;
871}
872
873__initcall(ipl_device_sysfs_register);
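Aside: the DEFINE_IPL_ATTR() macro added above stamps out one read-only show routine and one subsys_attribute per IPL field; ipl_device_sysfs_register() then exposes the group matching the detected IPL type under the firmware subsystem registered via firmware_register() (presumably /sys/firmware/ipl/). As a reference point, the wwpn instance would expand to roughly the following (inferred from the macro text above, not copied from the patch):

static ssize_t ipl_wwpn_show(struct subsystem *subsys, char *page)
{
	/* format the 64-bit WWPN exactly as the _format argument dictates */
	return sprintf(page, "0x%016llx\n",
		       (unsigned long long) IPL_PARMBLOCK_START->fcp.wwpn);
}
static struct subsys_attribute ipl_wwpn_attr =
	__ATTR(wwpn, S_IRUGO, ipl_wwpn_show, NULL);
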
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 2fd75da15495..9a1d95894f3d 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -49,10 +49,6 @@
49 49
50#define TICK_SIZE tick 50#define TICK_SIZE tick
51 51
52u64 jiffies_64 = INITIAL_JIFFIES;
53
54EXPORT_SYMBOL(jiffies_64);
55
56static ext_int_info_t ext_int_info_cc; 52static ext_int_info_t ext_int_info_cc;
57static u64 init_timer_cc; 53static u64 init_timer_cc;
58static u64 jiffies_timer_cc; 54static u64 jiffies_timer_cc;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index fa0726507b3d..22a895ecb7a4 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -24,7 +24,6 @@
24#include <asm/s390_ext.h> 24#include <asm/s390_ext.h>
25#include <asm/timer.h> 25#include <asm/timer.h>
26 26
27#define VTIMER_MAGIC (TIMER_MAGIC + 1)
28static ext_int_info_t ext_int_info_timer; 27static ext_int_info_t ext_int_info_timer;
29DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 28DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
30 29
@@ -277,20 +276,12 @@ static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
277 276
278void init_virt_timer(struct vtimer_list *timer) 277void init_virt_timer(struct vtimer_list *timer)
279{ 278{
280 timer->magic = VTIMER_MAGIC;
281 timer->function = NULL; 279 timer->function = NULL;
282 INIT_LIST_HEAD(&timer->entry); 280 INIT_LIST_HEAD(&timer->entry);
283 spin_lock_init(&timer->lock); 281 spin_lock_init(&timer->lock);
284} 282}
285EXPORT_SYMBOL(init_virt_timer); 283EXPORT_SYMBOL(init_virt_timer);
286 284
287static inline int check_vtimer(struct vtimer_list *timer)
288{
289 if (timer->magic != VTIMER_MAGIC)
290 return -EINVAL;
291 return 0;
292}
293
294static inline int vtimer_pending(struct vtimer_list *timer) 285static inline int vtimer_pending(struct vtimer_list *timer)
295{ 286{
296 return (!list_empty(&timer->entry)); 287 return (!list_empty(&timer->entry));
@@ -346,7 +337,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
346 337
347static inline int prepare_vtimer(struct vtimer_list *timer) 338static inline int prepare_vtimer(struct vtimer_list *timer)
348{ 339{
349 if (check_vtimer(timer) || !timer->function) { 340 if (!timer->function) {
350 printk("add_virt_timer: uninitialized timer\n"); 341 printk("add_virt_timer: uninitialized timer\n");
351 return -EINVAL; 342 return -EINVAL;
352 } 343 }
@@ -414,7 +405,7 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
414 unsigned long flags; 405 unsigned long flags;
415 int cpu; 406 int cpu;
416 407
417 if (check_vtimer(timer) || !timer->function) { 408 if (!timer->function) {
418 printk("mod_virt_timer: uninitialized timer\n"); 409 printk("mod_virt_timer: uninitialized timer\n");
419 return -EINVAL; 410 return -EINVAL;
420 } 411 }
@@ -481,11 +472,6 @@ int del_virt_timer(struct vtimer_list *timer)
481 unsigned long flags; 472 unsigned long flags;
482 struct vtimer_queue *vt_list; 473 struct vtimer_queue *vt_list;
483 474
484 if (check_vtimer(timer)) {
485 printk("del_virt_timer: timer not initialized\n");
486 return -EINVAL;
487 }
488
489 /* check if timer is pending */ 475 /* check if timer is pending */
490 if (!vtimer_pending(timer)) 476 if (!vtimer_pending(timer))
491 return 0; 477 return 0;
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 71a6d4e7809f..6e3b58bd8795 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sysdev.h> 14#include <linux/sysdev.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/string.h>
16#include <asm/dma.h> 17#include <asm/dma.h>
17 18
18static struct sysdev_class dma_sysclass = { 19static struct sysdev_class dma_sysclass = {
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index e0b384bef55f..47abf6e49dfb 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/cpumask.h> 21#include <linux/cpumask.h>
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/sched.h> /* set_cpus_allowed() */
23 24
24#include <asm/processor.h> 25#include <asm/processor.h>
25#include <asm/watchdog.h> 26#include <asm/watchdog.h>
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index b28919b65682..1fbe5a428e31 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -80,7 +80,7 @@ void ptrace_disable(struct task_struct *child)
80 /* nothing to do.. */ 80 /* nothing to do.. */
81} 81}
82 82
83asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 83asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
84{ 84{
85 struct task_struct *child; 85 struct task_struct *child;
86 struct user * dummy = NULL; 86 struct user * dummy = NULL;
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 02ca69918d7c..671b876416bf 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -56,10 +56,6 @@ extern unsigned long wall_jiffies;
56#define TICK_SIZE (tick_nsec / 1000) 56#define TICK_SIZE (tick_nsec / 1000)
57DEFINE_SPINLOCK(tmu0_lock); 57DEFINE_SPINLOCK(tmu0_lock);
58 58
59u64 jiffies_64 = INITIAL_JIFFIES;
60
61EXPORT_SYMBOL(jiffies_64);
62
63/* XXX: Can we initialize this in a routine somewhere? Dreamcast doesn't want 59/* XXX: Can we initialize this in a routine somewhere? Dreamcast doesn't want
64 * these routines anywhere... */ 60 * these routines anywhere... */
65#ifdef CONFIG_SH_RTC 61#ifdef CONFIG_SH_RTC
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
index fd2000956dae..71f2eec00b99 100644
--- a/arch/sh64/kernel/ptrace.c
+++ b/arch/sh64/kernel/ptrace.c
@@ -121,7 +121,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
121 return 0; 121 return 0;
122} 122}
123 123
124asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 124asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
125{ 125{
126 struct task_struct *child; 126 struct task_struct *child;
127 extern void poke_real_address_q(unsigned long long addr, unsigned long long data); 127 extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
index 43e395a14f49..870fe5327e09 100644
--- a/arch/sh64/kernel/time.c
+++ b/arch/sh64/kernel/time.c
@@ -116,8 +116,6 @@
116 116
117extern unsigned long wall_jiffies; 117extern unsigned long wall_jiffies;
118 118
119u64 jiffies_64 = INITIAL_JIFFIES;
120
121static unsigned long tmu_base, rtc_base; 119static unsigned long tmu_base, rtc_base;
122unsigned long cprc_base; 120unsigned long cprc_base;
123 121
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 36a40697b8d6..25e31d5ec99b 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -497,8 +497,8 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
497 * CheerIO makes a similar conversion. 497 * CheerIO makes a similar conversion.
498 * See ebus.c for details. 498 * See ebus.c for details.
499 * 499 *
500 * Note that check_region()/request_region() 500 * Note that request_region()
501 * work for these devices. 501 * works for these devices.
502 * 502 *
503 * XXX Neat trick, but it's a *bad* idea 503 * XXX Neat trick, but it's a *bad* idea
504 * to shit into regions like that. 504 * to shit into regions like that.
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 279a62627c10..24814d58f9e1 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -45,10 +45,6 @@
45 45
46extern unsigned long wall_jiffies; 46extern unsigned long wall_jiffies;
47 47
48u64 jiffies_64 = INITIAL_JIFFIES;
49
50EXPORT_SYMBOL(jiffies_64);
51
52DEFINE_SPINLOCK(rtc_lock); 48DEFINE_SPINLOCK(rtc_lock);
53enum sparc_clock_type sp_clock_typ; 49enum sparc_clock_type sp_clock_typ;
54DEFINE_SPINLOCK(mostek_lock); 50DEFINE_SPINLOCK(mostek_lock);
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index 43fc3173d480..e6a00325075a 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -475,9 +475,6 @@ IOCTL_TABLE_START
475#include <linux/compat_ioctl.h> 475#include <linux/compat_ioctl.h>
476#define DECLARES 476#define DECLARES
477#include "compat_ioctl.c" 477#include "compat_ioctl.c"
478COMPATIBLE_IOCTL(TIOCSTART)
479COMPATIBLE_IOCTL(TIOCSTOP)
480COMPATIBLE_IOCTL(TIOCSLTC)
481COMPATIBLE_IOCTL(FBIOGTYPE) 478COMPATIBLE_IOCTL(FBIOGTYPE)
482COMPATIBLE_IOCTL(FBIOSATTR) 479COMPATIBLE_IOCTL(FBIOSATTR)
483COMPATIBLE_IOCTL(FBIOGATTR) 480COMPATIBLE_IOCTL(FBIOGATTR)
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 3f08a32f51a1..38c5525087a2 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -55,10 +55,6 @@ unsigned long ds1287_regs = 0UL;
55 55
56extern unsigned long wall_jiffies; 56extern unsigned long wall_jiffies;
57 57
58u64 jiffies_64 = INITIAL_JIFFIES;
59
60EXPORT_SYMBOL(jiffies_64);
61
62static void __iomem *mstk48t08_regs; 58static void __iomem *mstk48t08_regs;
63static void __iomem *mstk48t59_regs; 59static void __iomem *mstk48t59_regs;
64 60
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 684e1f8b2755..cd06ed7d842d 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -27,10 +27,6 @@ config UID16
27 bool 27 bool
28 default y 28 default y
29 29
30config RWSEM_GENERIC_SPINLOCK
31 bool
32 default y
33
34config GENERIC_CALIBRATE_DELAY 30config GENERIC_CALIBRATE_DELAY
35 bool 31 bool
36 default y 32 default y
@@ -40,6 +36,12 @@ config IRQ_RELEASE_METHOD
40 bool 36 bool
41 default y 37 default y
42 38
39menu "Host processor type and features"
40
41source "arch/i386/Kconfig.cpu"
42
43endmenu
44
43menu "UML-specific options" 45menu "UML-specific options"
44 46
45config MODE_TT 47config MODE_TT
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index bd35e59419c8..aae19bc4b06a 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -6,6 +6,11 @@ config 64BIT
6 bool 6 bool
7 default y 7 default y
8 8
9#XXX: this is so in the underlying arch, but it's wrong!!!
10config RWSEM_GENERIC_SPINLOCK
11 bool
12 default y
13
9config SEMAPHORE_SLEEPERS 14config SEMAPHORE_SLEEPERS
10 bool 15 bool
11 default y 16 default y
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 2ee8a2858117..aef7c50f8e13 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -29,6 +29,12 @@ endif
29 29
30CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH) 30CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH)
31 31
32ifneq ($(CONFIG_GPROF),y) 32# First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
33ARCH_CFLAGS += -DUM_FASTCALL 33include $(srctree)/arch/i386/Makefile.cpu
34endif 34
35# prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
36cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
37
38CFLAGS += $(cflags-y)
39USER_CFLAGS += $(cflags-y)
40
diff --git a/arch/um/include/sysdep-i386/syscalls.h b/arch/um/include/sysdep-i386/syscalls.h
index a0d5b74d3731..57bd79efbee3 100644
--- a/arch/um/include/sysdep-i386/syscalls.h
+++ b/arch/um/include/sysdep-i386/syscalls.h
@@ -11,7 +11,6 @@ typedef long syscall_handler_t(struct pt_regs);
11/* Not declared on x86, incompatible declarations on x86_64, so these have 11/* Not declared on x86, incompatible declarations on x86_64, so these have
12 * to go here rather than in sys_call_table.c 12 * to go here rather than in sys_call_table.c
13 */ 13 */
14extern syscall_handler_t sys_ptrace;
15extern syscall_handler_t sys_rt_sigaction; 14extern syscall_handler_t sys_rt_sigaction;
16 15
17extern syscall_handler_t old_mmap_i386; 16extern syscall_handler_t old_mmap_i386;
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 4e08f7545d63..020ca79b8d33 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -22,10 +22,6 @@
22#include "mode.h" 22#include "mode.h"
23#include "os.h" 23#include "os.h"
24 24
25u64 jiffies_64 = INITIAL_JIFFIES;
26
27EXPORT_SYMBOL(jiffies_64);
28
29int hz(void) 25int hz(void)
30{ 26{
31 return(HZ); 27 return(HZ);
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
index 4726b87f5e5a..d6077ff47d22 100644
--- a/arch/v850/kernel/ptrace.c
+++ b/arch/v850/kernel/ptrace.c
@@ -113,7 +113,7 @@ static int set_single_step (struct task_struct *t, int val)
113 return 1; 113 return 1;
114} 114}
115 115
116int sys_ptrace(long request, long pid, long addr, long data) 116long sys_ptrace(long request, long pid, long addr, long data)
117{ 117{
118 struct task_struct *child; 118 struct task_struct *child;
119 int rval; 119 int rval;
diff --git a/arch/v850/kernel/time.c b/arch/v850/kernel/time.c
index ea3fd8844ff0..c1e85c2aef65 100644
--- a/arch/v850/kernel/time.c
+++ b/arch/v850/kernel/time.c
@@ -26,10 +26,6 @@
26 26
27#include "mach.h" 27#include "mach.h"
28 28
29u64 jiffies_64 = INITIAL_JIFFIES;
30
31EXPORT_SYMBOL(jiffies_64);
32
33#define TICK_SIZE (tick_nsec / 1000) 29#define TICK_SIZE (tick_nsec / 1000)
34 30
35/* 31/*
diff --git a/arch/x86_64/ia32/ia32_ioctl.c b/arch/x86_64/ia32/ia32_ioctl.c
index 419758f19ca4..4ba0e293d5e5 100644
--- a/arch/x86_64/ia32/ia32_ioctl.c
+++ b/arch/x86_64/ia32/ia32_ioctl.c
@@ -12,40 +12,11 @@
12#define INCLUDES 12#define INCLUDES
13#include <linux/syscalls.h> 13#include <linux/syscalls.h>
14#include "compat_ioctl.c" 14#include "compat_ioctl.c"
15#include <asm/mtrr.h>
16#include <asm/ia32.h> 15#include <asm/ia32.h>
17 16
18#define CODE 17#define CODE
19#include "compat_ioctl.c" 18#include "compat_ioctl.c"
20 19
21#ifndef TIOCGDEV
22#define TIOCGDEV _IOR('T',0x32, unsigned int)
23#endif
24static int tiocgdev(unsigned fd, unsigned cmd, unsigned int __user *ptr)
25{
26
27 struct file *file;
28 struct tty_struct *real_tty;
29 int fput_needed, ret;
30
31 file = fget_light(fd, &fput_needed);
32 if (!file)
33 return -EBADF;
34
35 ret = -EINVAL;
36 if (file->f_op->ioctl != tty_ioctl)
37 goto out;
38 real_tty = (struct tty_struct *)file->private_data;
39 if (!real_tty)
40 goto out;
41
42 ret = put_user(new_encode_dev(tty_devnum(real_tty)), ptr);
43
44out:
45 fput_light(file, fput_needed);
46 return ret;
47}
48
49#define RTC_IRQP_READ32 _IOR('p', 0x0b, unsigned int) /* Read IRQ rate */ 20#define RTC_IRQP_READ32 _IOR('p', 0x0b, unsigned int) /* Read IRQ rate */
50#define RTC_IRQP_SET32 _IOW('p', 0x0c, unsigned int) /* Set IRQ rate */ 21#define RTC_IRQP_SET32 _IOW('p', 0x0c, unsigned int) /* Set IRQ rate */
51#define RTC_EPOCH_READ32 _IOR('p', 0x0d, unsigned) /* Read epoch */ 22#define RTC_EPOCH_READ32 _IOR('p', 0x0d, unsigned) /* Read epoch */
@@ -85,90 +56,6 @@ static int rtc32_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
85 return sys_ioctl(fd,cmd,arg); 56 return sys_ioctl(fd,cmd,arg);
86} 57}
87 58
88/* /proc/mtrr ioctls */
89
90
91struct mtrr_sentry32
92{
93 compat_ulong_t base; /* Base address */
94 compat_uint_t size; /* Size of region */
95 compat_uint_t type; /* Type of region */
96};
97
98struct mtrr_gentry32
99{
100 compat_ulong_t regnum; /* Register number */
101 compat_uint_t base; /* Base address */
102 compat_uint_t size; /* Size of region */
103 compat_uint_t type; /* Type of region */
104};
105
106#define MTRR_IOCTL_BASE 'M'
107
108#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
109#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
110#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
111#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
112#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
113#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
114#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
115#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
116#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
117#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
118
119
120static int mtrr_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg)
121{
122 struct mtrr_gentry g;
123 struct mtrr_sentry s;
124 int get = 0, err = 0;
125 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)arg;
126 mm_segment_t oldfs = get_fs();
127
128 switch (cmd) {
129#define SET(x) case MTRRIOC32_ ## x ## _ENTRY: cmd = MTRRIOC_ ## x ## _ENTRY; break
130#define GET(x) case MTRRIOC32_ ## x ## _ENTRY: cmd = MTRRIOC_ ## x ## _ENTRY; get=1; break
131 SET(ADD);
132 SET(SET);
133 SET(DEL);
134 GET(GET);
135 SET(KILL);
136 SET(ADD_PAGE);
137 SET(SET_PAGE);
138 SET(DEL_PAGE);
139 GET(GET_PAGE);
140 SET(KILL_PAGE);
141 }
142
143 if (get) {
144 err = get_user(g.regnum, &g32->regnum);
145 err |= get_user(g.base, &g32->base);
146 err |= get_user(g.size, &g32->size);
147 err |= get_user(g.type, &g32->type);
148
149 arg = (unsigned long)&g;
150 } else {
151 struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)arg;
152 err = get_user(s.base, &s32->base);
153 err |= get_user(s.size, &s32->size);
154 err |= get_user(s.type, &s32->type);
155
156 arg = (unsigned long)&s;
157 }
158 if (err) return err;
159
160 set_fs(KERNEL_DS);
161 err = sys_ioctl(fd, cmd, arg);
162 set_fs(oldfs);
163
164 if (!err && get) {
165 err = put_user(g.base, &g32->base);
166 err |= put_user(g.size, &g32->size);
167 err |= put_user(g.regnum, &g32->regnum);
168 err |= put_user(g.type, &g32->type);
169 }
170 return err;
171}
172 59
173#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler) }, 60#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler) },
174#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl) 61#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl)
@@ -185,7 +72,6 @@ COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain *
185COMPATIBLE_IOCTL(FIOQSIZE) 72COMPATIBLE_IOCTL(FIOQSIZE)
186 73
187/* And these ioctls need translation */ 74/* And these ioctls need translation */
188HANDLE_IOCTL(TIOCGDEV, tiocgdev)
189/* realtime device */ 75/* realtime device */
190HANDLE_IOCTL(RTC_IRQP_READ, rtc32_ioctl) 76HANDLE_IOCTL(RTC_IRQP_READ, rtc32_ioctl)
191HANDLE_IOCTL(RTC_IRQP_READ32,rtc32_ioctl) 77HANDLE_IOCTL(RTC_IRQP_READ32,rtc32_ioctl)
@@ -193,17 +79,6 @@ HANDLE_IOCTL(RTC_IRQP_SET32, rtc32_ioctl)
193HANDLE_IOCTL(RTC_EPOCH_READ32, rtc32_ioctl) 79HANDLE_IOCTL(RTC_EPOCH_READ32, rtc32_ioctl)
194HANDLE_IOCTL(RTC_EPOCH_SET32, rtc32_ioctl) 80HANDLE_IOCTL(RTC_EPOCH_SET32, rtc32_ioctl)
195/* take care of sizeof(sizeof()) breakage */ 81/* take care of sizeof(sizeof()) breakage */
196/* mtrr */
197HANDLE_IOCTL(MTRRIOC32_ADD_ENTRY, mtrr_ioctl32)
198HANDLE_IOCTL(MTRRIOC32_SET_ENTRY, mtrr_ioctl32)
199HANDLE_IOCTL(MTRRIOC32_DEL_ENTRY, mtrr_ioctl32)
200HANDLE_IOCTL(MTRRIOC32_GET_ENTRY, mtrr_ioctl32)
201HANDLE_IOCTL(MTRRIOC32_KILL_ENTRY, mtrr_ioctl32)
202HANDLE_IOCTL(MTRRIOC32_ADD_PAGE_ENTRY, mtrr_ioctl32)
203HANDLE_IOCTL(MTRRIOC32_SET_PAGE_ENTRY, mtrr_ioctl32)
204HANDLE_IOCTL(MTRRIOC32_DEL_PAGE_ENTRY, mtrr_ioctl32)
205HANDLE_IOCTL(MTRRIOC32_GET_PAGE_ENTRY, mtrr_ioctl32)
206HANDLE_IOCTL(MTRRIOC32_KILL_PAGE_ENTRY, mtrr_ioctl32)
207}; 82};
208 83
209int ioctl_table_size = ARRAY_SIZE(ioctl_start); 84int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index b2a238b5a17e..c6c9791d77c1 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -494,7 +494,7 @@ void invalidate_interrupt7(void);
494void thermal_interrupt(void); 494void thermal_interrupt(void);
495void i8254_timer_resume(void); 495void i8254_timer_resume(void);
496 496
497static void setup_timer(void) 497static void setup_timer_hardware(void)
498{ 498{
499 outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */ 499 outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */
500 udelay(10); 500 udelay(10);
@@ -505,13 +505,13 @@ static void setup_timer(void)
505 505
506static int timer_resume(struct sys_device *dev) 506static int timer_resume(struct sys_device *dev)
507{ 507{
508 setup_timer(); 508 setup_timer_hardware();
509 return 0; 509 return 0;
510} 510}
511 511
512void i8254_timer_resume(void) 512void i8254_timer_resume(void)
513{ 513{
514 setup_timer(); 514 setup_timer_hardware();
515} 515}
516 516
517static struct sysdev_class timer_sysclass = { 517static struct sysdev_class timer_sysclass = {
@@ -594,7 +594,7 @@ void __init init_IRQ(void)
594 * Set the clock to HZ Hz, we already have a valid 594 * Set the clock to HZ Hz, we already have a valid
595 * vector now: 595 * vector now:
596 */ 596 */
597 setup_timer(); 597 setup_timer_hardware();
598 598
599 if (!acpi_ioapic) 599 if (!acpi_ioapic)
600 setup_irq(2, &irq2); 600 setup_irq(2, &irq2);
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index cb28df14ff6f..da0bc3e7bdf5 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -1213,7 +1213,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1213 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1213 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1214 1214
1215 /* Intel-defined (#2) */ 1215 /* Intel-defined (#2) */
1216 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est", 1216 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
1217 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, 1217 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1218 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1218 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1219 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1219 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index f066c6ab3618..fd2bef780882 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -63,13 +63,12 @@ void save_processor_state(void)
63 __save_processor_state(&saved_context); 63 __save_processor_state(&saved_context);
64} 64}
65 65
66static void 66static void do_fpu_end(void)
67do_fpu_end(void)
68{ 67{
69 /* restore FPU regs if necessary */ 68 /*
70 /* Do it out of line so that gcc does not move cr0 load to some stupid place */ 69 * Restore FPU regs if necessary
71 kernel_fpu_end(); 70 */
72 mxcsr_feature_mask_init(); 71 kernel_fpu_end();
73} 72}
74 73
75void __restore_processor_state(struct saved_context *ctxt) 74void __restore_processor_state(struct saved_context *ctxt)
@@ -148,57 +147,7 @@ extern int restore_image(void);
148 147
149pgd_t *temp_level4_pgt; 148pgd_t *temp_level4_pgt;
150 149
151static void **pages; 150static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
152
153static inline void *__add_page(void)
154{
155 void **c;
156
157 c = (void **)get_usable_page(GFP_ATOMIC);
158 if (c) {
159 *c = pages;
160 pages = c;
161 }
162 return c;
163}
164
165static inline void *__next_page(void)
166{
167 void **c;
168
169 c = pages;
170 if (c) {
171 pages = *c;
172 *c = NULL;
173 }
174 return c;
175}
176
177/*
178 * Try to allocate as many usable pages as needed and daisy chain them.
179 * If one allocation fails, free the pages allocated so far
180 */
181static int alloc_usable_pages(unsigned long n)
182{
183 void *p;
184
185 pages = NULL;
186 do
187 if (!__add_page())
188 break;
189 while (--n);
190 if (n) {
191 p = __next_page();
192 while (p) {
193 free_page((unsigned long)p);
194 p = __next_page();
195 }
196 return -ENOMEM;
197 }
198 return 0;
199}
200
201static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
202{ 151{
203 long i, j; 152 long i, j;
204 153
@@ -212,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
212 if (paddr >= end) 161 if (paddr >= end)
213 break; 162 break;
214 163
215 pmd = (pmd_t *)__next_page(); 164 pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
165 if (!pmd)
166 return -ENOMEM;
216 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 167 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
217 for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { 168 for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
218 unsigned long pe; 169 unsigned long pe;
@@ -224,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
224 set_pmd(pmd, __pmd(pe)); 175 set_pmd(pmd, __pmd(pe));
225 } 176 }
226 } 177 }
178 return 0;
227} 179}
228 180
229static void set_up_temporary_mappings(void) 181static int set_up_temporary_mappings(void)
230{ 182{
231 unsigned long start, end, next; 183 unsigned long start, end, next;
184 int error;
232 185
233 temp_level4_pgt = (pgd_t *)__next_page(); 186 temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
187 if (!temp_level4_pgt)
188 return -ENOMEM;
234 189
235 /* It is safe to reuse the original kernel mapping */ 190 /* It is safe to reuse the original kernel mapping */
236 set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), 191 set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
@@ -241,29 +196,27 @@ static void set_up_temporary_mappings(void)
241 end = (unsigned long)pfn_to_kaddr(end_pfn); 196 end = (unsigned long)pfn_to_kaddr(end_pfn);
242 197
243 for (; start < end; start = next) { 198 for (; start < end; start = next) {
244 pud_t *pud = (pud_t *)__next_page(); 199 pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
200 if (!pud)
201 return -ENOMEM;
245 next = start + PGDIR_SIZE; 202 next = start + PGDIR_SIZE;
246 if (next > end) 203 if (next > end)
247 next = end; 204 next = end;
248 res_phys_pud_init(pud, __pa(start), __pa(next)); 205 if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
206 return error;
249 set_pgd(temp_level4_pgt + pgd_index(start), 207 set_pgd(temp_level4_pgt + pgd_index(start),
250 mk_kernel_pgd(__pa(pud))); 208 mk_kernel_pgd(__pa(pud)));
251 } 209 }
210 return 0;
252} 211}
253 212
254int swsusp_arch_resume(void) 213int swsusp_arch_resume(void)
255{ 214{
256 unsigned long n; 215 int error;
257 216
258 n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
259 n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
260 pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
261 if (alloc_usable_pages(n)) {
262 free_eaten_memory();
263 return -ENOMEM;
264 }
265 /* We have got enough memory and from now on we cannot recover */ 217 /* We have got enough memory and from now on we cannot recover */
266 set_up_temporary_mappings(); 218 if ((error = set_up_temporary_mappings()))
219 return error;
267 restore_image(); 220 restore_image();
268 return 0; 221 return 0;
269} 222}
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 703acde2a1a5..fdaddc4e5284 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -42,10 +42,6 @@
42#include <asm/apic.h> 42#include <asm/apic.h>
43#endif 43#endif
44 44
45u64 jiffies_64 = INITIAL_JIFFIES;
46
47EXPORT_SYMBOL(jiffies_64);
48
49#ifdef CONFIG_CPU_FREQ 45#ifdef CONFIG_CPU_FREQ
50static void cpufreq_delayed_get(void); 46static void cpufreq_delayed_get(void);
51#endif 47#endif
@@ -481,9 +477,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
481static unsigned int cyc2ns_scale; 477static unsigned int cyc2ns_scale;
482#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 478#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
483 479
484static inline void set_cyc2ns_scale(unsigned long cpu_mhz) 480static inline void set_cyc2ns_scale(unsigned long cpu_khz)
485{ 481{
486 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; 482 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
487} 483}
488 484
489static inline unsigned long long cycles_2_ns(unsigned long long cyc) 485static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -655,7 +651,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
655 vxtime.tsc_quot = (1000L << 32) / cpu_khz; 651 vxtime.tsc_quot = (1000L << 32) / cpu_khz;
656 } 652 }
657 653
658 set_cyc2ns_scale(cpu_khz_ref / 1000); 654 set_cyc2ns_scale(cpu_khz_ref);
659 655
660 return 0; 656 return 0;
661} 657}
@@ -939,7 +935,7 @@ void __init time_init(void)
939 rdtscll_sync(&vxtime.last_tsc); 935 rdtscll_sync(&vxtime.last_tsc);
940 setup_irq(0, &irq0); 936 setup_irq(0, &irq0);
941 937
942 set_cyc2ns_scale(cpu_khz / 1000); 938 set_cyc2ns_scale(cpu_khz);
943 939
944#ifndef CONFIG_SMP 940#ifndef CONFIG_SMP
945 time_init_gtod(); 941 time_init_gtod();
@@ -1093,6 +1089,7 @@ static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
1093static unsigned long PIE_count; 1089static unsigned long PIE_count;
1094 1090
1095static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ 1091static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
1092static unsigned int hpet_t1_cmp; /* cached comparator register */
1096 1093
1097int is_hpet_enabled(void) 1094int is_hpet_enabled(void)
1098{ 1095{
@@ -1129,10 +1126,12 @@ int hpet_rtc_timer_init(void)
1129 cnt = hpet_readl(HPET_COUNTER); 1126 cnt = hpet_readl(HPET_COUNTER);
1130 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); 1127 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
1131 hpet_writel(cnt, HPET_T1_CMP); 1128 hpet_writel(cnt, HPET_T1_CMP);
1129 hpet_t1_cmp = cnt;
1132 local_irq_restore(flags); 1130 local_irq_restore(flags);
1133 1131
1134 cfg = hpet_readl(HPET_T1_CFG); 1132 cfg = hpet_readl(HPET_T1_CFG);
1135 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT; 1133 cfg &= ~HPET_TN_PERIODIC;
1134 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
1136 hpet_writel(cfg, HPET_T1_CFG); 1135 hpet_writel(cfg, HPET_T1_CFG);
1137 1136
1138 return 1; 1137 return 1;
@@ -1142,8 +1141,12 @@ static void hpet_rtc_timer_reinit(void)
1142{ 1141{
1143 unsigned int cfg, cnt; 1142 unsigned int cfg, cnt;
1144 1143
1145 if (!(PIE_on | AIE_on | UIE_on)) 1144 if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
1145 cfg = hpet_readl(HPET_T1_CFG);
1146 cfg &= ~HPET_TN_ENABLE;
1147 hpet_writel(cfg, HPET_T1_CFG);
1146 return; 1148 return;
1149 }
1147 1150
1148 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) 1151 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
1149 hpet_rtc_int_freq = PIE_freq; 1152 hpet_rtc_int_freq = PIE_freq;
@@ -1151,15 +1154,10 @@ static void hpet_rtc_timer_reinit(void)
1151 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; 1154 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
1152 1155
1153 /* It is more accurate to use the comparator value than current count.*/ 1156 /* It is more accurate to use the comparator value than current count.*/
1154 cnt = hpet_readl(HPET_T1_CMP); 1157 cnt = hpet_t1_cmp;
1155 cnt += hpet_tick*HZ/hpet_rtc_int_freq; 1158 cnt += hpet_tick*HZ/hpet_rtc_int_freq;
1156 hpet_writel(cnt, HPET_T1_CMP); 1159 hpet_writel(cnt, HPET_T1_CMP);
1157 1160 hpet_t1_cmp = cnt;
1158 cfg = hpet_readl(HPET_T1_CFG);
1159 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
1160 hpet_writel(cfg, HPET_T1_CFG);
1161
1162 return;
1163} 1161}
1164 1162
1165/* 1163/*
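Aside: the set_cyc2ns_scale() change above feeds the scale computation with cpu_khz instead of cpu_khz/1000, so the TSC-to-nanoseconds factor no longer drops the sub-MHz part of the CPU clock. A minimal sketch of the resulting fixed-point conversion, assuming cycles_2_ns() (whose body is not shown in this hunk) applies the scale with the usual shift:

#define CYC2NS_SCALE_FACTOR 10			/* 2^10, as in the hunk above */

static unsigned int cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long cpu_khz)
{
	/* e.g. cpu_khz = 2000000 (2 GHz): (1000000 << 10) / 2000000 = 512 */
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	/* 1000 cycles at 2 GHz: (1000 * 512) >> 10 = 500 ns */
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
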
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index 03674daabc66..a17930747f20 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -18,6 +18,7 @@
18#include <linux/time.h> 18#include <linux/time.h>
19#include <asm/platform.h> 19#include <asm/platform.h>
20#include <asm/timex.h> 20#include <asm/timex.h>
21#include <asm/param.h> /* HZ */
21 22
22#define _F(r,f,a,b) \ 23#define _F(r,f,a,b) \
23 r __platform_##f a b; \ 24 r __platform_##f a b; \
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 2659efdd4e99..14460743de07 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -45,7 +45,7 @@ void ptrace_disable(struct task_struct *child)
45 /* Nothing to do.. */ 45 /* Nothing to do.. */
46} 46}
47 47
48int sys_ptrace(long request, long pid, long addr, long data) 48long sys_ptrace(long request, long pid, long addr, long data)
49{ 49{
50 struct task_struct *child; 50 struct task_struct *child;
51 int ret = -EPERM; 51 int ret = -EPERM;
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 8e423d1335ce..cb6e38ed2b1d 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -29,9 +29,6 @@
29 29
30extern volatile unsigned long wall_jiffies; 30extern volatile unsigned long wall_jiffies;
31 31
32u64 jiffies_64 = INITIAL_JIFFIES;
33EXPORT_SYMBOL(jiffies_64);
34
35spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; 32spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
36EXPORT_SYMBOL(rtc_lock); 33EXPORT_SYMBOL(rtc_lock);
37 34
diff --git a/drivers/acorn/char/pcf8583.c b/drivers/acorn/char/pcf8583.c
index 2b850e5860a0..e26f007a1417 100644
--- a/drivers/acorn/char/pcf8583.c
+++ b/drivers/acorn/char/pcf8583.c
@@ -9,6 +9,7 @@
9 * 9 *
10 * Driver for PCF8583 RTC & RAM chip 10 * Driver for PCF8583 RTC & RAM chip
11 */ 11 */
12#include <linux/module.h>
12#include <linux/i2c.h> 13#include <linux/i2c.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/string.h> 15#include <linux/string.h>
@@ -32,7 +33,8 @@ static struct i2c_client_address_data addr_data = {
32 .forces = forces, 33 .forces = forces,
33}; 34};
34 35
35#define DAT(x) ((unsigned int)(x->dev.driver_data)) 36#define set_ctrl(x, v) i2c_set_clientdata(x, (void *)(unsigned int)(v))
37#define get_ctrl(x) ((unsigned int)i2c_get_clientdata(x))
36 38
37static int 39static int
38pcf8583_attach(struct i2c_adapter *adap, int addr, int kind) 40pcf8583_attach(struct i2c_adapter *adap, int addr, int kind)
@@ -40,8 +42,17 @@ pcf8583_attach(struct i2c_adapter *adap, int addr, int kind)
40 struct i2c_client *c; 42 struct i2c_client *c;
41 unsigned char buf[1], ad[1] = { 0 }; 43 unsigned char buf[1], ad[1] = { 0 };
42 struct i2c_msg msgs[2] = { 44 struct i2c_msg msgs[2] = {
43 { addr, 0, 1, ad }, 45 {
44 { addr, I2C_M_RD, 1, buf } 46 .addr = addr,
47 .flags = 0,
48 .len = 1,
49 .buf = ad,
50 }, {
51 .addr = addr,
52 .flags = I2C_M_RD,
53 .len = 1,
54 .buf = buf,
55 }
45 }; 56 };
46 57
47 c = kmalloc(sizeof(*c), GFP_KERNEL); 58 c = kmalloc(sizeof(*c), GFP_KERNEL);
@@ -54,7 +65,7 @@ pcf8583_attach(struct i2c_adapter *adap, int addr, int kind)
54 c->driver = &pcf8583_driver; 65 c->driver = &pcf8583_driver;
55 66
56 if (i2c_transfer(c->adapter, msgs, 2) == 2) 67 if (i2c_transfer(c->adapter, msgs, 2) == 2)
57 DAT(c) = buf[0]; 68 set_ctrl(c, buf[0]);
58 69
59 return i2c_attach_client(c); 70 return i2c_attach_client(c);
60} 71}
@@ -78,8 +89,17 @@ pcf8583_get_datetime(struct i2c_client *client, struct rtc_tm *dt)
78{ 89{
79 unsigned char buf[8], addr[1] = { 1 }; 90 unsigned char buf[8], addr[1] = { 1 };
80 struct i2c_msg msgs[2] = { 91 struct i2c_msg msgs[2] = {
81 { client->addr, 0, 1, addr }, 92 {
82 { client->addr, I2C_M_RD, 6, buf } 93 .addr = client->addr,
94 .flags = 0,
95 .len = 1,
96 .buf = addr,
97 }, {
98 .addr = client->addr,
99 .flags = I2C_M_RD,
100 .len = 6,
101 .buf = buf,
102 }
83 }; 103 };
84 int ret = -EIO; 104 int ret = -EIO;
85 105
@@ -113,7 +133,7 @@ pcf8583_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo)
113 int ret, len = 6; 133 int ret, len = 6;
114 134
115 buf[0] = 0; 135 buf[0] = 0;
116 buf[1] = DAT(client) | 0x80; 136 buf[1] = get_ctrl(client) | 0x80;
117 buf[2] = BIN_TO_BCD(dt->cs); 137 buf[2] = BIN_TO_BCD(dt->cs);
118 buf[3] = BIN_TO_BCD(dt->secs); 138 buf[3] = BIN_TO_BCD(dt->secs);
119 buf[4] = BIN_TO_BCD(dt->mins); 139 buf[4] = BIN_TO_BCD(dt->mins);
@@ -129,7 +149,7 @@ pcf8583_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo)
129 if (ret == len) 149 if (ret == len)
130 ret = 0; 150 ret = 0;
131 151
132 buf[1] = DAT(client); 152 buf[1] = get_ctrl(client);
133 i2c_master_send(client, (char *)buf, 2); 153 i2c_master_send(client, (char *)buf, 2);
134 154
135 return ret; 155 return ret;
@@ -138,7 +158,7 @@ pcf8583_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo)
138static int 158static int
139pcf8583_get_ctrl(struct i2c_client *client, unsigned char *ctrl) 159pcf8583_get_ctrl(struct i2c_client *client, unsigned char *ctrl)
140{ 160{
141 *ctrl = DAT(client); 161 *ctrl = get_ctrl(client);
142 return 0; 162 return 0;
143} 163}
144 164
@@ -149,7 +169,7 @@ pcf8583_set_ctrl(struct i2c_client *client, unsigned char *ctrl)
149 169
150 buf[0] = 0; 170 buf[0] = 0;
151 buf[1] = *ctrl; 171 buf[1] = *ctrl;
152 DAT(client) = *ctrl; 172 set_ctrl(client, *ctrl);
153 173
154 return i2c_master_send(client, (char *)buf, 2); 174 return i2c_master_send(client, (char *)buf, 2);
155} 175}
@@ -159,15 +179,23 @@ pcf8583_read_mem(struct i2c_client *client, struct mem *mem)
159{ 179{
160 unsigned char addr[1]; 180 unsigned char addr[1];
161 struct i2c_msg msgs[2] = { 181 struct i2c_msg msgs[2] = {
162 { client->addr, 0, 1, addr }, 182 {
163 { client->addr, I2C_M_RD, 0, mem->data } 183 .addr = client->addr,
184 .flags = 0,
185 .len = 1,
186 .buf = addr,
187 }, {
188 .addr = client->addr,
189 .flags = I2C_M_RD,
190 .len = mem->nr,
191 .buf = mem->data,
192 }
164 }; 193 };
165 194
166 if (mem->loc < 8) 195 if (mem->loc < 8)
167 return -EINVAL; 196 return -EINVAL;
168 197
169 addr[0] = mem->loc; 198 addr[0] = mem->loc;
170 msgs[1].len = mem->nr;
171 199
172 return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO; 200 return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
173} 201}
@@ -177,15 +205,23 @@ pcf8583_write_mem(struct i2c_client *client, struct mem *mem)
177{ 205{
178 unsigned char addr[1]; 206 unsigned char addr[1];
179 struct i2c_msg msgs[2] = { 207 struct i2c_msg msgs[2] = {
180 { client->addr, 0, 1, addr }, 208 {
181 { client->addr, 0, 0, mem->data } 209 .addr = client->addr,
210 .flags = 0,
211 .len = 1,
212 .buf = addr,
213 }, {
214 .addr = client->addr,
215 .flags = I2C_M_NOSTART,
216 .len = mem->nr,
217 .buf = mem->data,
218 }
182 }; 219 };
183 220
184 if (mem->loc < 8) 221 if (mem->loc < 8)
185 return -EINVAL; 222 return -EINVAL;
186 223
187 addr[0] = mem->loc; 224 addr[0] = mem->loc;
188 msgs[1].len = mem->nr;
189 225
190 return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO; 226 return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
191} 227}
@@ -234,4 +270,14 @@ static __init int pcf8583_init(void)
234 return i2c_add_driver(&pcf8583_driver); 270 return i2c_add_driver(&pcf8583_driver);
235} 271}
236 272
237__initcall(pcf8583_init); 273static __exit void pcf8583_exit(void)
274{
275 i2c_del_driver(&pcf8583_driver);
276}
277
278module_init(pcf8583_init);
279module_exit(pcf8583_exit);
280
281MODULE_AUTHOR("Russell King");
282MODULE_DESCRIPTION("PCF8583 I2C RTC driver");
283MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 26a3a4016115..161db4acfb91 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -37,6 +37,7 @@
37#include <linux/acpi.h> 37#include <linux/acpi.h>
38#include <linux/dmi.h> 38#include <linux/dmi.h>
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/sched.h> /* need_resched() */
40 41
41#include <asm/io.h> 42#include <asm/io.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index aee50b453265..930427fc0c4b 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -158,7 +158,15 @@ int acpi_suspend(u32 acpi_state)
158 return -EINVAL; 158 return -EINVAL;
159} 159}
160 160
161static int acpi_pm_state_valid(suspend_state_t pm_state)
162{
163 u32 acpi_state = acpi_suspend_states[pm_state];
164
165 return sleep_states[acpi_state];
166}
167
161static struct pm_ops acpi_pm_ops = { 168static struct pm_ops acpi_pm_ops = {
169 .valid = acpi_pm_state_valid,
162 .prepare = acpi_pm_prepare, 170 .prepare = acpi_pm_prepare,
163 .enter = acpi_pm_enter, 171 .enter = acpi_pm_enter,
164 .finish = acpi_pm_finish, 172 .finish = acpi_pm_finish,
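Aside: the new acpi_pm_state_valid() callback lets the suspend core reject sleep states the firmware did not advertise in sleep_states[]. A hedged sketch of the kind of caller-side check this enables (illustrative only; the caller is not part of this hunk):

/* Illustrative caller-side check, not taken from this patch. */
if (pm_ops->valid && !pm_ops->valid(state))
	return -EINVAL;	/* e.g. S3 not reported by the firmware */
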
diff --git a/drivers/base/class.c b/drivers/base/class.c
index c3e569730afe..db65fd0babe9 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/kdev_t.h> 18#include <linux/kdev_t.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/slab.h>
20#include "base.h" 21#include "base.h"
21 22
22#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr) 23#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 081c927b1ed8..a95844790f7b 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -16,6 +16,8 @@ struct sysdev_class cpu_sysdev_class = {
16}; 16};
17EXPORT_SYMBOL(cpu_sysdev_class); 17EXPORT_SYMBOL(cpu_sysdev_class);
18 18
19static struct sys_device *cpu_sys_devices[NR_CPUS];
20
19#ifdef CONFIG_HOTPLUG_CPU 21#ifdef CONFIG_HOTPLUG_CPU
20int __attribute__((weak)) smp_prepare_cpu (int cpu) 22int __attribute__((weak)) smp_prepare_cpu (int cpu)
21{ 23{
@@ -64,6 +66,7 @@ static void __devinit register_cpu_control(struct cpu *cpu)
64} 66}
65void unregister_cpu(struct cpu *cpu, struct node *root) 67void unregister_cpu(struct cpu *cpu, struct node *root)
66{ 68{
69 int logical_cpu = cpu->sysdev.id;
67 70
68 if (root) 71 if (root)
69 sysfs_remove_link(&root->sysdev.kobj, 72 sysfs_remove_link(&root->sysdev.kobj,
@@ -71,7 +74,7 @@ void unregister_cpu(struct cpu *cpu, struct node *root)
71 sysdev_remove_file(&cpu->sysdev, &attr_online); 74 sysdev_remove_file(&cpu->sysdev, &attr_online);
72 75
73 sysdev_unregister(&cpu->sysdev); 76 sysdev_unregister(&cpu->sysdev);
74 77 cpu_sys_devices[logical_cpu] = NULL;
75 return; 78 return;
76} 79}
77#else /* ... !CONFIG_HOTPLUG_CPU */ 80#else /* ... !CONFIG_HOTPLUG_CPU */
@@ -103,10 +106,19 @@ int __devinit register_cpu(struct cpu *cpu, int num, struct node *root)
103 kobject_name(&cpu->sysdev.kobj)); 106 kobject_name(&cpu->sysdev.kobj));
104 if (!error && !cpu->no_control) 107 if (!error && !cpu->no_control)
105 register_cpu_control(cpu); 108 register_cpu_control(cpu);
109 if (!error)
110 cpu_sys_devices[num] = &cpu->sysdev;
106 return error; 111 return error;
107} 112}
108 113
109 114struct sys_device *get_cpu_sysdev(int cpu)
115{
116 if (cpu < NR_CPUS)
117 return cpu_sys_devices[cpu];
118 else
119 return NULL;
120}
121EXPORT_SYMBOL_GPL(get_cpu_sysdev);
110 122
111int __init cpu_dev_init(void) 123int __init cpu_dev_init(void)
112{ 124{
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4acb2c5733c3..98f6c02d6790 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -62,14 +62,16 @@ firmware_timeout_show(struct class *class, char *buf)
62} 62}
63 63
64/** 64/**
65 * firmware_timeout_store: 65 * firmware_timeout_store - set number of seconds to wait for firmware
66 * Description: 66 * @class: device class pointer
67 * @buf: buffer to scan for timeout value
68 * @count: number of bytes in @buf
69 *
67 * Sets the number of seconds to wait for the firmware. Once 70 * Sets the number of seconds to wait for the firmware. Once
68 * this expires an error will be return to the driver and no 71 * this expires an error will be returned to the driver and no
69 * firmware will be provided. 72 * firmware will be provided.
70 * 73 *
71 * Note: zero means 'wait for ever' 74 * Note: zero means 'wait forever'.
72 *
73 **/ 75 **/
74static ssize_t 76static ssize_t
75firmware_timeout_store(struct class *class, const char *buf, size_t count) 77firmware_timeout_store(struct class *class, const char *buf, size_t count)
@@ -123,12 +125,15 @@ firmware_loading_show(struct class_device *class_dev, char *buf)
123} 125}
124 126
125/** 127/**
126 * firmware_loading_store: - loading control file 128 * firmware_loading_store - set value in the 'loading' control file
127 * Description: 129 * @class_dev: class_device pointer
130 * @buf: buffer to scan for loading control value
131 * @count: number of bytes in @buf
132 *
128 * The relevant values are: 133 * The relevant values are:
129 * 134 *
130 * 1: Start a load, discarding any previous partial load. 135 * 1: Start a load, discarding any previous partial load.
131 * 0: Conclude the load and handle the data to the driver code. 136 * 0: Conclude the load and hand the data to the driver code.
132 * -1: Conclude the load with an error and discard any written data. 137 * -1: Conclude the load with an error and discard any written data.
133 **/ 138 **/
134static ssize_t 139static ssize_t
@@ -201,6 +206,7 @@ out:
201 up(&fw_lock); 206 up(&fw_lock);
202 return ret_count; 207 return ret_count;
203} 208}
209
204static int 210static int
205fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) 211fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
206{ 212{
@@ -227,11 +233,13 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
227} 233}
228 234
229/** 235/**
230 * firmware_data_write: 236 * firmware_data_write - write method for firmware
237 * @kobj: kobject for the class_device
238 * @buffer: buffer being written
239 * @offset: buffer offset for write in total data store area
240 * @count: buffer size
231 * 241 *
232 * Description: 242 * Data written to the 'data' attribute will be later handed to
233 *
234 * Data written to the 'data' attribute will be later handled to
235 * the driver as a firmware image. 243 * the driver as a firmware image.
236 **/ 244 **/
237static ssize_t 245static ssize_t
@@ -264,6 +272,7 @@ out:
264 up(&fw_lock); 272 up(&fw_lock);
265 return retval; 273 return retval;
266} 274}
275
267static struct bin_attribute firmware_attr_data_tmpl = { 276static struct bin_attribute firmware_attr_data_tmpl = {
268 .attr = {.name = "data", .mode = 0644, .owner = THIS_MODULE}, 277 .attr = {.name = "data", .mode = 0644, .owner = THIS_MODULE},
269 .size = 0, 278 .size = 0,
@@ -448,13 +457,16 @@ out:
448 457
449/** 458/**
450 * request_firmware: - request firmware to hotplug and wait for it 459 * request_firmware: - request firmware to hotplug and wait for it
451 * Description: 460 * @firmware_p: pointer to firmware image
452 * @firmware will be used to return a firmware image by the name 461 * @name: name of firmware file
462 * @device: device for which firmware is being loaded
463 *
464 * @firmware_p will be used to return a firmware image by the name
453 * of @name for device @device. 465 * of @name for device @device.
454 * 466 *
455 * Should be called from user context where sleeping is allowed. 467 * Should be called from user context where sleeping is allowed.
456 * 468 *
457 * @name will be use as $FIRMWARE in the hotplug environment and 469 * @name will be used as $FIRMWARE in the hotplug environment and
458 * should be distinctive enough not to be confused with any other 470 * should be distinctive enough not to be confused with any other
459 * firmware image for this or any other device. 471 * firmware image for this or any other device.
460 **/ 472 **/
@@ -468,6 +480,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
468 480
469/** 481/**
470 * release_firmware: - release the resource associated with a firmware image 482 * release_firmware: - release the resource associated with a firmware image
483 * @fw: firmware resource to release
471 **/ 484 **/
472void 485void
473release_firmware(const struct firmware *fw) 486release_firmware(const struct firmware *fw)
@@ -480,8 +493,10 @@ release_firmware(const struct firmware *fw)
480 493
481/** 494/**
482 * register_firmware: - provide a firmware image for later usage 495 * register_firmware: - provide a firmware image for later usage
496 * @name: name of firmware image file
497 * @data: buffer pointer for the firmware image
498 * @size: size of the data buffer area
483 * 499 *
484 * Description:
485 * Make sure that @data will be available by requesting firmware @name. 500 * Make sure that @data will be available by requesting firmware @name.
486 * 501 *
487 * Note: This will not be possible until some kind of persistence 502 * Note: This will not be possible until some kind of persistence
@@ -526,21 +541,19 @@ request_firmware_work_func(void *arg)
526} 541}
527 542
528/** 543/**
529 * request_firmware_nowait: 544 * request_firmware_nowait: asynchronous version of request_firmware
545 * @module: module requesting the firmware
546 * @hotplug: invokes hotplug event to copy the firmware image if this flag
547 * is non-zero else the firmware copy must be done manually.
548 * @name: name of firmware file
549 * @device: device for which firmware is being loaded
550 * @context: will be passed over to @cont, and
551 * @fw may be %NULL if firmware request fails.
552 * @cont: function will be called asynchronously when the firmware
553 * request is over.
530 * 554 *
531 * Description:
532 * Asynchronous variant of request_firmware() for contexts where 555 * Asynchronous variant of request_firmware() for contexts where
533 * it is not possible to sleep. 556 * it is not possible to sleep.
534 *
535 * @hotplug invokes hotplug event to copy the firmware image if this flag
536 * is non-zero else the firmware copy must be done manually.
537 *
538 * @cont will be called asynchronously when the firmware request is over.
539 *
540 * @context will be passed over to @cont.
541 *
542 * @fw may be %NULL if firmware request fails.
543 *
544 **/ 557 **/
545int 558int
546request_firmware_nowait( 559request_firmware_nowait(
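Aside: the reworked kernel-doc above describes the request_firmware() contract; for context, a minimal driver-side sketch of that documented flow (the device pointer, file name and load step are placeholders, not taken from this patch):

#include <linux/firmware.h>
#include <linux/device.h>

static int mydev_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "mydev-fw.bin", dev);
	if (err)
		return err;	/* timeout expired or user space gave up */

	/* fw->data / fw->size hold the image written to the 'data' attribute */
	/* ... hand it to the hardware here ... */

	release_firmware(fw);
	return 0;
}
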
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 75ce8711bca5..08d9cc99c7de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -16,6 +16,7 @@
16#include <linux/dma-mapping.h> 16#include <linux/dma-mapping.h>
17#include <linux/bootmem.h> 17#include <linux/bootmem.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/slab.h>
19 20
20#include "base.h" 21#include "base.h"
21 22
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 3431eb6004c3..66ed8f2fece5 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/pm.h> 23#include <linux/pm.h>
24#include <asm/semaphore.h>
24 25
25extern struct subsystem devices_subsys; 26extern struct subsystem devices_subsys;
26 27
diff --git a/drivers/block/Kconfig.iosched b/drivers/block/Kconfig.iosched
index 6070a480600b..5b90d2fa63b8 100644
--- a/drivers/block/Kconfig.iosched
+++ b/drivers/block/Kconfig.iosched
@@ -38,4 +38,32 @@ config IOSCHED_CFQ
38 among all processes in the system. It should provide a fair 38 among all processes in the system. It should provide a fair
39 working environment, suitable for desktop systems. 39 working environment, suitable for desktop systems.
40 40
41choice
42 prompt "Default I/O scheduler"
43 default DEFAULT_AS
44 help
45 Select the I/O scheduler which will be used by default for all
46 block devices.
47
48 config DEFAULT_AS
49 bool "Anticipatory" if IOSCHED_AS
50
51 config DEFAULT_DEADLINE
52 bool "Deadline" if IOSCHED_DEADLINE
53
54 config DEFAULT_CFQ
55 bool "CFQ" if IOSCHED_CFQ
56
57 config DEFAULT_NOOP
58 bool "No-op"
59
60endchoice
61
62config DEFAULT_IOSCHED
63 string
64 default "anticipatory" if DEFAULT_AS
65 default "deadline" if DEFAULT_DEADLINE
66 default "cfq" if DEFAULT_CFQ
67 default "noop" if DEFAULT_NOOP
68
41endmenu 69endmenu
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 564172234819..c6744ff38294 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -1973,8 +1973,8 @@ static int __init as_init(void)
1973 1973
1974static void __exit as_exit(void) 1974static void __exit as_exit(void)
1975{ 1975{
1976 kmem_cache_destroy(arq_pool);
1977 elv_unregister(&iosched_as); 1976 elv_unregister(&iosched_as);
1977 kmem_cache_destroy(arq_pool);
1978} 1978}
1979 1979
1980module_init(as_init); 1980module_init(as_init);
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e183a3ef7839..ec27976a57da 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -28,13 +28,17 @@
28 through the array controller. Note in particular, neither 28 through the array controller. Note in particular, neither
29 physical nor logical disks are presented through the scsi layer. */ 29 physical nor logical disks are presented through the scsi layer. */
30 30
31#include <linux/timer.h>
32#include <linux/completion.h>
33#include <linux/slab.h>
34#include <linux/string.h>
35
36#include <asm/atomic.h>
37
31#include <scsi/scsi.h> 38#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h> 39#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h> 40#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h> 41#include <scsi/scsi_host.h>
35#include <asm/atomic.h>
36#include <linux/timer.h>
37#include <linux/completion.h>
38 42
39#include "cciss_scsi.h" 43#include "cciss_scsi.h"
40 44
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 94690e4d41e0..5281f8e70510 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -2418,28 +2418,8 @@ static int __init cfq_init(void)
2418 2418
2419static void __exit cfq_exit(void) 2419static void __exit cfq_exit(void)
2420{ 2420{
2421 struct task_struct *g, *p;
2422 unsigned long flags;
2423
2424 read_lock_irqsave(&tasklist_lock, flags);
2425
2426 /*
2427 * iterate each process in the system, removing our io_context
2428 */
2429 do_each_thread(g, p) {
2430 struct io_context *ioc = p->io_context;
2431
2432 if (ioc && ioc->cic) {
2433 ioc->cic->exit(ioc->cic);
2434 cfq_free_io_context(ioc->cic);
2435 ioc->cic = NULL;
2436 }
2437 } while_each_thread(g, p);
2438
2439 read_unlock_irqrestore(&tasklist_lock, flags);
2440
2441 cfq_slab_kill();
2442 elv_unregister(&iosched_cfq); 2421 elv_unregister(&iosched_cfq);
2422 cfq_slab_kill();
2443} 2423}
2444 2424
2445module_init(cfq_init); 2425module_init(cfq_init);
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 55621d5c5774..36f1057084b0 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -147,24 +147,17 @@ static void elevator_setup_default(void)
147 struct elevator_type *e; 147 struct elevator_type *e;
148 148
149 /* 149 /*
150 * check if default is set and exists 150 * If default has not been set, use the compiled-in selection.
151 */ 151 */
152 if (chosen_elevator[0] && (e = elevator_get(chosen_elevator))) { 152 if (!chosen_elevator[0])
153 elevator_put(e); 153 strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
154 return;
155 }
156 154
157#if defined(CONFIG_IOSCHED_AS) 155 /*
158 strcpy(chosen_elevator, "anticipatory"); 156 * If the given scheduler is not available, fall back to no-op.
159#elif defined(CONFIG_IOSCHED_DEADLINE) 157 */
160 strcpy(chosen_elevator, "deadline"); 158 if (!(e = elevator_find(chosen_elevator)))
161#elif defined(CONFIG_IOSCHED_CFQ) 159 strcpy(chosen_elevator, "noop");
162 strcpy(chosen_elevator, "cfq"); 160 elevator_put(e);
163#elif defined(CONFIG_IOSCHED_NOOP)
164 strcpy(chosen_elevator, "noop");
165#else
166#error "You must build at least 1 IO scheduler into the kernel"
167#endif
168} 161}
169 162
170static int __init elevator_setup(char *str) 163static int __init elevator_setup(char *str)
@@ -642,6 +635,27 @@ EXPORT_SYMBOL_GPL(elv_register);
642 635
643void elv_unregister(struct elevator_type *e) 636void elv_unregister(struct elevator_type *e)
644{ 637{
638 struct task_struct *g, *p;
639
640 /*
 641 * Iterate every thread in the system to remove the io contexts.
642 */
643 read_lock(&tasklist_lock);
644 do_each_thread(g, p) {
645 struct io_context *ioc = p->io_context;
646 if (ioc && ioc->cic) {
647 ioc->cic->exit(ioc->cic);
648 ioc->cic->dtor(ioc->cic);
649 ioc->cic = NULL;
650 }
651 if (ioc && ioc->aic) {
652 ioc->aic->exit(ioc->aic);
653 ioc->aic->dtor(ioc->aic);
654 ioc->aic = NULL;
655 }
656 } while_each_thread(g, p);
657 read_unlock(&tasklist_lock);
658
645 spin_lock_irq(&elv_list_lock); 659 spin_lock_irq(&elv_list_lock);
646 list_del_init(&e->list); 660 list_del_init(&e->list);
647 spin_unlock_irq(&elv_list_lock); 661 spin_unlock_irq(&elv_list_lock);
@@ -739,8 +753,10 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
739 return -EINVAL; 753 return -EINVAL;
740 } 754 }
741 755
742 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) 756 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
757 elevator_put(e);
743 return count; 758 return count;
759 }
744 760
745 elevator_switch(q, e); 761 elevator_switch(q, e);
746 return count; 762 return count;
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
index 1fef136c0e41..ce94aa11f6a7 100644
--- a/drivers/block/paride/paride.c
+++ b/drivers/block/paride/paride.c
@@ -29,6 +29,7 @@
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/wait.h> 31#include <linux/wait.h>
32#include <linux/sched.h> /* TASK_* */
32 33
33#ifdef CONFIG_PARPORT_MODULE 34#ifdef CONFIG_PARPORT_MODULE
34#define CONFIG_PARPORT 35#define CONFIG_PARPORT
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 94af920465b5..e9746af29b9f 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -807,10 +807,6 @@ static int pf_next_buf(void)
807 return 1; 807 return 1;
808 spin_lock_irqsave(&pf_spin_lock, saved_flags); 808 spin_lock_irqsave(&pf_spin_lock, saved_flags);
809 pf_end_request(1); 809 pf_end_request(1);
810 if (pf_req) {
811 pf_count = pf_req->current_nr_sectors;
812 pf_buf = pf_req->buffer;
813 }
814 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 810 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
815 return 1; 811 return 1;
816} 812}
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 82f2d6d2eeef..6f5df0fad703 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -162,6 +162,8 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
162#include <linux/mtio.h> 162#include <linux/mtio.h>
163#include <linux/pg.h> 163#include <linux/pg.h>
164#include <linux/device.h> 164#include <linux/device.h>
165#include <linux/sched.h> /* current, TASK_* */
166#include <linux/jiffies.h>
165 167
166#include <asm/uaccess.h> 168#include <asm/uaccess.h>
167 169
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 686c95573452..715ae5dc88fb 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -146,6 +146,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
146#include <linux/slab.h> 146#include <linux/slab.h>
147#include <linux/mtio.h> 147#include <linux/mtio.h>
148#include <linux/device.h> 148#include <linux/device.h>
149#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */
149 150
150#include <asm/uaccess.h> 151#include <asm/uaccess.h>
151 152
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index e46ecd23b3ac..709f809f79f1 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -778,13 +778,16 @@ static struct vio_device_id viodasd_device_table[] __devinitdata = {
778 { "viodasd", "" }, 778 { "viodasd", "" },
779 { "", "" } 779 { "", "" }
780}; 780};
781
782MODULE_DEVICE_TABLE(vio, viodasd_device_table); 781MODULE_DEVICE_TABLE(vio, viodasd_device_table);
782
783static struct vio_driver viodasd_driver = { 783static struct vio_driver viodasd_driver = {
784 .name = "viodasd",
785 .id_table = viodasd_device_table, 784 .id_table = viodasd_device_table,
786 .probe = viodasd_probe, 785 .probe = viodasd_probe,
787 .remove = viodasd_remove 786 .remove = viodasd_remove,
787 .driver = {
788 .name = "viodasd",
789 .owner = THIS_MODULE,
790 }
788}; 791};
789 792
790/* 793/*
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 0829db58462f..36f31d202223 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -736,13 +736,16 @@ static struct vio_device_id viocd_device_table[] __devinitdata = {
736 { "viocd", "" }, 736 { "viocd", "" },
737 { "", "" } 737 { "", "" }
738}; 738};
739
740MODULE_DEVICE_TABLE(vio, viocd_device_table); 739MODULE_DEVICE_TABLE(vio, viocd_device_table);
740
741static struct vio_driver viocd_driver = { 741static struct vio_driver viocd_driver = {
742 .name = "viocd",
743 .id_table = viocd_device_table, 742 .id_table = viocd_device_table,
744 .probe = viocd_probe, 743 .probe = viocd_probe,
745 .remove = viocd_remove 744 .remove = viocd_remove,
745 .driver = {
746 .name = "viocd",
747 .owner = THIS_MODULE,
748 }
746}; 749};
747 750
748static int __init viocd_init(void) 751static int __init viocd_init(void)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c29365d5b524..fdf4370db994 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -661,7 +661,7 @@ config HW_RANDOM
661 661
662config NVRAM 662config NVRAM
663 tristate "/dev/nvram support" 663 tristate "/dev/nvram support"
664 depends on ATARI || X86 || X86_64 || ARM || GENERIC_NVRAM 664 depends on ATARI || X86 || ARM || GENERIC_NVRAM
665 ---help--- 665 ---help---
666 If you say Y here and create a character special file /dev/nvram 666 If you say Y here and create a character special file /dev/nvram
667 with major number 10 and minor number 144 using mknod ("man mknod"), 667 with major number 10 and minor number 144 using mknod ("man mknod"),
@@ -985,7 +985,7 @@ config MAX_RAW_DEVS
985 985
986config HANGCHECK_TIMER 986config HANGCHECK_TIMER
987 tristate "Hangcheck timer" 987 tristate "Hangcheck timer"
988 depends on X86_64 || X86 || IA64 || PPC64 || ARCH_S390 988 depends on X86 || IA64 || PPC64 || ARCH_S390
989 help 989 help
990 The hangcheck-timer module detects when the system has gone 990 The hangcheck-timer module detects when the system has gone
991 out to lunch past a certain margin. It can reboot the system 991 out to lunch past a certain margin. It can reboot the system
@@ -1001,5 +1001,17 @@ config MMTIMER
1001 1001
1002source "drivers/char/tpm/Kconfig" 1002source "drivers/char/tpm/Kconfig"
1003 1003
1004config TELCLOCK
1005 tristate "Telecom clock driver for MPBL0010 ATCA SBC"
1006 depends on EXPERIMENTAL
1007 default n
1008 help
1009 The telecom clock device is specific to the MPBL0010 ATCA computer and
 1010 allows direct userspace access to the telecom clock configuration
 1011 settings. This device is used for hardware synchronization
1012 across the ATCA backplane fabric. Upon loading, the driver exports a
1013 sysfs directory, /sys/devices/platform/telco_clock, with a number of
1014 files for controlling the behavior of this hardware.
1015
1004endmenu 1016endmenu
1005 1017
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 08f69287ea36..4aeae687e88a 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_NWFLASH) += nwflash.o
82obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o 82obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
83obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o 83obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
84obj-$(CONFIG_TANBAC_TB0219) += tb0219.o 84obj-$(CONFIG_TANBAC_TB0219) += tb0219.o
85obj-$(CONFIG_TELCLOCK) += tlclk.o
85 86
86obj-$(CONFIG_WATCHDOG) += watchdog/ 87obj-$(CONFIG_WATCHDOG) += watchdog/
87obj-$(CONFIG_MWAVE) += mwave/ 88obj-$(CONFIG_MWAVE) += mwave/
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 7f8c1b53b754..486ed8a11b59 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -27,7 +27,7 @@ config AGP
27 27
28config AGP_ALI 28config AGP_ALI
29 tristate "ALI chipset support" 29 tristate "ALI chipset support"
30 depends on AGP && X86 && !X86_64 30 depends on AGP && X86_32
31 ---help--- 31 ---help---
32 This option gives you AGP support for the GLX component of 32 This option gives you AGP support for the GLX component of
33 XFree86 4.x on the following ALi chipsets. The supported chipsets 33 XFree86 4.x on the following ALi chipsets. The supported chipsets
@@ -45,7 +45,7 @@ config AGP_ALI
45 45
46config AGP_ATI 46config AGP_ATI
47 tristate "ATI chipset support" 47 tristate "ATI chipset support"
48 depends on AGP && X86 && !X86_64 48 depends on AGP && X86_32
49 ---help--- 49 ---help---
50 This option gives you AGP support for the GLX component of 50 This option gives you AGP support for the GLX component of
51 XFree86 4.x on the ATI RadeonIGP family of chipsets. 51 XFree86 4.x on the ATI RadeonIGP family of chipsets.
@@ -55,7 +55,7 @@ config AGP_ATI
55 55
56config AGP_AMD 56config AGP_AMD
57 tristate "AMD Irongate, 761, and 762 chipset support" 57 tristate "AMD Irongate, 761, and 762 chipset support"
58 depends on AGP && X86 && !X86_64 58 depends on AGP && X86_32
59 help 59 help
60 This option gives you AGP support for the GLX component of 60 This option gives you AGP support for the GLX component of
61 XFree86 4.x on AMD Irongate, 761, and 762 chipsets. 61 XFree86 4.x on AMD Irongate, 761, and 762 chipsets.
@@ -91,7 +91,7 @@ config AGP_INTEL
91 91
92config AGP_NVIDIA 92config AGP_NVIDIA
93 tristate "NVIDIA nForce/nForce2 chipset support" 93 tristate "NVIDIA nForce/nForce2 chipset support"
94 depends on AGP && X86 && !X86_64 94 depends on AGP && X86_32
95 help 95 help
96 This option gives you AGP support for the GLX component of 96 This option gives you AGP support for the GLX component of
97 XFree86 4.x on the following NVIDIA chipsets. The supported chipsets 97 XFree86 4.x on the following NVIDIA chipsets. The supported chipsets
@@ -99,7 +99,7 @@ config AGP_NVIDIA
99 99
100config AGP_SIS 100config AGP_SIS
101 tristate "SiS chipset support" 101 tristate "SiS chipset support"
102 depends on AGP && X86 && !X86_64 102 depends on AGP && X86_32
103 help 103 help
104 This option gives you AGP support for the GLX component of 104 This option gives you AGP support for the GLX component of
105 XFree86 4.x on Silicon Integrated Systems [SiS] chipsets. 105 XFree86 4.x on Silicon Integrated Systems [SiS] chipsets.
@@ -111,14 +111,14 @@ config AGP_SIS
111 111
112config AGP_SWORKS 112config AGP_SWORKS
113 tristate "Serverworks LE/HE chipset support" 113 tristate "Serverworks LE/HE chipset support"
114 depends on AGP && X86 && !X86_64 114 depends on AGP && X86_32
115 help 115 help
116 Say Y here to support the Serverworks AGP card. See 116 Say Y here to support the Serverworks AGP card. See
117 <http://www.serverworks.com/> for product descriptions and images. 117 <http://www.serverworks.com/> for product descriptions and images.
118 118
119config AGP_VIA 119config AGP_VIA
120 tristate "VIA chipset support" 120 tristate "VIA chipset support"
121 depends on AGP && X86 && !X86_64 121 depends on AGP && X86_32
122 help 122 help
123 This option gives you AGP support for the GLX component of 123 This option gives you AGP support for the GLX component of
124 XFree86 4.x on VIA MVP3/Apollo Pro chipsets. 124 XFree86 4.x on VIA MVP3/Apollo Pro chipsets.
@@ -154,7 +154,7 @@ config AGP_UNINORTH
154 154
155config AGP_EFFICEON 155config AGP_EFFICEON
156 tristate "Transmeta Efficeon support" 156 tristate "Transmeta Efficeon support"
157 depends on AGP && X86 && !X86_64 157 depends on AGP && X86_32
158 help 158 help
159 This option gives you AGP support for the Transmeta Efficeon 159 This option gives you AGP support for the Transmeta Efficeon
160 series processors with integrated northbridges. 160 series processors with integrated northbridges.
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 9c9c9c2247ce..b02fc2267159 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -7,6 +7,7 @@
7#include <linux/pci.h> 7#include <linux/pci.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/agp_backend.h> 9#include <linux/agp_backend.h>
10#include <asm/page.h> /* PAGE_SIZE */
10#include "agp.h" 11#include "agp.h"
11 12
12#define ALI_AGPCTRL 0xb8 13#define ALI_AGPCTRL 0xb8
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 0a7624a9b1c1..0e6c3a31d344 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -13,6 +13,7 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/agp_backend.h> 15#include <linux/agp_backend.h>
16#include <asm/page.h> /* PAGE_SIZE */
16#include "agp.h" 17#include "agp.h"
17 18
18/* Will need to be increased if AMD64 ever goes >8-way. */ 19/* Will need to be increased if AMD64 ever goes >8-way. */
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index e572ced9100a..0b6e72642d6e 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -6,6 +6,8 @@
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/pci.h> 7#include <linux/pci.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/string.h>
10#include <linux/slab.h>
9#include <linux/agp_backend.h> 11#include <linux/agp_backend.h>
10#include <asm/agp.h> 12#include <asm/agp.h>
11#include "agp.h" 13#include "agp.h"
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 94943298c03e..a2d9e5e48bbe 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -10,6 +10,8 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/string.h>
14#include <linux/slab.h>
13#include <linux/agp_backend.h> 15#include <linux/agp_backend.h>
14 16
15#include "agp.h" 17#include "agp.h"
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
index c9ac731504f2..40083241804e 100644
--- a/drivers/char/agp/isoch.c
+++ b/drivers/char/agp/isoch.c
@@ -6,6 +6,7 @@
6#include <linux/pci.h> 6#include <linux/pci.h>
7#include <linux/agp_backend.h> 7#include <linux/agp_backend.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/slab.h>
9 10
10#include "agp.h" 11#include "agp.h"
11 12
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index a9fb12c20eb7..71ea59a1dbeb 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -5,6 +5,8 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/pci.h> 6#include <linux/pci.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/string.h>
9#include <linux/slab.h>
8#include <linux/agp_backend.h> 10#include <linux/agp_backend.h>
9#include "agp.h" 11#include "agp.h"
10 12
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index cf4c3648463d..c7f818cd7b02 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -281,7 +281,7 @@ static char rcsid[] =
281 * make sure "cyc" appears in all kernel messages; all soft interrupts 281 * make sure "cyc" appears in all kernel messages; all soft interrupts
282 * handled by same routine; recognize out-of-band reception; comment 282 * handled by same routine; recognize out-of-band reception; comment
283 * out some diagnostic messages; leave RTS/CTS flow control to hardware; 283 * out some diagnostic messages; leave RTS/CTS flow control to hardware;
284 * fix race condition in -Z buffer management; only -Y needs to explictly 284 * fix race condition in -Z buffer management; only -Y needs to explicitly
285 * flush chars; tidy up some startup messages; 285 * flush chars; tidy up some startup messages;
286 * 286 *
287 * Revision 1.36.4.18 1996/07/25 18:57:31 bentson 287 * Revision 1.36.4.18 1996/07/25 18:57:31 bentson
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index 475cc5e555e1..6d3449761914 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -15,6 +15,8 @@
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/kdev_t.h> 16#include <linux/kdev_t.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/slab.h>
19#include <linux/string.h>
18 20
19#include "drm_core.h" 21#include "drm_core.h"
20#include "drmP.h" 22#include "drmP.h"
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 407708a001e4..b7a0e4d6b934 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -3113,6 +3113,7 @@ MODULE_DEVICE_TABLE(pci, epca_pci_tbl);
3113int __init init_PCI (void) 3113int __init init_PCI (void)
3114{ /* Begin init_PCI */ 3114{ /* Begin init_PCI */
3115 memset (&epca_driver, 0, sizeof (epca_driver)); 3115 memset (&epca_driver, 0, sizeof (epca_driver));
3116 epca_driver.owner = THIS_MODULE;
3116 epca_driver.name = "epca"; 3117 epca_driver.name = "epca";
3117 epca_driver.id_table = epca_pci_tbl; 3118 epca_driver.id_table = epca_pci_tbl;
3118 epca_driver.probe = epca_init_one; 3119 epca_driver.probe = epca_init_one;
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index a54bc93353af..66e53dd450ff 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -117,7 +117,7 @@ __setup("hcheck_reboot", hangcheck_parse_reboot);
117__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); 117__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
118#endif /* not MODULE */ 118#endif /* not MODULE */
119 119
120#if defined(CONFIG_X86) || defined(CONFIG_X86_64) 120#if defined(CONFIG_X86)
121# define HAVE_MONOTONIC 121# define HAVE_MONOTONIC
122# define TIMER_FREQ 1000000000ULL 122# define TIMER_FREQ 1000000000ULL
123#elif defined(CONFIG_ARCH_S390) 123#elif defined(CONFIG_ARCH_S390)
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index c055bb630ffc..3808d9572619 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -49,7 +49,9 @@
49#define HPET_USER_FREQ (64) 49#define HPET_USER_FREQ (64)
50#define HPET_DRIFT (500) 50#define HPET_DRIFT (500)
51 51
52static u32 hpet_ntimer, hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; 52#define HPET_RANGE_SIZE 1024 /* from HPET spec */
53
54static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
53 55
54/* A lock for concurrent access by app and isr hpet activity. */ 56/* A lock for concurrent access by app and isr hpet activity. */
55static DEFINE_SPINLOCK(hpet_lock); 57static DEFINE_SPINLOCK(hpet_lock);
@@ -78,7 +80,7 @@ struct hpets {
78 struct hpet __iomem *hp_hpet; 80 struct hpet __iomem *hp_hpet;
79 unsigned long hp_hpet_phys; 81 unsigned long hp_hpet_phys;
80 struct time_interpolator *hp_interpolator; 82 struct time_interpolator *hp_interpolator;
81 unsigned long hp_period; 83 unsigned long long hp_tick_freq;
82 unsigned long hp_delta; 84 unsigned long hp_delta;
83 unsigned int hp_ntimer; 85 unsigned int hp_ntimer;
84 unsigned int hp_which; 86 unsigned int hp_which;
@@ -90,6 +92,7 @@ static struct hpets *hpets;
90#define HPET_OPEN 0x0001 92#define HPET_OPEN 0x0001
91#define HPET_IE 0x0002 /* interrupt enabled */ 93#define HPET_IE 0x0002 /* interrupt enabled */
92#define HPET_PERIODIC 0x0004 94#define HPET_PERIODIC 0x0004
95#define HPET_SHARED_IRQ 0x0008
93 96
94#if BITS_PER_LONG == 64 97#if BITS_PER_LONG == 64
95#define write_counter(V, MC) writeq(V, MC) 98#define write_counter(V, MC) writeq(V, MC)
@@ -120,6 +123,11 @@ static irqreturn_t hpet_interrupt(int irq, void *data, struct pt_regs *regs)
120 unsigned long isr; 123 unsigned long isr;
121 124
122 devp = data; 125 devp = data;
126 isr = 1 << (devp - devp->hd_hpets->hp_dev);
127
128 if ((devp->hd_flags & HPET_SHARED_IRQ) &&
129 !(isr & readl(&devp->hd_hpet->hpet_isr)))
130 return IRQ_NONE;
123 131
124 spin_lock(&hpet_lock); 132 spin_lock(&hpet_lock);
125 devp->hd_irqdata++; 133 devp->hd_irqdata++;
@@ -137,8 +145,8 @@ static irqreturn_t hpet_interrupt(int irq, void *data, struct pt_regs *regs)
137 &devp->hd_timer->hpet_compare); 145 &devp->hd_timer->hpet_compare);
138 } 146 }
139 147
140 isr = (1 << (devp - devp->hd_hpets->hp_dev)); 148 if (devp->hd_flags & HPET_SHARED_IRQ)
141 writeq(isr, &devp->hd_hpet->hpet_isr); 149 writel(isr, &devp->hd_hpet->hpet_isr);
142 spin_unlock(&hpet_lock); 150 spin_unlock(&hpet_lock);
143 151
144 spin_lock(&hpet_task_lock); 152 spin_lock(&hpet_task_lock);
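With the shared-interrupt support added above, each timer owns one bit in the
HPET's general interrupt status register, indexed by the timer's position in
the hp_dev array; the handler returns IRQ_NONE when its bit is clear and only
writes the bit back (to acknowledge a level-triggered interrupt) when the IRQ
is shared. A standalone sketch of that bit test (illustrative only; the status
value and timer index are invented for the example):

#include <stdio.h>

int main(void)
{
	unsigned long isr_reg = 0x5;	/* assumed status: timers 0 and 2 pending */
	int timer_index = 2;		/* devp - devp->hd_hpets->hp_dev in the driver */
	unsigned long bit = 1UL << timer_index;

	if (!(bit & isr_reg)) {
		printf("bit %d clear: not ours, return IRQ_NONE\n", timer_index);
		return 0;
	}
	printf("timer %d pending: handle it, then write 0x%lx back to acknowledge\n",
	       timer_index, bit);
	return 0;
}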
@@ -276,7 +284,8 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
276 284
277 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, 285 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
278 PAGE_SIZE, vma->vm_page_prot)) { 286 PAGE_SIZE, vma->vm_page_prot)) {
279 printk(KERN_ERR "remap_pfn_range failed in hpet.c\n"); 287 printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
288 __FUNCTION__);
280 return -EAGAIN; 289 return -EAGAIN;
281 } 290 }
282 291
@@ -364,7 +373,9 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
364 hpet = devp->hd_hpet; 373 hpet = devp->hd_hpet;
365 hpetp = devp->hd_hpets; 374 hpetp = devp->hd_hpets;
366 375
367 v = readq(&timer->hpet_config); 376 if (!devp->hd_ireqfreq)
377 return -EIO;
378
368 spin_lock_irq(&hpet_lock); 379 spin_lock_irq(&hpet_lock);
369 380
370 if (devp->hd_flags & HPET_IE) { 381 if (devp->hd_flags & HPET_IE) {
@@ -373,16 +384,21 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
373 } 384 }
374 385
375 devp->hd_flags |= HPET_IE; 386 devp->hd_flags |= HPET_IE;
387
388 if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
389 devp->hd_flags |= HPET_SHARED_IRQ;
376 spin_unlock_irq(&hpet_lock); 390 spin_unlock_irq(&hpet_lock);
377 391
378 t = readq(&timer->hpet_config);
379 irq = devp->hd_hdwirq; 392 irq = devp->hd_hdwirq;
380 393
381 if (irq) { 394 if (irq) {
382 sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev)); 395 unsigned long irq_flags;
383 396
384 if (request_irq 397 sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
385 (irq, hpet_interrupt, SA_INTERRUPT, devp->hd_name, (void *)devp)) { 398 irq_flags = devp->hd_flags & HPET_SHARED_IRQ
399 ? SA_SHIRQ : SA_INTERRUPT;
400 if (request_irq(irq, hpet_interrupt, irq_flags,
401 devp->hd_name, (void *)devp)) {
386 printk(KERN_ERR "hpet: IRQ %d is not free\n", irq); 402 printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
387 irq = 0; 403 irq = 0;
388 } 404 }
@@ -416,20 +432,24 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
416 write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); 432 write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
417 } 433 }
418 434
419 isr = (1 << (devp - hpets->hp_dev)); 435 if (devp->hd_flags & HPET_SHARED_IRQ) {
420 writeq(isr, &hpet->hpet_isr); 436 isr = 1 << (devp - devp->hd_hpets->hp_dev);
437 writel(isr, &hpet->hpet_isr);
438 }
421 writeq(g, &timer->hpet_config); 439 writeq(g, &timer->hpet_config);
422 local_irq_restore(flags); 440 local_irq_restore(flags);
423 441
424 return 0; 442 return 0;
425} 443}
426 444
427static inline unsigned long hpet_time_div(unsigned long dis) 445/* converts Hz to number of timer ticks */
446static inline unsigned long hpet_time_div(struct hpets *hpets,
447 unsigned long dis)
428{ 448{
429 unsigned long long m = 1000000000000000ULL; 449 unsigned long long m;
430 450
451 m = hpets->hp_tick_freq + (dis >> 1);
431 do_div(m, dis); 452 do_div(m, dis);
432
433 return (unsigned long)m; 453 return (unsigned long)m;
434} 454}
435 455
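The reworked hpet_time_div() above divides the per-HPET tick frequency by the
requested interrupt rate in Hz, rounding to the nearest tick, replacing the
old scheme where callers passed period * Hz and the helper divided 10^15
femtoseconds by it. A standalone sketch of the new arithmetic (illustrative
only; 14.31818 MHz is an assumed, typical HPET tick frequency):

#include <stdio.h>

static unsigned long hz_to_ticks(unsigned long long tick_freq, unsigned long hz)
{
	unsigned long long m = tick_freq + (hz >> 1);	/* round to nearest tick */

	return (unsigned long)(m / hz);			/* the kernel uses do_div() */
}

int main(void)
{
	unsigned long long freq = 14318180ULL;		/* ticks per second (assumed) */

	printf("%lu ticks per interrupt at 64 Hz\n", hz_to_ticks(freq, 64));
	printf("%lu ticks per interrupt at 1000 Hz\n", hz_to_ticks(freq, 1000));
	return 0;
}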
@@ -477,14 +497,21 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
477 { 497 {
478 struct hpet_info info; 498 struct hpet_info info;
479 499
480 info.hi_ireqfreq = hpet_time_div(hpetp->hp_period * 500 if (devp->hd_ireqfreq)
481 devp->hd_ireqfreq); 501 info.hi_ireqfreq =
502 hpet_time_div(hpetp, devp->hd_ireqfreq);
503 else
504 info.hi_ireqfreq = 0;
482 info.hi_flags = 505 info.hi_flags =
483 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK; 506 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
484 info.hi_hpet = devp->hd_hpets->hp_which; 507 info.hi_hpet = hpetp->hp_which;
485 info.hi_timer = devp - devp->hd_hpets->hp_dev; 508 info.hi_timer = devp - hpetp->hp_dev;
486 if (copy_to_user((void __user *)arg, &info, sizeof(info))) 509 if (kernel)
487 err = -EFAULT; 510 memcpy((void *)arg, &info, sizeof(info));
511 else
512 if (copy_to_user((void __user *)arg, &info,
513 sizeof(info)))
514 err = -EFAULT;
488 break; 515 break;
489 } 516 }
490 case HPET_EPI: 517 case HPET_EPI:
@@ -516,12 +543,12 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
516 break; 543 break;
517 } 544 }
518 545
519 if (arg & (arg - 1)) { 546 if (!arg) {
520 err = -EINVAL; 547 err = -EINVAL;
521 break; 548 break;
522 } 549 }
523 550
524 devp->hd_ireqfreq = hpet_time_div(hpetp->hp_period * arg); 551 devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
525 } 552 }
526 553
527 return err; 554 return err;
@@ -539,6 +566,17 @@ static struct file_operations hpet_fops = {
539 .mmap = hpet_mmap, 566 .mmap = hpet_mmap,
540}; 567};
541 568
569static int hpet_is_known(struct hpet_data *hdp)
570{
571 struct hpets *hpetp;
572
573 for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
574 if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
575 return 1;
576
577 return 0;
578}
579
542EXPORT_SYMBOL(hpet_alloc); 580EXPORT_SYMBOL(hpet_alloc);
543EXPORT_SYMBOL(hpet_register); 581EXPORT_SYMBOL(hpet_register);
544EXPORT_SYMBOL(hpet_unregister); 582EXPORT_SYMBOL(hpet_unregister);
@@ -563,6 +601,8 @@ int hpet_register(struct hpet_task *tp, int periodic)
563 return -EINVAL; 601 return -EINVAL;
564 } 602 }
565 603
604 tp->ht_opaque = NULL;
605
566 spin_lock_irq(&hpet_task_lock); 606 spin_lock_irq(&hpet_task_lock);
567 spin_lock(&hpet_lock); 607 spin_lock(&hpet_lock);
568 608
@@ -702,15 +742,14 @@ static void hpet_register_interpolator(struct hpets *hpetp)
702#ifdef CONFIG_TIME_INTERPOLATION 742#ifdef CONFIG_TIME_INTERPOLATION
703 struct time_interpolator *ti; 743 struct time_interpolator *ti;
704 744
705 ti = kmalloc(sizeof(*ti), GFP_KERNEL); 745 ti = kzalloc(sizeof(*ti), GFP_KERNEL);
706 if (!ti) 746 if (!ti)
707 return; 747 return;
708 748
709 memset(ti, 0, sizeof(*ti));
710 ti->source = TIME_SOURCE_MMIO64; 749 ti->source = TIME_SOURCE_MMIO64;
711 ti->shift = 10; 750 ti->shift = 10;
712 ti->addr = &hpetp->hp_hpet->hpet_mc; 751 ti->addr = &hpetp->hp_hpet->hpet_mc;
713 ti->frequency = hpet_time_div(hpets->hp_period); 752 ti->frequency = hpetp->hp_tick_freq;
714 ti->drift = HPET_DRIFT; 753 ti->drift = HPET_DRIFT;
715 ti->mask = -1; 754 ti->mask = -1;
716 755
@@ -743,11 +782,11 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
743 if (!timer) 782 if (!timer)
744 return 0; 783 return 0;
745 784
746 hpet = hpets->hp_hpet; 785 hpet = hpetp->hp_hpet;
747 t = read_counter(&timer->hpet_compare); 786 t = read_counter(&timer->hpet_compare);
748 787
749 i = 0; 788 i = 0;
750 count = hpet_time_div(hpetp->hp_period * TICK_CALIBRATE); 789 count = hpet_time_div(hpetp, TICK_CALIBRATE);
751 790
752 local_irq_save(flags); 791 local_irq_save(flags);
753 792
@@ -771,28 +810,29 @@ int hpet_alloc(struct hpet_data *hdp)
771 struct hpets *hpetp; 810 struct hpets *hpetp;
772 size_t siz; 811 size_t siz;
773 struct hpet __iomem *hpet; 812 struct hpet __iomem *hpet;
774 static struct hpets *last = (struct hpets *)0; 813 static struct hpets *last = NULL;
775 unsigned long ns; 814 unsigned long period;
815 unsigned long long temp;
776 816
777 /* 817 /*
778 * hpet_alloc can be called by platform dependent code. 818 * hpet_alloc can be called by platform dependent code.
779 * if platform dependent code has allocated the hpet 819 * If platform dependent code has allocated the hpet that
780 * ACPI also reports hpet, then we catch it here. 820 * ACPI has also reported, then we catch it here.
781 */ 821 */
782 for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) 822 if (hpet_is_known(hdp)) {
783 if (hpetp->hp_hpet == hdp->hd_address) 823 printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
784 return 0; 824 __FUNCTION__);
825 return 0;
826 }
785 827
786 siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) * 828 siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
787 sizeof(struct hpet_dev)); 829 sizeof(struct hpet_dev));
788 830
789 hpetp = kmalloc(siz, GFP_KERNEL); 831 hpetp = kzalloc(siz, GFP_KERNEL);
790 832
791 if (!hpetp) 833 if (!hpetp)
792 return -ENOMEM; 834 return -ENOMEM;
793 835
794 memset(hpetp, 0, siz);
795
796 hpetp->hp_which = hpet_nhpet++; 836 hpetp->hp_which = hpet_nhpet++;
797 hpetp->hp_hpet = hdp->hd_address; 837 hpetp->hp_hpet = hdp->hd_address;
798 hpetp->hp_hpet_phys = hdp->hd_phys_address; 838 hpetp->hp_hpet_phys = hdp->hd_phys_address;
@@ -822,21 +862,23 @@ int hpet_alloc(struct hpet_data *hdp)
822 862
823 last = hpetp; 863 last = hpetp;
824 864
825 hpetp->hp_period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >> 865 period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
826 HPET_COUNTER_CLK_PERIOD_SHIFT; 866 HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
867 temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
868 temp += period >> 1; /* round */
869 do_div(temp, period);
870 hpetp->hp_tick_freq = temp; /* ticks per second */
827 871
828 printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s", 872 printk(KERN_INFO "hpet%d: at MMIO 0x%lx (virtual 0x%p), IRQ%s",
829 hpetp->hp_which, hdp->hd_phys_address, 873 hpetp->hp_which, hdp->hd_phys_address, hdp->hd_address,
830 hpetp->hp_ntimer > 1 ? "s" : ""); 874 hpetp->hp_ntimer > 1 ? "s" : "");
831 for (i = 0; i < hpetp->hp_ntimer; i++) 875 for (i = 0; i < hpetp->hp_ntimer; i++)
832 printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); 876 printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
833 printk("\n"); 877 printk("\n");
834 878
835 ns = hpetp->hp_period; /* femptoseconds, 10^-15 */ 879 printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
836 ns /= 1000000; /* convert to nanoseconds, 10^-9 */ 880 hpetp->hp_which, hpetp->hp_ntimer,
837 printk(KERN_INFO "hpet%d: %ldns tick, %d %d-bit timers\n", 881 cap & HPET_COUNTER_SIZE_MASK ? 64 : 32, hpetp->hp_tick_freq);
838 hpetp->hp_which, ns, hpetp->hp_ntimer,
839 cap & HPET_COUNTER_SIZE_MASK ? 64 : 32);
840 882
841 mcfg = readq(&hpet->hpet_config); 883 mcfg = readq(&hpet->hpet_config);
842 if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) { 884 if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
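The hunk above converts the femtosecond period reported in the capabilities
register into a tick frequency once, at hpet_alloc() time, so that the other
helpers can work directly in Hz. A standalone sketch of that conversion
(illustrative only; 69841279 fs is an assumed, commonly seen period value):

#include <stdio.h>

int main(void)
{
	unsigned long period = 69841279UL;		/* fs per tick, ~14.318 MHz part */
	unsigned long long temp = 1000000000000000ULL;	/* 10^15 femtoseconds per second */

	temp += period >> 1;				/* round to nearest */
	temp /= period;					/* the kernel uses do_div() */

	printf("tick frequency: %llu Hz\n", temp);	/* prints 14318180 */
	return 0;
}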
@@ -845,13 +887,10 @@ int hpet_alloc(struct hpet_data *hdp)
845 writeq(mcfg, &hpet->hpet_config); 887 writeq(mcfg, &hpet->hpet_config);
846 } 888 }
847 889
848 for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; 890 for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
849 i++, hpet_ntimer++, devp++) {
850 unsigned long v;
851 struct hpet_timer __iomem *timer; 891 struct hpet_timer __iomem *timer;
852 892
853 timer = &hpet->hpet_timers[devp - hpetp->hp_dev]; 893 timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
854 v = readq(&timer->hpet_config);
855 894
856 devp->hd_hpets = hpetp; 895 devp->hd_hpets = hpetp;
857 devp->hd_hpet = hpet; 896 devp->hd_hpet = hpet;
@@ -880,7 +919,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
880 struct hpet_data *hdp; 919 struct hpet_data *hdp;
881 acpi_status status; 920 acpi_status status;
882 struct acpi_resource_address64 addr; 921 struct acpi_resource_address64 addr;
883 struct hpets *hpetp;
884 922
885 hdp = data; 923 hdp = data;
886 924
@@ -893,9 +931,29 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
893 hdp->hd_phys_address = addr.min_address_range; 931 hdp->hd_phys_address = addr.min_address_range;
894 hdp->hd_address = ioremap(addr.min_address_range, size); 932 hdp->hd_address = ioremap(addr.min_address_range, size);
895 933
896 for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) 934 if (hpet_is_known(hdp)) {
897 if (hpetp->hp_hpet == hdp->hd_address) 935 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
898 return -EBUSY; 936 __FUNCTION__, hdp->hd_phys_address);
937 iounmap(hdp->hd_address);
938 return -EBUSY;
939 }
940 } else if (res->id == ACPI_RSTYPE_FIXED_MEM32) {
941 struct acpi_resource_fixed_mem32 *fixmem32;
942
943 fixmem32 = &res->data.fixed_memory32;
944 if (!fixmem32)
945 return -EINVAL;
946
947 hdp->hd_phys_address = fixmem32->range_base_address;
948 hdp->hd_address = ioremap(fixmem32->range_base_address,
949 HPET_RANGE_SIZE);
950
951 if (hpet_is_known(hdp)) {
952 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
953 __FUNCTION__, hdp->hd_phys_address);
954 iounmap(hdp->hd_address);
955 return -EBUSY;
956 }
899 } else if (res->id == ACPI_RSTYPE_EXT_IRQ) { 957 } else if (res->id == ACPI_RSTYPE_EXT_IRQ) {
900 struct acpi_resource_ext_irq *irqp; 958 struct acpi_resource_ext_irq *irqp;
901 int i; 959 int i;
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 78d681dc35a8..f5212eb2b41d 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -95,11 +95,11 @@ static int __devexit hvc_vio_remove(struct vio_dev *vdev)
95} 95}
96 96
97static struct vio_driver hvc_vio_driver = { 97static struct vio_driver hvc_vio_driver = {
98 .name = hvc_driver_name,
99 .id_table = hvc_driver_table, 98 .id_table = hvc_driver_table,
100 .probe = hvc_vio_probe, 99 .probe = hvc_vio_probe,
101 .remove = hvc_vio_remove, 100 .remove = hvc_vio_remove,
102 .driver = { 101 .driver = {
102 .name = hvc_driver_name,
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 } 104 }
105}; 105};
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index f47f009f9259..53dc77c760fc 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -720,10 +720,13 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
720}; 720};
721 721
722static struct vio_driver hvcs_vio_driver = { 722static struct vio_driver hvcs_vio_driver = {
723 .name = hvcs_driver_name,
724 .id_table = hvcs_driver_table, 723 .id_table = hvcs_driver_table,
725 .probe = hvcs_probe, 724 .probe = hvcs_probe,
726 .remove = hvcs_remove, 725 .remove = hvcs_remove,
726 .driver = {
727 .name = hvcs_driver_name,
728 .owner = THIS_MODULE,
729 }
727}; 730};
728 731
729/* Only called from hvcs_get_pi please */ 732/* Only called from hvcs_get_pi please */
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 38be4b0dbd1c..91dd669273e0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -231,9 +231,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
231static int mmap_mem(struct file * file, struct vm_area_struct * vma) 231static int mmap_mem(struct file * file, struct vm_area_struct * vma)
232{ 232{
233#if defined(__HAVE_PHYS_MEM_ACCESS_PROT) 233#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
234 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 234 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
235
236 vma->vm_page_prot = phys_mem_access_prot(file, offset,
237 vma->vm_end - vma->vm_start, 235 vma->vm_end - vma->vm_start,
238 vma->vm_page_prot); 236 vma->vm_page_prot);
239#elif defined(pgprot_noncached) 237#elif defined(pgprot_noncached)
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 613aed9e1840..d1fe05e83882 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -53,6 +53,8 @@
53#include <linux/ioport.h> 53#include <linux/ioport.h>
54#include <linux/init.h> 54#include <linux/init.h>
55#include <linux/bitops.h> 55#include <linux/bitops.h>
56#include <linux/sched.h> /* cond_resched() */
57
56#include <asm/io.h> 58#include <asm/io.h>
57#include <asm/uaccess.h> 59#include <asm/uaccess.h>
58#include <asm/system.h> 60#include <asm/system.h>
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 5b1d3680c8ab..928b850cc679 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -256,7 +256,6 @@ static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
256static int sReadAiopID(ByteIO_t io); 256static int sReadAiopID(ByteIO_t io);
257static int sReadAiopNumChan(WordIO_t io); 257static int sReadAiopNumChan(WordIO_t io);
258 258
259#ifdef MODULE
260MODULE_AUTHOR("Theodore Ts'o"); 259MODULE_AUTHOR("Theodore Ts'o");
261MODULE_DESCRIPTION("Comtrol RocketPort driver"); 260MODULE_DESCRIPTION("Comtrol RocketPort driver");
262module_param(board1, ulong, 0); 261module_param(board1, ulong, 0);
@@ -288,17 +287,14 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1
288module_param_array(pc104_4, ulong, NULL, 0); 287module_param_array(pc104_4, ulong, NULL, 0);
289MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,..."); 288MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...");
290 289
291int rp_init(void); 290static int rp_init(void);
292static void rp_cleanup_module(void); 291static void rp_cleanup_module(void);
293 292
294module_init(rp_init); 293module_init(rp_init);
295module_exit(rp_cleanup_module); 294module_exit(rp_cleanup_module);
296 295
297#endif
298 296
299#ifdef MODULE_LICENSE
300MODULE_LICENSE("Dual BSD/GPL"); 297MODULE_LICENSE("Dual BSD/GPL");
301#endif
302 298
303/*************************************************************************/ 299/*************************************************************************/
304/* Module code starts here */ 300/* Module code starts here */
@@ -2378,7 +2374,7 @@ static struct tty_operations rocket_ops = {
2378/* 2374/*
2379 * The module "startup" routine; it's run when the module is loaded. 2375 * The module "startup" routine; it's run when the module is loaded.
2380 */ 2376 */
2381int __init rp_init(void) 2377static int __init rp_init(void)
2382{ 2378{
2383 int retval, pci_boards_found, isa_boards_found, i; 2379 int retval, pci_boards_found, isa_boards_found, i;
2384 2380
@@ -2502,7 +2498,6 @@ int __init rp_init(void)
2502 return 0; 2498 return 0;
2503} 2499}
2504 2500
2505#ifdef MODULE
2506 2501
2507static void rp_cleanup_module(void) 2502static void rp_cleanup_module(void)
2508{ 2503{
@@ -2530,7 +2525,6 @@ static void rp_cleanup_module(void)
2530 if (controller) 2525 if (controller)
2531 release_region(controller, 4); 2526 release_region(controller, 4);
2532} 2527}
2533#endif
2534 2528
2535/*************************************************************************** 2529/***************************************************************************
2536Function: sInitController 2530Function: sInitController
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index 6b4e9d155f50..dda30e42ec79 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -790,7 +790,7 @@ static int __init a2232board_init(void)
790 790
791 } 791 }
792 792
793 printk("Total: %d A2232 boards initialized.\n.", nr_a2232); /* Some status report if no card was found */ 793 printk("Total: %d A2232 boards initialized.\n", nr_a2232); /* Some status report if no card was found */
794 794
795 a2232_init_portstructs(); 795 a2232_init_portstructs();
796 796
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 50e0b612a8a2..352547eabf7b 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -38,19 +38,19 @@
38 * 38 *
39 * Revision 1.0: April 1st 1997. 39 * Revision 1.0: April 1st 1997.
40 * Initial release for alpha testing. 40 * Initial release for alpha testing.
41 * Revision 1.1: April 14th 1997. 41 * Revision 1.1: April 14th 1997.
42 * Incorporated Richard Hudsons suggestions, 42 * Incorporated Richard Hudsons suggestions,
43 * removed some debugging printk's. 43 * removed some debugging printk's.
44 * Revision 1.2: April 15th 1997. 44 * Revision 1.2: April 15th 1997.
45 * Ported to 2.1.x kernels. 45 * Ported to 2.1.x kernels.
46 * Revision 1.3: April 17th 1997 46 * Revision 1.3: April 17th 1997
47 * Backported to 2.0. (Compatibility macros). 47 * Backported to 2.0. (Compatibility macros).
48 * Revision 1.4: April 18th 1997 48 * Revision 1.4: April 18th 1997
49 * Fixed DTR/RTS bug that caused the card to indicate 49 * Fixed DTR/RTS bug that caused the card to indicate
50 * "don't send data" to a modem after the password prompt. 50 * "don't send data" to a modem after the password prompt.
51 * Fixed bug for premature (fake) interrupts. 51 * Fixed bug for premature (fake) interrupts.
52 * Revision 1.5: April 19th 1997 52 * Revision 1.5: April 19th 1997
53 * fixed a minor typo in the header file, cleanup a little. 53 * fixed a minor typo in the header file, cleanup a little.
54 * performance warnings are now MAXed at once per minute. 54 * performance warnings are now MAXed at once per minute.
55 * Revision 1.6: May 23 1997 55 * Revision 1.6: May 23 1997
56 * Changed the specialix=... format to include interrupt. 56 * Changed the specialix=... format to include interrupt.
@@ -60,10 +60,10 @@
60 * port to linux-2.1.43 kernel. 60 * port to linux-2.1.43 kernel.
61 * Revision 1.9: Oct 9 1998 61 * Revision 1.9: Oct 9 1998
62 * Added stuff for the IO8+/PCI version. 62 * Added stuff for the IO8+/PCI version.
63 * Revision 1.10: Oct 22 1999 / Jan 21 2000. 63 * Revision 1.10: Oct 22 1999 / Jan 21 2000.
64 * Added stuff for setserial. 64 * Added stuff for setserial.
65 * Nicolas Mailhot (Nicolas.Mailhot@email.enst.fr) 65 * Nicolas Mailhot (Nicolas.Mailhot@email.enst.fr)
66 * 66 *
67 */ 67 */
68 68
69#define VERSION "1.11" 69#define VERSION "1.11"
@@ -154,7 +154,7 @@ static int sx_poll = HZ;
154 154
155 155
156 156
157/* 157/*
158 * The following defines are mostly for testing purposes. But if you need 158 * The following defines are mostly for testing purposes. But if you need
159 * some nice reporting in your syslog, you can define them also. 159 * some nice reporting in your syslog, you can define them also.
160 */ 160 */
@@ -188,7 +188,7 @@ static DECLARE_MUTEX(tmp_buf_sem);
188 188
189static unsigned long baud_table[] = { 189static unsigned long baud_table[] = {
190 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 190 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
191 9600, 19200, 38400, 57600, 115200, 0, 191 9600, 19200, 38400, 57600, 115200, 0,
192}; 192};
193 193
194static struct specialix_board sx_board[SX_NBOARD] = { 194static struct specialix_board sx_board[SX_NBOARD] = {
@@ -216,7 +216,7 @@ static inline int sx_paranoia_check(struct specialix_port const * port,
216 KERN_ERR "sx: Warning: bad specialix port magic number for device %s in %s\n"; 216 KERN_ERR "sx: Warning: bad specialix port magic number for device %s in %s\n";
217 static const char *badinfo = 217 static const char *badinfo =
218 KERN_ERR "sx: Warning: null specialix port for device %s in %s\n"; 218 KERN_ERR "sx: Warning: null specialix port for device %s in %s\n";
219 219
220 if (!port) { 220 if (!port) {
221 printk(badinfo, name, routine); 221 printk(badinfo, name, routine);
222 return 1; 222 return 1;
@@ -231,9 +231,9 @@ static inline int sx_paranoia_check(struct specialix_port const * port,
231 231
232 232
233/* 233/*
234 * 234 *
235 * Service functions for specialix IO8+ driver. 235 * Service functions for specialix IO8+ driver.
236 * 236 *
237 */ 237 */
238 238
239/* Get board number from pointer */ 239/* Get board number from pointer */
@@ -246,7 +246,7 @@ static inline int board_No (struct specialix_board * bp)
246/* Get port number from pointer */ 246/* Get port number from pointer */
247static inline int port_No (struct specialix_port const * port) 247static inline int port_No (struct specialix_port const * port)
248{ 248{
249 return SX_PORT(port - sx_port); 249 return SX_PORT(port - sx_port);
250} 250}
251 251
252 252
@@ -309,7 +309,7 @@ static inline void sx_wait_CCR(struct specialix_board * bp)
309 return; 309 return;
310 udelay (1); 310 udelay (1);
311 } 311 }
312 312
313 printk(KERN_ERR "sx%d: Timeout waiting for CCR.\n", board_No(bp)); 313 printk(KERN_ERR "sx%d: Timeout waiting for CCR.\n", board_No(bp));
314} 314}
315 315
@@ -329,7 +329,7 @@ static inline void sx_wait_CCR_off(struct specialix_board * bp)
329 return; 329 return;
330 udelay (1); 330 udelay (1);
331 } 331 }
332 332
333 printk(KERN_ERR "sx%d: Timeout waiting for CCR.\n", board_No(bp)); 333 printk(KERN_ERR "sx%d: Timeout waiting for CCR.\n", board_No(bp));
334} 334}
335 335
@@ -338,34 +338,28 @@ static inline void sx_wait_CCR_off(struct specialix_board * bp)
338 * specialix IO8+ IO range functions. 338 * specialix IO8+ IO range functions.
339 */ 339 */
340 340
341static inline int sx_check_io_range(struct specialix_board * bp) 341static inline int sx_request_io_range(struct specialix_board * bp)
342{ 342{
343 return check_region (bp->base, SX_IO_SPACE); 343 return request_region(bp->base,
344} 344 bp->flags & SX_BOARD_IS_PCI ? SX_PCI_IO_SPACE : SX_IO_SPACE,
345 345 "specialix IO8+") == NULL;
346
347static inline void sx_request_io_range(struct specialix_board * bp)
348{
349 request_region(bp->base,
350 bp->flags&SX_BOARD_IS_PCI?SX_PCI_IO_SPACE:SX_IO_SPACE,
351 "specialix IO8+" );
352} 346}
353 347
354 348
355static inline void sx_release_io_range(struct specialix_board * bp) 349static inline void sx_release_io_range(struct specialix_board * bp)
356{ 350{
357 release_region(bp->base, 351 release_region(bp->base,
358 bp->flags&SX_BOARD_IS_PCI?SX_PCI_IO_SPACE:SX_IO_SPACE); 352 bp->flags&SX_BOARD_IS_PCI?SX_PCI_IO_SPACE:SX_IO_SPACE);
359} 353}
360 354
361 355
362/* Must be called with enabled interrupts */ 356/* Must be called with enabled interrupts */
363/* Ugly. Very ugly. Don't use this for anything else than initialization 357/* Ugly. Very ugly. Don't use this for anything else than initialization
364 code */ 358 code */
365static inline void sx_long_delay(unsigned long delay) 359static inline void sx_long_delay(unsigned long delay)
366{ 360{
367 unsigned long i; 361 unsigned long i;
368 362
369 for (i = jiffies + delay; time_after(i, jiffies); ) ; 363 for (i = jiffies + delay; time_after(i, jiffies); ) ;
370} 364}
371 365
@@ -378,7 +372,7 @@ static int sx_set_irq ( struct specialix_board *bp)
378 int i; 372 int i;
379 unsigned long flags; 373 unsigned long flags;
380 374
381 if (bp->flags & SX_BOARD_IS_PCI) 375 if (bp->flags & SX_BOARD_IS_PCI)
382 return 1; 376 return 1;
383 switch (bp->irq) { 377 switch (bp->irq) {
384 /* In the same order as in the docs... */ 378 /* In the same order as in the docs... */
@@ -420,7 +414,7 @@ static int sx_init_CD186x(struct specialix_board * bp)
420 sx_out_off(bp, CD186x_PILR3, SX_ACK_RINT); /* Prio for receiver intr */ 414 sx_out_off(bp, CD186x_PILR3, SX_ACK_RINT); /* Prio for receiver intr */
421 /* Set RegAckEn */ 415 /* Set RegAckEn */
422 sx_out_off(bp, CD186x_SRCR, sx_in (bp, CD186x_SRCR) | SRCR_REGACKEN); 416 sx_out_off(bp, CD186x_SRCR, sx_in (bp, CD186x_SRCR) | SRCR_REGACKEN);
423 417
424 /* Setting up prescaler. We need 4 ticks per 1 ms */ 418 /* Setting up prescaler. We need 4 ticks per 1 ms */
425 scaler = SX_OSCFREQ/SPECIALIX_TPS; 419 scaler = SX_OSCFREQ/SPECIALIX_TPS;
426 420
@@ -448,7 +442,7 @@ static int read_cross_byte (struct specialix_board *bp, int reg, int bit)
448 spin_lock_irqsave(&bp->lock, flags); 442 spin_lock_irqsave(&bp->lock, flags);
449 for (i=0, t=0;i<8;i++) { 443 for (i=0, t=0;i<8;i++) {
450 sx_out_off (bp, CD186x_CAR, i); 444 sx_out_off (bp, CD186x_CAR, i);
451 if (sx_in_off (bp, reg) & bit) 445 if (sx_in_off (bp, reg) & bit)
452 t |= 1 << i; 446 t |= 1 << i;
453 } 447 }
454 spin_unlock_irqrestore(&bp->lock, flags); 448 spin_unlock_irqrestore(&bp->lock, flags);
@@ -472,7 +466,7 @@ void missed_irq (unsigned long data)
472 spin_unlock_irqrestore(&bp->lock, flags); 466 spin_unlock_irqrestore(&bp->lock, flags);
473 if (irq) { 467 if (irq) {
474 printk (KERN_INFO "Missed interrupt... Calling int from timer. \n"); 468 printk (KERN_INFO "Missed interrupt... Calling int from timer. \n");
475 sx_interrupt (((struct specialix_board *)data)->irq, 469 sx_interrupt (((struct specialix_board *)data)->irq,
476 (void*)data, NULL); 470 (void*)data, NULL);
477 } 471 }
478 missed_irq_timer.expires = jiffies + sx_poll; 472 missed_irq_timer.expires = jiffies + sx_poll;
@@ -495,7 +489,7 @@ static int sx_probe(struct specialix_board *bp)
495 489
496 func_enter(); 490 func_enter();
497 491
498 if (sx_check_io_range(bp)) { 492 if (sx_request_io_range(bp)) {
499 func_exit(); 493 func_exit();
500 return 1; 494 return 1;
501 } 495 }
@@ -509,15 +503,16 @@ static int sx_probe(struct specialix_board *bp)
509 short_pause (); 503 short_pause ();
510 val2 = sx_in_off(bp, CD186x_PPRL); 504 val2 = sx_in_off(bp, CD186x_PPRL);
511 505
512 506
513 if ((val1 != 0x5a) || (val2 != 0xa5)) { 507 if ((val1 != 0x5a) || (val2 != 0xa5)) {
514 printk(KERN_INFO "sx%d: specialix IO8+ Board at 0x%03x not found.\n", 508 printk(KERN_INFO "sx%d: specialix IO8+ Board at 0x%03x not found.\n",
515 board_No(bp), bp->base); 509 board_No(bp), bp->base);
510 sx_release_io_range(bp);
516 func_exit(); 511 func_exit();
517 return 1; 512 return 1;
518 } 513 }
519 514
520 /* Check the DSR lines that Specialix uses as board 515 /* Check the DSR lines that Specialix uses as board
521 identification */ 516 identification */
522 val1 = read_cross_byte (bp, CD186x_MSVR, MSVR_DSR); 517 val1 = read_cross_byte (bp, CD186x_MSVR, MSVR_DSR);
523 val2 = read_cross_byte (bp, CD186x_MSVR, MSVR_RTS); 518 val2 = read_cross_byte (bp, CD186x_MSVR, MSVR_RTS);
@@ -532,6 +527,7 @@ static int sx_probe(struct specialix_board *bp)
532 if (val1 != val2) { 527 if (val1 != val2) {
533 printk(KERN_INFO "sx%d: specialix IO8+ ID %02x at 0x%03x not found (%02x).\n", 528 printk(KERN_INFO "sx%d: specialix IO8+ ID %02x at 0x%03x not found (%02x).\n",
534 board_No(bp), val2, bp->base, val1); 529 board_No(bp), val2, bp->base, val1);
530 sx_release_io_range(bp);
535 func_exit(); 531 func_exit();
536 return 1; 532 return 1;
537 } 533 }
@@ -546,7 +542,7 @@ static int sx_probe(struct specialix_board *bp)
546 sx_wait_CCR(bp); 542 sx_wait_CCR(bp);
547 sx_out(bp, CD186x_CCR, CCR_TXEN); /* Enable transmitter */ 543 sx_out(bp, CD186x_CCR, CCR_TXEN); /* Enable transmitter */
548 sx_out(bp, CD186x_IER, IER_TXRDY); /* Enable tx empty intr */ 544 sx_out(bp, CD186x_IER, IER_TXRDY); /* Enable tx empty intr */
549 sx_long_delay(HZ/20); 545 sx_long_delay(HZ/20);
550 irqs = probe_irq_off(irqs); 546 irqs = probe_irq_off(irqs);
551 547
552 dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR)); 548 dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR));
@@ -561,14 +557,15 @@ static int sx_probe(struct specialix_board *bp)
561 } 557 }
562 558
563 dprintk (SX_DEBUG_INIT "val1 = %02x, val2 = %02x, val3 = %02x.\n", 559 dprintk (SX_DEBUG_INIT "val1 = %02x, val2 = %02x, val3 = %02x.\n",
564 val1, val2, val3); 560 val1, val2, val3);
565 561
566 } 562 }
567 563
568#if 0 564#if 0
569 if (irqs <= 0) { 565 if (irqs <= 0) {
570 printk(KERN_ERR "sx%d: Can't find IRQ for specialix IO8+ board at 0x%03x.\n", 566 printk(KERN_ERR "sx%d: Can't find IRQ for specialix IO8+ board at 0x%03x.\n",
571 board_No(bp), bp->base); 567 board_No(bp), bp->base);
568 sx_release_io_range(bp);
572 func_exit(); 569 func_exit();
573 return 1; 570 return 1;
574 } 571 }
@@ -579,19 +576,20 @@ static int sx_probe(struct specialix_board *bp)
579#endif 576#endif
580 /* Reset CD186x again */ 577 /* Reset CD186x again */
581 if (!sx_init_CD186x(bp)) { 578 if (!sx_init_CD186x(bp)) {
579 sx_release_io_range(bp);
582 func_exit(); 580 func_exit();
583 return -EIO; 581 return 1;
584 } 582 }
585 583
586 sx_request_io_range(bp); 584 sx_request_io_range(bp);
587 bp->flags |= SX_BOARD_PRESENT; 585 bp->flags |= SX_BOARD_PRESENT;
588 586
589 /* Chip revcode pkgtype 587 /* Chip revcode pkgtype
590 GFRCR SRCR bit 7 588 GFRCR SRCR bit 7
591 CD180 rev B 0x81 0 589 CD180 rev B 0x81 0
592 CD180 rev C 0x82 0 590 CD180 rev C 0x82 0
593 CD1864 rev A 0x82 1 591 CD1864 rev A 0x82 1
594 CD1865 rev A 0x83 1 -- Do not use!!! Does not work. 592 CD1865 rev A 0x83 1 -- Do not use!!! Does not work.
595 CD1865 rev B 0x84 1 593 CD1865 rev B 0x84 1
596 -- Thanks to Gwen Wang, Cirrus Logic. 594 -- Thanks to Gwen Wang, Cirrus Logic.
597 */ 595 */
@@ -623,8 +621,8 @@ static int sx_probe(struct specialix_board *bp)
623 return 0; 621 return 0;
624} 622}
625 623
626/* 624/*
627 * 625 *
628 * Interrupt processing routines. 626 * Interrupt processing routines.
629 * */ 627 * */
630 628
@@ -657,7 +655,7 @@ static inline struct specialix_port * sx_get_port(struct specialix_board * bp,
657 return port; 655 return port;
658 } 656 }
659 } 657 }
660 printk(KERN_INFO "sx%d: %s interrupt from invalid port %d\n", 658 printk(KERN_INFO "sx%d: %s interrupt from invalid port %d\n",
661 board_No(bp), what, channel); 659 board_No(bp), what, channel);
662 return NULL; 660 return NULL;
663} 661}
@@ -681,7 +679,7 @@ static inline void sx_receive_exc(struct specialix_board * bp)
681 tty = port->tty; 679 tty = port->tty;
682 dprintk (SX_DEBUG_RX, "port: %p count: %d BUFF_SIZE: %d\n", 680 dprintk (SX_DEBUG_RX, "port: %p count: %d BUFF_SIZE: %d\n",
683 port, tty->flip.count, TTY_FLIPBUF_SIZE); 681 port, tty->flip.count, TTY_FLIPBUF_SIZE);
684 682
685 status = sx_in(bp, CD186x_RCSR); 683 status = sx_in(bp, CD186x_RCSR);
686 684
687 dprintk (SX_DEBUG_RX, "status: 0x%x\n", status); 685 dprintk (SX_DEBUG_RX, "status: 0x%x\n", status);
@@ -707,30 +705,30 @@ static inline void sx_receive_exc(struct specialix_board * bp)
707 return; 705 return;
708 } 706 }
709 if (status & RCSR_TOUT) { 707 if (status & RCSR_TOUT) {
710 printk(KERN_INFO "sx%d: port %d: Receiver timeout. Hardware problems ?\n", 708 printk(KERN_INFO "sx%d: port %d: Receiver timeout. Hardware problems ?\n",
711 board_No(bp), port_No(port)); 709 board_No(bp), port_No(port));
712 func_exit(); 710 func_exit();
713 return; 711 return;
714 712
715 } else if (status & RCSR_BREAK) { 713 } else if (status & RCSR_BREAK) {
716 dprintk(SX_DEBUG_RX, "sx%d: port %d: Handling break...\n", 714 dprintk(SX_DEBUG_RX, "sx%d: port %d: Handling break...\n",
717 board_No(bp), port_No(port)); 715 board_No(bp), port_No(port));
718 *tty->flip.flag_buf_ptr++ = TTY_BREAK; 716 *tty->flip.flag_buf_ptr++ = TTY_BREAK;
719 if (port->flags & ASYNC_SAK) 717 if (port->flags & ASYNC_SAK)
720 do_SAK(tty); 718 do_SAK(tty);
721 719
722 } else if (status & RCSR_PE) 720 } else if (status & RCSR_PE)
723 *tty->flip.flag_buf_ptr++ = TTY_PARITY; 721 *tty->flip.flag_buf_ptr++ = TTY_PARITY;
724 722
725 else if (status & RCSR_FE) 723 else if (status & RCSR_FE)
726 *tty->flip.flag_buf_ptr++ = TTY_FRAME; 724 *tty->flip.flag_buf_ptr++ = TTY_FRAME;
727 725
728 else if (status & RCSR_OE) 726 else if (status & RCSR_OE)
729 *tty->flip.flag_buf_ptr++ = TTY_OVERRUN; 727 *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
730 728
731 else 729 else
732 *tty->flip.flag_buf_ptr++ = 0; 730 *tty->flip.flag_buf_ptr++ = 0;
733 731
734 *tty->flip.char_buf_ptr++ = ch; 732 *tty->flip.char_buf_ptr++ = ch;
735 tty->flip.count++; 733 tty->flip.count++;
736 schedule_delayed_work(&tty->flip.work, 1); 734 schedule_delayed_work(&tty->flip.work, 1);
@@ -746,18 +744,18 @@ static inline void sx_receive(struct specialix_board * bp)
746 unsigned char count; 744 unsigned char count;
747 745
748 func_enter(); 746 func_enter();
749 747
750 if (!(port = sx_get_port(bp, "Receive"))) { 748 if (!(port = sx_get_port(bp, "Receive"))) {
751 dprintk (SX_DEBUG_RX, "Hmm, couldn't find port.\n"); 749 dprintk (SX_DEBUG_RX, "Hmm, couldn't find port.\n");
752 func_exit(); 750 func_exit();
753 return; 751 return;
754 } 752 }
755 tty = port->tty; 753 tty = port->tty;
756 754
757 count = sx_in(bp, CD186x_RDCR); 755 count = sx_in(bp, CD186x_RDCR);
758 dprintk (SX_DEBUG_RX, "port: %p: count: %d\n", port, count); 756 dprintk (SX_DEBUG_RX, "port: %p: count: %d\n", port, count);
759 port->hits[count > 8 ? 9 : count]++; 757 port->hits[count > 8 ? 9 : count]++;
760 758
761 while (count--) { 759 while (count--) {
762 if (tty->flip.count >= TTY_FLIPBUF_SIZE) { 760 if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
763 printk(KERN_INFO "sx%d: port %d: Working around flip buffer overflow.\n", 761 printk(KERN_INFO "sx%d: port %d: Working around flip buffer overflow.\n",
@@ -787,7 +785,7 @@ static inline void sx_transmit(struct specialix_board * bp)
787 } 785 }
788 dprintk (SX_DEBUG_TX, "port: %p\n", port); 786 dprintk (SX_DEBUG_TX, "port: %p\n", port);
789 tty = port->tty; 787 tty = port->tty;
790 788
791 if (port->IER & IER_TXEMPTY) { 789 if (port->IER & IER_TXEMPTY) {
792 /* FIFO drained */ 790 /* FIFO drained */
793 sx_out(bp, CD186x_CAR, port_No(port)); 791 sx_out(bp, CD186x_CAR, port_No(port));
@@ -796,7 +794,7 @@ static inline void sx_transmit(struct specialix_board * bp)
796 func_exit(); 794 func_exit();
797 return; 795 return;
798 } 796 }
799 797
800 if ((port->xmit_cnt <= 0 && !port->break_length) 798 if ((port->xmit_cnt <= 0 && !port->break_length)
801 || tty->stopped || tty->hw_stopped) { 799 || tty->stopped || tty->hw_stopped) {
802 sx_out(bp, CD186x_CAR, port_No(port)); 800 sx_out(bp, CD186x_CAR, port_No(port));
@@ -805,7 +803,7 @@ static inline void sx_transmit(struct specialix_board * bp)
805 func_exit(); 803 func_exit();
806 return; 804 return;
807 } 805 }
808 806
809 if (port->break_length) { 807 if (port->break_length) {
810 if (port->break_length > 0) { 808 if (port->break_length > 0) {
811 if (port->COR2 & COR2_ETC) { 809 if (port->COR2 & COR2_ETC) {
@@ -831,7 +829,7 @@ static inline void sx_transmit(struct specialix_board * bp)
831 func_exit(); 829 func_exit();
832 return; 830 return;
833 } 831 }
834 832
835 count = CD186x_NFIFO; 833 count = CD186x_NFIFO;
836 do { 834 do {
837 sx_out(bp, CD186x_TDR, port->xmit_buf[port->xmit_tail++]); 835 sx_out(bp, CD186x_TDR, port->xmit_buf[port->xmit_tail++]);
@@ -839,7 +837,7 @@ static inline void sx_transmit(struct specialix_board * bp)
839 if (--port->xmit_cnt <= 0) 837 if (--port->xmit_cnt <= 0)
840 break; 838 break;
841 } while (--count > 0); 839 } while (--count > 0);
842 840
843 if (port->xmit_cnt <= 0) { 841 if (port->xmit_cnt <= 0) {
844 sx_out(bp, CD186x_CAR, port_No(port)); 842 sx_out(bp, CD186x_CAR, port_No(port));
845 port->IER &= ~IER_TXRDY; 843 port->IER &= ~IER_TXRDY;
@@ -862,9 +860,9 @@ static inline void sx_check_modem(struct specialix_board * bp)
862 dprintk (SX_DEBUG_SIGNALS, "Modem intr. "); 860 dprintk (SX_DEBUG_SIGNALS, "Modem intr. ");
863 if (!(port = sx_get_port(bp, "Modem"))) 861 if (!(port = sx_get_port(bp, "Modem")))
864 return; 862 return;
865 863
866 tty = port->tty; 864 tty = port->tty;
867 865
868 mcr = sx_in(bp, CD186x_MCR); 866 mcr = sx_in(bp, CD186x_MCR);
869 printk ("mcr = %02x.\n", mcr); 867 printk ("mcr = %02x.\n", mcr);
870 868
@@ -879,7 +877,7 @@ static inline void sx_check_modem(struct specialix_board * bp)
879 schedule_work(&port->tqueue_hangup); 877 schedule_work(&port->tqueue_hangup);
880 } 878 }
881 } 879 }
882 880
883#ifdef SPECIALIX_BRAIN_DAMAGED_CTS 881#ifdef SPECIALIX_BRAIN_DAMAGED_CTS
884 if (mcr & MCR_CTSCHG) { 882 if (mcr & MCR_CTSCHG) {
885 if (sx_in(bp, CD186x_MSVR) & MSVR_CTS) { 883 if (sx_in(bp, CD186x_MSVR) & MSVR_CTS) {
@@ -906,7 +904,7 @@ static inline void sx_check_modem(struct specialix_board * bp)
906 sx_out(bp, CD186x_IER, port->IER); 904 sx_out(bp, CD186x_IER, port->IER);
907 } 905 }
908#endif /* SPECIALIX_BRAIN_DAMAGED_CTS */ 906#endif /* SPECIALIX_BRAIN_DAMAGED_CTS */
909 907
910 /* Clear change bits */ 908 /* Clear change bits */
911 sx_out(bp, CD186x_MCR, 0); 909 sx_out(bp, CD186x_MCR, 0);
912} 910}
@@ -940,7 +938,7 @@ static irqreturn_t sx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
940 while ((++loop < 16) && (status = (sx_in(bp, CD186x_SRSR) & 938 while ((++loop < 16) && (status = (sx_in(bp, CD186x_SRSR) &
941 (SRSR_RREQint | 939 (SRSR_RREQint |
942 SRSR_TREQint | 940 SRSR_TREQint |
943 SRSR_MREQint)))) { 941 SRSR_MREQint)))) {
944 if (status & SRSR_RREQint) { 942 if (status & SRSR_RREQint) {
945 ack = sx_in(bp, CD186x_RRAR); 943 ack = sx_in(bp, CD186x_RRAR);
946 944
@@ -951,7 +949,7 @@ static irqreturn_t sx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
951 else 949 else
952 printk(KERN_ERR "sx%d: status: 0x%x Bad receive ack 0x%02x.\n", 950 printk(KERN_ERR "sx%d: status: 0x%x Bad receive ack 0x%02x.\n",
953 board_No(bp), status, ack); 951 board_No(bp), status, ack);
954 952
955 } else if (status & SRSR_TREQint) { 953 } else if (status & SRSR_TREQint) {
956 ack = sx_in(bp, CD186x_TRAR); 954 ack = sx_in(bp, CD186x_TRAR);
957 955
@@ -963,13 +961,13 @@ static irqreturn_t sx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
963 } else if (status & SRSR_MREQint) { 961 } else if (status & SRSR_MREQint) {
964 ack = sx_in(bp, CD186x_MRAR); 962 ack = sx_in(bp, CD186x_MRAR);
965 963
966 if (ack == (SX_ID | GIVR_IT_MODEM)) 964 if (ack == (SX_ID | GIVR_IT_MODEM))
967 sx_check_modem(bp); 965 sx_check_modem(bp);
968 else 966 else
969 printk(KERN_ERR "sx%d: status: 0x%x Bad modem ack 0x%02x.\n", 967 printk(KERN_ERR "sx%d: status: 0x%x Bad modem ack 0x%02x.\n",
970 board_No(bp), status, ack); 968 board_No(bp), status, ack);
971 969
972 } 970 }
973 971
974 sx_out(bp, CD186x_EOIR, 0); /* Mark end of interrupt */ 972 sx_out(bp, CD186x_EOIR, 0); /* Mark end of interrupt */
975 } 973 }
@@ -1026,7 +1024,7 @@ static inline int sx_setup_board(struct specialix_board * bp)
1026{ 1024{
1027 int error; 1025 int error;
1028 1026
1029 if (bp->flags & SX_BOARD_ACTIVE) 1027 if (bp->flags & SX_BOARD_ACTIVE)
1030 return 0; 1028 return 0;
1031 1029
1032 if (bp->flags & SX_BOARD_IS_PCI) 1030 if (bp->flags & SX_BOARD_IS_PCI)
@@ -1034,7 +1032,7 @@ static inline int sx_setup_board(struct specialix_board * bp)
1034 else 1032 else
1035 error = request_irq(bp->irq, sx_interrupt, SA_INTERRUPT, "specialix IO8+", bp); 1033 error = request_irq(bp->irq, sx_interrupt, SA_INTERRUPT, "specialix IO8+", bp);
1036 1034
1037 if (error) 1035 if (error)
1038 return error; 1036 return error;
1039 1037
1040 turn_ints_on (bp); 1038 turn_ints_on (bp);
@@ -1055,7 +1053,7 @@ static inline void sx_shutdown_board(struct specialix_board *bp)
1055 } 1053 }
1056 1054
1057 bp->flags &= ~SX_BOARD_ACTIVE; 1055 bp->flags &= ~SX_BOARD_ACTIVE;
1058 1056
1059 dprintk (SX_DEBUG_IRQ, "Freeing IRQ%d for board %d.\n", 1057 dprintk (SX_DEBUG_IRQ, "Freeing IRQ%d for board %d.\n",
1060 bp->irq, board_No (bp)); 1058 bp->irq, board_No (bp));
1061 free_irq(bp->irq, bp); 1059 free_irq(bp->irq, bp);
@@ -1068,7 +1066,7 @@ static inline void sx_shutdown_board(struct specialix_board *bp)
1068 1066
1069 1067
1070/* 1068/*
1071 * Setting up port characteristics. 1069 * Setting up port characteristics.
1072 * Must be called with disabled interrupts 1070 * Must be called with disabled interrupts
1073 */ 1071 */
1074static void sx_change_speed(struct specialix_board *bp, struct specialix_port *port) 1072static void sx_change_speed(struct specialix_board *bp, struct specialix_port *port)
@@ -1103,10 +1101,10 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1103 spin_unlock_irqrestore(&bp->lock, flags); 1101 spin_unlock_irqrestore(&bp->lock, flags);
1104 dprintk (SX_DEBUG_TERMIOS, "sx: got MSVR=%02x.\n", port->MSVR); 1102 dprintk (SX_DEBUG_TERMIOS, "sx: got MSVR=%02x.\n", port->MSVR);
1105 baud = C_BAUD(tty); 1103 baud = C_BAUD(tty);
1106 1104
1107 if (baud & CBAUDEX) { 1105 if (baud & CBAUDEX) {
1108 baud &= ~CBAUDEX; 1106 baud &= ~CBAUDEX;
1109 if (baud < 1 || baud > 2) 1107 if (baud < 1 || baud > 2)
1110 port->tty->termios->c_cflag &= ~CBAUDEX; 1108 port->tty->termios->c_cflag &= ~CBAUDEX;
1111 else 1109 else
1112 baud += 15; 1110 baud += 15;
@@ -1117,8 +1115,8 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1117 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) 1115 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
1118 baud += 2; 1116 baud += 2;
1119 } 1117 }
1120 1118
1121 1119
1122 if (!baud_table[baud]) { 1120 if (!baud_table[baud]) {
1123 /* Drop DTR & exit */ 1121 /* Drop DTR & exit */
1124 dprintk (SX_DEBUG_TERMIOS, "Dropping DTR... Hmm....\n"); 1122 dprintk (SX_DEBUG_TERMIOS, "Dropping DTR... Hmm....\n");
@@ -1127,7 +1125,7 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1127 spin_lock_irqsave(&bp->lock, flags); 1125 spin_lock_irqsave(&bp->lock, flags);
1128 sx_out(bp, CD186x_MSVR, port->MSVR ); 1126 sx_out(bp, CD186x_MSVR, port->MSVR );
1129 spin_unlock_irqrestore(&bp->lock, flags); 1127 spin_unlock_irqrestore(&bp->lock, flags);
1130 } 1128 }
1131 else 1129 else
1132 dprintk (SX_DEBUG_TERMIOS, "Can't drop DTR: no DTR.\n"); 1130 dprintk (SX_DEBUG_TERMIOS, "Can't drop DTR: no DTR.\n");
1133 return; 1131 return;
@@ -1137,9 +1135,9 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1137 port ->MSVR |= MSVR_DTR; 1135 port ->MSVR |= MSVR_DTR;
1138 } 1136 }
1139 } 1137 }
1140 1138
1141 /* 1139 /*
1142 	 * Now we must calculate some speed dependent things 1140 	 * Now we must calculate some speed dependent things
1143 */ 1141 */
1144 1142
1145 /* Set baud rate for port */ 1143 /* Set baud rate for port */
@@ -1152,7 +1150,7 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1152 tmp = (((SX_OSCFREQ + baud_table[baud]/2) / baud_table[baud] + 1150 tmp = (((SX_OSCFREQ + baud_table[baud]/2) / baud_table[baud] +
1153 CD186x_TPC/2) / CD186x_TPC); 1151 CD186x_TPC/2) / CD186x_TPC);
1154 1152
1155 if ((tmp < 0x10) && time_before(again, jiffies)) { 1153 if ((tmp < 0x10) && time_before(again, jiffies)) {
1156 again = jiffies + HZ * 60; 1154 again = jiffies + HZ * 60;
1157 /* Page 48 of version 2.0 of the CL-CD1865 databook */ 1155 /* Page 48 of version 2.0 of the CL-CD1865 databook */
1158 if (tmp >= 12) { 1156 if (tmp >= 12) {
@@ -1164,27 +1162,27 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1164 printk (KERN_INFO "sx%d: Baud rate divisor is %ld. \n" 1162 printk (KERN_INFO "sx%d: Baud rate divisor is %ld. \n"
1165 "Warning: overstressing Cirrus chip. " 1163 "Warning: overstressing Cirrus chip. "
1166 "This might not work.\n" 1164 "This might not work.\n"
1167 "Read specialix.txt for more info.\n", 1165 "Read specialix.txt for more info.\n",
1168 port_No (port), tmp); 1166 port_No (port), tmp);
1169 } 1167 }
1170 } 1168 }
1171 spin_lock_irqsave(&bp->lock, flags); 1169 spin_lock_irqsave(&bp->lock, flags);
1172 sx_out(bp, CD186x_RBPRH, (tmp >> 8) & 0xff); 1170 sx_out(bp, CD186x_RBPRH, (tmp >> 8) & 0xff);
1173 sx_out(bp, CD186x_TBPRH, (tmp >> 8) & 0xff); 1171 sx_out(bp, CD186x_TBPRH, (tmp >> 8) & 0xff);
1174 sx_out(bp, CD186x_RBPRL, tmp & 0xff); 1172 sx_out(bp, CD186x_RBPRL, tmp & 0xff);
1175 sx_out(bp, CD186x_TBPRL, tmp & 0xff); 1173 sx_out(bp, CD186x_TBPRL, tmp & 0xff);
1176 spin_unlock_irqrestore(&bp->lock, flags); 1174 spin_unlock_irqrestore(&bp->lock, flags);
1177 if (port->custom_divisor) { 1175 if (port->custom_divisor) {
1178 baud = (SX_OSCFREQ + port->custom_divisor/2) / port->custom_divisor; 1176 baud = (SX_OSCFREQ + port->custom_divisor/2) / port->custom_divisor;
1179 baud = ( baud + 5 ) / 10; 1177 baud = ( baud + 5 ) / 10;
1180 } else 1178 } else
1181 baud = (baud_table[baud] + 5) / 10; /* Estimated CPS */ 1179 baud = (baud_table[baud] + 5) / 10; /* Estimated CPS */
1182 1180
1183 	/* Two timer ticks seem enough to wake up something like the SLIP driver */ 1181 	/* Two timer ticks seem enough to wake up something like the SLIP driver */
1184 tmp = ((baud + HZ/2) / HZ) * 2 - CD186x_NFIFO; 1182 tmp = ((baud + HZ/2) / HZ) * 2 - CD186x_NFIFO;
1185 port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ? 1183 port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ?
1186 SERIAL_XMIT_SIZE - 1 : tmp); 1184 SERIAL_XMIT_SIZE - 1 : tmp);
1187 1185
1188 /* Receiver timeout will be transmission time for 1.5 chars */ 1186 /* Receiver timeout will be transmission time for 1.5 chars */
1189 tmp = (SPECIALIX_TPS + SPECIALIX_TPS/2 + baud/2) / baud; 1187 tmp = (SPECIALIX_TPS + SPECIALIX_TPS/2 + baud/2) / baud;
1190 tmp = (tmp > 0xff) ? 0xff : tmp; 1188 tmp = (tmp > 0xff) ? 0xff : tmp;
@@ -1205,29 +1203,29 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1205 cor1 |= COR1_8BITS; 1203 cor1 |= COR1_8BITS;
1206 break; 1204 break;
1207 } 1205 }
1208 1206
1209 if (C_CSTOPB(tty)) 1207 if (C_CSTOPB(tty))
1210 cor1 |= COR1_2SB; 1208 cor1 |= COR1_2SB;
1211 1209
1212 cor1 |= COR1_IGNORE; 1210 cor1 |= COR1_IGNORE;
1213 if (C_PARENB(tty)) { 1211 if (C_PARENB(tty)) {
1214 cor1 |= COR1_NORMPAR; 1212 cor1 |= COR1_NORMPAR;
1215 if (C_PARODD(tty)) 1213 if (C_PARODD(tty))
1216 cor1 |= COR1_ODDP; 1214 cor1 |= COR1_ODDP;
1217 if (I_INPCK(tty)) 1215 if (I_INPCK(tty))
1218 cor1 &= ~COR1_IGNORE; 1216 cor1 &= ~COR1_IGNORE;
1219 } 1217 }
1220 /* Set marking of some errors */ 1218 /* Set marking of some errors */
1221 port->mark_mask = RCSR_OE | RCSR_TOUT; 1219 port->mark_mask = RCSR_OE | RCSR_TOUT;
1222 if (I_INPCK(tty)) 1220 if (I_INPCK(tty))
1223 port->mark_mask |= RCSR_FE | RCSR_PE; 1221 port->mark_mask |= RCSR_FE | RCSR_PE;
1224 if (I_BRKINT(tty) || I_PARMRK(tty)) 1222 if (I_BRKINT(tty) || I_PARMRK(tty))
1225 port->mark_mask |= RCSR_BREAK; 1223 port->mark_mask |= RCSR_BREAK;
1226 if (I_IGNPAR(tty)) 1224 if (I_IGNPAR(tty))
1227 port->mark_mask &= ~(RCSR_FE | RCSR_PE); 1225 port->mark_mask &= ~(RCSR_FE | RCSR_PE);
1228 if (I_IGNBRK(tty)) { 1226 if (I_IGNBRK(tty)) {
1229 port->mark_mask &= ~RCSR_BREAK; 1227 port->mark_mask &= ~RCSR_BREAK;
1230 if (I_IGNPAR(tty)) 1228 if (I_IGNPAR(tty))
1231 /* Real raw mode. Ignore all */ 1229 /* Real raw mode. Ignore all */
1232 port->mark_mask &= ~RCSR_OE; 1230 port->mark_mask &= ~RCSR_OE;
1233 } 1231 }
@@ -1241,7 +1239,7 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1241 tty->hw_stopped = !(sx_in(bp, CD186x_MSVR) & (MSVR_CTS|MSVR_DSR)); 1239 tty->hw_stopped = !(sx_in(bp, CD186x_MSVR) & (MSVR_CTS|MSVR_DSR));
1242 spin_unlock_irqrestore(&bp->lock, flags); 1240 spin_unlock_irqrestore(&bp->lock, flags);
1243#else 1241#else
1244 port->COR2 |= COR2_CTSAE; 1242 port->COR2 |= COR2_CTSAE;
1245#endif 1243#endif
1246 } 1244 }
1247 /* Enable Software Flow Control. FIXME: I'm not sure about this */ 1245 /* Enable Software Flow Control. FIXME: I'm not sure about this */
@@ -1264,11 +1262,11 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
1264 mcor1 |= MCOR1_CDZD; 1262 mcor1 |= MCOR1_CDZD;
1265 mcor2 |= MCOR2_CDOD; 1263 mcor2 |= MCOR2_CDOD;
1266 } 1264 }
1267 1265
1268 if (C_CREAD(tty)) 1266 if (C_CREAD(tty))
1269 /* Enable receiver */ 1267 /* Enable receiver */
1270 port->IER |= IER_RXD; 1268 port->IER |= IER_RXD;
1271 1269
1272 /* Set input FIFO size (1-8 bytes) */ 1270 /* Set input FIFO size (1-8 bytes) */
1273 cor3 |= sx_rxfifo; 1271 cor3 |= sx_rxfifo;
1274 /* Setting up CD186x channel registers */ 1272 /* Setting up CD186x channel registers */
@@ -1311,11 +1309,11 @@ static int sx_setup_port(struct specialix_board *bp, struct specialix_port *port
1311 func_exit(); 1309 func_exit();
1312 return 0; 1310 return 0;
1313 } 1311 }
1314 1312
1315 if (!port->xmit_buf) { 1313 if (!port->xmit_buf) {
1316 /* We may sleep in get_zeroed_page() */ 1314 /* We may sleep in get_zeroed_page() */
1317 unsigned long tmp; 1315 unsigned long tmp;
1318 1316
1319 if (!(tmp = get_zeroed_page(GFP_KERNEL))) { 1317 if (!(tmp = get_zeroed_page(GFP_KERNEL))) {
1320 func_exit(); 1318 func_exit();
1321 return -ENOMEM; 1319 return -ENOMEM;
@@ -1328,10 +1326,10 @@ static int sx_setup_port(struct specialix_board *bp, struct specialix_port *port
1328 } 1326 }
1329 port->xmit_buf = (unsigned char *) tmp; 1327 port->xmit_buf = (unsigned char *) tmp;
1330 } 1328 }
1331 1329
1332 spin_lock_irqsave(&port->lock, flags); 1330 spin_lock_irqsave(&port->lock, flags);
1333 1331
1334 if (port->tty) 1332 if (port->tty)
1335 clear_bit(TTY_IO_ERROR, &port->tty->flags); 1333 clear_bit(TTY_IO_ERROR, &port->tty->flags);
1336 1334
1337 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; 1335 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
@@ -1340,7 +1338,7 @@ static int sx_setup_port(struct specialix_board *bp, struct specialix_port *port
1340 1338
1341 spin_unlock_irqrestore(&port->lock, flags); 1339 spin_unlock_irqrestore(&port->lock, flags);
1342 1340
1343 1341
1344 func_exit(); 1342 func_exit();
1345 return 0; 1343 return 0;
1346} 1344}
@@ -1352,14 +1350,14 @@ static void sx_shutdown_port(struct specialix_board *bp, struct specialix_port *
1352 struct tty_struct *tty; 1350 struct tty_struct *tty;
1353 int i; 1351 int i;
1354 unsigned long flags; 1352 unsigned long flags;
1355 1353
1356 func_enter(); 1354 func_enter();
1357 1355
1358 if (!(port->flags & ASYNC_INITIALIZED)) { 1356 if (!(port->flags & ASYNC_INITIALIZED)) {
1359 func_exit(); 1357 func_exit();
1360 return; 1358 return;
1361 } 1359 }
1362 1360
1363 if (sx_debug & SX_DEBUG_FIFO) { 1361 if (sx_debug & SX_DEBUG_FIFO) {
1364 dprintk(SX_DEBUG_FIFO, "sx%d: port %d: %ld overruns, FIFO hits [ ", 1362 dprintk(SX_DEBUG_FIFO, "sx%d: port %d: %ld overruns, FIFO hits [ ",
1365 board_No(bp), port_No(port), port->overrun); 1363 board_No(bp), port_No(port), port->overrun);
@@ -1394,13 +1392,13 @@ static void sx_shutdown_port(struct specialix_board *bp, struct specialix_port *
1394 if (tty) 1392 if (tty)
1395 set_bit(TTY_IO_ERROR, &tty->flags); 1393 set_bit(TTY_IO_ERROR, &tty->flags);
1396 port->flags &= ~ASYNC_INITIALIZED; 1394 port->flags &= ~ASYNC_INITIALIZED;
1397 1395
1398 if (!bp->count) 1396 if (!bp->count)
1399 sx_shutdown_board(bp); 1397 sx_shutdown_board(bp);
1400 func_exit(); 1398 func_exit();
1401} 1399}
1402 1400
1403 1401
1404static int block_til_ready(struct tty_struct *tty, struct file * filp, 1402static int block_til_ready(struct tty_struct *tty, struct file * filp,
1405 struct specialix_port *port) 1403 struct specialix_port *port)
1406{ 1404{
@@ -1427,7 +1425,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
1427 return -ERESTARTSYS; 1425 return -ERESTARTSYS;
1428 } 1426 }
1429 } 1427 }
1430 1428
1431 /* 1429 /*
1432 * If non-blocking mode is set, or the port is not enabled, 1430 * If non-blocking mode is set, or the port is not enabled,
1433 * then make the check up front and then exit. 1431 * then make the check up front and then exit.
@@ -1477,7 +1475,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
1477 if (port->flags & ASYNC_HUP_NOTIFY) 1475 if (port->flags & ASYNC_HUP_NOTIFY)
1478 retval = -EAGAIN; 1476 retval = -EAGAIN;
1479 else 1477 else
1480 retval = -ERESTARTSYS; 1478 retval = -ERESTARTSYS;
1481 break; 1479 break;
1482 } 1480 }
1483 if (!(port->flags & ASYNC_CLOSING) && 1481 if (!(port->flags & ASYNC_CLOSING) &&
@@ -1506,7 +1504,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
1506 port->flags |= ASYNC_NORMAL_ACTIVE; 1504 port->flags |= ASYNC_NORMAL_ACTIVE;
1507 func_exit(); 1505 func_exit();
1508 return 0; 1506 return 0;
1509} 1507}
1510 1508
1511 1509
1512static int sx_open(struct tty_struct * tty, struct file * filp) 1510static int sx_open(struct tty_struct * tty, struct file * filp)
@@ -1526,7 +1524,7 @@ static int sx_open(struct tty_struct * tty, struct file * filp)
1526 func_exit(); 1524 func_exit();
1527 return -ENODEV; 1525 return -ENODEV;
1528 } 1526 }
1529 1527
1530 bp = &sx_board[board]; 1528 bp = &sx_board[board];
1531 port = sx_port + board * SX_NPORT + SX_PORT(tty->index); 1529 port = sx_port + board * SX_NPORT + SX_PORT(tty->index);
1532 port->overrun = 0; 1530 port->overrun = 0;
@@ -1557,7 +1555,7 @@ static int sx_open(struct tty_struct * tty, struct file * filp)
1557 func_enter(); 1555 func_enter();
1558 return error; 1556 return error;
1559 } 1557 }
1560 1558
1561 if ((error = block_til_ready(tty, filp, port))) { 1559 if ((error = block_til_ready(tty, filp, port))) {
1562 func_enter(); 1560 func_enter();
1563 return error; 1561 return error;
@@ -1574,7 +1572,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
1574 struct specialix_board *bp; 1572 struct specialix_board *bp;
1575 unsigned long flags; 1573 unsigned long flags;
1576 unsigned long timeout; 1574 unsigned long timeout;
1577 1575
1578 func_enter(); 1576 func_enter();
1579 if (!port || sx_paranoia_check(port, tty->name, "close")) { 1577 if (!port || sx_paranoia_check(port, tty->name, "close")) {
1580 func_exit(); 1578 func_exit();
@@ -1587,7 +1585,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
1587 func_exit(); 1585 func_exit();
1588 return; 1586 return;
1589 } 1587 }
1590 1588
1591 bp = port_Board(port); 1589 bp = port_Board(port);
1592 if ((tty->count == 1) && (port->count != 1)) { 1590 if ((tty->count == 1) && (port->count != 1)) {
1593 printk(KERN_ERR "sx%d: sx_close: bad port count;" 1591 printk(KERN_ERR "sx%d: sx_close: bad port count;"
@@ -1607,7 +1605,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
1607 } 1605 }
1608 port->flags |= ASYNC_CLOSING; 1606 port->flags |= ASYNC_CLOSING;
1609 /* 1607 /*
1610 * Now we wait for the transmit buffer to clear; and we notify 1608 * Now we wait for the transmit buffer to clear; and we notify
1611 * the line discipline to only process XON/XOFF characters. 1609 * the line discipline to only process XON/XOFF characters.
1612 */ 1610 */
1613 tty->closing = 1; 1611 tty->closing = 1;
@@ -1681,7 +1679,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
1681} 1679}
1682 1680
1683 1681
1684static int sx_write(struct tty_struct * tty, 1682static int sx_write(struct tty_struct * tty,
1685 const unsigned char *buf, int count) 1683 const unsigned char *buf, int count)
1686{ 1684{
1687 struct specialix_port *port = (struct specialix_port *)tty->driver_data; 1685 struct specialix_port *port = (struct specialix_port *)tty->driver_data;
@@ -1694,7 +1692,7 @@ static int sx_write(struct tty_struct * tty,
1694 func_exit(); 1692 func_exit();
1695 return 0; 1693 return 0;
1696 } 1694 }
1697 1695
1698 bp = port_Board(port); 1696 bp = port_Board(port);
1699 1697
1700 if (!tty || !port->xmit_buf || !tmp_buf) { 1698 if (!tty || !port->xmit_buf || !tmp_buf) {
@@ -1824,7 +1822,7 @@ static int sx_chars_in_buffer(struct tty_struct *tty)
1824 struct specialix_port *port = (struct specialix_port *)tty->driver_data; 1822 struct specialix_port *port = (struct specialix_port *)tty->driver_data;
1825 1823
1826 func_enter(); 1824 func_enter();
1827 1825
1828 if (sx_paranoia_check(port, tty->name, "sx_chars_in_buffer")) { 1826 if (sx_paranoia_check(port, tty->name, "sx_chars_in_buffer")) {
1829 func_exit(); 1827 func_exit();
1830 return 0; 1828 return 0;
@@ -1881,13 +1879,13 @@ static int sx_tiocmget(struct tty_struct *tty, struct file *file)
1881 port_No(port), status, sx_in (bp, CD186x_CAR)); 1879 port_No(port), status, sx_in (bp, CD186x_CAR));
1882 dprintk (SX_DEBUG_INIT, "sx_port = %p, port = %p\n", sx_port, port); 1880 dprintk (SX_DEBUG_INIT, "sx_port = %p, port = %p\n", sx_port, port);
1883 if (SX_CRTSCTS(port->tty)) { 1881 if (SX_CRTSCTS(port->tty)) {
1884 result = /* (status & MSVR_RTS) ? */ TIOCM_DTR /* : 0) */ 1882 result = /* (status & MSVR_RTS) ? */ TIOCM_DTR /* : 0) */
1885 | ((status & MSVR_DTR) ? TIOCM_RTS : 0) 1883 | ((status & MSVR_DTR) ? TIOCM_RTS : 0)
1886 | ((status & MSVR_CD) ? TIOCM_CAR : 0) 1884 | ((status & MSVR_CD) ? TIOCM_CAR : 0)
1887 |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */ 1885 |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */
1888 | ((status & MSVR_CTS) ? TIOCM_CTS : 0); 1886 | ((status & MSVR_CTS) ? TIOCM_CTS : 0);
1889 } else { 1887 } else {
1890 result = /* (status & MSVR_RTS) ? */ TIOCM_RTS /* : 0) */ 1888 result = /* (status & MSVR_RTS) ? */ TIOCM_RTS /* : 0) */
1891 | ((status & MSVR_DTR) ? TIOCM_DTR : 0) 1889 | ((status & MSVR_DTR) ? TIOCM_DTR : 0)
1892 | ((status & MSVR_CD) ? TIOCM_CAR : 0) 1890 | ((status & MSVR_CD) ? TIOCM_CAR : 0)
1893 |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */ 1891 |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */
@@ -1955,7 +1953,7 @@ static inline void sx_send_break(struct specialix_port * port, unsigned long len
1955{ 1953{
1956 struct specialix_board *bp = port_Board(port); 1954 struct specialix_board *bp = port_Board(port);
1957 unsigned long flags; 1955 unsigned long flags;
1958 1956
1959 func_enter(); 1957 func_enter();
1960 1958
1961 spin_lock_irqsave (&port->lock, flags); 1959 spin_lock_irqsave (&port->lock, flags);
@@ -1996,8 +1994,8 @@ static inline int sx_set_serial_info(struct specialix_port * port,
1996 func_enter(); 1994 func_enter();
1997 return -EFAULT; 1995 return -EFAULT;
1998 } 1996 }
1999 1997
2000#if 0 1998#if 0
2001 if ((tmp.irq != bp->irq) || 1999 if ((tmp.irq != bp->irq) ||
2002 (tmp.port != bp->base) || 2000 (tmp.port != bp->base) ||
2003 (tmp.type != PORT_CIRRUS) || 2001 (tmp.type != PORT_CIRRUS) ||
@@ -2008,12 +2006,12 @@ static inline int sx_set_serial_info(struct specialix_port * port,
2008 func_exit(); 2006 func_exit();
2009 return -EINVAL; 2007 return -EINVAL;
2010 } 2008 }
2011#endif 2009#endif
2012 2010
2013 change_speed = ((port->flags & ASYNC_SPD_MASK) != 2011 change_speed = ((port->flags & ASYNC_SPD_MASK) !=
2014 (tmp.flags & ASYNC_SPD_MASK)); 2012 (tmp.flags & ASYNC_SPD_MASK));
2015 change_speed |= (tmp.custom_divisor != port->custom_divisor); 2013 change_speed |= (tmp.custom_divisor != port->custom_divisor);
2016 2014
2017 if (!capable(CAP_SYS_ADMIN)) { 2015 if (!capable(CAP_SYS_ADMIN)) {
2018 if ((tmp.close_delay != port->close_delay) || 2016 if ((tmp.close_delay != port->close_delay) ||
2019 (tmp.closing_wait != port->closing_wait) || 2017 (tmp.closing_wait != port->closing_wait) ||
@@ -2045,7 +2043,7 @@ static inline int sx_get_serial_info(struct specialix_port * port,
2045{ 2043{
2046 struct serial_struct tmp; 2044 struct serial_struct tmp;
2047 struct specialix_board *bp = port_Board(port); 2045 struct specialix_board *bp = port_Board(port);
2048 2046
2049 func_enter(); 2047 func_enter();
2050 2048
2051 /* 2049 /*
@@ -2074,7 +2072,7 @@ static inline int sx_get_serial_info(struct specialix_port * port,
2074} 2072}
2075 2073
2076 2074
2077static int sx_ioctl(struct tty_struct * tty, struct file * filp, 2075static int sx_ioctl(struct tty_struct * tty, struct file * filp,
2078 unsigned int cmd, unsigned long arg) 2076 unsigned int cmd, unsigned long arg)
2079{ 2077{
2080 struct specialix_port *port = (struct specialix_port *)tty->driver_data; 2078 struct specialix_port *port = (struct specialix_port *)tty->driver_data;
@@ -2087,7 +2085,7 @@ static int sx_ioctl(struct tty_struct * tty, struct file * filp,
2087 func_exit(); 2085 func_exit();
2088 return -ENODEV; 2086 return -ENODEV;
2089 } 2087 }
2090 2088
2091 switch (cmd) { 2089 switch (cmd) {
2092 case TCSBRK: /* SVID version: non-zero arg --> no break */ 2090 case TCSBRK: /* SVID version: non-zero arg --> no break */
2093 retval = tty_check_change(tty); 2091 retval = tty_check_change(tty);
@@ -2129,7 +2127,7 @@ static int sx_ioctl(struct tty_struct * tty, struct file * filp,
2129 case TIOCGSERIAL: 2127 case TIOCGSERIAL:
2130 func_exit(); 2128 func_exit();
2131 return sx_get_serial_info(port, argp); 2129 return sx_get_serial_info(port, argp);
2132 case TIOCSSERIAL: 2130 case TIOCSSERIAL:
2133 func_exit(); 2131 func_exit();
2134 return sx_set_serial_info(port, argp); 2132 return sx_set_serial_info(port, argp);
2135 default: 2133 default:
@@ -2153,16 +2151,16 @@ static void sx_throttle(struct tty_struct * tty)
2153 func_exit(); 2151 func_exit();
2154 return; 2152 return;
2155 } 2153 }
2156 2154
2157 bp = port_Board(port); 2155 bp = port_Board(port);
2158 2156
2159 /* Use DTR instead of RTS ! */ 2157 /* Use DTR instead of RTS ! */
2160 if (SX_CRTSCTS (tty)) 2158 if (SX_CRTSCTS (tty))
2161 port->MSVR &= ~MSVR_DTR; 2159 port->MSVR &= ~MSVR_DTR;
2162 else { 2160 else {
2163 /* Auch!!! I think the system shouldn't call this then. */ 2161 /* Auch!!! I think the system shouldn't call this then. */
2164 /* Or maybe we're supposed (allowed?) to do our side of hw 2162 /* Or maybe we're supposed (allowed?) to do our side of hw
2165 handshake anyway, even when hardware handshake is off. 2163 handshake anyway, even when hardware handshake is off.
2166 When you see this in your logs, please report.... */ 2164 When you see this in your logs, please report.... */
2167 printk (KERN_ERR "sx%d: Need to throttle, but can't (hardware hs is off)\n", 2165 printk (KERN_ERR "sx%d: Need to throttle, but can't (hardware hs is off)\n",
2168 port_No (port)); 2166 port_No (port));
@@ -2193,14 +2191,14 @@ static void sx_unthrottle(struct tty_struct * tty)
2193 unsigned long flags; 2191 unsigned long flags;
2194 2192
2195 func_enter(); 2193 func_enter();
2196 2194
2197 if (sx_paranoia_check(port, tty->name, "sx_unthrottle")) { 2195 if (sx_paranoia_check(port, tty->name, "sx_unthrottle")) {
2198 func_exit(); 2196 func_exit();
2199 return; 2197 return;
2200 } 2198 }
2201 2199
2202 bp = port_Board(port); 2200 bp = port_Board(port);
2203 2201
2204 spin_lock_irqsave(&port->lock, flags); 2202 spin_lock_irqsave(&port->lock, flags);
2205 /* XXXX Use DTR INSTEAD???? */ 2203 /* XXXX Use DTR INSTEAD???? */
2206 if (SX_CRTSCTS(tty)) { 2204 if (SX_CRTSCTS(tty)) {
@@ -2234,14 +2232,14 @@ static void sx_stop(struct tty_struct * tty)
2234 unsigned long flags; 2232 unsigned long flags;
2235 2233
2236 func_enter(); 2234 func_enter();
2237 2235
2238 if (sx_paranoia_check(port, tty->name, "sx_stop")) { 2236 if (sx_paranoia_check(port, tty->name, "sx_stop")) {
2239 func_exit(); 2237 func_exit();
2240 return; 2238 return;
2241 } 2239 }
2242 2240
2243 bp = port_Board(port); 2241 bp = port_Board(port);
2244 2242
2245 spin_lock_irqsave(&port->lock, flags); 2243 spin_lock_irqsave(&port->lock, flags);
2246 port->IER &= ~IER_TXRDY; 2244 port->IER &= ~IER_TXRDY;
2247 spin_lock_irqsave(&bp->lock, flags); 2245 spin_lock_irqsave(&bp->lock, flags);
@@ -2261,14 +2259,14 @@ static void sx_start(struct tty_struct * tty)
2261 unsigned long flags; 2259 unsigned long flags;
2262 2260
2263 func_enter(); 2261 func_enter();
2264 2262
2265 if (sx_paranoia_check(port, tty->name, "sx_start")) { 2263 if (sx_paranoia_check(port, tty->name, "sx_start")) {
2266 func_exit(); 2264 func_exit();
2267 return; 2265 return;
2268 } 2266 }
2269 2267
2270 bp = port_Board(port); 2268 bp = port_Board(port);
2271 2269
2272 spin_lock_irqsave(&port->lock, flags); 2270 spin_lock_irqsave(&port->lock, flags);
2273 if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { 2271 if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) {
2274 port->IER |= IER_TXRDY; 2272 port->IER |= IER_TXRDY;
@@ -2290,13 +2288,13 @@ static void sx_start(struct tty_struct * tty)
2290 * 2288 *
2291 * serial interrupt routine -> (workqueue) -> 2289 * serial interrupt routine -> (workqueue) ->
2292 * do_sx_hangup() -> tty->hangup() -> sx_hangup() 2290 * do_sx_hangup() -> tty->hangup() -> sx_hangup()
2293 * 2291 *
2294 */ 2292 */
2295static void do_sx_hangup(void *private_) 2293static void do_sx_hangup(void *private_)
2296{ 2294{
2297 struct specialix_port *port = (struct specialix_port *) private_; 2295 struct specialix_port *port = (struct specialix_port *) private_;
2298 struct tty_struct *tty; 2296 struct tty_struct *tty;
2299 2297
2300 func_enter(); 2298 func_enter();
2301 2299
2302 tty = port->tty; 2300 tty = port->tty;
@@ -2319,9 +2317,9 @@ static void sx_hangup(struct tty_struct * tty)
2319 func_exit(); 2317 func_exit();
2320 return; 2318 return;
2321 } 2319 }
2322 2320
2323 bp = port_Board(port); 2321 bp = port_Board(port);
2324 2322
2325 sx_shutdown_port(bp, port); 2323 sx_shutdown_port(bp, port);
2326 spin_lock_irqsave(&port->lock, flags); 2324 spin_lock_irqsave(&port->lock, flags);
2327 port->event = 0; 2325 port->event = 0;
@@ -2346,10 +2344,10 @@ static void sx_set_termios(struct tty_struct * tty, struct termios * old_termios
2346 struct specialix_port *port = (struct specialix_port *)tty->driver_data; 2344 struct specialix_port *port = (struct specialix_port *)tty->driver_data;
2347 unsigned long flags; 2345 unsigned long flags;
2348 struct specialix_board * bp; 2346 struct specialix_board * bp;
2349 2347
2350 if (sx_paranoia_check(port, tty->name, "sx_set_termios")) 2348 if (sx_paranoia_check(port, tty->name, "sx_set_termios"))
2351 return; 2349 return;
2352 2350
2353 if (tty->termios->c_cflag == old_termios->c_cflag && 2351 if (tty->termios->c_cflag == old_termios->c_cflag &&
2354 tty->termios->c_iflag == old_termios->c_iflag) 2352 tty->termios->c_iflag == old_termios->c_iflag)
2355 return; 2353 return;
@@ -2420,7 +2418,7 @@ static int sx_init_drivers(void)
2420 func_exit(); 2418 func_exit();
2421 return 1; 2419 return 1;
2422 } 2420 }
2423 2421
2424 if (!(tmp_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL))) { 2422 if (!(tmp_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL))) {
2425 printk(KERN_ERR "sx: Couldn't get free page.\n"); 2423 printk(KERN_ERR "sx: Couldn't get free page.\n");
2426 put_tty_driver(specialix_driver); 2424 put_tty_driver(specialix_driver);
@@ -2457,7 +2455,7 @@ static int sx_init_drivers(void)
2457 init_waitqueue_head(&sx_port[i].close_wait); 2455 init_waitqueue_head(&sx_port[i].close_wait);
2458 spin_lock_init(&sx_port[i].lock); 2456 spin_lock_init(&sx_port[i].lock);
2459 } 2457 }
2460 2458
2461 func_exit(); 2459 func_exit();
2462 return 0; 2460 return 0;
2463} 2461}
@@ -2472,8 +2470,8 @@ static void sx_release_drivers(void)
2472 func_exit(); 2470 func_exit();
2473} 2471}
2474 2472
2475/* 2473/*
2476 * This routine must be called by kernel at boot time 2474 * This routine must be called by kernel at boot time
2477 */ 2475 */
2478static int __init specialix_init(void) 2476static int __init specialix_init(void)
2479{ 2477{
@@ -2489,7 +2487,7 @@ static int __init specialix_init(void)
2489#else 2487#else
2490 printk (KERN_INFO "sx: DTR/RTS pin is RTS when CRTSCTS is on.\n"); 2488 printk (KERN_INFO "sx: DTR/RTS pin is RTS when CRTSCTS is on.\n");
2491#endif 2489#endif
2492 2490
2493 for (i = 0; i < SX_NBOARD; i++) 2491 for (i = 0; i < SX_NBOARD; i++)
2494 sx_board[i].lock = SPIN_LOCK_UNLOCKED; 2492 sx_board[i].lock = SPIN_LOCK_UNLOCKED;
2495 2493
@@ -2498,7 +2496,7 @@ static int __init specialix_init(void)
2498 return -EIO; 2496 return -EIO;
2499 } 2497 }
2500 2498
2501 for (i = 0; i < SX_NBOARD; i++) 2499 for (i = 0; i < SX_NBOARD; i++)
2502 if (sx_board[i].base && !sx_probe(&sx_board[i])) 2500 if (sx_board[i].base && !sx_probe(&sx_board[i]))
2503 found++; 2501 found++;
2504 2502
@@ -2512,8 +2510,8 @@ static int __init specialix_init(void)
2512 i++; 2510 i++;
2513 continue; 2511 continue;
2514 } 2512 }
2515 pdev = pci_find_device (PCI_VENDOR_ID_SPECIALIX, 2513 pdev = pci_find_device (PCI_VENDOR_ID_SPECIALIX,
2516 PCI_DEVICE_ID_SPECIALIX_IO8, 2514 PCI_DEVICE_ID_SPECIALIX_IO8,
2517 pdev); 2515 pdev);
2518 if (!pdev) break; 2516 if (!pdev) break;
2519 2517
@@ -2557,10 +2555,10 @@ module_param(sx_poll, int, 0);
2557/* 2555/*
2558 * You can setup up to 4 boards. 2556 * You can setup up to 4 boards.
2559 * by specifying "iobase=0xXXX,0xXXX ..." as insmod parameter. 2557 * by specifying "iobase=0xXXX,0xXXX ..." as insmod parameter.
2560 * You should specify the IRQs too in that case "irq=....,...". 2558 * You should specify the IRQs too in that case "irq=....,...".
2561 * 2559 *
2562 * More than 4 boards in one computer is not possible, as the card can 2560 * More than 4 boards in one computer is not possible, as the card can
2563 * only use 4 different interrupts. 2561 * only use 4 different interrupts.
2564 * 2562 *
2565 */ 2563 */
2566static int __init specialix_init_module(void) 2564static int __init specialix_init_module(void)
@@ -2583,16 +2581,16 @@ static int __init specialix_init_module(void)
2583 2581
2584 return specialix_init(); 2582 return specialix_init();
2585} 2583}
2586 2584
2587static void __exit specialix_exit_module(void) 2585static void __exit specialix_exit_module(void)
2588{ 2586{
2589 int i; 2587 int i;
2590 2588
2591 func_enter(); 2589 func_enter();
2592 2590
2593 sx_release_drivers(); 2591 sx_release_drivers();
2594 for (i = 0; i < SX_NBOARD; i++) 2592 for (i = 0; i < SX_NBOARD; i++)
2595 if (sx_board[i].flags & SX_BOARD_PRESENT) 2593 if (sx_board[i].flags & SX_BOARD_PRESENT)
2596 sx_release_io_range(&sx_board[i]); 2594 sx_release_io_range(&sx_board[i]);
2597#ifdef SPECIALIX_TIMER 2595#ifdef SPECIALIX_TIMER
2598 del_timer (&missed_irq_timer); 2596 del_timer (&missed_irq_timer);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index ea2d54be4843..0133dc0e25d0 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -912,6 +912,7 @@ MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
912MODULE_LICENSE("GPL"); 912MODULE_LICENSE("GPL");
913 913
914static struct pci_driver synclink_pci_driver = { 914static struct pci_driver synclink_pci_driver = {
915 .owner = THIS_MODULE,
915 .name = "synclink", 916 .name = "synclink",
916 .id_table = synclink_pci_tbl, 917 .id_table = synclink_pci_tbl,
917 .probe = synclink_init_one, 918 .probe = synclink_init_one,
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 6fb165cf8a61..f185724448b1 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -500,6 +500,7 @@ MODULE_DEVICE_TABLE(pci, synclinkmp_pci_tbl);
500MODULE_LICENSE("GPL"); 500MODULE_LICENSE("GPL");
501 501
502static struct pci_driver synclinkmp_pci_driver = { 502static struct pci_driver synclinkmp_pci_driver = {
503 .owner = THIS_MODULE,
503 .name = "synclinkmp", 504 .name = "synclinkmp",
504 .id_table = synclinkmp_pci_tbl, 505 .id_table = synclinkmp_pci_tbl,
505 .probe = synclinkmp_init_one, 506 .probe = synclinkmp_init_one,
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
new file mode 100644
index 000000000000..18cdd4361dc6
--- /dev/null
+++ b/drivers/char/tlclk.c
@@ -0,0 +1,896 @@
1/*
2 * Telecom Clock driver for Intel NetStructure(tm) MPCBL0010
3 *
4 * Copyright (C) 2005 Kontron Canada
5 *
6 * All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16 * NON INFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 * Send feedback to <sebastien.bouchard@ca.kontron.com> and the current
24 * Maintainer <mark.gross@intel.com>
25 *
26 * Description : This is the TELECOM CLOCK module driver for the ATCA
27 * MPCBL0010 ATCA computer.
28 */
29
30#include <linux/config.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34#include <linux/kernel.h> /* printk() */
35#include <linux/fs.h> /* everything... */
36#include <linux/errno.h> /* error codes */
37#include <linux/delay.h> /* udelay */
38#include <linux/slab.h>
39#include <linux/ioport.h>
40#include <linux/interrupt.h>
41#include <linux/spinlock.h>
42#include <linux/timer.h>
43#include <linux/sysfs.h>
44#include <linux/device.h>
45#include <linux/miscdevice.h>
46#include <asm/io.h> /* inb/outb */
47#include <asm/uaccess.h>
48
49MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>");
50MODULE_LICENSE("GPL");
51
52/*Hardware Reset of the PLL */
53#define RESET_ON 0x00
54#define RESET_OFF 0x01
55
56/* MODE SELECT */
57#define NORMAL_MODE 0x00
58#define HOLDOVER_MODE 0x10
59#define FREERUN_MODE 0x20
60
61/* FILTER SELECT */
62#define FILTER_6HZ 0x04
63#define FILTER_12HZ 0x00
64
65/* SELECT REFERENCE FREQUENCY */
66#define REF_CLK1_8kHz 0x00
67#define REF_CLK2_19_44MHz 0x02
68
69/* Select primary or secondary redundant clock */
70#define PRIMARY_CLOCK 0x00
71#define SECONDARY_CLOCK 0x01
72
73/* CLOCK TRANSMISSION DEFINE */
74#define CLK_8kHz 0xff
75#define CLK_16_384MHz 0xfb
76
77#define CLK_1_544MHz 0x00
78#define CLK_2_048MHz 0x01
79#define CLK_4_096MHz 0x02
80#define CLK_6_312MHz 0x03
81#define CLK_8_192MHz 0x04
82#define CLK_19_440MHz 0x06
83
84#define CLK_8_592MHz 0x08
85#define CLK_11_184MHz 0x09
86#define CLK_34_368MHz 0x0b
87#define CLK_44_736MHz 0x0a
88
89/* RECEIVED REFERENCE */
90#define AMC_B1 0
91#define AMC_B2 1
92
93/* HARDWARE SWITCHING DEFINE */
94#define HW_ENABLE 0x80
95#define HW_DISABLE 0x00
96
97/* HARDWARE SWITCHING MODE DEFINE */
98#define PLL_HOLDOVER 0x40
99#define LOST_CLOCK 0x00
100
101/* ALARMS DEFINE */
102#define UNLOCK_MASK 0x10
103#define HOLDOVER_MASK 0x20
104#define SEC_LOST_MASK 0x40
105#define PRI_LOST_MASK 0x80
106
107/* INTERRUPT CAUSE DEFINE */
108
109#define PRI_LOS_01_MASK 0x01
110#define PRI_LOS_10_MASK 0x02
111
112#define SEC_LOS_01_MASK 0x04
113#define SEC_LOS_10_MASK 0x08
114
115#define HOLDOVER_01_MASK 0x10
116#define HOLDOVER_10_MASK 0x20
117
118#define UNLOCK_01_MASK 0x40
119#define UNLOCK_10_MASK 0x80
120
121struct tlclk_alarms {
122 __u32 lost_clocks;
123 __u32 lost_primary_clock;
124 __u32 lost_secondary_clock;
125 __u32 primary_clock_back;
126 __u32 secondary_clock_back;
127 __u32 switchover_primary;
128 __u32 switchover_secondary;
129 __u32 pll_holdover;
130 __u32 pll_end_holdover;
131 __u32 pll_lost_sync;
132 __u32 pll_sync;
133};
134/* Telecom clock I/O register definition */
135#define TLCLK_BASE 0xa08
136#define TLCLK_REG0 TLCLK_BASE
137#define TLCLK_REG1 (TLCLK_BASE+1)
138#define TLCLK_REG2 (TLCLK_BASE+2)
139#define TLCLK_REG3 (TLCLK_BASE+3)
140#define TLCLK_REG4 (TLCLK_BASE+4)
141#define TLCLK_REG5 (TLCLK_BASE+5)
142#define TLCLK_REG6 (TLCLK_BASE+6)
143#define TLCLK_REG7 (TLCLK_BASE+7)
144
145#define SET_PORT_BITS(port, mask, val) outb(((inb(port) & mask) | val), port)
146
147/* 0 = Dynamic allocation of the major device number */
148#define TLCLK_MAJOR 0
149
150/* sysfs interface definition:
151Upon loading, the driver will create a sysfs directory under
152/sys/devices/platform/telco_clock.
153
154This directory exports the following interfaces. Their operation is
155documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4.
156alarms :
157current_ref :
158enable_clk3a_output :
159enable_clk3b_output :
160enable_clka0_output :
161enable_clka1_output :
162enable_clkb0_output :
163enable_clkb1_output :
164filter_select :
165hardware_switching :
166hardware_switching_mode :
167interrupt_switch :
168mode_select :
169refalign :
170reset :
171select_amcb1_transmit_clock :
172select_amcb2_transmit_clock :
173select_redundant_clock :
174select_ref_frequency :
175test_mode :
176
177All sysfs interfaces are integers in hex format, i.e. echo 99 > refalign
178has the same effect as echo 0x99 > refalign.
179*/
180
181static unsigned int telclk_interrupt;
182
183static int int_events; /* Events that generate an interrupt */
184static int got_event; /* set when event processing has been done */
185
186static void switchover_timeout(unsigned long data);
187static struct timer_list switchover_timer =
188 TIMER_INITIALIZER(switchover_timeout , 0, 0);
189
190static struct tlclk_alarms *alarm_events;
191
192static DEFINE_SPINLOCK(event_lock);
193
194static int tlclk_major = TLCLK_MAJOR;
195
196static irqreturn_t tlclk_interrupt(int irq, void *dev_id, struct pt_regs *regs);
197
198static DECLARE_WAIT_QUEUE_HEAD(wq);
199
200static int tlclk_open(struct inode *inode, struct file *filp)
201{
202 int result;
203
204 /* Make sure there is no interrupt pending while
205 * initialising interrupt handler */
206 inb(TLCLK_REG6);
207
208 	/* This device is wired through the FPGA IO space of the ATCA blade,
209 	 * so we can't share this IRQ */
210 result = request_irq(telclk_interrupt, &tlclk_interrupt,
211 SA_INTERRUPT, "telco_clock", tlclk_interrupt);
212 if (result == -EBUSY) {
213 printk(KERN_ERR "telco_clock: Interrupt can't be reserved!\n");
214 return -EBUSY;
215 }
216 inb(TLCLK_REG6); /* Clear interrupt events */
217
218 return 0;
219}
220
221static int tlclk_release(struct inode *inode, struct file *filp)
222{
223 free_irq(telclk_interrupt, tlclk_interrupt);
224
225 return 0;
226}
227
228ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
229 loff_t *f_pos)
230{
231 if (count < sizeof(struct tlclk_alarms))
232 return -EIO;
233
234 wait_event_interruptible(wq, got_event);
235 if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms)))
236 return -EFAULT;
237
238 memset(alarm_events, 0, sizeof(struct tlclk_alarms));
239 got_event = 0;
240
241 return sizeof(struct tlclk_alarms);
242}
243
244ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
245 loff_t *f_pos)
246{
247 return 0;
248}
249
250static struct file_operations tlclk_fops = {
251 .read = tlclk_read,
252 .write = tlclk_write,
253 .open = tlclk_open,
254 .release = tlclk_release,
255
256};
257
258static struct miscdevice tlclk_miscdev = {
259 .minor = MISC_DYNAMIC_MINOR,
260 .name = "telco_clock",
261 .fops = &tlclk_fops,
262};
263
264static ssize_t show_current_ref(struct device *d,
265 struct device_attribute *attr, char *buf)
266{
267 unsigned long ret_val;
268 unsigned long flags;
269
270 spin_lock_irqsave(&event_lock, flags);
271 ret_val = ((inb(TLCLK_REG1) & 0x08) >> 3);
272 spin_unlock_irqrestore(&event_lock, flags);
273
274 return sprintf(buf, "0x%lX\n", ret_val);
275}
276
277static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
278
279
280static ssize_t show_interrupt_switch(struct device *d,
281 struct device_attribute *attr, char *buf)
282{
283 unsigned long ret_val;
284 unsigned long flags;
285
286 spin_lock_irqsave(&event_lock, flags);
287 ret_val = inb(TLCLK_REG6);
288 spin_unlock_irqrestore(&event_lock, flags);
289
290 return sprintf(buf, "0x%lX\n", ret_val);
291}
292
293static DEVICE_ATTR(interrupt_switch, S_IRUGO,
294 show_interrupt_switch, NULL);
295
296static ssize_t show_alarms(struct device *d,
297 struct device_attribute *attr, char *buf)
298{
299 unsigned long ret_val;
300 unsigned long flags;
301
302 spin_lock_irqsave(&event_lock, flags);
303 ret_val = (inb(TLCLK_REG2) & 0xf0);
304 spin_unlock_irqrestore(&event_lock, flags);
305
306 return sprintf(buf, "0x%lX\n", ret_val);
307}
308
309static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
310
311static ssize_t store_enable_clk3b_output(struct device *d,
312 struct device_attribute *attr, const char *buf, size_t count)
313{
314 unsigned long tmp;
315 unsigned char val;
316 unsigned long flags;
317
318 sscanf(buf, "%lX", &tmp);
319 dev_dbg(d, ": tmp = 0x%lX\n", tmp);
320
321 val = (unsigned char)tmp;
322 spin_lock_irqsave(&event_lock, flags);
323 SET_PORT_BITS(TLCLK_REG3, 0x7f, val << 7);
324 spin_unlock_irqrestore(&event_lock, flags);
325
326 return strnlen(buf, count);
327}
328
329static DEVICE_ATTR(enable_clk3b_output, S_IWUGO, NULL,
330 store_enable_clk3b_output);
331
332static ssize_t store_enable_clk3a_output(struct device *d,
333 struct device_attribute *attr, const char *buf, size_t count)
334{
335 unsigned long flags;
336 unsigned long tmp;
337 unsigned char val;
338
339 sscanf(buf, "%lX", &tmp);
340 dev_dbg(d, "tmp = 0x%lX\n", tmp);
341
342 val = (unsigned char)tmp;
343 spin_lock_irqsave(&event_lock, flags);
344 SET_PORT_BITS(TLCLK_REG3, 0xbf, val << 6);
345 spin_unlock_irqrestore(&event_lock, flags);
346
347 return strnlen(buf, count);
348}
349
350static DEVICE_ATTR(enable_clk3a_output, S_IWUGO, NULL,
351 store_enable_clk3a_output);
352
353static ssize_t store_enable_clkb1_output(struct device *d,
354 struct device_attribute *attr, const char *buf, size_t count)
355{
356 unsigned long flags;
357 unsigned long tmp;
358 unsigned char val;
359
360 sscanf(buf, "%lX", &tmp);
361 dev_dbg(d, "tmp = 0x%lX\n", tmp);
362
363 val = (unsigned char)tmp;
364 spin_lock_irqsave(&event_lock, flags);
365 SET_PORT_BITS(TLCLK_REG2, 0xf7, val << 3);
366 spin_unlock_irqrestore(&event_lock, flags);
367
368 return strnlen(buf, count);
369}
370
371static DEVICE_ATTR(enable_clkb1_output, S_IWUGO, NULL,
372 store_enable_clkb1_output);
373
374
375static ssize_t store_enable_clka1_output(struct device *d,
376 struct device_attribute *attr, const char *buf, size_t count)
377{
378 unsigned long flags;
379 unsigned long tmp;
380 unsigned char val;
381
382 sscanf(buf, "%lX", &tmp);
383 dev_dbg(d, "tmp = 0x%lX\n", tmp);
384
385 val = (unsigned char)tmp;
386 spin_lock_irqsave(&event_lock, flags);
387 SET_PORT_BITS(TLCLK_REG2, 0xfb, val << 2);
388 spin_unlock_irqrestore(&event_lock, flags);
389
390 return strnlen(buf, count);
391}
392
393static DEVICE_ATTR(enable_clka1_output, S_IWUGO, NULL,
394 store_enable_clka1_output);
395
396static ssize_t store_enable_clkb0_output(struct device *d,
397 struct device_attribute *attr, const char *buf, size_t count)
398{
399 unsigned long flags;
400 unsigned long tmp;
401 unsigned char val;
402
403 sscanf(buf, "%lX", &tmp);
404 dev_dbg(d, "tmp = 0x%lX\n", tmp);
405
406 val = (unsigned char)tmp;
407 spin_lock_irqsave(&event_lock, flags);
408 SET_PORT_BITS(TLCLK_REG2, 0xfd, val << 1);
409 spin_unlock_irqrestore(&event_lock, flags);
410
411 return strnlen(buf, count);
412}
413
414static DEVICE_ATTR(enable_clkb0_output, S_IWUGO, NULL,
415 store_enable_clkb0_output);
416
417static ssize_t store_enable_clka0_output(struct device *d,
418 struct device_attribute *attr, const char *buf, size_t count)
419{
420 unsigned long flags;
421 unsigned long tmp;
422 unsigned char val;
423
424 sscanf(buf, "%lX", &tmp);
425 dev_dbg(d, "tmp = 0x%lX\n", tmp);
426
427 val = (unsigned char)tmp;
428 spin_lock_irqsave(&event_lock, flags);
429 SET_PORT_BITS(TLCLK_REG2, 0xfe, val);
430 spin_unlock_irqrestore(&event_lock, flags);
431
432 return strnlen(buf, count);
433}
434
435static DEVICE_ATTR(enable_clka0_output, S_IWUGO, NULL,
436 store_enable_clka0_output);
437
438static ssize_t store_test_mode(struct device *d,
439 struct device_attribute *attr, const char *buf, size_t count)
440{
441 unsigned long flags;
442 unsigned long tmp;
443 unsigned char val;
444
445 sscanf(buf, "%lX", &tmp);
446 dev_dbg(d, "tmp = 0x%lX\n", tmp);
447
448 val = (unsigned char)tmp;
449 spin_lock_irqsave(&event_lock, flags);
450 SET_PORT_BITS(TLCLK_REG4, 0xfd, 2);
451 spin_unlock_irqrestore(&event_lock, flags);
452
453 return strnlen(buf, count);
454}
455
456static DEVICE_ATTR(test_mode, S_IWUGO, NULL, store_test_mode);
457
458static ssize_t store_select_amcb2_transmit_clock(struct device *d,
459 struct device_attribute *attr, const char *buf, size_t count)
460{
461 unsigned long flags;
462 unsigned long tmp;
463 unsigned char val;
464
465 sscanf(buf, "%lX", &tmp);
466 dev_dbg(d, "tmp = 0x%lX\n", tmp);
467
468 val = (unsigned char)tmp;
469 spin_lock_irqsave(&event_lock, flags);
470 if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
471 SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28);
472 SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
473 } else if (val >= CLK_8_592MHz) {
474 SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
475 switch (val) {
476 case CLK_8_592MHz:
477 SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
478 break;
479 case CLK_11_184MHz:
480 SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
481 break;
482 case CLK_34_368MHz:
483 SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
484 break;
485 case CLK_44_736MHz:
486 SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
487 break;
488 }
489 } else
490 SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3);
491
492 spin_unlock_irqrestore(&event_lock, flags);
493
494 return strnlen(buf, count);
495}
496
497static DEVICE_ATTR(select_amcb2_transmit_clock, S_IWUGO, NULL,
498 store_select_amcb2_transmit_clock);
499
500static ssize_t store_select_amcb1_transmit_clock(struct device *d,
501 struct device_attribute *attr, const char *buf, size_t count)
502{
503 unsigned long tmp;
504 unsigned char val;
505 unsigned long flags;
506
507 sscanf(buf, "%lX", &tmp);
508 dev_dbg(d, "tmp = 0x%lX\n", tmp);
509
510 val = (unsigned char)tmp;
511 spin_lock_irqsave(&event_lock, flags);
512 if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
513 SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5);
514 SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
515 } else if (val >= CLK_8_592MHz) {
516 SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7);
517 switch (val) {
518 case CLK_8_592MHz:
519 SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
520 break;
521 case CLK_11_184MHz:
522 SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
523 break;
524 case CLK_34_368MHz:
525 SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
526 break;
527 case CLK_44_736MHz:
528 SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
529 break;
530 }
531 } else
532 SET_PORT_BITS(TLCLK_REG3, 0xf8, val);
533 spin_unlock_irqrestore(&event_lock, flags);
534
535 return strnlen(buf, count);
536}
537
538static DEVICE_ATTR(select_amcb1_transmit_clock, S_IWUGO, NULL,
539 store_select_amcb1_transmit_clock);
540
541static ssize_t store_select_redundant_clock(struct device *d,
542 struct device_attribute *attr, const char *buf, size_t count)
543{
544 unsigned long tmp;
545 unsigned char val;
546 unsigned long flags;
547
548 sscanf(buf, "%lX", &tmp);
549 dev_dbg(d, "tmp = 0x%lX\n", tmp);
550
551 val = (unsigned char)tmp;
552 spin_lock_irqsave(&event_lock, flags);
553 SET_PORT_BITS(TLCLK_REG1, 0xfe, val);
554 spin_unlock_irqrestore(&event_lock, flags);
555
556 return strnlen(buf, count);
557}
558
559static DEVICE_ATTR(select_redundant_clock, S_IWUGO, NULL,
560 store_select_redundant_clock);
561
562static ssize_t store_select_ref_frequency(struct device *d,
563 struct device_attribute *attr, const char *buf, size_t count)
564{
565 unsigned long tmp;
566 unsigned char val;
567 unsigned long flags;
568
569 sscanf(buf, "%lX", &tmp);
570 dev_dbg(d, "tmp = 0x%lX\n", tmp);
571
572 val = (unsigned char)tmp;
573 spin_lock_irqsave(&event_lock, flags);
574 SET_PORT_BITS(TLCLK_REG1, 0xfd, val);
575 spin_unlock_irqrestore(&event_lock, flags);
576
577 return strnlen(buf, count);
578}
579
580static DEVICE_ATTR(select_ref_frequency, S_IWUGO, NULL,
581 store_select_ref_frequency);
582
583static ssize_t store_filter_select(struct device *d,
584 struct device_attribute *attr, const char *buf, size_t count)
585{
586 unsigned long tmp;
587 unsigned char val;
588 unsigned long flags;
589
590 sscanf(buf, "%lX", &tmp);
591 dev_dbg(d, "tmp = 0x%lX\n", tmp);
592
593 val = (unsigned char)tmp;
594 spin_lock_irqsave(&event_lock, flags);
595 SET_PORT_BITS(TLCLK_REG0, 0xfb, val);
596 spin_unlock_irqrestore(&event_lock, flags);
597
598 return strnlen(buf, count);
599}
600
601static DEVICE_ATTR(filter_select, S_IWUGO, NULL, store_filter_select);
602
603static ssize_t store_hardware_switching_mode(struct device *d,
604 struct device_attribute *attr, const char *buf, size_t count)
605{
606 unsigned long tmp;
607 unsigned char val;
608 unsigned long flags;
609
610 sscanf(buf, "%lX", &tmp);
611 dev_dbg(d, "tmp = 0x%lX\n", tmp);
612
613 val = (unsigned char)tmp;
614 spin_lock_irqsave(&event_lock, flags);
615 SET_PORT_BITS(TLCLK_REG0, 0xbf, val);
616 spin_unlock_irqrestore(&event_lock, flags);
617
618 return strnlen(buf, count);
619}
620
621static DEVICE_ATTR(hardware_switching_mode, S_IWUGO, NULL,
622 store_hardware_switching_mode);
623
624static ssize_t store_hardware_switching(struct device *d,
625 struct device_attribute *attr, const char *buf, size_t count)
626{
627 unsigned long tmp;
628 unsigned char val;
629 unsigned long flags;
630
631 sscanf(buf, "%lX", &tmp);
632 dev_dbg(d, "tmp = 0x%lX\n", tmp);
633
634 val = (unsigned char)tmp;
635 spin_lock_irqsave(&event_lock, flags);
636 SET_PORT_BITS(TLCLK_REG0, 0x7f, val);
637 spin_unlock_irqrestore(&event_lock, flags);
638
639 return strnlen(buf, count);
640}
641
642static DEVICE_ATTR(hardware_switching, S_IWUGO, NULL,
643 store_hardware_switching);
644
645static ssize_t store_refalign (struct device *d,
646 struct device_attribute *attr, const char *buf, size_t count)
647{
648 unsigned long tmp;
649 unsigned long flags;
650
651 sscanf(buf, "%lX", &tmp);
652 dev_dbg(d, "tmp = 0x%lX\n", tmp);
653 spin_lock_irqsave(&event_lock, flags);
654 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
655 udelay(2);
656 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
657 udelay(2);
658 SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
659 spin_unlock_irqrestore(&event_lock, flags);
660
661 return strnlen(buf, count);
662}
663
664static DEVICE_ATTR(refalign, S_IWUGO, NULL, store_refalign);
665
666static ssize_t store_mode_select (struct device *d,
667 struct device_attribute *attr, const char *buf, size_t count)
668{
669 unsigned long tmp;
670 unsigned char val;
671 unsigned long flags;
672
673 sscanf(buf, "%lX", &tmp);
674 dev_dbg(d, "tmp = 0x%lX\n", tmp);
675
676 val = (unsigned char)tmp;
677 spin_lock_irqsave(&event_lock, flags);
678 SET_PORT_BITS(TLCLK_REG0, 0xcf, val);
679 spin_unlock_irqrestore(&event_lock, flags);
680
681 return strnlen(buf, count);
682}
683
684static DEVICE_ATTR(mode_select, S_IWUGO, NULL, store_mode_select);
685
686static ssize_t store_reset (struct device *d,
687 struct device_attribute *attr, const char *buf, size_t count)
688{
689 unsigned long tmp;
690 unsigned char val;
691 unsigned long flags;
692
693 sscanf(buf, "%lX", &tmp);
694 dev_dbg(d, "tmp = 0x%lX\n", tmp);
695
696 val = (unsigned char)tmp;
697 spin_lock_irqsave(&event_lock, flags);
698 SET_PORT_BITS(TLCLK_REG4, 0xfd, val);
699 spin_unlock_irqrestore(&event_lock, flags);
700
701 return strnlen(buf, count);
702}
703
704static DEVICE_ATTR(reset, S_IWUGO, NULL, store_reset);
705
706static struct attribute *tlclk_sysfs_entries[] = {
707 &dev_attr_current_ref.attr,
708 &dev_attr_interrupt_switch.attr,
709 &dev_attr_alarms.attr,
710 &dev_attr_enable_clk3a_output.attr,
711 &dev_attr_enable_clk3b_output.attr,
712 &dev_attr_enable_clkb1_output.attr,
713 &dev_attr_enable_clka1_output.attr,
714 &dev_attr_enable_clkb0_output.attr,
715 &dev_attr_enable_clka0_output.attr,
716 &dev_attr_test_mode.attr,
717 &dev_attr_select_amcb1_transmit_clock.attr,
718 &dev_attr_select_amcb2_transmit_clock.attr,
719 &dev_attr_select_redundant_clock.attr,
720 &dev_attr_select_ref_frequency.attr,
721 &dev_attr_filter_select.attr,
722 &dev_attr_hardware_switching_mode.attr,
723 &dev_attr_hardware_switching.attr,
724 &dev_attr_refalign.attr,
725 &dev_attr_mode_select.attr,
726 &dev_attr_reset.attr,
727 NULL
728};
729
730static struct attribute_group tlclk_attribute_group = {
731 .name = NULL, /* put in device directory */
732 .attrs = tlclk_sysfs_entries,
733};
734
735static struct platform_device *tlclk_device;
736
737static int __init tlclk_init(void)
738{
739 int ret;
740
741 ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
742 if (ret < 0) {
743 printk(KERN_ERR "telco_clock: can't get major! %d\n", tlclk_major);
744 return ret;
745 }
746 alarm_events = kzalloc(sizeof(struct tlclk_alarms), GFP_KERNEL);
747 if (!alarm_events) {
748 ret = -ENOMEM; goto out1;
749 }
750 /* Read telecom clock IRQ number (Set by BIOS) */
751 if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
752 printk(KERN_ERR "tlclk: request_region failed! 0x%X\n",
753 TLCLK_BASE);
754 ret = -EBUSY;
755 goto out2;
756 }
757 telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
758
759 if (0x0F == telclk_interrupt) { /* not MCPBL0010 ? */
760 printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw\n",
761 telclk_interrupt);
762 ret = -ENXIO;
763 goto out3;
764 }
765
766 init_timer(&switchover_timer);
767
768 ret = misc_register(&tlclk_miscdev);
769 if (ret < 0) {
770 printk(KERN_ERR " misc_register retruns %d\n", ret);
771 ret = -EBUSY;
772 goto out3;
773 }
774
775 tlclk_device = platform_device_register_simple("telco_clock",
776 -1, NULL, 0);
777 if (!tlclk_device) {
778 printk(KERN_ERR " platform_device_register retruns 0x%X\n",
779 (unsigned int) tlclk_device);
780 ret = -EBUSY;
781 goto out4;
782 }
783
784 ret = sysfs_create_group(&tlclk_device->dev.kobj,
785 &tlclk_attribute_group);
786 if (ret) {
787 printk(KERN_ERR "failed to create sysfs device attributes\n");
788 sysfs_remove_group(&tlclk_device->dev.kobj,
789 &tlclk_attribute_group);
790 goto out5;
791 }
792
793 return 0;
794out5:
795 platform_device_unregister(tlclk_device);
796out4:
797 misc_deregister(&tlclk_miscdev);
798out3:
799 release_region(TLCLK_BASE, 8);
800out2:
801 kfree(alarm_events);
802out1:
803 unregister_chrdev(tlclk_major, "telco_clock");
804 return ret;
805}
806
807static void __exit tlclk_cleanup(void)
808{
809 sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group);
810 platform_device_unregister(tlclk_device);
811 misc_deregister(&tlclk_miscdev);
812 unregister_chrdev(tlclk_major, "telco_clock");
813
814 release_region(TLCLK_BASE, 8);
815 del_timer_sync(&switchover_timer);
816 kfree(alarm_events);
817
818}
819
820static void switchover_timeout(unsigned long data)
821{
822 if ((data & 1)) {
823 if ((inb(TLCLK_REG1) & 0x08) != (data & 0x08))
824 alarm_events->switchover_primary++;
825 } else {
826 if ((inb(TLCLK_REG1) & 0x08) != (data & 0x08))
827 alarm_events->switchover_secondary++;
828 }
829
830 /* Alarm processing is done, wake up read task */
831 del_timer(&switchover_timer);
832 got_event = 1;
833 wake_up(&wq);
834}
835
836static irqreturn_t tlclk_interrupt(int irq, void *dev_id, struct pt_regs *regs)
837{
838 unsigned long flags;
839
840 spin_lock_irqsave(&event_lock, flags);
841 /* Read and clear interrupt events */
842 int_events = inb(TLCLK_REG6);
843
844 /* Primary_Los changed from 0 to 1 ? */
845 if (int_events & PRI_LOS_01_MASK) {
846 if (inb(TLCLK_REG2) & SEC_LOST_MASK)
847 alarm_events->lost_clocks++;
848 else
849 alarm_events->lost_primary_clock++;
850 }
851
852 /* Primary_Los changed from 1 to 0 ? */
853 if (int_events & PRI_LOS_10_MASK) {
854 alarm_events->primary_clock_back++;
855 SET_PORT_BITS(TLCLK_REG1, 0xFE, 1);
856 }
857 /* Secondary_Los changed from 0 to 1 ? */
858 if (int_events & SEC_LOS_01_MASK) {
859 if (inb(TLCLK_REG2) & PRI_LOST_MASK)
860 alarm_events->lost_clocks++;
861 else
862 alarm_events->lost_secondary_clock++;
863 }
864 /* Secondary_Los changed from 1 to 0 ? */
865 if (int_events & SEC_LOS_10_MASK) {
866 alarm_events->secondary_clock_back++;
867 SET_PORT_BITS(TLCLK_REG1, 0xFE, 0);
868 }
869 if (int_events & HOLDOVER_10_MASK)
870 alarm_events->pll_end_holdover++;
871
872 if (int_events & UNLOCK_01_MASK)
873 alarm_events->pll_lost_sync++;
874
875 if (int_events & UNLOCK_10_MASK)
876 alarm_events->pll_sync++;
877
878 /* Holdover changed from 0 to 1 ? */
879 if (int_events & HOLDOVER_01_MASK) {
880 alarm_events->pll_holdover++;
881
882 /* TIMEOUT in ~10ms */
883 switchover_timer.expires = jiffies + msecs_to_jiffies(10);
884 switchover_timer.data = inb(TLCLK_REG1);
885 add_timer(&switchover_timer);
886 } else {
887 got_event = 1;
888 wake_up(&wq);
889 }
890 spin_unlock_irqrestore(&event_lock, flags);
891
892 return IRQ_HANDLED;
893}
894
895module_init(tlclk_init);
896module_exit(tlclk_cleanup);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 049d128ae7f0..303f15880466 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -64,7 +64,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
64 if (count == 0) 64 if (count == 0)
65 return -ENODATA; 65 return -ENODATA;
66 if (count > bufsiz) { 66 if (count > bufsiz) {
67 dev_err(&chip->pci_dev->dev, 67 dev_err(chip->dev,
68 "invalid count value %x %zx \n", count, bufsiz); 68 "invalid count value %x %zx \n", count, bufsiz);
69 return -E2BIG; 69 return -E2BIG;
70 } 70 }
@@ -72,21 +72,21 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
72 down(&chip->tpm_mutex); 72 down(&chip->tpm_mutex);
73 73
74 if ((rc = chip->vendor->send(chip, (u8 *) buf, count)) < 0) { 74 if ((rc = chip->vendor->send(chip, (u8 *) buf, count)) < 0) {
75 dev_err(&chip->pci_dev->dev, 75 dev_err(chip->dev,
76 "tpm_transmit: tpm_send: error %zd\n", rc); 76 "tpm_transmit: tpm_send: error %zd\n", rc);
77 goto out; 77 goto out;
78 } 78 }
79 79
80 stop = jiffies + 2 * 60 * HZ; 80 stop = jiffies + 2 * 60 * HZ;
81 do { 81 do {
82 u8 status = inb(chip->vendor->base + 1); 82 u8 status = chip->vendor->status(chip);
83 if ((status & chip->vendor->req_complete_mask) == 83 if ((status & chip->vendor->req_complete_mask) ==
84 chip->vendor->req_complete_val) { 84 chip->vendor->req_complete_val) {
85 goto out_recv; 85 goto out_recv;
86 } 86 }
87 87
88 if ((status == chip->vendor->req_canceled)) { 88 if ((status == chip->vendor->req_canceled)) {
89 dev_err(&chip->pci_dev->dev, "Operation Canceled\n"); 89 dev_err(chip->dev, "Operation Canceled\n");
90 rc = -ECANCELED; 90 rc = -ECANCELED;
91 goto out; 91 goto out;
92 } 92 }
@@ -97,14 +97,14 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
97 97
98 98
99 chip->vendor->cancel(chip); 99 chip->vendor->cancel(chip);
100 dev_err(&chip->pci_dev->dev, "Operation Timed out\n"); 100 dev_err(chip->dev, "Operation Timed out\n");
101 rc = -ETIME; 101 rc = -ETIME;
102 goto out; 102 goto out;
103 103
104out_recv: 104out_recv:
105 rc = chip->vendor->recv(chip, (u8 *) buf, bufsiz); 105 rc = chip->vendor->recv(chip, (u8 *) buf, bufsiz);
106 if (rc < 0) 106 if (rc < 0)
107 dev_err(&chip->pci_dev->dev, 107 dev_err(chip->dev,
108 "tpm_transmit: tpm_recv: error %zd\n", rc); 108 "tpm_transmit: tpm_recv: error %zd\n", rc);
109out: 109out:
110 up(&chip->tpm_mutex); 110 up(&chip->tpm_mutex);
@@ -139,15 +139,14 @@ ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
139 __be32 index; 139 __be32 index;
140 char *str = buf; 140 char *str = buf;
141 141
142 struct tpm_chip *chip = 142 struct tpm_chip *chip = dev_get_drvdata(dev);
143 pci_get_drvdata(to_pci_dev(dev));
144 if (chip == NULL) 143 if (chip == NULL)
145 return -ENODEV; 144 return -ENODEV;
146 145
147 memcpy(data, cap_pcr, sizeof(cap_pcr)); 146 memcpy(data, cap_pcr, sizeof(cap_pcr));
148 if ((len = tpm_transmit(chip, data, sizeof(data))) 147 if ((len = tpm_transmit(chip, data, sizeof(data)))
149 < CAP_PCR_RESULT_SIZE) { 148 < CAP_PCR_RESULT_SIZE) {
150 dev_dbg(&chip->pci_dev->dev, "A TPM error (%d) occurred " 149 dev_dbg(chip->dev, "A TPM error (%d) occurred "
151 "attempting to determine the number of PCRS\n", 150 "attempting to determine the number of PCRS\n",
152 be32_to_cpu(*((__be32 *) (data + 6)))); 151 be32_to_cpu(*((__be32 *) (data + 6))));
153 return 0; 152 return 0;
@@ -161,9 +160,10 @@ ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
161 memcpy(data + 10, &index, 4); 160 memcpy(data + 10, &index, 4);
162 if ((len = tpm_transmit(chip, data, sizeof(data))) 161 if ((len = tpm_transmit(chip, data, sizeof(data)))
163 < READ_PCR_RESULT_SIZE){ 162 < READ_PCR_RESULT_SIZE){
164 dev_dbg(&chip->pci_dev->dev, "A TPM error (%d) occurred" 163 dev_dbg(chip->dev, "A TPM error (%d) occurred"
165 " attempting to read PCR %d of %d\n", 164 " attempting to read PCR %d of %d\n",
166 be32_to_cpu(*((__be32 *) (data + 6))), i, num_pcrs); 165 be32_to_cpu(*((__be32 *) (data + 6))),
166 i, num_pcrs);
167 goto out; 167 goto out;
168 } 168 }
169 str += sprintf(str, "PCR-%02d: ", i); 169 str += sprintf(str, "PCR-%02d: ", i);
@@ -191,21 +191,19 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
191 int i, rc; 191 int i, rc;
192 char *str = buf; 192 char *str = buf;
193 193
194 struct tpm_chip *chip = 194 struct tpm_chip *chip = dev_get_drvdata(dev);
195 pci_get_drvdata(to_pci_dev(dev));
196 if (chip == NULL) 195 if (chip == NULL)
197 return -ENODEV; 196 return -ENODEV;
198 197
199 data = kmalloc(READ_PUBEK_RESULT_SIZE, GFP_KERNEL); 198 data = kzalloc(READ_PUBEK_RESULT_SIZE, GFP_KERNEL);
200 if (!data) 199 if (!data)
201 return -ENOMEM; 200 return -ENOMEM;
202 201
203 memcpy(data, readpubek, sizeof(readpubek)); 202 memcpy(data, readpubek, sizeof(readpubek));
204 memset(data + sizeof(readpubek), 0, 20); /* zero nonce */
205 203
206 if ((len = tpm_transmit(chip, data, READ_PUBEK_RESULT_SIZE)) < 204 if ((len = tpm_transmit(chip, data, READ_PUBEK_RESULT_SIZE)) <
207 READ_PUBEK_RESULT_SIZE) { 205 READ_PUBEK_RESULT_SIZE) {
208 dev_dbg(&chip->pci_dev->dev, "A TPM error (%d) occurred " 206 dev_dbg(chip->dev, "A TPM error (%d) occurred "
209 "attempting to read the PUBEK\n", 207 "attempting to read the PUBEK\n",
210 be32_to_cpu(*((__be32 *) (data + 6)))); 208 be32_to_cpu(*((__be32 *) (data + 6))));
211 rc = 0; 209 rc = 0;
@@ -245,7 +243,6 @@ out:
245 kfree(data); 243 kfree(data);
246 return rc; 244 return rc;
247} 245}
248
249EXPORT_SYMBOL_GPL(tpm_show_pubek); 246EXPORT_SYMBOL_GPL(tpm_show_pubek);
250 247
251#define CAP_VER_RESULT_SIZE 18 248#define CAP_VER_RESULT_SIZE 18
@@ -274,8 +271,7 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
274 ssize_t len; 271 ssize_t len;
275 char *str = buf; 272 char *str = buf;
276 273
277 struct tpm_chip *chip = 274 struct tpm_chip *chip = dev_get_drvdata(dev);
278 pci_get_drvdata(to_pci_dev(dev));
279 if (chip == NULL) 275 if (chip == NULL)
280 return -ENODEV; 276 return -ENODEV;
281 277
@@ -315,7 +311,6 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
315} 311}
316EXPORT_SYMBOL_GPL(tpm_store_cancel); 312EXPORT_SYMBOL_GPL(tpm_store_cancel);
317 313
318
319/* 314/*
320 * Device file system interface to the TPM 315 * Device file system interface to the TPM
321 */ 316 */
@@ -339,21 +334,20 @@ int tpm_open(struct inode *inode, struct file *file)
339 } 334 }
340 335
341 if (chip->num_opens) { 336 if (chip->num_opens) {
342 dev_dbg(&chip->pci_dev->dev, 337 dev_dbg(chip->dev, "Another process owns this TPM\n");
343 "Another process owns this TPM\n");
344 rc = -EBUSY; 338 rc = -EBUSY;
345 goto err_out; 339 goto err_out;
346 } 340 }
347 341
348 chip->num_opens++; 342 chip->num_opens++;
349 pci_dev_get(chip->pci_dev); 343 get_device(chip->dev);
350 344
351 spin_unlock(&driver_lock); 345 spin_unlock(&driver_lock);
352 346
353 chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); 347 chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
354 if (chip->data_buffer == NULL) { 348 if (chip->data_buffer == NULL) {
355 chip->num_opens--; 349 chip->num_opens--;
356 pci_dev_put(chip->pci_dev); 350 put_device(chip->dev);
357 return -ENOMEM; 351 return -ENOMEM;
358 } 352 }
359 353
@@ -366,7 +360,6 @@ err_out:
366 spin_unlock(&driver_lock); 360 spin_unlock(&driver_lock);
367 return rc; 361 return rc;
368} 362}
369
370EXPORT_SYMBOL_GPL(tpm_open); 363EXPORT_SYMBOL_GPL(tpm_open);
371 364
372int tpm_release(struct inode *inode, struct file *file) 365int tpm_release(struct inode *inode, struct file *file)
@@ -378,15 +371,14 @@ int tpm_release(struct inode *inode, struct file *file)
378 chip->num_opens--; 371 chip->num_opens--;
379 del_singleshot_timer_sync(&chip->user_read_timer); 372 del_singleshot_timer_sync(&chip->user_read_timer);
380 atomic_set(&chip->data_pending, 0); 373 atomic_set(&chip->data_pending, 0);
381 pci_dev_put(chip->pci_dev); 374 put_device(chip->dev);
382 kfree(chip->data_buffer); 375 kfree(chip->data_buffer);
383 spin_unlock(&driver_lock); 376 spin_unlock(&driver_lock);
384 return 0; 377 return 0;
385} 378}
386
387EXPORT_SYMBOL_GPL(tpm_release); 379EXPORT_SYMBOL_GPL(tpm_release);
388 380
389ssize_t tpm_write(struct file * file, const char __user * buf, 381ssize_t tpm_write(struct file *file, const char __user *buf,
390 size_t size, loff_t * off) 382 size_t size, loff_t * off)
391{ 383{
392 struct tpm_chip *chip = file->private_data; 384 struct tpm_chip *chip = file->private_data;
@@ -422,7 +414,7 @@ ssize_t tpm_write(struct file * file, const char __user * buf,
422 414
423EXPORT_SYMBOL_GPL(tpm_write); 415EXPORT_SYMBOL_GPL(tpm_write);
424 416
425ssize_t tpm_read(struct file * file, char __user * buf, 417ssize_t tpm_read(struct file * file, char __user *buf,
426 size_t size, loff_t * off) 418 size_t size, loff_t * off)
427{ 419{
428 struct tpm_chip *chip = file->private_data; 420 struct tpm_chip *chip = file->private_data;
@@ -444,15 +436,14 @@ ssize_t tpm_read(struct file * file, char __user * buf,
444 436
445 return ret_size; 437 return ret_size;
446} 438}
447
448EXPORT_SYMBOL_GPL(tpm_read); 439EXPORT_SYMBOL_GPL(tpm_read);
449 440
450void __devexit tpm_remove(struct pci_dev *pci_dev) 441void tpm_remove_hardware(struct device *dev)
451{ 442{
452 struct tpm_chip *chip = pci_get_drvdata(pci_dev); 443 struct tpm_chip *chip = dev_get_drvdata(dev);
453 444
454 if (chip == NULL) { 445 if (chip == NULL) {
455 dev_err(&pci_dev->dev, "No device data found\n"); 446 dev_err(dev, "No device data found\n");
456 return; 447 return;
457 } 448 }
458 449
@@ -462,22 +453,20 @@ void __devexit tpm_remove(struct pci_dev *pci_dev)
462 453
463 spin_unlock(&driver_lock); 454 spin_unlock(&driver_lock);
464 455
465 pci_set_drvdata(pci_dev, NULL); 456 dev_set_drvdata(dev, NULL);
466 misc_deregister(&chip->vendor->miscdev); 457 misc_deregister(&chip->vendor->miscdev);
467 kfree(chip->vendor->miscdev.name); 458 kfree(chip->vendor->miscdev.name);
468 459
469 sysfs_remove_group(&pci_dev->dev.kobj, chip->vendor->attr_group); 460 sysfs_remove_group(&dev->kobj, chip->vendor->attr_group);
470 461
471 pci_disable_device(pci_dev); 462 dev_mask[chip->dev_num / TPM_NUM_MASK_ENTRIES ] &=
472 463 !(1 << (chip->dev_num % TPM_NUM_MASK_ENTRIES));
473 dev_mask[chip->dev_num / TPM_NUM_MASK_ENTRIES ] &= !(1 << (chip->dev_num % TPM_NUM_MASK_ENTRIES));
474 464
475 kfree(chip); 465 kfree(chip);
476 466
477 pci_dev_put(pci_dev); 467 put_device(dev);
478} 468}
479 469EXPORT_SYMBOL_GPL(tpm_remove_hardware);
480EXPORT_SYMBOL_GPL(tpm_remove);
481 470
482static u8 savestate[] = { 471static u8 savestate[] = {
483 0, 193, /* TPM_TAG_RQU_COMMAND */ 472 0, 193, /* TPM_TAG_RQU_COMMAND */
@@ -489,32 +478,30 @@ static u8 savestate[] = {
489 * We are about to suspend. Save the TPM state 478 * We are about to suspend. Save the TPM state
490 * so that it can be restored. 479 * so that it can be restored.
491 */ 480 */
492int tpm_pm_suspend(struct pci_dev *pci_dev, pm_message_t pm_state) 481int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
493{ 482{
494 struct tpm_chip *chip = pci_get_drvdata(pci_dev); 483 struct tpm_chip *chip = dev_get_drvdata(dev);
495 if (chip == NULL) 484 if (chip == NULL)
496 return -ENODEV; 485 return -ENODEV;
497 486
498 tpm_transmit(chip, savestate, sizeof(savestate)); 487 tpm_transmit(chip, savestate, sizeof(savestate));
499 return 0; 488 return 0;
500} 489}
501
502EXPORT_SYMBOL_GPL(tpm_pm_suspend); 490EXPORT_SYMBOL_GPL(tpm_pm_suspend);
503 491
504/* 492/*
505 * Resume from a power save. The BIOS already restored 493 * Resume from a power save. The BIOS already restored
506 * the TPM state. 494 * the TPM state.
507 */ 495 */
508int tpm_pm_resume(struct pci_dev *pci_dev) 496int tpm_pm_resume(struct device *dev)
509{ 497{
510 struct tpm_chip *chip = pci_get_drvdata(pci_dev); 498 struct tpm_chip *chip = dev_get_drvdata(dev);
511 499
512 if (chip == NULL) 500 if (chip == NULL)
513 return -ENODEV; 501 return -ENODEV;
514 502
515 return 0; 503 return 0;
516} 504}
517
518EXPORT_SYMBOL_GPL(tpm_pm_resume); 505EXPORT_SYMBOL_GPL(tpm_pm_resume);
519 506
520/* 507/*
@@ -524,8 +511,7 @@ EXPORT_SYMBOL_GPL(tpm_pm_resume);
524 * upon errant exit from this function specific probe function should call 511 * upon errant exit from this function specific probe function should call
525 * pci_disable_device 512 * pci_disable_device
526 */ 513 */
527int tpm_register_hardware(struct pci_dev *pci_dev, 514int tpm_register_hardware(struct device *dev, struct tpm_vendor_specific *entry)
528 struct tpm_vendor_specific *entry)
529{ 515{
530#define DEVNAME_SIZE 7 516#define DEVNAME_SIZE 7
531 517
@@ -534,12 +520,10 @@ int tpm_register_hardware(struct pci_dev *pci_dev,
534 int i, j; 520 int i, j;
535 521
536 /* Driver specific per-device data */ 522 /* Driver specific per-device data */
537 chip = kmalloc(sizeof(*chip), GFP_KERNEL); 523 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
538 if (chip == NULL) 524 if (chip == NULL)
539 return -ENOMEM; 525 return -ENOMEM;
540 526
541 memset(chip, 0, sizeof(struct tpm_chip));
542
543 init_MUTEX(&chip->buffer_mutex); 527 init_MUTEX(&chip->buffer_mutex);
544 init_MUTEX(&chip->tpm_mutex); 528 init_MUTEX(&chip->tpm_mutex);
545 INIT_LIST_HEAD(&chip->list); 529 INIT_LIST_HEAD(&chip->list);
@@ -563,8 +547,7 @@ int tpm_register_hardware(struct pci_dev *pci_dev,
563 547
564dev_num_search_complete: 548dev_num_search_complete:
565 if (chip->dev_num < 0) { 549 if (chip->dev_num < 0) {
566 dev_err(&pci_dev->dev, 550 dev_err(dev, "No available tpm device numbers\n");
567 "No available tpm device numbers\n");
568 kfree(chip); 551 kfree(chip);
569 return -ENODEV; 552 return -ENODEV;
570 } else if (chip->dev_num == 0) 553 } else if (chip->dev_num == 0)
@@ -576,15 +559,15 @@ dev_num_search_complete:
576 scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num); 559 scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
577 chip->vendor->miscdev.name = devname; 560 chip->vendor->miscdev.name = devname;
578 561
579 chip->vendor->miscdev.dev = &(pci_dev->dev); 562 chip->vendor->miscdev.dev = dev;
580 chip->pci_dev = pci_dev_get(pci_dev); 563 chip->dev = get_device(dev);
581 564
582 if (misc_register(&chip->vendor->miscdev)) { 565 if (misc_register(&chip->vendor->miscdev)) {
583 dev_err(&chip->pci_dev->dev, 566 dev_err(chip->dev,
584 "unable to misc_register %s, minor %d\n", 567 "unable to misc_register %s, minor %d\n",
585 chip->vendor->miscdev.name, 568 chip->vendor->miscdev.name,
586 chip->vendor->miscdev.minor); 569 chip->vendor->miscdev.minor);
587 pci_dev_put(pci_dev); 570 put_device(dev);
588 kfree(chip); 571 kfree(chip);
589 dev_mask[i] &= !(1 << j); 572 dev_mask[i] &= !(1 << j);
590 return -ENODEV; 573 return -ENODEV;
@@ -592,17 +575,16 @@ dev_num_search_complete:
592 575
593 spin_lock(&driver_lock); 576 spin_lock(&driver_lock);
594 577
595 pci_set_drvdata(pci_dev, chip); 578 dev_set_drvdata(dev, chip);
596 579
597 list_add(&chip->list, &tpm_chip_list); 580 list_add(&chip->list, &tpm_chip_list);
598 581
599 spin_unlock(&driver_lock); 582 spin_unlock(&driver_lock);
600 583
601 sysfs_create_group(&pci_dev->dev.kobj, chip->vendor->attr_group); 584 sysfs_create_group(&dev->kobj, chip->vendor->attr_group);
602 585
603 return 0; 586 return 0;
604} 587}
605
606EXPORT_SYMBOL_GPL(tpm_register_hardware); 588EXPORT_SYMBOL_GPL(tpm_register_hardware);
607 589
608MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); 590MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 373b41f6b460..024814b50c3c 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -55,12 +55,13 @@ struct tpm_vendor_specific {
55 int (*recv) (struct tpm_chip *, u8 *, size_t); 55 int (*recv) (struct tpm_chip *, u8 *, size_t);
56 int (*send) (struct tpm_chip *, u8 *, size_t); 56 int (*send) (struct tpm_chip *, u8 *, size_t);
57 void (*cancel) (struct tpm_chip *); 57 void (*cancel) (struct tpm_chip *);
58 u8 (*status) (struct tpm_chip *);
58 struct miscdevice miscdev; 59 struct miscdevice miscdev;
59 struct attribute_group *attr_group; 60 struct attribute_group *attr_group;
60}; 61};
61 62
62struct tpm_chip { 63struct tpm_chip {
63 struct pci_dev *pci_dev; /* PCI device stuff */ 64 struct device *dev; /* Device stuff */
64 65
65 int dev_num; /* /dev/tpm# */ 66 int dev_num; /* /dev/tpm# */
66 int num_opens; /* only one allowed */ 67 int num_opens; /* only one allowed */
@@ -91,13 +92,13 @@ static inline void tpm_write_index(int base, int index, int value)
91 outb(value & 0xFF, base+1); 92 outb(value & 0xFF, base+1);
92} 93}
93 94
94extern int tpm_register_hardware(struct pci_dev *, 95extern int tpm_register_hardware(struct device *,
95 struct tpm_vendor_specific *); 96 struct tpm_vendor_specific *);
96extern int tpm_open(struct inode *, struct file *); 97extern int tpm_open(struct inode *, struct file *);
97extern int tpm_release(struct inode *, struct file *); 98extern int tpm_release(struct inode *, struct file *);
98extern ssize_t tpm_write(struct file *, const char __user *, size_t, 99extern ssize_t tpm_write(struct file *, const char __user *, size_t,
99 loff_t *); 100 loff_t *);
100extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *); 101extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
101extern void __devexit tpm_remove(struct pci_dev *); 102extern void tpm_remove_hardware(struct device *);
102extern int tpm_pm_suspend(struct pci_dev *, pm_message_t); 103extern int tpm_pm_suspend(struct device *, pm_message_t);
103extern int tpm_pm_resume(struct pci_dev *); 104extern int tpm_pm_resume(struct device *);
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index c0d64914595f..8cb42e84723c 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -40,7 +40,7 @@ enum tpm_atmel_read_status {
40 ATML_STATUS_READY = 0x08 40 ATML_STATUS_READY = 0x08
41}; 41};
42 42
43static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count) 43static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
44{ 44{
45 u8 status, *hdr = buf; 45 u8 status, *hdr = buf;
46 u32 size; 46 u32 size;
@@ -54,7 +54,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count)
54 for (i = 0; i < 6; i++) { 54 for (i = 0; i < 6; i++) {
55 status = inb(chip->vendor->base + 1); 55 status = inb(chip->vendor->base + 1);
56 if ((status & ATML_STATUS_DATA_AVAIL) == 0) { 56 if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
57 dev_err(&chip->pci_dev->dev, 57 dev_err(chip->dev,
58 "error reading header\n"); 58 "error reading header\n");
59 return -EIO; 59 return -EIO;
60 } 60 }
@@ -66,12 +66,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count)
66 size = be32_to_cpu(*native_size); 66 size = be32_to_cpu(*native_size);
67 67
68 if (count < size) { 68 if (count < size) {
69 dev_err(&chip->pci_dev->dev, 69 dev_err(chip->dev,
70 "Recv size(%d) less than available space\n", size); 70 "Recv size(%d) less than available space\n", size);
71 for (; i < size; i++) { /* clear the waiting data anyway */ 71 for (; i < size; i++) { /* clear the waiting data anyway */
72 status = inb(chip->vendor->base + 1); 72 status = inb(chip->vendor->base + 1);
73 if ((status & ATML_STATUS_DATA_AVAIL) == 0) { 73 if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
74 dev_err(&chip->pci_dev->dev, 74 dev_err(chip->dev,
75 "error reading data\n"); 75 "error reading data\n");
76 return -EIO; 76 return -EIO;
77 } 77 }
@@ -83,7 +83,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count)
83 for (; i < size; i++) { 83 for (; i < size; i++) {
84 status = inb(chip->vendor->base + 1); 84 status = inb(chip->vendor->base + 1);
85 if ((status & ATML_STATUS_DATA_AVAIL) == 0) { 85 if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
86 dev_err(&chip->pci_dev->dev, 86 dev_err(chip->dev,
87 "error reading data\n"); 87 "error reading data\n");
88 return -EIO; 88 return -EIO;
89 } 89 }
@@ -93,20 +93,20 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count)
93 /* make sure data available is gone */ 93 /* make sure data available is gone */
94 status = inb(chip->vendor->base + 1); 94 status = inb(chip->vendor->base + 1);
95 if (status & ATML_STATUS_DATA_AVAIL) { 95 if (status & ATML_STATUS_DATA_AVAIL) {
96 dev_err(&chip->pci_dev->dev, "data available is stuck\n"); 96 dev_err(chip->dev, "data available is stuck\n");
97 return -EIO; 97 return -EIO;
98 } 98 }
99 99
100 return size; 100 return size;
101} 101}
102 102
103static int tpm_atml_send(struct tpm_chip *chip, u8 * buf, size_t count) 103static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
104{ 104{
105 int i; 105 int i;
106 106
107 dev_dbg(&chip->pci_dev->dev, "tpm_atml_send: "); 107 dev_dbg(chip->dev, "tpm_atml_send:\n");
108 for (i = 0; i < count; i++) { 108 for (i = 0; i < count; i++) {
109 dev_dbg(&chip->pci_dev->dev, "0x%x(%d) ", buf[i], buf[i]); 109 dev_dbg(chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
110 outb(buf[i], chip->vendor->base); 110 outb(buf[i], chip->vendor->base);
111 } 111 }
112 112
@@ -118,6 +118,11 @@ static void tpm_atml_cancel(struct tpm_chip *chip)
118 outb(ATML_STATUS_ABORT, chip->vendor->base + 1); 118 outb(ATML_STATUS_ABORT, chip->vendor->base + 1);
119} 119}
120 120
121static u8 tpm_atml_status(struct tpm_chip *chip)
122{
123 return inb(chip->vendor->base + 1);
124}
125
121static struct file_operations atmel_ops = { 126static struct file_operations atmel_ops = {
122 .owner = THIS_MODULE, 127 .owner = THIS_MODULE,
123 .llseek = no_llseek, 128 .llseek = no_llseek,
@@ -137,7 +142,7 @@ static struct attribute* atmel_attrs[] = {
137 &dev_attr_pcrs.attr, 142 &dev_attr_pcrs.attr,
138 &dev_attr_caps.attr, 143 &dev_attr_caps.attr,
139 &dev_attr_cancel.attr, 144 &dev_attr_cancel.attr,
140 0, 145 NULL,
141}; 146};
142 147
143static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs }; 148static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
@@ -146,6 +151,7 @@ static struct tpm_vendor_specific tpm_atmel = {
146 .recv = tpm_atml_recv, 151 .recv = tpm_atml_recv,
147 .send = tpm_atml_send, 152 .send = tpm_atml_send,
148 .cancel = tpm_atml_cancel, 153 .cancel = tpm_atml_cancel,
154 .status = tpm_atml_status,
149 .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL, 155 .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
150 .req_complete_val = ATML_STATUS_DATA_AVAIL, 156 .req_complete_val = ATML_STATUS_DATA_AVAIL,
151 .req_canceled = ATML_STATUS_READY, 157 .req_canceled = ATML_STATUS_READY,
@@ -153,86 +159,94 @@ static struct tpm_vendor_specific tpm_atmel = {
153 .miscdev = { .fops = &atmel_ops, }, 159 .miscdev = { .fops = &atmel_ops, },
154}; 160};
155 161
156static int __devinit tpm_atml_init(struct pci_dev *pci_dev, 162static struct platform_device *pdev;
157 const struct pci_device_id *pci_id) 163
164static void __devexit tpm_atml_remove(struct device *dev)
165{
166 struct tpm_chip *chip = dev_get_drvdata(dev);
167 if (chip) {
168 release_region(chip->vendor->base, 2);
169 tpm_remove_hardware(chip->dev);
170 }
171}
172
173static struct device_driver atml_drv = {
174 .name = "tpm_atmel",
175 .bus = &platform_bus_type,
176 .owner = THIS_MODULE,
177 .suspend = tpm_pm_suspend,
178 .resume = tpm_pm_resume,
179};
180
181static int __init init_atmel(void)
158{ 182{
159 u8 version[4];
160 int rc = 0; 183 int rc = 0;
161 int lo, hi; 184 int lo, hi;
162 185
163 if (pci_enable_device(pci_dev)) 186 driver_register(&atml_drv);
164 return -EIO;
165 187
166 lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO); 188 lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
167 hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI); 189 hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
168 190
169 tpm_atmel.base = (hi<<8)|lo; 191 tpm_atmel.base = (hi<<8)|lo;
170 dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
171 192
172 /* verify that it is an Atmel part */ 193 /* verify that it is an Atmel part */
173 if (tpm_read_index(TPM_ADDR, 4) != 'A' || tpm_read_index(TPM_ADDR, 5) != 'T' 194 if (tpm_read_index(TPM_ADDR, 4) != 'A' || tpm_read_index(TPM_ADDR, 5) != 'T'
174 || tpm_read_index(TPM_ADDR, 6) != 'M' || tpm_read_index(TPM_ADDR, 7) != 'L') { 195 || tpm_read_index(TPM_ADDR, 6) != 'M' || tpm_read_index(TPM_ADDR, 7) != 'L') {
175 rc = -ENODEV; 196 return -ENODEV;
176 goto out_err;
177 } 197 }
178 198
179 /* query chip for its version number */ 199 /* verify chip version number is 1.1 */
180 if ((version[0] = tpm_read_index(TPM_ADDR, 0x00)) != 0xFF) { 200 if ( (tpm_read_index(TPM_ADDR, 0x00) != 0x01) ||
181 version[1] = tpm_read_index(TPM_ADDR, 0x01); 201 (tpm_read_index(TPM_ADDR, 0x01) != 0x01 ))
182 version[2] = tpm_read_index(TPM_ADDR, 0x02); 202 return -ENODEV;
183 version[3] = tpm_read_index(TPM_ADDR, 0x03); 203
184 } else { 204 pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
185 dev_info(&pci_dev->dev, "version query failed\n"); 205 if ( !pdev )
186 rc = -ENODEV; 206 return -ENOMEM;
187 goto out_err; 207
208 pdev->name = "tpm_atmel0";
209 pdev->id = -1;
210 pdev->num_resources = 0;
211 pdev->dev.release = tpm_atml_remove;
212 pdev->dev.driver = &atml_drv;
213
214 if ((rc = platform_device_register(pdev)) < 0) {
215 kfree(pdev);
216 pdev = NULL;
217 return rc;
188 } 218 }
189 219
190 if ((rc = tpm_register_hardware(pci_dev, &tpm_atmel)) < 0) 220 if (request_region(tpm_atmel.base, 2, "tpm_atmel0") == NULL ) {
191 goto out_err; 221 platform_device_unregister(pdev);
222 kfree(pdev);
223 pdev = NULL;
224 return -EBUSY;
225 }
192 226
193 dev_info(&pci_dev->dev, 227 if ((rc = tpm_register_hardware(&pdev->dev, &tpm_atmel)) < 0) {
194 "Atmel TPM version %d.%d.%d.%d\n", version[0], version[1], 228 release_region(tpm_atmel.base, 2);
195 version[2], version[3]); 229 platform_device_unregister(pdev);
230 kfree(pdev);
231 pdev = NULL;
232 return rc;
233 }
196 234
235 dev_info(&pdev->dev, "Atmel TPM 1.1, Base Address: 0x%x\n",
236 tpm_atmel.base);
197 return 0; 237 return 0;
198out_err:
199 pci_disable_device(pci_dev);
200 return rc;
201}
202
203static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
204 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
205 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
206 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
207 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
208 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
209 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
210 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
211 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0)},
212 {PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
213 {PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6LPC)},
214 {0,}
215};
216
217MODULE_DEVICE_TABLE(pci, tpm_pci_tbl);
218
219static struct pci_driver atmel_pci_driver = {
220 .name = "tpm_atmel",
221 .id_table = tpm_pci_tbl,
222 .probe = tpm_atml_init,
223 .remove = __devexit_p(tpm_remove),
224 .suspend = tpm_pm_suspend,
225 .resume = tpm_pm_resume,
226};
227
228static int __init init_atmel(void)
229{
230 return pci_register_driver(&atmel_pci_driver);
231} 238}
232 239
233static void __exit cleanup_atmel(void) 240static void __exit cleanup_atmel(void)
234{ 241{
235 pci_unregister_driver(&atmel_pci_driver); 242 if (pdev) {
243 tpm_atml_remove(&pdev->dev);
244 platform_device_unregister(pdev);
245 kfree(pdev);
246 pdev = NULL;
247 }
248
249 driver_unregister(&atml_drv);
236} 250}
237 251
238module_init(init_atmel); 252module_init(init_atmel);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 939e51e119e6..8198dbb7370f 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -5,6 +5,7 @@
5 * Specifications at www.trustedcomputinggroup.org 5 * Specifications at www.trustedcomputinggroup.org
6 * 6 *
7 * Copyright (C) 2005, Marcel Selhorst <selhorst@crypto.rub.de> 7 * Copyright (C) 2005, Marcel Selhorst <selhorst@crypto.rub.de>
8 * Sirrix AG - security technologies, http://www.sirrix.com and
8 * Applied Data Security Group, Ruhr-University Bochum, Germany 9 * Applied Data Security Group, Ruhr-University Bochum, Germany
9 * Project-Homepage: http://www.prosec.rub.de/tpm 10 * Project-Homepage: http://www.prosec.rub.de/tpm
10 * 11 *
@@ -29,9 +30,10 @@
29#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1 30#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1
30 31
31/* These values will be filled after PnP-call */ 32/* These values will be filled after PnP-call */
32static int TPM_INF_DATA = 0; 33static int TPM_INF_DATA;
33static int TPM_INF_ADDR = 0; 34static int TPM_INF_ADDR;
34static int pnp_registered = 0; 35static int TPM_INF_BASE;
36static int TPM_INF_PORT_LEN;
35 37
36/* TPM header definitions */ 38/* TPM header definitions */
37enum infineon_tpm_header { 39enum infineon_tpm_header {
@@ -143,11 +145,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
143 } 145 }
144 if (i == TPM_MAX_TRIES) { /* timeout occurs */ 146 if (i == TPM_MAX_TRIES) { /* timeout occurs */
145 if (wait_for_bit == STAT_XFE) 147 if (wait_for_bit == STAT_XFE)
146 dev_err(&chip->pci_dev->dev, 148 dev_err(chip->dev, "Timeout in wait(STAT_XFE)\n");
147 "Timeout in wait(STAT_XFE)\n");
148 if (wait_for_bit == STAT_RDA) 149 if (wait_for_bit == STAT_RDA)
149 dev_err(&chip->pci_dev->dev, 150 dev_err(chip->dev, "Timeout in wait(STAT_RDA)\n");
150 "Timeout in wait(STAT_RDA)\n");
151 return -EIO; 151 return -EIO;
152 } 152 }
153 return 0; 153 return 0;
@@ -170,7 +170,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
170static void tpm_wtx(struct tpm_chip *chip) 170static void tpm_wtx(struct tpm_chip *chip)
171{ 171{
172 number_of_wtx++; 172 number_of_wtx++;
173 dev_info(&chip->pci_dev->dev, "Granting WTX (%02d / %02d)\n", 173 dev_info(chip->dev, "Granting WTX (%02d / %02d)\n",
174 number_of_wtx, TPM_MAX_WTX_PACKAGES); 174 number_of_wtx, TPM_MAX_WTX_PACKAGES);
175 wait_and_send(chip, TPM_VL_VER); 175 wait_and_send(chip, TPM_VL_VER);
176 wait_and_send(chip, TPM_CTRL_WTX); 176 wait_and_send(chip, TPM_CTRL_WTX);
@@ -181,7 +181,7 @@ static void tpm_wtx(struct tpm_chip *chip)
181 181
182static void tpm_wtx_abort(struct tpm_chip *chip) 182static void tpm_wtx_abort(struct tpm_chip *chip)
183{ 183{
184 dev_info(&chip->pci_dev->dev, "Aborting WTX\n"); 184 dev_info(chip->dev, "Aborting WTX\n");
185 wait_and_send(chip, TPM_VL_VER); 185 wait_and_send(chip, TPM_VL_VER);
186 wait_and_send(chip, TPM_CTRL_WTX_ABORT); 186 wait_and_send(chip, TPM_CTRL_WTX_ABORT);
187 wait_and_send(chip, 0x00); 187 wait_and_send(chip, 0x00);
@@ -206,7 +206,7 @@ recv_begin:
206 } 206 }
207 207
208 if (buf[0] != TPM_VL_VER) { 208 if (buf[0] != TPM_VL_VER) {
209 dev_err(&chip->pci_dev->dev, 209 dev_err(chip->dev,
210 "Wrong transport protocol implementation!\n"); 210 "Wrong transport protocol implementation!\n");
211 return -EIO; 211 return -EIO;
212 } 212 }
@@ -221,8 +221,7 @@ recv_begin:
221 } 221 }
222 222
223 if ((size == 0x6D00) && (buf[1] == 0x80)) { 223 if ((size == 0x6D00) && (buf[1] == 0x80)) {
224 dev_err(&chip->pci_dev->dev, 224 dev_err(chip->dev, "Error handling on vendor layer!\n");
225 "Error handling on vendor layer!\n");
226 return -EIO; 225 return -EIO;
227 } 226 }
228 227
@@ -234,7 +233,7 @@ recv_begin:
234 } 233 }
235 234
236 if (buf[1] == TPM_CTRL_WTX) { 235 if (buf[1] == TPM_CTRL_WTX) {
237 dev_info(&chip->pci_dev->dev, "WTX-package received\n"); 236 dev_info(chip->dev, "WTX-package received\n");
238 if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { 237 if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
239 tpm_wtx(chip); 238 tpm_wtx(chip);
240 goto recv_begin; 239 goto recv_begin;
@@ -245,14 +244,14 @@ recv_begin:
245 } 244 }
246 245
247 if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { 246 if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
248 dev_info(&chip->pci_dev->dev, "WTX-abort acknowledged\n"); 247 dev_info(chip->dev, "WTX-abort acknowledged\n");
249 return size; 248 return size;
250 } 249 }
251 250
252 if (buf[1] == TPM_CTRL_ERROR) { 251 if (buf[1] == TPM_CTRL_ERROR) {
253 dev_err(&chip->pci_dev->dev, "ERROR-package received:\n"); 252 dev_err(chip->dev, "ERROR-package received:\n");
254 if (buf[4] == TPM_INF_NAK) 253 if (buf[4] == TPM_INF_NAK)
255 dev_err(&chip->pci_dev->dev, 254 dev_err(chip->dev,
256 "-> Negative acknowledgement" 255 "-> Negative acknowledgement"
257 " - retransmit command!\n"); 256 " - retransmit command!\n");
258 return -EIO; 257 return -EIO;
@@ -271,7 +270,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
271 270
272 ret = empty_fifo(chip, 1); 271 ret = empty_fifo(chip, 1);
273 if (ret) { 272 if (ret) {
274 dev_err(&chip->pci_dev->dev, "Timeout while clearing FIFO\n"); 273 dev_err(chip->dev, "Timeout while clearing FIFO\n");
275 return -EIO; 274 return -EIO;
276 } 275 }
277 276
@@ -316,6 +315,11 @@ static void tpm_inf_cancel(struct tpm_chip *chip)
316 */ 315 */
317} 316}
318 317
318static u8 tpm_inf_status(struct tpm_chip *chip)
319{
320 return inb(chip->vendor->base + STAT);
321}
322
319static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); 323static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
320static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); 324static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
321static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); 325static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
@@ -344,6 +348,7 @@ static struct tpm_vendor_specific tpm_inf = {
344 .recv = tpm_inf_recv, 348 .recv = tpm_inf_recv,
345 .send = tpm_inf_send, 349 .send = tpm_inf_send,
346 .cancel = tpm_inf_cancel, 350 .cancel = tpm_inf_cancel,
351 .status = tpm_inf_status,
347 .req_complete_mask = 0, 352 .req_complete_mask = 0,
348 .req_complete_val = 0, 353 .req_complete_val = 0,
349 .attr_group = &inf_attr_grp, 354 .attr_group = &inf_attr_grp,
@@ -356,30 +361,11 @@ static const struct pnp_device_id tpm_pnp_tbl[] = {
356 {"IFX0102", 0}, 361 {"IFX0102", 0},
357 {"", 0} 362 {"", 0}
358}; 363};
364
359MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); 365MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
360 366
361static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, 367static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
362 const struct pnp_device_id *dev_id) 368 const struct pnp_device_id *dev_id)
363{
364 if (pnp_port_valid(dev, 0)) {
365 TPM_INF_ADDR = (pnp_port_start(dev, 0) & 0xff);
366 TPM_INF_DATA = ((TPM_INF_ADDR + 1) & 0xff);
367 tpm_inf.base = pnp_port_start(dev, 1);
368 dev_info(&dev->dev, "Found %s with ID %s\n",
369 dev->name, dev_id->id);
370 return 0;
371 }
372 return -ENODEV;
373}
374
375static struct pnp_driver tpm_inf_pnp = {
376 .name = "tpm_inf_pnp",
377 .id_table = tpm_pnp_tbl,
378 .probe = tpm_inf_pnp_probe,
379};
380
381static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
382 const struct pci_device_id *pci_id)
383{ 369{
384 int rc = 0; 370 int rc = 0;
385 u8 iol, ioh; 371 u8 iol, ioh;
@@ -388,30 +374,28 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
388 int productid[2]; 374 int productid[2];
389 char chipname[20]; 375 char chipname[20];
390 376
391 rc = pci_enable_device(pci_dev); 377 /* read IO-ports through PnP */
392 if (rc) 378 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
393 return rc; 379 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
394 380 TPM_INF_ADDR = pnp_port_start(dev, 0);
395 dev_info(&pci_dev->dev, "LPC-bus found at 0x%x\n", pci_id->device); 381 TPM_INF_DATA = (TPM_INF_ADDR + 1);
396 382 TPM_INF_BASE = pnp_port_start(dev, 1);
397 /* read IO-ports from PnP */ 383 TPM_INF_PORT_LEN = pnp_port_len(dev, 1);
398 rc = pnp_register_driver(&tpm_inf_pnp); 384 if (!TPM_INF_PORT_LEN)
399 if (rc < 0) { 385 return -EINVAL;
400 dev_err(&pci_dev->dev, 386 dev_info(&dev->dev, "Found %s with ID %s\n",
401 "Error %x from pnp_register_driver!\n",rc); 387 dev->name, dev_id->id);
402 goto error2; 388 if (!((TPM_INF_BASE >> 8) & 0xff))
403 } 389 return -EINVAL;
404 if (!rc) { 390 /* publish my base address and request region */
405 dev_info(&pci_dev->dev, "No Infineon TPM found!\n"); 391 tpm_inf.base = TPM_INF_BASE;
406 goto error; 392 if (request_region
393 (tpm_inf.base, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
394 release_region(tpm_inf.base, TPM_INF_PORT_LEN);
395 return -EINVAL;
396 }
407 } else { 397 } else {
408 pnp_registered = 1; 398 return -EINVAL;
409 }
410
411 /* Make sure, we have received valid config ports */
412 if (!TPM_INF_ADDR) {
413 dev_err(&pci_dev->dev, "No valid IO-ports received!\n");
414 goto error;
415 } 399 }
416 400
417 /* query chip for its vendor, its version number a.s.o. */ 401 /* query chip for its vendor, its version number a.s.o. */
@@ -443,10 +427,6 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
443 427
444 if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) { 428 if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) {
445 429
446 if (tpm_inf.base == 0) {
447 dev_err(&pci_dev->dev, "No IO-ports found!\n");
448 goto error;
449 }
450 /* configure TPM with IO-ports */ 430 /* configure TPM with IO-ports */
451 outb(IOLIMH, TPM_INF_ADDR); 431 outb(IOLIMH, TPM_INF_ADDR);
452 outb(((tpm_inf.base >> 8) & 0xff), TPM_INF_DATA); 432 outb(((tpm_inf.base >> 8) & 0xff), TPM_INF_DATA);
@@ -460,10 +440,11 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
460 iol = inb(TPM_INF_DATA); 440 iol = inb(TPM_INF_DATA);
461 441
462 if ((ioh << 8 | iol) != tpm_inf.base) { 442 if ((ioh << 8 | iol) != tpm_inf.base) {
463 dev_err(&pci_dev->dev, 443 dev_err(&dev->dev,
464 "Could not set IO-ports to %04x\n", 444 "Could not set IO-ports to %04x\n",
465 tpm_inf.base); 445 tpm_inf.base);
466 goto error; 446 release_region(tpm_inf.base, TPM_INF_PORT_LEN);
447 return -EIO;
467 } 448 }
468 449
469 /* activate register */ 450 /* activate register */
@@ -475,7 +456,7 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
475 outb(RESET_LP_IRQC_DISABLE, tpm_inf.base + CMD); 456 outb(RESET_LP_IRQC_DISABLE, tpm_inf.base + CMD);
476 457
477 /* Finally, we're done, print some info */ 458 dev_info(&dev->dev, "TPM found: " 459 /* Finally, we're done, print some info */
478 dev_info(&pci_dev->dev, "TPM found: " 459 dev_info(&dev->dev, "TPM found: "
479 "config base 0x%x, " 460 "config base 0x%x, "
480 "io base 0x%x, " 461 "io base 0x%x, "
481 "chip version %02x%02x, " 462 "chip version %02x%02x, "
@@ -483,59 +464,53 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
483 "product id %02x%02x" 464 "product id %02x%02x"
484 "%s\n", 465 "%s\n",
485 TPM_INF_ADDR, 466 TPM_INF_ADDR,
486 tpm_inf.base, 467 TPM_INF_BASE,
487 version[0], version[1], 468 version[0], version[1],
488 vendorid[0], vendorid[1], 469 vendorid[0], vendorid[1],
489 productid[0], productid[1], chipname); 470 productid[0], productid[1], chipname);
490 471
491 rc = tpm_register_hardware(pci_dev, &tpm_inf); 472 rc = tpm_register_hardware(&dev->dev, &tpm_inf);
492 if (rc < 0) 473 if (rc < 0) {
493 goto error; 474 release_region(tpm_inf.base, TPM_INF_PORT_LEN);
475 return -ENODEV;
476 }
494 return 0; 477 return 0;
495 } else { 478 } else {
496 dev_info(&pci_dev->dev, "No Infineon TPM found!\n"); 479 dev_info(&dev->dev, "No Infineon TPM found!\n");
497error:
498 pnp_unregister_driver(&tpm_inf_pnp);
499error2:
500 pci_disable_device(pci_dev);
501 pnp_registered = 0;
502 return -ENODEV; 480 return -ENODEV;
503 } 481 }
504} 482}
505 483
506static struct pci_device_id tpm_pci_tbl[] __devinitdata = { 484static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
507 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)}, 485{
508 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)}, 486 struct tpm_chip *chip = pnp_get_drvdata(dev);
509 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
510 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
511 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
512 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
513 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
514 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2)},
515 {0,}
516};
517 487
518MODULE_DEVICE_TABLE(pci, tpm_pci_tbl); 488 if (chip) {
489 release_region(chip->vendor->base, TPM_INF_PORT_LEN);
490 tpm_remove_hardware(chip->dev);
491 }
492}
519 493
520static struct pci_driver inf_pci_driver = { 494static struct pnp_driver tpm_inf_pnp = {
521 .name = "tpm_inf", 495 .name = "tpm_inf_pnp",
522 .id_table = tpm_pci_tbl, 496 .driver = {
523 .probe = tpm_inf_probe, 497 .owner = THIS_MODULE,
524 .remove = __devexit_p(tpm_remove), 498 .suspend = tpm_pm_suspend,
525 .suspend = tpm_pm_suspend, 499 .resume = tpm_pm_resume,
526 .resume = tpm_pm_resume, 500 },
501 .id_table = tpm_pnp_tbl,
502 .probe = tpm_inf_pnp_probe,
503 .remove = tpm_inf_pnp_remove,
527}; 504};
528 505
529static int __init init_inf(void) 506static int __init init_inf(void)
530{ 507{
531 return pci_register_driver(&inf_pci_driver); 508 return pnp_register_driver(&tpm_inf_pnp);
532} 509}
533 510
534static void __exit cleanup_inf(void) 511static void __exit cleanup_inf(void)
535{ 512{
536 if (pnp_registered) 513 pnp_unregister_driver(&tpm_inf_pnp);
537 pnp_unregister_driver(&tpm_inf_pnp);
538 pci_unregister_driver(&inf_pci_driver);
539} 514}
540 515
541module_init(init_inf); 516module_init(init_inf);
@@ -543,5 +518,5 @@ module_exit(cleanup_inf);
543 518
544MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>"); 519MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>");
545MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); 520MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
546MODULE_VERSION("1.5"); 521MODULE_VERSION("1.6");
547MODULE_LICENSE("GPL"); 522MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index b4127348c063..253871b5b1e2 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -111,7 +111,7 @@ static int nsc_wait_for_ready(struct tpm_chip *chip)
111 } 111 }
112 while (time_before(jiffies, stop)); 112 while (time_before(jiffies, stop));
113 113
114 dev_info(&chip->pci_dev->dev, "wait for ready failed\n"); 114 dev_info(chip->dev, "wait for ready failed\n");
115 return -EBUSY; 115 return -EBUSY;
116} 116}
117 117
@@ -127,12 +127,12 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
127 return -EIO; 127 return -EIO;
128 128
129 if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { 129 if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
130 dev_err(&chip->pci_dev->dev, "F0 timeout\n"); 130 dev_err(chip->dev, "F0 timeout\n");
131 return -EIO; 131 return -EIO;
132 } 132 }
133 if ((data = 133 if ((data =
134 inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) { 134 inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
135 dev_err(&chip->pci_dev->dev, "not in normal mode (0x%x)\n", 135 dev_err(chip->dev, "not in normal mode (0x%x)\n",
136 data); 136 data);
137 return -EIO; 137 return -EIO;
138 } 138 }
@@ -141,7 +141,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
141 for (p = buffer; p < &buffer[count]; p++) { 141 for (p = buffer; p < &buffer[count]; p++) {
142 if (wait_for_stat 142 if (wait_for_stat
143 (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { 143 (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
144 dev_err(&chip->pci_dev->dev, 144 dev_err(chip->dev,
145 "OBF timeout (while reading data)\n"); 145 "OBF timeout (while reading data)\n");
146 return -EIO; 146 return -EIO;
147 } 147 }
@@ -152,11 +152,11 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
152 152
153 if ((data & NSC_STATUS_F0) == 0 && 153 if ((data & NSC_STATUS_F0) == 0 &&
154 (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { 154 (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
155 dev_err(&chip->pci_dev->dev, "F0 not set\n"); 155 dev_err(chip->dev, "F0 not set\n");
156 return -EIO; 156 return -EIO;
157 } 157 }
158 if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) { 158 if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) {
159 dev_err(&chip->pci_dev->dev, 159 dev_err(chip->dev,
160 "expected end of command(0x%x)\n", data); 160 "expected end of command(0x%x)\n", data);
161 return -EIO; 161 return -EIO;
162 } 162 }
@@ -187,19 +187,19 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
187 return -EIO; 187 return -EIO;
188 188
189 if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { 189 if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
190 dev_err(&chip->pci_dev->dev, "IBF timeout\n"); 190 dev_err(chip->dev, "IBF timeout\n");
191 return -EIO; 191 return -EIO;
192 } 192 }
193 193
194 outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND); 194 outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND);
195 if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { 195 if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
196 dev_err(&chip->pci_dev->dev, "IBR timeout\n"); 196 dev_err(chip->dev, "IBR timeout\n");
197 return -EIO; 197 return -EIO;
198 } 198 }
199 199
200 for (i = 0; i < count; i++) { 200 for (i = 0; i < count; i++) {
201 if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { 201 if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
202 dev_err(&chip->pci_dev->dev, 202 dev_err(chip->dev,
203 "IBF timeout (while writing data)\n"); 203 "IBF timeout (while writing data)\n");
204 return -EIO; 204 return -EIO;
205 } 205 }
@@ -207,7 +207,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
207 } 207 }
208 208
209 if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { 209 if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
210 dev_err(&chip->pci_dev->dev, "IBF timeout\n"); 210 dev_err(chip->dev, "IBF timeout\n");
211 return -EIO; 211 return -EIO;
212 } 212 }
213 outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND); 213 outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND);
@@ -220,6 +220,11 @@ static void tpm_nsc_cancel(struct tpm_chip *chip)
220 outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND); 220 outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
221} 221}
222 222
223static u8 tpm_nsc_status(struct tpm_chip *chip)
224{
225 return inb(chip->vendor->base + NSC_STATUS);
226}
227
223static struct file_operations nsc_ops = { 228static struct file_operations nsc_ops = {
224 .owner = THIS_MODULE, 229 .owner = THIS_MODULE,
225 .llseek = no_llseek, 230 .llseek = no_llseek,
@@ -239,7 +244,7 @@ static struct attribute * nsc_attrs[] = {
239 &dev_attr_pcrs.attr, 244 &dev_attr_pcrs.attr,
240 &dev_attr_caps.attr, 245 &dev_attr_caps.attr,
241 &dev_attr_cancel.attr, 246 &dev_attr_cancel.attr,
242 0, 247 NULL,
243}; 248};
244 249
245static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs }; 250static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
@@ -248,6 +253,7 @@ static struct tpm_vendor_specific tpm_nsc = {
248 .recv = tpm_nsc_recv, 253 .recv = tpm_nsc_recv,
249 .send = tpm_nsc_send, 254 .send = tpm_nsc_send,
250 .cancel = tpm_nsc_cancel, 255 .cancel = tpm_nsc_cancel,
256 .status = tpm_nsc_status,
251 .req_complete_mask = NSC_STATUS_OBF, 257 .req_complete_mask = NSC_STATUS_OBF,
252 .req_complete_val = NSC_STATUS_OBF, 258 .req_complete_val = NSC_STATUS_OBF,
253 .req_canceled = NSC_STATUS_RDY, 259 .req_canceled = NSC_STATUS_RDY,
@@ -255,16 +261,32 @@ static struct tpm_vendor_specific tpm_nsc = {
255 .miscdev = { .fops = &nsc_ops, }, 261 .miscdev = { .fops = &nsc_ops, },
256}; 262};
257 263
258static int __devinit tpm_nsc_init(struct pci_dev *pci_dev, 264static struct platform_device *pdev = NULL;
259 const struct pci_device_id *pci_id) 265
266static void __devexit tpm_nsc_remove(struct device *dev)
267{
268 struct tpm_chip *chip = dev_get_drvdata(dev);
269 if ( chip ) {
270 release_region(chip->vendor->base, 2);
271 tpm_remove_hardware(chip->dev);
272 }
273}
274
275static struct device_driver nsc_drv = {
276 .name = "tpm_nsc",
277 .bus = &platform_bus_type,
278 .owner = THIS_MODULE,
279 .suspend = tpm_pm_suspend,
280 .resume = tpm_pm_resume,
281};
282
283static int __init init_nsc(void)
260{ 284{
261 int rc = 0; 285 int rc = 0;
262 int lo, hi; 286 int lo, hi;
263 int nscAddrBase = TPM_ADDR; 287 int nscAddrBase = TPM_ADDR;
264 288
265 289 driver_register(&nsc_drv);
266 if (pci_enable_device(pci_dev))
267 return -EIO;
268 290
269 /* select PM channel 1 */ 291 /* select PM channel 1 */
270 tpm_write_index(nscAddrBase,NSC_LDN_INDEX, 0x12); 292 tpm_write_index(nscAddrBase,NSC_LDN_INDEX, 0x12);
@@ -273,37 +295,71 @@ static int __devinit tpm_nsc_init(struct pci_dev *pci_dev,
273 if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) { 295 if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
274 nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)| 296 nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)|
275 (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE); 297 (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE);
276 if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6) { 298 if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6)
277 rc = -ENODEV; 299 return -ENODEV;
278 goto out_err;
279 }
280 } 300 }
281 301
282 hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI); 302 hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
283 lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO); 303 lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
284 tpm_nsc.base = (hi<<8) | lo; 304 tpm_nsc.base = (hi<<8) | lo;
285 305
286 dev_dbg(&pci_dev->dev, "NSC TPM detected\n"); 306 /* enable the DPM module */
287 dev_dbg(&pci_dev->dev, 307 tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
308
309 pdev = kmalloc(sizeof(struct platform_device), GFP_KERNEL);
310 if ( !pdev )
311 return -ENOMEM;
312
313 memset(pdev, 0, sizeof(struct platform_device));
314
315 pdev->name = "tpm_nscl0";
316 pdev->id = -1;
317 pdev->num_resources = 0;
318 pdev->dev.release = tpm_nsc_remove;
319 pdev->dev.driver = &nsc_drv;
320
321 if ((rc=platform_device_register(pdev)) < 0) {
322 kfree(pdev);
323 pdev = NULL;
324 return rc;
325 }
326
327 if (request_region(tpm_nsc.base, 2, "tpm_nsc0") == NULL ) {
328 platform_device_unregister(pdev);
329 kfree(pdev);
330 pdev = NULL;
331 return -EBUSY;
332 }
333
334 if ((rc = tpm_register_hardware(&pdev->dev, &tpm_nsc)) < 0) {
335 release_region(tpm_nsc.base, 2);
336 platform_device_unregister(pdev);
337 kfree(pdev);
338 pdev = NULL;
339 return rc;
340 }
341
342 dev_dbg(&pdev->dev, "NSC TPM detected\n");
343 dev_dbg(&pdev->dev,
288 "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n", 344 "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
289 tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20), 345 tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20),
290 tpm_read_index(nscAddrBase,0x27)); 346 tpm_read_index(nscAddrBase,0x27));
291 dev_dbg(&pci_dev->dev, 347 dev_dbg(&pdev->dev,
292 "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n", 348 "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
293 tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25), 349 tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25),
294 tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28)); 350 tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28));
295 dev_dbg(&pci_dev->dev, "NSC IO Base0 0x%x\n", 351 dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n",
296 (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61)); 352 (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61));
297 dev_dbg(&pci_dev->dev, "NSC IO Base1 0x%x\n", 353 dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n",
298 (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63)); 354 (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63));
299 dev_dbg(&pci_dev->dev, "NSC Interrupt number and wakeup 0x%x\n", 355 dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n",
300 tpm_read_index(nscAddrBase,0x70)); 356 tpm_read_index(nscAddrBase,0x70));
301 dev_dbg(&pci_dev->dev, "NSC IRQ type select 0x%x\n", 357 dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n",
302 tpm_read_index(nscAddrBase,0x71)); 358 tpm_read_index(nscAddrBase,0x71));
303 dev_dbg(&pci_dev->dev, 359 dev_dbg(&pdev->dev,
304 "NSC DMA channel select0 0x%x, select1 0x%x\n", 360 "NSC DMA channel select0 0x%x, select1 0x%x\n",
305 tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75)); 361 tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75));
306 dev_dbg(&pci_dev->dev, 362 dev_dbg(&pdev->dev,
307 "NSC Config " 363 "NSC Config "
308 "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 364 "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
309 tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1), 365 tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1),
@@ -312,55 +368,23 @@ static int __devinit tpm_nsc_init(struct pci_dev *pci_dev,
312 tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7), 368 tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7),
313 tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9)); 369 tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9));
314 370
315 dev_info(&pci_dev->dev, 371 dev_info(&pdev->dev,
316 "NSC TPM revision %d\n", 372 "NSC TPM revision %d\n",
317 tpm_read_index(nscAddrBase, 0x27) & 0x1F); 373 tpm_read_index(nscAddrBase, 0x27) & 0x1F);
318 374
319 /* enable the DPM module */
320 tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
321
322 if ((rc = tpm_register_hardware(pci_dev, &tpm_nsc)) < 0)
323 goto out_err;
324
325 return 0; 375 return 0;
326
327out_err:
328 pci_disable_device(pci_dev);
329 return rc;
330}
331
332static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
333 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
334 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
335 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
336 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
337 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
338 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
339 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
340 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0)},
341 {PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
342 {0,}
343};
344
345MODULE_DEVICE_TABLE(pci, tpm_pci_tbl);
346
347static struct pci_driver nsc_pci_driver = {
348 .name = "tpm_nsc",
349 .id_table = tpm_pci_tbl,
350 .probe = tpm_nsc_init,
351 .remove = __devexit_p(tpm_remove),
352 .suspend = tpm_pm_suspend,
353 .resume = tpm_pm_resume,
354};
355
356static int __init init_nsc(void)
357{
358 return pci_register_driver(&nsc_pci_driver);
359} 376}
360 377
361static void __exit cleanup_nsc(void) 378static void __exit cleanup_nsc(void)
362{ 379{
363 pci_unregister_driver(&nsc_pci_driver); 380 if (pdev) {
381 tpm_nsc_remove(&pdev->dev);
382 platform_device_unregister(pdev);
383 kfree(pdev);
384 pdev = NULL;
385 }
386
387 driver_unregister(&nsc_drv);
364} 388}
365 389
366module_init(init_nsc); 390module_init(init_nsc);
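
The tpm_nsc hunks above replace the old PCI probe with a hand-rolled platform device: driver_register() first, then a kmalloc'ed and zeroed platform_device whose dev.release and dev.driver are filled in by hand (there is no probe() to bind nsc_drv), then the I/O region and the TPM core registration, with cleanup_nsc() undoing the same steps. What follows is a minimal sketch of that ordering, not part of the patch: the "example" names, the 0x4e base and the goto unwind are illustrative (the patch itself simply returns early from each failure point), and in this era struct platform_device still lives in <linux/device.h> rather than <linux/platform_device.h>.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>       /* struct platform_device lived here in 2.6.14 */
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#define EXAMPLE_IO_BASE 0x4e    /* placeholder, not the TPM's real base */

static struct device_driver example_drv = {
        .name   = "example",
        .bus    = &platform_bus_type,
        .owner  = THIS_MODULE,
};

static struct platform_device *example_pdev;

static void example_release(struct device *dev)
{
        /* nothing to free here; example_exit() frees example_pdev itself */
}

static int __init example_init(void)
{
        int rc = driver_register(&example_drv);

        if (rc)
                return rc;

        example_pdev = kmalloc(sizeof(*example_pdev), GFP_KERNEL);
        if (!example_pdev) {
                rc = -ENOMEM;
                goto err_driver;
        }
        memset(example_pdev, 0, sizeof(*example_pdev));

        example_pdev->name = "example";
        example_pdev->id = -1;
        example_pdev->dev.release = example_release;
        /* no probe(): bind the driver by hand, as the patch does for nsc_drv */
        example_pdev->dev.driver = &example_drv;

        rc = platform_device_register(example_pdev);
        if (rc)
                goto err_free;

        if (!request_region(EXAMPLE_IO_BASE, 2, "example")) {
                rc = -EBUSY;
                goto err_unregister;
        }
        return 0;

err_unregister:
        platform_device_unregister(example_pdev);
err_free:
        kfree(example_pdev);
        example_pdev = NULL;
err_driver:
        driver_unregister(&example_drv);
        return rc;
}

static void __exit example_exit(void)
{
        if (example_pdev) {
                release_region(EXAMPLE_IO_BASE, 2);
                platform_device_unregister(example_pdev);
                kfree(example_pdev);
                example_pdev = NULL;
        }
        driver_unregister(&example_drv);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
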
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index f5649a337743..c586bfa852ee 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -809,7 +809,7 @@ static void do_tty_hangup(void *data)
809 check_tty_count(tty, "do_tty_hangup"); 809 check_tty_count(tty, "do_tty_hangup");
810 file_list_lock(); 810 file_list_lock();
811 /* This breaks for file handles being sent over AF_UNIX sockets ? */ 811 /* This breaks for file handles being sent over AF_UNIX sockets ? */
812 list_for_each_entry(filp, &tty->tty_files, f_list) { 812 list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
813 if (filp->f_op->write == redirected_tty_write) 813 if (filp->f_op->write == redirected_tty_write)
814 cons_filp = filp; 814 cons_filp = filp;
815 if (filp->f_op->write != tty_write) 815 if (filp->f_op->write != tty_write)
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index a5e104f428f8..51abd3defc1c 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -993,13 +993,16 @@ static struct vio_device_id viotape_device_table[] __devinitdata = {
993 { "viotape", "" }, 993 { "viotape", "" },
994 { "", "" } 994 { "", "" }
995}; 995};
996
997MODULE_DEVICE_TABLE(vio, viotape_device_table); 996MODULE_DEVICE_TABLE(vio, viotape_device_table);
997
998static struct vio_driver viotape_driver = { 998static struct vio_driver viotape_driver = {
999 .name = "viotape",
1000 .id_table = viotape_device_table, 999 .id_table = viotape_device_table,
1001 .probe = viotape_probe, 1000 .probe = viotape_probe,
1002 .remove = viotape_remove 1001 .remove = viotape_remove,
1002 .driver = {
1003 .name = "viotape",
1004 .owner = THIS_MODULE,
1005 }
1003}; 1006};
1004 1007
1005 1008
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 1d44f69e1fda..003dda147cd0 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -192,6 +192,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
192 int i, j, k; 192 int i, j, k;
193 int ret; 193 int ret;
194 194
195 if (!capable(CAP_SYS_TTY_CONFIG))
196 return -EPERM;
197
195 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); 198 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
196 if (!kbs) { 199 if (!kbs) {
197 ret = -ENOMEM; 200 ret = -ENOMEM;
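
The do_kdgkb_ioctl() hunk moves the CAP_SYS_TTY_CONFIG check ahead of the kmalloc() so an unprivileged caller is rejected before any kernel memory is committed. A minimal sketch of that ordering, with an illustrative example_op() standing in for the ioctl body (the helper name and the buffer it allocates are not from the patch):

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/sched.h>        /* capable() */
#include <linux/slab.h>
#include <linux/errno.h>

static int example_op(size_t len)
{
        char *buf;

        /* reject unprivileged callers before allocating anything */
        if (!capable(CAP_SYS_TTY_CONFIG))
                return -EPERM;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... privileged work on buf ... */

        kfree(buf);
        return 0;
}
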
diff --git a/drivers/char/watchdog/cpu5wdt.c b/drivers/char/watchdog/cpu5wdt.c
index 2865dac0a813..e75045fe2641 100644
--- a/drivers/char/watchdog/cpu5wdt.c
+++ b/drivers/char/watchdog/cpu5wdt.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/jiffies.h>
31#include <asm/io.h> 32#include <asm/io.h>
32#include <asm/uaccess.h> 33#include <asm/uaccess.h>
33 34
diff --git a/drivers/char/watchdog/mixcomwd.c b/drivers/char/watchdog/mixcomwd.c
index 7fc2188386d9..d8dede575402 100644
--- a/drivers/char/watchdog/mixcomwd.c
+++ b/drivers/char/watchdog/mixcomwd.c
@@ -45,6 +45,8 @@
45#include <linux/fs.h> 45#include <linux/fs.h>
46#include <linux/reboot.h> 46#include <linux/reboot.h>
47#include <linux/init.h> 47#include <linux/init.h>
48#include <linux/jiffies.h>
49#include <linux/timer.h>
48#include <asm/uaccess.h> 50#include <asm/uaccess.h>
49#include <asm/io.h> 51#include <asm/io.h>
50 52
diff --git a/drivers/char/watchdog/pcwd.c b/drivers/char/watchdog/pcwd.c
index 427ad51b7a35..37c9e13ad3ac 100644
--- a/drivers/char/watchdog/pcwd.c
+++ b/drivers/char/watchdog/pcwd.c
@@ -66,7 +66,7 @@
66#include <linux/init.h> 66#include <linux/init.h>
67#include <linux/spinlock.h> 67#include <linux/spinlock.h>
68#include <linux/reboot.h> 68#include <linux/reboot.h>
69 69#include <linux/sched.h> /* TASK_INTERRUPTIBLE, set_current_state() and friends */
70#include <asm/uaccess.h> 70#include <asm/uaccess.h>
71#include <asm/io.h> 71#include <asm/io.h>
72 72
diff --git a/drivers/char/watchdog/pcwd_pci.c b/drivers/char/watchdog/pcwd_pci.c
index 0b8e493be045..5308e5c8f29a 100644
--- a/drivers/char/watchdog/pcwd_pci.c
+++ b/drivers/char/watchdog/pcwd_pci.c
@@ -753,6 +753,7 @@ static struct pci_device_id pcipcwd_pci_tbl[] = {
753MODULE_DEVICE_TABLE(pci, pcipcwd_pci_tbl); 753MODULE_DEVICE_TABLE(pci, pcipcwd_pci_tbl);
754 754
755static struct pci_driver pcipcwd_driver = { 755static struct pci_driver pcipcwd_driver = {
756 .owner = THIS_MODULE,
756 .name = WATCHDOG_NAME, 757 .name = WATCHDOG_NAME,
757 .id_table = pcipcwd_pci_tbl, 758 .id_table = pcipcwd_pci_tbl,
758 .probe = pcipcwd_card_init, 759 .probe = pcipcwd_card_init,
diff --git a/drivers/char/watchdog/sc520_wdt.c b/drivers/char/watchdog/sc520_wdt.c
index 72501be79b0c..4ee9974ad8cb 100644
--- a/drivers/char/watchdog/sc520_wdt.c
+++ b/drivers/char/watchdog/sc520_wdt.c
@@ -63,6 +63,7 @@
63#include <linux/notifier.h> 63#include <linux/notifier.h>
64#include <linux/reboot.h> 64#include <linux/reboot.h>
65#include <linux/init.h> 65#include <linux/init.h>
66#include <linux/jiffies.h>
66 67
67#include <asm/io.h> 68#include <asm/io.h>
68#include <asm/uaccess.h> 69#include <asm/uaccess.h>
diff --git a/drivers/char/watchdog/softdog.c b/drivers/char/watchdog/softdog.c
index 20e5eb8667f2..a91edaf3a350 100644
--- a/drivers/char/watchdog/softdog.c
+++ b/drivers/char/watchdog/softdog.c
@@ -47,6 +47,8 @@
47#include <linux/notifier.h> 47#include <linux/notifier.h>
48#include <linux/reboot.h> 48#include <linux/reboot.h>
49#include <linux/init.h> 49#include <linux/init.h>
50#include <linux/jiffies.h>
51
50#include <asm/uaccess.h> 52#include <asm/uaccess.h>
51 53
52#define PFX "SoftDog: " 54#define PFX "SoftDog: "
diff --git a/drivers/char/watchdog/wdt_pci.c b/drivers/char/watchdog/wdt_pci.c
index 4b3311993d48..dc9370f6c348 100644
--- a/drivers/char/watchdog/wdt_pci.c
+++ b/drivers/char/watchdog/wdt_pci.c
@@ -711,6 +711,7 @@ MODULE_DEVICE_TABLE(pci, wdtpci_pci_tbl);
711 711
712 712
713static struct pci_driver wdtpci_driver = { 713static struct pci_driver wdtpci_driver = {
714 .owner = THIS_MODULE,
714 .name = "wdt_pci", 715 .name = "wdt_pci",
715 .id_table = wdtpci_pci_tbl, 716 .id_table = wdtpci_pci_tbl,
716 .probe = wdtpci_init_one, 717 .probe = wdtpci_init_one,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 109d62ccf651..6c6121b85a54 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -4,6 +4,9 @@
4 * Copyright (C) 2001 Russell King 4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> 5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * 6 *
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 *
7 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
@@ -36,13 +39,6 @@ static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
36static DEFINE_SPINLOCK(cpufreq_driver_lock); 39static DEFINE_SPINLOCK(cpufreq_driver_lock);
37 40
38 41
39/* we keep a copy of all ->add'ed CPU's struct sys_device here;
40 * as it is only accessed in ->add and ->remove, no lock or reference
41 * count is necessary.
42 */
43static struct sys_device *cpu_sys_devices[NR_CPUS];
44
45
46/* internal prototypes */ 42/* internal prototypes */
47static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); 43static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
48static void handle_update(void *data); 44static void handle_update(void *data);
@@ -574,6 +570,9 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
574 unsigned long flags; 570 unsigned long flags;
575 unsigned int j; 571 unsigned int j;
576 572
573 if (cpu_is_offline(cpu))
574 return 0;
575
577 cpufreq_debug_disable_ratelimit(); 576 cpufreq_debug_disable_ratelimit();
578 dprintk("adding CPU %u\n", cpu); 577 dprintk("adding CPU %u\n", cpu);
579 578
@@ -582,7 +581,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
582 * CPU because it is in the same boat. */ 581 * CPU because it is in the same boat. */
583 policy = cpufreq_cpu_get(cpu); 582 policy = cpufreq_cpu_get(cpu);
584 if (unlikely(policy)) { 583 if (unlikely(policy)) {
585 cpu_sys_devices[cpu] = sys_dev;
586 dprintk("CPU already managed, adding link\n"); 584 dprintk("CPU already managed, adding link\n");
587 sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq"); 585 sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
588 cpufreq_debug_enable_ratelimit(); 586 cpufreq_debug_enable_ratelimit();
@@ -657,7 +655,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
657 } 655 }
658 656
659 module_put(cpufreq_driver->owner); 657 module_put(cpufreq_driver->owner);
660 cpu_sys_devices[cpu] = sys_dev;
661 dprintk("initialization complete\n"); 658 dprintk("initialization complete\n");
662 cpufreq_debug_enable_ratelimit(); 659 cpufreq_debug_enable_ratelimit();
663 660
@@ -682,7 +679,7 @@ err_out:
682 679
683nomem_out: 680nomem_out:
684 module_put(cpufreq_driver->owner); 681 module_put(cpufreq_driver->owner);
685 module_out: 682module_out:
686 cpufreq_debug_enable_ratelimit(); 683 cpufreq_debug_enable_ratelimit();
687 return ret; 684 return ret;
688} 685}
@@ -698,6 +695,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
698 unsigned int cpu = sys_dev->id; 695 unsigned int cpu = sys_dev->id;
699 unsigned long flags; 696 unsigned long flags;
700 struct cpufreq_policy *data; 697 struct cpufreq_policy *data;
698 struct sys_device *cpu_sys_dev;
701#ifdef CONFIG_SMP 699#ifdef CONFIG_SMP
702 unsigned int j; 700 unsigned int j;
703#endif 701#endif
@@ -710,7 +708,6 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
710 708
711 if (!data) { 709 if (!data) {
712 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 710 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
713 cpu_sys_devices[cpu] = NULL;
714 cpufreq_debug_enable_ratelimit(); 711 cpufreq_debug_enable_ratelimit();
715 return -EINVAL; 712 return -EINVAL;
716 } 713 }
@@ -725,14 +722,12 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
725 dprintk("removing link\n"); 722 dprintk("removing link\n");
726 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 723 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
727 sysfs_remove_link(&sys_dev->kobj, "cpufreq"); 724 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
728 cpu_sys_devices[cpu] = NULL;
729 cpufreq_cpu_put(data); 725 cpufreq_cpu_put(data);
730 cpufreq_debug_enable_ratelimit(); 726 cpufreq_debug_enable_ratelimit();
731 return 0; 727 return 0;
732 } 728 }
733#endif 729#endif
734 730
735 cpu_sys_devices[cpu] = NULL;
736 731
737 if (!kobject_get(&data->kobj)) { 732 if (!kobject_get(&data->kobj)) {
738 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 733 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -761,7 +756,8 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
761 if (j == cpu) 756 if (j == cpu)
762 continue; 757 continue;
763 dprintk("removing link for cpu %u\n", j); 758 dprintk("removing link for cpu %u\n", j);
764 sysfs_remove_link(&cpu_sys_devices[j]->kobj, "cpufreq"); 759 cpu_sys_dev = get_cpu_sysdev(j);
760 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
765 cpufreq_cpu_put(data); 761 cpufreq_cpu_put(data);
766 } 762 }
767 } 763 }
@@ -772,7 +768,6 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
772 down(&data->lock); 768 down(&data->lock);
773 if (cpufreq_driver->target) 769 if (cpufreq_driver->target)
774 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 770 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
775 cpufreq_driver->target = NULL;
776 up(&data->lock); 771 up(&data->lock);
777 772
778 kobject_unregister(&data->kobj); 773 kobject_unregister(&data->kobj);
@@ -1119,17 +1114,30 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1119 unsigned int relation) 1114 unsigned int relation)
1120{ 1115{
1121 int retval = -EINVAL; 1116 int retval = -EINVAL;
1122 lock_cpu_hotplug(); 1117
1118 /*
1119 * Converted the lock_cpu_hotplug to preempt_disable()
1120 * and preempt_enable(). This is a bit kludgy and relies on how cpu
1121 * hotplug works. All we need is a guarantee that cpu hotplug won't make
1122 * progress on any cpu. Once we do preempt_disable(), this would ensure
1123 * that hotplug threads don't get onto this cpu, thereby delaying
1124 * the cpu remove process.
1125 *
1126 * We removed the lock_cpu_hotplug since we need to call this function
1127 * via cpu hotplug callbacks, which result in locking the cpu hotplug
1128 * thread itself. Agree this is not very clean, cpufreq community
1129 * could improve this if required. - Ashok Raj <ashok.raj@intel.com>
1130 */
1131 preempt_disable();
1123 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, 1132 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1124 target_freq, relation); 1133 target_freq, relation);
1125 if (cpu_online(policy->cpu) && cpufreq_driver->target) 1134 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1126 retval = cpufreq_driver->target(policy, target_freq, relation); 1135 retval = cpufreq_driver->target(policy, target_freq, relation);
1127 unlock_cpu_hotplug(); 1136 preempt_enable();
1128 return retval; 1137 return retval;
1129} 1138}
1130EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 1139EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1131 1140
1132
1133int cpufreq_driver_target(struct cpufreq_policy *policy, 1141int cpufreq_driver_target(struct cpufreq_policy *policy,
1134 unsigned int target_freq, 1142 unsigned int target_freq,
1135 unsigned int relation) 1143 unsigned int relation)
@@ -1416,6 +1424,45 @@ int cpufreq_update_policy(unsigned int cpu)
1416} 1424}
1417EXPORT_SYMBOL(cpufreq_update_policy); 1425EXPORT_SYMBOL(cpufreq_update_policy);
1418 1426
1427static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1428 unsigned long action, void *hcpu)
1429{
1430 unsigned int cpu = (unsigned long)hcpu;
1431 struct cpufreq_policy *policy;
1432 struct sys_device *sys_dev;
1433
1434 sys_dev = get_cpu_sysdev(cpu);
1435
1436 if (sys_dev) {
1437 switch (action) {
1438 case CPU_ONLINE:
1439 cpufreq_add_dev(sys_dev);
1440 break;
1441 case CPU_DOWN_PREPARE:
1442 /*
1443 * We attempt to put this cpu in lowest frequency
1444 * possible before going down. This will permit
1445 * hardware-managed P-State to switch other related
1446 * threads to min or higher speeds if possible.
1447 */
1448 policy = cpufreq_cpu_data[cpu];
1449 if (policy) {
1450 cpufreq_driver_target(policy, policy->min,
1451 CPUFREQ_RELATION_H);
1452 }
1453 break;
1454 case CPU_DEAD:
1455 cpufreq_remove_dev(sys_dev);
1456 break;
1457 }
1458 }
1459 return NOTIFY_OK;
1460}
1461
1462static struct notifier_block cpufreq_cpu_notifier =
1463{
1464 .notifier_call = cpufreq_cpu_callback,
1465};
1419 1466
1420/********************************************************************* 1467/*********************************************************************
1421 * REGISTER / UNREGISTER CPUFREQ DRIVER * 1468 * REGISTER / UNREGISTER CPUFREQ DRIVER *
@@ -1476,6 +1523,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1476 } 1523 }
1477 1524
1478 if (!ret) { 1525 if (!ret) {
1526 register_cpu_notifier(&cpufreq_cpu_notifier);
1479 dprintk("driver %s up and running\n", driver_data->name); 1527 dprintk("driver %s up and running\n", driver_data->name);
1480 cpufreq_debug_enable_ratelimit(); 1528 cpufreq_debug_enable_ratelimit();
1481 } 1529 }
@@ -1507,6 +1555,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1507 dprintk("unregistering driver %s\n", driver->name); 1555 dprintk("unregistering driver %s\n", driver->name);
1508 1556
1509 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); 1557 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1558 unregister_cpu_notifier(&cpufreq_cpu_notifier);
1510 1559
1511 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1560 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1512 cpufreq_driver = NULL; 1561 cpufreq_driver = NULL;
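
Taken together, the cpufreq.c hunks drop the private cpu_sys_devices[] cache in favour of get_cpu_sysdev(), route CPU hotplug through a notifier registered in cpufreq_register_driver() and removed in cpufreq_unregister_driver(), and swap lock_cpu_hotplug() in __cpufreq_driver_target() for preempt_disable()/preempt_enable() for the reason given in the in-patch comment: the target path is now reached from the hotplug callbacks themselves. Below is a minimal sketch of the notifier idiom the patch adopts, written against the 2.6.14-era API and assuming built-in code as the cpufreq core is; all example_* names are placeholders.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                printk(KERN_DEBUG "example: cpu %u came online\n", cpu);
                break;
        case CPU_DOWN_PREPARE:
                /* 'cpu' is still running: last chance to quiesce it */
                printk(KERN_DEBUG "example: cpu %u about to go down\n", cpu);
                break;
        case CPU_DEAD:
                printk(KERN_DEBUG "example: cpu %u is gone\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
        .notifier_call = example_cpu_callback,
};

static int __init example_hotplug_init(void)
{
        register_cpu_notifier(&example_cpu_notifier);
        return 0;
}
__initcall(example_hotplug_init);
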
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 741b6b191e6a..3597f25d5efa 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -19,6 +19,7 @@
19#include <linux/percpu.h> 19#include <linux/percpu.h>
20#include <linux/kobject.h> 20#include <linux/kobject.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/notifier.h>
22#include <asm/cputime.h> 23#include <asm/cputime.h>
23 24
24static spinlock_t cpufreq_stats_lock; 25static spinlock_t cpufreq_stats_lock;
@@ -298,6 +299,27 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
298 return 0; 299 return 0;
299} 300}
300 301
302static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
303 unsigned long action, void *hcpu)
304{
305 unsigned int cpu = (unsigned long)hcpu;
306
307 switch (action) {
308 case CPU_ONLINE:
309 cpufreq_update_policy(cpu);
310 break;
311 case CPU_DEAD:
312 cpufreq_stats_free_table(cpu);
313 break;
314 }
315 return NOTIFY_OK;
316}
317
318static struct notifier_block cpufreq_stat_cpu_notifier =
319{
320 .notifier_call = cpufreq_stat_cpu_callback,
321};
322
301static struct notifier_block notifier_policy_block = { 323static struct notifier_block notifier_policy_block = {
302 .notifier_call = cpufreq_stat_notifier_policy 324 .notifier_call = cpufreq_stat_notifier_policy
303}; 325};
@@ -311,6 +333,7 @@ __init cpufreq_stats_init(void)
311{ 333{
312 int ret; 334 int ret;
313 unsigned int cpu; 335 unsigned int cpu;
336
314 spin_lock_init(&cpufreq_stats_lock); 337 spin_lock_init(&cpufreq_stats_lock);
315 if ((ret = cpufreq_register_notifier(&notifier_policy_block, 338 if ((ret = cpufreq_register_notifier(&notifier_policy_block,
316 CPUFREQ_POLICY_NOTIFIER))) 339 CPUFREQ_POLICY_NOTIFIER)))
@@ -323,20 +346,31 @@ __init cpufreq_stats_init(void)
323 return ret; 346 return ret;
324 } 347 }
325 348
326 for_each_cpu(cpu) 349 register_cpu_notifier(&cpufreq_stat_cpu_notifier);
327 cpufreq_update_policy(cpu); 350 lock_cpu_hotplug();
351 for_each_online_cpu(cpu) {
352 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
353 (void *)(long)cpu);
354 }
355 unlock_cpu_hotplug();
328 return 0; 356 return 0;
329} 357}
330static void 358static void
331__exit cpufreq_stats_exit(void) 359__exit cpufreq_stats_exit(void)
332{ 360{
333 unsigned int cpu; 361 unsigned int cpu;
362
334 cpufreq_unregister_notifier(&notifier_policy_block, 363 cpufreq_unregister_notifier(&notifier_policy_block,
335 CPUFREQ_POLICY_NOTIFIER); 364 CPUFREQ_POLICY_NOTIFIER);
336 cpufreq_unregister_notifier(&notifier_trans_block, 365 cpufreq_unregister_notifier(&notifier_trans_block,
337 CPUFREQ_TRANSITION_NOTIFIER); 366 CPUFREQ_TRANSITION_NOTIFIER);
338 for_each_cpu(cpu) 367 unregister_cpu_notifier(&cpufreq_stat_cpu_notifier);
339 cpufreq_stats_free_table(cpu); 368 lock_cpu_hotplug();
369 for_each_online_cpu(cpu) {
370 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
371 (void *)(long)cpu);
372 }
373 unlock_cpu_hotplug();
340} 374}
341 375
342MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); 376MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
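
cpufreq_stats now funnels boot-time and hotplugged CPUs through one path: register the notifier first, then replay CPU_ONLINE for every CPU that is already up while holding the hotplug lock, and mirror it with CPU_DEAD on exit. A compact sketch of that replay idiom (a stub callback stands in for the real per-cpu work; the example_* names are illustrative, not from the patch):

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        /* per-cpu setup (CPU_ONLINE) or teardown (CPU_DEAD) goes here */
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
        .notifier_call = example_cpu_callback,
};

static int __init example_stats_init(void)
{
        unsigned int cpu;

        register_cpu_notifier(&example_cpu_notifier);
        /* hold the hotplug lock so no CPU can appear or vanish mid-replay */
        lock_cpu_hotplug();
        for_each_online_cpu(cpu)
                example_cpu_callback(&example_cpu_notifier, CPU_ONLINE,
                                     (void *)(long)cpu);
        unlock_cpu_hotplug();
        return 0;
}
__initcall(example_stats_init);
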
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 094835cce321..4263935443cc 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -2,7 +2,7 @@ menu "Hardware crypto devices"
2 2
3config CRYPTO_DEV_PADLOCK 3config CRYPTO_DEV_PADLOCK
4 tristate "Support for VIA PadLock ACE" 4 tristate "Support for VIA PadLock ACE"
5 depends on CRYPTO && X86 && !X86_64 5 depends on CRYPTO && X86_32
6 help 6 help
7 Some VIA processors come with an integrated crypto engine 7 Some VIA processors come with an integrated crypto engine
8 (so called VIA PadLock ACE, Advanced Cryptography Engine) 8 (so called VIA PadLock ACE, Advanced Cryptography Engine)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 327b58e64875..b6815c6c29a2 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -70,8 +70,7 @@ config DELL_RBU
70 70
71config DCDBAS 71config DCDBAS
72 tristate "Dell Systems Management Base Driver" 72 tristate "Dell Systems Management Base Driver"
73 depends on X86 || X86_64 73 depends on X86
74 default m
75 help 74 help
76 The Dell Systems Management Base Driver provides a sysfs interface 75 The Dell Systems Management Base Driver provides a sysfs interface
77 for systems management software to perform System Management 76 for systems management software to perform System Management
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 74af7e074868..8b9d85526596 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -2211,13 +2211,12 @@ static int cdrom_read_toc(ide_drive_t *drive, struct request_sense *sense)
2211 2211
2212 if (toc == NULL) { 2212 if (toc == NULL) {
2213 /* Try to allocate space. */ 2213 /* Try to allocate space. */
2214 toc = (struct atapi_toc *) kmalloc (sizeof (struct atapi_toc), 2214 toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL);
2215 GFP_KERNEL);
2216 info->toc = toc;
2217 if (toc == NULL) { 2215 if (toc == NULL) {
2218 printk (KERN_ERR "%s: No cdrom TOC buffer!\n", drive->name); 2216 printk (KERN_ERR "%s: No cdrom TOC buffer!\n", drive->name);
2219 return -ENOMEM; 2217 return -ENOMEM;
2220 } 2218 }
2219 info->toc = toc;
2221 } 2220 }
2222 2221
2223 /* Check to see if the existing data is still valid. 2222 /* Check to see if the existing data is still valid.
@@ -2240,7 +2239,8 @@ static int cdrom_read_toc(ide_drive_t *drive, struct request_sense *sense)
2240 /* First read just the header, so we know how long the TOC is. */ 2239 /* First read just the header, so we know how long the TOC is. */
2241 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, 2240 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
2242 sizeof(struct atapi_toc_header), sense); 2241 sizeof(struct atapi_toc_header), sense);
2243 if (stat) return stat; 2242 if (stat)
2243 return stat;
2244 2244
2245#if ! STANDARD_ATAPI 2245#if ! STANDARD_ATAPI
2246 if (CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd) { 2246 if (CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd) {
@@ -2324,7 +2324,8 @@ static int cdrom_read_toc(ide_drive_t *drive, struct request_sense *sense)
2324 /* Read the multisession information. */ 2324 /* Read the multisession information. */
2325 stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp, 2325 stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
2326 sizeof(ms_tmp), sense); 2326 sizeof(ms_tmp), sense);
2327 if (stat) return stat; 2327 if (stat)
2328 return stat;
2328 2329
2329 toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba); 2330 toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
2330 } else { 2331 } else {
@@ -2460,7 +2461,7 @@ static int ide_cdrom_packet(struct cdrom_device_info *cdi,
2460 struct packet_command *cgc) 2461 struct packet_command *cgc)
2461{ 2462{
2462 struct request req; 2463 struct request req;
2463 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2464 ide_drive_t *drive = cdi->handle;
2464 2465
2465 if (cgc->timeout <= 0) 2466 if (cgc->timeout <= 0)
2466 cgc->timeout = ATAPI_WAIT_PC; 2467 cgc->timeout = ATAPI_WAIT_PC;
@@ -2537,7 +2538,7 @@ int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi,
2537 unsigned int cmd, void *arg) 2538 unsigned int cmd, void *arg)
2538 2539
2539{ 2540{
2540 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2541 ide_drive_t *drive = cdi->handle;
2541 struct cdrom_info *info = drive->driver_data; 2542 struct cdrom_info *info = drive->driver_data;
2542 int stat; 2543 int stat;
2543 2544
@@ -2548,7 +2549,7 @@ int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi,
2548 */ 2549 */
2549 case CDROMPLAYTRKIND: { 2550 case CDROMPLAYTRKIND: {
2550 unsigned long lba_start, lba_end; 2551 unsigned long lba_start, lba_end;
2551 struct cdrom_ti *ti = (struct cdrom_ti *)arg; 2552 struct cdrom_ti *ti = arg;
2552 struct atapi_toc_entry *first_toc, *last_toc; 2553 struct atapi_toc_entry *first_toc, *last_toc;
2553 2554
2554 stat = cdrom_get_toc_entry(drive, ti->cdti_trk0, &first_toc); 2555 stat = cdrom_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
@@ -2571,12 +2572,13 @@ int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi,
2571 } 2572 }
2572 2573
2573 case CDROMREADTOCHDR: { 2574 case CDROMREADTOCHDR: {
2574 struct cdrom_tochdr *tochdr = (struct cdrom_tochdr *) arg; 2575 struct cdrom_tochdr *tochdr = arg;
2575 struct atapi_toc *toc; 2576 struct atapi_toc *toc;
2576 2577
2577 /* Make sure our saved TOC is valid. */ 2578 /* Make sure our saved TOC is valid. */
2578 stat = cdrom_read_toc(drive, NULL); 2579 stat = cdrom_read_toc(drive, NULL);
2579 if (stat) return stat; 2580 if (stat)
2581 return stat;
2580 2582
2581 toc = info->toc; 2583 toc = info->toc;
2582 tochdr->cdth_trk0 = toc->hdr.first_track; 2584 tochdr->cdth_trk0 = toc->hdr.first_track;
@@ -2586,11 +2588,12 @@ int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi,
2586 } 2588 }
2587 2589
2588 case CDROMREADTOCENTRY: { 2590 case CDROMREADTOCENTRY: {
2589 struct cdrom_tocentry *tocentry = (struct cdrom_tocentry*) arg; 2591 struct cdrom_tocentry *tocentry = arg;
2590 struct atapi_toc_entry *toce; 2592 struct atapi_toc_entry *toce;
2591 2593
2592 stat = cdrom_get_toc_entry(drive, tocentry->cdte_track, &toce); 2594 stat = cdrom_get_toc_entry(drive, tocentry->cdte_track, &toce);
2593 if (stat) return stat; 2595 if (stat)
2596 return stat;
2594 2597
2595 tocentry->cdte_ctrl = toce->control; 2598 tocentry->cdte_ctrl = toce->control;
2596 tocentry->cdte_adr = toce->adr; 2599 tocentry->cdte_adr = toce->adr;
@@ -2613,7 +2616,7 @@ int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi,
2613static 2616static
2614int ide_cdrom_reset (struct cdrom_device_info *cdi) 2617int ide_cdrom_reset (struct cdrom_device_info *cdi)
2615{ 2618{
2616 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2619 ide_drive_t *drive = cdi->handle;
2617 struct request_sense sense; 2620 struct request_sense sense;
2618 struct request req; 2621 struct request req;
2619 int ret; 2622 int ret;
@@ -2636,12 +2639,13 @@ int ide_cdrom_reset (struct cdrom_device_info *cdi)
2636static 2639static
2637int ide_cdrom_tray_move (struct cdrom_device_info *cdi, int position) 2640int ide_cdrom_tray_move (struct cdrom_device_info *cdi, int position)
2638{ 2641{
2639 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2642 ide_drive_t *drive = cdi->handle;
2640 struct request_sense sense; 2643 struct request_sense sense;
2641 2644
2642 if (position) { 2645 if (position) {
2643 int stat = cdrom_lockdoor(drive, 0, &sense); 2646 int stat = cdrom_lockdoor(drive, 0, &sense);
2644 if (stat) return stat; 2647 if (stat)
2648 return stat;
2645 } 2649 }
2646 2650
2647 return cdrom_eject(drive, !position, &sense); 2651 return cdrom_eject(drive, !position, &sense);
@@ -2650,7 +2654,7 @@ int ide_cdrom_tray_move (struct cdrom_device_info *cdi, int position)
2650static 2654static
2651int ide_cdrom_lock_door (struct cdrom_device_info *cdi, int lock) 2655int ide_cdrom_lock_door (struct cdrom_device_info *cdi, int lock)
2652{ 2656{
2653 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2657 ide_drive_t *drive = cdi->handle;
2654 return cdrom_lockdoor(drive, lock, NULL); 2658 return cdrom_lockdoor(drive, lock, NULL);
2655} 2659}
2656 2660
@@ -2700,7 +2704,7 @@ void ide_cdrom_update_speed (ide_drive_t *drive, struct atapi_capabilities_page
2700static 2704static
2701int ide_cdrom_select_speed (struct cdrom_device_info *cdi, int speed) 2705int ide_cdrom_select_speed (struct cdrom_device_info *cdi, int speed)
2702{ 2706{
2703 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2707 ide_drive_t *drive = cdi->handle;
2704 struct request_sense sense; 2708 struct request_sense sense;
2705 struct atapi_capabilities_page cap; 2709 struct atapi_capabilities_page cap;
2706 int stat; 2710 int stat;
@@ -2723,7 +2727,7 @@ int ide_cdrom_select_speed (struct cdrom_device_info *cdi, int speed)
2723static 2727static
2724int ide_cdrom_drive_status (struct cdrom_device_info *cdi, int slot_nr) 2728int ide_cdrom_drive_status (struct cdrom_device_info *cdi, int slot_nr)
2725{ 2729{
2726 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2730 ide_drive_t *drive = cdi->handle;
2727 struct media_event_desc med; 2731 struct media_event_desc med;
2728 struct request_sense sense; 2732 struct request_sense sense;
2729 int stat; 2733 int stat;
@@ -2769,7 +2773,7 @@ int ide_cdrom_get_last_session (struct cdrom_device_info *cdi,
2769 struct cdrom_multisession *ms_info) 2773 struct cdrom_multisession *ms_info)
2770{ 2774{
2771 struct atapi_toc *toc; 2775 struct atapi_toc *toc;
2772 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2776 ide_drive_t *drive = cdi->handle;
2773 struct cdrom_info *info = drive->driver_data; 2777 struct cdrom_info *info = drive->driver_data;
2774 struct request_sense sense; 2778 struct request_sense sense;
2775 int ret; 2779 int ret;
@@ -2791,7 +2795,7 @@ int ide_cdrom_get_mcn (struct cdrom_device_info *cdi,
2791{ 2795{
2792 int stat; 2796 int stat;
2793 char mcnbuf[24]; 2797 char mcnbuf[24];
2794 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2798 ide_drive_t *drive = cdi->handle;
2795 2799
2796/* get MCN */ 2800/* get MCN */
2797 if ((stat = cdrom_read_subchannel(drive, 2, mcnbuf, sizeof (mcnbuf), NULL))) 2801 if ((stat = cdrom_read_subchannel(drive, 2, mcnbuf, sizeof (mcnbuf), NULL)))
@@ -2815,7 +2819,7 @@ static
2815int ide_cdrom_check_media_change_real (struct cdrom_device_info *cdi, 2819int ide_cdrom_check_media_change_real (struct cdrom_device_info *cdi,
2816 int slot_nr) 2820 int slot_nr)
2817{ 2821{
2818 ide_drive_t *drive = (ide_drive_t*) cdi->handle; 2822 ide_drive_t *drive = cdi->handle;
2819 int retval; 2823 int retval;
2820 2824
2821 if (slot_nr == CDSL_CURRENT) { 2825 if (slot_nr == CDSL_CURRENT) {
@@ -2886,7 +2890,7 @@ static int ide_cdrom_register (ide_drive_t *drive, int nslots)
2886 devinfo->mask = 0; 2890 devinfo->mask = 0;
2887 devinfo->speed = CDROM_STATE_FLAGS(drive)->current_speed; 2891 devinfo->speed = CDROM_STATE_FLAGS(drive)->current_speed;
2888 devinfo->capacity = nslots; 2892 devinfo->capacity = nslots;
2889 devinfo->handle = (void *) drive; 2893 devinfo->handle = drive;
2890 strcpy(devinfo->name, drive->name); 2894 strcpy(devinfo->name, drive->name);
2891 2895
2892 /* set capability mask to match the probe. */ 2896 /* set capability mask to match the probe. */
@@ -2942,7 +2946,7 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
2942 * registered with the Uniform layer yet, it can't do this. 2946 * registered with the Uniform layer yet, it can't do this.
2943 * Same goes for cdi->ops. 2947 * Same goes for cdi->ops.
2944 */ 2948 */
2945 cdi->handle = (ide_drive_t *) drive; 2949 cdi->handle = drive;
2946 cdi->ops = &ide_cdrom_dops; 2950 cdi->ops = &ide_cdrom_dops;
2947 2951
2948 if (ide_cdrom_get_capabilities(drive, &cap)) 2952 if (ide_cdrom_get_capabilities(drive, &cap))
@@ -3254,6 +3258,7 @@ int ide_cdrom_setup (ide_drive_t *drive)
3254 return 0; 3258 return 0;
3255} 3259}
3256 3260
3261#ifdef CONFIG_PROC_FS
3257static 3262static
3258sector_t ide_cdrom_capacity (ide_drive_t *drive) 3263sector_t ide_cdrom_capacity (ide_drive_t *drive)
3259{ 3264{
@@ -3264,6 +3269,7 @@ sector_t ide_cdrom_capacity (ide_drive_t *drive)
3264 3269
3265 return capacity * sectors_per_frame; 3270 return capacity * sectors_per_frame;
3266} 3271}
3272#endif
3267 3273
3268static int ide_cd_remove(struct device *dev) 3274static int ide_cd_remove(struct device *dev)
3269{ 3275{
@@ -3309,7 +3315,7 @@ static int ide_cd_probe(struct device *);
3309static int proc_idecd_read_capacity 3315static int proc_idecd_read_capacity
3310 (char *page, char **start, off_t off, int count, int *eof, void *data) 3316 (char *page, char **start, off_t off, int count, int *eof, void *data)
3311{ 3317{
3312 ide_drive_t*drive = (ide_drive_t *)data; 3318 ide_drive_t *drive = data;
3313 int len; 3319 int len;
3314 3320
3315 len = sprintf(page,"%llu\n", (long long)ide_cdrom_capacity(drive)); 3321 len = sprintf(page,"%llu\n", (long long)ide_cdrom_capacity(drive));
@@ -3449,7 +3455,7 @@ static int ide_cd_probe(struct device *dev)
3449 printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi emulation.\n", drive->name); 3455 printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi emulation.\n", drive->name);
3450 goto failed; 3456 goto failed;
3451 } 3457 }
3452 info = (struct cdrom_info *) kmalloc (sizeof (struct cdrom_info), GFP_KERNEL); 3458 info = kmalloc(sizeof(struct cdrom_info), GFP_KERNEL);
3453 if (info == NULL) { 3459 if (info == NULL) {
3454 printk(KERN_ERR "%s: Can't allocate a cdrom structure\n", drive->name); 3460 printk(KERN_ERR "%s: Can't allocate a cdrom structure\n", drive->name);
3455 goto failed; 3461 goto failed;
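
Most of the ide-cd.c hunks above are mechanical cleanups: redundant casts of void pointers are dropped (kmalloc() results, cdi->handle, the /proc data cookie), one-line "if (stat) return stat;" statements are split onto two lines, and ide_cdrom_capacity() gains a CONFIG_PROC_FS guard since its only caller is the proc read routine. One hunk also makes cdrom_read_toc() set info->toc only after the allocation has succeeded. A tiny illustrative example of why the casts were noise (struct example_info is a made-up type, not from the patch):

#include <linux/slab.h>

struct example_info {
        int users;
};

static struct example_info *example_alloc(void)
{
        /*
         * Equivalent to the older
         *     info = (struct example_info *) kmalloc(sizeof(struct example_info), GFP_KERNEL);
         * the cast is redundant because void * converts implicitly to any
         * object pointer type, and sizeof(*info) keeps the size tied to
         * the variable's actual type.
         */
        struct example_info *info = kmalloc(sizeof(*info), GFP_KERNEL);

        return info;
}
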
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 87d1f8a1f41e..d8c3d8ebad30 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -81,7 +81,7 @@ typedef struct pmac_ide_hwif {
81 81
82} pmac_ide_hwif_t; 82} pmac_ide_hwif_t;
83 83
84static pmac_ide_hwif_t pmac_ide[MAX_HWIFS] __pmacdata; 84static pmac_ide_hwif_t pmac_ide[MAX_HWIFS];
85static int pmac_ide_count; 85static int pmac_ide_count;
86 86
87enum { 87enum {
@@ -242,7 +242,7 @@ struct mdma_timings_t {
242 int cycleTime; 242 int cycleTime;
243}; 243};
244 244
245struct mdma_timings_t mdma_timings_33[] __pmacdata = 245struct mdma_timings_t mdma_timings_33[] =
246{ 246{
247 { 240, 240, 480 }, 247 { 240, 240, 480 },
248 { 180, 180, 360 }, 248 { 180, 180, 360 },
@@ -255,7 +255,7 @@ struct mdma_timings_t mdma_timings_33[] __pmacdata =
255 { 0, 0, 0 } 255 { 0, 0, 0 }
256}; 256};
257 257
258struct mdma_timings_t mdma_timings_33k[] __pmacdata = 258struct mdma_timings_t mdma_timings_33k[] =
259{ 259{
260 { 240, 240, 480 }, 260 { 240, 240, 480 },
261 { 180, 180, 360 }, 261 { 180, 180, 360 },
@@ -268,7 +268,7 @@ struct mdma_timings_t mdma_timings_33k[] __pmacdata =
268 { 0, 0, 0 } 268 { 0, 0, 0 }
269}; 269};
270 270
271struct mdma_timings_t mdma_timings_66[] __pmacdata = 271struct mdma_timings_t mdma_timings_66[] =
272{ 272{
273 { 240, 240, 480 }, 273 { 240, 240, 480 },
274 { 180, 180, 360 }, 274 { 180, 180, 360 },
@@ -286,7 +286,7 @@ struct {
286 int addrSetup; /* ??? */ 286 int addrSetup; /* ??? */
287 int rdy2pause; 287 int rdy2pause;
288 int wrDataSetup; 288 int wrDataSetup;
289} kl66_udma_timings[] __pmacdata = 289} kl66_udma_timings[] =
290{ 290{
291 { 0, 180, 120 }, /* Mode 0 */ 291 { 0, 180, 120 }, /* Mode 0 */
292 { 0, 150, 90 }, /* 1 */ 292 { 0, 150, 90 }, /* 1 */
@@ -301,7 +301,7 @@ struct kauai_timing {
301 u32 timing_reg; 301 u32 timing_reg;
302}; 302};
303 303
304static struct kauai_timing kauai_pio_timings[] __pmacdata = 304static struct kauai_timing kauai_pio_timings[] =
305{ 305{
306 { 930 , 0x08000fff }, 306 { 930 , 0x08000fff },
307 { 600 , 0x08000a92 }, 307 { 600 , 0x08000a92 },
@@ -316,7 +316,7 @@ static struct kauai_timing kauai_pio_timings[] __pmacdata =
316 { 120 , 0x04000148 } 316 { 120 , 0x04000148 }
317}; 317};
318 318
319static struct kauai_timing kauai_mdma_timings[] __pmacdata = 319static struct kauai_timing kauai_mdma_timings[] =
320{ 320{
321 { 1260 , 0x00fff000 }, 321 { 1260 , 0x00fff000 },
322 { 480 , 0x00618000 }, 322 { 480 , 0x00618000 },
@@ -330,7 +330,7 @@ static struct kauai_timing kauai_mdma_timings[] __pmacdata =
330 { 0 , 0 }, 330 { 0 , 0 },
331}; 331};
332 332
333static struct kauai_timing kauai_udma_timings[] __pmacdata = 333static struct kauai_timing kauai_udma_timings[] =
334{ 334{
335 { 120 , 0x000070c0 }, 335 { 120 , 0x000070c0 },
336 { 90 , 0x00005d80 }, 336 { 90 , 0x00005d80 },
@@ -341,7 +341,7 @@ static struct kauai_timing kauai_udma_timings[] __pmacdata =
341 { 0 , 0 }, 341 { 0 , 0 },
342}; 342};
343 343
344static struct kauai_timing shasta_pio_timings[] __pmacdata = 344static struct kauai_timing shasta_pio_timings[] =
345{ 345{
346 { 930 , 0x08000fff }, 346 { 930 , 0x08000fff },
347 { 600 , 0x0A000c97 }, 347 { 600 , 0x0A000c97 },
@@ -356,7 +356,7 @@ static struct kauai_timing shasta_pio_timings[] __pmacdata =
356 { 120 , 0x0400010a } 356 { 120 , 0x0400010a }
357}; 357};
358 358
359static struct kauai_timing shasta_mdma_timings[] __pmacdata = 359static struct kauai_timing shasta_mdma_timings[] =
360{ 360{
361 { 1260 , 0x00fff000 }, 361 { 1260 , 0x00fff000 },
362 { 480 , 0x00820800 }, 362 { 480 , 0x00820800 },
@@ -370,7 +370,7 @@ static struct kauai_timing shasta_mdma_timings[] __pmacdata =
370 { 0 , 0 }, 370 { 0 , 0 },
371}; 371};
372 372
373static struct kauai_timing shasta_udma133_timings[] __pmacdata = 373static struct kauai_timing shasta_udma133_timings[] =
374{ 374{
375 { 120 , 0x00035901, }, 375 { 120 , 0x00035901, },
376 { 90 , 0x000348b1, }, 376 { 90 , 0x000348b1, },
@@ -522,7 +522,7 @@ pmu_hd_blink_init(void)
522 * N.B. this can't be an initfunc, because the media-bay task can 522 * N.B. this can't be an initfunc, because the media-bay task can
523 * call ide_[un]register at any time. 523 * call ide_[un]register at any time.
524 */ 524 */
525void __pmac 525void
526pmac_ide_init_hwif_ports(hw_regs_t *hw, 526pmac_ide_init_hwif_ports(hw_regs_t *hw,
527 unsigned long data_port, unsigned long ctrl_port, 527 unsigned long data_port, unsigned long ctrl_port,
528 int *irq) 528 int *irq)
@@ -559,7 +559,7 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw,
559 * timing register when selecting that unit. This version is for 559 * timing register when selecting that unit. This version is for
560 * ASICs with a single timing register 560 * ASICs with a single timing register
561 */ 561 */
562static void __pmac 562static void
563pmac_ide_selectproc(ide_drive_t *drive) 563pmac_ide_selectproc(ide_drive_t *drive)
564{ 564{
565 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 565 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -579,7 +579,7 @@ pmac_ide_selectproc(ide_drive_t *drive)
579 * timing register when selecting that unit. This version is for 579 * timing register when selecting that unit. This version is for
580 * ASICs with a dual timing register (Kauai) 580 * ASICs with a dual timing register (Kauai)
581 */ 581 */
582static void __pmac 582static void
583pmac_ide_kauai_selectproc(ide_drive_t *drive) 583pmac_ide_kauai_selectproc(ide_drive_t *drive)
584{ 584{
585 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 585 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -600,7 +600,7 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive)
600/* 600/*
601 * Force an update of controller timing values for a given drive 601 * Force an update of controller timing values for a given drive
602 */ 602 */
603static void __pmac 603static void
604pmac_ide_do_update_timings(ide_drive_t *drive) 604pmac_ide_do_update_timings(ide_drive_t *drive)
605{ 605{
606 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 606 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -633,7 +633,7 @@ pmac_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
633 * to sort that out sooner or later and see if I can finally get the 633 * to sort that out sooner or later and see if I can finally get the
634 * common version to work properly in all cases 634 * common version to work properly in all cases
635 */ 635 */
636static int __pmac 636static int
637pmac_ide_do_setfeature(ide_drive_t *drive, u8 command) 637pmac_ide_do_setfeature(ide_drive_t *drive, u8 command)
638{ 638{
639 ide_hwif_t *hwif = HWIF(drive); 639 ide_hwif_t *hwif = HWIF(drive);
@@ -710,7 +710,7 @@ out:
710/* 710/*
711 * Old tuning functions (called on hdparm -p), sets up drive PIO timings 711 * Old tuning functions (called on hdparm -p), sets up drive PIO timings
712 */ 712 */
713static void __pmac 713static void
714pmac_ide_tuneproc(ide_drive_t *drive, u8 pio) 714pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
715{ 715{
716 ide_pio_data_t d; 716 ide_pio_data_t d;
@@ -801,7 +801,7 @@ pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
801/* 801/*
802 * Calculate KeyLargo ATA/66 UDMA timings 802 * Calculate KeyLargo ATA/66 UDMA timings
803 */ 803 */
804static int __pmac 804static int
805set_timings_udma_ata4(u32 *timings, u8 speed) 805set_timings_udma_ata4(u32 *timings, u8 speed)
806{ 806{
807 unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks; 807 unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
@@ -829,7 +829,7 @@ set_timings_udma_ata4(u32 *timings, u8 speed)
829/* 829/*
830 * Calculate Kauai ATA/100 UDMA timings 830 * Calculate Kauai ATA/100 UDMA timings
831 */ 831 */
832static int __pmac 832static int
833set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed) 833set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
834{ 834{
835 struct ide_timing *t = ide_timing_find_mode(speed); 835 struct ide_timing *t = ide_timing_find_mode(speed);
@@ -849,7 +849,7 @@ set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
849/* 849/*
850 * Calculate Shasta ATA/133 UDMA timings 850 * Calculate Shasta ATA/133 UDMA timings
851 */ 851 */
852static int __pmac 852static int
853set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed) 853set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
854{ 854{
855 struct ide_timing *t = ide_timing_find_mode(speed); 855 struct ide_timing *t = ide_timing_find_mode(speed);
@@ -869,7 +869,7 @@ set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
869/* 869/*
870 * Calculate MDMA timings for all cells 870 * Calculate MDMA timings for all cells
871 */ 871 */
872static int __pmac 872static int
873set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2, 873set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
874 u8 speed, int drive_cycle_time) 874 u8 speed, int drive_cycle_time)
875{ 875{
@@ -1014,7 +1014,7 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
1014 * our dedicated function is more precise as it uses the drive provided 1014 * our dedicated function is more precise as it uses the drive provided
1015 * cycle time value. We should probably fix this one to deal with that too... 1015 * cycle time value. We should probably fix this one to deal with that too...
1016 */ 1016 */
1017static int __pmac 1017static int
1018pmac_ide_tune_chipset (ide_drive_t *drive, byte speed) 1018pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
1019{ 1019{
1020 int unit = (drive->select.b.unit & 0x01); 1020 int unit = (drive->select.b.unit & 0x01);
@@ -1092,7 +1092,7 @@ pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
1092 * Blast some well known "safe" values to the timing registers at init or 1092 * Blast some well known "safe" values to the timing registers at init or
1093 * wakeup from sleep time, before we do real calculation 1093 * wakeup from sleep time, before we do real calculation
1094 */ 1094 */
1095static void __pmac 1095static void
1096sanitize_timings(pmac_ide_hwif_t *pmif) 1096sanitize_timings(pmac_ide_hwif_t *pmif)
1097{ 1097{
1098 unsigned int value, value2 = 0; 1098 unsigned int value, value2 = 0;
@@ -1123,13 +1123,13 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
1123 pmif->timings[2] = pmif->timings[3] = value2; 1123 pmif->timings[2] = pmif->timings[3] = value2;
1124} 1124}
1125 1125
1126unsigned long __pmac 1126unsigned long
1127pmac_ide_get_base(int index) 1127pmac_ide_get_base(int index)
1128{ 1128{
1129 return pmac_ide[index].regbase; 1129 return pmac_ide[index].regbase;
1130} 1130}
1131 1131
1132int __pmac 1132int
1133pmac_ide_check_base(unsigned long base) 1133pmac_ide_check_base(unsigned long base)
1134{ 1134{
1135 int ix; 1135 int ix;
@@ -1140,7 +1140,7 @@ pmac_ide_check_base(unsigned long base)
1140 return -1; 1140 return -1;
1141} 1141}
1142 1142
1143int __pmac 1143int
1144pmac_ide_get_irq(unsigned long base) 1144pmac_ide_get_irq(unsigned long base)
1145{ 1145{
1146 int ix; 1146 int ix;
@@ -1151,7 +1151,7 @@ pmac_ide_get_irq(unsigned long base)
1151 return 0; 1151 return 0;
1152} 1152}
1153 1153
1154static int ide_majors[] __pmacdata = { 3, 22, 33, 34, 56, 57 }; 1154static int ide_majors[] = { 3, 22, 33, 34, 56, 57 };
1155 1155
1156dev_t __init 1156dev_t __init
1157pmac_find_ide_boot(char *bootdevice, int n) 1157pmac_find_ide_boot(char *bootdevice, int n)
@@ -1701,7 +1701,7 @@ pmac_ide_probe(void)
1701 * pmac_ide_build_dmatable builds the DBDMA command list 1701 * pmac_ide_build_dmatable builds the DBDMA command list
1702 * for a transfer and sets the DBDMA channel to point to it. 1702 * for a transfer and sets the DBDMA channel to point to it.
1703 */ 1703 */
1704static int __pmac 1704static int
1705pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq) 1705pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1706{ 1706{
1707 struct dbdma_cmd *table; 1707 struct dbdma_cmd *table;
@@ -1785,7 +1785,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1785} 1785}
1786 1786
1787/* Teardown mappings after DMA has completed. */ 1787/* Teardown mappings after DMA has completed. */
1788static void __pmac 1788static void
1789pmac_ide_destroy_dmatable (ide_drive_t *drive) 1789pmac_ide_destroy_dmatable (ide_drive_t *drive)
1790{ 1790{
1791 ide_hwif_t *hwif = drive->hwif; 1791 ide_hwif_t *hwif = drive->hwif;
@@ -1802,7 +1802,7 @@ pmac_ide_destroy_dmatable (ide_drive_t *drive)
1802/* 1802/*
1803 * Pick up best MDMA timing for the drive and apply it 1803 * Pick up best MDMA timing for the drive and apply it
1804 */ 1804 */
1805static int __pmac 1805static int
1806pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode) 1806pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
1807{ 1807{
1808 ide_hwif_t *hwif = HWIF(drive); 1808 ide_hwif_t *hwif = HWIF(drive);
@@ -1859,7 +1859,7 @@ pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
1859/* 1859/*
1860 * Pick up best UDMA timing for the drive and apply it 1860 * Pick up best UDMA timing for the drive and apply it
1861 */ 1861 */
1862static int __pmac 1862static int
1863pmac_ide_udma_enable(ide_drive_t *drive, u16 mode) 1863pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
1864{ 1864{
1865 ide_hwif_t *hwif = HWIF(drive); 1865 ide_hwif_t *hwif = HWIF(drive);
@@ -1915,7 +1915,7 @@ pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
1915 * Check what is the best DMA timing setting for the drive and 1915 * Check what is the best DMA timing setting for the drive and
1916 * call appropriate functions to apply it. 1916 * call appropriate functions to apply it.
1917 */ 1917 */
1918static int __pmac 1918static int
1919pmac_ide_dma_check(ide_drive_t *drive) 1919pmac_ide_dma_check(ide_drive_t *drive)
1920{ 1920{
1921 struct hd_driveid *id = drive->id; 1921 struct hd_driveid *id = drive->id;
@@ -1967,7 +1967,7 @@ pmac_ide_dma_check(ide_drive_t *drive)
1967 * Prepare a DMA transfer. We build the DMA table, adjust the timings for 1967 * Prepare a DMA transfer. We build the DMA table, adjust the timings for
1968 * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion 1968 * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
1969 */ 1969 */
1970static int __pmac 1970static int
1971pmac_ide_dma_setup(ide_drive_t *drive) 1971pmac_ide_dma_setup(ide_drive_t *drive)
1972{ 1972{
1973 ide_hwif_t *hwif = HWIF(drive); 1973 ide_hwif_t *hwif = HWIF(drive);
@@ -1997,7 +1997,7 @@ pmac_ide_dma_setup(ide_drive_t *drive)
1997 return 0; 1997 return 0;
1998} 1998}
1999 1999
2000static void __pmac 2000static void
2001pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command) 2001pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
2002{ 2002{
2003 /* issue cmd to drive */ 2003 /* issue cmd to drive */
@@ -2008,7 +2008,7 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
2008 * Kick the DMA controller into life after the DMA command has been issued 2008 * Kick the DMA controller into life after the DMA command has been issued
2009 * to the drive. 2009 * to the drive.
2010 */ 2010 */
2011static void __pmac 2011static void
2012pmac_ide_dma_start(ide_drive_t *drive) 2012pmac_ide_dma_start(ide_drive_t *drive)
2013{ 2013{
2014 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 2014 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2024,7 +2024,7 @@ pmac_ide_dma_start(ide_drive_t *drive)
2024/* 2024/*
2025 * After a DMA transfer, make sure the controller is stopped 2025 * After a DMA transfer, make sure the controller is stopped
2026 */ 2026 */
2027static int __pmac 2027static int
2028pmac_ide_dma_end (ide_drive_t *drive) 2028pmac_ide_dma_end (ide_drive_t *drive)
2029{ 2029{
2030 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 2030 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2052,7 +2052,7 @@ pmac_ide_dma_end (ide_drive_t *drive)
2052 * that's not implemented yet), on the other hand, we don't have shared interrupts 2052 * that's not implemented yet), on the other hand, we don't have shared interrupts
2053 * so it's not really a problem 2053 * so it's not really a problem
2054 */ 2054 */
2055static int __pmac 2055static int
2056pmac_ide_dma_test_irq (ide_drive_t *drive) 2056pmac_ide_dma_test_irq (ide_drive_t *drive)
2057{ 2057{
2058 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 2058 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2108,19 +2108,19 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
2108 return 1; 2108 return 1;
2109} 2109}
2110 2110
2111static int __pmac 2111static int
2112pmac_ide_dma_host_off (ide_drive_t *drive) 2112pmac_ide_dma_host_off (ide_drive_t *drive)
2113{ 2113{
2114 return 0; 2114 return 0;
2115} 2115}
2116 2116
2117static int __pmac 2117static int
2118pmac_ide_dma_host_on (ide_drive_t *drive) 2118pmac_ide_dma_host_on (ide_drive_t *drive)
2119{ 2119{
2120 return 0; 2120 return 0;
2121} 2121}
2122 2122
2123static int __pmac 2123static int
2124pmac_ide_dma_lostirq (ide_drive_t *drive) 2124pmac_ide_dma_lostirq (ide_drive_t *drive)
2125{ 2125{
2126 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 2126 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f014e639088c..c57a3871184c 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -38,6 +38,7 @@
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/errno.h> 39#include <linux/errno.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/sched.h> /* INIT_WORK, schedule_work(), flush_scheduled_work() */
41 42
42#include <rdma/ib_cache.h> 43#include <rdma/ib_cache.h>
43 44
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 89ce9dc210d4..acda7d63d6fe 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -43,6 +43,7 @@
43#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
44#include <linux/kref.h> 44#include <linux/kref.h>
45#include <linux/idr.h> 45#include <linux/idr.h>
46#include <linux/workqueue.h>
46 47
47#include <rdma/ib_pack.h> 48#include <rdma/ib_pack.h>
48#include <rdma/ib_sa.h> 49#include <rdma/ib_sa.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 889e85096736..22fdc446f25c 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -34,6 +34,8 @@
34 */ 34 */
35 35
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/string.h>
38#include <linux/slab.h>
37 39
38#include <rdma/ib_verbs.h> 40#include <rdma/ib_verbs.h>
39#include <rdma/ib_cache.h> 41#include <rdma/ib_cache.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 8561b297a19b..1229c604c6e0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -34,6 +34,9 @@
34 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $ 34 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
35 */ 35 */
36 36
37#include <linux/string.h>
38#include <linux/slab.h>
39
37#include <rdma/ib_verbs.h> 40#include <rdma/ib_verbs.h>
38#include <rdma/ib_mad.h> 41#include <rdma/ib_mad.h>
39#include <rdma/ib_smi.h> 42#include <rdma/ib_smi.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index b47ea7daf088..2fc449da418d 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -33,6 +33,8 @@
33 */ 33 */
34 34
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/string.h>
37#include <linux/slab.h>
36 38
37#include "mthca_dev.h" 39#include "mthca_dev.h"
38#include "mthca_cmd.h" 40#include "mthca_cmd.h"
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 0576056b34f4..bd1338682074 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -35,6 +35,8 @@
35 35
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/moduleparam.h> 37#include <linux/moduleparam.h>
38#include <linux/string.h>
39#include <linux/slab.h>
38 40
39#include "mthca_profile.h" 41#include "mthca_profile.h"
40 42
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 62ff091505da..7c9afde5ace5 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -36,6 +36,8 @@
36 */ 36 */
37 37
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/string.h>
40#include <linux/slab.h>
39 41
40#include <rdma/ib_verbs.h> 42#include <rdma/ib_verbs.h>
41#include <rdma/ib_cache.h> 43#include <rdma/ib_cache.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 4f995391dd1d..df5e494a9d38 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -37,6 +37,7 @@
37#include <linux/errno.h> 37#include <linux/errno.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/slab.h>
40 41
41#include "mthca_dev.h" 42#include "mthca_dev.h"
42#include "mthca_cmd.h" 43#include "mthca_cmd.h"
diff --git a/drivers/infiniband/hw/mthca/mthca_uar.c b/drivers/infiniband/hw/mthca/mthca_uar.c
index 1c8791ded6ff..8e9219842be4 100644
--- a/drivers/infiniband/hw/mthca/mthca_uar.c
+++ b/drivers/infiniband/hw/mthca/mthca_uar.c
@@ -32,6 +32,8 @@
32 * $Id$ 32 * $Id$
33 */ 33 */
34 34
35#include <asm/page.h> /* PAGE_SHIFT */
36
35#include "mthca_dev.h" 37#include "mthca_dev.h"
36#include "mthca_memfree.h" 38#include "mthca_memfree.h"
37 39
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index ab09cf4093e3..0506934244f0 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/sched.h> /* HZ */
24 25
25/*#include <asm/io.h>*/ 26/*#include <asm/io.h>*/
26 27
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index bf65430181fa..4571ea3a4b92 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -34,6 +34,7 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/gameport.h> 35#include <linux/gameport.h>
36#include <linux/input.h> 36#include <linux/input.h>
37#include <linux/jiffies.h>
37 38
38#define DRIVER_DESC "FP-Gaming Assasin 3D joystick driver" 39#define DRIVER_DESC "FP-Gaming Assasin 3D joystick driver"
39 40
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 9d95459f4bcb..704bf70f1db7 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -34,6 +34,7 @@
34#include <linux/input.h> 34#include <linux/input.h>
35#include <linux/gameport.h> 35#include <linux/gameport.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/jiffies.h>
37 38
38#define DRIVER_DESC "Logitech ADI joystick family driver" 39#define DRIVER_DESC "Logitech ADI joystick family driver"
39 40
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index c75ac6eb1ffb..3121961e3e7c 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -38,6 +38,7 @@
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/input.h> 39#include <linux/input.h>
40#include <linux/gameport.h> 40#include <linux/gameport.h>
41#include <linux/jiffies.h>
41#include <asm/timex.h> 42#include <asm/timex.h>
42 43
43#define DRIVER_DESC "Analog joystick and gamepad driver" 44#define DRIVER_DESC "Analog joystick and gamepad driver"
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 9a3dfc724a41..1909f7ef340c 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -34,6 +34,7 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/gameport.h> 35#include <linux/gameport.h>
36#include <linux/input.h> 36#include <linux/input.h>
37#include <linux/jiffies.h>
37 38
38#define DRIVER_DESC "Creative Labs Blaster GamePad Cobra driver" 39#define DRIVER_DESC "Creative Labs Blaster GamePad Cobra driver"
39 40
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index e151f8c5bcb9..8a3ad455eb38 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -35,6 +35,7 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/input.h> 36#include <linux/input.h>
37#include <linux/gameport.h> 37#include <linux/gameport.h>
38#include <linux/jiffies.h>
38 39
39#define DRIVER_DESC "Genius Flight 2000 joystick driver" 40#define DRIVER_DESC "Genius Flight 2000 joystick driver"
40 41
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index e206bb56e53c..a936e7aedb10 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/gameport.h> 35#include <linux/gameport.h>
36#include <linux/input.h> 36#include <linux/input.h>
37#include <linux/jiffies.h>
37 38
38#define DRIVER_DESC "Gravis GrIP protocol joystick driver" 39#define DRIVER_DESC "Gravis GrIP protocol joystick driver"
39 40
diff --git a/drivers/input/joystick/grip_mp.c b/drivers/input/joystick/grip_mp.c
index a0ba93ccac72..51a912222e85 100644
--- a/drivers/input/joystick/grip_mp.c
+++ b/drivers/input/joystick/grip_mp.c
@@ -19,6 +19,7 @@
19#include <linux/input.h> 19#include <linux/input.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/jiffies.h>
22 23
23#define DRIVER_DESC "Gravis Grip Multiport driver" 24#define DRIVER_DESC "Gravis Grip Multiport driver"
24 25
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index c528473c09d8..6e2c721c26ba 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -35,6 +35,7 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/gameport.h> 36#include <linux/gameport.h>
37#include <linux/input.h> 37#include <linux/input.h>
38#include <linux/jiffies.h>
38 39
39#define DRIVER_DESC "Guillemot Digital joystick driver" 40#define DRIVER_DESC "Guillemot Digital joystick driver"
40 41
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 8511ee7bb263..c4ed01758226 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -38,6 +38,7 @@
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/gameport.h> 39#include <linux/gameport.h>
40#include <linux/input.h> 40#include <linux/input.h>
41#include <linux/jiffies.h>
41 42
42#define DRIVER_DESC "InterAct digital joystick driver" 43#define DRIVER_DESC "InterAct digital joystick driver"
43 44
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index 4234ccaf9146..88ec5a918f2e 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -34,6 +34,7 @@
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/slab.h>
37 38
38#define DRIVER_DESC "Gameport data dumper module" 39#define DRIVER_DESC "Gameport data dumper module"
39 40
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index eaaad45cc750..78dd163cd702 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -33,6 +33,7 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/input.h> 34#include <linux/input.h>
35#include <linux/gameport.h> 35#include <linux/gameport.h>
36#include <linux/jiffies.h>
36 37
37#define DRIVER_DESC "Microsoft SideWinder joystick family driver" 38#define DRIVER_DESC "Microsoft SideWinder joystick family driver"
38 39
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 3a7d1bb46472..60e2aac7d06e 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -38,6 +38,7 @@
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/gameport.h> 39#include <linux/gameport.h>
40#include <linux/input.h> 40#include <linux/input.h>
41#include <linux/jiffies.h>
41 42
42#define DRIVER_DESC "ThrustMaster DirectConnect joystick driver" 43#define DRIVER_DESC "ThrustMaster DirectConnect joystick driver"
43 44
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index bb934e6d9636..b3eaac1b35b6 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -14,7 +14,7 @@ if INPUT_MISC
14 14
15config INPUT_PCSPKR 15config INPUT_PCSPKR
16 tristate "PC Speaker support" 16 tristate "PC Speaker support"
17 depends on ALPHA || X86 || X86_64 || MIPS || PPC_PREP || PPC_CHRP || PPC_PSERIES 17 depends on ALPHA || X86 || MIPS || PPC_PREP || PPC_CHRP || PPC_PSERIES
18 help 18 help
19 Say Y here if you want the standard PC Speaker to be used for 19 Say Y here if you want the standard PC Speaker to be used for
20 bells and whistles. 20 bells and whistles.
diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c
index e3c44ffae674..1c9426fd5205 100644
--- a/drivers/input/serio/hp_sdc_mlc.c
+++ b/drivers/input/serio/hp_sdc_mlc.c
@@ -40,6 +40,7 @@
40#include <linux/module.h> 40#include <linux/module.h>
41#include <linux/init.h> 41#include <linux/init.h>
42#include <linux/string.h> 42#include <linux/string.h>
43#include <asm/semaphore.h>
43 44
44#define PREFIX "HP SDC MLC: " 45#define PREFIX "HP SDC MLC: "
45 46
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 3abd7fc6e5ef..7b564c0dd996 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/sched.h> /* current */
18 19
19MODULE_DESCRIPTION("CAPI4Linux: /dev/capi/ filesystem"); 20MODULE_DESCRIPTION("CAPI4Linux: /dev/capi/ filesystem");
20MODULE_AUTHOR("Carsten Paeth"); 21MODULE_AUTHOR("Carsten Paeth");
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 7333377ab31d..e3866b0a97fd 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1063,7 +1063,7 @@ tx_b_frame(struct hfc4s8s_btype *bch)
1063 Write_hfc8(l1->hw, A_INC_RES_FIFO, 1); 1063 Write_hfc8(l1->hw, A_INC_RES_FIFO, 1);
1064 } 1064 }
1065 ack_len += skb->truesize; 1065 ack_len += skb->truesize;
1066 bch->tx_skb = 0; 1066 bch->tx_skb = NULL;
1067 bch->tx_cnt = 0; 1067 bch->tx_cnt = 0;
1068 dev_kfree_skb(skb); 1068 dev_kfree_skb(skb);
1069 } else 1069 } else
@@ -1659,10 +1659,10 @@ hfc4s8s_remove(struct pci_dev *pdev)
1659} 1659}
1660 1660
1661static struct pci_driver hfc4s8s_driver = { 1661static struct pci_driver hfc4s8s_driver = {
1662 name:"hfc4s8s_l1", 1662 .name = "hfc4s8s_l1",
1663 probe:hfc4s8s_probe, 1663 .probe = hfc4s8s_probe,
1664 remove:__devexit_p(hfc4s8s_remove), 1664 .remove = __devexit_p(hfc4s8s_remove),
1665 id_table:hfc4s8s_ids, 1665 .id_table = hfc4s8s_ids,
1666}; 1666};
1667 1667
1668/**********************/ 1668/**********************/
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 5e0811dc6536..2b8a6e821d44 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -27,7 +27,7 @@ static volatile unsigned char __iomem *anslcd_ptr;
27 27
28#undef DEBUG 28#undef DEBUG
29 29
30static void __pmac 30static void
31anslcd_write_byte_ctrl ( unsigned char c ) 31anslcd_write_byte_ctrl ( unsigned char c )
32{ 32{
33#ifdef DEBUG 33#ifdef DEBUG
@@ -43,14 +43,14 @@ anslcd_write_byte_ctrl ( unsigned char c )
43 } 43 }
44} 44}
45 45
46static void __pmac 46static void
47anslcd_write_byte_data ( unsigned char c ) 47anslcd_write_byte_data ( unsigned char c )
48{ 48{
49 out_8(anslcd_ptr + ANSLCD_DATA_IX, c); 49 out_8(anslcd_ptr + ANSLCD_DATA_IX, c);
50 udelay(anslcd_short_delay); 50 udelay(anslcd_short_delay);
51} 51}
52 52
53static ssize_t __pmac 53static ssize_t
54anslcd_write( struct file * file, const char __user * buf, 54anslcd_write( struct file * file, const char __user * buf,
55 size_t count, loff_t *ppos ) 55 size_t count, loff_t *ppos )
56{ 56{
@@ -73,7 +73,7 @@ anslcd_write( struct file * file, const char __user * buf,
73 return p - buf; 73 return p - buf;
74} 74}
75 75
76static int __pmac 76static int
77anslcd_ioctl( struct inode * inode, struct file * file, 77anslcd_ioctl( struct inode * inode, struct file * file,
78 unsigned int cmd, unsigned long arg ) 78 unsigned int cmd, unsigned long arg )
79{ 79{
@@ -115,7 +115,7 @@ anslcd_ioctl( struct inode * inode, struct file * file,
115 } 115 }
116} 116}
117 117
118static int __pmac 118static int
119anslcd_open( struct inode * inode, struct file * file ) 119anslcd_open( struct inode * inode, struct file * file )
120{ 120{
121 return 0; 121 return 0;
diff --git a/drivers/macintosh/apm_emu.c b/drivers/macintosh/apm_emu.c
index 19d3e05d6825..e5a2bbf99399 100644
--- a/drivers/macintosh/apm_emu.c
+++ b/drivers/macintosh/apm_emu.c
@@ -430,8 +430,8 @@ static int apm_emu_get_info(char *buf, char **start, off_t fpos, int length)
430 -1: Unknown 430 -1: Unknown
431 8) min = minutes; sec = seconds */ 431 8) min = minutes; sec = seconds */
432 432
433 unsigned short ac_line_status = 0xff; 433 unsigned short ac_line_status;
434 unsigned short battery_status = 0xff; 434 unsigned short battery_status = 0;
435 unsigned short battery_flag = 0xff; 435 unsigned short battery_flag = 0xff;
436 int percentage = -1; 436 int percentage = -1;
437 int time_units = -1; 437 int time_units = -1;
@@ -446,6 +446,7 @@ static int apm_emu_get_info(char *buf, char **start, off_t fpos, int length)
446 ac_line_status = ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0); 446 ac_line_status = ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0);
447 for (i=0; i<pmu_battery_count; i++) { 447 for (i=0; i<pmu_battery_count; i++) {
448 if (pmu_batteries[i].flags & PMU_BATT_PRESENT) { 448 if (pmu_batteries[i].flags & PMU_BATT_PRESENT) {
449 battery_status++;
449 if (percentage < 0) 450 if (percentage < 0)
450 percentage = 0; 451 percentage = 0;
451 if (charge < 0) 452 if (charge < 0)
@@ -461,6 +462,9 @@ static int apm_emu_get_info(char *buf, char **start, off_t fpos, int length)
461 charging++; 462 charging++;
462 } 463 }
463 } 464 }
465 if (0 == battery_status)
466 ac_line_status = 1;
467 battery_status = 0xff;
464 if (real_count) { 468 if (real_count) {
465 if (amperage < 0) { 469 if (amperage < 0) {
466 if (btype == PMU_BATT_TYPE_SMART) 470 if (btype == PMU_BATT_TYPE_SMART)
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 1ee003346923..c34c96d18907 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -17,6 +17,8 @@
17#include <linux/pci_ids.h> 17#include <linux/pci_ids.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/slab.h>
21
20#include <asm/machdep.h> 22#include <asm/machdep.h>
21#include <asm/macio.h> 23#include <asm/macio.h>
22#include <asm/pmac_feature.h> 24#include <asm/pmac_feature.h>
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 97d22bb4516a..7f7d4eaca870 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -39,6 +39,31 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
39 return length; 39 return length;
40} 40}
41 41
42static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
43 char *buf)
44{
45 struct of_device *of;
46 char *compat;
47 int cplen;
48 int length;
49
50 of = &to_macio_device (dev)->ofdev;
51 compat = (char *) get_property (of->node, "compatible", &cplen);
52 if (!compat) compat = "", cplen = 1;
53 length = sprintf (buf, "of:N%sT%s", of->node->name, of->node->type);
54 buf += length;
55 while (cplen > 0) {
56 int l;
57 length += sprintf (buf, "C%s", compat);
58 buf += length;
59 l = strlen (compat) + 1;
60 compat += l;
61 cplen -= l;
62 }
63
64 return length;
65}
66
42macio_config_of_attr (name, "%s\n"); 67macio_config_of_attr (name, "%s\n");
43macio_config_of_attr (type, "%s\n"); 68macio_config_of_attr (type, "%s\n");
44 69
@@ -46,5 +71,6 @@ struct device_attribute macio_dev_attrs[] = {
46 __ATTR_RO(name), 71 __ATTR_RO(name),
47 __ATTR_RO(type), 72 __ATTR_RO(type),
48 __ATTR_RO(compatible), 73 __ATTR_RO(compatible),
74 __ATTR_RO(modalias),
49 __ATTR_NULL 75 __ATTR_NULL
50}; 76};
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index c0712a1ea5af..b856bb67169c 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -167,19 +167,19 @@ enum {
167 * Functions for polling content of media bay 167 * Functions for polling content of media bay
168 */ 168 */
169 169
170static u8 __pmac 170static u8
171ohare_mb_content(struct media_bay_info *bay) 171ohare_mb_content(struct media_bay_info *bay)
172{ 172{
173 return (MB_IN32(bay, OHARE_MBCR) >> 12) & 7; 173 return (MB_IN32(bay, OHARE_MBCR) >> 12) & 7;
174} 174}
175 175
176static u8 __pmac 176static u8
177heathrow_mb_content(struct media_bay_info *bay) 177heathrow_mb_content(struct media_bay_info *bay)
178{ 178{
179 return (MB_IN32(bay, HEATHROW_MBCR) >> 12) & 7; 179 return (MB_IN32(bay, HEATHROW_MBCR) >> 12) & 7;
180} 180}
181 181
182static u8 __pmac 182static u8
183keylargo_mb_content(struct media_bay_info *bay) 183keylargo_mb_content(struct media_bay_info *bay)
184{ 184{
185 int new_gpio; 185 int new_gpio;
@@ -205,7 +205,7 @@ keylargo_mb_content(struct media_bay_info *bay)
205 * into reset state as well 205 * into reset state as well
206 */ 206 */
207 207
208static void __pmac 208static void
209ohare_mb_power(struct media_bay_info* bay, int on_off) 209ohare_mb_power(struct media_bay_info* bay, int on_off)
210{ 210{
211 if (on_off) { 211 if (on_off) {
@@ -224,7 +224,7 @@ ohare_mb_power(struct media_bay_info* bay, int on_off)
224 MB_BIC(bay, OHARE_MBCR, 0x00000F00); 224 MB_BIC(bay, OHARE_MBCR, 0x00000F00);
225} 225}
226 226
227static void __pmac 227static void
228heathrow_mb_power(struct media_bay_info* bay, int on_off) 228heathrow_mb_power(struct media_bay_info* bay, int on_off)
229{ 229{
230 if (on_off) { 230 if (on_off) {
@@ -243,7 +243,7 @@ heathrow_mb_power(struct media_bay_info* bay, int on_off)
243 MB_BIC(bay, HEATHROW_MBCR, 0x00000F00); 243 MB_BIC(bay, HEATHROW_MBCR, 0x00000F00);
244} 244}
245 245
246static void __pmac 246static void
247keylargo_mb_power(struct media_bay_info* bay, int on_off) 247keylargo_mb_power(struct media_bay_info* bay, int on_off)
248{ 248{
249 if (on_off) { 249 if (on_off) {
@@ -267,7 +267,7 @@ keylargo_mb_power(struct media_bay_info* bay, int on_off)
267 * enable the related busses 267 * enable the related busses
268 */ 268 */
269 269
270static int __pmac 270static int
271ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id) 271ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
272{ 272{
273 switch(device_id) { 273 switch(device_id) {
@@ -287,7 +287,7 @@ ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
287 return -ENODEV; 287 return -ENODEV;
288} 288}
289 289
290static int __pmac 290static int
291heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id) 291heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
292{ 292{
293 switch(device_id) { 293 switch(device_id) {
@@ -307,7 +307,7 @@ heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
307 return -ENODEV; 307 return -ENODEV;
308} 308}
309 309
310static int __pmac 310static int
311keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id) 311keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
312{ 312{
313 switch(device_id) { 313 switch(device_id) {
@@ -330,43 +330,43 @@ keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
330 * Functions for tweaking resets 330 * Functions for tweaking resets
331 */ 331 */
332 332
333static void __pmac 333static void
334ohare_mb_un_reset(struct media_bay_info* bay) 334ohare_mb_un_reset(struct media_bay_info* bay)
335{ 335{
336 MB_BIS(bay, OHARE_FCR, OH_BAY_RESET_N); 336 MB_BIS(bay, OHARE_FCR, OH_BAY_RESET_N);
337} 337}
338 338
339static void __pmac keylargo_mb_init(struct media_bay_info *bay) 339static void keylargo_mb_init(struct media_bay_info *bay)
340{ 340{
341 MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_ENABLE); 341 MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_ENABLE);
342} 342}
343 343
344static void __pmac heathrow_mb_un_reset(struct media_bay_info* bay) 344static void heathrow_mb_un_reset(struct media_bay_info* bay)
345{ 345{
346 MB_BIS(bay, HEATHROW_FCR, HRW_BAY_RESET_N); 346 MB_BIS(bay, HEATHROW_FCR, HRW_BAY_RESET_N);
347} 347}
348 348
349static void __pmac keylargo_mb_un_reset(struct media_bay_info* bay) 349static void keylargo_mb_un_reset(struct media_bay_info* bay)
350{ 350{
351 MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_DEV_RESET); 351 MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_DEV_RESET);
352} 352}
353 353
354static void __pmac ohare_mb_un_reset_ide(struct media_bay_info* bay) 354static void ohare_mb_un_reset_ide(struct media_bay_info* bay)
355{ 355{
356 MB_BIS(bay, OHARE_FCR, OH_IDE1_RESET_N); 356 MB_BIS(bay, OHARE_FCR, OH_IDE1_RESET_N);
357} 357}
358 358
359static void __pmac heathrow_mb_un_reset_ide(struct media_bay_info* bay) 359static void heathrow_mb_un_reset_ide(struct media_bay_info* bay)
360{ 360{
361 MB_BIS(bay, HEATHROW_FCR, HRW_IDE1_RESET_N); 361 MB_BIS(bay, HEATHROW_FCR, HRW_IDE1_RESET_N);
362} 362}
363 363
364static void __pmac keylargo_mb_un_reset_ide(struct media_bay_info* bay) 364static void keylargo_mb_un_reset_ide(struct media_bay_info* bay)
365{ 365{
366 MB_BIS(bay, KEYLARGO_FCR1, KL1_EIDE0_RESET_N); 366 MB_BIS(bay, KEYLARGO_FCR1, KL1_EIDE0_RESET_N);
367} 367}
368 368
369static inline void __pmac set_mb_power(struct media_bay_info* bay, int onoff) 369static inline void set_mb_power(struct media_bay_info* bay, int onoff)
370{ 370{
371 /* Power up up and assert the bay reset line */ 371 /* Power up up and assert the bay reset line */
372 if (onoff) { 372 if (onoff) {
@@ -382,7 +382,7 @@ static inline void __pmac set_mb_power(struct media_bay_info* bay, int onoff)
382 bay->timer = msecs_to_jiffies(MB_POWER_DELAY); 382 bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
383} 383}
384 384
385static void __pmac poll_media_bay(struct media_bay_info* bay) 385static void poll_media_bay(struct media_bay_info* bay)
386{ 386{
387 int id = bay->ops->content(bay); 387 int id = bay->ops->content(bay);
388 388
@@ -415,7 +415,7 @@ static void __pmac poll_media_bay(struct media_bay_info* bay)
415 } 415 }
416} 416}
417 417
418int __pmac check_media_bay(struct device_node *which_bay, int what) 418int check_media_bay(struct device_node *which_bay, int what)
419{ 419{
420#ifdef CONFIG_BLK_DEV_IDE 420#ifdef CONFIG_BLK_DEV_IDE
421 int i; 421 int i;
@@ -432,7 +432,7 @@ int __pmac check_media_bay(struct device_node *which_bay, int what)
432} 432}
433EXPORT_SYMBOL(check_media_bay); 433EXPORT_SYMBOL(check_media_bay);
434 434
435int __pmac check_media_bay_by_base(unsigned long base, int what) 435int check_media_bay_by_base(unsigned long base, int what)
436{ 436{
437#ifdef CONFIG_BLK_DEV_IDE 437#ifdef CONFIG_BLK_DEV_IDE
438 int i; 438 int i;
@@ -449,7 +449,7 @@ int __pmac check_media_bay_by_base(unsigned long base, int what)
449 return -ENODEV; 449 return -ENODEV;
450} 450}
451 451
452int __pmac media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base, 452int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
453 int irq, int index) 453 int irq, int index)
454{ 454{
455#ifdef CONFIG_BLK_DEV_IDE 455#ifdef CONFIG_BLK_DEV_IDE
@@ -489,7 +489,7 @@ int __pmac media_bay_set_ide_infos(struct device_node* which_bay, unsigned long
489 return -ENODEV; 489 return -ENODEV;
490} 490}
491 491
492static void __pmac media_bay_step(int i) 492static void media_bay_step(int i)
493{ 493{
494 struct media_bay_info* bay = &media_bays[i]; 494 struct media_bay_info* bay = &media_bays[i];
495 495
@@ -619,7 +619,7 @@ static void __pmac media_bay_step(int i)
619 * with the IDE driver. It needs to be a thread because 619 * with the IDE driver. It needs to be a thread because
620 * ide_register can't be called from interrupt context. 620 * ide_register can't be called from interrupt context.
621 */ 621 */
622static int __pmac media_bay_task(void *x) 622static int media_bay_task(void *x)
623{ 623{
624 int i; 624 int i;
625 625
@@ -704,7 +704,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
704 704
705} 705}
706 706
707static int __pmac media_bay_suspend(struct macio_dev *mdev, pm_message_t state) 707static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
708{ 708{
709 struct media_bay_info *bay = macio_get_drvdata(mdev); 709 struct media_bay_info *bay = macio_get_drvdata(mdev);
710 710
@@ -719,7 +719,7 @@ static int __pmac media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
719 return 0; 719 return 0;
720} 720}
721 721
722static int __pmac media_bay_resume(struct macio_dev *mdev) 722static int media_bay_resume(struct macio_dev *mdev)
723{ 723{
724 struct media_bay_info *bay = macio_get_drvdata(mdev); 724 struct media_bay_info *bay = macio_get_drvdata(mdev);
725 725
@@ -760,7 +760,7 @@ static int __pmac media_bay_resume(struct macio_dev *mdev)
760 760
761/* Definitions of "ops" structures. 761/* Definitions of "ops" structures.
762 */ 762 */
763static struct mb_ops ohare_mb_ops __pmacdata = { 763static struct mb_ops ohare_mb_ops = {
764 .name = "Ohare", 764 .name = "Ohare",
765 .content = ohare_mb_content, 765 .content = ohare_mb_content,
766 .power = ohare_mb_power, 766 .power = ohare_mb_power,
@@ -769,7 +769,7 @@ static struct mb_ops ohare_mb_ops __pmacdata = {
769 .un_reset_ide = ohare_mb_un_reset_ide, 769 .un_reset_ide = ohare_mb_un_reset_ide,
770}; 770};
771 771
772static struct mb_ops heathrow_mb_ops __pmacdata = { 772static struct mb_ops heathrow_mb_ops = {
773 .name = "Heathrow", 773 .name = "Heathrow",
774 .content = heathrow_mb_content, 774 .content = heathrow_mb_content,
775 .power = heathrow_mb_power, 775 .power = heathrow_mb_power,
@@ -778,7 +778,7 @@ static struct mb_ops heathrow_mb_ops __pmacdata = {
778 .un_reset_ide = heathrow_mb_un_reset_ide, 778 .un_reset_ide = heathrow_mb_un_reset_ide,
779}; 779};
780 780
781static struct mb_ops keylargo_mb_ops __pmacdata = { 781static struct mb_ops keylargo_mb_ops = {
782 .name = "KeyLargo", 782 .name = "KeyLargo",
783 .init = keylargo_mb_init, 783 .init = keylargo_mb_init,
784 .content = keylargo_mb_content, 784 .content = keylargo_mb_content,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 9b38674fbf75..34f3c7e2d832 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1094,7 +1094,7 @@ static int smu_release(struct inode *inode, struct file *file)
1094} 1094}
1095 1095
1096 1096
1097static struct file_operations smu_device_fops __pmacdata = { 1097static struct file_operations smu_device_fops = {
1098 .llseek = no_llseek, 1098 .llseek = no_llseek,
1099 .read = smu_read, 1099 .read = smu_read,
1100 .write = smu_write, 1100 .write = smu_write,
@@ -1103,7 +1103,7 @@ static struct file_operations smu_device_fops __pmacdata = {
1103 .release = smu_release, 1103 .release = smu_release,
1104}; 1104};
1105 1105
1106static struct miscdevice pmu_device __pmacdata = { 1106static struct miscdevice pmu_device = {
1107 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops 1107 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
1108}; 1108};
1109 1109
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 417deb5de108..d843a6c9c6df 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -37,7 +37,6 @@ static DEFINE_SPINLOCK(cuda_lock);
37 37
38#ifdef CONFIG_MAC 38#ifdef CONFIG_MAC
39#define CUDA_IRQ IRQ_MAC_ADB 39#define CUDA_IRQ IRQ_MAC_ADB
40#define __openfirmware
41#define eieio() 40#define eieio()
42#else 41#else
43#define CUDA_IRQ vias->intrs[0].line 42#define CUDA_IRQ vias->intrs[0].line
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 645a2e5c70ab..91920a1140fa 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -244,7 +244,7 @@ int pmu_wink(struct adb_request *req);
244 * - the number of response bytes which the PMU will return, or 244 * - the number of response bytes which the PMU will return, or
245 * -1 if it will send a length byte. 245 * -1 if it will send a length byte.
246 */ 246 */
247static const s8 pmu_data_len[256][2] __openfirmwaredata = { 247static const s8 pmu_data_len[256][2] = {
248/* 0 1 2 3 4 5 6 7 */ 248/* 0 1 2 3 4 5 6 7 */
249/*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 249/*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
250/*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 250/*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
@@ -295,7 +295,7 @@ static struct backlight_controller pmu_backlight_controller = {
295}; 295};
296#endif /* CONFIG_PMAC_BACKLIGHT */ 296#endif /* CONFIG_PMAC_BACKLIGHT */
297 297
298int __openfirmware 298int
299find_via_pmu(void) 299find_via_pmu(void)
300{ 300{
301 if (via != 0) 301 if (via != 0)
@@ -374,7 +374,7 @@ find_via_pmu(void)
374} 374}
375 375
376#ifdef CONFIG_ADB 376#ifdef CONFIG_ADB
377static int __openfirmware 377static int
378pmu_probe(void) 378pmu_probe(void)
379{ 379{
380 return vias == NULL? -ENODEV: 0; 380 return vias == NULL? -ENODEV: 0;
@@ -405,7 +405,7 @@ static int __init via_pmu_start(void)
405 bright_req_2.complete = 1; 405 bright_req_2.complete = 1;
406 batt_req.complete = 1; 406 batt_req.complete = 1;
407 407
408#ifdef CONFIG_PPC32 408#if defined(CONFIG_PPC32) && !defined(CONFIG_PPC_MERGE)
409 if (pmu_kind == PMU_KEYLARGO_BASED) 409 if (pmu_kind == PMU_KEYLARGO_BASED)
410 openpic_set_irq_priority(vias->intrs[0].line, 410 openpic_set_irq_priority(vias->intrs[0].line,
411 OPENPIC_PRIORITY_DEFAULT + 1); 411 OPENPIC_PRIORITY_DEFAULT + 1);
@@ -520,7 +520,7 @@ static int __init via_pmu_dev_init(void)
520 520
521device_initcall(via_pmu_dev_init); 521device_initcall(via_pmu_dev_init);
522 522
523static int __openfirmware 523static int
524init_pmu(void) 524init_pmu(void)
525{ 525{
526 int timeout; 526 int timeout;
@@ -588,17 +588,6 @@ pmu_get_model(void)
588 return pmu_kind; 588 return pmu_kind;
589} 589}
590 590
591#ifndef CONFIG_PPC64
592static inline void wakeup_decrementer(void)
593{
594 set_dec(tb_ticks_per_jiffy);
595 /* No currently-supported powerbook has a 601,
596 * so use get_tbl, not native
597 */
598 last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
599}
600#endif
601
602static void pmu_set_server_mode(int server_mode) 591static void pmu_set_server_mode(int server_mode)
603{ 592{
604 struct adb_request req; 593 struct adb_request req;
@@ -625,7 +614,7 @@ static void pmu_set_server_mode(int server_mode)
625/* This new version of the code for 2400/3400/3500 powerbooks 614/* This new version of the code for 2400/3400/3500 powerbooks
626 * is inspired from the implementation in gkrellm-pmu 615 * is inspired from the implementation in gkrellm-pmu
627 */ 616 */
628static void __pmac 617static void
629done_battery_state_ohare(struct adb_request* req) 618done_battery_state_ohare(struct adb_request* req)
630{ 619{
631 /* format: 620 /* format:
@@ -713,7 +702,7 @@ done_battery_state_ohare(struct adb_request* req)
713 clear_bit(0, &async_req_locks); 702 clear_bit(0, &async_req_locks);
714} 703}
715 704
716static void __pmac 705static void
717done_battery_state_smart(struct adb_request* req) 706done_battery_state_smart(struct adb_request* req)
718{ 707{
719 /* format: 708 /* format:
@@ -791,7 +780,7 @@ done_battery_state_smart(struct adb_request* req)
791 clear_bit(0, &async_req_locks); 780 clear_bit(0, &async_req_locks);
792} 781}
793 782
794static void __pmac 783static void
795query_battery_state(void) 784query_battery_state(void)
796{ 785{
797 if (test_and_set_bit(0, &async_req_locks)) 786 if (test_and_set_bit(0, &async_req_locks))
@@ -804,7 +793,7 @@ query_battery_state(void)
804 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); 793 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
805} 794}
806 795
807static int __pmac 796static int
808proc_get_info(char *page, char **start, off_t off, 797proc_get_info(char *page, char **start, off_t off,
809 int count, int *eof, void *data) 798 int count, int *eof, void *data)
810{ 799{
@@ -819,7 +808,7 @@ proc_get_info(char *page, char **start, off_t off,
819 return p - page; 808 return p - page;
820} 809}
821 810
822static int __pmac 811static int
823proc_get_irqstats(char *page, char **start, off_t off, 812proc_get_irqstats(char *page, char **start, off_t off,
824 int count, int *eof, void *data) 813 int count, int *eof, void *data)
825{ 814{
@@ -846,7 +835,7 @@ proc_get_irqstats(char *page, char **start, off_t off,
846 return p - page; 835 return p - page;
847} 836}
848 837
849static int __pmac 838static int
850proc_get_batt(char *page, char **start, off_t off, 839proc_get_batt(char *page, char **start, off_t off,
851 int count, int *eof, void *data) 840 int count, int *eof, void *data)
852{ 841{
@@ -870,7 +859,7 @@ proc_get_batt(char *page, char **start, off_t off,
870 return p - page; 859 return p - page;
871} 860}
872 861
873static int __pmac 862static int
874proc_read_options(char *page, char **start, off_t off, 863proc_read_options(char *page, char **start, off_t off,
875 int count, int *eof, void *data) 864 int count, int *eof, void *data)
876{ 865{
@@ -887,7 +876,7 @@ proc_read_options(char *page, char **start, off_t off,
887 return p - page; 876 return p - page;
888} 877}
889 878
890static int __pmac 879static int
891proc_write_options(struct file *file, const char __user *buffer, 880proc_write_options(struct file *file, const char __user *buffer,
892 unsigned long count, void *data) 881 unsigned long count, void *data)
893{ 882{
@@ -934,7 +923,7 @@ proc_write_options(struct file *file, const char __user *buffer,
934 923
935#ifdef CONFIG_ADB 924#ifdef CONFIG_ADB
936/* Send an ADB command */ 925/* Send an ADB command */
937static int __pmac 926static int
938pmu_send_request(struct adb_request *req, int sync) 927pmu_send_request(struct adb_request *req, int sync)
939{ 928{
940 int i, ret; 929 int i, ret;
@@ -1014,7 +1003,7 @@ pmu_send_request(struct adb_request *req, int sync)
1014} 1003}
1015 1004
1016/* Enable/disable autopolling */ 1005/* Enable/disable autopolling */
1017static int __pmac 1006static int
1018pmu_adb_autopoll(int devs) 1007pmu_adb_autopoll(int devs)
1019{ 1008{
1020 struct adb_request req; 1009 struct adb_request req;
@@ -1037,7 +1026,7 @@ pmu_adb_autopoll(int devs)
1037} 1026}
1038 1027
1039/* Reset the ADB bus */ 1028/* Reset the ADB bus */
1040static int __pmac 1029static int
1041pmu_adb_reset_bus(void) 1030pmu_adb_reset_bus(void)
1042{ 1031{
1043 struct adb_request req; 1032 struct adb_request req;
@@ -1072,7 +1061,7 @@ pmu_adb_reset_bus(void)
1072#endif /* CONFIG_ADB */ 1061#endif /* CONFIG_ADB */
1073 1062
1074/* Construct and send a pmu request */ 1063/* Construct and send a pmu request */
1075int __openfirmware 1064int
1076pmu_request(struct adb_request *req, void (*done)(struct adb_request *), 1065pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
1077 int nbytes, ...) 1066 int nbytes, ...)
1078{ 1067{
@@ -1098,7 +1087,7 @@ pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
1098 return pmu_queue_request(req); 1087 return pmu_queue_request(req);
1099} 1088}
1100 1089
1101int __pmac 1090int
1102pmu_queue_request(struct adb_request *req) 1091pmu_queue_request(struct adb_request *req)
1103{ 1092{
1104 unsigned long flags; 1093 unsigned long flags;
@@ -1190,7 +1179,7 @@ pmu_done(struct adb_request *req)
1190 (*done)(req); 1179 (*done)(req);
1191} 1180}
1192 1181
1193static void __pmac 1182static void
1194pmu_start(void) 1183pmu_start(void)
1195{ 1184{
1196 struct adb_request *req; 1185 struct adb_request *req;
@@ -1214,7 +1203,7 @@ pmu_start(void)
1214 send_byte(req->data[0]); 1203 send_byte(req->data[0]);
1215} 1204}
1216 1205
1217void __openfirmware 1206void
1218pmu_poll(void) 1207pmu_poll(void)
1219{ 1208{
1220 if (!via) 1209 if (!via)
@@ -1224,7 +1213,7 @@ pmu_poll(void)
1224 via_pmu_interrupt(0, NULL, NULL); 1213 via_pmu_interrupt(0, NULL, NULL);
1225} 1214}
1226 1215
1227void __openfirmware 1216void
1228pmu_poll_adb(void) 1217pmu_poll_adb(void)
1229{ 1218{
1230 if (!via) 1219 if (!via)
@@ -1239,7 +1228,7 @@ pmu_poll_adb(void)
1239 || req_awaiting_reply)); 1228 || req_awaiting_reply));
1240} 1229}
1241 1230
1242void __openfirmware 1231void
1243pmu_wait_complete(struct adb_request *req) 1232pmu_wait_complete(struct adb_request *req)
1244{ 1233{
1245 if (!via) 1234 if (!via)
@@ -1253,7 +1242,7 @@ pmu_wait_complete(struct adb_request *req)
1253 * This is done to avoid spurrious shutdowns when we know we'll have 1242 * This is done to avoid spurrious shutdowns when we know we'll have
1254 * interrupts switched off for a long time 1243 * interrupts switched off for a long time
1255 */ 1244 */
1256void __openfirmware 1245void
1257pmu_suspend(void) 1246pmu_suspend(void)
1258{ 1247{
1259 unsigned long flags; 1248 unsigned long flags;
@@ -1293,7 +1282,7 @@ pmu_suspend(void)
1293 } while (1); 1282 } while (1);
1294} 1283}
1295 1284
1296void __openfirmware 1285void
1297pmu_resume(void) 1286pmu_resume(void)
1298{ 1287{
1299 unsigned long flags; 1288 unsigned long flags;
@@ -1323,7 +1312,7 @@ pmu_resume(void)
1323} 1312}
1324 1313
1325/* Interrupt data could be the result data from an ADB cmd */ 1314/* Interrupt data could be the result data from an ADB cmd */
1326static void __pmac 1315static void
1327pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs) 1316pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
1328{ 1317{
1329 unsigned char ints, pirq; 1318 unsigned char ints, pirq;
@@ -1435,7 +1424,7 @@ next:
1435 goto next; 1424 goto next;
1436} 1425}
1437 1426
1438static struct adb_request* __pmac 1427static struct adb_request*
1439pmu_sr_intr(struct pt_regs *regs) 1428pmu_sr_intr(struct pt_regs *regs)
1440{ 1429{
1441 struct adb_request *req; 1430 struct adb_request *req;
@@ -1541,7 +1530,7 @@ pmu_sr_intr(struct pt_regs *regs)
1541 return NULL; 1530 return NULL;
1542} 1531}
1543 1532
1544static irqreturn_t __pmac 1533static irqreturn_t
1545via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs) 1534via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
1546{ 1535{
1547 unsigned long flags; 1536 unsigned long flags;
@@ -1629,7 +1618,7 @@ no_free_slot:
1629 return IRQ_RETVAL(handled); 1618 return IRQ_RETVAL(handled);
1630} 1619}
1631 1620
1632void __pmac 1621void
1633pmu_unlock(void) 1622pmu_unlock(void)
1634{ 1623{
1635 unsigned long flags; 1624 unsigned long flags;
@@ -1642,7 +1631,7 @@ pmu_unlock(void)
1642} 1631}
1643 1632
1644 1633
1645static irqreturn_t __pmac 1634static irqreturn_t
1646gpio1_interrupt(int irq, void *arg, struct pt_regs *regs) 1635gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
1647{ 1636{
1648 unsigned long flags; 1637 unsigned long flags;
@@ -1663,12 +1652,12 @@ gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
1663} 1652}
1664 1653
1665#ifdef CONFIG_PMAC_BACKLIGHT 1654#ifdef CONFIG_PMAC_BACKLIGHT
1666static int backlight_to_bright[] __pmacdata = { 1655static int backlight_to_bright[] = {
1667 0x7f, 0x46, 0x42, 0x3e, 0x3a, 0x36, 0x32, 0x2e, 1656 0x7f, 0x46, 0x42, 0x3e, 0x3a, 0x36, 0x32, 0x2e,
1668 0x2a, 0x26, 0x22, 0x1e, 0x1a, 0x16, 0x12, 0x0e 1657 0x2a, 0x26, 0x22, 0x1e, 0x1a, 0x16, 0x12, 0x0e
1669}; 1658};
1670 1659
1671static int __openfirmware 1660static int
1672pmu_set_backlight_enable(int on, int level, void* data) 1661pmu_set_backlight_enable(int on, int level, void* data)
1673{ 1662{
1674 struct adb_request req; 1663 struct adb_request req;
@@ -1688,7 +1677,7 @@ pmu_set_backlight_enable(int on, int level, void* data)
1688 return 0; 1677 return 0;
1689} 1678}
1690 1679
1691static void __openfirmware 1680static void
1692pmu_bright_complete(struct adb_request *req) 1681pmu_bright_complete(struct adb_request *req)
1693{ 1682{
1694 if (req == &bright_req_1) 1683 if (req == &bright_req_1)
@@ -1697,7 +1686,7 @@ pmu_bright_complete(struct adb_request *req)
1697 clear_bit(2, &async_req_locks); 1686 clear_bit(2, &async_req_locks);
1698} 1687}
1699 1688
1700static int __openfirmware 1689static int
1701pmu_set_backlight_level(int level, void* data) 1690pmu_set_backlight_level(int level, void* data)
1702{ 1691{
1703 if (vias == NULL) 1692 if (vias == NULL)
@@ -1717,7 +1706,7 @@ pmu_set_backlight_level(int level, void* data)
1717} 1706}
1718#endif /* CONFIG_PMAC_BACKLIGHT */ 1707#endif /* CONFIG_PMAC_BACKLIGHT */
1719 1708
1720void __pmac 1709void
1721pmu_enable_irled(int on) 1710pmu_enable_irled(int on)
1722{ 1711{
1723 struct adb_request req; 1712 struct adb_request req;
@@ -1732,7 +1721,7 @@ pmu_enable_irled(int on)
1732 pmu_wait_complete(&req); 1721 pmu_wait_complete(&req);
1733} 1722}
1734 1723
1735void __pmac 1724void
1736pmu_restart(void) 1725pmu_restart(void)
1737{ 1726{
1738 struct adb_request req; 1727 struct adb_request req;
@@ -1757,7 +1746,7 @@ pmu_restart(void)
1757 ; 1746 ;
1758} 1747}
1759 1748
1760void __pmac 1749void
1761pmu_shutdown(void) 1750pmu_shutdown(void)
1762{ 1751{
1763 struct adb_request req; 1752 struct adb_request req;
@@ -2076,7 +2065,7 @@ pmu_unregister_sleep_notifier(struct pmu_sleep_notifier* n)
2076} 2065}
2077 2066
2078/* Sleep is broadcast last-to-first */ 2067/* Sleep is broadcast last-to-first */
2079static int __pmac 2068static int
2080broadcast_sleep(int when, int fallback) 2069broadcast_sleep(int when, int fallback)
2081{ 2070{
2082 int ret = PBOOK_SLEEP_OK; 2071 int ret = PBOOK_SLEEP_OK;
@@ -2101,7 +2090,7 @@ broadcast_sleep(int when, int fallback)
2101} 2090}
2102 2091
2103/* Wake is broadcast first-to-last */ 2092/* Wake is broadcast first-to-last */
2104static int __pmac 2093static int
2105broadcast_wake(void) 2094broadcast_wake(void)
2106{ 2095{
2107 int ret = PBOOK_SLEEP_OK; 2096 int ret = PBOOK_SLEEP_OK;
@@ -2132,7 +2121,7 @@ static struct pci_save {
2132} *pbook_pci_saves; 2121} *pbook_pci_saves;
2133static int pbook_npci_saves; 2122static int pbook_npci_saves;
2134 2123
2135static void __pmac 2124static void
2136pbook_alloc_pci_save(void) 2125pbook_alloc_pci_save(void)
2137{ 2126{
2138 int npci; 2127 int npci;
@@ -2149,7 +2138,7 @@ pbook_alloc_pci_save(void)
2149 pbook_npci_saves = npci; 2138 pbook_npci_saves = npci;
2150} 2139}
2151 2140
2152static void __pmac 2141static void
2153pbook_free_pci_save(void) 2142pbook_free_pci_save(void)
2154{ 2143{
2155 if (pbook_pci_saves == NULL) 2144 if (pbook_pci_saves == NULL)
@@ -2159,7 +2148,7 @@ pbook_free_pci_save(void)
2159 pbook_npci_saves = 0; 2148 pbook_npci_saves = 0;
2160} 2149}
2161 2150
2162static void __pmac 2151static void
2163pbook_pci_save(void) 2152pbook_pci_save(void)
2164{ 2153{
2165 struct pci_save *ps = pbook_pci_saves; 2154 struct pci_save *ps = pbook_pci_saves;
@@ -2190,7 +2179,7 @@ pbook_pci_save(void)
2190 * during boot, it will be in the pci dev list. If it's disabled at this point 2179 * during boot, it will be in the pci dev list. If it's disabled at this point
2191 * (and it will probably be), then you can't access it's config space. 2180 * (and it will probably be), then you can't access it's config space.
2192 */ 2181 */
2193static void __pmac 2182static void
2194pbook_pci_restore(void) 2183pbook_pci_restore(void)
2195{ 2184{
2196 u16 cmd; 2185 u16 cmd;
@@ -2238,7 +2227,7 @@ pbook_pci_restore(void)
2238 2227
2239#ifdef DEBUG_SLEEP 2228#ifdef DEBUG_SLEEP
2240/* N.B. This doesn't work on the 3400 */ 2229/* N.B. This doesn't work on the 3400 */
2241void __pmac 2230void
2242pmu_blink(int n) 2231pmu_blink(int n)
2243{ 2232{
2244 struct adb_request req; 2233 struct adb_request req;
@@ -2277,9 +2266,9 @@ pmu_blink(int n)
2277 * Put the powerbook to sleep. 2266 * Put the powerbook to sleep.
2278 */ 2267 */
2279 2268
2280static u32 save_via[8] __pmacdata; 2269static u32 save_via[8];
2281 2270
2282static void __pmac 2271static void
2283save_via_state(void) 2272save_via_state(void)
2284{ 2273{
2285 save_via[0] = in_8(&via[ANH]); 2274 save_via[0] = in_8(&via[ANH]);
@@ -2291,7 +2280,7 @@ save_via_state(void)
2291 save_via[6] = in_8(&via[T1CL]); 2280 save_via[6] = in_8(&via[T1CL]);
2292 save_via[7] = in_8(&via[T1CH]); 2281 save_via[7] = in_8(&via[T1CH]);
2293} 2282}
2294static void __pmac 2283static void
2295restore_via_state(void) 2284restore_via_state(void)
2296{ 2285{
2297 out_8(&via[ANH], save_via[0]); 2286 out_8(&via[ANH], save_via[0]);
@@ -2307,7 +2296,7 @@ restore_via_state(void)
2307 out_8(&via[IER], IER_SET | SR_INT | CB1_INT); 2296 out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
2308} 2297}
2309 2298
2310static int __pmac 2299static int
2311pmac_suspend_devices(void) 2300pmac_suspend_devices(void)
2312{ 2301{
2313 int ret; 2302 int ret;
@@ -2397,7 +2386,7 @@ pmac_suspend_devices(void)
2397 return 0; 2386 return 0;
2398} 2387}
2399 2388
2400static int __pmac 2389static int
2401pmac_wakeup_devices(void) 2390pmac_wakeup_devices(void)
2402{ 2391{
2403 mdelay(100); 2392 mdelay(100);
@@ -2436,7 +2425,7 @@ pmac_wakeup_devices(void)
2436#define GRACKLE_NAP (1<<4) 2425#define GRACKLE_NAP (1<<4)
2437#define GRACKLE_SLEEP (1<<3) 2426#define GRACKLE_SLEEP (1<<3)
2438 2427
2439int __pmac 2428int
2440powerbook_sleep_grackle(void) 2429powerbook_sleep_grackle(void)
2441{ 2430{
2442 unsigned long save_l2cr; 2431 unsigned long save_l2cr;
@@ -2520,7 +2509,7 @@ powerbook_sleep_grackle(void)
2520 return 0; 2509 return 0;
2521} 2510}
2522 2511
2523static int __pmac 2512static int
2524powerbook_sleep_Core99(void) 2513powerbook_sleep_Core99(void)
2525{ 2514{
2526 unsigned long save_l2cr; 2515 unsigned long save_l2cr;
@@ -2620,7 +2609,7 @@ powerbook_sleep_Core99(void)
2620#define PB3400_MEM_CTRL 0xf8000000 2609#define PB3400_MEM_CTRL 0xf8000000
2621#define PB3400_MEM_CTRL_SLEEP 0x70 2610#define PB3400_MEM_CTRL_SLEEP 0x70
2622 2611
2623static int __pmac 2612static int
2624powerbook_sleep_3400(void) 2613powerbook_sleep_3400(void)
2625{ 2614{
2626 int ret, i, x; 2615 int ret, i, x;
@@ -2720,9 +2709,9 @@ struct pmu_private {
2720}; 2709};
2721 2710
2722static LIST_HEAD(all_pmu_pvt); 2711static LIST_HEAD(all_pmu_pvt);
2723static DEFINE_SPINLOCK(all_pvt_lock __pmacdata); 2712static DEFINE_SPINLOCK(all_pvt_lock);
2724 2713
2725static void __pmac 2714static void
2726pmu_pass_intr(unsigned char *data, int len) 2715pmu_pass_intr(unsigned char *data, int len)
2727{ 2716{
2728 struct pmu_private *pp; 2717 struct pmu_private *pp;
@@ -2751,7 +2740,7 @@ pmu_pass_intr(unsigned char *data, int len)
2751 spin_unlock_irqrestore(&all_pvt_lock, flags); 2740 spin_unlock_irqrestore(&all_pvt_lock, flags);
2752} 2741}
2753 2742
2754static int __pmac 2743static int
2755pmu_open(struct inode *inode, struct file *file) 2744pmu_open(struct inode *inode, struct file *file)
2756{ 2745{
2757 struct pmu_private *pp; 2746 struct pmu_private *pp;
@@ -2773,7 +2762,7 @@ pmu_open(struct inode *inode, struct file *file)
2773 return 0; 2762 return 0;
2774} 2763}
2775 2764
2776static ssize_t __pmac 2765static ssize_t
2777pmu_read(struct file *file, char __user *buf, 2766pmu_read(struct file *file, char __user *buf,
2778 size_t count, loff_t *ppos) 2767 size_t count, loff_t *ppos)
2779{ 2768{
@@ -2825,14 +2814,14 @@ pmu_read(struct file *file, char __user *buf,
2825 return ret; 2814 return ret;
2826} 2815}
2827 2816
2828static ssize_t __pmac 2817static ssize_t
2829pmu_write(struct file *file, const char __user *buf, 2818pmu_write(struct file *file, const char __user *buf,
2830 size_t count, loff_t *ppos) 2819 size_t count, loff_t *ppos)
2831{ 2820{
2832 return 0; 2821 return 0;
2833} 2822}
2834 2823
2835static unsigned int __pmac 2824static unsigned int
2836pmu_fpoll(struct file *filp, poll_table *wait) 2825pmu_fpoll(struct file *filp, poll_table *wait)
2837{ 2826{
2838 struct pmu_private *pp = filp->private_data; 2827 struct pmu_private *pp = filp->private_data;
@@ -2849,7 +2838,7 @@ pmu_fpoll(struct file *filp, poll_table *wait)
2849 return mask; 2838 return mask;
2850} 2839}
2851 2840
2852static int __pmac 2841static int
2853pmu_release(struct inode *inode, struct file *file) 2842pmu_release(struct inode *inode, struct file *file)
2854{ 2843{
2855 struct pmu_private *pp = file->private_data; 2844 struct pmu_private *pp = file->private_data;
@@ -2874,8 +2863,7 @@ pmu_release(struct inode *inode, struct file *file)
2874 return 0; 2863 return 0;
2875} 2864}
2876 2865
2877/* Note: removed __openfirmware here since it causes link errors */ 2866static int
2878static int __pmac
2879pmu_ioctl(struct inode * inode, struct file *filp, 2867pmu_ioctl(struct inode * inode, struct file *filp,
2880 u_int cmd, u_long arg) 2868 u_int cmd, u_long arg)
2881{ 2869{
@@ -2957,7 +2945,7 @@ pmu_ioctl(struct inode * inode, struct file *filp,
2957 return error; 2945 return error;
2958} 2946}
2959 2947
2960static struct file_operations pmu_device_fops __pmacdata = { 2948static struct file_operations pmu_device_fops = {
2961 .read = pmu_read, 2949 .read = pmu_read,
2962 .write = pmu_write, 2950 .write = pmu_write,
2963 .poll = pmu_fpoll, 2951 .poll = pmu_fpoll,
@@ -2966,7 +2954,7 @@ static struct file_operations pmu_device_fops __pmacdata = {
2966 .release = pmu_release, 2954 .release = pmu_release,
2967}; 2955};
2968 2956
2969static struct miscdevice pmu_device __pmacdata = { 2957static struct miscdevice pmu_device = {
2970 PMU_MINOR, "pmu", &pmu_device_fops 2958 PMU_MINOR, "pmu", &pmu_device_fops
2971}; 2959};
2972 2960
@@ -2982,7 +2970,7 @@ device_initcall(pmu_device_init);
2982 2970
2983 2971
2984#ifdef DEBUG_SLEEP 2972#ifdef DEBUG_SLEEP
2985static inline void __pmac 2973static inline void
2986polled_handshake(volatile unsigned char __iomem *via) 2974polled_handshake(volatile unsigned char __iomem *via)
2987{ 2975{
2988 via[B] &= ~TREQ; eieio(); 2976 via[B] &= ~TREQ; eieio();
@@ -2993,7 +2981,7 @@ polled_handshake(volatile unsigned char __iomem *via)
2993 ; 2981 ;
2994} 2982}
2995 2983
2996static inline void __pmac 2984static inline void
2997polled_send_byte(volatile unsigned char __iomem *via, int x) 2985polled_send_byte(volatile unsigned char __iomem *via, int x)
2998{ 2986{
2999 via[ACR] |= SR_OUT | SR_EXT; eieio(); 2987 via[ACR] |= SR_OUT | SR_EXT; eieio();
@@ -3001,7 +2989,7 @@ polled_send_byte(volatile unsigned char __iomem *via, int x)
3001 polled_handshake(via); 2989 polled_handshake(via);
3002} 2990}
3003 2991
3004static inline int __pmac 2992static inline int
3005polled_recv_byte(volatile unsigned char __iomem *via) 2993polled_recv_byte(volatile unsigned char __iomem *via)
3006{ 2994{
3007 int x; 2995 int x;
@@ -3013,7 +3001,7 @@ polled_recv_byte(volatile unsigned char __iomem *via)
3013 return x; 3001 return x;
3014} 3002}
3015 3003
3016int __pmac 3004int
3017pmu_polled_request(struct adb_request *req) 3005pmu_polled_request(struct adb_request *req)
3018{ 3006{
3019 unsigned long flags; 3007 unsigned long flags;
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index 820dc52e30bc..6f80d76ac17c 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -835,7 +835,7 @@ static struct pci_save {
835} *pbook_pci_saves; 835} *pbook_pci_saves;
836static int n_pbook_pci_saves; 836static int n_pbook_pci_saves;
837 837
838static inline void __openfirmware 838static inline void
839pbook_pci_save(void) 839pbook_pci_save(void)
840{ 840{
841 int npci; 841 int npci;
@@ -863,7 +863,7 @@ pbook_pci_save(void)
863 } 863 }
864} 864}
865 865
866static inline void __openfirmware 866static inline void
867pbook_pci_restore(void) 867pbook_pci_restore(void)
868{ 868{
869 u16 cmd; 869 u16 cmd;
@@ -902,7 +902,7 @@ pbook_pci_restore(void)
902#define IRQ_ENABLE ((unsigned int *)0xf3000024) 902#define IRQ_ENABLE ((unsigned int *)0xf3000024)
903#define MEM_CTRL ((unsigned int *)0xf8000070) 903#define MEM_CTRL ((unsigned int *)0xf8000070)
904 904
905int __openfirmware powerbook_sleep(void) 905int powerbook_sleep(void)
906{ 906{
907 int ret, i, x; 907 int ret, i, x;
908 static int save_backlight; 908 static int save_backlight;
@@ -1001,25 +1001,24 @@ int __openfirmware powerbook_sleep(void)
1001/* 1001/*
1002 * Support for /dev/pmu device 1002 * Support for /dev/pmu device
1003 */ 1003 */
1004static int __openfirmware pmu_open(struct inode *inode, struct file *file) 1004static int pmu_open(struct inode *inode, struct file *file)
1005{ 1005{
1006 return 0; 1006 return 0;
1007} 1007}
1008 1008
1009static ssize_t __openfirmware pmu_read(struct file *file, char *buf, 1009static ssize_t pmu_read(struct file *file, char *buf,
1010 size_t count, loff_t *ppos) 1010 size_t count, loff_t *ppos)
1011{ 1011{
1012 return 0; 1012 return 0;
1013} 1013}
1014 1014
1015static ssize_t __openfirmware pmu_write(struct file *file, const char *buf, 1015static ssize_t pmu_write(struct file *file, const char *buf,
1016 size_t count, loff_t *ppos) 1016 size_t count, loff_t *ppos)
1017{ 1017{
1018 return 0; 1018 return 0;
1019} 1019}
1020 1020
1021/* Note: removed __openfirmware here since it causes link errors */ 1021static int pmu_ioctl(struct inode * inode, struct file *filp,
1022static int /*__openfirmware*/ pmu_ioctl(struct inode * inode, struct file *filp,
1023 u_int cmd, u_long arg) 1022 u_int cmd, u_long arg)
1024{ 1023{
1025 int error; 1024 int error;
diff --git a/drivers/mca/mca-device.c b/drivers/mca/mca-device.c
index 76d430aa243f..e7adf89fae41 100644
--- a/drivers/mca/mca-device.c
+++ b/drivers/mca/mca-device.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/mca.h> 31#include <linux/mca.h>
32#include <linux/string.h>
32 33
33/** 34/**
34 * mca_device_read_stored_pos - read POS register from stored data 35 * mca_device_read_stored_pos - read POS register from stored data
diff --git a/drivers/media/common/ir-common.c b/drivers/media/common/ir-common.c
index 06f4d4686a6c..31fccb4f05d6 100644
--- a/drivers/media/common/ir-common.c
+++ b/drivers/media/common/ir-common.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
25#include <linux/string.h>
25#include <media/ir-common.h> 26#include <media/ir-common.h>
26 27
27/* -------------------------------------------------------------------------- */ 28/* -------------------------------------------------------------------------- */
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 88757e2634e5..2aa767f9bd7d 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -36,6 +36,7 @@
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/rwsem.h> 38#include <linux/rwsem.h>
39#include <linux/sched.h>
39 40
40#include "dvb_ca_en50221.h" 41#include "dvb_ca_en50221.h"
41#include "dvb_ringbuffer.h" 42#include "dvb_ringbuffer.h"
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index 5aa12ebab34f..b595476332cd 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -151,7 +151,7 @@ static struct dvb_usb_properties dtt200u_properties = {
151 .cold_ids = { &dtt200u_usb_table[0], NULL }, 151 .cold_ids = { &dtt200u_usb_table[0], NULL },
152 .warm_ids = { &dtt200u_usb_table[1], NULL }, 152 .warm_ids = { &dtt200u_usb_table[1], NULL },
153 }, 153 },
154 { 0 }, 154 { NULL },
155 } 155 }
156}; 156};
157 157
@@ -192,7 +192,7 @@ static struct dvb_usb_properties wt220u_properties = {
192 .cold_ids = { &dtt200u_usb_table[2], NULL }, 192 .cold_ids = { &dtt200u_usb_table[2], NULL },
193 .warm_ids = { &dtt200u_usb_table[3], NULL }, 193 .warm_ids = { &dtt200u_usb_table[3], NULL },
194 }, 194 },
195 { 0 }, 195 { NULL },
196 } 196 }
197}; 197};
198 198
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 0f57abeb6d6b..75765e3a569c 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -247,7 +247,7 @@ static struct dvb_usb_properties vp7045_properties = {
247 .cold_ids = { &vp7045_usb_table[2], NULL }, 247 .cold_ids = { &vp7045_usb_table[2], NULL },
248 .warm_ids = { &vp7045_usb_table[3], NULL }, 248 .warm_ids = { &vp7045_usb_table[3], NULL },
249 }, 249 },
250 { 0 }, 250 { NULL },
251 }	251 }
252}; 252};
253 253
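The three { 0 } -> { NULL } hunks above (dtt200u and vp7045) all touch the terminating sentinel of a device-description table whose first member is a pointer, so spelling the terminator as NULL is the type-correct form and, most likely, also keeps sparse from warning about a plain integer used as a NULL pointer. A minimal sketch of the sentinel pattern; the struct, field and table names below are made up for illustration and are not from this commit:

struct example_id {
	const char *name;		/* first member is a pointer */
	unsigned long driver_info;
};

static const struct example_id example_id_table[] = {
	{ "cold-device", 0x01 },
	{ "warm-device", 0x02 },
	{ NULL },			/* sentinel: end of the table */
};

Code that walks such a table stops at the entry whose name is NULL, so no separate length field is needed.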
diff --git a/drivers/media/dvb/frontends/bcm3510.c b/drivers/media/dvb/frontends/bcm3510.c
index f5fdc5c3e605..f6d4ee78bdd4 100644
--- a/drivers/media/dvb/frontends/bcm3510.c
+++ b/drivers/media/dvb/frontends/bcm3510.c
@@ -36,6 +36,9 @@
36#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
37#include <linux/device.h> 37#include <linux/device.h>
38#include <linux/firmware.h> 38#include <linux/firmware.h>
39#include <linux/jiffies.h>
40#include <linux/string.h>
41#include <linux/slab.h>
39 42
40#include "dvb_frontend.h" 43#include "dvb_frontend.h"
41#include "bcm3510.h" 44#include "bcm3510.h"
diff --git a/drivers/media/dvb/frontends/dib3000mb.c b/drivers/media/dvb/frontends/dib3000mb.c
index 21433e1831e7..6b0553608610 100644
--- a/drivers/media/dvb/frontends/dib3000mb.c
+++ b/drivers/media/dvb/frontends/dib3000mb.c
@@ -27,6 +27,8 @@
27#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/string.h>
31#include <linux/slab.h>
30 32
31#include "dib3000-common.h" 33#include "dib3000-common.h"
32#include "dib3000mb_priv.h" 34#include "dib3000mb_priv.h"
diff --git a/drivers/media/dvb/frontends/dib3000mc.c b/drivers/media/dvb/frontends/dib3000mc.c
index 441de665fec3..c024fad17337 100644
--- a/drivers/media/dvb/frontends/dib3000mc.c
+++ b/drivers/media/dvb/frontends/dib3000mc.c
@@ -26,6 +26,8 @@
26#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/string.h>
30#include <linux/slab.h>
29 31
30#include "dib3000-common.h" 32#include "dib3000-common.h"
31#include "dib3000mc_priv.h" 33#include "dib3000mc_priv.h"
diff --git a/drivers/media/dvb/frontends/dvb_dummy_fe.c b/drivers/media/dvb/frontends/dvb_dummy_fe.c
index cff93b9d8ab2..794be520d590 100644
--- a/drivers/media/dvb/frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb/frontends/dvb_dummy_fe.c
@@ -22,6 +22,8 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/moduleparam.h> 23#include <linux/moduleparam.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/string.h>
26#include <linux/slab.h>
25 27
26#include "dvb_frontend.h" 28#include "dvb_frontend.h"
27#include "dvb_dummy_fe.h" 29#include "dvb_dummy_fe.h"
diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c
index 7142b9c51dd2..8dde72bd1046 100644
--- a/drivers/media/dvb/frontends/lgdt330x.c
+++ b/drivers/media/dvb/frontends/lgdt330x.c
@@ -37,6 +37,8 @@
37#include <linux/moduleparam.h> 37#include <linux/moduleparam.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/string.h>
41#include <linux/slab.h>
40#include <asm/byteorder.h> 42#include <asm/byteorder.h>
41 43
42#include "dvb_frontend.h" 44#include "dvb_frontend.h"
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c
index e455aecd76b2..e38454901dd1 100644
--- a/drivers/media/dvb/frontends/mt312.c
+++ b/drivers/media/dvb/frontends/mt312.c
@@ -29,6 +29,8 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/moduleparam.h> 31#include <linux/moduleparam.h>
32#include <linux/string.h>
33#include <linux/slab.h>
32 34
33#include "dvb_frontend.h" 35#include "dvb_frontend.h"
34#include "mt312_priv.h" 36#include "mt312_priv.h"
diff --git a/drivers/media/dvb/frontends/mt352.c b/drivers/media/dvb/frontends/mt352.c
index cc1bc0edd65e..f0c610f2c2df 100644
--- a/drivers/media/dvb/frontends/mt352.c
+++ b/drivers/media/dvb/frontends/mt352.c
@@ -35,6 +35,8 @@
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/string.h>
39#include <linux/slab.h>
38 40
39#include "dvb_frontend.h" 41#include "dvb_frontend.h"
40#include "mt352_priv.h" 42#include "mt352_priv.h"
diff --git a/drivers/media/dvb/frontends/nxt2002.c b/drivers/media/dvb/frontends/nxt2002.c
index 35a1d60f1927..30786b1911bd 100644
--- a/drivers/media/dvb/frontends/nxt2002.c
+++ b/drivers/media/dvb/frontends/nxt2002.c
@@ -32,6 +32,8 @@
32#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/device.h> 33#include <linux/device.h>
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/string.h>
36#include <linux/slab.h>
35 37
36#include "dvb_frontend.h" 38#include "dvb_frontend.h"
37#include "nxt2002.h" 39#include "nxt2002.h"
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index b6d0eecc59eb..817b044c7fd1 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -36,6 +36,8 @@
36#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/string.h>
40#include <linux/slab.h>
39#include <asm/byteorder.h> 41#include <asm/byteorder.h>
40 42
41#include "dvb_frontend.h" 43#include "dvb_frontend.h"
diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
index ad56a9958404..8a9db23dd1b7 100644
--- a/drivers/media/dvb/frontends/or51211.c
+++ b/drivers/media/dvb/frontends/or51211.c
@@ -34,6 +34,8 @@
34#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/firmware.h> 36#include <linux/firmware.h>
37#include <linux/string.h>
38#include <linux/slab.h>
37#include <asm/byteorder.h> 39#include <asm/byteorder.h>
38 40
39#include "dvb_frontend.h" 41#include "dvb_frontend.h"
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index c7fe27fd530c..f265418e3261 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -26,6 +26,8 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/jiffies.h>
30#include <asm/div64.h>
29 31
30#include "dvb_frontend.h" 32#include "dvb_frontend.h"
31#include "s5h1420.h" 33#include "s5h1420.h"
diff --git a/drivers/media/dvb/frontends/sp8870.c b/drivers/media/dvb/frontends/sp8870.c
index 764a95a2e212..1c6b2e9264bc 100644
--- a/drivers/media/dvb/frontends/sp8870.c
+++ b/drivers/media/dvb/frontends/sp8870.c
@@ -32,6 +32,8 @@
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/firmware.h> 33#include <linux/firmware.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/string.h>
36#include <linux/slab.h>
35 37
36#include "dvb_frontend.h" 38#include "dvb_frontend.h"
37#include "sp8870.h" 39#include "sp8870.h"
diff --git a/drivers/media/dvb/frontends/sp887x.c b/drivers/media/dvb/frontends/sp887x.c
index d868a6927a16..73384e75625e 100644
--- a/drivers/media/dvb/frontends/sp887x.c
+++ b/drivers/media/dvb/frontends/sp887x.c
@@ -14,6 +14,8 @@
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/firmware.h> 16#include <linux/firmware.h>
17#include <linux/string.h>
18#include <linux/slab.h>
17 19
18#include "dvb_frontend.h" 20#include "dvb_frontend.h"
19#include "sp887x.h" 21#include "sp887x.h"
diff --git a/drivers/media/dvb/frontends/stv0297.c b/drivers/media/dvb/frontends/stv0297.c
index 8d09afd7545d..6122ba754bc5 100644
--- a/drivers/media/dvb/frontends/stv0297.c
+++ b/drivers/media/dvb/frontends/stv0297.c
@@ -24,6 +24,8 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/jiffies.h>
28#include <linux/slab.h>
27 29
28#include "dvb_frontend.h" 30#include "dvb_frontend.h"
29#include "stv0297.h" 31#include "stv0297.h"
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c
index 2d62931f20b5..889d9257215d 100644
--- a/drivers/media/dvb/frontends/stv0299.c
+++ b/drivers/media/dvb/frontends/stv0299.c
@@ -48,6 +48,7 @@
48#include <linux/moduleparam.h> 48#include <linux/moduleparam.h>
49#include <linux/string.h> 49#include <linux/string.h>
50#include <linux/slab.h> 50#include <linux/slab.h>
51#include <linux/jiffies.h>
51#include <asm/div64.h> 52#include <asm/div64.h>
52 53
53#include "dvb_frontend.h" 54#include "dvb_frontend.h"
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index 74cea9f8d721..3529c618f828 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -32,6 +32,10 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34#include <linux/device.h> 34#include <linux/device.h>
35#include <linux/jiffies.h>
36#include <linux/string.h>
37#include <linux/slab.h>
38
35#include "dvb_frontend.h" 39#include "dvb_frontend.h"
36#include "tda1004x.h" 40#include "tda1004x.h"
37 41
diff --git a/drivers/media/dvb/frontends/tda8083.c b/drivers/media/dvb/frontends/tda8083.c
index 168e013d23bd..c05cf1861051 100644
--- a/drivers/media/dvb/frontends/tda8083.c
+++ b/drivers/media/dvb/frontends/tda8083.c
@@ -30,6 +30,7 @@
30#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/jiffies.h>
33#include "dvb_frontend.h" 34#include "dvb_frontend.h"
34#include "tda8083.h" 35#include "tda8083.h"
35 36
diff --git a/drivers/media/radio/miropcm20-rds.c b/drivers/media/radio/miropcm20-rds.c
index df79d5e0aaed..e09214082e01 100644
--- a/drivers/media/radio/miropcm20-rds.c
+++ b/drivers/media/radio/miropcm20-rds.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/miscdevice.h> 16#include <linux/miscdevice.h>
17#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
19#include "miropcm20-rds-core.h" 20#include "miropcm20-rds-core.h"
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
index 018ca887ca85..40d4ea898dbc 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/message/i2o/debug.c
@@ -90,7 +90,7 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
 90 };	 90 };
91 91
92 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE) 92 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
93 printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", 93 printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
94 req_status); 94 req_status);
95 else 95 else
96 printk(KERN_DEBUG "TRANSPORT_%s.\n", 96 printk(KERN_DEBUG "TRANSPORT_%s.\n",
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index d9879965eb50..8eb50cdb8ae1 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -16,6 +16,8 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/i2o.h> 17#include <linux/i2o.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/string.h>
20#include <linux/slab.h>
19#include "core.h" 21#include "core.h"
20 22
21/** 23/**
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 0079a4be0af2..0fb9c4e2ad4c 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -17,6 +17,9 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/rwsem.h> 18#include <linux/rwsem.h>
19#include <linux/i2o.h> 19#include <linux/i2o.h>
20#include <linux/workqueue.h>
21#include <linux/string.h>
22#include <linux/slab.h>
20#include "core.h" 23#include "core.h"
21 24
22#define OSM_NAME "i2o" 25#define OSM_NAME "i2o"
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index bda2c62648ba..b675b4ebbebd 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -30,6 +30,10 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/i2o.h> 31#include <linux/i2o.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/workqueue.h>
34#include <linux/string.h>
35#include <linux/slab.h>
36#include <asm/param.h> /* HZ */
33#include "core.h" 37#include "core.h"
34 38
35#define OSM_NAME "exec-osm" 39#define OSM_NAME "exec-osm"
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 361da8d1d5e7..61b837de4b6a 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -28,6 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/i2o.h> 29#include <linux/i2o.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/sched.h>
31#include "core.h" 32#include "core.h"
32 33
33#define OSM_NAME "i2o" 34#define OSM_NAME "i2o"
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 585cded3d365..a984c0efabf0 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -32,9 +32,12 @@
32#include <linux/suspend.h> 32#include <linux/suspend.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/kthread.h> 34#include <linux/kthread.h>
35#include <linux/delay.h>
35 36
36#include <asm/dma.h> 37#include <asm/dma.h>
37#include <asm/semaphore.h> 38#include <asm/semaphore.h>
39#include <asm/arch/collie.h>
40#include <asm/mach-types.h>
38 41
39#include "ucb1x00.h" 42#include "ucb1x00.h"
40 43
@@ -85,12 +88,23 @@ static inline void ucb1x00_ts_mode_int(struct ucb1x00_ts *ts)
85 */ 88 */
86static inline unsigned int ucb1x00_ts_read_pressure(struct ucb1x00_ts *ts) 89static inline unsigned int ucb1x00_ts_read_pressure(struct ucb1x00_ts *ts)
87{ 90{
88 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 91 if (machine_is_collie()) {
89 UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW | 92 ucb1x00_io_write(ts->ucb, COLLIE_TC35143_GPIO_TBL_CHK, 0);
90 UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND | 93 ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
91 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); 94 UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMX_POW |
95 UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);
92 96
93 return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPY, ts->adcsync); 97 udelay(55);
98
99 return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_AD2, ts->adcsync);
100 } else {
101 ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
102 UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
103 UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
104 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
105
106 return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPY, ts->adcsync);
107 }
94} 108}
95 109
96/* 110/*
@@ -101,12 +115,16 @@ static inline unsigned int ucb1x00_ts_read_pressure(struct ucb1x00_ts *ts)
101 */ 115 */
102static inline unsigned int ucb1x00_ts_read_xpos(struct ucb1x00_ts *ts) 116static inline unsigned int ucb1x00_ts_read_xpos(struct ucb1x00_ts *ts)
103{ 117{
104 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 118 if (machine_is_collie())
105 UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | 119 ucb1x00_io_write(ts->ucb, 0, COLLIE_TC35143_GPIO_TBL_CHK);
106 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); 120 else {
107 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 121 ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
108 UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | 122 UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
109 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); 123 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
124 ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
125 UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
126 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
127 }
110 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 128 ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
111 UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | 129 UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
112 UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); 130 UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);
@@ -124,12 +142,17 @@ static inline unsigned int ucb1x00_ts_read_xpos(struct ucb1x00_ts *ts)
124 */ 142 */
125static inline unsigned int ucb1x00_ts_read_ypos(struct ucb1x00_ts *ts) 143static inline unsigned int ucb1x00_ts_read_ypos(struct ucb1x00_ts *ts)
126{ 144{
127 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 145 if (machine_is_collie())
128 UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | 146 ucb1x00_io_write(ts->ucb, 0, COLLIE_TC35143_GPIO_TBL_CHK);
129 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); 147 else {
130 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 148 ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
131 UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | 149 UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
132 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); 150 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
151 ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
152 UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
153 UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
154 }
155
133 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 156 ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
134 UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | 157 UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
135 UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); 158 UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);
@@ -163,6 +186,15 @@ static inline unsigned int ucb1x00_ts_read_yres(struct ucb1x00_ts *ts)
163 return ucb1x00_adc_read(ts->ucb, 0, ts->adcsync); 186 return ucb1x00_adc_read(ts->ucb, 0, ts->adcsync);
164} 187}
165 188
189static inline int ucb1x00_ts_pen_down(struct ucb1x00_ts *ts)
190{
191 unsigned int val = ucb1x00_reg_read(ts->ucb, UCB_TS_CR);
192 if (machine_is_collie())
193 return (!(val & (UCB_TS_CR_TSPX_LOW)));
194 else
195 return (val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW));
196}
197
166/* 198/*
167 * This is a RT kernel thread that handles the ADC accesses 199 * This is a RT kernel thread that handles the ADC accesses
168 * (mainly so we can use semaphores in the UCB1200 core code 200 * (mainly so we can use semaphores in the UCB1200 core code
@@ -186,7 +218,7 @@ static int ucb1x00_thread(void *_ts)
186 218
187 add_wait_queue(&ts->irq_wait, &wait); 219 add_wait_queue(&ts->irq_wait, &wait);
188 while (!kthread_should_stop()) { 220 while (!kthread_should_stop()) {
189 unsigned int x, y, p, val; 221 unsigned int x, y, p;
190 signed long timeout; 222 signed long timeout;
191 223
192 ts->restart = 0; 224 ts->restart = 0;
@@ -206,12 +238,12 @@ static int ucb1x00_thread(void *_ts)
206 msleep(10); 238 msleep(10);
207 239
208 ucb1x00_enable(ts->ucb); 240 ucb1x00_enable(ts->ucb);
209 val = ucb1x00_reg_read(ts->ucb, UCB_TS_CR);
210 241
211 if (val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW)) { 242
243 if (ucb1x00_ts_pen_down(ts)) {
212 set_task_state(tsk, TASK_INTERRUPTIBLE); 244 set_task_state(tsk, TASK_INTERRUPTIBLE);
213 245
214 ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, UCB_FALLING); 246 ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, machine_is_collie() ? UCB_RISING : UCB_FALLING);
215 ucb1x00_disable(ts->ucb); 247 ucb1x00_disable(ts->ucb);
216 248
217 /* 249 /*
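The ucb1x00-ts hunks above fold the open-coded pen-down test into a ucb1x00_ts_pen_down() helper because, judging from the new code, the Collie wiring reports pen contact with the opposite polarity on TSPX. A rough sketch of that idea with generic names; the bit masks and the collie flag below are illustrative stand-ins, not the real UCB1x00 definitions:

#define TSPX_LOW	(1u << 0)	/* illustrative bit positions */
#define TSMX_LOW	(1u << 1)

static inline int pen_down(unsigned int ts_cr, int collie_wiring)
{
	if (collie_wiring)
		return !(ts_cr & TSPX_LOW);	/* active in the opposite sense */
	return ts_cr & (TSPX_LOW | TSMX_LOW);
}

Centralising the check keeps the polling thread itself free of machine-specific conditionals.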
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index fa83f15fdf16..9b629856c735 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -85,6 +85,12 @@ static void mmc_blk_put(struct mmc_blk_data *md)
85 up(&open_lock); 85 up(&open_lock);
86} 86}
87 87
88static inline int mmc_blk_readonly(struct mmc_card *card)
89{
90 return mmc_card_readonly(card) ||
91 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
92}
93
88static int mmc_blk_open(struct inode *inode, struct file *filp) 94static int mmc_blk_open(struct inode *inode, struct file *filp)
89{ 95{
90 struct mmc_blk_data *md; 96 struct mmc_blk_data *md;
@@ -97,7 +103,7 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
97 ret = 0; 103 ret = 0;
98 104
99 if ((filp->f_mode & FMODE_WRITE) && 105 if ((filp->f_mode & FMODE_WRITE) &&
100 mmc_card_readonly(md->queue.card)) 106 mmc_blk_readonly(md->queue.card))
101 ret = -EROFS; 107 ret = -EROFS;
102 } 108 }
103 109
@@ -410,7 +416,7 @@ static int mmc_blk_probe(struct mmc_card *card)
410 printk(KERN_INFO "%s: %s %s %dKiB %s\n", 416 printk(KERN_INFO "%s: %s %s %dKiB %s\n",
411 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 417 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
412 (card->csd.capacity << card->csd.read_blkbits) / 1024, 418 (card->csd.capacity << card->csd.read_blkbits) / 1024,
413 mmc_card_readonly(card)?"(ro)":""); 419 mmc_blk_readonly(card)?"(ro)":"");
414 420
415 mmc_set_drvdata(card, md); 421 mmc_set_drvdata(card, md);
416 add_disk(md->disk); 422 add_disk(md->disk);
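The mmc_block change above widens the read-only test: a card is refused writes not only when mmc_card_readonly() reports a write-protect condition but also when its CSD does not advertise the block-write command class. The helper below mirrors only that logic; the type, field names and class bit value are made up for illustration:

#include <stdbool.h>

#define BLOCK_WRITE_CLASS	(1u << 4)	/* illustrative class bit */

struct card_info {
	bool wp;			/* write-protect reported by the card */
	unsigned int cmdclass;		/* command-class bitmask from the CSD */
};

static bool card_read_only(const struct card_info *c)
{
	return c->wp || !(c->cmdclass & BLOCK_WRITE_CLASS);
}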
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index 8eba373d42d7..d575e3a018bc 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -29,7 +29,6 @@
29 29
30#include <asm/dma.h> 30#include <asm/dma.h>
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/irq.h>
33#include <asm/scatterlist.h> 32#include <asm/scatterlist.h>
34#include <asm/sizes.h> 33#include <asm/sizes.h>
35 34
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
index 62d235a9a4e2..4f6778f3ee3e 100644
--- a/drivers/mtd/chips/jedec.c
+++ b/drivers/mtd/chips/jedec.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/slab.h>
20#include <linux/mtd/jedec.h> 21#include <linux/mtd/jedec.h>
21#include <linux/mtd/map.h> 22#include <linux/mtd/map.h>
22#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index dfd335e4a2a8..df987a53ed9c 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -44,6 +44,7 @@
44#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/errno.h> 46#include <linux/errno.h>
47#include <linux/string.h>
47#include <linux/mtd/mtd.h> 48#include <linux/mtd/mtd.h>
48#ifdef HAVE_PARTITIONS 49#ifdef HAVE_PARTITIONS
49#include <linux/mtd/partitions.h> 50#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index a423a382095a..765c0179c8df 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -22,6 +22,7 @@
22#include <linux/list.h> 22#include <linux/list.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
25#include <linux/slab.h>
25#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
26 27
27#define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args) 28#define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args)
diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c
index 0ba0ff7d43b9..63104c73ca3c 100644
--- a/drivers/mtd/maps/bast-flash.c
+++ b/drivers/mtd/maps/bast-flash.c
@@ -33,6 +33,7 @@
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/slab.h>
36 37
37#include <linux/mtd/mtd.h> 38#include <linux/mtd/mtd.h>
38#include <linux/mtd/map.h> 39#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index da8584a662f4..c68b31dc7e6d 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -20,6 +20,7 @@
20#include <linux/ioport.h> 20#include <linux/ioport.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/slab.h>
23 24
24#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
25#include <linux/mtd/map.h> 26#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 938c41f2f056..e5b74169fde6 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/slab.h>
16 17
17#include <linux/mtd/mtd.h> 18#include <linux/mtd/mtd.h>
18#include <linux/mtd/map.h> 19#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index 0bc79c93a584..f99519692cb7 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -30,12 +30,15 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <asm/io.h> 33#include <linux/string.h>
34
34#include <linux/mtd/mtd.h> 35#include <linux/mtd/mtd.h>
35#include <linux/mtd/map.h> 36#include <linux/mtd/map.h>
36#include <linux/mtd/partitions.h> 37#include <linux/mtd/partitions.h>
37#include <linux/mtd/concat.h> 38#include <linux/mtd/concat.h>
38 39
40#include <asm/io.h>
41
39/* 42/*
40** The DIL/NetPC keeps its BIOS in two distinct flash blocks. 43** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
41** Destroying any of these blocks transforms the DNPC into 44** Destroying any of these blocks transforms the DNPC into
diff --git a/drivers/mtd/maps/epxa10db-flash.c b/drivers/mtd/maps/epxa10db-flash.c
index ab6dbe2b8cce..1df6188926b3 100644
--- a/drivers/mtd/maps/epxa10db-flash.c
+++ b/drivers/mtd/maps/epxa10db-flash.c
@@ -27,12 +27,15 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <asm/io.h> 30#include <linux/slab.h>
31
31#include <linux/mtd/mtd.h> 32#include <linux/mtd/mtd.h>
32#include <linux/mtd/map.h> 33#include <linux/mtd/map.h>
33#include <linux/mtd/partitions.h> 34#include <linux/mtd/partitions.h>
34 35
36#include <asm/io.h>
35#include <asm/hardware.h> 37#include <asm/hardware.h>
38
36#ifdef CONFIG_EPXA10DB 39#ifdef CONFIG_EPXA10DB
37#define BOARD_NAME "EPXA10DB" 40#define BOARD_NAME "EPXA10DB"
38#else 41#else
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index 068bb6a54520..00f7bbe5479e 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -7,11 +7,14 @@
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/init.h> 9#include <linux/init.h>
10#include <asm/io.h> 10#include <linux/string.h>
11
11#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
12#include <linux/mtd/map.h> 13#include <linux/mtd/map.h>
13#include <linux/mtd/partitions.h> 14#include <linux/mtd/partitions.h>
14 15
16#include <asm/io.h>
17
15#define MAX_NUM_REGIONS 4 18#define MAX_NUM_REGIONS 4
16#define MAX_NUM_PARTITIONS 8 19#define MAX_NUM_PARTITIONS 8
17 20
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index a9f86c7fbd52..1e5d6e1d05f3 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -22,11 +22,13 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/slab.h>
26#include <linux/ioport.h>
27#include <linux/device.h>
28
25#include <linux/mtd/mtd.h> 29#include <linux/mtd/mtd.h>
26#include <linux/mtd/map.h> 30#include <linux/mtd/map.h>
27#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
28#include <linux/ioport.h>
29#include <linux/device.h>
30 32
31#include <asm/io.h> 33#include <asm/io.h>
32#include <asm/hardware.h> 34#include <asm/hardware.h>
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 3fcc32884074..da316e543237 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -20,11 +20,14 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/slab.h>
24#include <linux/ioport.h>
25#include <linux/device.h>
26
23#include <linux/mtd/mtd.h> 27#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h> 28#include <linux/mtd/map.h>
25#include <linux/mtd/partitions.h> 29#include <linux/mtd/partitions.h>
26#include <linux/ioport.h> 30
27#include <linux/device.h>
28#include <asm/io.h> 31#include <asm/io.h>
29#include <asm/mach/flash.h> 32#include <asm/mach/flash.h>
30 33
diff --git a/drivers/mtd/maps/lubbock-flash.c b/drivers/mtd/maps/lubbock-flash.c
index 1298de475c9a..2337e0c46750 100644
--- a/drivers/mtd/maps/lubbock-flash.c
+++ b/drivers/mtd/maps/lubbock-flash.c
@@ -15,10 +15,13 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/slab.h>
19
18#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
19#include <linux/mtd/mtd.h> 21#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h> 22#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h> 23#include <linux/mtd/partitions.h>
24
22#include <asm/io.h> 25#include <asm/io.h>
23#include <asm/hardware.h> 26#include <asm/hardware.h>
24#include <asm/arch/pxa-regs.h> 27#include <asm/arch/pxa-regs.h>
diff --git a/drivers/mtd/maps/mainstone-flash.c b/drivers/mtd/maps/mainstone-flash.c
index 87e93fa60588..da0f8a692628 100644
--- a/drivers/mtd/maps/mainstone-flash.c
+++ b/drivers/mtd/maps/mainstone-flash.c
@@ -16,9 +16,12 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
19#include <linux/slab.h>
20
19#include <linux/mtd/mtd.h> 21#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h> 22#include <linux/mtd/map.h>
21#include <linux/mtd/partitions.h> 23#include <linux/mtd/partitions.h>
24
22#include <asm/io.h> 25#include <asm/io.h>
23#include <asm/hardware.h> 26#include <asm/hardware.h>
24#include <asm/arch/pxa-regs.h> 27#include <asm/arch/pxa-regs.h>
diff --git a/drivers/mtd/maps/omap-toto-flash.c b/drivers/mtd/maps/omap-toto-flash.c
index 496109071cb1..da36e8dddd17 100644
--- a/drivers/mtd/maps/omap-toto-flash.c
+++ b/drivers/mtd/maps/omap-toto-flash.c
@@ -12,9 +12,9 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15
16#include <linux/errno.h> 15#include <linux/errno.h>
17#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/slab.h>
18 18
19#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h> 20#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index b17bca657daf..fa84566245a7 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -36,6 +36,8 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/ioport.h> 38#include <linux/ioport.h>
39#include <linux/slab.h>
40
39#include <linux/mtd/mtd.h> 41#include <linux/mtd/mtd.h>
40#include <linux/mtd/map.h> 42#include <linux/mtd/map.h>
41#include <linux/mtd/partitions.h> 43#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 18dbd3af1eaa..d9c64e99ee32 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/slab.h>
20 21
21#include <linux/mtd/mtd.h> 22#include <linux/mtd/mtd.h>
22#include <linux/mtd/map.h> 23#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 118b04544cad..a0577ea00c3c 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -30,6 +30,7 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/ioport.h> 31#include <linux/ioport.h>
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/slab.h>
33 34
34#include <linux/mtd/mtd.h> 35#include <linux/mtd/mtd.h>
35#include <linux/mtd/map.h> 36#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 995e9991cb8d..4e28b977f224 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -27,12 +27,14 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <asm/io.h> 30#include <linux/slab.h>
31 31
32#include <linux/mtd/mtd.h> 32#include <linux/mtd/mtd.h>
33#include <linux/mtd/map.h> 33#include <linux/mtd/map.h>
34#include <linux/mtd/partitions.h> 34#include <linux/mtd/partitions.h>
35 35
36#include <asm/io.h>
37
36#define FLASH_ADDR 0x40000000 38#define FLASH_ADDR 0x40000000
37#define FLASH_SIZE 0x00800000 39#define FLASH_SIZE 0x00800000
38#define FLASH_BANK_MAX 4 40#define FLASH_BANK_MAX 4
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index b7c32c242bc7..400dd9c89883 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
18#include <linux/sched.h> /* TASK_* */
18#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
19#include <linux/mtd/blktrans.h> 20#include <linux/mtd/blktrans.h>
20 21
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index c534fd5d95cb..16df1e4fb0e9 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/sched.h> /* TASK_* */
16#include <asm/uaccess.h> 17#include <asm/uaccess.h>
17 18
18#include <linux/device.h> 19#include <linux/device.h>
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 8f66d093c80d..f3e65af33a9c 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -14,7 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17 17#include <linux/sched.h> /* TASK_* */
18#include <linux/mtd/mtd.h> 18#include <linux/mtd/mtd.h>
19#include <linux/mtd/concat.h> 19#include <linux/mtd/concat.h>
20 20
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index b47ebcb31e0f..b58ba236a9eb 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -51,6 +51,7 @@
51#include <linux/device.h> 51#include <linux/device.h>
52#include <linux/delay.h> 52#include <linux/delay.h>
53#include <linux/err.h> 53#include <linux/err.h>
54#include <linux/slab.h>
54 55
55#include <linux/mtd/mtd.h> 56#include <linux/mtd/mtd.h>
56#include <linux/mtd/nand.h> 57#include <linux/mtd/nand.h>
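The long run of one-line hunks above (adding <linux/string.h>, <linux/slab.h>, <linux/sched.h>, <linux/jiffies.h> and similar) all apply the same rule: a file includes the header for every symbol it uses directly instead of relying on some other header pulling it in indirectly, which keeps these drivers building when unrelated headers are slimmed down. A small self-contained illustration of the rule, with a made-up helper:

#include <linux/slab.h>		/* kmalloc(), GFP_KERNEL */
#include <linux/string.h>	/* memcpy() */

static void *copy_blob(const void *src, size_t len)
{
	void *dst = kmalloc(len, GFP_KERNEL);	/* needs <linux/slab.h> */

	if (dst)
		memcpy(dst, src, len);		/* needs <linux/string.h> */
	return dst;
}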
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 73f2fcfc557f..bbca8ae8018c 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1658,6 +1658,7 @@ static struct of_device_id bmac_match[] =
1658 }, 1658 },
1659 {}, 1659 {},
1660}; 1660};
1661MODULE_DEVICE_TABLE (of, bmac_match);
1661 1662
1662static struct macio_driver bmac_driver = 1663static struct macio_driver bmac_driver =
1663{ 1664{
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 1ce2c675b8a7..a806dfe54d23 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -552,8 +552,7 @@ static int __init do_eepro_probe(struct net_device *dev)
552 { 552 {
553 unsigned short int WS[32]=WakeupSeq; 553 unsigned short int WS[32]=WakeupSeq;
554 554
555 if (check_region(WakeupPort, 2)==0) { 555 if (request_region(WakeupPort, 2, "eepro wakeup")) {
556
557 if (net_debug>5) 556 if (net_debug>5)
558 printk(KERN_DEBUG "Waking UP\n"); 557 printk(KERN_DEBUG "Waking UP\n");
559 558
@@ -563,7 +562,10 @@ static int __init do_eepro_probe(struct net_device *dev)
563 outb_p(WS[i],WakeupPort); 562 outb_p(WS[i],WakeupPort);
564 if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]); 563 if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]);
565 } 564 }
566 } else printk(KERN_WARNING "Checkregion Failed!\n"); 565
566 release_region(WakeupPort, 2);
567 } else
568 printk(KERN_WARNING "PnP wakeup region busy!\n");
567 } 569 }
568#endif 570#endif
569 571
@@ -705,7 +707,7 @@ static void __init eepro_print_info (struct net_device *dev)
705 dev->name, (unsigned)dev->base_addr); 707 dev->name, (unsigned)dev->base_addr);
706 break; 708 break;
707 case LAN595FX: 709 case LAN595FX:
708 printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,", 710 printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
709 dev->name, (unsigned)dev->base_addr); 711 dev->name, (unsigned)dev->base_addr);
710 break; 712 break;
711 case LAN595TX: 713 case LAN595TX:
@@ -713,7 +715,7 @@ static void __init eepro_print_info (struct net_device *dev)
713 dev->name, (unsigned)dev->base_addr); 715 dev->name, (unsigned)dev->base_addr);
714 break; 716 break;
715 case LAN595: 717 case LAN595:
716 printk("%s: Intel 82595-based lan card at %#x,", 718 printk("%s: Intel 82595-based lan card at %#x,",
717 dev->name, (unsigned)dev->base_addr); 719 dev->name, (unsigned)dev->base_addr);
718 } 720 }
719 721
@@ -726,7 +728,7 @@ static void __init eepro_print_info (struct net_device *dev)
726 728
727 if (dev->irq > 2) 729 if (dev->irq > 2)
728 printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]); 730 printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]);
729 else 731 else
730 printk(", %s.\n", ifmap[dev->if_port]); 732 printk(", %s.\n", ifmap[dev->if_port]);
731 733
732 if (net_debug > 3) { 734 if (net_debug > 3) {
@@ -756,7 +758,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
756 int err; 758 int err;
757 759
758 /* Grab the region so we can find another board if autoIRQ fails. */ 760 /* Grab the region so we can find another board if autoIRQ fails. */
759 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { 761 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
760 if (!autoprobe) 762 if (!autoprobe)
761 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n", 763 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n",
762 ioaddr); 764 ioaddr);
@@ -838,15 +840,15 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
838 /* Mask off INT number */ 840 /* Mask off INT number */
839 int count = lp->word[1] & 7; 841 int count = lp->word[1] & 7;
840 unsigned irqMask = lp->word[7]; 842 unsigned irqMask = lp->word[7];
841 843
842 while (count--) 844 while (count--)
843 irqMask &= irqMask - 1; 845 irqMask &= irqMask - 1;
844 846
845 count = ffs(irqMask); 847 count = ffs(irqMask);
846 848
847 if (count) 849 if (count)
848 dev->irq = count - 1; 850 dev->irq = count - 1;
849 851
850 if (dev->irq < 2) { 852 if (dev->irq < 2) {
851 printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n"); 853 printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n");
852 goto exit; 854 goto exit;
@@ -854,7 +856,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
854 dev->irq = 9; 856 dev->irq = 9;
855 } 857 }
856 } 858 }
857 859
858 dev->open = eepro_open; 860 dev->open = eepro_open;
859 dev->stop = eepro_close; 861 dev->stop = eepro_close;
860 dev->hard_start_xmit = eepro_send_packet; 862 dev->hard_start_xmit = eepro_send_packet;
@@ -863,7 +865,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
863 dev->tx_timeout = eepro_tx_timeout; 865 dev->tx_timeout = eepro_tx_timeout;
864 dev->watchdog_timeo = TX_TIMEOUT; 866 dev->watchdog_timeo = TX_TIMEOUT;
865 dev->ethtool_ops = &eepro_ethtool_ops; 867 dev->ethtool_ops = &eepro_ethtool_ops;
866 868
867 /* print boot time info */ 869 /* print boot time info */
868 eepro_print_info(dev); 870 eepro_print_info(dev);
869 871
@@ -1047,8 +1049,8 @@ static int eepro_open(struct net_device *dev)
1047 1049
1048 1050
1049 /* Initialize the RCV and XMT upper and lower limits */ 1051 /* Initialize the RCV and XMT upper and lower limits */
1050 outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG); 1052 outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG);
1051 outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG); 1053 outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG);
1052 outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg); 1054 outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg);
1053 outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg); 1055 outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg);
1054 1056
@@ -1065,12 +1067,12 @@ static int eepro_open(struct net_device *dev)
1065 eepro_clear_int(ioaddr); 1067 eepro_clear_int(ioaddr);
1066 1068
1067 /* Initialize RCV */ 1069 /* Initialize RCV */
1068 outw(lp->rcv_lower_limit, ioaddr + RCV_BAR); 1070 outw(lp->rcv_lower_limit, ioaddr + RCV_BAR);
1069 lp->rx_start = lp->rcv_lower_limit; 1071 lp->rx_start = lp->rcv_lower_limit;
1070 outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP); 1072 outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP);
1071 1073
1072 /* Initialize XMT */ 1074 /* Initialize XMT */
1073 outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar); 1075 outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar);
1074 lp->tx_start = lp->tx_end = lp->xmt_lower_limit; 1076 lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
1075 lp->tx_last = 0; 1077 lp->tx_last = 0;
1076 1078
@@ -1411,7 +1413,7 @@ set_multicast_list(struct net_device *dev)
1411 outb(0x08, ioaddr + STATUS_REG); 1413 outb(0x08, ioaddr + STATUS_REG);
1412 1414
1413 if (i & 0x20) { /* command ABORTed */ 1415 if (i & 0x20) { /* command ABORTed */
1414 printk(KERN_NOTICE "%s: multicast setup failed.\n", 1416 printk(KERN_NOTICE "%s: multicast setup failed.\n",
1415 dev->name); 1417 dev->name);
1416 break; 1418 break;
1417 } else if ((i & 0x0f) == 0x03) { /* MC-Done */ 1419 } else if ((i & 0x0f) == 0x03) { /* MC-Done */
@@ -1512,7 +1514,7 @@ hardware_send_packet(struct net_device *dev, void *buf, short length)
1512 end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; 1514 end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
1513 1515
1514 if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */ 1516 if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */
1515 if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) { 1517 if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) {
1516 /* Arrrr!!!, must keep the xmt header together, 1518 /* Arrrr!!!, must keep the xmt header together,
1517 several days were lost to chase this one down. */ 1519 several days were lost to chase this one down. */
1518 last = lp->xmt_lower_limit; 1520 last = lp->xmt_lower_limit;
@@ -1643,7 +1645,7 @@ eepro_rx(struct net_device *dev)
1643 else if (rcv_status & 0x0800) 1645 else if (rcv_status & 0x0800)
1644 lp->stats.rx_crc_errors++; 1646 lp->stats.rx_crc_errors++;
1645 1647
1646 printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", 1648 printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
1647 dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); 1649 dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
1648 } 1650 }
1649 1651
@@ -1674,10 +1676,10 @@ eepro_transmit_interrupt(struct net_device *dev)
1674{ 1676{
1675 struct eepro_local *lp = netdev_priv(dev); 1677 struct eepro_local *lp = netdev_priv(dev);
1676 short ioaddr = dev->base_addr; 1678 short ioaddr = dev->base_addr;
1677 short boguscount = 25; 1679 short boguscount = 25;
1678 short xmt_status; 1680 short xmt_status;
1679 1681
1680 while ((lp->tx_start != lp->tx_end) && boguscount--) { 1682 while ((lp->tx_start != lp->tx_end) && boguscount--) {
1681 1683
1682 outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG); 1684 outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
1683 xmt_status = inw(ioaddr+IO_PORT); 1685 xmt_status = inw(ioaddr+IO_PORT);
@@ -1723,7 +1725,7 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
1723{ 1725{
1724 struct eepro_local *lp = (struct eepro_local *)dev->priv; 1726 struct eepro_local *lp = (struct eepro_local *)dev->priv;
1725 1727
1726 cmd->supported = SUPPORTED_10baseT_Half | 1728 cmd->supported = SUPPORTED_10baseT_Half |
1727 SUPPORTED_10baseT_Full | 1729 SUPPORTED_10baseT_Full |
1728 SUPPORTED_Autoneg; 1730 SUPPORTED_Autoneg;
1729 cmd->advertising = ADVERTISED_10baseT_Half | 1731 cmd->advertising = ADVERTISED_10baseT_Half |
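The eepro change above removes the racy check_region() probe: request_region() checks for and claims the port range in one step, and the range is released again once the wake-up sequence has been written. A minimal sketch of that claim/poke/release pattern; the port width, resource name and register value are invented for illustration:

#include <linux/ioport.h>
#include <asm/io.h>

static int wake_example_device(unsigned int port)
{
	if (!request_region(port, 2, "example-wakeup"))
		return -EBUSY;		/* ports already owned elsewhere */

	outb(0x00, port);		/* safe to touch while we own the range */

	release_region(port, 2);
	return 0;
}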
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 36da54ad2b7b..e5246f227c98 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -514,7 +514,7 @@ static int ibmveth_open(struct net_device *netdev)
514 514
515 if(lpar_rc != H_Success) { 515 if(lpar_rc != H_Success) {
516 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); 516 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
517 ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n", 517 ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
518 adapter->buffer_list_dma, 518 adapter->buffer_list_dma,
519 adapter->filter_list_dma, 519 adapter->filter_list_dma,
520 rxq_desc.desc, 520 rxq_desc.desc,
@@ -1174,14 +1174,16 @@ static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1174 { "network", "IBM,l-lan"}, 1174 { "network", "IBM,l-lan"},
1175 { "", "" } 1175 { "", "" }
1176}; 1176};
1177
1178MODULE_DEVICE_TABLE(vio, ibmveth_device_table); 1177MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1179 1178
1180static struct vio_driver ibmveth_driver = { 1179static struct vio_driver ibmveth_driver = {
1181 .name = (char *)ibmveth_driver_name, 1180 .id_table = ibmveth_device_table,
1182 .id_table = ibmveth_device_table, 1181 .probe = ibmveth_probe,
1183 .probe = ibmveth_probe, 1182 .remove = ibmveth_remove,
1184 .remove = ibmveth_remove 1183 .driver = {
1184 .name = ibmveth_driver_name,
1185 .owner = THIS_MODULE,
1186 }
1185}; 1187};
1186 1188
1187static int __init ibmveth_module_init(void) 1189static int __init ibmveth_module_init(void)
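The ibmveth hunk above (and the iseries_veth one further down) reflects a driver-core change: struct vio_driver now embeds a struct device_driver, so the driver name and module owner move into the .driver member instead of being top-level fields. A sketch of the new registration shape; the id table, probe and remove bodies are placeholders and their signatures are assumed from the surrounding hunks:

#include <linux/module.h>
#include <asm/vio.h>

static struct vio_device_id example_device_table[] = {
	{ "network", "IBM,example" },
	{ "", "" }
};

static int example_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	return 0;	/* placeholder */
}

static int example_remove(struct vio_dev *dev)
{
	return 0;	/* placeholder */
}

static struct vio_driver example_driver = {
	.id_table = example_device_table,
	.probe    = example_probe,
	.remove   = example_remove,
	.driver   = {
		.name  = "example-veth",
		.owner = THIS_MODULE,
	},
};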
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index aef80f5e7c9c..b886b07412a6 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -704,15 +704,12 @@ static int pxa_irda_stop(struct net_device *dev)
704 return 0; 704 return 0;
705} 705}
706 706
707static int pxa_irda_suspend(struct device *_dev, pm_message_t state, u32 level) 707static int pxa_irda_suspend(struct device *_dev, pm_message_t state)
708{ 708{
709 struct net_device *dev = dev_get_drvdata(_dev); 709 struct net_device *dev = dev_get_drvdata(_dev);
710 struct pxa_irda *si; 710 struct pxa_irda *si;
711 711
712 if (!dev || level != SUSPEND_DISABLE) 712 if (dev && netif_running(dev)) {
713 return 0;
714
715 if (netif_running(dev)) {
716 si = netdev_priv(dev); 713 si = netdev_priv(dev);
717 netif_device_detach(dev); 714 netif_device_detach(dev);
718 pxa_irda_shutdown(si); 715 pxa_irda_shutdown(si);
@@ -721,15 +718,12 @@ static int pxa_irda_suspend(struct device *_dev, pm_message_t state, u32 level)
721 return 0; 718 return 0;
722} 719}
723 720
724static int pxa_irda_resume(struct device *_dev, u32 level) 721static int pxa_irda_resume(struct device *_dev)
725{ 722{
726 struct net_device *dev = dev_get_drvdata(_dev); 723 struct net_device *dev = dev_get_drvdata(_dev);
727 struct pxa_irda *si; 724 struct pxa_irda *si;
728 725
729 if (!dev || level != RESUME_ENABLE) 726 if (dev && netif_running(dev)) {
730 return 0;
731
732 if (netif_running(dev)) {
733 si = netdev_priv(dev); 727 si = netdev_priv(dev);
734 pxa_irda_startup(si); 728 pxa_irda_startup(si);
735 netif_device_attach(dev); 729 netif_device_attach(dev);
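The pxaficp_ir hunks show the matching driver-model update on the callback side: bus suspend/resume methods lose the u32 level argument and are called once per transition, so the SUSPEND_DISABLE/RESUME_ENABLE filtering disappears and the handler only needs to check whether the interface is running. A minimal sketch of the new callback shape; apart from the signatures and the core networking calls, the names are hypothetical:

#include <linux/device.h>
#include <linux/netdevice.h>

static int example_suspend(struct device *_dev, pm_message_t state)
{
	struct net_device *dev = dev_get_drvdata(_dev);

	if (dev && netif_running(dev)) {
		netif_device_detach(dev);
		/* quiesce the hardware here */
	}
	return 0;
}

static int example_resume(struct device *_dev)
{
	struct net_device *dev = dev_get_drvdata(_dev);

	if (dev && netif_running(dev)) {
		/* reprogram the hardware here */
		netif_device_attach(dev);
	}
	return 0;
}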
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index bbac720cca63..140b7cdb1f7e 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -638,21 +638,14 @@ static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
638 */ 638 */
639static void smsc_ircc_init_chip(struct smsc_ircc_cb *self) 639static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
640{ 640{
641 int iobase, ir_mode, ctrl, fast; 641 int iobase = self->io.fir_base;
642
643 IRDA_ASSERT(self != NULL, return;);
644
645 iobase = self->io.fir_base;
646 ir_mode = IRCC_CFGA_IRDA_SIR_A;
647 ctrl = 0;
648 fast = 0;
649 642
650 register_bank(iobase, 0); 643 register_bank(iobase, 0);
651 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER); 644 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
652 outb(0x00, iobase + IRCC_MASTER); 645 outb(0x00, iobase + IRCC_MASTER);
653 646
654 register_bank(iobase, 1); 647 register_bank(iobase, 1);
655 outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | ir_mode), 648 outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | IRCC_CFGA_IRDA_SIR_A),
656 iobase + IRCC_SCE_CFGA); 649 iobase + IRCC_SCE_CFGA);
657 650
658#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */ 651#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
@@ -666,10 +659,10 @@ static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
666 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD); 659 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD);
667 660
668 register_bank(iobase, 4); 661 register_bank(iobase, 4);
669 outb((inb(iobase + IRCC_CONTROL) & 0x30) | ctrl, iobase + IRCC_CONTROL); 662 outb((inb(iobase + IRCC_CONTROL) & 0x30), iobase + IRCC_CONTROL);
670 663
671 register_bank(iobase, 0); 664 register_bank(iobase, 0);
672 outb(fast, iobase + IRCC_LCR_A); 665 outb(0, iobase + IRCC_LCR_A);
673 666
674 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED); 667 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
675 668
@@ -1556,6 +1549,46 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1556} 1549}
1557#endif /* unused */ 1550#endif /* unused */
1558 1551
1552static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
1553{
1554 int error;
1555
1556 error = request_irq(self->io.irq, smsc_ircc_interrupt, 0,
1557 self->netdev->name, self->netdev);
1558 if (error)
1559 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
1560 __FUNCTION__, self->io.irq, error);
1561
1562 return error;
1563}
1564
1565static void smsc_ircc_start_interrupts(struct smsc_ircc_cb *self)
1566{
1567 unsigned long flags;
1568
1569 spin_lock_irqsave(&self->lock, flags);
1570
1571 self->io.speed = 0;
1572 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1573
1574 spin_unlock_irqrestore(&self->lock, flags);
1575}
1576
1577static void smsc_ircc_stop_interrupts(struct smsc_ircc_cb *self)
1578{
1579 int iobase = self->io.fir_base;
1580 unsigned long flags;
1581
1582 spin_lock_irqsave(&self->lock, flags);
1583
1584 register_bank(iobase, 0);
1585 outb(0, iobase + IRCC_IER);
1586 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
1587 outb(0x00, iobase + IRCC_MASTER);
1588
1589 spin_unlock_irqrestore(&self->lock, flags);
1590}
1591
1559 1592
1560/* 1593/*
1561 * Function smsc_ircc_net_open (dev) 1594 * Function smsc_ircc_net_open (dev)
@@ -1567,7 +1600,6 @@ static int smsc_ircc_net_open(struct net_device *dev)
1567{ 1600{
1568 struct smsc_ircc_cb *self; 1601 struct smsc_ircc_cb *self;
1569 char hwname[16]; 1602 char hwname[16];
1570 unsigned long flags;
1571 1603
1572 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1604 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1573 1605
@@ -1575,6 +1607,11 @@ static int smsc_ircc_net_open(struct net_device *dev)
1575 self = netdev_priv(dev); 1607 self = netdev_priv(dev);
1576 IRDA_ASSERT(self != NULL, return 0;); 1608 IRDA_ASSERT(self != NULL, return 0;);
1577 1609
1610 if (self->io.suspended) {
1611 IRDA_DEBUG(0, "%s(), device is suspended\n", __FUNCTION__);
1612 return -EAGAIN;
1613 }
1614
1578 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, 1615 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
1579 (void *) dev)) { 1616 (void *) dev)) {
1580 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", 1617 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
@@ -1582,11 +1619,7 @@ static int smsc_ircc_net_open(struct net_device *dev)
1582 return -EAGAIN; 1619 return -EAGAIN;
1583 } 1620 }
1584 1621
1585 spin_lock_irqsave(&self->lock, flags); 1622 smsc_ircc_start_interrupts(self);
1586 /*smsc_ircc_sir_start(self);*/
1587 self->io.speed = 0;
1588 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1589 spin_unlock_irqrestore(&self->lock, flags);
1590 1623
1591 /* Give self a hardware name */ 1624 /* Give self a hardware name */
1592 /* It would be cool to offer the chip revision here - Jean II */ 1625 /* It would be cool to offer the chip revision here - Jean II */
@@ -1639,7 +1672,12 @@ static int smsc_ircc_net_close(struct net_device *dev)
1639 irlap_close(self->irlap); 1672 irlap_close(self->irlap);
1640 self->irlap = NULL; 1673 self->irlap = NULL;
1641 1674
1642 free_irq(self->io.irq, dev); 1675 smsc_ircc_stop_interrupts(self);
1676
1677 /* if we are called from smsc_ircc_resume we don't have IRQ reserved */
1678 if (!self->io.suspended)
1679 free_irq(self->io.irq, dev);
1680
1643 disable_dma(self->io.dma); 1681 disable_dma(self->io.dma);
1644 free_dma(self->io.dma); 1682 free_dma(self->io.dma);
1645 1683
@@ -1650,11 +1688,18 @@ static int smsc_ircc_suspend(struct device *dev, pm_message_t state)
1650{ 1688{
1651 struct smsc_ircc_cb *self = dev_get_drvdata(dev); 1689 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1652 1690
1653 IRDA_MESSAGE("%s, Suspending\n", driver_name);
1654
1655 if (!self->io.suspended) { 1691 if (!self->io.suspended) {
1656 smsc_ircc_net_close(self->netdev); 1692 IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
1693
1694 rtnl_lock();
1695 if (netif_running(self->netdev)) {
1696 netif_device_detach(self->netdev);
1697 smsc_ircc_stop_interrupts(self);
1698 free_irq(self->io.irq, self->netdev);
1699 disable_dma(self->io.dma);
1700 }
1657 self->io.suspended = 1; 1701 self->io.suspended = 1;
1702 rtnl_unlock();
1658 } 1703 }
1659 1704
1660 return 0; 1705 return 0;
@@ -1665,11 +1710,25 @@ static int smsc_ircc_resume(struct device *dev)
1665 struct smsc_ircc_cb *self = dev_get_drvdata(dev); 1710 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1666 1711
1667 if (self->io.suspended) { 1712 if (self->io.suspended) {
1668 1713 IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
1669 smsc_ircc_net_open(self->netdev); 1714
1715 rtnl_lock();
1716 smsc_ircc_init_chip(self);
1717 if (netif_running(self->netdev)) {
1718 if (smsc_ircc_request_irq(self)) {
1719 /*
1720 * Don't fail resume process, just kill this
1721 * network interface
1722 */
1723 unregister_netdevice(self->netdev);
1724 } else {
1725 enable_dma(self->io.dma);
1726 smsc_ircc_start_interrupts(self);
1727 netif_device_attach(self->netdev);
1728 }
1729 }
1670 self->io.suspended = 0; 1730 self->io.suspended = 0;
1671 1731 rtnl_unlock();
1672 IRDA_MESSAGE("%s, Waking up\n", driver_name);
1673 } 1732 }
1674 return 0; 1733 return 0;
1675} 1734}
@@ -1682,9 +1741,6 @@ static int smsc_ircc_resume(struct device *dev)
1682 */ 1741 */
1683static int __exit smsc_ircc_close(struct smsc_ircc_cb *self) 1742static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1684{ 1743{
1685 int iobase;
1686 unsigned long flags;
1687
1688 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1744 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1689 1745
1690 IRDA_ASSERT(self != NULL, return -1;); 1746 IRDA_ASSERT(self != NULL, return -1;);
@@ -1694,22 +1750,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1694 /* Remove netdevice */ 1750 /* Remove netdevice */
1695 unregister_netdev(self->netdev); 1751 unregister_netdev(self->netdev);
1696 1752
1697 /* Make sure the irq handler is not exectuting */ 1753 smsc_ircc_stop_interrupts(self);
1698 spin_lock_irqsave(&self->lock, flags);
1699
1700 /* Stop interrupts */
1701 iobase = self->io.fir_base;
1702 register_bank(iobase, 0);
1703 outb(0, iobase + IRCC_IER);
1704 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
1705 outb(0x00, iobase + IRCC_MASTER);
1706#if 0
1707 /* Reset to SIR mode */
1708 register_bank(iobase, 1);
1709 outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase + IRCC_SCE_CFGA);
1710 outb(IRCC_CFGB_IR, iobase + IRCC_SCE_CFGB);
1711#endif
1712 spin_unlock_irqrestore(&self->lock, flags);
1713 1754
1714 /* Release the PORTS that this driver is using */ 1755 /* Release the PORTS that this driver is using */
1715 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1756 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
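The smsc-ircc2 rework above replaces "close the interface on suspend" with "detach it": the chip's interrupts are masked, the IRQ is freed and DMA disabled, and the netdevice is detached, all under rtnl_lock() so an open or close cannot race with the power transition. A condensed sketch of the suspend side; the control-block type and the stop-interrupts stub are placeholders for the driver's real ones:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct example_cb {
	struct net_device *netdev;
	int irq;
	int suspended;
};

static void example_stop_interrupts(struct example_cb *self)
{
	/* placeholder: mask the controller's interrupt sources here */
}

static int example_ircc_suspend(struct device *dev, pm_message_t state)
{
	struct example_cb *self = dev_get_drvdata(dev);

	if (!self->suspended) {
		rtnl_lock();
		if (netif_running(self->netdev)) {
			netif_device_detach(self->netdev);
			example_stop_interrupts(self);
			free_irq(self->irq, self->netdev);
		}
		self->suspended = 1;
		rtnl_unlock();
	}
	return 0;
}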
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 3d56cf5a4e23..f5ea39ff1017 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -70,8 +70,9 @@
70#include <linux/delay.h> 70#include <linux/delay.h>
71#include <linux/mm.h> 71#include <linux/mm.h>
72#include <linux/ethtool.h> 72#include <linux/ethtool.h>
73
74#include <asm/abs_addr.h>
73#include <asm/iSeries/mf.h> 75#include <asm/iSeries/mf.h>
74#include <asm/iSeries/iSeries_pci.h>
75#include <asm/uaccess.h> 76#include <asm/uaccess.h>
76 77
77#include <asm/iSeries/HvLpConfig.h> 78#include <asm/iSeries/HvLpConfig.h>
@@ -1397,13 +1398,13 @@ static inline void veth_build_dma_list(struct dma_chunk *list,
1397 * it just at the granularity of iSeries real->absolute 1398 * it just at the granularity of iSeries real->absolute
1398 * mapping? Indeed, given the way the allocator works, can we 1399 * mapping? Indeed, given the way the allocator works, can we
1399 * count on them being absolutely contiguous? */ 1400 * count on them being absolutely contiguous? */
1400 list[0].addr = ISERIES_HV_ADDR(p); 1401 list[0].addr = iseries_hv_addr(p);
1401 list[0].size = min(length, 1402 list[0].size = min(length,
1402 PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK)); 1403 PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK));
1403 1404
1404 done = list[0].size; 1405 done = list[0].size;
1405 while (done < length) { 1406 while (done < length) {
1406 list[i].addr = ISERIES_HV_ADDR(p + done); 1407 list[i].addr = iseries_hv_addr(p + done);
1407 list[i].size = min(length-done, PAGE_SIZE); 1408 list[i].size = min(length-done, PAGE_SIZE);
1408 done += list[i].size; 1409 done += list[i].size;
1409 i++; 1410 i++;
@@ -1496,8 +1497,8 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1496 cnx->dst_inst, 1497 cnx->dst_inst,
1497 HvLpDma_AddressType_RealAddress, 1498 HvLpDma_AddressType_RealAddress,
1498 HvLpDma_AddressType_TceIndex, 1499 HvLpDma_AddressType_TceIndex,
1499 ISERIES_HV_ADDR(&local_list), 1500 iseries_hv_addr(&local_list),
1500 ISERIES_HV_ADDR(&remote_list), 1501 iseries_hv_addr(&remote_list),
1501 length); 1502 length);
1502 if (rc != HvLpDma_Rc_Good) { 1503 if (rc != HvLpDma_Rc_Good) {
1503 dev_kfree_skb_irq(skb); 1504 dev_kfree_skb_irq(skb);
@@ -1647,10 +1648,13 @@ static struct vio_device_id veth_device_table[] __devinitdata = {
1647MODULE_DEVICE_TABLE(vio, veth_device_table); 1648MODULE_DEVICE_TABLE(vio, veth_device_table);
1648 1649
1649static struct vio_driver veth_driver = { 1650static struct vio_driver veth_driver = {
1650 .name = DRV_NAME,
1651 .id_table = veth_device_table, 1651 .id_table = veth_device_table,
1652 .probe = veth_probe, 1652 .probe = veth_probe,
1653 .remove = veth_remove 1653 .remove = veth_remove,
1654 .driver = {
1655 .name = DRV_NAME,
1656 .owner = THIS_MODULE,
1657 }
1654}; 1658};
1655 1659
1656/* 1660/*
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 09b1e7b364e5..2a5add257b8f 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -1016,6 +1016,7 @@ static struct of_device_id mace_match[] =
1016 }, 1016 },
1017 {}, 1017 {},
1018}; 1018};
1019MODULE_DEVICE_TABLE (of, mace_match);
1019 1020
1020static struct macio_driver mace_driver = 1021static struct macio_driver mace_driver =
1021{ 1022{
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 25c9a99c377b..8fbba21d975b 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1533,6 +1533,9 @@ static int mv643xx_eth_probe(struct device *ddev)
1533 printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name); 1533 printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
1534#endif 1534#endif
1535 1535
1536 if (mp->tx_sram_size > 0)
1537 printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);
1538
1536 return 0; 1539 return 0;
1537 1540
1538out: 1541out:
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index f17c05cbe44b..99a776a51fb5 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -1896,7 +1896,7 @@ void smt_swap_para(struct smt_header *sm, int len, int direction)
1896 1896
1897static void smt_string_swap(char *data, const char *format, int len) 1897static void smt_string_swap(char *data, const char *format, int len)
1898{ 1898{
1899 const char *open_paren = 0 ; 1899 const char *open_paren = NULL ;
1900 int x ; 1900 int x ;
1901 1901
1902 while (len > 0 && *format) { 1902 while (len > 0 && *format) {
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index ac9ce6509eee..817f200742c3 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -230,12 +230,12 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
230#define SMC_CAN_USE_16BIT 1 230#define SMC_CAN_USE_16BIT 1
231#define SMC_CAN_USE_32BIT 0 231#define SMC_CAN_USE_32BIT 0
232 232
233#define SMC_inb(a, r)	inb((a) + (r) - 0xa0000000)	 233#define SMC_inb(a, r)	inb(((u32)a) + (r))
234#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000) 234#define SMC_inw(a, r) inw(((u32)a) + (r))
235#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000) 235#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r))
236#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000) 236#define SMC_outw(v, a, r) outw(v, ((u32)a) + (r))
237#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) 237#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l)
238#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) 238#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l)
239 239
240#define set_irq_type(irq, type) do {} while(0) 240#define set_irq_type(irq, type) do {} while(0)
241 241
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 4937a5ad4b2c..6a60c5970cb5 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -137,7 +137,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
137 PCI_DMA_FROMDEVICE); 137 PCI_DMA_FROMDEVICE);
138 if (!buf->pci_addr) { 138 if (!buf->pci_addr) {
139 printk(KERN_WARNING 139 printk(KERN_WARNING
140 "Failed to make memory DMA'able\n."); 140 "Failed to make memory DMA'able.\n");
141 return -ENOMEM; 141 return -ENOMEM;
142 } 142 }
143 } 143 }
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index a62a4345b466..2d4639d6841f 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -39,6 +39,7 @@
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/errno.h> 40#include <linux/errno.h>
41#include <linux/pci.h> 41#include <linux/pci.h>
42#include <linux/string.h>
42#include "cpci_hotplug.h" 43#include "cpci_hotplug.h"
43 44
44#define DRIVER_VERSION "0.1" 45#define DRIVER_VERSION "0.1"
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 790abadd816c..f7cb00da38df 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -36,6 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/errno.h> 37#include <linux/errno.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/signal.h> /* SA_SHIRQ */
39#include "cpci_hotplug.h" 40#include "cpci_hotplug.h"
40#include "cpcihp_zt5550.h" 41#include "cpcihp_zt5550.h"
41 42
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 8e47fa66e25e..060d74775d7b 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -37,6 +37,8 @@
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/string.h>
41#include <linux/slab.h>
40#include "pci_hotplug.h" 42#include "pci_hotplug.h"
41#include "../pci.h" 43#include "../pci.h"
42 44
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 0392e004258f..aabf1e70b528 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1077,7 +1077,7 @@ static int enable_slot(struct hotplug_slot *hs)
1077 if (rc) { 1077 if (rc) {
1078 err("Adding this card exceeds the limitations of this bus.\n"); 1078 err("Adding this card exceeds the limitations of this bus.\n");
1079 err("(i.e., >1 133MHz cards running on same bus, or " 1079 err("(i.e., >1 133MHz cards running on same bus, or "
1080 ">2 66 PCI cards running on same bus\n."); 1080 ">2 66 PCI cards running on same bus.\n");
1081 err("Try hot-adding into another bus\n"); 1081 err("Try hot-adding into another bus\n");
1082 rc = -EINVAL; 1082 rc = -EINVAL;
1083 goto error_nopower; 1083 goto error_nopower;
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 33b539b34f7e..ff17d8e07e94 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -113,7 +113,7 @@ int pciehp_unconfigure_device(struct pci_func* func)
113 */ 113 */
114int pciehp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num) 114int pciehp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
115{ 115{
116#if defined(CONFIG_X86) && !defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_64) 116#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_IO_APIC)
117 int rc; 117 int rc;
118 u16 temp_word; 118 u16 temp_word;
119 struct pci_dev fakedev; 119 struct pci_dev fakedev;
diff --git a/drivers/pci/hotplug/pciehprm_nonacpi.c b/drivers/pci/hotplug/pciehprm_nonacpi.c
index 3622965f8961..33b2c69a0829 100644
--- a/drivers/pci/hotplug/pciehprm_nonacpi.c
+++ b/drivers/pci/hotplug/pciehprm_nonacpi.c
@@ -33,10 +33,13 @@
33#include <linux/types.h> 33#include <linux/types.h>
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/slab.h>
37
36#include <asm/uaccess.h> 38#include <asm/uaccess.h>
37#ifdef CONFIG_IA64 39#ifdef CONFIG_IA64
38#include <asm/iosapic.h> 40#include <asm/iosapic.h>
39#endif 41#endif
42
40#include "pciehp.h" 43#include "pciehp.h"
41#include "pciehprm.h" 44#include "pciehprm.h"
42#include "pciehprm_nonacpi.h" 45#include "pciehprm_nonacpi.h"
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index ad1017da8656..fcb66b9a0e28 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -16,10 +16,13 @@
16 */ 16 */
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/string.h>
20
19#include <asm/pci-bridge.h> 21#include <asm/pci-bridge.h>
20#include <asm/semaphore.h> 22#include <asm/semaphore.h>
21#include <asm/rtas.h> 23#include <asm/rtas.h>
22#include <asm/vio.h> 24#include <asm/vio.h>
25
23#include "../pci.h" 26#include "../pci.h"
24#include "rpaphp.h" 27#include "rpaphp.h"
25#include "rpadlpar.h" 28#include "rpadlpar.h"
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 46c157d26a2f..f7c12d7dfcfc 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -23,11 +23,13 @@
23 * 23 *
24 */ 24 */
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/string.h>
27
26#include <asm/pci-bridge.h> 28#include <asm/pci-bridge.h>
27#include <asm/rtas.h> 29#include <asm/rtas.h>
28#include <asm/machdep.h> 30#include <asm/machdep.h>
29#include "../pci.h" /* for pci_add_new_bus */
30 31
32#include "../pci.h" /* for pci_add_new_bus */
31#include "rpaphp.h" 33#include "rpaphp.h"
32 34
33static struct pci_bus *find_bus_among_children(struct pci_bus *bus, 35static struct pci_bus *find_bus_among_children(struct pci_bus *bus,
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 0e8815495083..daa89ae57123 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -27,6 +27,9 @@
27#include <linux/kobject.h> 27#include <linux/kobject.h>
28#include <linux/sysfs.h> 28#include <linux/sysfs.h>
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/string.h>
31#include <linux/slab.h>
32
30#include <asm/rtas.h> 33#include <asm/rtas.h>
31#include "rpaphp.h" 34#include "rpaphp.h"
32 35
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index abe2cf411e68..08ad26a0cae7 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -32,6 +32,8 @@
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/sched.h> /* signal_pending(), struct timer_list */
36
35#include "pci_hotplug.h" 37#include "pci_hotplug.h"
36 38
37#if !defined(MODULE) 39#if !defined(MODULE)
diff --git a/drivers/pci/hotplug/shpchprm_nonacpi.c b/drivers/pci/hotplug/shpchprm_nonacpi.c
index d70fe5408417..c6b40998eeb3 100644
--- a/drivers/pci/hotplug/shpchprm_nonacpi.c
+++ b/drivers/pci/hotplug/shpchprm_nonacpi.c
@@ -32,6 +32,8 @@
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/types.h> 33#include <linux/types.h>
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/slab.h>
36
35#include "shpchp.h" 37#include "shpchp.h"
36 38
37int shpchprm_get_physical_slot_number(struct controller *ctrl, u32 *sun, u8 busnum, u8 devnum) 39int shpchprm_get_physical_slot_number(struct controller *ctrl, u32 *sun, u8 busnum, u8 devnum)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8972e6a3aac0..ae986e590b48 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -8,6 +8,8 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/mempolicy.h> 10#include <linux/mempolicy.h>
11#include <linux/string.h>
12#include <linux/slab.h>
11#include "pci.h" 13#include "pci.h"
12 14
13/* 15/*
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 61b855c99e39..e74d75843047 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -15,6 +15,7 @@
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/string.h>
18#include <asm/dma.h> /* isa_dma_bridge_buggy */ 19#include <asm/dma.h> /* isa_dma_bridge_buggy */
19#include "pci.h" 20#include "pci.h"
20 21
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 14f05d22bb70..467a4ceccf10 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -11,6 +11,8 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/pm.h> 13#include <linux/pm.h>
14#include <linux/string.h>
15#include <linux/slab.h>
14#include <linux/pcieport_if.h> 16#include <linux/pcieport_if.h>
15 17
16#include "portdrv.h" 18#include "portdrv.h"
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3c565ce7f77b..02260141dc81 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -12,6 +12,7 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/pm.h> 13#include <linux/pm.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h>
15#include <linux/pcieport_if.h> 16#include <linux/pcieport_if.h>
16 17
17#include "portdrv.h" 18#include "portdrv.h"
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bbd9c2323d8c..5627ce1d2b32 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -356,7 +356,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
356/* 356/*
357 * PIIX4 ACPI: Two IO regions pointed to by longwords at 357 * PIIX4 ACPI: Two IO regions pointed to by longwords at
358 * 0x40 (64 bytes of ACPI registers) 358 * 0x40 (64 bytes of ACPI registers)
359 * 0x90 (32 bytes of SMB registers) 359 * 0x90 (16 bytes of SMB registers)
360 * and a few strange programmable PIIX4 device resources. 360 * and a few strange programmable PIIX4 device resources.
361 */ 361 */
362static void __devinit quirk_piix4_acpi(struct pci_dev *dev) 362static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
@@ -366,7 +366,7 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
366 pci_read_config_dword(dev, 0x40, &region); 366 pci_read_config_dword(dev, 0x40, &region);
367 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI"); 367 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
368 pci_read_config_dword(dev, 0x90, &region); 368 pci_read_config_dword(dev, 0x90, &region);
369 quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB"); 369 quirk_io_region(dev, region, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
370 370
371 /* Device resource A has enables for some of the other ones */ 371 /* Device resource A has enables for some of the other ones */
372 pci_read_config_dword(dev, 0x5c, &res_a); 372 pci_read_config_dword(dev, 0x5c, &res_a);
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 49bd21702314..598a115cd00e 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -9,6 +9,7 @@
9#include <linux/config.h> 9#include <linux/config.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/slab.h>
12 13
13#include "pci.h" 14#include "pci.h"
14 15
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 36cc9a96a338..ccf20039e909 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -154,6 +154,16 @@ config TCIC
154 "Bridge" is the name used for the hardware inside your computer that 154 "Bridge" is the name used for the hardware inside your computer that
155 PCMCIA cards are plugged into. If unsure, say N. 155 PCMCIA cards are plugged into. If unsure, say N.
156 156
157config PCMCIA_M8XX
158 tristate "MPC8xx PCMCIA support"
159 depends on PCMCIA && PPC
160 select PCCARD_NONSTATIC
161 help
162	  Say Y here to include support for the PowerPC 8xx series PCMCIA
163 controller.
164
165 This driver is also available as a module called m8xx_pcmcia.
166
157config HD64465_PCMCIA 167config HD64465_PCMCIA
158 tristate "HD64465 host bridge support" 168 tristate "HD64465 host bridge support"
159 depends on HD64465 && PCMCIA 169 depends on HD64465 && PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 77ecee7f987b..fe37541abbfe 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_PD6729) += pd6729.o
25obj-$(CONFIG_I82365) += i82365.o 25obj-$(CONFIG_I82365) += i82365.o
26obj-$(CONFIG_I82092) += i82092.o 26obj-$(CONFIG_I82092) += i82092.o
27obj-$(CONFIG_TCIC) += tcic.o 27obj-$(CONFIG_TCIC) += tcic.o
28obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o
28obj-$(CONFIG_HD64465_PCMCIA) += hd64465_ss.o 29obj-$(CONFIG_HD64465_PCMCIA) += hd64465_ss.o
29obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_core.o sa1100_cs.o 30obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_core.o sa1100_cs.o
30obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o 31obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o
@@ -59,6 +60,7 @@ sa1111_cs-$(CONFIG_SA1100_JORNADA720) += sa1100_jornada720.o
59sa1100_cs-y += sa1100_generic.o 60sa1100_cs-y += sa1100_generic.o
60sa1100_cs-$(CONFIG_SA1100_ASSABET) += sa1100_assabet.o 61sa1100_cs-$(CONFIG_SA1100_ASSABET) += sa1100_assabet.o
61sa1100_cs-$(CONFIG_SA1100_CERF) += sa1100_cerf.o 62sa1100_cs-$(CONFIG_SA1100_CERF) += sa1100_cerf.o
63sa1100_cs-$(CONFIG_SA1100_COLLIE) += pxa2xx_sharpsl.o
62sa1100_cs-$(CONFIG_SA1100_H3600) += sa1100_h3600.o 64sa1100_cs-$(CONFIG_SA1100_H3600) += sa1100_h3600.o
63sa1100_cs-$(CONFIG_SA1100_SHANNON) += sa1100_shannon.o 65sa1100_cs-$(CONFIG_SA1100_SHANNON) += sa1100_shannon.o
64sa1100_cs-$(CONFIG_SA1100_SIMPAD) += sa1100_simpad.o 66sa1100_cs-$(CONFIG_SA1100_SIMPAD) += sa1100_simpad.o
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
new file mode 100644
index 000000000000..f8bed87cf2f1
--- /dev/null
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -0,0 +1,1290 @@
1/*
2 * m8xx_pcmcia.c - Linux PCMCIA socket driver for the mpc8xx series.
3 *
4 * (C) 1999-2000 Magnus Damm <damm@bitsmart.com>
5 * (C) 2001-2002 Montavista Software, Inc.
6 * <mlocke@mvista.com>
7 *
8 * Support for two slots by Cyclades Corporation
9 * <oliver.kurth@cyclades.de>
10 * Further fixes, v2.6 kernel port
11 * <marcelo.tosatti@cyclades.com>
12 *
13 * "The ExCA standard specifies that socket controllers should provide
14 * two IO and five memory windows per socket, which can be independently
15 * configured and positioned in the host address space and mapped to
16 * arbitrary segments of card address space. " - David A Hinds. 1999
17 *
18 * This controller does _not_ meet the ExCA standard.
19 *
20 * m8xx pcmcia controller brief info:
21 * + 8 windows (attrib, mem, i/o)
22 * + up to two slots (SLOT_A and SLOT_B)
23 * + inputpins, outputpins, event and mask registers.
24 * - no offset register. sigh.
25 *
 26 * Because there is no offset register we must map the whole card.
 27 * We assign each memory window PCMCIA_MEM_WIN_SIZE of address space.
 28 * Make sure there are (PCMCIA_MEM_WIN_SIZE * PCMCIA_MEM_WIN_NO
 29 * * PCMCIA_SOCKETS_NO) bytes at PCMCIA_MEM_WIN_BASE.
30 * The i/o windows are dynamically allocated at PCMCIA_IO_WIN_BASE.
31 * They are maximum 64KByte each...
32 */
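/*
 * A short worked example of the fixed-window scheme described above
 * (illustration only, mirroring what m8xx_init() programs below):
 * memory window m of socket i is placed at
 *
 *	PCMCIA_MEM_WIN_BASE + PCMCIA_MEM_WIN_SIZE * (m + i * PCMCIA_MEM_WIN_NO)
 *
 * so a single-slot build (PCMCIA_MEM_WIN_NO == 5) ends up with five
 * 64 MByte windows for socket 0 at 0xe0000000, 0xe4000000, 0xe8000000,
 * 0xec000000 and 0xf0000000.
 */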
33
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/types.h>
37#include <linux/fcntl.h>
38#include <linux/string.h>
39
40#include <asm/io.h>
41#include <asm/bitops.h>
42#include <asm/segment.h>
43#include <asm/system.h>
44
45#include <linux/kernel.h>
46#include <linux/errno.h>
47#include <linux/sched.h>
48#include <linux/slab.h>
49#include <linux/timer.h>
50#include <linux/ioport.h>
51#include <linux/delay.h>
52#include <linux/interrupt.h>
53
54#include <asm/mpc8xx.h>
55#include <asm/8xx_immap.h>
56#include <asm/irq.h>
57
58#include <pcmcia/version.h>
59#include <pcmcia/cs_types.h>
60#include <pcmcia/cs.h>
61#include <pcmcia/ss.h>
62
63#ifdef PCMCIA_DEBUG
64static int pc_debug = PCMCIA_DEBUG;
65module_param(pc_debug, int, 0);
66#define dprintk(args...) printk(KERN_DEBUG "m8xx_pcmcia: " args);
67#else
68#define dprintk(args...)
69#endif
70
71#define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args)
72#define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args)
73
74static const char *version = "Version 0.06, Aug 2005";
75MODULE_LICENSE("Dual MPL/GPL");
76
77#if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)
78
79/* The RPX series use SLOT_B */
80#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
81#define CONFIG_PCMCIA_SLOT_B
82#define CONFIG_BD_IS_MHZ
83#endif
84
85/* The ADS board use SLOT_A */
86#ifdef CONFIG_ADS
87#define CONFIG_PCMCIA_SLOT_A
88#define CONFIG_BD_IS_MHZ
89#endif
90
91/* The FADS series are a mess */
92#ifdef CONFIG_FADS
93#if defined(CONFIG_MPC860T) || defined(CONFIG_MPC860) || defined(CONFIG_MPC821)
94#define CONFIG_PCMCIA_SLOT_A
95#else
96#define CONFIG_PCMCIA_SLOT_B
97#endif
98#endif
99
100/* Cyclades ACS uses both slots */
101#ifdef CONFIG_PRxK
102#define CONFIG_PCMCIA_SLOT_A
103#define CONFIG_PCMCIA_SLOT_B
104#endif
105
106#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
107
108#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B)
109
110#define PCMCIA_SOCKETS_NO 2
111/* We have only 8 windows, dualsocket support will be limited. */
112#define PCMCIA_MEM_WIN_NO 2
113#define PCMCIA_IO_WIN_NO 2
114#define PCMCIA_SLOT_MSG "SLOT_A and SLOT_B"
115
116#elif defined(CONFIG_PCMCIA_SLOT_A) || defined(CONFIG_PCMCIA_SLOT_B)
117
118#define PCMCIA_SOCKETS_NO 1
119/* full support for one slot */
120#define PCMCIA_MEM_WIN_NO 5
121#define PCMCIA_IO_WIN_NO 2
122
123/* define _slot_ to be able to optimize macros */
124
125#ifdef CONFIG_PCMCIA_SLOT_A
126#define _slot_ 0
127#define PCMCIA_SLOT_MSG "SLOT_A"
128#else
129#define _slot_ 1
130#define PCMCIA_SLOT_MSG "SLOT_B"
131#endif
132
133#else
134#error m8xx_pcmcia: Bad configuration!
135#endif
136
137/* ------------------------------------------------------------------------- */
138
139#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */
140#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */
141#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */
142
143#define PCMCIA_SCHLVL PCMCIA_INTERRUPT /* Status Change Interrupt Level */
144
145/* ------------------------------------------------------------------------- */
146
147/* 2.4.x and newer has this always in HZ */
148#define M8XX_BUSFREQ ((((bd_t *)&(__res))->bi_busfreq))
149
150static int pcmcia_schlvl = PCMCIA_SCHLVL;
151
152static spinlock_t events_lock = SPIN_LOCK_UNLOCKED;
153
154
155#define PCMCIA_SOCKET_KEY_5V 1
156#define PCMCIA_SOCKET_KEY_LV 2
157
158/* look up table for pgcrx registers */
159static u32 *m8xx_pgcrx[2] = {
160 &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pgcra,
161 &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pgcrb
162};
163
164/*
165 * This structure is used to address each window in the PCMCIA controller.
166 *
167 * Keep in mind that we assume that pcmcia_win[n+1] is mapped directly
168 * after pcmcia_win[n]...
169 */
170
171struct pcmcia_win {
172 u32 br;
173 u32 or;
174};
175
176/*
177 * For some reason the hardware guys decided to make both slots share
178 * some registers.
179 *
180 * Could someone invent object-oriented hardware?
181 *
182 * The macros are used to get the right bit from the registers.
183 * SLOT_A : slot = 0
184 * SLOT_B : slot = 1
185 */
186
187#define M8XX_PCMCIA_VS1(slot) (0x80000000 >> (slot << 4))
188#define M8XX_PCMCIA_VS2(slot) (0x40000000 >> (slot << 4))
189#define M8XX_PCMCIA_VS_MASK(slot) (0xc0000000 >> (slot << 4))
190#define M8XX_PCMCIA_VS_SHIFT(slot) (30 - (slot << 4))
191
192#define M8XX_PCMCIA_WP(slot) (0x20000000 >> (slot << 4))
193#define M8XX_PCMCIA_CD2(slot) (0x10000000 >> (slot << 4))
194#define M8XX_PCMCIA_CD1(slot) (0x08000000 >> (slot << 4))
195#define M8XX_PCMCIA_BVD2(slot) (0x04000000 >> (slot << 4))
196#define M8XX_PCMCIA_BVD1(slot) (0x02000000 >> (slot << 4))
197#define M8XX_PCMCIA_RDY(slot) (0x01000000 >> (slot << 4))
198#define M8XX_PCMCIA_RDY_L(slot) (0x00800000 >> (slot << 4))
199#define M8XX_PCMCIA_RDY_H(slot) (0x00400000 >> (slot << 4))
200#define M8XX_PCMCIA_RDY_R(slot) (0x00200000 >> (slot << 4))
201#define M8XX_PCMCIA_RDY_F(slot) (0x00100000 >> (slot << 4))
202#define M8XX_PCMCIA_MASK(slot) (0xFFFF0000 >> (slot << 4))
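/*
 * Quick illustration of the (slot << 4) trick used above: SLOT_A owns
 * the upper 16 bits of the shared register and SLOT_B the lower 16
 * bits, so for example
 *
 *	M8XX_PCMCIA_CD1(0) == 0x08000000	(card detect 1, slot A)
 *	M8XX_PCMCIA_CD1(1) == 0x00000800	(card detect 1, slot B)
 */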
203
204#define M8XX_PCMCIA_POR_VALID 0x00000001
205#define M8XX_PCMCIA_POR_WRPROT 0x00000002
206#define M8XX_PCMCIA_POR_ATTRMEM 0x00000010
207#define M8XX_PCMCIA_POR_IO 0x00000018
208#define M8XX_PCMCIA_POR_16BIT 0x00000040
209
210#define M8XX_PGCRX(slot) m8xx_pgcrx[slot]
211
212#define M8XX_PGCRX_CXOE 0x00000080
213#define M8XX_PGCRX_CXRESET 0x00000040
214
215/* we keep one lookup table per socket to check flags */
216
217#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */
218
219struct event_table {
220 u32 regbit;
221 u32 eventbit;
222};
223
224struct socket_info {
225 void (*handler)(void *info, u32 events);
226 void *info;
227
228 u32 slot;
229
230 socket_state_t state;
231 struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO];
232 struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
233 struct event_table events[PCMCIA_EVENTS_MAX];
234 struct pcmcia_socket socket;
235};
236
237static struct socket_info socket[PCMCIA_SOCKETS_NO];
238
239/*
240 * Search this table to see if the windowsize is
241 * supported...
242 */
243
244#define M8XX_SIZES_NO 32
245
246static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] =
247{
248 0x00000001, 0x00000002, 0x00000008, 0x00000004,
249 0x00000080, 0x00000040, 0x00000010, 0x00000020,
250 0x00008000, 0x00004000, 0x00001000, 0x00002000,
251 0x00000100, 0x00000200, 0x00000800, 0x00000400,
252
253 0x0fffffff, 0xffffffff, 0xffffffff, 0xffffffff,
254 0x01000000, 0x02000000, 0xffffffff, 0x04000000,
255 0x00010000, 0x00020000, 0x00080000, 0x00040000,
256 0x00800000, 0x00400000, 0x00100000, 0x00200000
257};
258
259/* ------------------------------------------------------------------------- */
260
261static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs);
262
263#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
264
265/* ------------------------------------------------------------------------- */
266/* board specific stuff: */
267/* voltage_set(), hardware_enable() and hardware_disable() */
268/* ------------------------------------------------------------------------- */
269/* RPX Boards from Embedded Planet */
270
271#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
272
273/* The RPX boards seem to have their bus monitor timeout set to 6*8 clocks.
274 * SYPCR is write-once, so the slowest memory must be faster
275 * than the bus monitor or we will get a machine check due to the bus timeout.
276 */
277
278#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE"
279
280#undef PCMCIA_BMT_LIMIT
281#define PCMCIA_BMT_LIMIT (6*8)
282
283static int voltage_set(int slot, int vcc, int vpp)
284{
285 u32 reg = 0;
286
287 switch(vcc) {
288 case 0: break;
289 case 33:
290 reg |= BCSR1_PCVCTL4;
291 break;
292 case 50:
293 reg |= BCSR1_PCVCTL5;
294 break;
295 default:
296 return 1;
297 }
298
299 switch(vpp) {
300 case 0: break;
301 case 33:
302 case 50:
303 if(vcc == vpp)
304 reg |= BCSR1_PCVCTL6;
305 else
306 return 1;
307 break;
308 case 120:
309 reg |= BCSR1_PCVCTL7;
310 default:
311 return 1;
312 }
313
314 if(!((vcc == 50) || (vcc == 0)))
315 return 1;
316
317 /* first, turn off all power */
318
319 out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | BCSR1_PCVCTL5 | BCSR1_PCVCTL6 | BCSR1_PCVCTL7));
320
321 /* enable new powersettings */
322
323 out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) | reg);
324
325 return 0;
326}
327
328#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
329#define hardware_enable(_slot_) /* No hardware to enable */
330#define hardware_disable(_slot_) /* No hardware to disable */
331
332#endif /* CONFIG_RPXCLASSIC */
333
334/* FADS Boards from Motorola */
335
336#if defined(CONFIG_FADS)
337
338#define PCMCIA_BOARD_MSG "FADS"
339
340static int voltage_set(int slot, int vcc, int vpp)
341{
342 u32 reg = 0;
343
344 switch(vcc) {
345 case 0:
346 break;
347 case 33:
348 reg |= BCSR1_PCCVCC0;
349 break;
350 case 50:
351 reg |= BCSR1_PCCVCC1;
352 break;
353 default:
354 return 1;
355 }
356
357 switch(vpp) {
358 case 0:
359 break;
360 case 33:
361 case 50:
362 if(vcc == vpp)
363 reg |= BCSR1_PCCVPP1;
364 else
365 return 1;
366 break;
367 case 120:
368 if ((vcc == 33) || (vcc == 50))
369 reg |= BCSR1_PCCVPP0;
370 else
371 return 1;
372 default:
373 return 1;
374 }
375
376 /* first, turn off all power */
377 out_be32(&((u32 *)BCSR1), in_be32(&((u32 *)BCSR1)) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK));
378
379 /* enable new powersettings */
380 out_be32(&((u32 *)BCSR1), in_be32(&((u32 *)BCSR1)) | reg);
381
382 return 0;
383}
384
385#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
386
387static void hardware_enable(int slot)
388{
389 out_be32(&((u32 *)BCSR1), in_be32(&((u32 *)BCSR1)) & ~BCSR1_PCCEN);
390}
391
392static void hardware_disable(int slot)
393{
394 out_be32(&((u32 *)BCSR1), in_be32(&((u32 *)BCSR1)) | BCSR1_PCCEN);
395}
396
397#endif
398
399/* ------------------------------------------------------------------------- */
400/* Motorola MBX860 */
401
402#if defined(CONFIG_MBX)
403
404#define PCMCIA_BOARD_MSG "MBX"
405
406static int voltage_set(int slot, int vcc, int vpp)
407{
408 u8 reg = 0;
409
410 switch(vcc) {
411 case 0:
412 break;
413 case 33:
414 reg |= CSR2_VCC_33;
415 break;
416 case 50:
417 reg |= CSR2_VCC_50;
418 break;
419 default:
420 return 1;
421 }
422
423 switch(vpp) {
424 case 0:
425 break;
426 case 33:
427 case 50:
428 if(vcc == vpp)
429 reg |= CSR2_VPP_VCC;
430 else
431 return 1;
432 break;
433 case 120:
434 if ((vcc == 33) || (vcc == 50))
435 reg |= CSR2_VPP_12;
436 else
437 return 1;
438 default:
439 return 1;
440 }
441
442 /* first, turn off all power */
443 out_8(&((u8 *)MBX_CSR2_ADDR), in_8(&((u8 *)MBX_CSR2_ADDR)) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
444
445 /* enable new powersettings */
446 out_8(&((u8 *)MBX_CSR2_ADDR), in_8(&((u8 *)MBX_CSR2_ADDR)) | reg);
447
448 return 0;
449}
450
451#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
452#define hardware_enable(_slot_) /* No hardware to enable */
453#define hardware_disable(_slot_) /* No hardware to disable */
454
455#endif /* CONFIG_MBX */
456
457#if defined(CONFIG_PRxK)
458#include <asm/cpld.h>
459extern volatile fpga_pc_regs *fpga_pc;
460
461#define PCMCIA_BOARD_MSG "MPC855T"
462
463static int voltage_set(int slot, int vcc, int vpp)
464{
465 u8 reg = 0;
466 u8 regread;
467 cpld_regs *ccpld = get_cpld();
468
469 switch(vcc) {
470 case 0:
471 break;
472 case 33:
473 reg |= PCMCIA_VCC_33;
474 break;
475 case 50:
476 reg |= PCMCIA_VCC_50;
477 break;
478 default:
479 return 1;
480 }
481
482 switch(vpp) {
483 case 0:
484 break;
485 case 33:
486 case 50:
487 if(vcc == vpp)
488 reg |= PCMCIA_VPP_VCC;
489 else
490 return 1;
491 break;
492 case 120:
493 if ((vcc == 33) || (vcc == 50))
494 reg |= PCMCIA_VPP_12;
495 else
496 return 1;
497 default:
498 return 1;
499 }
500
501 reg = reg >> (slot << 2);
502 regread = in_8(&ccpld->fpga_pc_ctl);
503 if (reg != (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
504 /* enable new powersettings */
505 regread = regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2));
506 out_8(&ccpld->fpga_pc_ctl, reg | regread);
507 msleep(100);
508 }
509
510 return 0;
511}
512
513#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV
514#define hardware_enable(_slot_) /* No hardware to enable */
515#define hardware_disable(_slot_) /* No hardware to disable */
516
517#endif /* CONFIG_PRxK */
518
519static void m8xx_shutdown(void)
520{
521 u32 m, i;
522 struct pcmcia_win *w;
523
524 for(i = 0; i < PCMCIA_SOCKETS_NO; i++){
525 w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
526
527 out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, M8XX_PCMCIA_MASK(i));
528 out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) & ~M8XX_PCMCIA_MASK(i));
529
530 /* turn off interrupt and disable CxOE */
531 out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
532
533 /* turn off memory windows */
534 for(m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
535 out_be32(&w->or, 0); /* set to not valid */
536 w++;
537 }
538
539 /* turn off voltage */
540 voltage_set(i, 0, 0);
541
542 /* disable external hardware */
543 hardware_disable(i);
544 }
545
546 free_irq(pcmcia_schlvl, NULL);
547}
548
549/* copied from tcic.c */
550
551static int m8xx_drv_suspend(struct device *dev, pm_message_t state, u32 level)
552{
553 int ret = 0;
554 if (level == SUSPEND_SAVE_STATE)
555 ret = pcmcia_socket_dev_suspend(dev, state);
556 return ret;
557}
558
559static int m8xx_drv_resume(struct device *dev, u32 level)
560{
561 int ret = 0;
562 if (level == RESUME_RESTORE_STATE)
563 ret = pcmcia_socket_dev_resume(dev);
564 return ret;
565}
566
567static struct device_driver m8xx_driver = {
568 .name = "m8xx-pcmcia",
569 .bus = &platform_bus_type,
570 .suspend = m8xx_drv_suspend,
571 .resume = m8xx_drv_resume,
572};
573
574static struct platform_device m8xx_device = {
575 .name = "m8xx-pcmcia",
576 .id = 0,
577};
578
579static u32 pending_events[PCMCIA_SOCKETS_NO];
580static spinlock_t pending_event_lock = SPIN_LOCK_UNLOCKED;
581
582static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs)
583{
584 struct socket_info *s;
585 struct event_table *e;
586 unsigned int i, events, pscr, pipr, per;
587
588 dprintk("Interrupt!\n");
589 /* get interrupt sources */
590
591 pscr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr);
592 pipr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr);
593 per = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per);
594
595 for(i = 0; i < PCMCIA_SOCKETS_NO; i++) {
596 s = &socket[i];
597 e = &s->events[0];
598 events = 0;
599
600 while(e->regbit) {
601 if(pscr & e->regbit)
602 events |= e->eventbit;
603
604 e++;
605 }
606
607 /*
608 * report only if both card detect signals are the same
609	 * not done too nicely;
610	 * we depend on CD2 being the bit to the left of CD1...
611 */
612 if(events & SS_DETECT)
613 if(((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
614 (pipr & M8XX_PCMCIA_CD1(i)))
615 {
616 events &= ~SS_DETECT;
617 }
618
619#ifdef PCMCIA_GLITCHY_CD
620 /*
621 * I've experienced CD problems with my ADS board.
622 * We make an extra check to see if there was a
623 * real change of Card detection.
624 */
625
626 if((events & SS_DETECT) &&
627 ((pipr &
628 (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
629 (s->state.Vcc | s->state.Vpp)) {
630 events &= ~SS_DETECT;
631 /*printk( "CD glitch workaround - CD = 0x%08x!\n",
632 (pipr & (M8XX_PCMCIA_CD2(i)
633 | M8XX_PCMCIA_CD1(i))));*/
634 }
635#endif
636
637 /* call the handler */
638
639 dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, "
640 "pipr = 0x%08x\n",
641 i, events, pscr, pipr);
642
643 if(events) {
644 spin_lock(&pending_event_lock);
645 pending_events[i] |= events;
646 spin_unlock(&pending_event_lock);
647 /*
648 * Turn off RDY_L bits in the PER mask on
650	 * receipt of a CD interrupt.
650 *
651 * They can generate bad interrupts on the
652 * ACS4,8,16,32. - marcelo
653 */
654 per &= ~M8XX_PCMCIA_RDY_L(0);
655 per &= ~M8XX_PCMCIA_RDY_L(1);
656
657 out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, per);
658
659 if (events)
660 pcmcia_parse_events(&socket[i].socket, events);
661 }
662 }
663
664 /* clear the interrupt sources */
665 out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, pscr);
666
667 dprintk("Interrupt done.\n");
668
669 return IRQ_HANDLED;
670}
671
672static u32 m8xx_get_graycode(u32 size)
673{
674 u32 k;
675
676 for(k = 0; k < M8XX_SIZES_NO; k++)
677 if(m8xx_size_to_gray[k] == size)
678 break;
679
680 if((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
681 k = -1;
682
683 return k;
684}
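/*
 * Usage sketch: the index returned here is the gray-coded window-size
 * value that m8xx_set_io_map()/m8xx_set_mem_map() later shift into the
 * option register with "reg <<= 27".  For the fixed 64 MByte memory
 * window, for instance,
 *
 *	m8xx_get_graycode(0x04000000) == 23
 *
 * while an unsupported size yields -1.
 */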
685
686static u32 m8xx_get_speed(u32 ns, u32 is_io)
687{
688 u32 reg, clocks, psst, psl, psht;
689
690 if(!ns) {
691
692 /*
693 * We get called with IO maps setup to 0ns
694 * if not specified by the user.
695 * They should be 255ns.
696 */
697
698 if(is_io)
699 ns = 255;
700 else
701 ns = 100; /* fast memory if 0 */
702 }
703
704 /*
705 * In PSST, PSL, PSHT fields we tell the controller
706 * timing parameters in CLKOUT clock cycles.
707 * CLKOUT is the same as GCLK2_50.
708 */
709
710/* how we want to adjust the timing - in percent */
711
712#define ADJ 180 /* 80 % longer accesstime - to be sure */
713
714 clocks = ((M8XX_BUSFREQ / 1000) * ns) / 1000;
715 clocks = (clocks * ADJ) / (100*1000);
716 if(clocks >= PCMCIA_BMT_LIMIT) {
717 printk( "Max access time limit reached\n");
718 clocks = PCMCIA_BMT_LIMIT-1;
719 }
720
721 psst = clocks / 7; /* setup time */
722 psht = clocks / 7; /* hold time */
723 psl = (clocks * 5) / 7; /* strobe length */
724
725 psst += clocks - (psst + psht + psl);
726
727 reg = psst << 12;
728 reg |= psl << 7;
729 reg |= psht << 16;
730
731 return reg;
732}
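/*
 * Worked example, assuming a 50 MHz bus clock (M8XX_BUSFREQ == 50000000)
 * and the default 255 ns I/O timing:
 *
 *	clocks = ((50000000 / 1000) * 255) / 1000 = 12750
 *	clocks = (12750 * 180) / (100 * 1000)     = 22
 *
 * which is well below PCMCIA_BMT_LIMIT and splits into psst = 3,
 * psht = 3, psl = 15, with the leftover cycle added back to psst.
 */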
733
734static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
735{
736 int lsock = container_of(sock, struct socket_info, socket)->slot;
737 struct socket_info *s = &socket[lsock];
738 unsigned int pipr, reg;
739
740 pipr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr);
741
742 *value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
743 | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
744 *value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0;
745
746 if (s->state.flags & SS_IOCARD)
747 *value |= (pipr & M8XX_PCMCIA_BVD1(lsock)) ? SS_STSCHG : 0;
748 else {
749 *value |= (pipr & M8XX_PCMCIA_RDY(lsock)) ? SS_READY : 0;
750 *value |= (pipr & M8XX_PCMCIA_BVD1(lsock)) ? SS_BATDEAD : 0;
751 *value |= (pipr & M8XX_PCMCIA_BVD2(lsock)) ? SS_BATWARN : 0;
752 }
753
754 if (s->state.Vcc | s->state.Vpp)
755 *value |= SS_POWERON;
756
757 /*
758 * Voltage detection:
759 * This driver only supports 16-Bit pc-cards.
760 * Cardbus is not handled here.
761 *
762 * To determine what voltage to use we must read the VS1 and VS2 pin.
763 * Depending on what socket type is present,
764 * different combinations mean different things.
765 *
766 * Card Key Socket Key VS1 VS2 Card Vcc for CIS parse
767 *
768 * 5V 5V, LV* NC NC 5V only 5V (if available)
769 *
770 * 5V 5V, LV* GND NC 5 or 3.3V as low as possible
771 *
772 * 5V 5V, LV* GND GND 5, 3.3, x.xV as low as possible
773 *
774 * LV* 5V - - shall not fit into socket
775 *
776 * LV* LV* GND NC 3.3V only 3.3V
777 *
778 * LV* LV* NC GND x.xV x.xV (if avail.)
779 *
780 * LV* LV* GND GND 3.3 or x.xV as low as possible
781 *
782 * *LV means Low Voltage
783 *
784 *
785 * That gives us the following table:
786 *
787 * Socket VS1 VS2 Voltage
788 *
789 * 5V NC NC 5V
790 * 5V NC GND none (should not be possible)
791 * 5V GND NC >= 3.3V
792 * 5V GND GND >= x.xV
793 *
794 * LV NC NC 5V (if available)
795 * LV NC GND x.xV (if available)
796 * LV GND NC 3.3V
797 * LV GND GND >= x.xV
798 *
799 * So, how do I determine if I have a 5V or a LV
800 * socket on my board? Look at the socket!
801 *
802 *
803 * Socket with 5V key:
804 * ++--------------------------------------------+
805 * || |
806 * || ||
807 * || ||
808 * | |
809 * +---------------------------------------------+
810 *
811 * Socket with LV key:
812 * ++--------------------------------------------+
813 * || |
814 * | ||
815 * | ||
816 * | |
817 * +---------------------------------------------+
818 *
819 *
820 * In other words - LV-only cards do not fit
821 * into the 5V socket!
822 */
823
824 /* read out VS1 and VS2 */
825
826 reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock))
827 >> M8XX_PCMCIA_VS_SHIFT(lsock);
828
829 if(socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
830 switch(reg) {
831 case 1:
832 *value |= SS_3VCARD;
833 break; /* GND, NC - 3.3V only */
834 case 2:
835 *value |= SS_XVCARD;
836 break; /* NC. GND - x.xV only */
837 };
838 }
839
840 dprintk("GetStatus(%d) = %#2.2x\n", lsock, *value);
841 return 0;
842}
843
844static int m8xx_get_socket(struct pcmcia_socket *sock, socket_state_t *state)
845{
846 int lsock = container_of(sock, struct socket_info, socket)->slot;
847 *state = socket[lsock].state; /* copy the whole structure */
848
849 dprintk("GetSocket(%d) = flags %#3.3x, Vcc %d, Vpp %d, "
850 "io_irq %d, csc_mask %#2.2x\n", lsock, state->flags,
851 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
852 return 0;
853}
854
855static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
856{
857 int lsock = container_of(sock, struct socket_info, socket)->slot;
858 struct socket_info *s = &socket[lsock];
859 struct event_table *e;
860 unsigned int reg;
861 unsigned long flags;
862
863 dprintk( "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
864 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
865 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
866
867 /* First, set voltage - bail out if invalid */
868 if(voltage_set(lsock, state->Vcc, state->Vpp))
869 return -EINVAL;
870
871 /* Take care of reset... */
872 if(state->flags & SS_RESET)
873 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */
874 else
875 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
876
877 /* ... and output enable. */
878
879 /* The CxOE signal is connected to a 74541 on the ADS.
880 I guess most other boards used the ADS as a reference.
881 I tried to control the CxOE signal with SS_OUTPUT_ENA,
882 but the reset signal seems connected via the 541.
883	   If the CxOE is left high, some signals are tristated and
884	   no pullups are present -> the cards act weird.
885 So right now the buffers are enabled if the power is on. */
886
887 if(state->Vcc || state->Vpp)
888 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
889 else
890 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
891
892 /*
893 * We'd better turn off interrupts before
894 * we mess with the events-table..
895 */
896
897 spin_lock_irqsave(&events_lock, flags);
898
899 /*
900 * Play around with the interrupt mask to be able to
901 * give the events the generic pcmcia driver wants us to.
902 */
903
904 e = &s->events[0];
905 reg = 0;
906
907 if(state->csc_mask & SS_DETECT) {
908 e->eventbit = SS_DETECT;
909 reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock)
910 | M8XX_PCMCIA_CD1(lsock));
911 e++;
912 }
913 if(state->flags & SS_IOCARD) {
914 /*
915 * I/O card
916 */
917 if(state->csc_mask & SS_STSCHG) {
918 e->eventbit = SS_STSCHG;
919 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
920 e++;
921 }
922 /*
923 * If io_irq is non-zero we should enable irq.
924 */
925 if(state->io_irq) {
926 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | mk_int_int_mask(state->io_irq) << 24);
927 /*
928 * Strange thing here:
929 * The manual does not tell us which interrupt
930 * the sources generate.
931 * Anyhow, I found out that RDY_L generates IREQLVL.
932 *
933			 * We use level-triggered interrupts, and they don't
934 * have to be cleared in PSCR in the interrupt handler.
935 */
936 reg |= M8XX_PCMCIA_RDY_L(lsock);
937 }
938 else
939 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
940 }
941 else {
942 /*
943 * Memory card
944 */
945 if(state->csc_mask & SS_BATDEAD) {
946 e->eventbit = SS_BATDEAD;
947 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
948 e++;
949 }
950 if(state->csc_mask & SS_BATWARN) {
951 e->eventbit = SS_BATWARN;
952 reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock);
953 e++;
954 }
955		/* What should I trigger on - low/high, rise, fall? */
956 if(state->csc_mask & SS_READY) {
957 e->eventbit = SS_READY;
958 reg |= e->regbit = 0; //??
959 e++;
960 }
961 }
962
963 e->regbit = 0; /* terminate list */
964
965 /*
966	 * Clear the status change bits.
967 * Port A and Port B share the same port.
968 * Writing ones will clear the bits.
969 */
970
971 out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, reg);
972
973 /*
974 * Write the mask.
975 * Port A and Port B share the same port.
976 * Need for read-modify-write.
977 * Ones will enable the interrupt.
978 */
979
980 /*
981 reg |= ((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per
982 & M8XX_PCMCIA_MASK(lsock);
983 */
984
985 reg |= in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) &
986 (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
987
988 out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, reg);
989
990 spin_unlock_irqrestore(&events_lock, flags);
991
992 /* copy the struct and modify the copy */
993
994 s->state = *state;
995
996 return 0;
997}
998
999static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
1000{
1001 int lsock = container_of(sock, struct socket_info, socket)->slot;
1002
1003 struct socket_info *s = &socket[lsock];
1004 struct pcmcia_win *w;
1005 unsigned int reg, winnr;
1006
1007#define M8XX_SIZE (io->stop - io->start + 1)
1008#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
1009
1010 dprintk( "SetIOMap(%d, %d, %#2.2x, %d ns, "
1011 "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
1012 io->speed, io->start, io->stop);
1013
1014 if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff)
1015 || (io->stop > 0xffff) || (io->stop < io->start))
1016 return -EINVAL;
1017
1018 if((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
1019 return -EINVAL;
1020
1021 if(io->flags & MAP_ACTIVE) {
1022
1023 dprintk( "io->flags & MAP_ACTIVE\n");
1024
1025 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
1026 + (lsock * PCMCIA_IO_WIN_NO) + io->map;
1027
1028 /* setup registers */
1029
1030 w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
1031 w += winnr;
1032
1033 out_be32(&w->or, 0); /* turn off window first */
1034 out_be32(&w->br, M8XX_BASE);
1035
1036 reg <<= 27;
1037 reg |= M8XX_PCMCIA_POR_IO |(lsock << 2);
1038
1039 reg |= m8xx_get_speed(io->speed, 1);
1040
1041 if(io->flags & MAP_WRPROT)
1042 reg |= M8XX_PCMCIA_POR_WRPROT;
1043
1044 /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ))*/
1045 if(io->flags & MAP_16BIT)
1046 reg |= M8XX_PCMCIA_POR_16BIT;
1047
1048 if(io->flags & MAP_ACTIVE)
1049 reg |= M8XX_PCMCIA_POR_VALID;
1050
1051 out_be32(&w->or, reg);
1052
1053 dprintk("Socket %u: Mapped io window %u at %#8.8x, "
1054 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
1055 } else {
1056 /* shutdown IO window */
1057 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
1058 + (lsock * PCMCIA_IO_WIN_NO) + io->map;
1059
1060 /* setup registers */
1061
1062 w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
1063 w += winnr;
1064
1065 out_be32(&w->or, 0); /* turn off window */
1066 out_be32(&w->br, 0); /* turn off base address */
1067
1068 dprintk("Socket %u: Unmapped io window %u at %#8.8x, "
1069 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
1070 }
1071
1072 /* copy the struct and modify the copy */
1073 s->io_win[io->map] = *io;
1074 s->io_win[io->map].flags &= (MAP_WRPROT
1075 | MAP_16BIT
1076 | MAP_ACTIVE);
1077 dprintk("SetIOMap exit\n");
1078
1079 return 0;
1080}
1081
1082static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem)
1083{
1084 int lsock = container_of(sock, struct socket_info, socket)->slot;
1085 struct socket_info *s = &socket[lsock];
1086 struct pcmcia_win *w;
1087 struct pccard_mem_map *old;
1088 unsigned int reg, winnr;
1089
1090 dprintk( "SetMemMap(%d, %d, %#2.2x, %d ns, "
1091 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
1092 mem->speed, mem->static_start, mem->card_start);
1093
1094 if ((mem->map >= PCMCIA_MEM_WIN_NO)
1095// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
1096 || (mem->card_start >= 0x04000000)
1097 || (mem->static_start & 0xfff) /* 4KByte resolution */
1098 || (mem->card_start & 0xfff))
1099 return -EINVAL;
1100
1101 if((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
1102 printk( "Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
1103 return -EINVAL;
1104 }
1105 reg <<= 27;
1106
1107 winnr = (lsock * PCMCIA_MEM_WIN_NO) + mem->map;
1108
1109 /* Setup the window in the pcmcia controller */
1110
1111 w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
1112 w += winnr;
1113
1114 reg |= lsock << 2;
1115
1116 reg |= m8xx_get_speed(mem->speed, 0);
1117
1118 if(mem->flags & MAP_ATTRIB)
1119 reg |= M8XX_PCMCIA_POR_ATTRMEM;
1120
1121 if(mem->flags & MAP_WRPROT)
1122 reg |= M8XX_PCMCIA_POR_WRPROT;
1123
1124 if(mem->flags & MAP_16BIT)
1125 reg |= M8XX_PCMCIA_POR_16BIT;
1126
1127 if(mem->flags & MAP_ACTIVE)
1128 reg |= M8XX_PCMCIA_POR_VALID;
1129
1130 out_be32(&w->or, reg);
1131
1132 dprintk("Socket %u: Mapped memory window %u at %#8.8x, "
1133 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
1134
1135 if(mem->flags & MAP_ACTIVE) {
1136 /* get the new base address */
1137 mem->static_start = PCMCIA_MEM_WIN_BASE +
1138 (PCMCIA_MEM_WIN_SIZE * winnr)
1139 + mem->card_start;
1140 }
1141
1142 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
1143 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
1144 mem->speed, mem->static_start, mem->card_start);
1145
1146 /* copy the struct and modify the copy */
1147
1148 old = &s->mem_win[mem->map];
1149
1150 *old = *mem;
1151 old->flags &= (MAP_ATTRIB
1152 | MAP_WRPROT
1153 | MAP_16BIT
1154 | MAP_ACTIVE);
1155
1156 return 0;
1157}
1158
1159static int m8xx_sock_init(struct pcmcia_socket *sock)
1160{
1161 int i;
1162 pccard_io_map io = { 0, 0, 0, 0, 1 };
1163 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
1164
1165	dprintk("sock_init(%d)\n", container_of(sock, struct socket_info, socket)->slot);
1166
1167 m8xx_set_socket(sock, &dead_socket);
1168 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
1169 io.map = i;
1170 m8xx_set_io_map(sock, &io);
1171 }
1172 for (i = 0; i < PCMCIA_MEM_WIN_NO; i++) {
1173 mem.map = i;
1174 m8xx_set_mem_map(sock, &mem);
1175 }
1176
1177 return 0;
1178
1179}
1180
1181static int m8xx_suspend(struct pcmcia_socket *sock)
1182{
1183 return m8xx_set_socket(sock, &dead_socket);
1184}
1185
1186static struct pccard_operations m8xx_services = {
1187 .init = m8xx_sock_init,
1188 .suspend = m8xx_suspend,
1189 .get_status = m8xx_get_status,
1190 .get_socket = m8xx_get_socket,
1191 .set_socket = m8xx_set_socket,
1192 .set_io_map = m8xx_set_io_map,
1193 .set_mem_map = m8xx_set_mem_map,
1194};
1195
1196static int __init m8xx_init(void)
1197{
1198 struct pcmcia_win *w;
1199 unsigned int i,m;
1200
1201 pcmcia_info("%s\n", version);
1202
1203 if (driver_register(&m8xx_driver))
1204 return -1;
1205
1206 pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG
1207 " with IRQ %u.\n", pcmcia_schlvl);
1208
1209 /* Configure Status change interrupt */
1210
1211 if(request_irq(pcmcia_schlvl, m8xx_interrupt, 0,
1212 "m8xx_pcmcia", NULL)) {
1213 pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
1214 pcmcia_schlvl);
1215 return -1;
1216 }
1217
1218 w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
1219
1220 out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr,
1221 M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1));
1222
1223 out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per,
1224 in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) &
1225 ~(M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1)));
1226
1227/* connect interrupt and disable CxOE */
1228
1229 out_be32(M8XX_PGCRX(0), M8XX_PGCRX_CXOE | (mk_int_int_mask(pcmcia_schlvl) << 16));
1230 out_be32(M8XX_PGCRX(1), M8XX_PGCRX_CXOE | (mk_int_int_mask(pcmcia_schlvl) << 16));
1231
1232/* initialize the fixed memory windows */
1233
1234 for(i = 0; i < PCMCIA_SOCKETS_NO; i++){
1235 for(m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
1236 out_be32(&w->br, PCMCIA_MEM_WIN_BASE +
1237 (PCMCIA_MEM_WIN_SIZE
1238 * (m + i * PCMCIA_MEM_WIN_NO)));
1239
1240 out_be32(&w->or, 0); /* set to not valid */
1241
1242 w++;
1243 }
1244 }
1245
1246/* turn off voltage */
1247 voltage_set(0, 0, 0);
1248 voltage_set(1, 0, 0);
1249
1250/* Enable external hardware */
1251 hardware_enable(0);
1252 hardware_enable(1);
1253
1254 platform_device_register(&m8xx_device);
1255
1256 for (i = 0 ; i < PCMCIA_SOCKETS_NO; i++) {
1257 socket[i].slot = i;
1258 socket[i].socket.owner = THIS_MODULE;
1259 socket[i].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
1260 socket[i].socket.irq_mask = 0x000;
1261 socket[i].socket.map_size = 0x1000;
1262 socket[i].socket.io_offset = 0;
1263 socket[i].socket.pci_irq = i ? 7 : 9;
1264 socket[i].socket.ops = &m8xx_services;
1265 socket[i].socket.resource_ops = &pccard_nonstatic_ops;
1266 socket[i].socket.cb_dev = NULL;
1267 socket[i].socket.dev.dev = &m8xx_device.dev;
1268 }
1269
1270 for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
1271 pcmcia_register_socket(&socket[i].socket);
1272
1273 return 0;
1274}
1275
1276static void __exit m8xx_exit(void)
1277{
1278 int i;
1279
1280 for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
1281 pcmcia_unregister_socket(&socket[i].socket);
1282
1283 m8xx_shutdown();
1284
1285 platform_device_unregister(&m8xx_device);
1286 driver_unregister(&m8xx_driver);
1287}
1288
1289module_init(m8xx_init);
1290module_exit(m8xx_exit);
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index a1178a600e3c..bd924336a49f 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -18,10 +18,15 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/device.h> 19#include <linux/device.h>
20 20
21#include <asm/mach-types.h>
21#include <asm/hardware.h> 22#include <asm/hardware.h>
22#include <asm/irq.h> 23#include <asm/irq.h>
23#include <asm/hardware/scoop.h> 24#include <asm/hardware/scoop.h>
24#include <asm/arch/pxa-regs.h> 25#ifdef CONFIG_SA1100_COLLIE
26#include <asm/arch-sa1100/collie.h>
27#else
28#include <asm/arch-pxa/pxa-regs.h>
29#endif
25 30
26#include "soc_common.h" 31#include "soc_common.h"
27 32
@@ -38,6 +43,7 @@ static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
38{ 43{
39 int ret; 44 int ret;
40 45
46#ifndef CONFIG_SA1100_COLLIE
41 /* 47 /*
42 * Setup default state of GPIO outputs 48 * Setup default state of GPIO outputs
43 * before we enable them as outputs. 49 * before we enable them as outputs.
@@ -60,6 +66,7 @@ static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
60 pxa_gpio_mode(GPIO55_nPREG_MD); 66 pxa_gpio_mode(GPIO55_nPREG_MD);
61 pxa_gpio_mode(GPIO56_nPWAIT_MD); 67 pxa_gpio_mode(GPIO56_nPWAIT_MD);
62 pxa_gpio_mode(GPIO57_nIOIS16_MD); 68 pxa_gpio_mode(GPIO57_nIOIS16_MD);
69#endif
63 70
64 /* Register interrupts */ 71 /* Register interrupts */
65 if (scoop_devs[skt->nr].cd_irq >= 0) { 72 if (scoop_devs[skt->nr].cd_irq >= 0) {
@@ -213,12 +220,20 @@ static void sharpsl_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
213 write_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_IMR, 0x00C0); 220 write_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_IMR, 0x00C0);
214 write_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_MCR, 0x0101); 221 write_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_MCR, 0x0101);
215 scoop_devs[skt->nr].keep_vs = NO_KEEP_VS; 222 scoop_devs[skt->nr].keep_vs = NO_KEEP_VS;
223
224 if (machine_is_collie())
225 /* We need to disable SS_OUTPUT_ENA here. */
226 write_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_CPR, read_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_CPR) & ~0x0080);
216} 227}
217 228
218static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) 229static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
219{ 230{
220 /* CF_BUS_OFF */ 231 /* CF_BUS_OFF */
221 sharpsl_pcmcia_init_reset(&scoop_devs[skt->nr]); 232 sharpsl_pcmcia_init_reset(&scoop_devs[skt->nr]);
233
234 if (machine_is_collie())
235 /* We need to disable SS_OUTPUT_ENA here. */
236 write_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_CPR, read_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_CPR) & ~0x0080);
222} 237}
223 238
224static struct pcmcia_low_level sharpsl_pcmcia_ops = { 239static struct pcmcia_low_level sharpsl_pcmcia_ops = {
@@ -235,6 +250,19 @@ static struct pcmcia_low_level sharpsl_pcmcia_ops = {
235 250
236static struct platform_device *sharpsl_pcmcia_device; 251static struct platform_device *sharpsl_pcmcia_device;
237 252
253#ifdef CONFIG_SA1100_COLLIE
254int __init pcmcia_collie_init(struct device *dev)
255{
256 int ret = -ENODEV;
257
258 if (machine_is_collie())
259 ret = sa11xx_drv_pcmcia_probe(dev, &sharpsl_pcmcia_ops, 0, 1);
260
261 return ret;
262}
263
264#else
265
238static int __init sharpsl_pcmcia_init(void) 266static int __init sharpsl_pcmcia_init(void)
239{ 267{
240 int ret; 268 int ret;
@@ -269,6 +297,7 @@ static void __exit sharpsl_pcmcia_exit(void)
269 297
270fs_initcall(sharpsl_pcmcia_init); 298fs_initcall(sharpsl_pcmcia_init);
271module_exit(sharpsl_pcmcia_exit); 299module_exit(sharpsl_pcmcia_exit);
300#endif
272 301
273MODULE_DESCRIPTION("Sharp SL Series PCMCIA Support"); 302MODULE_DESCRIPTION("Sharp SL Series PCMCIA Support");
274MODULE_LICENSE("GPL"); 303MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index fc87e7e2b6b8..00960a379b9c 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -779,7 +779,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
779 if (!s->cb_dev || !s->cb_dev->bus) 779 if (!s->cb_dev || !s->cb_dev->bus)
780 return -ENODEV; 780 return -ENODEV;
781 781
782#if defined(CONFIG_X86) || defined(CONFIG_X86_64) 782#if defined(CONFIG_X86)
783 /* If this is the root bus, the risk of hitting 783 /* If this is the root bus, the risk of hitting
784 * some strange system devices which aren't protected 784 * some strange system devices which aren't protected
785 * by either ACPI resource tables or properly requested 785 * by either ACPI resource tables or properly requested
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index b768fa81f043..acf60ffc8a12 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -38,8 +38,12 @@
38#include <pcmcia/cs.h> 38#include <pcmcia/cs.h>
39#include <pcmcia/ss.h> 39#include <pcmcia/ss.h>
40 40
41#include <asm/hardware/scoop.h>
42
41#include "sa1100_generic.h" 43#include "sa1100_generic.h"
42 44
45int __init pcmcia_collie_init(struct device *dev);
46
43static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = { 47static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = {
44#ifdef CONFIG_SA1100_ASSABET 48#ifdef CONFIG_SA1100_ASSABET
45 pcmcia_assabet_init, 49 pcmcia_assabet_init,
@@ -56,6 +60,9 @@ static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = {
56#ifdef CONFIG_SA1100_SIMPAD 60#ifdef CONFIG_SA1100_SIMPAD
57 pcmcia_simpad_init, 61 pcmcia_simpad_init,
58#endif 62#endif
63#ifdef CONFIG_SA1100_COLLIE
64 pcmcia_collie_init,
65#endif
59}; 66};
60 67
61static int sa11x0_drv_pcmcia_probe(struct device *dev) 68static int sa11x0_drv_pcmcia_probe(struct device *dev)
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 94442ffd4aed..cbb2749db178 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -12,6 +12,8 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pnp.h> 14#include <linux/pnp.h>
15#include <linux/slab.h>
16#include <linux/bitmap.h>
15#include "base.h" 17#include "base.h"
16 18
17DECLARE_MUTEX(pnp_res_mutex); 19DECLARE_MUTEX(pnp_res_mutex);
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index b0ca65b68645..5e38cd7335f7 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -7,6 +7,8 @@
7#include <linux/ctype.h> 7#include <linux/ctype.h>
8#include <linux/pnp.h> 8#include <linux/pnp.h>
9#include <linux/pnpbios.h> 9#include <linux/pnpbios.h>
10#include <linux/string.h>
11#include <linux/slab.h>
10 12
11#ifdef CONFIG_PCI 13#ifdef CONFIG_PCI
12#include <linux/pci.h> 14#include <linux/pci.h>
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index fc7a213e591f..c570a9f6ce9c 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -213,6 +213,9 @@ con3270_update(struct con3270 *cp)
213 struct string *s, *n; 213 struct string *s, *n;
214 int rc; 214 int rc;
215 215
216 if (cp->view.dev)
217 raw3270_activate_view(&cp->view);
218
216 wrq = xchg(&cp->write, 0); 219 wrq = xchg(&cp->write, 0);
217 if (!wrq) { 220 if (!wrq) {
218 con3270_set_timer(cp, 1); 221 con3270_set_timer(cp, 1);
@@ -489,8 +492,6 @@ con3270_write(struct console *co, const char *str, unsigned int count)
489 unsigned char c; 492 unsigned char c;
490 493
491 cp = condev; 494 cp = condev;
492 if (cp->view.dev)
493 raw3270_activate_view(&cp->view);
494 spin_lock_irqsave(&cp->view.lock, flags); 495 spin_lock_irqsave(&cp->view.lock, flags);
495 while (count-- > 0) { 496 while (count-- > 0) {
496 c = *str++; 497 c = *str++;
@@ -620,7 +621,7 @@ con3270_init(void)
620 (void (*)(unsigned long)) con3270_read_tasklet, 621 (void (*)(unsigned long)) con3270_read_tasklet,
621 (unsigned long) condev->read); 622 (unsigned long) condev->read);
622 623
623 raw3270_add_view(&condev->view, &con3270_fn, 0); 624 raw3270_add_view(&condev->view, &con3270_fn, 1);
624 625
625 INIT_LIST_HEAD(&condev->freemem); 626 INIT_LIST_HEAD(&condev->freemem);
626 for (i = 0; i < CON3270_STRING_PAGES; i++) { 627 for (i = 0; i < CON3270_STRING_PAGES; i++) {
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 60afcdcf91c2..735a7fcdeff5 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -33,8 +33,11 @@ struct fs3270 {
33 int read_command; /* ccw command to use for reads. */ 33 int read_command; /* ccw command to use for reads. */
34 int write_command; /* ccw command to use for writes. */ 34 int write_command; /* ccw command to use for writes. */
35 int attention; /* Got attention. */ 35 int attention; /* Got attention. */
36 struct raw3270_request *clear; /* single clear request. */ 36 int active; /* Fullscreen view is active. */
37 wait_queue_head_t attn_wait; /* Attention wait queue. */ 37 struct raw3270_request *init; /* single init request. */
38 wait_queue_head_t wait; /* Init & attention wait queue. */
39 struct idal_buffer *rdbuf; /* full-screen-deactivate buffer */
40 size_t rdbuf_size; /* size of data returned by RDBUF */
38}; 41};
39 42
40static void 43static void
@@ -43,58 +46,172 @@ fs3270_wake_up(struct raw3270_request *rq, void *data)
43 wake_up((wait_queue_head_t *) data); 46 wake_up((wait_queue_head_t *) data);
44} 47}
45 48
49static inline int
50fs3270_working(struct fs3270 *fp)
51{
52 /*
53 * The fullscreen view is in working order if the view
54 * has been activated AND the initial request is finished.
55 */
56 return fp->active && raw3270_request_final(fp->init);
57}
58
46static int 59static int
47fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq) 60fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
48{ 61{
49 wait_queue_head_t wq; 62 struct fs3270 *fp;
50 int rc; 63 int rc;
51 64
52 init_waitqueue_head(&wq); 65 fp = (struct fs3270 *) view;
53 rq->callback = fs3270_wake_up; 66 rq->callback = fs3270_wake_up;
54 rq->callback_data = &wq; 67 rq->callback_data = &fp->wait;
55 rc = raw3270_start(view, rq); 68
56 if (rc) 69 do {
57 return rc; 70 if (!fs3270_working(fp)) {
58 /* Started sucessfully. Now wait for completion. */ 71 /* Fullscreen view isn't ready yet. */
59 wait_event(wq, raw3270_request_final(rq)); 72 rc = wait_event_interruptible(fp->wait,
60 return rq->rc; 73 fs3270_working(fp));
74 if (rc != 0)
75 break;
76 }
77 rc = raw3270_start(view, rq);
78 if (rc == 0) {
 79 /* Started successfully. Now wait for completion. */
80 wait_event(fp->wait, raw3270_request_final(rq));
81 }
82 } while (rc == -EACCES);
83 return rc;
61} 84}
62 85
86/*
87 * Switch to the fullscreen view.
88 */
63static void 89static void
64fs3270_reset_callback(struct raw3270_request *rq, void *data) 90fs3270_reset_callback(struct raw3270_request *rq, void *data)
65{ 91{
92 struct fs3270 *fp;
93
94 fp = (struct fs3270 *) rq->view;
66 raw3270_request_reset(rq); 95 raw3270_request_reset(rq);
96 wake_up(&fp->wait);
97}
98
99static void
100fs3270_restore_callback(struct raw3270_request *rq, void *data)
101{
102 struct fs3270 *fp;
103
104 fp = (struct fs3270 *) rq->view;
105 if (rq->rc != 0 || rq->rescnt != 0) {
106 if (fp->fs_pid)
107 kill_proc(fp->fs_pid, SIGHUP, 1);
108 }
109 fp->rdbuf_size = 0;
110 raw3270_request_reset(rq);
111 wake_up(&fp->wait);
67} 112}
68 113
69/*
70 * Switch to the fullscreen view.
71 */
72static int 114static int
73fs3270_activate(struct raw3270_view *view) 115fs3270_activate(struct raw3270_view *view)
74{ 116{
75 struct fs3270 *fp; 117 struct fs3270 *fp;
118 char *cp;
119 int rc;
76 120
77 fp = (struct fs3270 *) view; 121 fp = (struct fs3270 *) view;
78 raw3270_request_set_cmd(fp->clear, TC_EWRITEA); 122
79 fp->clear->callback = fs3270_reset_callback; 123 /* If an old init command is still running just return. */
80 return raw3270_start(view, fp->clear); 124 if (!raw3270_request_final(fp->init))
125 return 0;
126
127 if (fp->rdbuf_size == 0) {
128 /* No saved buffer. Just clear the screen. */
129 raw3270_request_set_cmd(fp->init, TC_EWRITEA);
130 fp->init->callback = fs3270_reset_callback;
131 } else {
132 /* Restore fullscreen buffer saved by fs3270_deactivate. */
133 raw3270_request_set_cmd(fp->init, TC_EWRITEA);
134 raw3270_request_set_idal(fp->init, fp->rdbuf);
135 fp->init->ccw.count = fp->rdbuf_size;
136 cp = fp->rdbuf->data[0];
137 cp[0] = TW_KR;
138 cp[1] = TO_SBA;
139 cp[2] = cp[6];
140 cp[3] = cp[7];
141 cp[4] = TO_IC;
142 cp[5] = TO_SBA;
143 cp[6] = 0x40;
144 cp[7] = 0x40;
145 fp->init->rescnt = 0;
146 fp->init->callback = fs3270_restore_callback;
147 }
148 rc = fp->init->rc = raw3270_start_locked(view, fp->init);
149 if (rc)
150 fp->init->callback(fp->init, NULL);
151 else
152 fp->active = 1;
153 return rc;
81} 154}
82 155
83/* 156/*
84 * Shutdown fullscreen view. 157 * Shutdown fullscreen view.
85 */ 158 */
86static void 159static void
160fs3270_save_callback(struct raw3270_request *rq, void *data)
161{
162 struct fs3270 *fp;
163
164 fp = (struct fs3270 *) rq->view;
165
166 /* Correct idal buffer element 0 address. */
167 fp->rdbuf->data[0] -= 5;
168 fp->rdbuf->size += 5;
169
170 /*
171 * If the rdbuf command failed or the idal buffer is
 172 * too small for the amount of data returned by the
173 * rdbuf command, then we have no choice but to send
174 * a SIGHUP to the application.
175 */
176 if (rq->rc != 0 || rq->rescnt == 0) {
177 if (fp->fs_pid)
178 kill_proc(fp->fs_pid, SIGHUP, 1);
179 fp->rdbuf_size = 0;
180 } else
181 fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
182 raw3270_request_reset(rq);
183 wake_up(&fp->wait);
184}
185
186static void
87fs3270_deactivate(struct raw3270_view *view) 187fs3270_deactivate(struct raw3270_view *view)
88{ 188{
89 // FIXME: is this a good idea? The user program using fullscreen 3270
90 // will die just because a console message appeared. On the other
91 // hand the fullscreen device is unoperational now.
92 struct fs3270 *fp; 189 struct fs3270 *fp;
93 190
94 fp = (struct fs3270 *) view; 191 fp = (struct fs3270 *) view;
95 if (fp->fs_pid != 0) 192 fp->active = 0;
96 kill_proc(fp->fs_pid, SIGHUP, 1); 193
97 fp->fs_pid = 0; 194 /* If an old init command is still running just return. */
195 if (!raw3270_request_final(fp->init))
196 return;
197
198 /* Prepare read-buffer request. */
199 raw3270_request_set_cmd(fp->init, TC_RDBUF);
200 /*
201 * Hackish: skip first 5 bytes of the idal buffer to make
202 * room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
203 * in the activation command.
204 */
205 fp->rdbuf->data[0] += 5;
206 fp->rdbuf->size -= 5;
207 raw3270_request_set_idal(fp->init, fp->rdbuf);
208 fp->init->rescnt = 0;
209 fp->init->callback = fs3270_save_callback;
210
211 /* Start I/O to read in the 3270 buffer. */
212 fp->init->rc = raw3270_start_locked(view, fp->init);
213 if (fp->init->rc)
214 fp->init->callback(fp->init, NULL);
98} 215}
99 216
100static int 217static int
@@ -103,7 +220,7 @@ fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
103 /* Handle ATTN. Set indication and wake waiters for attention. */ 220 /* Handle ATTN. Set indication and wake waiters for attention. */
104 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 221 if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
105 fp->attention = 1; 222 fp->attention = 1;
106 wake_up(&fp->attn_wait); 223 wake_up(&fp->wait);
107 } 224 }
108 225
109 if (rq) { 226 if (rq) {
@@ -125,7 +242,7 @@ fs3270_read(struct file *filp, char *data, size_t count, loff_t *off)
125 struct fs3270 *fp; 242 struct fs3270 *fp;
126 struct raw3270_request *rq; 243 struct raw3270_request *rq;
127 struct idal_buffer *ib; 244 struct idal_buffer *ib;
128 int rc; 245 ssize_t rc;
129 246
130 if (count == 0 || count > 65535) 247 if (count == 0 || count > 65535)
131 return -EINVAL; 248 return -EINVAL;
@@ -133,7 +250,7 @@ fs3270_read(struct file *filp, char *data, size_t count, loff_t *off)
133 if (!fp) 250 if (!fp)
134 return -ENODEV; 251 return -ENODEV;
135 ib = idal_buffer_alloc(count, 0); 252 ib = idal_buffer_alloc(count, 0);
136 if (!ib) 253 if (IS_ERR(ib))
137 return -ENOMEM; 254 return -ENOMEM;
138 rq = raw3270_request_alloc(0); 255 rq = raw3270_request_alloc(0);
139 if (!IS_ERR(rq)) { 256 if (!IS_ERR(rq)) {
@@ -141,10 +258,19 @@ fs3270_read(struct file *filp, char *data, size_t count, loff_t *off)
141 fp->read_command = 6; 258 fp->read_command = 6;
142 raw3270_request_set_cmd(rq, fp->read_command ? : 2); 259 raw3270_request_set_cmd(rq, fp->read_command ? : 2);
143 raw3270_request_set_idal(rq, ib); 260 raw3270_request_set_idal(rq, ib);
144 wait_event(fp->attn_wait, fp->attention); 261 rc = wait_event_interruptible(fp->wait, fp->attention);
145 rc = fs3270_do_io(&fp->view, rq); 262 fp->attention = 0;
146 if (rc == 0 && idal_buffer_to_user(ib, data, count)) 263 if (rc == 0) {
147 rc = -EFAULT; 264 rc = fs3270_do_io(&fp->view, rq);
265 if (rc == 0) {
266 count -= rq->rescnt;
267 if (idal_buffer_to_user(ib, data, count) != 0)
268 rc = -EFAULT;
269 else
270 rc = count;
271
272 }
273 }
148 raw3270_request_free(rq); 274 raw3270_request_free(rq);
149 } else 275 } else
150 rc = PTR_ERR(rq); 276 rc = PTR_ERR(rq);
@@ -162,13 +288,13 @@ fs3270_write(struct file *filp, const char *data, size_t count, loff_t *off)
162 struct raw3270_request *rq; 288 struct raw3270_request *rq;
163 struct idal_buffer *ib; 289 struct idal_buffer *ib;
164 int write_command; 290 int write_command;
165 int rc; 291 ssize_t rc;
166 292
167 fp = filp->private_data; 293 fp = filp->private_data;
168 if (!fp) 294 if (!fp)
169 return -ENODEV; 295 return -ENODEV;
170 ib = idal_buffer_alloc(count, 0); 296 ib = idal_buffer_alloc(count, 0);
171 if (!ib) 297 if (IS_ERR(ib))
172 return -ENOMEM; 298 return -ENOMEM;
173 rq = raw3270_request_alloc(0); 299 rq = raw3270_request_alloc(0);
174 if (!IS_ERR(rq)) { 300 if (!IS_ERR(rq)) {
@@ -179,6 +305,8 @@ fs3270_write(struct file *filp, const char *data, size_t count, loff_t *off)
179 raw3270_request_set_cmd(rq, write_command); 305 raw3270_request_set_cmd(rq, write_command);
180 raw3270_request_set_idal(rq, ib); 306 raw3270_request_set_idal(rq, ib);
181 rc = fs3270_do_io(&fp->view, rq); 307 rc = fs3270_do_io(&fp->view, rq);
308 if (rc == 0)
309 rc = count - rq->rescnt;
182 } else 310 } else
183 rc = -EFAULT; 311 rc = -EFAULT;
184 raw3270_request_free(rq); 312 raw3270_request_free(rq);
@@ -232,7 +360,7 @@ fs3270_ioctl(struct inode *inode, struct file *filp,
232} 360}
233 361
234/* 362/*
235 * Allocate tty3270 structure. 363 * Allocate fs3270 structure.
236 */ 364 */
237static struct fs3270 * 365static struct fs3270 *
238fs3270_alloc_view(void) 366fs3270_alloc_view(void)
@@ -243,8 +371,8 @@ fs3270_alloc_view(void)
243 if (!fp) 371 if (!fp)
244 return ERR_PTR(-ENOMEM); 372 return ERR_PTR(-ENOMEM);
245 memset(fp, 0, sizeof(struct fs3270)); 373 memset(fp, 0, sizeof(struct fs3270));
246 fp->clear = raw3270_request_alloc(0); 374 fp->init = raw3270_request_alloc(0);
247 if (!IS_ERR(fp->clear)) { 375 if (IS_ERR(fp->init)) {
248 kfree(fp); 376 kfree(fp);
249 return ERR_PTR(-ENOMEM); 377 return ERR_PTR(-ENOMEM);
250 } 378 }
@@ -252,12 +380,17 @@ fs3270_alloc_view(void)
252} 380}
253 381
254/* 382/*
255 * Free tty3270 structure. 383 * Free fs3270 structure.
256 */ 384 */
257static void 385static void
258fs3270_free_view(struct raw3270_view *view) 386fs3270_free_view(struct raw3270_view *view)
259{ 387{
260 raw3270_request_free(((struct fs3270 *) view)->clear); 388 struct fs3270 *fp;
389
390 fp = (struct fs3270 *) view;
391 if (fp->rdbuf)
392 idal_buffer_free(fp->rdbuf);
393 raw3270_request_free(((struct fs3270 *) view)->init);
261 kfree(view); 394 kfree(view);
262} 395}
263 396
@@ -285,11 +418,20 @@ static int
285fs3270_open(struct inode *inode, struct file *filp) 418fs3270_open(struct inode *inode, struct file *filp)
286{ 419{
287 struct fs3270 *fp; 420 struct fs3270 *fp;
421 struct idal_buffer *ib;
288 int minor, rc; 422 int minor, rc;
289 423
290 if (imajor(filp->f_dentry->d_inode) != IBM_FS3270_MAJOR) 424 if (imajor(filp->f_dentry->d_inode) != IBM_FS3270_MAJOR)
291 return -ENODEV; 425 return -ENODEV;
292 minor = iminor(filp->f_dentry->d_inode); 426 minor = iminor(filp->f_dentry->d_inode);
427 /* Check for minor 0 multiplexer. */
428 if (minor == 0) {
429 if (!current->signal->tty)
430 return -ENODEV;
431 if (current->signal->tty->driver->major != IBM_TTY3270_MAJOR)
432 return -ENODEV;
433 minor = current->signal->tty->index + RAW3270_FIRSTMINOR;
434 }
293 /* Check if some other program is already using fullscreen mode. */ 435 /* Check if some other program is already using fullscreen mode. */
294 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor); 436 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
295 if (!IS_ERR(fp)) { 437 if (!IS_ERR(fp)) {
@@ -301,7 +443,7 @@ fs3270_open(struct inode *inode, struct file *filp)
301 if (IS_ERR(fp)) 443 if (IS_ERR(fp))
302 return PTR_ERR(fp); 444 return PTR_ERR(fp);
303 445
304 init_waitqueue_head(&fp->attn_wait); 446 init_waitqueue_head(&fp->wait);
305 fp->fs_pid = current->pid; 447 fp->fs_pid = current->pid;
306 rc = raw3270_add_view(&fp->view, &fs3270_fn, minor); 448 rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
307 if (rc) { 449 if (rc) {
@@ -309,8 +451,18 @@ fs3270_open(struct inode *inode, struct file *filp)
309 return rc; 451 return rc;
310 } 452 }
311 453
454 /* Allocate idal-buffer. */
455 ib = idal_buffer_alloc(2*fp->view.rows*fp->view.cols + 5, 0);
456 if (IS_ERR(ib)) {
457 raw3270_put_view(&fp->view);
458 raw3270_del_view(&fp->view);
 459 return PTR_ERR(ib);
460 }
461 fp->rdbuf = ib;
462
312 rc = raw3270_activate_view(&fp->view); 463 rc = raw3270_activate_view(&fp->view);
313 if (rc) { 464 if (rc) {
465 raw3270_put_view(&fp->view);
314 raw3270_del_view(&fp->view); 466 raw3270_del_view(&fp->view);
315 return rc; 467 return rc;
316 } 468 }
@@ -329,8 +481,12 @@ fs3270_close(struct inode *inode, struct file *filp)
329 481
330 fp = filp->private_data; 482 fp = filp->private_data;
331 filp->private_data = 0; 483 filp->private_data = 0;
332 if (fp) 484 if (fp) {
485 fp->fs_pid = 0;
486 raw3270_reset(&fp->view);
487 raw3270_put_view(&fp->view);
333 raw3270_del_view(&fp->view); 488 raw3270_del_view(&fp->view);
489 }
334 return 0; 490 return 0;
335} 491}
336 492
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 328d9cbc56a3..d66946443dfc 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -25,6 +25,12 @@
25 25
26#include "raw3270.h" 26#include "raw3270.h"
27 27
28#include <linux/major.h>
29#include <linux/kdev_t.h>
30#include <linux/device.h>
31
32struct class *class3270;
33
28/* The main 3270 data structure. */ 34/* The main 3270 data structure. */
29struct raw3270 { 35struct raw3270 {
30 struct list_head list; 36 struct list_head list;
@@ -41,6 +47,8 @@ struct raw3270 {
41 struct timer_list timer; /* Device timer. */ 47 struct timer_list timer; /* Device timer. */
42 48
43 unsigned char *ascebc; /* ascii -> ebcdic table */ 49 unsigned char *ascebc; /* ascii -> ebcdic table */
50 struct class_device *clttydev; /* 3270-class tty device ptr */
51 struct class_device *cltubdev; /* 3270-class tub device ptr */
44}; 52};
45 53
46/* raw3270->flags */ 54/* raw3270->flags */
@@ -317,6 +325,22 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
317} 325}
318 326
319int 327int
328raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
329{
330 struct raw3270 *rp;
331 int rc;
332
333 rp = view->dev;
334 if (!rp || rp->view != view)
335 rc = -EACCES;
336 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
337 rc = -ENODEV;
338 else
339 rc = __raw3270_start(rp, view, rq);
340 return rc;
341}
342
343int
320raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq) 344raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
321{ 345{
322 struct raw3270 *rp; 346 struct raw3270 *rp;
@@ -744,6 +768,22 @@ raw3270_reset_device(struct raw3270 *rp)
744 return rc; 768 return rc;
745} 769}
746 770
771int
772raw3270_reset(struct raw3270_view *view)
773{
774 struct raw3270 *rp;
775 int rc;
776
777 rp = view->dev;
778 if (!rp || rp->view != view)
779 rc = -EACCES;
780 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
781 rc = -ENODEV;
782 else
783 rc = raw3270_reset_device(view->dev);
784 return rc;
785}
786
747/* 787/*
748 * Setup new 3270 device. 788 * Setup new 3270 device.
749 */ 789 */
@@ -774,11 +814,12 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
774 814
775 /* 815 /*
776 * Add device to list and find the smallest unused minor 816 * Add device to list and find the smallest unused minor
777 * number for it. 817 * number for it. Note: there is no device with minor 0,
818 * see special case for fs3270.c:fs3270_open().
778 */ 819 */
779 down(&raw3270_sem); 820 down(&raw3270_sem);
780 /* Keep the list sorted. */ 821 /* Keep the list sorted. */
781 minor = 0; 822 minor = RAW3270_FIRSTMINOR;
782 rp->minor = -1; 823 rp->minor = -1;
783 list_for_each(l, &raw3270_devices) { 824 list_for_each(l, &raw3270_devices) {
784 tmp = list_entry(l, struct raw3270, list); 825 tmp = list_entry(l, struct raw3270, list);
@@ -789,7 +830,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
789 } 830 }
790 minor++; 831 minor++;
791 } 832 }
792 if (rp->minor == -1 && minor < RAW3270_MAXDEVS) { 833 if (rp->minor == -1 && minor < RAW3270_MAXDEVS + RAW3270_FIRSTMINOR) {
793 rp->minor = minor; 834 rp->minor = minor;
794 list_add_tail(&rp->list, &raw3270_devices); 835 list_add_tail(&rp->list, &raw3270_devices);
795 } 836 }
@@ -941,11 +982,12 @@ raw3270_deactivate_view(struct raw3270_view *view)
941 list_add_tail(&view->list, &rp->view_list); 982 list_add_tail(&view->list, &rp->view_list);
942 /* Try to activate another view. */ 983 /* Try to activate another view. */
943 if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { 984 if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
944 list_for_each_entry(view, &rp->view_list, list) 985 list_for_each_entry(view, &rp->view_list, list) {
945 if (view->fn->activate(view) == 0) { 986 rp->view = view;
946 rp->view = view; 987 if (view->fn->activate(view) == 0)
947 break; 988 break;
948 } 989 rp->view = 0;
990 }
949 } 991 }
950 } 992 }
951 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 993 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
@@ -961,6 +1003,8 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
961 struct raw3270 *rp; 1003 struct raw3270 *rp;
962 int rc; 1004 int rc;
963 1005
1006 if (minor <= 0)
1007 return -ENODEV;
964 down(&raw3270_sem); 1008 down(&raw3270_sem);
965 rc = -ENODEV; 1009 rc = -ENODEV;
966 list_for_each_entry(rp, &raw3270_devices, list) { 1010 list_for_each_entry(rp, &raw3270_devices, list) {
@@ -976,7 +1020,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
976 view->cols = rp->cols; 1020 view->cols = rp->cols;
977 view->ascebc = rp->ascebc; 1021 view->ascebc = rp->ascebc;
978 spin_lock_init(&view->lock); 1022 spin_lock_init(&view->lock);
979 list_add_tail(&view->list, &rp->view_list); 1023 list_add(&view->list, &rp->view_list);
980 rc = 0; 1024 rc = 0;
981 } 1025 }
982 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 1026 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
@@ -1039,7 +1083,7 @@ raw3270_del_view(struct raw3270_view *view)
1039 if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) { 1083 if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
1040 /* Try to activate another view. */ 1084 /* Try to activate another view. */
1041 list_for_each_entry(nv, &rp->view_list, list) { 1085 list_for_each_entry(nv, &rp->view_list, list) {
1042 if (nv->fn->activate(view) == 0) { 1086 if (nv->fn->activate(nv) == 0) {
1043 rp->view = nv; 1087 rp->view = nv;
1044 break; 1088 break;
1045 } 1089 }
@@ -1063,6 +1107,12 @@ raw3270_delete_device(struct raw3270 *rp)
1063 1107
1064 /* Remove from device chain. */ 1108 /* Remove from device chain. */
1065 down(&raw3270_sem); 1109 down(&raw3270_sem);
1110 if (rp->clttydev)
1111 class_device_destroy(class3270,
1112 MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1113 if (rp->cltubdev)
1114 class_device_destroy(class3270,
1115 MKDEV(IBM_FS3270_MAJOR, rp->minor));
1066 list_del_init(&rp->list); 1116 list_del_init(&rp->list);
1067 up(&raw3270_sem); 1117 up(&raw3270_sem);
1068 1118
@@ -1129,6 +1179,16 @@ raw3270_create_attributes(struct raw3270 *rp)
1129{ 1179{
1130 //FIXME: check return code 1180 //FIXME: check return code
1131 sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1181 sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1182 rp->clttydev =
1183 class_device_create(class3270,
1184 MKDEV(IBM_TTY3270_MAJOR, rp->minor),
1185 &rp->cdev->dev, "tty%s",
1186 rp->cdev->dev.bus_id);
1187 rp->cltubdev =
1188 class_device_create(class3270,
1189 MKDEV(IBM_FS3270_MAJOR, rp->minor),
1190 &rp->cdev->dev, "tub%s",
1191 rp->cdev->dev.bus_id);
1132} 1192}
1133 1193
1134/* 1194/*
@@ -1189,13 +1249,13 @@ raw3270_set_online (struct ccw_device *cdev)
1189 return PTR_ERR(rp); 1249 return PTR_ERR(rp);
1190 rc = raw3270_reset_device(rp); 1250 rc = raw3270_reset_device(rp);
1191 if (rc) 1251 if (rc)
1192 return rc; 1252 goto failure;
1193 rc = raw3270_size_device(rp); 1253 rc = raw3270_size_device(rp);
1194 if (rc) 1254 if (rc)
1195 return rc; 1255 goto failure;
1196 rc = raw3270_reset_device(rp); 1256 rc = raw3270_reset_device(rp);
1197 if (rc) 1257 if (rc)
1198 return rc; 1258 goto failure;
1199 raw3270_create_attributes(rp); 1259 raw3270_create_attributes(rp);
1200 set_bit(RAW3270_FLAGS_READY, &rp->flags); 1260 set_bit(RAW3270_FLAGS_READY, &rp->flags);
1201 down(&raw3270_sem); 1261 down(&raw3270_sem);
@@ -1203,6 +1263,10 @@ raw3270_set_online (struct ccw_device *cdev)
1203 np->notifier(rp->minor, 1); 1263 np->notifier(rp->minor, 1);
1204 up(&raw3270_sem); 1264 up(&raw3270_sem);
1205 return 0; 1265 return 0;
1266
1267failure:
1268 raw3270_delete_device(rp);
1269 return rc;
1206} 1270}
1207 1271
1208/* 1272/*
@@ -1217,6 +1281,14 @@ raw3270_remove (struct ccw_device *cdev)
1217 struct raw3270_notifier *np; 1281 struct raw3270_notifier *np;
1218 1282
1219 rp = cdev->dev.driver_data; 1283 rp = cdev->dev.driver_data;
1284 /*
1285 * _remove is the opposite of _probe; it's probe that
1286 * should set up rp. raw3270_remove gets entered for
1287 * devices even if they haven't been varied online.
1288 * Thus, rp may validly be NULL here.
1289 */
1290 if (rp == NULL)
1291 return;
1220 clear_bit(RAW3270_FLAGS_READY, &rp->flags); 1292 clear_bit(RAW3270_FLAGS_READY, &rp->flags);
1221 1293
1222 sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group); 1294 sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
@@ -1301,6 +1373,7 @@ raw3270_init(void)
1301 if (rc == 0) { 1373 if (rc == 0) {
1302 /* Create attributes for early (= console) device. */ 1374 /* Create attributes for early (= console) device. */
1303 down(&raw3270_sem); 1375 down(&raw3270_sem);
1376 class3270 = class_create(THIS_MODULE, "3270");
1304 list_for_each_entry(rp, &raw3270_devices, list) { 1377 list_for_each_entry(rp, &raw3270_devices, list) {
1305 get_device(&rp->cdev->dev); 1378 get_device(&rp->cdev->dev);
1306 raw3270_create_attributes(rp); 1379 raw3270_create_attributes(rp);
@@ -1314,6 +1387,7 @@ static void
1314raw3270_exit(void) 1387raw3270_exit(void)
1315{ 1388{
1316 ccw_driver_unregister(&raw3270_ccw_driver); 1389 ccw_driver_unregister(&raw3270_ccw_driver);
1390 class_destroy(class3270);
1317} 1391}
1318 1392
1319MODULE_LICENSE("GPL"); 1393MODULE_LICENSE("GPL");
@@ -1335,7 +1409,9 @@ EXPORT_SYMBOL(raw3270_find_view);
1335EXPORT_SYMBOL(raw3270_activate_view); 1409EXPORT_SYMBOL(raw3270_activate_view);
1336EXPORT_SYMBOL(raw3270_deactivate_view); 1410EXPORT_SYMBOL(raw3270_deactivate_view);
1337EXPORT_SYMBOL(raw3270_start); 1411EXPORT_SYMBOL(raw3270_start);
1412EXPORT_SYMBOL(raw3270_start_locked);
1338EXPORT_SYMBOL(raw3270_start_irq); 1413EXPORT_SYMBOL(raw3270_start_irq);
1414EXPORT_SYMBOL(raw3270_reset);
1339EXPORT_SYMBOL(raw3270_register_notifier); 1415EXPORT_SYMBOL(raw3270_register_notifier);
1340EXPORT_SYMBOL(raw3270_unregister_notifier); 1416EXPORT_SYMBOL(raw3270_unregister_notifier);
1341EXPORT_SYMBOL(raw3270_wait_queue); 1417EXPORT_SYMBOL(raw3270_wait_queue);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index ed5d4eb9f623..b635bf8e7775 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -21,6 +21,7 @@
21 21
22/* Local Channel Commands */ 22/* Local Channel Commands */
23#define TC_WRITE 0x01 /* Write */ 23#define TC_WRITE 0x01 /* Write */
24#define TC_RDBUF 0x02 /* Read Buffer */
24#define TC_EWRITE 0x05 /* Erase write */ 25#define TC_EWRITE 0x05 /* Erase write */
25#define TC_READMOD 0x06 /* Read modified */ 26#define TC_READMOD 0x06 /* Read modified */
26#define TC_EWRITEA 0x0d /* Erase write alternate */ 27#define TC_EWRITEA 0x0d /* Erase write alternate */
@@ -76,7 +77,8 @@
76#define TW_KR 0xc2 /* Keyboard restore */ 77#define TW_KR 0xc2 /* Keyboard restore */
77#define TW_PLUSALARM 0x04 /* Add this bit for alarm */ 78#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
78 79
79#define RAW3270_MAXDEVS 256 80#define RAW3270_FIRSTMINOR 1 /* First minor number */
81#define RAW3270_MAXDEVS 255 /* Max number of 3270 devices */
80 82
81/* For TUBGETMOD and TUBSETMOD. Should include. */ 83/* For TUBGETMOD and TUBSETMOD. Should include. */
82struct raw3270_iocb { 84struct raw3270_iocb {
@@ -166,7 +168,10 @@ void raw3270_del_view(struct raw3270_view *);
166void raw3270_deactivate_view(struct raw3270_view *); 168void raw3270_deactivate_view(struct raw3270_view *);
167struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int); 169struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
168int raw3270_start(struct raw3270_view *, struct raw3270_request *); 170int raw3270_start(struct raw3270_view *, struct raw3270_request *);
171int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
169int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *); 172int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
173int raw3270_reset(struct raw3270_view *);
174struct raw3270_view *raw3270_view(struct raw3270_view *);
170 175
171/* Reference count inliner for view structures. */ 176/* Reference count inliner for view structures. */
172static inline void 177static inline void
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 7db5ebce7f0f..4b9069370388 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -653,18 +653,12 @@ tty3270_activate(struct raw3270_view *view)
653 tp->update_flags = TTY_UPDATE_ALL; 653 tp->update_flags = TTY_UPDATE_ALL;
654 tty3270_set_timer(tp, 1); 654 tty3270_set_timer(tp, 1);
655 spin_unlock_irqrestore(&tp->view.lock, flags); 655 spin_unlock_irqrestore(&tp->view.lock, flags);
656 start_tty(tp->tty);
657 return 0; 656 return 0;
658} 657}
659 658
660static void 659static void
661tty3270_deactivate(struct raw3270_view *view) 660tty3270_deactivate(struct raw3270_view *view)
662{ 661{
663 struct tty3270 *tp;
664
665 tp = (struct tty3270 *) view;
666 if (tp && tp->tty)
667 stop_tty(tp->tty);
668} 662}
669 663
670static int 664static int
@@ -716,13 +710,13 @@ tty3270_alloc_view(void)
716 tp->freemem_pages[pages], PAGE_SIZE); 710 tp->freemem_pages[pages], PAGE_SIZE);
717 } 711 }
718 tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE); 712 tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
719 if (!tp->write) 713 if (IS_ERR(tp->write))
720 goto out_pages; 714 goto out_pages;
721 tp->read = raw3270_request_alloc(0); 715 tp->read = raw3270_request_alloc(0);
722 if (!tp->read) 716 if (IS_ERR(tp->read))
723 goto out_write; 717 goto out_write;
724 tp->kreset = raw3270_request_alloc(1); 718 tp->kreset = raw3270_request_alloc(1);
725 if (!tp->kreset) 719 if (IS_ERR(tp->kreset))
726 goto out_read; 720 goto out_read;
727 tp->kbd = kbd_alloc(); 721 tp->kbd = kbd_alloc();
728 if (!tp->kbd) 722 if (!tp->kbd)
@@ -845,7 +839,8 @@ tty3270_del_views(void)
845 int i; 839 int i;
846 840
847 for (i = 0; i < tty3270_max_index; i++) { 841 for (i = 0; i < tty3270_max_index; i++) {
848 tp = (struct tty3270 *) raw3270_find_view(&tty3270_fn, i); 842 tp = (struct tty3270 *)
843 raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR);
849 if (!IS_ERR(tp)) 844 if (!IS_ERR(tp))
850 raw3270_del_view(&tp->view); 845 raw3270_del_view(&tp->view);
851 } 846 }
@@ -871,7 +866,9 @@ tty3270_open(struct tty_struct *tty, struct file * filp)
871 if (tty->count > 1) 866 if (tty->count > 1)
872 return 0; 867 return 0;
873 /* Check if the tty3270 is already there. */ 868 /* Check if the tty3270 is already there. */
874 tp = (struct tty3270 *) raw3270_find_view(&tty3270_fn, tty->index); 869 tp = (struct tty3270 *)
870 raw3270_find_view(&tty3270_fn,
871 tty->index + RAW3270_FIRSTMINOR);
875 if (!IS_ERR(tp)) { 872 if (!IS_ERR(tp)) {
876 tty->driver_data = tp; 873 tty->driver_data = tp;
877 tty->winsize.ws_row = tp->view.rows - 2; 874 tty->winsize.ws_row = tp->view.rows - 2;
@@ -903,7 +900,8 @@ tty3270_open(struct tty_struct *tty, struct file * filp)
903 (void (*)(unsigned long)) tty3270_read_tasklet, 900 (void (*)(unsigned long)) tty3270_read_tasklet,
904 (unsigned long) tp->read); 901 (unsigned long) tp->read);
905 902
906 rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); 903 rc = raw3270_add_view(&tp->view, &tty3270_fn,
904 tty->index + RAW3270_FIRSTMINOR);
907 if (rc) { 905 if (rc) {
908 tty3270_free_view(tp); 906 tty3270_free_view(tp);
909 return rc; 907 return rc;
@@ -911,8 +909,8 @@ tty3270_open(struct tty_struct *tty, struct file * filp)
911 909
912 rc = tty3270_alloc_screen(tp); 910 rc = tty3270_alloc_screen(tp);
913 if (rc) { 911 if (rc) {
914 raw3270_del_view(&tp->view);
915 raw3270_put_view(&tp->view); 912 raw3270_put_view(&tp->view);
913 raw3270_del_view(&tp->view);
916 return rc; 914 return rc;
917 } 915 }
918 916
@@ -1780,7 +1778,7 @@ tty3270_init(void)
1780 struct tty_driver *driver; 1778 struct tty_driver *driver;
1781 int ret; 1779 int ret;
1782 1780
1783 driver = alloc_tty_driver(256); 1781 driver = alloc_tty_driver(RAW3270_MAXDEVS);
1784 if (!driver) 1782 if (!driver)
1785 return -ENOMEM; 1783 return -ENOMEM;
1786 1784
@@ -1794,6 +1792,7 @@ tty3270_init(void)
1794 driver->driver_name = "ttyTUB"; 1792 driver->driver_name = "ttyTUB";
1795 driver->name = "ttyTUB"; 1793 driver->name = "ttyTUB";
1796 driver->major = IBM_TTY3270_MAJOR; 1794 driver->major = IBM_TTY3270_MAJOR;
1795 driver->minor_start = RAW3270_FIRSTMINOR;
1797 driver->type = TTY_DRIVER_TYPE_SYSTEM; 1796 driver->type = TTY_DRIVER_TYPE_SYSTEM;
1798 driver->subtype = SYSTEM_TYPE_TTY; 1797 driver->subtype = SYSTEM_TYPE_TTY;
1799 driver->init_termios = tty_std_termios; 1798 driver->init_termios = tty_std_termios;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 8cc4f1a940dc..c05b069c2996 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -30,10 +30,13 @@
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/slab.h>
34#include <linux/timex.h> /* get_clock() */
33 35
34#include <asm/ccwdev.h> 36#include <asm/ccwdev.h>
35#include <asm/cio.h> 37#include <asm/cio.h>
36#include <asm/cmb.h> 38#include <asm/cmb.h>
39#include <asm/div64.h>
37 40
38#include "cio.h" 41#include "cio.h"
39#include "css.h" 42#include "css.h"
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 9adc11e8b8bc..811c9d150637 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -22,6 +22,7 @@
22 22
23#include <asm/ccwdev.h> 23#include <asm/ccwdev.h>
24#include <asm/cio.h> 24#include <asm/cio.h>
25#include <asm/param.h> /* HZ */
25 26
26#include "cio.h" 27#include "cio.h"
27#include "css.h" 28#include "css.h"
@@ -252,6 +253,23 @@ cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
252} 253}
253 254
254static ssize_t 255static ssize_t
256modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
257{
258 struct ccw_device *cdev = to_ccwdev(dev);
259 struct ccw_device_id *id = &(cdev->id);
260 int ret;
261
262 ret = sprintf(buf, "ccw:t%04Xm%02x",
263 id->cu_type, id->cu_model);
264 if (id->dev_type != 0)
265 ret += sprintf(buf + ret, "dt%04Xdm%02X\n",
266 id->dev_type, id->dev_model);
267 else
268 ret += sprintf(buf + ret, "dtdm\n");
269 return ret;
270}
271
272static ssize_t
255online_show (struct device *dev, struct device_attribute *attr, char *buf) 273online_show (struct device *dev, struct device_attribute *attr, char *buf)
256{ 274{
257 struct ccw_device *cdev = to_ccwdev(dev); 275 struct ccw_device *cdev = to_ccwdev(dev);
@@ -448,6 +466,7 @@ static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
448static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); 466static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
449static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); 467static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
450static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); 468static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
469static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
451static DEVICE_ATTR(online, 0644, online_show, online_store); 470static DEVICE_ATTR(online, 0644, online_show, online_store);
452extern struct device_attribute dev_attr_cmb_enable; 471extern struct device_attribute dev_attr_cmb_enable;
453static DEVICE_ATTR(availability, 0444, available_show, NULL); 472static DEVICE_ATTR(availability, 0444, available_show, NULL);
@@ -471,6 +490,7 @@ subchannel_add_files (struct device *dev)
471static struct attribute * ccwdev_attrs[] = { 490static struct attribute * ccwdev_attrs[] = {
472 &dev_attr_devtype.attr, 491 &dev_attr_devtype.attr,
473 &dev_attr_cutype.attr, 492 &dev_attr_cutype.attr,
493 &dev_attr_modalias.attr,
474 &dev_attr_online.attr, 494 &dev_attr_online.attr,
475 &dev_attr_cmb_enable.attr, 495 &dev_attr_cmb_enable.attr,
476 &dev_attr_availability.attr, 496 &dev_attr_availability.attr,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index fbe4202a3f6f..c1c89f4fd4e3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -11,6 +11,8 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/config.h> 12#include <linux/config.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/jiffies.h>
15#include <linux/string.h>
14 16
15#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
16#include <asm/cio.h> 18#include <asm/cio.h>
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index fe8187d6f58b..e2a5657d5fdb 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -41,6 +41,7 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
44#include <linux/device.h>
44#include "scsi.h" 45#include "scsi.h"
45#include <scsi/scsi_host.h> 46#include <scsi/scsi_host.h>
46#include <linux/libata.h> 47#include <linux/libata.h>
@@ -192,7 +193,6 @@ static void ahci_port_stop(struct ata_port *ap);
192static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 193static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
193static void ahci_qc_prep(struct ata_queued_cmd *qc); 194static void ahci_qc_prep(struct ata_queued_cmd *qc);
194static u8 ahci_check_status(struct ata_port *ap); 195static u8 ahci_check_status(struct ata_port *ap);
195static u8 ahci_check_err(struct ata_port *ap);
196static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 196static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
197static void ahci_remove_one (struct pci_dev *pdev); 197static void ahci_remove_one (struct pci_dev *pdev);
198 198
@@ -221,7 +221,6 @@ static const struct ata_port_operations ahci_ops = {
221 221
222 .check_status = ahci_check_status, 222 .check_status = ahci_check_status,
223 .check_altstatus = ahci_check_status, 223 .check_altstatus = ahci_check_status,
224 .check_err = ahci_check_err,
225 .dev_select = ata_noop_dev_select, 224 .dev_select = ata_noop_dev_select,
226 225
227 .tf_read = ahci_tf_read, 226 .tf_read = ahci_tf_read,
@@ -458,13 +457,6 @@ static u8 ahci_check_status(struct ata_port *ap)
458 return readl(mmio + PORT_TFDATA) & 0xFF; 457 return readl(mmio + PORT_TFDATA) & 0xFF;
459} 458}
460 459
461static u8 ahci_check_err(struct ata_port *ap)
462{
463 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
464
465 return (readl(mmio + PORT_TFDATA) >> 8) & 0xFF;
466}
467
468static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 460static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
469{ 461{
470 struct ahci_port_priv *pp = ap->private_data; 462 struct ahci_port_priv *pp = ap->private_data;
@@ -609,7 +601,7 @@ static void ahci_eng_timeout(struct ata_port *ap)
609 * not being called from the SCSI EH. 601 * not being called from the SCSI EH.
610 */ 602 */
611 qc->scsidone = scsi_finish_command; 603 qc->scsidone = scsi_finish_command;
612 ata_qc_complete(qc, ATA_ERR); 604 ata_qc_complete(qc, AC_ERR_OTHER);
613 } 605 }
614 606
615 spin_unlock_irqrestore(&host_set->lock, flags); 607 spin_unlock_irqrestore(&host_set->lock, flags);
@@ -638,7 +630,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
638 if (status & PORT_IRQ_FATAL) { 630 if (status & PORT_IRQ_FATAL) {
639 ahci_intr_error(ap, status); 631 ahci_intr_error(ap, status);
640 if (qc) 632 if (qc)
641 ata_qc_complete(qc, ATA_ERR); 633 ata_qc_complete(qc, AC_ERR_OTHER);
642 } 634 }
643 635
644 return 1; 636 return 1;
@@ -683,10 +675,10 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
683 if (!ahci_host_intr(ap, qc)) 675 if (!ahci_host_intr(ap, qc))
684 if (ata_ratelimit()) { 676 if (ata_ratelimit()) {
685 struct pci_dev *pdev = 677 struct pci_dev *pdev =
686 to_pci_dev(ap->host_set->dev); 678 to_pci_dev(ap->host_set->dev);
687 printk(KERN_WARNING 679 dev_printk(KERN_WARNING, &pdev->dev,
688 "ahci(%s): unhandled interrupt on port %u\n", 680 "unhandled interrupt on port %u\n",
689 pci_name(pdev), i); 681 i);
690 } 682 }
691 683
692 VPRINTK("port %u\n", i); 684 VPRINTK("port %u\n", i);
@@ -694,10 +686,9 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
694 VPRINTK("port %u (no irq)\n", i); 686 VPRINTK("port %u (no irq)\n", i);
695 if (ata_ratelimit()) { 687 if (ata_ratelimit()) {
696 struct pci_dev *pdev = 688 struct pci_dev *pdev =
697 to_pci_dev(ap->host_set->dev); 689 to_pci_dev(ap->host_set->dev);
698 printk(KERN_WARNING 690 dev_printk(KERN_WARNING, &pdev->dev,
699 "ahci(%s): interrupt on disabled port %u\n", 691 "interrupt on disabled port %u\n", i);
700 pci_name(pdev), i);
701 } 692 }
702 } 693 }
703 694
@@ -769,8 +760,8 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
769 760
770 tmp = readl(mmio + HOST_CTL); 761 tmp = readl(mmio + HOST_CTL);
771 if (tmp & HOST_RESET) { 762 if (tmp & HOST_RESET) {
772 printk(KERN_ERR DRV_NAME "(%s): controller reset failed (0x%x)\n", 763 dev_printk(KERN_ERR, &pdev->dev,
773 pci_name(pdev), tmp); 764 "controller reset failed (0x%x)\n", tmp);
774 return -EIO; 765 return -EIO;
775 } 766 }
776 767
@@ -798,22 +789,22 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
798 if (rc) { 789 if (rc) {
799 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 790 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
800 if (rc) { 791 if (rc) {
801 printk(KERN_ERR DRV_NAME "(%s): 64-bit DMA enable failed\n", 792 dev_printk(KERN_ERR, &pdev->dev,
802 pci_name(pdev)); 793 "64-bit DMA enable failed\n");
803 return rc; 794 return rc;
804 } 795 }
805 } 796 }
806 } else { 797 } else {
807 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 798 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
808 if (rc) { 799 if (rc) {
809 printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n", 800 dev_printk(KERN_ERR, &pdev->dev,
810 pci_name(pdev)); 801 "32-bit DMA enable failed\n");
811 return rc; 802 return rc;
812 } 803 }
813 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 804 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
814 if (rc) { 805 if (rc) {
815 printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n", 806 dev_printk(KERN_ERR, &pdev->dev,
816 pci_name(pdev)); 807 "32-bit consistent DMA enable failed\n");
817 return rc; 808 return rc;
818 } 809 }
819 } 810 }
@@ -916,10 +907,10 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
916 else 907 else
917 scc_s = "unknown"; 908 scc_s = "unknown";
918 909
919 printk(KERN_INFO DRV_NAME "(%s) AHCI %02x%02x.%02x%02x " 910 dev_printk(KERN_INFO, &pdev->dev,
911 "AHCI %02x%02x.%02x%02x "
920 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 912 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
921 , 913 ,
922 pci_name(pdev),
923 914
924 (vers >> 24) & 0xff, 915 (vers >> 24) & 0xff,
925 (vers >> 16) & 0xff, 916 (vers >> 16) & 0xff,
@@ -932,11 +923,11 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
932 impl, 923 impl,
933 scc_s); 924 scc_s);
934 925
935 printk(KERN_INFO DRV_NAME "(%s) flags: " 926 dev_printk(KERN_INFO, &pdev->dev,
927 "flags: "
936 "%s%s%s%s%s%s" 928 "%s%s%s%s%s%s"
937 "%s%s%s%s%s%s%s\n" 929 "%s%s%s%s%s%s%s\n"
938 , 930 ,
939 pci_name(pdev),
940 931
941 cap & (1 << 31) ? "64bit " : "", 932 cap & (1 << 31) ? "64bit " : "",
942 cap & (1 << 30) ? "ncq " : "", 933 cap & (1 << 30) ? "ncq " : "",
@@ -969,7 +960,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
969 VPRINTK("ENTER\n"); 960 VPRINTK("ENTER\n");
970 961
971 if (!printed_version++) 962 if (!printed_version++)
972 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 963 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
973 964
974 rc = pci_enable_device(pdev); 965 rc = pci_enable_device(pdev);
975 if (rc) 966 if (rc)
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index be021478f416..7f8aa1b552ce 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -45,6 +45,7 @@
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/blkdev.h> 46#include <linux/blkdev.h>
47#include <linux/delay.h> 47#include <linux/delay.h>
48#include <linux/device.h>
48#include "scsi.h" 49#include "scsi.h"
49#include <scsi/scsi_host.h> 50#include <scsi/scsi_host.h>
50#include <linux/libata.h> 51#include <linux/libata.h>
@@ -621,18 +622,19 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
621{ 622{
622 static int printed_version; 623 static int printed_version;
623 struct ata_port_info *port_info[2]; 624 struct ata_port_info *port_info[2];
624 unsigned int combined = 0, n_ports = 1; 625 unsigned int combined = 0;
625 unsigned int pata_chan = 0, sata_chan = 0; 626 unsigned int pata_chan = 0, sata_chan = 0;
626 627
627 if (!printed_version++) 628 if (!printed_version++)
628 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 629 dev_printk(KERN_DEBUG, &pdev->dev,
630 "version " DRV_VERSION "\n");
629 631
630 /* no hotplugging support (FIXME) */ 632 /* no hotplugging support (FIXME) */
631 if (!in_module_init) 633 if (!in_module_init)
632 return -ENODEV; 634 return -ENODEV;
633 635
634 port_info[0] = &piix_port_info[ent->driver_data]; 636 port_info[0] = &piix_port_info[ent->driver_data];
635 port_info[1] = NULL; 637 port_info[1] = &piix_port_info[ent->driver_data];
636 638
637 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) { 639 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) {
638 u8 tmp; 640 u8 tmp;
@@ -670,12 +672,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
670 port_info[sata_chan] = &piix_port_info[ent->driver_data]; 672 port_info[sata_chan] = &piix_port_info[ent->driver_data];
671 port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS; 673 port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS;
672 port_info[pata_chan] = &piix_port_info[ich5_pata]; 674 port_info[pata_chan] = &piix_port_info[ich5_pata];
673 n_ports++;
674 675
675 printk(KERN_WARNING DRV_NAME ": combined mode detected\n"); 676 dev_printk(KERN_WARNING, &pdev->dev,
677 "combined mode detected (p=%u, s=%u)\n",
678 pata_chan, sata_chan);
676 } 679 }
677 680
678 return ata_pci_init_one(pdev, port_info, n_ports); 681 return ata_pci_init_one(pdev, port_info, 2);
679} 682}
680 683
681static int __init piix_init(void) 684static int __init piix_init(void)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ff25210b00ba..822b9fa706f3 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1543,13 +1543,16 @@ static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
1543 {"vscsi", "IBM,v-scsi"}, 1543 {"vscsi", "IBM,v-scsi"},
1544 { "", "" } 1544 { "", "" }
1545}; 1545};
1546
1547MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); 1546MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
1547
1548static struct vio_driver ibmvscsi_driver = { 1548static struct vio_driver ibmvscsi_driver = {
1549 .name = "ibmvscsi",
1550 .id_table = ibmvscsi_device_table, 1549 .id_table = ibmvscsi_device_table,
1551 .probe = ibmvscsi_probe, 1550 .probe = ibmvscsi_probe,
1552 .remove = ibmvscsi_remove 1551 .remove = ibmvscsi_remove,
1552 .driver = {
1553 .name = "ibmvscsi",
1554 .owner = THIS_MODULE,
1555 }
1553}; 1556};
1554 1557
1555int __init ibmvscsi_module_init(void) 1558int __init ibmvscsi_module_init(void)
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 3d62c9bcbff7..00d6a6657ebc 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -180,19 +180,12 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
180 return; 180 return;
181 } 181 }
182 count = min(pc->sg->length - pc->b_count, bcount); 182 count = min(pc->sg->length - pc->b_count, bcount);
183 if (PageHighMem(pc->sg->page)) { 183 buf = kmap_atomic(pc->sg->page, KM_IRQ0);
184 unsigned long flags; 184 drive->hwif->atapi_input_bytes(drive,
185 185 buf + pc->b_count + pc->sg->offset, count);
186 local_irq_save(flags); 186 kunmap_atomic(buf, KM_IRQ0);
187 buf = kmap_atomic(pc->sg->page, KM_IRQ0) + pc->sg->offset; 187 bcount -= count;
188 drive->hwif->atapi_input_bytes(drive, buf + pc->b_count, count); 188 pc->b_count += count;
189 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
190 local_irq_restore(flags);
191 } else {
192 buf = page_address(pc->sg->page) + pc->sg->offset;
193 drive->hwif->atapi_input_bytes(drive, buf + pc->b_count, count);
194 }
195 bcount -= count; pc->b_count += count;
196 if (pc->b_count == pc->sg->length) { 189 if (pc->b_count == pc->sg->length) {
197 pc->sg++; 190 pc->sg++;
198 pc->b_count = 0; 191 pc->b_count = 0;
@@ -212,19 +205,12 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
212 return; 205 return;
213 } 206 }
214 count = min(pc->sg->length - pc->b_count, bcount); 207 count = min(pc->sg->length - pc->b_count, bcount);
215 if (PageHighMem(pc->sg->page)) { 208 buf = kmap_atomic(pc->sg->page, KM_IRQ0);
216 unsigned long flags; 209 drive->hwif->atapi_output_bytes(drive,
217 210 buf + pc->b_count + pc->sg->offset, count);
218 local_irq_save(flags); 211 kunmap_atomic(buf, KM_IRQ0);
219 buf = kmap_atomic(pc->sg->page, KM_IRQ0) + pc->sg->offset; 212 bcount -= count;
220 drive->hwif->atapi_output_bytes(drive, buf + pc->b_count, count); 213 pc->b_count += count;
221 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
222 local_irq_restore(flags);
223 } else {
224 buf = page_address(pc->sg->page) + pc->sg->offset;
225 drive->hwif->atapi_output_bytes(drive, buf + pc->b_count, count);
226 }
227 bcount -= count; pc->b_count += count;
228 if (pc->b_count == pc->sg->length) { 214 if (pc->b_count == pc->sg->length) {
229 pc->sg++; 215 pc->sg++;
230 pc->b_count = 0; 216 pc->b_count = 0;
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 5ca97605ff35..8be7dc0b47b8 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -372,7 +372,7 @@ static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
372 struct ata_ioports *ioaddr = &ap->ioaddr; 372 struct ata_ioports *ioaddr = &ap->ioaddr;
373 373
374 tf->command = ata_check_status(ap); 374 tf->command = ata_check_status(ap);
375 tf->feature = ata_chk_err(ap); 375 tf->feature = inb(ioaddr->error_addr);
376 tf->nsect = inb(ioaddr->nsect_addr); 376 tf->nsect = inb(ioaddr->nsect_addr);
377 tf->lbal = inb(ioaddr->lbal_addr); 377 tf->lbal = inb(ioaddr->lbal_addr);
378 tf->lbam = inb(ioaddr->lbam_addr); 378 tf->lbam = inb(ioaddr->lbam_addr);
@@ -406,7 +406,7 @@ static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
406 struct ata_ioports *ioaddr = &ap->ioaddr; 406 struct ata_ioports *ioaddr = &ap->ioaddr;
407 407
408 tf->command = ata_check_status(ap); 408 tf->command = ata_check_status(ap);
409 tf->feature = ata_chk_err(ap); 409 tf->feature = readb((void __iomem *)ioaddr->error_addr);
410 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr); 410 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
411 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr); 411 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
412 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr); 412 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
@@ -527,30 +527,6 @@ u8 ata_altstatus(struct ata_port *ap)
527 527
528 528
529/** 529/**
530 * ata_chk_err - Read device error reg
531 * @ap: port where the device is
532 *
533 * Reads ATA taskfile error register for
534 * currently-selected device and return its value.
535 *
536 * Note: may NOT be used as the check_err() entry in
537 * ata_port_operations.
538 *
539 * LOCKING:
540 * Inherited from caller.
541 */
542u8 ata_chk_err(struct ata_port *ap)
543{
544 if (ap->ops->check_err)
545 return ap->ops->check_err(ap);
546
547 if (ap->flags & ATA_FLAG_MMIO) {
548 return readb((void __iomem *) ap->ioaddr.error_addr);
549 }
550 return inb(ap->ioaddr.error_addr);
551}
552
553/**
554 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 530 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
555 * @tf: Taskfile to convert 531 * @tf: Taskfile to convert
556 * @fis: Buffer into which data will output 532 * @fis: Buffer into which data will output
@@ -902,8 +878,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
902 878
903 memset(&tf, 0, sizeof(tf)); 879 memset(&tf, 0, sizeof(tf));
904 880
905 err = ata_chk_err(ap);
906 ap->ops->tf_read(ap, &tf); 881 ap->ops->tf_read(ap, &tf);
882 err = tf.feature;
907 883
908 dev->class = ATA_DEV_NONE; 884 dev->class = ATA_DEV_NONE;
909 885
@@ -1140,7 +1116,6 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1140 unsigned int major_version; 1116 unsigned int major_version;
1141 u16 tmp; 1117 u16 tmp;
1142 unsigned long xfer_modes; 1118 unsigned long xfer_modes;
1143 u8 status;
1144 unsigned int using_edd; 1119 unsigned int using_edd;
1145 DECLARE_COMPLETION(wait); 1120 DECLARE_COMPLETION(wait);
1146 struct ata_queued_cmd *qc; 1121 struct ata_queued_cmd *qc;
@@ -1194,8 +1169,11 @@ retry:
1194 else 1169 else
1195 wait_for_completion(&wait); 1170 wait_for_completion(&wait);
1196 1171
1197 status = ata_chk_status(ap); 1172 spin_lock_irqsave(&ap->host_set->lock, flags);
1198 if (status & ATA_ERR) { 1173 ap->ops->tf_read(ap, &qc->tf);
1174 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1175
1176 if (qc->tf.command & ATA_ERR) {
1199 /* 1177 /*
1200 * arg! EDD works for all test cases, but seems to return 1178 * arg! EDD works for all test cases, but seems to return
1201 * the ATA signature for some ATAPI devices. Until the 1179 * the ATA signature for some ATAPI devices. Until the
@@ -1208,7 +1186,7 @@ retry:
1208 * to have this problem. 1186 * to have this problem.
1209 */ 1187 */
1210 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) { 1188 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1211 u8 err = ata_chk_err(ap); 1189 u8 err = qc->tf.feature;
1212 if (err & ATA_ABORTED) { 1190 if (err & ATA_ABORTED) {
1213 dev->class = ATA_DEV_ATAPI; 1191 dev->class = ATA_DEV_ATAPI;
1214 qc->cursg = 0; 1192 qc->cursg = 0;
@@ -2685,7 +2663,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2685 * None. (grabs host lock) 2663 * None. (grabs host lock)
2686 */ 2664 */
2687 2665
2688void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 2666void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
2689{ 2667{
2690 struct ata_port *ap = qc->ap; 2668 struct ata_port *ap = qc->ap;
2691 unsigned long flags; 2669 unsigned long flags;
@@ -2693,7 +2671,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2693 spin_lock_irqsave(&ap->host_set->lock, flags); 2671 spin_lock_irqsave(&ap->host_set->lock, flags);
2694 ap->flags &= ~ATA_FLAG_NOINTR; 2672 ap->flags &= ~ATA_FLAG_NOINTR;
2695 ata_irq_on(ap); 2673 ata_irq_on(ap);
2696 ata_qc_complete(qc, drv_stat); 2674 ata_qc_complete(qc, err_mask);
2697 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2675 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2698} 2676}
2699 2677
@@ -2790,7 +2768,7 @@ static int ata_pio_complete (struct ata_port *ap)
2790 2768
2791 ap->hsm_task_state = HSM_ST_IDLE; 2769 ap->hsm_task_state = HSM_ST_IDLE;
2792 2770
2793 ata_poll_qc_complete(qc, drv_stat); 2771 ata_poll_qc_complete(qc, 0);
2794 2772
2795 /* another command may start at this point */ 2773 /* another command may start at this point */
2796 2774
@@ -3158,18 +3136,15 @@ static void ata_pio_block(struct ata_port *ap)
3158static void ata_pio_error(struct ata_port *ap) 3136static void ata_pio_error(struct ata_port *ap)
3159{ 3137{
3160 struct ata_queued_cmd *qc; 3138 struct ata_queued_cmd *qc;
3161 u8 drv_stat; 3139
3140 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3162 3141
3163 qc = ata_qc_from_tag(ap, ap->active_tag); 3142 qc = ata_qc_from_tag(ap, ap->active_tag);
3164 assert(qc != NULL); 3143 assert(qc != NULL);
3165 3144
3166 drv_stat = ata_chk_status(ap);
3167 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
3168 ap->id, drv_stat);
3169
3170 ap->hsm_task_state = HSM_ST_IDLE; 3145 ap->hsm_task_state = HSM_ST_IDLE;
3171 3146
3172 ata_poll_qc_complete(qc, drv_stat | ATA_ERR); 3147 ata_poll_qc_complete(qc, AC_ERR_ATA_BUS);
3173} 3148}
3174 3149
3175static void ata_pio_task(void *_data) 3150static void ata_pio_task(void *_data)
@@ -3292,7 +3267,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3292 ap->id, qc->tf.command, drv_stat, host_stat); 3267 ap->id, qc->tf.command, drv_stat, host_stat);
3293 3268
3294 /* complete taskfile transaction */ 3269 /* complete taskfile transaction */
3295 ata_qc_complete(qc, drv_stat); 3270 ata_qc_complete(qc, ac_err_mask(drv_stat));
3296 break; 3271 break;
3297 } 3272 }
3298 3273
@@ -3397,7 +3372,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3397 return qc; 3372 return qc;
3398} 3373}
3399 3374
3400int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) 3375int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask)
3401{ 3376{
3402 return 0; 3377 return 0;
3403} 3378}
@@ -3456,7 +3431,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
3456 * spin_lock_irqsave(host_set lock) 3431 * spin_lock_irqsave(host_set lock)
3457 */ 3432 */
3458 3433
3459void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 3434void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
3460{ 3435{
3461 int rc; 3436 int rc;
3462 3437
@@ -3473,7 +3448,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3473 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3448 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3474 3449
3475 /* call completion callback */ 3450 /* call completion callback */
3476 rc = qc->complete_fn(qc, drv_stat); 3451 rc = qc->complete_fn(qc, err_mask);
3477 3452
3478 /* if callback indicates not to complete command (non-zero), 3453 /* if callback indicates not to complete command (non-zero),
3479 * return immediately 3454 * return immediately
@@ -3911,7 +3886,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
3911 ap->ops->irq_clear(ap); 3886 ap->ops->irq_clear(ap);
3912 3887
3913 /* complete taskfile transaction */ 3888 /* complete taskfile transaction */
3914 ata_qc_complete(qc, status); 3889 ata_qc_complete(qc, ac_err_mask(status));
3915 break; 3890 break;
3916 3891
3917 default: 3892 default:
@@ -4006,7 +3981,7 @@ static void atapi_packet_task(void *_data)
4006 /* sleep-wait for BSY to clear */ 3981 /* sleep-wait for BSY to clear */
4007 DPRINTK("busy wait\n"); 3982 DPRINTK("busy wait\n");
4008 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) 3983 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
4009 goto err_out; 3984 goto err_out_status;
4010 3985
4011 /* make sure DRQ is set */ 3986 /* make sure DRQ is set */
4012 status = ata_chk_status(ap); 3987 status = ata_chk_status(ap);
@@ -4043,8 +4018,10 @@ static void atapi_packet_task(void *_data)
4043 4018
4044 return; 4019 return;
4045 4020
4021err_out_status:
4022 status = ata_chk_status(ap);
4046err_out: 4023err_out:
4047 ata_poll_qc_complete(qc, ATA_ERR); 4024 ata_poll_qc_complete(qc, __ac_err_mask(status));
4048} 4025}
4049 4026
4050 4027
@@ -4550,11 +4527,11 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
4550 return probe_ent; 4527 return probe_ent;
4551} 4528}
4552 4529
4553static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num) 4530static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4554{ 4531{
4555 struct ata_probe_ent *probe_ent; 4532 struct ata_probe_ent *probe_ent;
4556 4533
4557 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4534 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4558 if (!probe_ent) 4535 if (!probe_ent)
4559 return NULL; 4536 return NULL;
4560 4537
@@ -4701,9 +4678,9 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4701 4678
4702 if (legacy_mode) { 4679 if (legacy_mode) {
4703 if (legacy_mode & (1 << 0)) 4680 if (legacy_mode & (1 << 0))
4704 probe_ent = ata_pci_init_legacy_port(pdev, port, 0); 4681 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4705 if (legacy_mode & (1 << 1)) 4682 if (legacy_mode & (1 << 1))
4706 probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1); 4683 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4707 } else { 4684 } else {
4708 if (n_ports == 2) 4685 if (n_ports == 2)
4709 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 4686 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
@@ -4867,7 +4844,6 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4867EXPORT_SYMBOL_GPL(ata_tf_from_fis); 4844EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4868EXPORT_SYMBOL_GPL(ata_check_status); 4845EXPORT_SYMBOL_GPL(ata_check_status);
4869EXPORT_SYMBOL_GPL(ata_altstatus); 4846EXPORT_SYMBOL_GPL(ata_altstatus);
4870EXPORT_SYMBOL_GPL(ata_chk_err);
4871EXPORT_SYMBOL_GPL(ata_exec_command); 4847EXPORT_SYMBOL_GPL(ata_exec_command);
4872EXPORT_SYMBOL_GPL(ata_port_start); 4848EXPORT_SYMBOL_GPL(ata_port_start);
4873EXPORT_SYMBOL_GPL(ata_port_stop); 4849EXPORT_SYMBOL_GPL(ata_port_stop);
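The recurring change in this and the following driver diffs is that completion paths now hand ata_qc_complete() an error mask built from AC_ERR_* bits instead of a raw ATA status byte. The helpers used above, ac_err_mask() and __ac_err_mask(), live in <linux/libata.h>; the sketch below shows plausible definitions consistent with how the call sites use them (a busy or error/device-fault status maps to an error bit, and the __ variant never returns an empty mask). The exact bit chosen for a busy status is an assumption, not copied from the header.

/* Sketch only, inferred from the call sites above; ATA_BUSY, ATA_ERR,
 * ATA_DF and the AC_ERR_* flags come from <linux/libata.h>. */
static inline unsigned int ac_err_mask(u8 status)
{
	if (status & ATA_BUSY)			/* host/bus-level problem */
		return AC_ERR_ATA_BUS;
	if (status & (ATA_ERR | ATA_DF))	/* device reported an error */
		return AC_ERR_DEV;
	return 0;
}

/* Variant used on paths already known to have failed (e.g. timeouts):
 * guarantees a non-zero mask even if the status byte looks clean. */
static inline unsigned int __ac_err_mask(u8 status)
{
	unsigned int mask = ac_err_mask(status);

	if (mask == 0)
		return AC_ERR_OTHER;
	return mask;
}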
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 89a04b1a5a0e..1e3792f86fcf 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -560,7 +560,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
560 * Use ata_to_sense_error() to map status register bits 560 * Use ata_to_sense_error() to map status register bits
561 * onto sense key, asc & ascq. 561 * onto sense key, asc & ascq.
562 */ 562 */
563 if (unlikely(tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ))) { 563 if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
564 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 564 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
565 &sb[1], &sb[2], &sb[3]); 565 &sb[1], &sb[2], &sb[3]);
566 sb[1] &= 0x0f; 566 sb[1] &= 0x0f;
@@ -635,7 +635,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
635 * Use ata_to_sense_error() to map status register bits 635 * Use ata_to_sense_error() to map status register bits
636 * onto sense key, asc & ascq. 636 * onto sense key, asc & ascq.
637 */ 637 */
638 if (unlikely(tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ))) { 638 if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
639 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 639 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
640 &sb[2], &sb[12], &sb[13]); 640 &sb[2], &sb[12], &sb[13]);
641 sb[2] &= 0x0f; 641 sb[2] &= 0x0f;
@@ -644,7 +644,11 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
644 sb[0] = 0x70; 644 sb[0] = 0x70;
645 sb[7] = 0x0a; 645 sb[7] = 0x0a;
646 646
647 if (tf->flags & ATA_TFLAG_LBA && !(tf->flags & ATA_TFLAG_LBA48)) { 647 if (tf->flags & ATA_TFLAG_LBA48) {
648 /* TODO: find solution for LBA48 descriptors */
649 }
650
651 else if (tf->flags & ATA_TFLAG_LBA) {
648 /* A small (28b) LBA will fit in the 32b info field */ 652 /* A small (28b) LBA will fit in the 32b info field */
649 sb[0] |= 0x80; /* set valid bit */ 653 sb[0] |= 0x80; /* set valid bit */
650 sb[3] = tf->device & 0x0f; 654 sb[3] = tf->device & 0x0f;
@@ -652,6 +656,10 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
652 sb[5] = tf->lbam; 656 sb[5] = tf->lbam;
653 sb[6] = tf->lbal; 657 sb[6] = tf->lbal;
654 } 658 }
659
660 else {
661 /* TODO: C/H/S */
662 }
655} 663}
656 664
657/** 665/**
@@ -1199,10 +1207,12 @@ nothing_to_do:
1199 return 1; 1207 return 1;
1200} 1208}
1201 1209
1202static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 1210static int ata_scsi_qc_complete(struct ata_queued_cmd *qc,
1211 unsigned int err_mask)
1203{ 1212{
1204 struct scsi_cmnd *cmd = qc->scsicmd; 1213 struct scsi_cmnd *cmd = qc->scsicmd;
1205 int need_sense = drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ); 1214 u8 *cdb = cmd->cmnd;
1215 int need_sense = (err_mask != 0);
1206 1216
1207 /* For ATA pass thru (SAT) commands, generate a sense block if 1217 /* For ATA pass thru (SAT) commands, generate a sense block if
1208 * user mandated it or if there's an error. Note that if we 1218 * user mandated it or if there's an error. Note that if we
@@ -1211,8 +1221,8 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1211 * whether the command completed successfully or not. If there 1221 * whether the command completed successfully or not. If there
1212 * was no error, SK, ASC and ASCQ will all be zero. 1222 * was no error, SK, ASC and ASCQ will all be zero.
1213 */ 1223 */
1214 if (((cmd->cmnd[0] == ATA_16) || (cmd->cmnd[0] == ATA_12)) && 1224 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1215 ((cmd->cmnd[2] & 0x20) || need_sense)) { 1225 ((cdb[2] & 0x20) || need_sense)) {
1216 ata_gen_ata_desc_sense(qc); 1226 ata_gen_ata_desc_sense(qc);
1217 } else { 1227 } else {
1218 if (!need_sense) { 1228 if (!need_sense) {
@@ -1995,21 +2005,13 @@ void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
1995 DPRINTK("EXIT\n"); 2005 DPRINTK("EXIT\n");
1996} 2006}
1997 2007
1998static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 2008static int atapi_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
1999{ 2009{
2000 struct scsi_cmnd *cmd = qc->scsicmd; 2010 struct scsi_cmnd *cmd = qc->scsicmd;
2001 2011
2002 VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat); 2012 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2003
2004 if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ)))
2005 /* FIXME: not quite right; we don't want the
2006 * translation of taskfile registers into
2007 * a sense descriptors, since that's only
2008 * correct for ATA, not ATAPI
2009 */
2010 ata_gen_ata_desc_sense(qc);
2011 2013
2012 else if (unlikely(drv_stat & ATA_ERR)) { 2014 if (unlikely(err_mask & AC_ERR_DEV)) {
2013 DPRINTK("request check condition\n"); 2015 DPRINTK("request check condition\n");
2014 2016
2015 /* FIXME: command completion with check condition 2017 /* FIXME: command completion with check condition
@@ -2026,6 +2028,14 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2026 return 1; 2028 return 1;
2027 } 2029 }
2028 2030
2031 else if (unlikely(err_mask))
2032 /* FIXME: not quite right; we don't want the
2033 * translation of taskfile registers into
2034 * a sense descriptors, since that's only
2035 * correct for ATA, not ATAPI
2036 */
2037 ata_gen_ata_desc_sense(qc);
2038
2029 else { 2039 else {
2030 u8 *scsicmd = cmd->cmnd; 2040 u8 *scsicmd = cmd->cmnd;
2031 2041
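For the fixed-format sense change above, the following sketch spells out how a 28-bit LBA from the taskfile is packed into the 4-byte information field (sense bytes 3..6): the top nibble comes from the device register, the rest from lbah/lbam/lbal. The sb[4] = tf->lbah line is inferred from the hunk context (it falls between the two hunks shown); everything else mirrors the code above.

/* Pack a 28-bit LBA into the information field of fixed-format sense data. */
static void pack_lba28_fixed_sense(const struct ata_taskfile *tf, u8 *sb)
{
	sb[0] |= 0x80;			/* valid bit: information field in use */
	sb[3]  = tf->device & 0x0f;	/* LBA bits 27:24 (low nibble of device reg) */
	sb[4]  = tf->lbah;		/* LBA bits 23:16 */
	sb[5]  = tf->lbam;		/* LBA bits 15:8 */
	sb[6]  = tf->lbal;		/* LBA bits 7:0 */
}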
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 65c264b91136..10ecd9e15e4f 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -39,7 +39,7 @@ struct ata_scsi_args {
39 39
40/* libata-core.c */ 40/* libata-core.c */
41extern int atapi_enabled; 41extern int atapi_enabled;
42extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat); 42extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask);
43extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 43extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
44 struct ata_device *dev); 44 struct ata_device *dev);
45extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc); 45extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index b235556b7b65..bdccf73cf9fe 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -730,7 +730,7 @@ static void start_phase(struct mesh_state *ms)
730 * issue a SEQ_MSGOUT to get the mesh to drop ACK. 730 * issue a SEQ_MSGOUT to get the mesh to drop ACK.
731 */ 731 */
732 if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) { 732 if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) {
733 dlog(ms, "bus0 was %.2x explictly asserting ATN", mr->bus_status0); 733 dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
734 out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */ 734 out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */
735 mesh_flush_io(mr); 735 mesh_flush_io(mr);
736 udelay(1); 736 udelay(1);
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index af99feb9d237..665017eda8a6 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -40,6 +40,7 @@
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/device.h>
43#include "scsi.h" 44#include "scsi.h"
44#include <scsi/scsi_host.h> 45#include <scsi/scsi_host.h>
45#include <asm/io.h> 46#include <asm/io.h>
@@ -451,7 +452,7 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
451 struct adma_port_priv *pp; 452 struct adma_port_priv *pp;
452 struct ata_queued_cmd *qc; 453 struct ata_queued_cmd *qc;
453 void __iomem *chan = ADMA_REGS(mmio_base, port_no); 454 void __iomem *chan = ADMA_REGS(mmio_base, port_no);
454 u8 drv_stat = 0, status = readb(chan + ADMA_STATUS); 455 u8 status = readb(chan + ADMA_STATUS);
455 456
456 if (status == 0) 457 if (status == 0)
457 continue; 458 continue;
@@ -464,11 +465,14 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
464 continue; 465 continue;
465 qc = ata_qc_from_tag(ap, ap->active_tag); 466 qc = ata_qc_from_tag(ap, ap->active_tag);
466 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 467 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
468 unsigned int err_mask = 0;
469
467 if ((status & (aPERR | aPSD | aUIRQ))) 470 if ((status & (aPERR | aPSD | aUIRQ)))
468 drv_stat = ATA_ERR; 471 err_mask = AC_ERR_OTHER;
469 else if (pp->pkt[0] != cDONE) 472 else if (pp->pkt[0] != cDONE)
470 drv_stat = ATA_ERR; 473 err_mask = AC_ERR_OTHER;
471 ata_qc_complete(qc, drv_stat); 474
475 ata_qc_complete(qc, err_mask);
472 } 476 }
473 } 477 }
474 return handled; 478 return handled;
@@ -498,7 +502,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
498 502
499 /* complete taskfile transaction */ 503 /* complete taskfile transaction */
500 pp->state = adma_state_idle; 504 pp->state = adma_state_idle;
501 ata_qc_complete(qc, status); 505 ata_qc_complete(qc, ac_err_mask(status));
502 handled = 1; 506 handled = 1;
503 } 507 }
504 } 508 }
@@ -623,16 +627,14 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
623 627
624 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 628 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
625 if (rc) { 629 if (rc) {
626 printk(KERN_ERR DRV_NAME 630 dev_printk(KERN_ERR, &pdev->dev,
627 "(%s): 32-bit DMA enable failed\n", 631 "32-bit DMA enable failed\n");
628 pci_name(pdev));
629 return rc; 632 return rc;
630 } 633 }
631 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 634 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
632 if (rc) { 635 if (rc) {
633 printk(KERN_ERR DRV_NAME 636 dev_printk(KERN_ERR, &pdev->dev,
634 "(%s): 32-bit consistent DMA enable failed\n", 637 "32-bit consistent DMA enable failed\n");
635 pci_name(pdev));
636 return rc; 638 return rc;
637 } 639 }
638 return 0; 640 return 0;
@@ -648,7 +650,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
648 int rc, port_no; 650 int rc, port_no;
649 651
650 if (!printed_version++) 652 if (!printed_version++)
651 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 653 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
652 654
653 rc = pci_enable_device(pdev); 655 rc = pci_enable_device(pdev);
654 if (rc) 656 if (rc)
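Several of the driver diffs here (pdc_adma above, and sata_mv, sata_nv, the Promise, QStor, SiS, VIA and Vitesse drivers below) make the same logging change: hand-built printk() calls that splice DRV_NAME and pci_name() into the format string are replaced by dev_printk(), which takes the struct device and generates the prefix itself. A minimal before/after sketch, assuming a driver-local DRV_NAME macro:

#include <linux/device.h>
#include <linux/pci.h>

static int enable_dma32(struct pci_dev *pdev)
{
	int rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);

	if (rc) {
		/* Old style: printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable
		 * failed\n", pci_name(pdev));
		 * New style: dev_printk() derives the "driver device:" prefix
		 * from &pdev->dev, so the format carries only the message. */
		dev_printk(KERN_ERR, &pdev->dev, "32-bit DMA enable failed\n");
	}
	return rc;
}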
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 422e0b6f603a..46dbdee79f77 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -29,6 +29,7 @@
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/device.h>
32#include "scsi.h" 33#include "scsi.h"
33#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
34#include <linux/libata.h> 35#include <linux/libata.h>
@@ -258,7 +259,6 @@ struct mv_host_priv {
258static void mv_irq_clear(struct ata_port *ap); 259static void mv_irq_clear(struct ata_port *ap);
259static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); 260static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
260static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 261static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
261static u8 mv_check_err(struct ata_port *ap);
262static void mv_phy_reset(struct ata_port *ap); 262static void mv_phy_reset(struct ata_port *ap);
263static void mv_host_stop(struct ata_host_set *host_set); 263static void mv_host_stop(struct ata_host_set *host_set);
264static int mv_port_start(struct ata_port *ap); 264static int mv_port_start(struct ata_port *ap);
@@ -296,7 +296,6 @@ static const struct ata_port_operations mv_ops = {
296 .tf_load = ata_tf_load, 296 .tf_load = ata_tf_load,
297 .tf_read = ata_tf_read, 297 .tf_read = ata_tf_read,
298 .check_status = ata_check_status, 298 .check_status = ata_check_status,
299 .check_err = mv_check_err,
300 .exec_command = ata_exec_command, 299 .exec_command = ata_exec_command,
301 .dev_select = ata_std_dev_select, 300 .dev_select = ata_std_dev_select,
302 301
@@ -1067,6 +1066,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1067 struct ata_queued_cmd *qc; 1066 struct ata_queued_cmd *qc;
1068 u32 hc_irq_cause; 1067 u32 hc_irq_cause;
1069 int shift, port, port0, hard_port, handled; 1068 int shift, port, port0, hard_port, handled;
1069 unsigned int err_mask;
1070 u8 ata_status = 0; 1070 u8 ata_status = 0;
1071 1071
1072 if (hc == 0) { 1072 if (hc == 0) {
@@ -1102,15 +1102,15 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1102 handled++; 1102 handled++;
1103 } 1103 }
1104 1104
1105 err_mask = ac_err_mask(ata_status);
1106
1105 shift = port << 1; /* (port * 2) */ 1107 shift = port << 1; /* (port * 2) */
1106 if (port >= MV_PORTS_PER_HC) { 1108 if (port >= MV_PORTS_PER_HC) {
1107 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1109 shift++; /* skip bit 8 in the HC Main IRQ reg */
1108 } 1110 }
1109 if ((PORT0_ERR << shift) & relevant) { 1111 if ((PORT0_ERR << shift) & relevant) {
1110 mv_err_intr(ap); 1112 mv_err_intr(ap);
1111 /* OR in ATA_ERR to ensure libata knows we took one */ 1113 err_mask |= AC_ERR_OTHER;
1112 ata_status = readb((void __iomem *)
1113 ap->ioaddr.status_addr) | ATA_ERR;
1114 handled++; 1114 handled++;
1115 } 1115 }
1116 1116
@@ -1120,7 +1120,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1120 VPRINTK("port %u IRQ found for qc, " 1120 VPRINTK("port %u IRQ found for qc, "
1121 "ata_status 0x%x\n", port,ata_status); 1121 "ata_status 0x%x\n", port,ata_status);
1122 /* mark qc status appropriately */ 1122 /* mark qc status appropriately */
1123 ata_qc_complete(qc, ata_status); 1123 ata_qc_complete(qc, err_mask);
1124 } 1124 }
1125 } 1125 }
1126 } 1126 }
@@ -1185,22 +1185,6 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1185} 1185}
1186 1186
1187/** 1187/**
1188 * mv_check_err - Return the error shadow register to caller.
1189 * @ap: ATA channel to manipulate
1190 *
1191 * Marvell requires DMA to be stopped before accessing shadow
1192 * registers. So we do that, then return the needed register.
1193 *
1194 * LOCKING:
1195 * Inherited from caller. FIXME: protect mv_stop_dma with lock?
1196 */
1197static u8 mv_check_err(struct ata_port *ap)
1198{
1199 mv_stop_dma(ap); /* can't read shadow regs if DMA on */
1200 return readb((void __iomem *) ap->ioaddr.error_addr);
1201}
1202
1203/**
1204 * mv_phy_reset - Perform eDMA reset followed by COMRESET 1188 * mv_phy_reset - Perform eDMA reset followed by COMRESET
1205 * @ap: ATA channel to manipulate 1189 * @ap: ATA channel to manipulate
1206 * 1190 *
@@ -1312,7 +1296,7 @@ static void mv_eng_timeout(struct ata_port *ap)
1312 */ 1296 */
1313 spin_lock_irqsave(&ap->host_set->lock, flags); 1297 spin_lock_irqsave(&ap->host_set->lock, flags);
1314 qc->scsidone = scsi_finish_command; 1298 qc->scsidone = scsi_finish_command;
1315 ata_qc_complete(qc, ATA_ERR); 1299 ata_qc_complete(qc, AC_ERR_OTHER);
1316 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1300 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1317 } 1301 }
1318} 1302}
@@ -1454,9 +1438,9 @@ static void mv_print_info(struct ata_probe_ent *probe_ent)
1454 else 1438 else
1455 scc_s = "unknown"; 1439 scc_s = "unknown";
1456 1440
1457 printk(KERN_INFO DRV_NAME 1441 dev_printk(KERN_INFO, &pdev->dev,
1458 "(%s) %u slots %u ports %s mode IRQ via %s\n", 1442 "%u slots %u ports %s mode IRQ via %s\n",
1459 pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports, 1443 (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
1460 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 1444 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
1461} 1445}
1462 1446
@@ -1477,9 +1461,8 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1477 void __iomem *mmio_base; 1461 void __iomem *mmio_base;
1478 int pci_dev_busy = 0, rc; 1462 int pci_dev_busy = 0, rc;
1479 1463
1480 if (!printed_version++) { 1464 if (!printed_version++)
1481 printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n"); 1465 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
1482 }
1483 1466
1484 rc = pci_enable_device(pdev); 1467 rc = pci_enable_device(pdev);
1485 if (rc) { 1468 if (rc) {
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 1a56d6c79ddd..d573888eda76 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -61,6 +61,7 @@
61#include <linux/blkdev.h> 61#include <linux/blkdev.h>
62#include <linux/delay.h> 62#include <linux/delay.h>
63#include <linux/interrupt.h> 63#include <linux/interrupt.h>
64#include <linux/device.h>
64#include "scsi.h" 65#include "scsi.h"
65#include <scsi/scsi_host.h> 66#include <scsi/scsi_host.h>
66#include <linux/libata.h> 67#include <linux/libata.h>
@@ -383,7 +384,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
383 return -ENODEV; 384 return -ENODEV;
384 385
385 if (!printed_version++) 386 if (!printed_version++)
386 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 387 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
387 388
388 rc = pci_enable_device(pdev); 389 rc = pci_enable_device(pdev);
389 if (rc) 390 if (rc)
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 63911f16b6ec..b41c977d6fab 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -38,6 +38,7 @@
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h> 40#include <linux/sched.h>
41#include <linux/device.h>
41#include "scsi.h" 42#include "scsi.h"
42#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
43#include <linux/libata.h> 44#include <linux/libata.h>
@@ -399,7 +400,8 @@ static void pdc_eng_timeout(struct ata_port *ap)
399 case ATA_PROT_DMA: 400 case ATA_PROT_DMA:
400 case ATA_PROT_NODATA: 401 case ATA_PROT_NODATA:
401 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 402 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
402 ata_qc_complete(qc, ata_wait_idle(ap) | ATA_ERR); 403 drv_stat = ata_wait_idle(ap);
404 ata_qc_complete(qc, __ac_err_mask(drv_stat));
403 break; 405 break;
404 406
405 default: 407 default:
@@ -408,7 +410,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
408 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 410 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
409 ap->id, qc->tf.command, drv_stat); 411 ap->id, qc->tf.command, drv_stat);
410 412
411 ata_qc_complete(qc, drv_stat); 413 ata_qc_complete(qc, ac_err_mask(drv_stat));
412 break; 414 break;
413 } 415 }
414 416
@@ -420,24 +422,21 @@ out:
420static inline unsigned int pdc_host_intr( struct ata_port *ap, 422static inline unsigned int pdc_host_intr( struct ata_port *ap,
421 struct ata_queued_cmd *qc) 423 struct ata_queued_cmd *qc)
422{ 424{
423 u8 status; 425 unsigned int handled = 0, err_mask = 0;
424 unsigned int handled = 0, have_err = 0;
425 u32 tmp; 426 u32 tmp;
426 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL; 427 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
427 428
428 tmp = readl(mmio); 429 tmp = readl(mmio);
429 if (tmp & PDC_ERR_MASK) { 430 if (tmp & PDC_ERR_MASK) {
430 have_err = 1; 431 err_mask = AC_ERR_DEV;
431 pdc_reset_port(ap); 432 pdc_reset_port(ap);
432 } 433 }
433 434
434 switch (qc->tf.protocol) { 435 switch (qc->tf.protocol) {
435 case ATA_PROT_DMA: 436 case ATA_PROT_DMA:
436 case ATA_PROT_NODATA: 437 case ATA_PROT_NODATA:
437 status = ata_wait_idle(ap); 438 err_mask |= ac_err_mask(ata_wait_idle(ap));
438 if (have_err) 439 ata_qc_complete(qc, err_mask);
439 status |= ATA_ERR;
440 ata_qc_complete(qc, status);
441 handled = 1; 440 handled = 1;
442 break; 441 break;
443 442
@@ -635,7 +634,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
635 int rc; 634 int rc;
636 635
637 if (!printed_version++) 636 if (!printed_version++)
638 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 637 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
639 638
640 /* 639 /*
641 * If this driver happens to only be useful on Apple's K2, then 640 * If this driver happens to only be useful on Apple's K2, then
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 1aaf3304d397..9938dae782b6 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/device.h>
38#include "scsi.h" 39#include "scsi.h"
39#include <scsi/scsi_host.h> 40#include <scsi/scsi_host.h>
40#include <asm/io.h> 41#include <asm/io.h>
@@ -400,11 +401,12 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
400 qc = ata_qc_from_tag(ap, ap->active_tag); 401 qc = ata_qc_from_tag(ap, ap->active_tag);
401 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 402 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
402 switch (sHST) { 403 switch (sHST) {
403 case 0: /* sucessful CPB */ 404 case 0: /* successful CPB */
404 case 3: /* device error */ 405 case 3: /* device error */
405 pp->state = qs_state_idle; 406 pp->state = qs_state_idle;
406 qs_enter_reg_mode(qc->ap); 407 qs_enter_reg_mode(qc->ap);
407 ata_qc_complete(qc, sDST); 408 ata_qc_complete(qc,
409 ac_err_mask(sDST));
408 break; 410 break;
409 default: 411 default:
410 break; 412 break;
@@ -441,7 +443,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
441 443
442 /* complete taskfile transaction */ 444 /* complete taskfile transaction */
443 pp->state = qs_state_idle; 445 pp->state = qs_state_idle;
444 ata_qc_complete(qc, status); 446 ata_qc_complete(qc, ac_err_mask(status));
445 handled = 1; 447 handled = 1;
446 } 448 }
447 } 449 }
@@ -599,25 +601,22 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
599 if (rc) { 601 if (rc) {
600 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 602 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
601 if (rc) { 603 if (rc) {
602 printk(KERN_ERR DRV_NAME 604 dev_printk(KERN_ERR, &pdev->dev,
603 "(%s): 64-bit DMA enable failed\n", 605 "64-bit DMA enable failed\n");
604 pci_name(pdev));
605 return rc; 606 return rc;
606 } 607 }
607 } 608 }
608 } else { 609 } else {
609 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 610 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
610 if (rc) { 611 if (rc) {
611 printk(KERN_ERR DRV_NAME 612 dev_printk(KERN_ERR, &pdev->dev,
612 "(%s): 32-bit DMA enable failed\n", 613 "32-bit DMA enable failed\n");
613 pci_name(pdev));
614 return rc; 614 return rc;
615 } 615 }
616 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 616 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
617 if (rc) { 617 if (rc) {
618 printk(KERN_ERR DRV_NAME 618 dev_printk(KERN_ERR, &pdev->dev,
619 "(%s): 32-bit consistent DMA enable failed\n", 619 "32-bit consistent DMA enable failed\n");
620 pci_name(pdev));
621 return rc; 620 return rc;
622 } 621 }
623 } 622 }
@@ -634,7 +633,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
634 int rc, port_no; 633 int rc, port_no;
635 634
636 if (!printed_version++) 635 if (!printed_version++)
637 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 636 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
638 637
639 rc = pci_enable_device(pdev); 638 rc = pci_enable_device(pdev);
640 if (rc) 639 if (rc)
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 3a056173fb95..435f7e0085ec 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -41,6 +41,7 @@
41#include <linux/blkdev.h> 41#include <linux/blkdev.h>
42#include <linux/delay.h> 42#include <linux/delay.h>
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <linux/device.h>
44#include "scsi.h" 45#include "scsi.h"
45#include <scsi/scsi_host.h> 46#include <scsi/scsi_host.h>
46#include <linux/libata.h> 47#include <linux/libata.h>
@@ -386,7 +387,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
386 u8 cls; 387 u8 cls;
387 388
388 if (!printed_version++) 389 if (!printed_version++)
389 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 390 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
390 391
391 /* 392 /*
392 * If this driver happens to only be useful on Apple's K2, then 393 * If this driver happens to only be useful on Apple's K2, then
@@ -463,8 +464,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
463 writeb(cls, mmio_base + SIL_FIFO_W3); 464 writeb(cls, mmio_base + SIL_FIFO_W3);
464 } 465 }
465 } else 466 } else
466 printk(KERN_WARNING DRV_NAME "(%s): cache line size not set. Driver may not function\n", 467 dev_printk(KERN_WARNING, &pdev->dev,
467 pci_name(pdev)); 468 "cache line size not set. Driver may not function\n");
468 469
469 if (ent->driver_data == sil_3114) { 470 if (ent->driver_data == sil_3114) {
470 irq_mask = SIL_MASK_4PORT; 471 irq_mask = SIL_MASK_4PORT;
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 51855d3bac64..c66548025657 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
38#include <linux/device.h>
38#include <scsi/scsi_host.h> 39#include <scsi/scsi_host.h>
39#include "scsi.h" 40#include "scsi.h"
40#include <linux/libata.h> 41#include <linux/libata.h>
@@ -225,7 +226,6 @@ struct sil24_host_priv {
225}; 226};
226 227
227static u8 sil24_check_status(struct ata_port *ap); 228static u8 sil24_check_status(struct ata_port *ap);
228static u8 sil24_check_err(struct ata_port *ap);
229static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 229static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
230static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 230static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
231static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 231static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
@@ -280,7 +280,6 @@ static const struct ata_port_operations sil24_ops = {
280 280
281 .check_status = sil24_check_status, 281 .check_status = sil24_check_status,
282 .check_altstatus = sil24_check_status, 282 .check_altstatus = sil24_check_status,
283 .check_err = sil24_check_err,
284 .dev_select = ata_noop_dev_select, 283 .dev_select = ata_noop_dev_select,
285 284
286 .tf_read = sil24_tf_read, 285 .tf_read = sil24_tf_read,
@@ -363,12 +362,6 @@ static u8 sil24_check_status(struct ata_port *ap)
363 return pp->tf.command; 362 return pp->tf.command;
364} 363}
365 364
366static u8 sil24_check_err(struct ata_port *ap)
367{
368 struct sil24_port_priv *pp = ap->private_data;
369 return pp->tf.feature;
370}
371
372static int sil24_scr_map[] = { 365static int sil24_scr_map[] = {
373 [SCR_CONTROL] = 0, 366 [SCR_CONTROL] = 0,
374 [SCR_STATUS] = 1, 367 [SCR_STATUS] = 1,
@@ -506,7 +499,7 @@ static void sil24_eng_timeout(struct ata_port *ap)
506 499
507 qc = ata_qc_from_tag(ap, ap->active_tag); 500 qc = ata_qc_from_tag(ap, ap->active_tag);
508 if (!qc) { 501 if (!qc) {
509 printk(KERN_ERR "ata%u: BUG: tiemout without command\n", 502 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
510 ap->id); 503 ap->id);
511 return; 504 return;
512 } 505 }
@@ -520,7 +513,7 @@ static void sil24_eng_timeout(struct ata_port *ap)
520 */ 513 */
521 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 514 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
522 qc->scsidone = scsi_finish_command; 515 qc->scsidone = scsi_finish_command;
523 ata_qc_complete(qc, ATA_ERR); 516 ata_qc_complete(qc, AC_ERR_OTHER);
524 517
525 sil24_reset_controller(ap); 518 sil24_reset_controller(ap);
526} 519}
@@ -531,6 +524,7 @@ static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
531 struct sil24_port_priv *pp = ap->private_data; 524 struct sil24_port_priv *pp = ap->private_data;
532 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 525 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
533 u32 irq_stat, cmd_err, sstatus, serror; 526 u32 irq_stat, cmd_err, sstatus, serror;
527 unsigned int err_mask;
534 528
535 irq_stat = readl(port + PORT_IRQ_STAT); 529 irq_stat = readl(port + PORT_IRQ_STAT);
536 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */ 530 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
@@ -558,17 +552,18 @@ static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
558 * Device is reporting error, tf registers are valid. 552 * Device is reporting error, tf registers are valid.
559 */ 553 */
560 sil24_update_tf(ap); 554 sil24_update_tf(ap);
555 err_mask = ac_err_mask(pp->tf.command);
561 } else { 556 } else {
562 /* 557 /*
563 * Other errors. libata currently doesn't have any 558 * Other errors. libata currently doesn't have any
564 * mechanism to report these errors. Just turn on 559 * mechanism to report these errors. Just turn on
565 * ATA_ERR. 560 * ATA_ERR.
566 */ 561 */
567 pp->tf.command = ATA_ERR; 562 err_mask = AC_ERR_OTHER;
568 } 563 }
569 564
570 if (qc) 565 if (qc)
571 ata_qc_complete(qc, pp->tf.command); 566 ata_qc_complete(qc, err_mask);
572 567
573 sil24_reset_controller(ap); 568 sil24_reset_controller(ap);
574} 569}
@@ -593,7 +588,7 @@ static inline void sil24_host_intr(struct ata_port *ap)
593 sil24_update_tf(ap); 588 sil24_update_tf(ap);
594 589
595 if (qc) 590 if (qc)
596 ata_qc_complete(qc, pp->tf.command); 591 ata_qc_complete(qc, ac_err_mask(pp->tf.command));
597 } else 592 } else
598 sil24_error_intr(ap, slot_stat); 593 sil24_error_intr(ap, slot_stat);
599} 594}
@@ -696,7 +691,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
696 int i, rc; 691 int i, rc;
697 692
698 if (!printed_version++) 693 if (!printed_version++)
699 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 694 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
700 695
701 rc = pci_enable_device(pdev); 696 rc = pci_enable_device(pdev);
702 if (rc) 697 if (rc)
@@ -756,14 +751,14 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
756 */ 751 */
757 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 752 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
758 if (rc) { 753 if (rc) {
759 printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n", 754 dev_printk(KERN_ERR, &pdev->dev,
760 pci_name(pdev)); 755 "32-bit DMA enable failed\n");
761 goto out_free; 756 goto out_free;
762 } 757 }
763 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 758 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
764 if (rc) { 759 if (rc) {
765 printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n", 760 dev_printk(KERN_ERR, &pdev->dev,
766 pci_name(pdev)); 761 "32-bit consistent DMA enable failed\n");
767 goto out_free; 762 goto out_free;
768 } 763 }
769 764
@@ -799,9 +794,8 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
799 break; 794 break;
800 } 795 }
801 if (tmp & PORT_CS_PORT_RST) 796 if (tmp & PORT_CS_PORT_RST)
802 printk(KERN_ERR DRV_NAME 797 dev_printk(KERN_ERR, &pdev->dev,
803 "(%s): failed to clear port RST\n", 798 "failed to clear port RST\n");
804 pci_name(pdev));
805 } 799 }
806 800
807 /* Zero error counters. */ 801 /* Zero error counters. */
@@ -830,9 +824,8 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
830 824
831 /* Reset itself */ 825 /* Reset itself */
832 if (__sil24_reset_controller(port)) 826 if (__sil24_reset_controller(port))
833 printk(KERN_ERR DRV_NAME 827 dev_printk(KERN_ERR, &pdev->dev,
834 "(%s): failed to reset controller\n", 828 "failed to reset controller\n");
835 pci_name(pdev));
836 } 829 }
837 830
838 /* Turn on interrupts */ 831 /* Turn on interrupts */
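Together with the ata_chk_err() removal in libata-core.c and the deleted mv_check_err()/sil24_check_err() hooks, error-register reporting now flows through ->tf_read(): the driver fills tf->feature, and callers such as ata_dev_try_classify() read the value out of the taskfile. The sketch below restates that pattern for a PIO port; it mirrors the ata_tf_read_pio() hunk earlier, with the lbah/device reads inferred from the standard taskfile layout.

/* Read the shadow taskfile registers, including the error register, so no
 * separate ->check_err() hook is needed. */
static void example_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = inb(ioaddr->error_addr);	/* error reg lands in tf->feature */
	tf->nsect   = inb(ioaddr->nsect_addr);
	tf->lbal    = inb(ioaddr->lbal_addr);
	tf->lbam    = inb(ioaddr->lbam_addr);
	tf->lbah    = inb(ioaddr->lbah_addr);
	tf->device  = inb(ioaddr->device_addr);
}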
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 057f7b98b6c4..42288be0e561 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -38,6 +38,7 @@
38#include <linux/blkdev.h> 38#include <linux/blkdev.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <linux/device.h>
41#include "scsi.h" 42#include "scsi.h"
42#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
43#include <linux/libata.h> 44#include <linux/libata.h>
@@ -237,6 +238,7 @@ static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
237 238
238static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 239static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
239{ 240{
241 static int printed_version;
240 struct ata_probe_ent *probe_ent = NULL; 242 struct ata_probe_ent *probe_ent = NULL;
241 int rc; 243 int rc;
242 u32 genctl; 244 u32 genctl;
@@ -245,6 +247,9 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
245 u8 pmr; 247 u8 pmr;
246 u8 port2_start; 248 u8 port2_start;
247 249
250 if (!printed_version++)
251 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
252
248 rc = pci_enable_device(pdev); 253 rc = pci_enable_device(pdev);
249 if (rc) 254 if (rc)
250 return rc; 255 return rc;
@@ -288,16 +293,18 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
288 pci_read_config_byte(pdev, SIS_PMR, &pmr); 293 pci_read_config_byte(pdev, SIS_PMR, &pmr);
289 if (ent->device != 0x182) { 294 if (ent->device != 0x182) {
290 if ((pmr & SIS_PMR_COMBINED) == 0) { 295 if ((pmr & SIS_PMR_COMBINED) == 0) {
291 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in SATA mode\n"); 296 dev_printk(KERN_INFO, &pdev->dev,
297 "Detected SiS 180/181 chipset in SATA mode\n");
292 port2_start = 64; 298 port2_start = 64;
293 } 299 }
294 else { 300 else {
295 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in combined mode\n"); 301 dev_printk(KERN_INFO, &pdev->dev,
302 "Detected SiS 180/181 chipset in combined mode\n");
296 port2_start=0; 303 port2_start=0;
297 } 304 }
298 } 305 }
299 else { 306 else {
300 printk(KERN_INFO "sata_sis: Detected SiS 182 chipset\n"); 307 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
301 port2_start = 0x20; 308 port2_start = 0x20;
302 } 309 }
303 310
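The sata_sis diff above and the sata_uli diff below add the same one-shot banner idiom the other drivers already use: a function-local static counter, so the version line is printed once for the first probed controller rather than once per device. A small sketch, with DRV_VERSION assumed to be the driver's usual version macro:

static int example_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;

	/* post-increment: only the very first call sees zero and prints */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	return pci_enable_device(pdev);
}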
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 46208f52d0e1..db615ff794d8 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -44,6 +44,7 @@
44#include <linux/blkdev.h> 44#include <linux/blkdev.h>
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/interrupt.h> 46#include <linux/interrupt.h>
47#include <linux/device.h>
47#include "scsi.h" 48#include "scsi.h"
48#include <scsi/scsi_host.h> 49#include <scsi/scsi_host.h>
49#include <linux/libata.h> 50#include <linux/libata.h>
@@ -362,7 +363,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
362 int i; 363 int i;
363 364
364 if (!printed_version++) 365 if (!printed_version++)
365 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 366 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
366 367
367 /* 368 /*
368 * If this driver happens to only be useful on Apple's K2, then 369 * If this driver happens to only be useful on Apple's K2, then
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index af08f4f650c1..0ec21e09f5d8 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -38,6 +38,7 @@
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h> 40#include <linux/sched.h>
41#include <linux/device.h>
41#include "scsi.h" 42#include "scsi.h"
42#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
43#include <linux/libata.h> 44#include <linux/libata.h>
@@ -718,7 +719,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
718 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id, 719 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
719 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 720 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
720 /* get drive status; clear intr; complete txn */ 721 /* get drive status; clear intr; complete txn */
721 ata_qc_complete(qc, ata_wait_idle(ap)); 722 ata_qc_complete(qc, ac_err_mask(ata_wait_idle(ap)));
722 pdc20621_pop_hdma(qc); 723 pdc20621_pop_hdma(qc);
723 } 724 }
724 725
@@ -756,7 +757,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
756 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id, 757 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
757 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 758 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
758 /* get drive status; clear intr; complete txn */ 759 /* get drive status; clear intr; complete txn */
759 ata_qc_complete(qc, ata_wait_idle(ap)); 760 ata_qc_complete(qc, ac_err_mask(ata_wait_idle(ap)));
760 pdc20621_pop_hdma(qc); 761 pdc20621_pop_hdma(qc);
761 } 762 }
762 handled = 1; 763 handled = 1;
@@ -766,7 +767,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
766 767
767 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 768 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
768 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); 769 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
769 ata_qc_complete(qc, status); 770 ata_qc_complete(qc, ac_err_mask(status));
770 handled = 1; 771 handled = 1;
771 772
772 } else { 773 } else {
@@ -881,7 +882,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
881 case ATA_PROT_DMA: 882 case ATA_PROT_DMA:
882 case ATA_PROT_NODATA: 883 case ATA_PROT_NODATA:
883 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 884 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
884 ata_qc_complete(qc, ata_wait_idle(ap) | ATA_ERR); 885 ata_qc_complete(qc, __ac_err_mask(ata_wait_idle(ap)));
885 break; 886 break;
886 887
887 default: 888 default:
@@ -890,7 +891,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
890 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 891 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
891 ap->id, qc->tf.command, drv_stat); 892 ap->id, qc->tf.command, drv_stat);
892 893
893 ata_qc_complete(qc, drv_stat); 894 ata_qc_complete(qc, ac_err_mask(drv_stat));
894 break; 895 break;
895 } 896 }
896 897
@@ -1385,7 +1386,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1385 int rc; 1386 int rc;
1386 1387
1387 if (!printed_version++) 1388 if (!printed_version++)
1388 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 1389 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1389 1390
1390 /* 1391 /*
1391 * If this driver happens to only be useful on Apple's K2, then 1392 * If this driver happens to only be useful on Apple's K2, then
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index d68dc7d3422c..a5e245c098e1 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -32,6 +32,7 @@
32#include <linux/blkdev.h> 32#include <linux/blkdev.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/device.h>
35#include "scsi.h" 36#include "scsi.h"
36#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
37#include <linux/libata.h> 38#include <linux/libata.h>
@@ -178,12 +179,16 @@ static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
178 179
179static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 180static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
180{ 181{
182 static int printed_version;
181 struct ata_probe_ent *probe_ent; 183 struct ata_probe_ent *probe_ent;
182 struct ata_port_info *ppi; 184 struct ata_port_info *ppi;
183 int rc; 185 int rc;
184 unsigned int board_idx = (unsigned int) ent->driver_data; 186 unsigned int board_idx = (unsigned int) ent->driver_data;
185 int pci_dev_busy = 0; 187 int pci_dev_busy = 0;
186 188
189 if (!printed_version++)
190 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
191
187 rc = pci_enable_device(pdev); 192 rc = pci_enable_device(pdev);
188 if (rc) 193 if (rc)
189 return rc; 194 return rc;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 80e291a909a9..b3ecdbe400e9 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -41,6 +41,7 @@
41#include <linux/init.h> 41#include <linux/init.h>
42#include <linux/blkdev.h> 42#include <linux/blkdev.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/device.h>
44#include "scsi.h" 45#include "scsi.h"
45#include <scsi/scsi_host.h> 46#include <scsi/scsi_host.h>
46#include <linux/libata.h> 47#include <linux/libata.h>
@@ -259,15 +260,15 @@ static void svia_configure(struct pci_dev *pdev)
259 u8 tmp8; 260 u8 tmp8;
260 261
261 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); 262 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
262 printk(KERN_INFO DRV_NAME "(%s): routed to hard irq line %d\n", 263 dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
263 pci_name(pdev),
264 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); 264 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
265 265
266 /* make sure SATA channels are enabled */ 266 /* make sure SATA channels are enabled */
267 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); 267 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
268 if ((tmp8 & ALL_PORTS) != ALL_PORTS) { 268 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
269 printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channels (0x%x)\n", 269 dev_printk(KERN_DEBUG, &pdev->dev,
270 pci_name(pdev), (int) tmp8); 270 "enabling SATA channels (0x%x)\n",
271 (int) tmp8);
271 tmp8 |= ALL_PORTS; 272 tmp8 |= ALL_PORTS;
272 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); 273 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
273 } 274 }
@@ -275,8 +276,9 @@ static void svia_configure(struct pci_dev *pdev)
275 /* make sure interrupts for each channel sent to us */ 276 /* make sure interrupts for each channel sent to us */
276 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); 277 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
277 if ((tmp8 & ALL_PORTS) != ALL_PORTS) { 278 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
278 printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channel interrupts (0x%x)\n", 279 dev_printk(KERN_DEBUG, &pdev->dev,
279 pci_name(pdev), (int) tmp8); 280 "enabling SATA channel interrupts (0x%x)\n",
281 (int) tmp8);
280 tmp8 |= ALL_PORTS; 282 tmp8 |= ALL_PORTS;
281 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); 283 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
282 } 284 }
@@ -284,8 +286,9 @@ static void svia_configure(struct pci_dev *pdev)
284 /* make sure native mode is enabled */ 286 /* make sure native mode is enabled */
285 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); 287 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
286 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { 288 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
287 printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channel native mode (0x%x)\n", 289 dev_printk(KERN_DEBUG, &pdev->dev,
288 pci_name(pdev), (int) tmp8); 290 "enabling SATA channel native mode (0x%x)\n",
291 (int) tmp8);
289 tmp8 |= NATIVE_MODE_ALL; 292 tmp8 |= NATIVE_MODE_ALL;
290 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); 293 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
291 } 294 }
@@ -303,7 +306,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
303 u8 tmp8; 306 u8 tmp8;
304 307
305 if (!printed_version++) 308 if (!printed_version++)
306 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 309 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
307 310
308 rc = pci_enable_device(pdev); 311 rc = pci_enable_device(pdev);
309 if (rc) 312 if (rc)
@@ -318,8 +321,9 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
318 if (board_id == vt6420) { 321 if (board_id == vt6420) {
319 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8); 322 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
320 if (tmp8 & SATA_2DEV) { 323 if (tmp8 & SATA_2DEV) {
321 printk(KERN_ERR DRV_NAME "(%s): SATA master/slave not supported (0x%x)\n", 324 dev_printk(KERN_ERR, &pdev->dev,
322 pci_name(pdev), (int) tmp8); 325 "SATA master/slave not supported (0x%x)\n",
326 (int) tmp8);
323 rc = -EIO; 327 rc = -EIO;
324 goto err_out_regions; 328 goto err_out_regions;
325 } 329 }
@@ -332,10 +336,11 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
332 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++) 336 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
333 if ((pci_resource_start(pdev, i) == 0) || 337 if ((pci_resource_start(pdev, i) == 0) ||
334 (pci_resource_len(pdev, i) < bar_sizes[i])) { 338 (pci_resource_len(pdev, i) < bar_sizes[i])) {
335 printk(KERN_ERR DRV_NAME "(%s): invalid PCI BAR %u (sz 0x%lx, val 0x%lx)\n", 339 dev_printk(KERN_ERR, &pdev->dev,
336 pci_name(pdev), i, 340 "invalid PCI BAR %u (sz 0x%lx, val 0x%lx)\n",
337 pci_resource_start(pdev, i), 341 i,
338 pci_resource_len(pdev, i)); 342 pci_resource_start(pdev, i),
343 pci_resource_len(pdev, i));
339 rc = -ENODEV; 344 rc = -ENODEV;
340 goto err_out_regions; 345 goto err_out_regions;
341 } 346 }
@@ -353,8 +358,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
353 probe_ent = vt6421_init_probe_ent(pdev); 358 probe_ent = vt6421_init_probe_ent(pdev);
354 359
355 if (!probe_ent) { 360 if (!probe_ent) {
356 printk(KERN_ERR DRV_NAME "(%s): out of memory\n", 361 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
357 pci_name(pdev));
358 rc = -ENOMEM; 362 rc = -ENOMEM;
359 goto err_out_regions; 363 goto err_out_regions;
360 } 364 }
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 54273e0063c7..bb84ba0c7e83 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -42,6 +42,7 @@
42#include <linux/delay.h> 42#include <linux/delay.h>
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <linux/dma-mapping.h> 44#include <linux/dma-mapping.h>
45#include <linux/device.h>
45#include "scsi.h" 46#include "scsi.h"
46#include <scsi/scsi_host.h> 47#include <scsi/scsi_host.h>
47#include <linux/libata.h> 48#include <linux/libata.h>
@@ -295,7 +296,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
295 int rc; 296 int rc;
296 297
297 if (!printed_version++) 298 if (!printed_version++)
298 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 299 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
299 300
300 rc = pci_enable_device(pdev); 301 rc = pci_enable_device(pdev);
301 if (rc) 302 if (rc)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 771e97ef136e..b856e140e65f 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -26,6 +26,7 @@
26 */ 26 */
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/sched.h> /* workqueue stuff, HZ */
29#include <scsi/scsi_device.h> 30#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport.h> 32#include <scsi/scsi_transport.h>
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 8bb8222ea589..d2caa35059d9 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -19,6 +19,9 @@
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */ 20 */
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/string.h>
23#include <linux/slab.h>
24
22#include <scsi/scsi.h> 25#include <scsi/scsi.h>
23#include <scsi/scsi_host.h> 26#include <scsi/scsi_host.h>
24#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index e753ba27dc59..a1a58e1d5ad3 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -37,6 +37,9 @@
37 * along with this program; if not, write to the Free Software 37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 38 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 */ 39 */
40
41#include <linux/slab.h>
42
40#include "sym_glue.h" 43#include "sym_glue.h"
41#include "sym_nvram.h" 44#include "sym_nvram.h"
42 45
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 3131a6bf7ab7..3a264a408216 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -37,6 +37,8 @@
37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 */ 38 */
39 39
40#include <linux/gfp.h>
41
40#ifndef SYM_HIPD_H 42#ifndef SYM_HIPD_H
41#define SYM_HIPD_H 43#define SYM_HIPD_H
42 44
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index f88fdd480685..771676abee60 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -308,6 +308,8 @@ struct ioc4_serial {
308typedef void ioc4_intr_func_f(void *, uint32_t); 308typedef void ioc4_intr_func_f(void *, uint32_t);
309typedef ioc4_intr_func_f *ioc4_intr_func_t; 309typedef ioc4_intr_func_f *ioc4_intr_func_t;
310 310
311static unsigned int Num_of_ioc4_cards;
312
311/* defining this will get you LOTS of great debug info */ 313/* defining this will get you LOTS of great debug info */
312//#define DEBUG_INTERRUPTS 314//#define DEBUG_INTERRUPTS
313#define DPRINT_CONFIG(_x...) ; 315#define DPRINT_CONFIG(_x...) ;
@@ -317,7 +319,8 @@ typedef ioc4_intr_func_f *ioc4_intr_func_t;
317#define WAKEUP_CHARS 256 319#define WAKEUP_CHARS 256
318 320
319/* number of characters we want to transmit to the lower level at a time */ 321/* number of characters we want to transmit to the lower level at a time */
320#define IOC4_MAX_CHARS 128 322#define IOC4_MAX_CHARS 256
323#define IOC4_FIFO_CHARS 255
321 324
322/* Device name we're using */ 325/* Device name we're using */
323#define DEVICE_NAME "ttyIOC" 326#define DEVICE_NAME "ttyIOC"
@@ -1038,6 +1041,7 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
1038 return -ENOMEM; 1041 return -ENOMEM;
1039 } 1042 }
1040 memset(port, 0, sizeof(struct ioc4_port)); 1043 memset(port, 0, sizeof(struct ioc4_port));
1044 spin_lock_init(&port->ip_lock);
1041 1045
1042 /* we need to remember the previous ones, to point back to 1046 /* we need to remember the previous ones, to point back to
1043 * them farther down - setting up the ring buffers. 1047 * them farther down - setting up the ring buffers.
@@ -1691,12 +1695,14 @@ ioc4_change_speed(struct uart_port *the_port,
1691 baud = 9600; 1695 baud = 9600;
1692 1696
1693 if (!the_port->fifosize) 1697 if (!the_port->fifosize)
1694 the_port->fifosize = IOC4_MAX_CHARS; 1698 the_port->fifosize = IOC4_FIFO_CHARS;
1695 the_port->timeout = ((the_port->fifosize * HZ * bits) / (baud / 10)); 1699 the_port->timeout = ((the_port->fifosize * HZ * bits) / (baud / 10));
1696 the_port->timeout += HZ / 50; /* Add .02 seconds of slop */ 1700 the_port->timeout += HZ / 50; /* Add .02 seconds of slop */
1697 1701
1698 the_port->ignore_status_mask = N_ALL_INPUT; 1702 the_port->ignore_status_mask = N_ALL_INPUT;
1699 1703
1704 info->tty->low_latency = 1;
1705
1700 if (I_IGNPAR(info->tty)) 1706 if (I_IGNPAR(info->tty))
1701 the_port->ignore_status_mask &= ~(N_PARITY_ERROR 1707 the_port->ignore_status_mask &= ~(N_PARITY_ERROR
1702 | N_FRAMING_ERROR); 1708 | N_FRAMING_ERROR);
@@ -1742,7 +1748,6 @@ ioc4_change_speed(struct uart_port *the_port,
1742 */ 1748 */
1743static inline int ic4_startup_local(struct uart_port *the_port) 1749static inline int ic4_startup_local(struct uart_port *the_port)
1744{ 1750{
1745 int retval = 0;
1746 struct ioc4_port *port; 1751 struct ioc4_port *port;
1747 struct uart_info *info; 1752 struct uart_info *info;
1748 1753
@@ -1754,9 +1759,6 @@ static inline int ic4_startup_local(struct uart_port *the_port)
1754 return -1; 1759 return -1;
1755 1760
1756 info = the_port->info; 1761 info = the_port->info;
1757 if (info->flags & UIF_INITIALIZED) {
1758 return retval;
1759 }
1760 1762
1761 if (info->tty) { 1763 if (info->tty) {
1762 set_bit(TTY_IO_ERROR, &info->tty->flags); 1764 set_bit(TTY_IO_ERROR, &info->tty->flags);
@@ -1775,7 +1777,6 @@ static inline int ic4_startup_local(struct uart_port *the_port)
1775 /* set the speed of the serial port */ 1777 /* set the speed of the serial port */
1776 ioc4_change_speed(the_port, info->tty->termios, (struct termios *)0); 1778 ioc4_change_speed(the_port, info->tty->termios, (struct termios *)0);
1777 1779
1778 info->flags |= UIF_INITIALIZED;
1779 return 0; 1780 return 0;
1780} 1781}
1781 1782
@@ -1785,9 +1786,13 @@ static inline int ic4_startup_local(struct uart_port *the_port)
1785 */ 1786 */
1786static void ioc4_cb_output_lowat(struct ioc4_port *port) 1787static void ioc4_cb_output_lowat(struct ioc4_port *port)
1787{ 1788{
1789 unsigned long pflags;
1790
1788 /* ip_lock is set on the call here */ 1791 /* ip_lock is set on the call here */
1789 if (port->ip_port) { 1792 if (port->ip_port) {
1793 spin_lock_irqsave(&port->ip_port->lock, pflags);
1790 transmit_chars(port->ip_port); 1794 transmit_chars(port->ip_port);
1795 spin_unlock_irqrestore(&port->ip_port->lock, pflags);
1791 } 1796 }
1792} 1797}
1793 1798
@@ -2064,8 +2069,7 @@ static inline int do_read(struct uart_port *the_port, unsigned char *buf,
2064 * available data as long as it returns some. 2069 * available data as long as it returns some.
2065 */ 2070 */
2066 /* Re-arm the timer */ 2071 /* Re-arm the timer */
2067 writel(port->ip_rx_cons | IOC4_SRCIR_ARM, 2072 writel(port->ip_rx_cons | IOC4_SRCIR_ARM, &port->ip_serial_regs->srcir);
2068 &port->ip_serial_regs->srcir);
2069 2073
2070 prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK; 2074 prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
2071 cons_ptr = port->ip_rx_cons; 2075 cons_ptr = port->ip_rx_cons;
@@ -2299,6 +2303,7 @@ static inline int do_read(struct uart_port *the_port, unsigned char *buf,
2299 } 2303 }
2300 return total; 2304 return total;
2301} 2305}
2306
2302/** 2307/**
2303 * receive_chars - upper level read. Called with ip_lock. 2308 * receive_chars - upper level read. Called with ip_lock.
2304 * @the_port: port to read from 2309 * @the_port: port to read from
@@ -2307,9 +2312,11 @@ static void receive_chars(struct uart_port *the_port)
2307{ 2312{
2308 struct tty_struct *tty; 2313 struct tty_struct *tty;
2309 unsigned char ch[IOC4_MAX_CHARS]; 2314 unsigned char ch[IOC4_MAX_CHARS];
2310 int read_count, request_count; 2315 int read_count, request_count = IOC4_MAX_CHARS;
2311 struct uart_icount *icount; 2316 struct uart_icount *icount;
2312 struct uart_info *info = the_port->info; 2317 struct uart_info *info = the_port->info;
2318 int flip = 0;
2319 unsigned long pflags;
2313 2320
2314 /* Make sure all the pointers are "good" ones */ 2321 /* Make sure all the pointers are "good" ones */
2315 if (!info) 2322 if (!info)
@@ -2317,16 +2324,17 @@ static void receive_chars(struct uart_port *the_port)
2317 if (!info->tty) 2324 if (!info->tty)
2318 return; 2325 return;
2319 2326
2327 spin_lock_irqsave(&the_port->lock, pflags);
2320 tty = info->tty; 2328 tty = info->tty;
2321 2329
2322 request_count = TTY_FLIPBUF_SIZE - tty->flip.count - 1; 2330 if (request_count > TTY_FLIPBUF_SIZE - tty->flip.count)
2331 request_count = TTY_FLIPBUF_SIZE - tty->flip.count;
2323 2332
2324 if (request_count > 0) { 2333 if (request_count > 0) {
2325 if (request_count > IOC4_MAX_CHARS - 2)
2326 request_count = IOC4_MAX_CHARS - 2;
2327 icount = &the_port->icount; 2334 icount = &the_port->icount;
2328 read_count = do_read(the_port, ch, request_count); 2335 read_count = do_read(the_port, ch, request_count);
2329 if (read_count > 0) { 2336 if (read_count > 0) {
2337 flip = 1;
2330 memcpy(tty->flip.char_buf_ptr, ch, read_count); 2338 memcpy(tty->flip.char_buf_ptr, ch, read_count);
2331 memset(tty->flip.flag_buf_ptr, TTY_NORMAL, read_count); 2339 memset(tty->flip.flag_buf_ptr, TTY_NORMAL, read_count);
2332 tty->flip.char_buf_ptr += read_count; 2340 tty->flip.char_buf_ptr += read_count;
@@ -2335,7 +2343,11 @@ static void receive_chars(struct uart_port *the_port)
2335 icount->rx += read_count; 2343 icount->rx += read_count;
2336 } 2344 }
2337 } 2345 }
2338 tty_flip_buffer_push(tty); 2346
2347 spin_unlock_irqrestore(&the_port->lock, pflags);
2348
2349 if (flip)
2350 tty_flip_buffer_push(tty);
2339} 2351}
2340 2352
2341/** 2353/**
@@ -2393,18 +2405,14 @@ static void ic4_shutdown(struct uart_port *the_port)
2393 2405
2394 info = the_port->info; 2406 info = the_port->info;
2395 2407
2396 if (!(info->flags & UIF_INITIALIZED))
2397 return;
2398
2399 wake_up_interruptible(&info->delta_msr_wait); 2408 wake_up_interruptible(&info->delta_msr_wait);
2400 2409
2401 if (info->tty) 2410 if (info->tty)
2402 set_bit(TTY_IO_ERROR, &info->tty->flags); 2411 set_bit(TTY_IO_ERROR, &info->tty->flags);
2403 2412
2404 spin_lock_irqsave(&port->ip_lock, port_flags); 2413 spin_lock_irqsave(&the_port->lock, port_flags);
2405 set_notification(port, N_ALL, 0); 2414 set_notification(port, N_ALL, 0);
2406 info->flags &= ~UIF_INITIALIZED; 2415 spin_unlock_irqrestore(&the_port->lock, port_flags);
2407 spin_unlock_irqrestore(&port->ip_lock, port_flags);
2408} 2416}
2409 2417
2410/** 2418/**
@@ -2463,12 +2471,10 @@ static unsigned int ic4_get_mctrl(struct uart_port *the_port)
2463static void ic4_start_tx(struct uart_port *the_port) 2471static void ic4_start_tx(struct uart_port *the_port)
2464{ 2472{
2465 struct ioc4_port *port = get_ioc4_port(the_port); 2473 struct ioc4_port *port = get_ioc4_port(the_port);
2466 unsigned long flags;
2467 2474
2468 if (port) { 2475 if (port) {
2469 spin_lock_irqsave(&port->ip_lock, flags); 2476 set_notification(port, N_OUTPUT_LOWAT, 1);
2470 transmit_chars(the_port); 2477 enable_intrs(port, port->ip_hooks->intr_tx_mt);
2471 spin_unlock_irqrestore(&port->ip_lock, flags);
2472 } 2478 }
2473} 2479}
2474 2480
@@ -2510,9 +2516,9 @@ static int ic4_startup(struct uart_port *the_port)
2510 } 2516 }
2511 2517
2512 /* Start up the serial port */ 2518 /* Start up the serial port */
2513 spin_lock_irqsave(&port->ip_lock, port_flags); 2519 spin_lock_irqsave(&the_port->lock, port_flags);
2514 retval = ic4_startup_local(the_port); 2520 retval = ic4_startup_local(the_port);
2515 spin_unlock_irqrestore(&port->ip_lock, port_flags); 2521 spin_unlock_irqrestore(&the_port->lock, port_flags);
2516 return retval; 2522 return retval;
2517} 2523}
2518 2524
@@ -2527,12 +2533,11 @@ static void
2527ic4_set_termios(struct uart_port *the_port, 2533ic4_set_termios(struct uart_port *the_port,
2528 struct termios *termios, struct termios *old_termios) 2534 struct termios *termios, struct termios *old_termios)
2529{ 2535{
2530 struct ioc4_port *port = get_ioc4_port(the_port);
2531 unsigned long port_flags; 2536 unsigned long port_flags;
2532 2537
2533 spin_lock_irqsave(&port->ip_lock, port_flags); 2538 spin_lock_irqsave(&the_port->lock, port_flags);
2534 ioc4_change_speed(the_port, termios, old_termios); 2539 ioc4_change_speed(the_port, termios, old_termios);
2535 spin_unlock_irqrestore(&port->ip_lock, port_flags); 2540 spin_unlock_irqrestore(&the_port->lock, port_flags);
2536} 2541}
2537 2542
2538/** 2543/**
@@ -2607,24 +2612,25 @@ ioc4_serial_core_attach(struct pci_dev *pdev)
2607 __FUNCTION__, (void *)the_port, 2612 __FUNCTION__, (void *)the_port,
2608 (void *)port)); 2613 (void *)port));
2609 2614
2610 spin_lock_init(&the_port->lock);
2611 /* membase, iobase and mapbase just need to be non-0 */ 2615 /* membase, iobase and mapbase just need to be non-0 */
2612 the_port->membase = (unsigned char __iomem *)1; 2616 the_port->membase = (unsigned char __iomem *)1;
2613 the_port->line = the_port->iobase = ii; 2617 the_port->iobase = (pdev->bus->number << 16) | ii;
2618 the_port->line = (Num_of_ioc4_cards << 2) | ii;
2614 the_port->mapbase = 1; 2619 the_port->mapbase = 1;
2615 the_port->type = PORT_16550A; 2620 the_port->type = PORT_16550A;
2616 the_port->fifosize = IOC4_MAX_CHARS; 2621 the_port->fifosize = IOC4_FIFO_CHARS;
2617 the_port->ops = &ioc4_ops; 2622 the_port->ops = &ioc4_ops;
2618 the_port->irq = control->ic_irq; 2623 the_port->irq = control->ic_irq;
2619 the_port->dev = &pdev->dev; 2624 the_port->dev = &pdev->dev;
2625 spin_lock_init(&the_port->lock);
2620 if (uart_add_one_port(&ioc4_uart, the_port) < 0) { 2626 if (uart_add_one_port(&ioc4_uart, the_port) < 0) {
2621 printk(KERN_WARNING 2627 printk(KERN_WARNING
2622 "%s: unable to add port %d\n", 2628 "%s: unable to add port %d bus %d\n",
2623 __FUNCTION__, the_port->line); 2629 __FUNCTION__, the_port->line, pdev->bus->number);
2624 } else { 2630 } else {
2625 DPRINT_CONFIG( 2631 DPRINT_CONFIG(
2626 ("IOC4 serial driver port %d irq = %d\n", 2632 ("IOC4 serial port %d irq = %d, bus %d\n",
2627 the_port->line, the_port->irq)); 2633 the_port->line, the_port->irq, pdev->bus->number));
2628 } 2634 }
2629 /* all ports are rs232 for now */ 2635 /* all ports are rs232 for now */
2630 ioc4_set_proto(port, PROTO_RS232); 2636 ioc4_set_proto(port, PROTO_RS232);
@@ -2734,6 +2740,8 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
2734 if ((ret = ioc4_serial_core_attach(idd->idd_pdev))) 2740 if ((ret = ioc4_serial_core_attach(idd->idd_pdev)))
2735 goto out4; 2741 goto out4;
2736 2742
2743 Num_of_ioc4_cards++;
2744
2737 return ret; 2745 return ret;
2738 2746
2739 /* error exits that give back resources */ 2747 /* error exits that give back resources */
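
The ioc4_serial.c changes above do three related things: they move the serial locking from the driver's own port->ip_lock to the generic the_port->lock and drop the local UIF_INITIALIZED bookkeeping, they split the advertised FIFO size (IOC4_FIFO_CHARS, 255) from the per-call transmit chunk (IOC4_MAX_CHARS, raised to 256), and they make uart line numbers unique across multiple cards via the new Num_of_ioc4_cards counter. A small sketch of the line-number encoding implied by the assignment "the_port->line = (Num_of_ioc4_cards << 2) | ii", assuming four serial ports per IOC4 card, which the shift by two suggests:

    /* hypothetical helper, not part of the patch: derive a unique uart line
     * number from the card index and the port index on that card; assumes
     * 4 ports per card, matching the "<< 2" in the hunk above
     */
    static unsigned int ioc4_uart_line(unsigned int card, unsigned int port)
    {
            return (card << 2) | (port & 0x3);      /* card * 4 + port */
    }

So the first card attached gets lines 0 through 3, the second gets 4 through 7, and so on.
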
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index efe79b1fd431..aec83f577ce6 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -1100,6 +1100,8 @@ mpsc_start_rx(struct mpsc_port_info *pi)
1100{ 1100{
1101 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line); 1101 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1102 1102
1103 /* Issue a Receive Abort to clear any receive errors */
1104 writel(MPSC_CHR_2_RA, pi->mpsc_base + MPSC_CHR_2);
1103 if (pi->rcv_data) { 1105 if (pi->rcv_data) {
1104 mpsc_enter_hunt(pi); 1106 mpsc_enter_hunt(pi);
1105 mpsc_sdma_cmd(pi, SDMA_SDCM_ERD); 1107 mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 2d8622eef701..401d94a7fe2e 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -147,8 +147,7 @@ static int uart_startup(struct uart_state *state, int init_hw)
147 * once we have successfully opened the port. Also set 147 * once we have successfully opened the port. Also set
148 * up the tty->alt_speed kludge 148 * up the tty->alt_speed kludge
149 */ 149 */
150 if (info->tty) 150 set_bit(TTY_IO_ERROR, &info->tty->flags);
151 set_bit(TTY_IO_ERROR, &info->tty->flags);
152 151
153 if (port->type == PORT_UNKNOWN) 152 if (port->type == PORT_UNKNOWN)
154 return 0; 153 return 0;
diff --git a/drivers/sh/superhyway/superhyway.c b/drivers/sh/superhyway/superhyway.c
index f056276b08a1..28757cb9d246 100644
--- a/drivers/sh/superhyway/superhyway.c
+++ b/drivers/sh/superhyway/superhyway.c
@@ -16,6 +16,8 @@
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/superhyway.h> 18#include <linux/superhyway.h>
19#include <linux/string.h>
20#include <linux/slab.h>
19 21
20static int superhyway_devices; 22static int superhyway_devices;
21 23
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 45efeed1fcc3..49815ec4b842 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -14,6 +14,9 @@
14 * This file is licenced under the GPL. 14 * This file is licenced under the GPL.
15 */ 15 */
16 16
17#include <linux/signal.h> /* SA_INTERRUPT */
18#include <linux/jiffies.h>
19
17#include <asm/hardware.h> 20#include <asm/hardware.h>
18#include <asm/io.h> 21#include <asm/io.h>
19#include <asm/mach-types.h> 22#include <asm/mach-types.h>
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index bf1d5ab4aa3a..7ce1d9ef0289 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -14,6 +14,8 @@
14 * This file is licenced under the GPL. 14 * This file is licenced under the GPL.
15 */ 15 */
16 16
17#include <linux/jiffies.h>
18
17#ifdef CONFIG_PPC_PMAC 19#ifdef CONFIG_PPC_PMAC
18#include <asm/machdep.h> 20#include <asm/machdep.h>
19#include <asm/pmac_feature.h> 21#include <asm/pmac_feature.h>
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index d287dcccd415..f4a4aeda40b7 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/signal.h>
23#include <asm/mach-types.h> 24#include <asm/mach-types.h>
24#include <asm/hardware.h> 25#include <asm/hardware.h>
25#include <asm/arch/pxa-regs.h> 26#include <asm/arch/pxa-regs.h>
diff --git a/drivers/usb/input/pid.c b/drivers/usb/input/pid.c
index a00672c96644..dca5ee93a4ef 100644
--- a/drivers/usb/input/pid.c
+++ b/drivers/usb/input/pid.c
@@ -198,7 +198,7 @@ static int hid_pid_upload_effect(struct input_dev *dev,
198 } 198 }
199 199
200 effect->id = id; 200 effect->id = id;
201 dev_dbg(&pid_private->hid->dev->dev, "effect ID is %d\n.", id); 201 dev_dbg(&pid_private->hid->dev->dev, "effect ID is %d.\n", id);
202 pid_private->effects[id].owner = current->pid; 202 pid_private->effects[id].owner = current->pid;
203 pid_private->effects[id].flags = (1 << FF_PID_FLAGS_USED); 203 pid_private->effects[id].flags = (1 << FF_PID_FLAGS_USED);
204 spin_unlock_irqrestore(&pid_private->lock, flags); 204 spin_unlock_irqrestore(&pid_private->lock, flags);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 7e297947a2b2..7192b770bfb6 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -494,7 +494,7 @@ config FB_TGA
494 494
495config FB_VESA 495config FB_VESA
496 bool "VESA VGA graphics support" 496 bool "VESA VGA graphics support"
497 depends on (FB = y) && (X86 || X86_64) 497 depends on (FB = y) && X86
498 select FB_CFB_FILLRECT 498 select FB_CFB_FILLRECT
499 select FB_CFB_COPYAREA 499 select FB_CFB_COPYAREA
500 select FB_CFB_IMAGEBLIT 500 select FB_CFB_IMAGEBLIT
@@ -712,7 +712,7 @@ config FB_RIVA_DEBUG
712 712
713config FB_I810 713config FB_I810
714 tristate "Intel 810/815 support (EXPERIMENTAL)" 714 tristate "Intel 810/815 support (EXPERIMENTAL)"
715 depends on FB && EXPERIMENTAL && PCI && X86 && !X86_64 715 depends on FB && EXPERIMENTAL && PCI && X86_32
716 select AGP 716 select AGP
717 select AGP_INTEL 717 select AGP_INTEL
718 select FB_MODE_HELPERS 718 select FB_MODE_HELPERS
@@ -761,7 +761,7 @@ config FB_I810_I2C
761 761
762config FB_INTEL 762config FB_INTEL
763 tristate "Intel 830M/845G/852GM/855GM/865G support (EXPERIMENTAL)" 763 tristate "Intel 830M/845G/852GM/855GM/865G support (EXPERIMENTAL)"
764 depends on FB && EXPERIMENTAL && PCI && X86 && !X86_64 764 depends on FB && EXPERIMENTAL && PCI && X86_32
765 select AGP 765 select AGP
766 select AGP_INTEL 766 select AGP_INTEL
767 select FB_MODE_HELPERS 767 select FB_MODE_HELPERS
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 7e731691e2a9..6a9ae2b3d1ab 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -28,7 +28,7 @@ config VGA_CONSOLE
28 28
29config VIDEO_SELECT 29config VIDEO_SELECT
30 bool "Video mode selection support" 30 bool "Video mode selection support"
31 depends on (X86 || X86_64) && VGA_CONSOLE 31 depends on X86 && VGA_CONSOLE
32 ---help--- 32 ---help---
33 This enables support for text mode selection on kernel startup. If 33 This enables support for text mode selection on kernel startup. If
34 you want to take advantage of some high-resolution text mode your 34 you want to take advantage of some high-resolution text mode your
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 809fee2140ac..56cd199605f4 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -579,6 +579,7 @@ static void vga_set_palette(struct vc_data *vc, unsigned char *table)
579{ 579{
580 int i, j; 580 int i, j;
581 581
582 vga_w(state.vgabase, VGA_PEL_MSK, 0xff);
582 for (i = j = 0; i < 16; i++) { 583 for (i = j = 0; i < 16; i++) {
583 vga_w(state.vgabase, VGA_PEL_IW, table[i]); 584 vga_w(state.vgabase, VGA_PEL_IW, table[i]);
584 vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); 585 vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
@@ -721,6 +722,7 @@ static void vga_pal_blank(struct vgastate *state)
721{ 722{
722 int i; 723 int i;
723 724
725 vga_w(state->vgabase, VGA_PEL_MSK, 0xff);
724 for (i = 0; i < 16; i++) { 726 for (i = 0; i < 16; i++) {
725 vga_w(state->vgabase, VGA_PEL_IW, i); 727 vga_w(state->vgabase, VGA_PEL_IW, i);
726 vga_w(state->vgabase, VGA_PEL_D, 0); 728 vga_w(state->vgabase, VGA_PEL_D, 0);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 9073be4221a8..e2667ddab3f1 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -918,7 +918,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
918 } 918 }
919#endif 919#endif
920#elif defined(__powerpc__) 920#elif defined(__powerpc__)
921 vma->vm_page_prot = phys_mem_access_prot(file, off, 921 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
922 vma->vm_end - vma->vm_start, 922 vma->vm_end - vma->vm_start,
923 vma->vm_page_prot); 923 vma->vm_page_prot);
924#elif defined(__alpha__) 924#elif defined(__alpha__)
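
The fbmem.c hunk above adjusts the powerpc branch of fb_mmap(): the second argument of phys_mem_access_prot() is treated as a page frame number rather than a byte offset, so the offset is now shifted down by PAGE_SHIFT before the call. A sketch of the conversion (illustration only; the example value is made up):

    /* a byte offset becomes a PFN by dropping the in-page bits, e.g.
     * 0x12345000 >> PAGE_SHIFT (12 with 4 KiB pages) == 0x12345
     */
    unsigned long pfn = off >> PAGE_SHIFT;
    vma->vm_page_prot = phys_mem_access_prot(file, pfn,
                                             vma->vm_end - vma->vm_start,
                                             vma->vm_page_prot);
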
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 88c517a4c178..9e293e139a0e 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/sched.h> /* schedule_timeout() */
24#include <linux/delay.h> 25#include <linux/delay.h>
25 26
26#include "w1_family.h" 27#include "w1_family.h"
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 04ca8840acf1..87c29d7b6c17 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/zorro.h> 15#include <linux/zorro.h>
16#include <linux/stat.h> 16#include <linux/stat.h>
17#include <linux/string.h>
17 18
18#include "zorro.h" 19#include "zorro.h"
19 20
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index d3c05dfe20d2..0f2b40605b06 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -16,6 +16,8 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/zorro.h> 17#include <linux/zorro.h>
18#include <linux/bitops.h> 18#include <linux/bitops.h>
19#include <linux/string.h>
20
19#include <asm/setup.h> 21#include <asm/setup.h>
20#include <asm/amigahw.h> 22#include <asm/amigahw.h>
21 23
diff --git a/fs/Kconfig b/fs/Kconfig
index 48f5422cb19a..01a295232f75 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -810,7 +810,7 @@ config TMPFS
810 810
811config HUGETLBFS 811config HUGETLBFS
812 bool "HugeTLB file system support" 812 bool "HugeTLB file system support"
813 depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || X86_64 || BROKEN 813 depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
814 814
815config HUGETLB_PAGE 815config HUGETLB_PAGE
816 def_bool HUGETLBFS 816 def_bool HUGETLBFS
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 434c19d076ac..175b2e8177c1 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -57,7 +57,7 @@ config BINFMT_SHARED_FLAT
57 57
58config BINFMT_AOUT 58config BINFMT_AOUT
59 tristate "Kernel support for a.out and ECOFF binaries" 59 tristate "Kernel support for a.out and ECOFF binaries"
60 depends on (X86 && !X86_64) || ALPHA || ARM || M68K || SPARC32 60 depends on X86_32 || ALPHA || ARM || M68K || SPARC32
61 ---help--- 61 ---help---
62 A.out (Assembler.OUTput) is a set of formats for libraries and 62 A.out (Assembler.OUTput) is a set of formats for libraries and
63 executables used in the earliest versions of UNIX. Linux used 63 executables used in the earliest versions of UNIX. Linux used
diff --git a/fs/attr.c b/fs/attr.c
index b1796fb9e524..67bcd9b14ea5 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -117,9 +117,6 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
117 struct timespec now; 117 struct timespec now;
118 unsigned int ia_valid = attr->ia_valid; 118 unsigned int ia_valid = attr->ia_valid;
119 119
120 if (!inode)
121 BUG();
122
123 mode = inode->i_mode; 120 mode = inode->i_mode;
124 now = current_fs_time(inode->i_sb); 121 now = current_fs_time(inode->i_sb);
125 122
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 918ccc267e41..6fa6adc40972 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1502,9 +1502,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1502 fill_psinfo(psinfo, current->group_leader, current->mm); 1502 fill_psinfo(psinfo, current->group_leader, current->mm);
1503 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); 1503 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1504 1504
1505 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current); 1505 numnote = 2;
1506
1507 numnote = 3;
1508 1506
1509 auxv = (elf_addr_t *) current->mm->saved_auxv; 1507 auxv = (elf_addr_t *) current->mm->saved_auxv;
1510 1508
diff --git a/fs/buffer.c b/fs/buffer.c
index 2066e4cb700c..35fa34977e81 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1478,8 +1478,10 @@ EXPORT_SYMBOL(__getblk);
1478void __breadahead(struct block_device *bdev, sector_t block, int size) 1478void __breadahead(struct block_device *bdev, sector_t block, int size)
1479{ 1479{
1480 struct buffer_head *bh = __getblk(bdev, block, size); 1480 struct buffer_head *bh = __getblk(bdev, block, size);
1481 ll_rw_block(READA, 1, &bh); 1481 if (likely(bh)) {
1482 brelse(bh); 1482 ll_rw_block(READA, 1, &bh);
1483 brelse(bh);
1484 }
1483} 1485}
1484EXPORT_SYMBOL(__breadahead); 1486EXPORT_SYMBOL(__breadahead);
1485 1487
@@ -1497,7 +1499,7 @@ __bread(struct block_device *bdev, sector_t block, int size)
1497{ 1499{
1498 struct buffer_head *bh = __getblk(bdev, block, size); 1500 struct buffer_head *bh = __getblk(bdev, block, size);
1499 1501
1500 if (!buffer_uptodate(bh)) 1502 if (likely(bh) && !buffer_uptodate(bh))
1501 bh = __bread_slow(bh); 1503 bh = __bread_slow(bh);
1502 return bh; 1504 return bh;
1503} 1505}
@@ -1637,6 +1639,15 @@ out:
1637} 1639}
1638EXPORT_SYMBOL(block_invalidatepage); 1640EXPORT_SYMBOL(block_invalidatepage);
1639 1641
1642int do_invalidatepage(struct page *page, unsigned long offset)
1643{
1644 int (*invalidatepage)(struct page *, unsigned long);
1645 invalidatepage = page->mapping->a_ops->invalidatepage;
1646 if (invalidatepage == NULL)
1647 invalidatepage = block_invalidatepage;
1648 return (*invalidatepage)(page, offset);
1649}
1650
1640/* 1651/*
1641 * We attach and possibly dirty the buffers atomically wrt 1652 * We attach and possibly dirty the buffers atomically wrt
1642 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers 1653 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
@@ -2696,7 +2707,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2696 * they may have been added in ext3_writepage(). Make them 2707 * they may have been added in ext3_writepage(). Make them
2697 * freeable here, so the page does not leak. 2708 * freeable here, so the page does not leak.
2698 */ 2709 */
2699 block_invalidatepage(page, 0); 2710 do_invalidatepage(page, 0);
2700 unlock_page(page); 2711 unlock_page(page);
2701 return 0; /* don't care */ 2712 return 0; /* don't care */
2702 } 2713 }
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a327e03753ac..43dbcb0b21eb 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -3046,6 +3046,10 @@ HANDLE_IOCTL(RAW_GETBIND, raw_ioctl)
3046/* Serial */ 3046/* Serial */
3047HANDLE_IOCTL(TIOCGSERIAL, serial_struct_ioctl) 3047HANDLE_IOCTL(TIOCGSERIAL, serial_struct_ioctl)
3048HANDLE_IOCTL(TIOCSSERIAL, serial_struct_ioctl) 3048HANDLE_IOCTL(TIOCSSERIAL, serial_struct_ioctl)
3049#ifdef TIOCGLTC
3050COMPATIBLE_IOCTL(TIOCGLTC)
3051COMPATIBLE_IOCTL(TIOCSLTC)
3052#endif
3049/* Usbdevfs */ 3053/* Usbdevfs */
3050HANDLE_IOCTL(USBDEVFS_CONTROL32, do_usbdevfs_control) 3054HANDLE_IOCTL(USBDEVFS_CONTROL32, do_usbdevfs_control)
3051HANDLE_IOCTL(USBDEVFS_BULK32, do_usbdevfs_bulk) 3055HANDLE_IOCTL(USBDEVFS_BULK32, do_usbdevfs_bulk)
diff --git a/fs/dquot.c b/fs/dquot.c
index 05f3327d64a3..ea7644227a65 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -662,7 +662,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
662restart: 662restart:
663 file_list_lock(); 663 file_list_lock();
664 list_for_each(p, &sb->s_files) { 664 list_for_each(p, &sb->s_files) {
665 struct file *filp = list_entry(p, struct file, f_list); 665 struct file *filp = list_entry(p, struct file, f_u.fu_list);
666 struct inode *inode = filp->f_dentry->d_inode; 666 struct inode *inode = filp->f_dentry->d_inode;
667 if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) { 667 if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) {
668 struct dentry *dentry = dget(filp->f_dentry); 668 struct dentry *dentry = dget(filp->f_dentry);
diff --git a/fs/exec.c b/fs/exec.c
index ba73797eb4cb..10d493fea7ce 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -630,10 +630,9 @@ static inline int de_thread(struct task_struct *tsk)
630 /* 630 /*
631 * Account for the thread group leader hanging around: 631 * Account for the thread group leader hanging around:
632 */ 632 */
633 count = 2; 633 count = 1;
634 if (thread_group_leader(current)) 634 if (!thread_group_leader(current)) {
635 count = 1; 635 count = 2;
636 else {
637 /* 636 /*
638 * The SIGALRM timer survives the exec, but needs to point 637 * The SIGALRM timer survives the exec, but needs to point
639 * at us as the new group leader now. We have a race with 638 * at us as the new group leader now. We have a race with
@@ -642,8 +641,10 @@ static inline int de_thread(struct task_struct *tsk)
642 * before we can safely let the old group leader die. 641 * before we can safely let the old group leader die.
643 */ 642 */
644 sig->real_timer.data = (unsigned long)current; 643 sig->real_timer.data = (unsigned long)current;
644 spin_unlock_irq(lock);
645 if (del_timer_sync(&sig->real_timer)) 645 if (del_timer_sync(&sig->real_timer))
646 add_timer(&sig->real_timer); 646 add_timer(&sig->real_timer);
647 spin_lock_irq(lock);
647 } 648 }
648 while (atomic_read(&sig->count) > count) { 649 while (atomic_read(&sig->count) > count) {
649 sig->group_exit_task = current; 650 sig->group_exit_task = current;
@@ -655,7 +656,6 @@ static inline int de_thread(struct task_struct *tsk)
655 } 656 }
656 sig->group_exit_task = NULL; 657 sig->group_exit_task = NULL;
657 sig->notify_count = 0; 658 sig->notify_count = 0;
658 sig->real_timer.data = (unsigned long)current;
659 spin_unlock_irq(lock); 659 spin_unlock_irq(lock);
660 660
661 /* 661 /*
@@ -1417,19 +1417,16 @@ static void zap_threads (struct mm_struct *mm)
1417static void coredump_wait(struct mm_struct *mm) 1417static void coredump_wait(struct mm_struct *mm)
1418{ 1418{
1419 DECLARE_COMPLETION(startup_done); 1419 DECLARE_COMPLETION(startup_done);
1420 int core_waiters;
1420 1421
1421 mm->core_waiters++; /* let other threads block */
1422 mm->core_startup_done = &startup_done; 1422 mm->core_startup_done = &startup_done;
1423 1423
1424 /* give other threads a chance to run: */
1425 yield();
1426
1427 zap_threads(mm); 1424 zap_threads(mm);
1428 if (--mm->core_waiters) { 1425 core_waiters = mm->core_waiters;
1429 up_write(&mm->mmap_sem); 1426 up_write(&mm->mmap_sem);
1427
1428 if (core_waiters)
1430 wait_for_completion(&startup_done); 1429 wait_for_completion(&startup_done);
1431 } else
1432 up_write(&mm->mmap_sem);
1433 BUG_ON(mm->core_waiters); 1430 BUG_ON(mm->core_waiters);
1434} 1431}
1435 1432
@@ -1463,11 +1460,21 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1463 current->fsuid = 0; /* Dump root private */ 1460 current->fsuid = 0; /* Dump root private */
1464 } 1461 }
1465 mm->dumpable = 0; 1462 mm->dumpable = 0;
1466 init_completion(&mm->core_done); 1463
1464 retval = -EAGAIN;
1467 spin_lock_irq(&current->sighand->siglock); 1465 spin_lock_irq(&current->sighand->siglock);
1468 current->signal->flags = SIGNAL_GROUP_EXIT; 1466 if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
1469 current->signal->group_exit_code = exit_code; 1467 current->signal->flags = SIGNAL_GROUP_EXIT;
1468 current->signal->group_exit_code = exit_code;
1469 retval = 0;
1470 }
1470 spin_unlock_irq(&current->sighand->siglock); 1471 spin_unlock_irq(&current->sighand->siglock);
1472 if (retval) {
1473 up_write(&mm->mmap_sem);
1474 goto fail;
1475 }
1476
1477 init_completion(&mm->core_done);
1471 coredump_wait(mm); 1478 coredump_wait(mm);
1472 1479
1473 /* 1480 /*
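
Two behavioural points in the fs/exec.c hunks above are easy to miss. In de_thread(), the rearm of the group leader's real_timer is now bracketed by spin_unlock_irq(lock) and spin_lock_irq(lock); presumably because del_timer_sync() has to wait for any running timer callback to finish, it cannot safely be called while holding a lock that the callback path may also need (this reasoning is inferred, the diff itself does not state it). In do_coredump(), SIGNAL_GROUP_EXIT is only set if it was not already set, so a second, racing coredump now fails with -EAGAIN instead of proceeding. The bracket pattern, reduced to a sketch:

    /* drop the lock around the synchronous timer rearm, then retake it */
    spin_unlock_irq(lock);
    if (del_timer_sync(&sig->real_timer))
            add_timer(&sig->real_timer);
    spin_lock_irq(lock);
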
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fdba4d1d3c60..e7d3f0522d01 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -440,6 +440,10 @@ static int ext2_alloc_branch(struct inode *inode,
440 * the pointer to new one, then send parent to disk. 440 * the pointer to new one, then send parent to disk.
441 */ 441 */
442 bh = sb_getblk(inode->i_sb, parent); 442 bh = sb_getblk(inode->i_sb, parent);
443 if (!bh) {
444 err = -EIO;
445 break;
446 }
443 lock_buffer(bh); 447 lock_buffer(bh);
444 memset(bh->b_data, 0, blocksize); 448 memset(bh->b_data, 0, blocksize);
445 branch[n].bh = bh; 449 branch[n].bh = bh;
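
This ext2_alloc_branch() hunk, and the matching ext3 hunks further down (ext3_alloc_branch(), ext3_getblk(), bclean(), setup_new_group_blocks(), update_backups()), add the same defensive check: sb_getblk() can return NULL, so each caller now bails out with -EIO on its local error path instead of dereferencing a NULL buffer_head. The recurring pattern, as a sketch (not part of the patch):

    bh = sb_getblk(inode->i_sb, block);
    if (!bh) {
            err = -EIO;
            goto out;       /* or break / return, whatever the caller already uses */
    }
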
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 0213db4911a2..7992d21e0e09 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -20,6 +20,8 @@
20#include <linux/quotaops.h> 20#include <linux/quotaops.h>
21#include <linux/buffer_head.h> 21#include <linux/buffer_head.h>
22 22
23#include "bitmap.h"
24
23/* 25/*
24 * balloc.c contains the blocks allocation and deallocation routines 26 * balloc.c contains the blocks allocation and deallocation routines
25 */ 27 */
@@ -1010,7 +1012,7 @@ retry:
1010 * allocation within the reservation window. 1012 * allocation within the reservation window.
1011 * 1013 *
1012 * This will avoid keeping on searching the reservation list again and 1014 * This will avoid keeping on searching the reservation list again and
1013 * again when someboday is looking for a free block (without 1015 * again when somebody is looking for a free block (without
1014 * reservation), and there are lots of free blocks, but they are all 1016 * reservation), and there are lots of free blocks, but they are all
1015 * being reserved. 1017 * being reserved.
1016 * 1018 *
@@ -1416,12 +1418,12 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
1416 unsigned long bitmap_count, x; 1418 unsigned long bitmap_count, x;
1417 struct buffer_head *bitmap_bh = NULL; 1419 struct buffer_head *bitmap_bh = NULL;
1418 1420
1419 lock_super(sb);
1420 es = EXT3_SB(sb)->s_es; 1421 es = EXT3_SB(sb)->s_es;
1421 desc_count = 0; 1422 desc_count = 0;
1422 bitmap_count = 0; 1423 bitmap_count = 0;
1423 gdp = NULL; 1424 gdp = NULL;
1424 1425
1426 smp_rmb();
1425 for (i = 0; i < ngroups; i++) { 1427 for (i = 0; i < ngroups; i++) {
1426 gdp = ext3_get_group_desc(sb, i, NULL); 1428 gdp = ext3_get_group_desc(sb, i, NULL);
1427 if (!gdp) 1429 if (!gdp)
@@ -1440,7 +1442,6 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
1440 brelse(bitmap_bh); 1442 brelse(bitmap_bh);
1441 printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n", 1443 printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n",
1442 le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count); 1444 le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count);
1443 unlock_super(sb);
1444 return bitmap_count; 1445 return bitmap_count;
1445#else 1446#else
1446 desc_count = 0; 1447 desc_count = 0;
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index 6c419b9ab0e8..5b4ba3e246e6 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10#include <linux/buffer_head.h> 10#include <linux/buffer_head.h>
11 11#include "bitmap.h"
12 12
13static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0}; 13static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
14 14
diff --git a/fs/ext3/bitmap.h b/fs/ext3/bitmap.h
new file mode 100644
index 000000000000..6ee503a6bb4e
--- /dev/null
+++ b/fs/ext3/bitmap.h
@@ -0,0 +1,8 @@
1/* linux/fs/ext3/bitmap.c
2 *
3 * Copyright (C) 2005 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6*/
7
8extern unsigned long ext3_count_free (struct buffer_head *, unsigned int );
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 6549945f9ac1..df3f517c54ac 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -26,6 +26,7 @@
26 26
27#include <asm/byteorder.h> 27#include <asm/byteorder.h>
28 28
29#include "bitmap.h"
29#include "xattr.h" 30#include "xattr.h"
30#include "acl.h" 31#include "acl.h"
31 32
@@ -704,7 +705,6 @@ unsigned long ext3_count_free_inodes (struct super_block * sb)
704 unsigned long bitmap_count, x; 705 unsigned long bitmap_count, x;
705 struct buffer_head *bitmap_bh = NULL; 706 struct buffer_head *bitmap_bh = NULL;
706 707
707 lock_super (sb);
708 es = EXT3_SB(sb)->s_es; 708 es = EXT3_SB(sb)->s_es;
709 desc_count = 0; 709 desc_count = 0;
710 bitmap_count = 0; 710 bitmap_count = 0;
@@ -727,7 +727,6 @@ unsigned long ext3_count_free_inodes (struct super_block * sb)
727 brelse(bitmap_bh); 727 brelse(bitmap_bh);
728 printk("ext3_count_free_inodes: stored = %u, computed = %lu, %lu\n", 728 printk("ext3_count_free_inodes: stored = %u, computed = %lu, %lu\n",
729 le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count); 729 le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
730 unlock_super(sb);
731 return desc_count; 730 return desc_count;
732#else 731#else
733 desc_count = 0; 732 desc_count = 0;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 8b38f2232796..5d9b00e28837 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -491,7 +491,7 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
491 * the same format as ext3_get_branch() would do. We are calling it after 491 * the same format as ext3_get_branch() would do. We are calling it after
492 * we had read the existing part of chain and partial points to the last 492 * we had read the existing part of chain and partial points to the last
493 * triple of that (one with zero ->key). Upon the exit we have the same 493 * triple of that (one with zero ->key). Upon the exit we have the same
494 * picture as after the successful ext3_get_block(), excpet that in one 494 * picture as after the successful ext3_get_block(), except that in one
495 * place chain is disconnected - *branch->p is still zero (we did not 495 * place chain is disconnected - *branch->p is still zero (we did not
496 * set the last link), but branch->key contains the number that should 496 * set the last link), but branch->key contains the number that should
497 * be placed into *branch->p to fill that gap. 497 * be placed into *branch->p to fill that gap.
@@ -523,7 +523,6 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
523 if (!nr) 523 if (!nr)
524 break; 524 break;
525 branch[n].key = cpu_to_le32(nr); 525 branch[n].key = cpu_to_le32(nr);
526 keys = n+1;
527 526
528 /* 527 /*
529 * Get buffer_head for parent block, zero it out 528 * Get buffer_head for parent block, zero it out
@@ -531,6 +530,9 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
531 * parent to disk. 530 * parent to disk.
532 */ 531 */
533 bh = sb_getblk(inode->i_sb, parent); 532 bh = sb_getblk(inode->i_sb, parent);
533 if (!bh)
534 break;
535 keys = n+1;
534 branch[n].bh = bh; 536 branch[n].bh = bh;
535 lock_buffer(bh); 537 lock_buffer(bh);
536 BUFFER_TRACE(bh, "call get_create_access"); 538 BUFFER_TRACE(bh, "call get_create_access");
@@ -864,6 +866,10 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
864 if (!*errp && buffer_mapped(&dummy)) { 866 if (!*errp && buffer_mapped(&dummy)) {
865 struct buffer_head *bh; 867 struct buffer_head *bh;
866 bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 868 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
869 if (!bh) {
870 *errp = -EIO;
871 goto err;
872 }
867 if (buffer_new(&dummy)) { 873 if (buffer_new(&dummy)) {
868 J_ASSERT(create != 0); 874 J_ASSERT(create != 0);
869 J_ASSERT(handle != 0); 875 J_ASSERT(handle != 0);
@@ -896,6 +902,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
896 } 902 }
897 return bh; 903 return bh;
898 } 904 }
905err:
899 return NULL; 906 return NULL;
900} 907}
901 908
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 50378d8ff84b..b3c690a3b54a 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -36,6 +36,8 @@
36#include <linux/quotaops.h> 36#include <linux/quotaops.h>
37#include <linux/buffer_head.h> 37#include <linux/buffer_head.h>
38#include <linux/smp_lock.h> 38#include <linux/smp_lock.h>
39
40#include "namei.h"
39#include "xattr.h" 41#include "xattr.h"
40#include "acl.h" 42#include "acl.h"
41 43
diff --git a/fs/ext3/namei.h b/fs/ext3/namei.h
new file mode 100644
index 000000000000..f2ce2b0065c9
--- /dev/null
+++ b/fs/ext3/namei.h
@@ -0,0 +1,8 @@
1/* linux/fs/ext3/namei.h
2 *
3 * Copyright (C) 2005 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6*/
7
8extern struct dentry *ext3_get_parent(struct dentry *child);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 57f79106267d..1be78b4b4de9 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -118,6 +118,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
118 int err; 118 int err;
119 119
120 bh = sb_getblk(sb, blk); 120 bh = sb_getblk(sb, blk);
121 if (!bh)
122 return ERR_PTR(-EIO);
121 if ((err = ext3_journal_get_write_access(handle, bh))) { 123 if ((err = ext3_journal_get_write_access(handle, bh))) {
122 brelse(bh); 124 brelse(bh);
123 bh = ERR_PTR(err); 125 bh = ERR_PTR(err);
@@ -202,6 +204,10 @@ static int setup_new_group_blocks(struct super_block *sb,
202 ext3_debug("update backup group %#04lx (+%d)\n", block, bit); 204 ext3_debug("update backup group %#04lx (+%d)\n", block, bit);
203 205
204 gdb = sb_getblk(sb, block); 206 gdb = sb_getblk(sb, block);
207 if (!gdb) {
208 err = -EIO;
209 goto exit_bh;
210 }
205 if ((err = ext3_journal_get_write_access(handle, gdb))) { 211 if ((err = ext3_journal_get_write_access(handle, gdb))) {
206 brelse(gdb); 212 brelse(gdb);
207 goto exit_bh; 213 goto exit_bh;
@@ -643,6 +649,10 @@ static void update_backups(struct super_block *sb,
643 break; 649 break;
644 650
645 bh = sb_getblk(sb, group * bpg + blk_off); 651 bh = sb_getblk(sb, group * bpg + blk_off);
652 if (!bh) {
653 err = -EIO;
654 break;
655 }
646 ext3_debug("update metadata backup %#04lx\n", 656 ext3_debug("update metadata backup %#04lx\n",
647 (unsigned long)bh->b_blocknr); 657 (unsigned long)bh->b_blocknr);
648 if ((err = ext3_journal_get_write_access(handle, bh))) 658 if ((err = ext3_journal_get_write_access(handle, bh)))
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 097383c11154..f594989ccb7a 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -36,9 +36,12 @@
36#include <linux/namei.h> 36#include <linux/namei.h>
37#include <linux/quotaops.h> 37#include <linux/quotaops.h>
38#include <linux/seq_file.h> 38#include <linux/seq_file.h>
39
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41
40#include "xattr.h" 42#include "xattr.h"
41#include "acl.h" 43#include "acl.h"
44#include "namei.h"
42 45
43static int ext3_load_journal(struct super_block *, struct ext3_super_block *); 46static int ext3_load_journal(struct super_block *, struct ext3_super_block *);
44static int ext3_create_journal(struct super_block *, struct ext3_super_block *, 47static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
@@ -615,7 +618,6 @@ static struct super_operations ext3_sops = {
615#endif 618#endif
616}; 619};
617 620
618struct dentry *ext3_get_parent(struct dentry *child);
619static struct export_operations ext3_export_ops = { 621static struct export_operations ext3_export_ops = {
620 .get_parent = ext3_get_parent, 622 .get_parent = ext3_get_parent,
621}; 623};
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 269c7b92db9a..430de9f63be3 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -210,7 +210,7 @@ ext3_xattr_find_entry(struct ext3_xattr_entry **pentry, int name_index,
210 return cmp ? -ENODATA : 0; 210 return cmp ? -ENODATA : 0;
211} 211}
212 212
213int 213static int
214ext3_xattr_block_get(struct inode *inode, int name_index, const char *name, 214ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
215 void *buffer, size_t buffer_size) 215 void *buffer, size_t buffer_size)
216{ 216{
@@ -354,7 +354,7 @@ ext3_xattr_list_entries(struct inode *inode, struct ext3_xattr_entry *entry,
354 return buffer_size - rest; 354 return buffer_size - rest;
355} 355}
356 356
357int 357static int
358ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size) 358ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
359{ 359{
360 struct buffer_head *bh = NULL; 360 struct buffer_head *bh = NULL;
@@ -626,7 +626,7 @@ struct ext3_xattr_block_find {
626 struct buffer_head *bh; 626 struct buffer_head *bh;
627}; 627};
628 628
629int 629static int
630ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i, 630ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
631 struct ext3_xattr_block_find *bs) 631 struct ext3_xattr_block_find *bs)
632{ 632{
@@ -859,7 +859,7 @@ struct ext3_xattr_ibody_find {
859 struct ext3_iloc iloc; 859 struct ext3_iloc iloc;
860}; 860};
861 861
862int 862static int
863ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i, 863ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i,
864 struct ext3_xattr_ibody_find *is) 864 struct ext3_xattr_ibody_find *is)
865{ 865{
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 895049b2ac9c..ba824964b9bb 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -222,6 +222,80 @@ fat_shortname2uni(struct nls_table *nls, unsigned char *buf, int buf_size,
222 return len; 222 return len;
223} 223}
224 224
225enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME, PARSE_EOF, };
226
227/**
228 * fat_parse_long - Parse extended directory entry.
229 *
230 * This function returns zero on success, negative value on error, or one of
231 * the following:
232 *
233 * %PARSE_INVALID - Directory entry is invalid.
234 * %PARSE_NOT_LONGNAME - Directory entry does not contain longname.
235 * %PARSE_EOF - Directory has no more entries.
236 */
237static int fat_parse_long(struct inode *dir, loff_t *pos,
238 struct buffer_head **bh, struct msdos_dir_entry **de,
239 wchar_t **unicode, unsigned char *nr_slots)
240{
241 struct msdos_dir_slot *ds;
242 unsigned char id, slot, slots, alias_checksum;
243
244 if (!*unicode) {
245 *unicode = (wchar_t *)__get_free_page(GFP_KERNEL);
246 if (!*unicode) {
247 brelse(*bh);
248 return -ENOMEM;
249 }
250 }
251parse_long:
252 slots = 0;
253 ds = (struct msdos_dir_slot *)*de;
254 id = ds->id;
255 if (!(id & 0x40))
256 return PARSE_INVALID;
257 slots = id & ~0x40;
258 if (slots > 20 || !slots) /* ceil(256 * 2 / 26) */
259 return PARSE_INVALID;
260 *nr_slots = slots;
261 alias_checksum = ds->alias_checksum;
262
263 slot = slots;
264 while (1) {
265 int offset;
266
267 slot--;
268 offset = slot * 13;
269 fat16_towchar(*unicode + offset, ds->name0_4, 5);
270 fat16_towchar(*unicode + offset + 5, ds->name5_10, 6);
271 fat16_towchar(*unicode + offset + 11, ds->name11_12, 2);
272
273 if (ds->id & 0x40)
274 (*unicode)[offset + 13] = 0;
275 if (fat_get_entry(dir, pos, bh, de) < 0)
276 return PARSE_EOF;
277 if (slot == 0)
278 break;
279 ds = (struct msdos_dir_slot *)*de;
280 if (ds->attr != ATTR_EXT)
281 return PARSE_NOT_LONGNAME;
282 if ((ds->id & ~0x40) != slot)
283 goto parse_long;
284 if (ds->alias_checksum != alias_checksum)
285 goto parse_long;
286 }
287 if ((*de)->name[0] == DELETED_FLAG)
288 return PARSE_INVALID;
289 if ((*de)->attr == ATTR_EXT)
290 goto parse_long;
291 if (IS_FREE((*de)->name) || ((*de)->attr & ATTR_VOLUME))
292 return PARSE_INVALID;
293 if (fat_checksum((*de)->name) != alias_checksum)
294 *nr_slots = 0;
295
296 return 0;
297}
298
225/* 299/*
226 * Return values: negative -> error, 0 -> not found, positive -> found, 300 * Return values: negative -> error, 0 -> not found, positive -> found,
227 * value is the total amount of slots, including the shortname entry. 301 * value is the total amount of slots, including the shortname entry.
@@ -259,68 +333,16 @@ parse_record:
259 if (de->attr != ATTR_EXT && IS_FREE(de->name)) 333 if (de->attr != ATTR_EXT && IS_FREE(de->name))
260 continue; 334 continue;
261 if (de->attr == ATTR_EXT) { 335 if (de->attr == ATTR_EXT) {
262 struct msdos_dir_slot *ds; 336 int status = fat_parse_long(inode, &cpos, &bh, &de,
263 unsigned char id; 337 &unicode, &nr_slots);
264 unsigned char slot; 338 if (status < 0)
265 unsigned char slots; 339 return status;
266 unsigned char sum; 340 else if (status == PARSE_INVALID)
267 unsigned char alias_checksum;
268
269 if (!unicode) {
270 unicode = (wchar_t *)
271 __get_free_page(GFP_KERNEL);
272 if (!unicode) {
273 brelse(bh);
274 return -ENOMEM;
275 }
276 }
277parse_long:
278 slots = 0;
279 ds = (struct msdos_dir_slot *) de;
280 id = ds->id;
281 if (!(id & 0x40))
282 continue;
283 slots = id & ~0x40;
284 if (slots > 20 || !slots) /* ceil(256 * 2 / 26) */
285 continue;
286 nr_slots = slots;
287 alias_checksum = ds->alias_checksum;
288
289 slot = slots;
290 while (1) {
291 int offset;
292
293 slot--;
294 offset = slot * 13;
295 fat16_towchar(unicode + offset, ds->name0_4, 5);
296 fat16_towchar(unicode + offset + 5, ds->name5_10, 6);
297 fat16_towchar(unicode + offset + 11, ds->name11_12, 2);
298
299 if (ds->id & 0x40) {
300 unicode[offset + 13] = 0;
301 }
302 if (fat_get_entry(inode, &cpos, &bh, &de) < 0)
303 goto EODir;
304 if (slot == 0)
305 break;
306 ds = (struct msdos_dir_slot *) de;
307 if (ds->attr != ATTR_EXT)
308 goto parse_record;
309 if ((ds->id & ~0x40) != slot)
310 goto parse_long;
311 if (ds->alias_checksum != alias_checksum)
312 goto parse_long;
313 }
314 if (de->name[0] == DELETED_FLAG)
315 continue;
316 if (de->attr == ATTR_EXT)
317 goto parse_long;
318 if (IS_FREE(de->name) || (de->attr & ATTR_VOLUME))
319 continue; 341 continue;
320 for (sum = 0, i = 0; i < 11; i++) 342 else if (status == PARSE_NOT_LONGNAME)
321 sum = (((sum&1)<<7)|((sum&0xfe)>>1)) + de->name[i]; 343 goto parse_record;
322 if (sum != alias_checksum) 344 else if (status == PARSE_EOF)
323 nr_slots = 0; 345 goto EODir;
324 } 346 }
325 347
326 memcpy(work, de->name, sizeof(de->name)); 348 memcpy(work, de->name, sizeof(de->name));
@@ -408,8 +430,8 @@ struct fat_ioctl_filldir_callback {
408 int short_len; 430 int short_len;
409}; 431};
410 432
411static int fat_readdirx(struct inode *inode, struct file *filp, void *dirent, 433static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent,
412 filldir_t filldir, int short_only, int both) 434 filldir_t filldir, int short_only, int both)
413{ 435{
414 struct super_block *sb = inode->i_sb; 436 struct super_block *sb = inode->i_sb;
415 struct msdos_sb_info *sbi = MSDOS_SB(sb); 437 struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -458,9 +480,10 @@ static int fat_readdirx(struct inode *inode, struct file *filp, void *dirent,
458 480
459 bh = NULL; 481 bh = NULL;
460GetNew: 482GetNew:
461 long_slots = 0;
462 if (fat_get_entry(inode, &cpos, &bh, &de) == -1) 483 if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
463 goto EODir; 484 goto EODir;
485parse_record:
486 long_slots = 0;
464 /* Check for long filename entry */ 487 /* Check for long filename entry */
465 if (isvfat) { 488 if (isvfat) {
466 if (de->name[0] == DELETED_FLAG) 489 if (de->name[0] == DELETED_FLAG)
@@ -475,69 +498,18 @@ GetNew:
475 } 498 }
476 499
477 if (isvfat && de->attr == ATTR_EXT) { 500 if (isvfat && de->attr == ATTR_EXT) {
478 struct msdos_dir_slot *ds; 501 int status = fat_parse_long(inode, &cpos, &bh, &de,
479 unsigned char id; 502 &unicode, &long_slots);
480 unsigned char slot; 503 if (status < 0) {
481 unsigned char slots; 504 filp->f_pos = cpos;
482 unsigned char sum; 505 ret = status;
483 unsigned char alias_checksum; 506 goto out;
484 507 } else if (status == PARSE_INVALID)
485 if (!unicode) {
486 unicode = (wchar_t *)__get_free_page(GFP_KERNEL);
487 if (!unicode) {
488 filp->f_pos = cpos;
489 brelse(bh);
490 ret = -ENOMEM;
491 goto out;
492 }
493 }
494ParseLong:
495 slots = 0;
496 ds = (struct msdos_dir_slot *) de;
497 id = ds->id;
498 if (!(id & 0x40))
499 goto RecEnd;
500 slots = id & ~0x40;
501 if (slots > 20 || !slots) /* ceil(256 * 2 / 26) */
502 goto RecEnd; 508 goto RecEnd;
503 long_slots = slots; 509 else if (status == PARSE_NOT_LONGNAME)
504 alias_checksum = ds->alias_checksum; 510 goto parse_record;
505 511 else if (status == PARSE_EOF)
506 slot = slots; 512 goto EODir;
507 while (1) {
508 int offset;
509
510 slot--;
511 offset = slot * 13;
512 fat16_towchar(unicode + offset, ds->name0_4, 5);
513 fat16_towchar(unicode + offset + 5, ds->name5_10, 6);
514 fat16_towchar(unicode + offset + 11, ds->name11_12, 2);
515
516 if (ds->id & 0x40) {
517 unicode[offset + 13] = 0;
518 }
519 if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
520 goto EODir;
521 if (slot == 0)
522 break;
523 ds = (struct msdos_dir_slot *) de;
524 if (ds->attr != ATTR_EXT)
525 goto RecEnd; /* XXX */
526 if ((ds->id & ~0x40) != slot)
527 goto ParseLong;
528 if (ds->alias_checksum != alias_checksum)
529 goto ParseLong;
530 }
531 if (de->name[0] == DELETED_FLAG)
532 goto RecEnd;
533 if (de->attr == ATTR_EXT)
534 goto ParseLong;
535 if (IS_FREE(de->name) || (de->attr & ATTR_VOLUME))
536 goto RecEnd;
537 for (sum = 0, i = 0; i < 11; i++)
538 sum = (((sum&1)<<7)|((sum&0xfe)>>1)) + de->name[i];
539 if (sum != alias_checksum)
540 long_slots = 0;
541 } 513 }
542 514
543 if (sbi->options.dotsOK) { 515 if (sbi->options.dotsOK) {
@@ -671,7 +643,7 @@ out:
671static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir) 643static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
672{ 644{
673 struct inode *inode = filp->f_dentry->d_inode; 645 struct inode *inode = filp->f_dentry->d_inode;
674 return fat_readdirx(inode, filp, dirent, filldir, 0, 0); 646 return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
675} 647}
676 648
677static int fat_ioctl_filldir(void *__buf, const char *name, int name_len, 649static int fat_ioctl_filldir(void *__buf, const char *name, int name_len,
@@ -760,8 +732,8 @@ static int fat_dir_ioctl(struct inode * inode, struct file * filp,
760 down(&inode->i_sem); 732 down(&inode->i_sem);
761 ret = -ENOENT; 733 ret = -ENOENT;
762 if (!IS_DEADDIR(inode)) { 734 if (!IS_DEADDIR(inode)) {
763 ret = fat_readdirx(inode, filp, &buf, fat_ioctl_filldir, 735 ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir,
764 short_only, both); 736 short_only, both);
765 } 737 }
766 up(&inode->i_sem); 738 up(&inode->i_sem);
767 if (ret >= 0) 739 if (ret >= 0)
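
The fs/fat/dir.c changes fold the two copies of long-name parsing, in the name-lookup path shown in the first set of hunks and in __fat_readdir() (formerly fat_readdirx()), into the new fat_parse_long() helper; the PARSE_INVALID / PARSE_NOT_LONGNAME / PARSE_EOF codes let each caller keep its original control flow of skipping the entry, re-parsing it as a short entry, or stopping at end of directory. The slot arithmetic that both the old and new code rely on is worth spelling out (a sketch, not part of the patch):

    /* each VFAT long-name slot carries 13 UTF-16 code units in three fields
     * (5 + 6 + 2); a name of at most 255 characters therefore needs at most
     * ceil(255 / 13) = 20 slots, which is the "slots > 20" bound the source
     * writes as ceil(256 * 2 / 26)
     */
    int offset = slot * 13;
    fat16_towchar(unicode + offset,      ds->name0_4,   5);
    fat16_towchar(unicode + offset + 5,  ds->name5_10,  6);
    fat16_towchar(unicode + offset + 11, ds->name11_12, 2);
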
diff --git a/fs/file_table.c b/fs/file_table.c
index 86ec8ae985b4..4dc205546547 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -56,13 +56,13 @@ void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
56 56
57static inline void file_free_rcu(struct rcu_head *head) 57static inline void file_free_rcu(struct rcu_head *head)
58{ 58{
59 struct file *f = container_of(head, struct file, f_rcuhead); 59 struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
60 kmem_cache_free(filp_cachep, f); 60 kmem_cache_free(filp_cachep, f);
61} 61}
62 62
63static inline void file_free(struct file *f) 63static inline void file_free(struct file *f)
64{ 64{
65 call_rcu(&f->f_rcuhead, file_free_rcu); 65 call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
66} 66}
67 67
68/* Find an unused file structure and return a pointer to it. 68/* Find an unused file structure and return a pointer to it.
@@ -95,7 +95,7 @@ struct file *get_empty_filp(void)
95 f->f_gid = current->fsgid; 95 f->f_gid = current->fsgid;
96 rwlock_init(&f->f_owner.lock); 96 rwlock_init(&f->f_owner.lock);
97 /* f->f_version: 0 */ 97 /* f->f_version: 0 */
98 INIT_LIST_HEAD(&f->f_list); 98 INIT_LIST_HEAD(&f->f_u.fu_list);
99 return f; 99 return f;
100 100
101over: 101over:
@@ -225,15 +225,15 @@ void file_move(struct file *file, struct list_head *list)
225 if (!list) 225 if (!list)
226 return; 226 return;
227 file_list_lock(); 227 file_list_lock();
228 list_move(&file->f_list, list); 228 list_move(&file->f_u.fu_list, list);
229 file_list_unlock(); 229 file_list_unlock();
230} 230}
231 231
232void file_kill(struct file *file) 232void file_kill(struct file *file)
233{ 233{
234 if (!list_empty(&file->f_list)) { 234 if (!list_empty(&file->f_u.fu_list)) {
235 file_list_lock(); 235 file_list_lock();
236 list_del_init(&file->f_list); 236 list_del_init(&file->f_u.fu_list);
237 file_list_unlock(); 237 file_list_unlock();
238 } 238 }
239} 239}
@@ -245,7 +245,7 @@ int fs_may_remount_ro(struct super_block *sb)
245 /* Check that no files are currently opened for writing. */ 245 /* Check that no files are currently opened for writing. */
246 file_list_lock(); 246 file_list_lock();
247 list_for_each(p, &sb->s_files) { 247 list_for_each(p, &sb->s_files) {
248 struct file *file = list_entry(p, struct file, f_list); 248 struct file *file = list_entry(p, struct file, f_u.fu_list);
249 struct inode *inode = file->f_dentry->d_inode; 249 struct inode *inode = file->f_dentry->d_inode;
250 250
251 /* File with pending delete? */ 251 /* File with pending delete? */
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 44082bfdfec9..9f1072836c8e 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -12,6 +12,7 @@
12#include <linux/kmod.h> 12#include <linux/kmod.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/sched.h> /* for 'current' */
15#include <asm/uaccess.h> 16#include <asm/uaccess.h>
16 17
17/* 18/*
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e94ab398b717..ffab4783ac64 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -230,7 +230,6 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
230 * The inode is clean, unused 230 * The inode is clean, unused
231 */ 231 */
232 list_move(&inode->i_list, &inode_unused); 232 list_move(&inode->i_list, &inode_unused);
233 inodes_stat.nr_unused++;
234 } 233 }
235 } 234 }
236 wake_up_inode(inode); 235 wake_up_inode(inode);
@@ -238,14 +237,20 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
238} 237}
239 238
240/* 239/*
241 * Write out an inode's dirty pages. Called under inode_lock. 240 * Write out an inode's dirty pages. Called under inode_lock. Either the
241 * caller has ref on the inode (either via __iget or via syscall against an fd)
242 * or the inode has I_WILL_FREE set (via generic_forget_inode)
242 */ 243 */
243static int 244static int
244__writeback_single_inode(struct inode *inode, 245__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
245 struct writeback_control *wbc)
246{ 246{
247 wait_queue_head_t *wqh; 247 wait_queue_head_t *wqh;
248 248
249 if (!atomic_read(&inode->i_count))
250 WARN_ON(!(inode->i_state & I_WILL_FREE));
251 else
252 WARN_ON(inode->i_state & I_WILL_FREE);
253
249 if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) { 254 if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
250 list_move(&inode->i_list, &inode->i_sb->s_dirty); 255 list_move(&inode->i_list, &inode->i_sb->s_dirty);
251 return 0; 256 return 0;
@@ -259,11 +264,9 @@ __writeback_single_inode(struct inode *inode,
259 264
260 wqh = bit_waitqueue(&inode->i_state, __I_LOCK); 265 wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
261 do { 266 do {
262 __iget(inode);
263 spin_unlock(&inode_lock); 267 spin_unlock(&inode_lock);
264 __wait_on_bit(wqh, &wq, inode_wait, 268 __wait_on_bit(wqh, &wq, inode_wait,
265 TASK_UNINTERRUPTIBLE); 269 TASK_UNINTERRUPTIBLE);
266 iput(inode);
267 spin_lock(&inode_lock); 270 spin_lock(&inode_lock);
268 } while (inode->i_state & I_LOCK); 271 } while (inode->i_state & I_LOCK);
269 } 272 }
@@ -541,14 +544,15 @@ void sync_inodes(int wait)
541} 544}
542 545
543/** 546/**
544 * write_inode_now - write an inode to disk 547 * write_inode_now - write an inode to disk
545 * @inode: inode to write to disk 548 * @inode: inode to write to disk
546 * @sync: whether the write should be synchronous or not 549 * @sync: whether the write should be synchronous or not
550 *
551 * This function commits an inode to disk immediately if it is dirty. This is
552 * primarily needed by knfsd.
547 * 553 *
548 * This function commits an inode to disk immediately if it is 554 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
549 * dirty. This is primarily needed by knfsd.
550 */ 555 */
551
552int write_inode_now(struct inode *inode, int sync) 556int write_inode_now(struct inode *inode, int sync)
553{ 557{
554 int ret; 558 int ret;
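The fs-writeback.c hunk above makes __writeback_single_inode() assert its calling convention: a caller must either hold a reference on the inode (i_count > 0) or have marked it I_WILL_FREE. A minimal standalone sketch of that invariant check follows; struct demo_inode and the flag value are illustrative stand-ins, not the kernel's definitions.

#include <assert.h>

#define DEMO_I_WILL_FREE 0x100          /* illustrative flag value only */

struct demo_inode {
	int i_count;                    /* reference count */
	unsigned int i_state;           /* state flags */
};

/* Mirrors the WARN_ON() pair added above: an unreferenced inode must be
 * marked I_WILL_FREE, a referenced inode must not be. */
static void demo_check_writeback_caller(const struct demo_inode *inode)
{
	if (inode->i_count == 0)
		assert(inode->i_state & DEMO_I_WILL_FREE);
	else
		assert(!(inode->i_state & DEMO_I_WILL_FREE));
}
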
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index d4c869c6d01b..a6f90a6c754a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -151,9 +151,9 @@ void fuse_release_background(struct fuse_req *req)
151/* 151/*
152 * This function is called when a request is finished. Either a reply 152 * This function is called when a request is finished. Either a reply
153 * has arrived or it was interrupted (and not yet sent) or some error 153 * has arrived or it was interrupted (and not yet sent) or some error
154 * occured during communication with userspace, or the device file was 154 * occurred during communication with userspace, or the device file was
155 * closed. It decreases the referece count for the request. In case 155 * closed. It decreases the reference count for the request. In case
156 * of a background request the referece to the stored objects are 156 * of a background request the reference to the stored objects are
157 * released. The requester thread is woken up (if still waiting), and 157 * released. The requester thread is woken up (if still waiting), and
158 * finally the request is either freed or put on the unused_list 158 * finally the request is either freed or put on the unused_list
159 * 159 *
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 29f1e9f6e85c..70dba721acab 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -741,13 +741,14 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
741 if (inode && S_ISDIR(inode->i_mode)) { 741 if (inode && S_ISDIR(inode->i_mode)) {
742 /* Don't allow creating an alias to a directory */ 742 /* Don't allow creating an alias to a directory */
743 struct dentry *alias = d_find_alias(inode); 743 struct dentry *alias = d_find_alias(inode);
744 if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) { 744 if (alias) {
745 dput(alias); 745 dput(alias);
746 iput(inode); 746 iput(inode);
747 return ERR_PTR(-EIO); 747 return ERR_PTR(-EIO);
748 } 748 }
749 } 749 }
750 return d_splice_alias(inode, entry); 750 d_add(entry, inode);
751 return NULL;
751} 752}
752 753
753static int fuse_setxattr(struct dentry *entry, const char *name, 754static int fuse_setxattr(struct dentry *entry, const char *name,
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 24d761518d86..5cb456f572c1 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -349,22 +349,22 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
349 int isdir); 349 int isdir);
350 350
351/** 351/**
352 * Initialise file operations on a regular file 352 * Initialize file operations on a regular file
353 */ 353 */
354void fuse_init_file_inode(struct inode *inode); 354void fuse_init_file_inode(struct inode *inode);
355 355
356/** 356/**
357 * Initialise inode operations on regular files and special files 357 * Initialize inode operations on regular files and special files
358 */ 358 */
359void fuse_init_common(struct inode *inode); 359void fuse_init_common(struct inode *inode);
360 360
361/** 361/**
362 * Initialise inode and file operations on a directory 362 * Initialize inode and file operations on a directory
363 */ 363 */
364void fuse_init_dir(struct inode *inode); 364void fuse_init_dir(struct inode *inode);
365 365
366/** 366/**
367 * Initialise inode operations on a symlink 367 * Initialize inode operations on a symlink
368 */ 368 */
369void fuse_init_symlink(struct inode *inode); 369void fuse_init_symlink(struct inode *inode);
370 370
@@ -411,7 +411,7 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc);
411 411
412/** 412/**
413 * Decrement reference count of a request. If count goes to zero put 413 * Decrement reference count of a request. If count goes to zero put
414 * on unused list (preallocated) or free reqest (not preallocated). 414 * on unused list (preallocated) or free request (not preallocated).
415 */ 415 */
416void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req); 416void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
417 417
@@ -431,7 +431,7 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
431void request_send_background(struct fuse_conn *fc, struct fuse_req *req); 431void request_send_background(struct fuse_conn *fc, struct fuse_req *req);
432 432
433/** 433/**
434 * Release inodes and file assiciated with background request 434 * Release inodes and file associated with background request
435 */ 435 */
436void fuse_release_background(struct fuse_req *req); 436void fuse_release_background(struct fuse_req *req);
437 437
diff --git a/fs/inode.c b/fs/inode.c
index 7d3316527767..d8d04bd72b59 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1088,6 +1088,7 @@ static void generic_forget_inode(struct inode *inode)
1088 if (inode->i_data.nrpages) 1088 if (inode->i_data.nrpages)
1089 truncate_inode_pages(&inode->i_data, 0); 1089 truncate_inode_pages(&inode->i_data, 0);
1090 clear_inode(inode); 1090 clear_inode(inode);
1091 wake_up_inode(inode);
1091 destroy_inode(inode); 1092 destroy_inode(inode);
1092} 1093}
1093 1094
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 0f224384f176..8210ac16a368 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -15,6 +15,7 @@
15#include <linux/jffs2.h> 15#include <linux/jffs2.h>
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/completion.h> 17#include <linux/completion.h>
18#include <linux/sched.h>
18#include "nodelist.h" 19#include "nodelist.h"
19 20
20 21
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 996d922e503e..316133c626b7 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -18,6 +18,8 @@
18#include <linux/mtd/mtd.h> 18#include <linux/mtd/mtd.h>
19#include <linux/crc32.h> 19#include <linux/crc32.h>
20#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
21#include <linux/jiffies.h>
22
21#include "nodelist.h" 23#include "nodelist.h"
22 24
23/* For testing write failures */ 25/* For testing write failures */
diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c
index 154f511c7245..626a367bcd81 100644
--- a/fs/msdos/namei.c
+++ b/fs/msdos/namei.c
@@ -454,10 +454,10 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
454{ 454{
455 struct buffer_head *dotdot_bh; 455 struct buffer_head *dotdot_bh;
456 struct msdos_dir_entry *dotdot_de; 456 struct msdos_dir_entry *dotdot_de;
457 loff_t dotdot_i_pos;
458 struct inode *old_inode, *new_inode; 457 struct inode *old_inode, *new_inode;
459 struct fat_slot_info old_sinfo, sinfo; 458 struct fat_slot_info old_sinfo, sinfo;
460 struct timespec ts; 459 struct timespec ts;
460 loff_t dotdot_i_pos, new_i_pos;
461 int err, old_attrs, is_dir, update_dotdot, corrupt = 0; 461 int err, old_attrs, is_dir, update_dotdot, corrupt = 0;
462 462
463 old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; 463 old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
@@ -516,28 +516,24 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
516 if (new_inode) { 516 if (new_inode) {
517 if (err) 517 if (err)
518 goto out; 518 goto out;
519 if (MSDOS_I(new_inode)->i_pos != sinfo.i_pos) {
520 /* WTF??? Cry and fail. */
521 printk(KERN_WARNING "msdos_rename: fs corrupted\n");
522 goto out;
523 }
524
525 if (is_dir) { 519 if (is_dir) {
526 err = fat_dir_empty(new_inode); 520 err = fat_dir_empty(new_inode);
527 if (err) 521 if (err)
528 goto out; 522 goto out;
529 } 523 }
524 new_i_pos = MSDOS_I(new_inode)->i_pos;
530 fat_detach(new_inode); 525 fat_detach(new_inode);
531 } else { 526 } else {
532 err = msdos_add_entry(new_dir, new_name, is_dir, is_hid, 0, 527 err = msdos_add_entry(new_dir, new_name, is_dir, is_hid, 0,
533 &ts, &sinfo); 528 &ts, &sinfo);
534 if (err) 529 if (err)
535 goto out; 530 goto out;
531 new_i_pos = sinfo.i_pos;
536 } 532 }
537 new_dir->i_version++; 533 new_dir->i_version++;
538 534
539 fat_detach(old_inode); 535 fat_detach(old_inode);
540 fat_attach(old_inode, sinfo.i_pos); 536 fat_attach(old_inode, new_i_pos);
541 if (is_hid) 537 if (is_hid)
542 MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN; 538 MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN;
543 else 539 else
@@ -604,7 +600,7 @@ error_inode:
604 fat_attach(old_inode, old_sinfo.i_pos); 600 fat_attach(old_inode, old_sinfo.i_pos);
605 MSDOS_I(old_inode)->i_attrs = old_attrs; 601 MSDOS_I(old_inode)->i_attrs = old_attrs;
606 if (new_inode) { 602 if (new_inode) {
607 fat_attach(new_inode, sinfo.i_pos); 603 fat_attach(new_inode, new_i_pos);
608 if (corrupt) 604 if (corrupt)
609 corrupt |= fat_sync_inode(new_inode); 605 corrupt |= fat_sync_inode(new_inode);
610 } else { 606 } else {
diff --git a/fs/namei.c b/fs/namei.c
index aaaa81036234..c5769c4fcab1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1311,9 +1311,6 @@ static inline int may_create(struct inode *dir, struct dentry *child,
1311} 1311}
1312 1312
1313/* 1313/*
1314 * Special case: O_CREAT|O_EXCL implies O_NOFOLLOW for security
1315 * reasons.
1316 *
1317 * O_DIRECTORY translates into forcing a directory lookup. 1314 * O_DIRECTORY translates into forcing a directory lookup.
1318 */ 1315 */
1319static inline int lookup_flags(unsigned int f) 1316static inline int lookup_flags(unsigned int f)
@@ -1323,9 +1320,6 @@ static inline int lookup_flags(unsigned int f)
1323 if (f & O_NOFOLLOW) 1320 if (f & O_NOFOLLOW)
1324 retval &= ~LOOKUP_FOLLOW; 1321 retval &= ~LOOKUP_FOLLOW;
1325 1322
1326 if ((f & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
1327 retval &= ~LOOKUP_FOLLOW;
1328
1329 if (f & O_DIRECTORY) 1323 if (f & O_DIRECTORY)
1330 retval |= LOOKUP_DIRECTORY; 1324 retval |= LOOKUP_DIRECTORY;
1331 1325
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index f2781ca42761..fc0f12ba89cc 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1274,14 +1274,12 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1274 } 1274 }
1275 1275
1276 if ((fattr->valid & NFS_ATTR_FATTR) == 0) { 1276 if ((fattr->valid & NFS_ATTR_FATTR) == 0) {
1277 spin_unlock(&inode->i_lock);
1278 return 0; 1277 return 0;
1279 } 1278 }
1280 1279
1281 /* Has the inode gone and changed behind our back? */ 1280 /* Has the inode gone and changed behind our back? */
1282 if (nfsi->fileid != fattr->fileid 1281 if (nfsi->fileid != fattr->fileid
1283 || (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) { 1282 || (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
1284 spin_unlock(&inode->i_lock);
1285 return -EIO; 1283 return -EIO;
1286 } 1284 }
1287 1285
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 8a8c34461d48..b638fb500743 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -533,7 +533,7 @@ static void proc_kill_inodes(struct proc_dir_entry *de)
533 */ 533 */
534 file_list_lock(); 534 file_list_lock();
535 list_for_each(p, &sb->s_files) { 535 list_for_each(p, &sb->s_files) {
536 struct file * filp = list_entry(p, struct file, f_list); 536 struct file * filp = list_entry(p, struct file, f_u.fu_list);
537 struct dentry * dentry = filp->f_dentry; 537 struct dentry * dentry = filp->f_dentry;
538 struct inode * inode; 538 struct inode * inode;
539 struct file_operations *fops; 539 struct file_operations *fops;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index effa6c0c467a..e6a818a93f3d 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -156,10 +156,13 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
156 156
157 WARN_ON(de && de->deleted); 157 WARN_ON(de && de->deleted);
158 158
159 if (de != NULL && !try_module_get(de->owner))
160 goto out_mod;
161
159 inode = iget(sb, ino); 162 inode = iget(sb, ino);
160 if (!inode) 163 if (!inode)
161 goto out_fail; 164 goto out_ino;
162 165
163 PROC_I(inode)->pde = de; 166 PROC_I(inode)->pde = de;
164 if (de) { 167 if (de) {
165 if (de->mode) { 168 if (de->mode) {
@@ -171,20 +174,20 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
171 inode->i_size = de->size; 174 inode->i_size = de->size;
172 if (de->nlink) 175 if (de->nlink)
173 inode->i_nlink = de->nlink; 176 inode->i_nlink = de->nlink;
174 if (!try_module_get(de->owner))
175 goto out_fail;
176 if (de->proc_iops) 177 if (de->proc_iops)
177 inode->i_op = de->proc_iops; 178 inode->i_op = de->proc_iops;
178 if (de->proc_fops) 179 if (de->proc_fops)
179 inode->i_fop = de->proc_fops; 180 inode->i_fop = de->proc_fops;
180 } 181 }
181 182
182out:
183 return inode; 183 return inode;
184 184
185out_fail: 185out_ino:
186 if (de != NULL)
187 module_put(de->owner);
188out_mod:
186 de_put(de); 189 de_put(de);
187 goto out; 190 return NULL;
188} 191}
189 192
190int proc_fill_super(struct super_block *s, void *data, int silent) 193int proc_fill_super(struct super_block *s, void *data, int silent)
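The proc_get_inode() change above takes the module reference before calling iget() and unwinds through the new out_ino/out_mod labels on failure, releasing only what was already acquired, in reverse order. A self-contained sketch of that acquire-then-unwind idiom follows; all demo_* names are hypothetical and only the pattern is taken from the hunk.

#include <stddef.h>

static int demo_module_get(void)  { return 1; }     /* pretend the module pin succeeds */
static void demo_module_put(void) { }               /* drop the module pin */
static void *demo_iget(void)      { return NULL; }  /* pretend the inode lookup fails */

static void *demo_get_inode(void)
{
	void *inode;

	if (!demo_module_get())
		goto out_mod;           /* nothing acquired yet */
	inode = demo_iget();
	if (!inode)
		goto out_ino;           /* undo the module reference */
	return inode;

out_ino:
	demo_module_put();
out_mod:
	return NULL;
}
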
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index a3453555a94e..5b6b0b6038a7 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -629,12 +629,4 @@ void __init proc_misc_init(void)
629 if (entry) 629 if (entry)
630 entry->proc_fops = &proc_sysrq_trigger_operations; 630 entry->proc_fops = &proc_sysrq_trigger_operations;
631#endif 631#endif
632#ifdef CONFIG_PPC32
633 {
634 extern struct file_operations ppc_htab_operations;
635 entry = create_proc_entry("ppc_htab", S_IRUGO|S_IWUSR, NULL);
636 if (entry)
637 entry->proc_fops = &ppc_htab_operations;
638 }
639#endif
640} 632}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 44b02fc02ebe..42afb5bef111 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1024,12 +1024,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
1024 strcpy(REISERFS_SB(s)->s_qf_names[qtype], arg); 1024 strcpy(REISERFS_SB(s)->s_qf_names[qtype], arg);
1025 *mount_options |= 1 << REISERFS_QUOTA; 1025 *mount_options |= 1 << REISERFS_QUOTA;
1026 } else { 1026 } else {
1027 if (REISERFS_SB(s)->s_qf_names[qtype]) { 1027 kfree(REISERFS_SB(s)->s_qf_names[qtype]);
1028 kfree(REISERFS_SB(s)-> 1028 REISERFS_SB(s)->s_qf_names[qtype] = NULL;
1029 s_qf_names[qtype]);
1030 REISERFS_SB(s)->s_qf_names[qtype] =
1031 NULL;
1032 }
1033 } 1029 }
1034 } 1030 }
1035 if (c == 'f') { 1031 if (c == 'f') {
@@ -1158,11 +1154,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1158 if (!reiserfs_parse_options 1154 if (!reiserfs_parse_options
1159 (s, arg, &mount_options, &blocks, NULL, &commit_max_age)) { 1155 (s, arg, &mount_options, &blocks, NULL, &commit_max_age)) {
1160#ifdef CONFIG_QUOTA 1156#ifdef CONFIG_QUOTA
1161 for (i = 0; i < MAXQUOTAS; i++) 1157 for (i = 0; i < MAXQUOTAS; i++) {
1162 if (REISERFS_SB(s)->s_qf_names[i]) { 1158 kfree(REISERFS_SB(s)->s_qf_names[i]);
1163 kfree(REISERFS_SB(s)->s_qf_names[i]); 1159 REISERFS_SB(s)->s_qf_names[i] = NULL;
1164 REISERFS_SB(s)->s_qf_names[i] = NULL; 1160 }
1165 }
1166#endif 1161#endif
1167 return -EINVAL; 1162 return -EINVAL;
1168 } 1163 }
@@ -1940,13 +1935,11 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1940 brelse(SB_BUFFER_WITH_SB(s)); 1935 brelse(SB_BUFFER_WITH_SB(s));
1941#ifdef CONFIG_QUOTA 1936#ifdef CONFIG_QUOTA
1942 for (j = 0; j < MAXQUOTAS; j++) { 1937 for (j = 0; j < MAXQUOTAS; j++) {
1943 if (sbi->s_qf_names[j]) 1938 kfree(sbi->s_qf_names[j]);
1944 kfree(sbi->s_qf_names[j]); 1939 sbi->s_qf_names[j] = NULL;
1945 } 1940 }
1946#endif 1941#endif
1947 if (sbi != NULL) { 1942 kfree(sbi);
1948 kfree(sbi);
1949 }
1950 1943
1951 s->s_fs_info = NULL; 1944 s->s_fs_info = NULL;
1952 return errval; 1945 return errval;
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 6703efa3c430..a47ac9aac8b2 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -296,8 +296,7 @@ reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
296 } 296 }
297 } 297 }
298 298
299 if (value) 299 kfree(value);
300 kfree(value);
301 300
302 if (!error) { 301 if (!error) {
303 /* Release the old one */ 302 /* Release the old one */
diff --git a/fs/super.c b/fs/super.c
index 6e57ee252e14..f60155ec7780 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -513,7 +513,7 @@ static void mark_files_ro(struct super_block *sb)
513 struct file *f; 513 struct file *f;
514 514
515 file_list_lock(); 515 file_list_lock();
516 list_for_each_entry(f, &sb->s_files, f_list) { 516 list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
517 if (S_ISREG(f->f_dentry->d_inode->i_mode) && file_count(f)) 517 if (S_ISREG(f->f_dentry->d_inode->i_mode) && file_count(f))
518 f->f_mode &= ~FMODE_WRITE; 518 f->f_mode &= ~FMODE_WRITE;
519 } 519 }
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index 1c6f6b57ef1c..ef46939c0c1a 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -621,8 +621,7 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
621 } 621 }
622 622
623 /* build the entry of long file name */ 623 /* build the entry of long file name */
624 for (cksum = i = 0; i < 11; i++) 624 cksum = fat_checksum(msdos_name);
625 cksum = (((cksum&1)<<7)|((cksum&0xfe)>>1)) + msdos_name[i];
626 625
627 *nr_slots = usize / 13; 626 *nr_slots = usize / 13;
628 for (ps = slots, i = *nr_slots; i > 0; i--, ps++) { 627 for (ps = slots, i = *nr_slots; i > 0; i--, ps++) {
@@ -888,10 +887,10 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
888{ 887{
889 struct buffer_head *dotdot_bh; 888 struct buffer_head *dotdot_bh;
890 struct msdos_dir_entry *dotdot_de; 889 struct msdos_dir_entry *dotdot_de;
891 loff_t dotdot_i_pos;
892 struct inode *old_inode, *new_inode; 890 struct inode *old_inode, *new_inode;
893 struct fat_slot_info old_sinfo, sinfo; 891 struct fat_slot_info old_sinfo, sinfo;
894 struct timespec ts; 892 struct timespec ts;
893 loff_t dotdot_i_pos, new_i_pos;
895 int err, is_dir, update_dotdot, corrupt = 0; 894 int err, is_dir, update_dotdot, corrupt = 0;
896 895
897 old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; 896 old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
@@ -914,31 +913,24 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
914 913
915 ts = CURRENT_TIME_SEC; 914 ts = CURRENT_TIME_SEC;
916 if (new_inode) { 915 if (new_inode) {
917 err = vfat_find(new_dir, &new_dentry->d_name, &sinfo);
918 if (err)
919 goto out;
920 if (MSDOS_I(new_inode)->i_pos != sinfo.i_pos) {
921 /* WTF??? Cry and fail. */
922 printk(KERN_WARNING "vfat_rename: fs corrupted\n");
923 goto out;
924 }
925
926 if (is_dir) { 916 if (is_dir) {
927 err = fat_dir_empty(new_inode); 917 err = fat_dir_empty(new_inode);
928 if (err) 918 if (err)
929 goto out; 919 goto out;
930 } 920 }
921 new_i_pos = MSDOS_I(new_inode)->i_pos;
931 fat_detach(new_inode); 922 fat_detach(new_inode);
932 } else { 923 } else {
933 err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0, 924 err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0,
934 &ts, &sinfo); 925 &ts, &sinfo);
935 if (err) 926 if (err)
936 goto out; 927 goto out;
928 new_i_pos = sinfo.i_pos;
937 } 929 }
938 new_dir->i_version++; 930 new_dir->i_version++;
939 931
940 fat_detach(old_inode); 932 fat_detach(old_inode);
941 fat_attach(old_inode, sinfo.i_pos); 933 fat_attach(old_inode, new_i_pos);
942 if (IS_DIRSYNC(new_dir)) { 934 if (IS_DIRSYNC(new_dir)) {
943 err = fat_sync_inode(old_inode); 935 err = fat_sync_inode(old_inode);
944 if (err) 936 if (err)
@@ -1002,7 +994,7 @@ error_inode:
1002 fat_detach(old_inode); 994 fat_detach(old_inode);
1003 fat_attach(old_inode, old_sinfo.i_pos); 995 fat_attach(old_inode, old_sinfo.i_pos);
1004 if (new_inode) { 996 if (new_inode) {
1005 fat_attach(new_inode, sinfo.i_pos); 997 fat_attach(new_inode, new_i_pos);
1006 if (corrupt) 998 if (corrupt)
1007 corrupt |= fat_sync_inode(new_inode); 999 corrupt |= fat_sync_inode(new_inode);
1008 } else { 1000 } else {
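The vfat_build_slots() hunk above replaces the open-coded rotate-and-add loop with a call to fat_checksum(). The value computed over the 11-byte 8.3 alias is the classic FAT long-name checksum: rotate the running sum right by one bit, then add the next name byte. A small sketch follows, assuming fat_checksum() behaves exactly like the loop the patch removes.

#include <stdint.h>

/* Rotate-right-by-one then add, over all 11 bytes of the short name;
 * identical to the loop removed in the hunk above. */
static uint8_t demo_fat_checksum(const uint8_t name[11])
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 11; i++)
		sum = (((sum & 1) << 7) | ((sum & 0xfe) >> 1)) + name[i];
	return sum;
}
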
diff --git a/fs/xattr.c b/fs/xattr.c
index 3f9c64bea151..f6e00c0e114f 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -143,7 +143,7 @@ getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
143 if (size) { 143 if (size) {
144 if (size > XATTR_SIZE_MAX) 144 if (size > XATTR_SIZE_MAX)
145 size = XATTR_SIZE_MAX; 145 size = XATTR_SIZE_MAX;
146 kvalue = kmalloc(size, GFP_KERNEL); 146 kvalue = kzalloc(size, GFP_KERNEL);
147 if (!kvalue) 147 if (!kvalue)
148 return -ENOMEM; 148 return -ENOMEM;
149 } 149 }
@@ -154,11 +154,15 @@ getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
154 error = -EOPNOTSUPP; 154 error = -EOPNOTSUPP;
155 if (d->d_inode->i_op && d->d_inode->i_op->getxattr) 155 if (d->d_inode->i_op && d->d_inode->i_op->getxattr)
156 error = d->d_inode->i_op->getxattr(d, kname, kvalue, size); 156 error = d->d_inode->i_op->getxattr(d, kname, kvalue, size);
157 else if (!strncmp(kname, XATTR_SECURITY_PREFIX, 157
158 sizeof XATTR_SECURITY_PREFIX - 1)) { 158 if (!strncmp(kname, XATTR_SECURITY_PREFIX,
159 sizeof XATTR_SECURITY_PREFIX - 1)) {
159 const char *suffix = kname + sizeof XATTR_SECURITY_PREFIX - 1; 160 const char *suffix = kname + sizeof XATTR_SECURITY_PREFIX - 1;
160 error = security_inode_getsecurity(d->d_inode, suffix, kvalue, 161 int rv = security_inode_getsecurity(d->d_inode, suffix, kvalue,
161 size); 162 size, error);
163 /* Security module active: overwrite error value */
164 if (rv != -EOPNOTSUPP)
165 error = rv;
162 } 166 }
163 if (error > 0) { 167 if (error > 0) {
164 if (size && copy_to_user(value, kvalue, error)) 168 if (size && copy_to_user(value, kvalue, error))
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index eb2cbd97d404..1a6295f2c2d4 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -26,9 +26,6 @@ struct semaphore {
26 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ 26 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
27} 27}
28 28
29#define __MUTEX_INITIALIZER(name) \
30 __SEMAPHORE_INITIALIZER(name,1)
31
32#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 29#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
33 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 30 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
34 31
diff --git a/include/asm-arm/arch-ixp2000/ixdp2x01.h b/include/asm-arm/arch-ixp2000/ixdp2x01.h
index b768009c3a51..c6d51426e98f 100644
--- a/include/asm-arm/arch-ixp2000/ixdp2x01.h
+++ b/include/asm-arm/arch-ixp2000/ixdp2x01.h
@@ -22,7 +22,7 @@
22#define IXDP2X01_CPLD_REGION_SIZE 0x00100000 22#define IXDP2X01_CPLD_REGION_SIZE 0x00100000
23 23
24#define IXDP2X01_CPLD_VIRT_REG(reg) (volatile unsigned long*)(IXDP2X01_VIRT_CPLD_BASE | reg) 24#define IXDP2X01_CPLD_VIRT_REG(reg) (volatile unsigned long*)(IXDP2X01_VIRT_CPLD_BASE | reg)
25#define IXDP2X01_CPLD_PHYS_REG(reg) (volatile u32*)(IXDP2X01_PHYS_CPLD_BASE | reg) 25#define IXDP2X01_CPLD_PHYS_REG(reg) (IXDP2X01_PHYS_CPLD_BASE | reg)
26 26
27#define IXDP2X01_UART1_VIRT_BASE IXDP2X01_CPLD_VIRT_REG(0x40) 27#define IXDP2X01_UART1_VIRT_BASE IXDP2X01_CPLD_VIRT_REG(0x40)
28#define IXDP2X01_UART1_PHYS_BASE IXDP2X01_CPLD_PHYS_REG(0x40) 28#define IXDP2X01_UART1_PHYS_BASE IXDP2X01_CPLD_PHYS_REG(0x40)
diff --git a/include/asm-arm/arch-ixp4xx/io.h b/include/asm-arm/arch-ixp4xx/io.h
index e350dcb544e8..80d05ecad2f0 100644
--- a/include/asm-arm/arch-ixp4xx/io.h
+++ b/include/asm-arm/arch-ixp4xx/io.h
@@ -113,7 +113,7 @@ __ixp4xx_writeb(u8 value, u32 addr)
113} 113}
114 114
115static inline void 115static inline void
116__ixp4xx_writesb(u32 bus_addr, u8 *vaddr, int count) 116__ixp4xx_writesb(u32 bus_addr, const u8 *vaddr, int count)
117{ 117{
118 while (count--) 118 while (count--)
119 writeb(*vaddr++, bus_addr); 119 writeb(*vaddr++, bus_addr);
@@ -136,7 +136,7 @@ __ixp4xx_writew(u16 value, u32 addr)
136} 136}
137 137
138static inline void 138static inline void
139__ixp4xx_writesw(u32 bus_addr, u16 *vaddr, int count) 139__ixp4xx_writesw(u32 bus_addr, const u16 *vaddr, int count)
140{ 140{
141 while (count--) 141 while (count--)
142 writew(*vaddr++, bus_addr); 142 writew(*vaddr++, bus_addr);
@@ -154,7 +154,7 @@ __ixp4xx_writel(u32 value, u32 addr)
154} 154}
155 155
156static inline void 156static inline void
157__ixp4xx_writesl(u32 bus_addr, u32 *vaddr, int count) 157__ixp4xx_writesl(u32 bus_addr, const u32 *vaddr, int count)
158{ 158{
159 while (count--) 159 while (count--)
160 writel(*vaddr++, bus_addr); 160 writel(*vaddr++, bus_addr);
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index 366bafbdfbb1..5a0d19b466b0 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -397,9 +397,6 @@ static inline pte_t *pmd_page_kernel(pmd_t pmd)
397#define pgd_clear(pgdp) do { } while (0) 397#define pgd_clear(pgdp) do { } while (0)
398#define set_pgd(pgd,pgdp) do { } while (0) 398#define set_pgd(pgd,pgdp) do { } while (0)
399 399
400#define page_pte_prot(page,prot) mk_pte(page, prot)
401#define page_pte(page) mk_pte(page, __pgprot(0))
402
403/* to find an entry in a page-table-directory */ 400/* to find an entry in a page-table-directory */
404#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) 401#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
405 402
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 60f33e6eb800..71ca7d412687 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -24,8 +24,6 @@ struct semaphore {
24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ 24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
25} 25}
26 26
27#define __MUTEX_INITIALIZER(name) __SEMAPHORE_INIT(name,1)
28
29#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 27#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
30 struct semaphore name = __SEMAPHORE_INIT(name,count) 28 struct semaphore name = __SEMAPHORE_INIT(name,count)
31 29
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index c49df635a80f..d626e70faded 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -544,7 +544,6 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
544asmlinkage int sys_fork(struct pt_regs *regs); 544asmlinkage int sys_fork(struct pt_regs *regs);
545asmlinkage int sys_vfork(struct pt_regs *regs); 545asmlinkage int sys_vfork(struct pt_regs *regs);
546asmlinkage int sys_pipe(unsigned long *fildes); 546asmlinkage int sys_pipe(unsigned long *fildes);
547asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
548struct sigaction; 547struct sigaction;
549asmlinkage long sys_rt_sigaction(int sig, 548asmlinkage long sys_rt_sigaction(int sig,
550 const struct sigaction __user *act, 549 const struct sigaction __user *act,
diff --git a/include/asm-arm26/pgtable.h b/include/asm-arm26/pgtable.h
index f602cf572411..a590250277f8 100644
--- a/include/asm-arm26/pgtable.h
+++ b/include/asm-arm26/pgtable.h
@@ -98,8 +98,6 @@ extern struct page *empty_zero_page;
98#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) 98#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
99#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) 99#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
100#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) 100#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
101#define page_pte_prot(page,prot) mk_pte(page, prot)
102#define page_pte(page) mk_pte(page, __pgprot(0))
103 101
104/* 102/*
105 * Terminology: PGD = Page Directory, PMD = Page Middle Directory, 103 * Terminology: PGD = Page Directory, PMD = Page Middle Directory,
diff --git a/include/asm-arm26/semaphore.h b/include/asm-arm26/semaphore.h
index c1b6a1edad92..ccf15e704109 100644
--- a/include/asm-arm26/semaphore.h
+++ b/include/asm-arm26/semaphore.h
@@ -25,9 +25,6 @@ struct semaphore {
25 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ 25 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
26} 26}
27 27
28#define __MUTEX_INITIALIZER(name) \
29 __SEMAPHORE_INIT(name,1)
30
31#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 28#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
32 struct semaphore name = __SEMAPHORE_INIT(name,count) 29 struct semaphore name = __SEMAPHORE_INIT(name,count)
33 30
diff --git a/include/asm-arm26/unistd.h b/include/asm-arm26/unistd.h
index dfa0b0c30aa3..be4c2fb9c049 100644
--- a/include/asm-arm26/unistd.h
+++ b/include/asm-arm26/unistd.h
@@ -480,7 +480,6 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
480asmlinkage int sys_fork(struct pt_regs *regs); 480asmlinkage int sys_fork(struct pt_regs *regs);
481asmlinkage int sys_vfork(struct pt_regs *regs); 481asmlinkage int sys_vfork(struct pt_regs *regs);
482asmlinkage int sys_pipe(unsigned long *fildes); 482asmlinkage int sys_pipe(unsigned long *fildes);
483asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
484struct sigaction; 483struct sigaction;
485asmlinkage long sys_rt_sigaction(int sig, 484asmlinkage long sys_rt_sigaction(int sig,
486 const struct sigaction __user *act, 485 const struct sigaction __user *act,
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 8ed7636ab311..39faf69bcf76 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -33,9 +33,6 @@ struct semaphore {
33 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 33 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
34} 34}
35 35
36#define __MUTEX_INITIALIZER(name) \
37 __SEMAPHORE_INITIALIZER(name,1)
38
39#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 36#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
40 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 37 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
41 38
diff --git a/include/asm-cris/unistd.h b/include/asm-cris/unistd.h
index 28232ad2ff34..156a34bfc583 100644
--- a/include/asm-cris/unistd.h
+++ b/include/asm-cris/unistd.h
@@ -367,7 +367,6 @@ asmlinkage int sys_fork(long r10, long r11, long r12, long r13,
367asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, 367asmlinkage int sys_vfork(long r10, long r11, long r12, long r13,
368 long mof, long srp, struct pt_regs *regs); 368 long mof, long srp, struct pt_regs *regs);
369asmlinkage int sys_pipe(unsigned long __user *fildes); 369asmlinkage int sys_pipe(unsigned long __user *fildes);
370asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
371struct sigaction; 370struct sigaction;
372asmlinkage long sys_rt_sigaction(int sig, 371asmlinkage long sys_rt_sigaction(int sig,
373 const struct sigaction __user *act, 372 const struct sigaction __user *act,
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 473fb4bb6329..b247e99dff49 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -436,8 +436,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
436 return pte; 436 return pte;
437} 437}
438 438
439#define page_pte(page) page_pte_prot((page), __pgprot(0))
440
441/* to find an entry in a page-table-directory. */ 439/* to find an entry in a page-table-directory. */
442#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) 440#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
443#define pgd_index_k(addr) pgd_index(addr) 441#define pgd_index_k(addr) pgd_index(addr)
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index 393545630806..b18396288df1 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -47,9 +47,6 @@ struct semaphore {
47#define __SEMAPHORE_INITIALIZER(name,count) \ 47#define __SEMAPHORE_INITIALIZER(name,count) \
48{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) } 48{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) }
49 49
50#define __MUTEX_INITIALIZER(name) \
51 __SEMAPHORE_INITIALIZER(name,1)
52
53#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 50#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
54 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 51 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
55 52
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index fe6ef3774297..81bae2a99192 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -35,9 +35,6 @@ struct semaphore {
35 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 35 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
36} 36}
37 37
38#define __MUTEX_INITIALIZER(name) \
39 __SEMAPHORE_INITIALIZER(name,1)
40
41#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 38#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
42 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 39 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
43 40
diff --git a/include/asm-h8300/unistd.h b/include/asm-h8300/unistd.h
index 56a6401886fa..56a4a5686c88 100644
--- a/include/asm-h8300/unistd.h
+++ b/include/asm-h8300/unistd.h
@@ -528,7 +528,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
528asmlinkage int sys_execve(char *name, char **argv, char **envp, 528asmlinkage int sys_execve(char *name, char **argv, char **envp,
529 int dummy, ...); 529 int dummy, ...);
530asmlinkage int sys_pipe(unsigned long *fildes); 530asmlinkage int sys_pipe(unsigned long *fildes);
531asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
532struct sigaction; 531struct sigaction;
533asmlinkage long sys_rt_sigaction(int sig, 532asmlinkage long sys_rt_sigaction(int sig,
534 const struct sigaction __user *act, 533 const struct sigaction __user *act,
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 8c454aa58ac6..a515e2aed829 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -118,7 +118,8 @@ extern void release_lapic_nmi(void);
118extern void disable_timer_nmi_watchdog(void); 118extern void disable_timer_nmi_watchdog(void);
119extern void enable_timer_nmi_watchdog(void); 119extern void enable_timer_nmi_watchdog(void);
120extern void nmi_watchdog_tick (struct pt_regs * regs); 120extern void nmi_watchdog_tick (struct pt_regs * regs);
121extern int APIC_init_uniprocessor (void); 121extern int APIC_init(void);
122extern void APIC_late_time_init(void);
122extern void disable_APIC_timer(void); 123extern void disable_APIC_timer(void);
123extern void enable_APIC_timer(void); 124extern void enable_APIC_timer(void);
124 125
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 6df1a53c190e..29b851a18c6e 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -17,6 +17,8 @@
17extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; 17extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
18DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]); 18DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
19 19
20#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
21
20DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); 22DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
21 23
22struct Xgt_desc_struct { 24struct Xgt_desc_struct {
@@ -60,7 +62,7 @@ __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
60 62
61static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr) 63static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
62{ 64{
63 _set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr, 65 _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
64 offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89); 66 offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
65} 67}
66 68
@@ -68,7 +70,7 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *ad
68 70
69static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size) 71static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
70{ 72{
71 _set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82); 73 _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
72} 74}
73 75
74#define LDT_entry_a(info) \ 76#define LDT_entry_a(info) \
@@ -109,7 +111,7 @@ static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 en
109 111
110static inline void load_TLS(struct thread_struct *t, unsigned int cpu) 112static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
111{ 113{
112#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i] 114#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
113 C(0); C(1); C(2); 115 C(0); C(1); C(2);
114#undef C 116#undef C
115} 117}
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 622815bf3243..9139b89497a1 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -55,6 +55,7 @@ void init_8259A(int aeoi);
55void FASTCALL(send_IPI_self(int vector)); 55void FASTCALL(send_IPI_self(int vector));
56void init_VISWS_APIC_irqs(void); 56void init_VISWS_APIC_irqs(void);
57void setup_IO_APIC(void); 57void setup_IO_APIC(void);
58void IO_APIC_late_time_init(void);
58void disable_IO_APIC(void); 59void disable_IO_APIC(void);
59void print_IO_APIC(void); 60void print_IO_APIC(void);
60int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); 61int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
diff --git a/include/asm-i386/mach-default/smpboot_hooks.h b/include/asm-i386/mach-default/smpboot_hooks.h
index 7f45f6311059..d7c70c144f9f 100644
--- a/include/asm-i386/mach-default/smpboot_hooks.h
+++ b/include/asm-i386/mach-default/smpboot_hooks.h
@@ -1,11 +1,6 @@
1/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws 1/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
2 * which needs to alter them. */ 2 * which needs to alter them. */
3 3
4static inline void smpboot_clear_io_apic_irqs(void)
5{
6 io_apic_irqs = 0;
7}
8
9static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) 4static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
10{ 5{
11 CMOS_WRITE(0xa, 0xf); 6 CMOS_WRITE(0xa, 0xf);
@@ -32,13 +27,3 @@ static inline void smpboot_restore_warm_reset_vector(void)
32 27
33 *((volatile long *) phys_to_virt(0x467)) = 0; 28 *((volatile long *) phys_to_virt(0x467)) = 0;
34} 29}
35
36static inline void smpboot_setup_io_apic(void)
37{
38 /*
39 * Here we can be sure that there is an IO-APIC in the system. Let's
40 * go and set it up:
41 */
42 if (!skip_ioapic_setup && nr_ioapics)
43 setup_IO_APIC();
44}
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 28a84f6185a7..4a0637a3e208 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -16,7 +16,7 @@ static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
16 16
17extern int parse_unisys_oem (char *oemptr); 17extern int parse_unisys_oem (char *oemptr);
18extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); 18extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
19extern void setup_unisys(); 19extern void setup_unisys(void);
20 20
21static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 21static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
22 char *productid) 22 char *productid)
diff --git a/include/asm-i386/mach-visws/smpboot_hooks.h b/include/asm-i386/mach-visws/smpboot_hooks.h
index d926471fa359..14d8e0375f7a 100644
--- a/include/asm-i386/mach-visws/smpboot_hooks.h
+++ b/include/asm-i386/mach-visws/smpboot_hooks.h
@@ -11,14 +11,7 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
11 11
12/* for visws do nothing for any of these */ 12/* for visws do nothing for any of these */
13 13
14static inline void smpboot_clear_io_apic_irqs(void)
15{
16}
17
18static inline void smpboot_restore_warm_reset_vector(void) 14static inline void smpboot_restore_warm_reset_vector(void)
19{ 15{
20} 16}
21 17
22static inline void smpboot_setup_io_apic(void)
23{
24}
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index fa07bd6c7529..74ef721b534d 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -26,11 +26,6 @@
26#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) 26#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
27#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) 27#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
28 28
29#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
30
31#define pmd_page_kernel(pmd) \
32((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
33
34/* 29/*
35 * All present user pages are user-executable: 30 * All present user pages are user-executable:
36 */ 31 */
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 2e3f4a344a2d..f1a8b454920a 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -74,11 +74,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
74 */ 74 */
75static inline void pud_clear (pud_t * pud) { } 75static inline void pud_clear (pud_t * pud) { }
76 76
77#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
78
79#define pmd_page_kernel(pmd) \
80((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
81
82#define pud_page(pud) \ 77#define pud_page(pud) \
83((struct page *) __va(pud_val(pud) & PAGE_MASK)) 78((struct page *) __va(pud_val(pud) & PAGE_MASK))
84 79
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 0e3ec809352d..03f3c8ac6383 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -323,8 +323,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
323 return pte; 323 return pte;
324} 324}
325 325
326#define page_pte(page) page_pte_prot(page, __pgprot(0))
327
328#define pmd_large(pmd) \ 326#define pmd_large(pmd) \
329((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) 327((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
330 328
@@ -369,6 +367,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
369#define pte_offset_kernel(dir, address) \ 367#define pte_offset_kernel(dir, address) \
370 ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address)) 368 ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
371 369
370#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
371
372#define pmd_page_kernel(pmd) \
373 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
374
372/* 375/*
373 * Helper function that returns the kernel pagetable entry controlling 376 * Helper function that returns the kernel pagetable entry controlling
374 * the virtual address 'address'. NULL means no pagetable entry present. 377 * the virtual address 'address'. NULL means no pagetable entry present.
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index ea563da63e24..6a42b2142fd6 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -55,9 +55,6 @@ struct semaphore {
55 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 55 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
56} 56}
57 57
58#define __MUTEX_INITIALIZER(name) \
59 __SEMAPHORE_INITIALIZER(name,1)
60
61#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 58#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
62 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 59 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
63 60
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index acd5c26b69ba..97d52ac49e46 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -167,6 +167,8 @@ struct __xchg_dummy { unsigned long a[100]; };
167#define __xg(x) ((struct __xchg_dummy *)(x)) 167#define __xg(x) ((struct __xchg_dummy *)(x))
168 168
169 169
170#ifdef CONFIG_X86_CMPXCHG64
171
170/* 172/*
171 * The semantics of XCHGCMP8B are a bit strange, this is why 173 * The semantics of XCHGCMP8B are a bit strange, this is why
172 * there is a loop and the loading of %%eax and %%edx has to 174 * there is a loop and the loading of %%eax and %%edx has to
@@ -221,6 +223,8 @@ static inline void __set_64bit_var (unsigned long long *ptr,
221 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ 223 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
222 __set_64bit(ptr, ll_low(value), ll_high(value)) ) 224 __set_64bit(ptr, ll_low(value), ll_high(value)) )
223 225
226#endif
227
224/* 228/*
225 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway 229 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
226 * Note 2: xchg has side effect, so that attribute volatile is necessary, 230 * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -259,7 +263,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
259 263
260#ifdef CONFIG_X86_CMPXCHG 264#ifdef CONFIG_X86_CMPXCHG
261#define __HAVE_ARCH_CMPXCHG 1 265#define __HAVE_ARCH_CMPXCHG 1
262#endif
263 266
264static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, 267static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
265 unsigned long new, int size) 268 unsigned long new, int size)
@@ -275,13 +278,13 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
275 case 2: 278 case 2:
276 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" 279 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
277 : "=a"(prev) 280 : "=a"(prev)
278 : "q"(new), "m"(*__xg(ptr)), "0"(old) 281 : "r"(new), "m"(*__xg(ptr)), "0"(old)
279 : "memory"); 282 : "memory");
280 return prev; 283 return prev;
281 case 4: 284 case 4:
282 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" 285 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
283 : "=a"(prev) 286 : "=a"(prev)
284 : "q"(new), "m"(*__xg(ptr)), "0"(old) 287 : "r"(new), "m"(*__xg(ptr)), "0"(old)
285 : "memory"); 288 : "memory");
286 return prev; 289 return prev;
287 } 290 }
@@ -291,6 +294,30 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
291#define cmpxchg(ptr,o,n)\ 294#define cmpxchg(ptr,o,n)\
292 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ 295 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
293 (unsigned long)(n),sizeof(*(ptr)))) 296 (unsigned long)(n),sizeof(*(ptr))))
297
298#endif
299
300#ifdef CONFIG_X86_CMPXCHG64
301
302static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
303 unsigned long long new)
304{
305 unsigned long long prev;
306 __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
307 : "=A"(prev)
308 : "b"((unsigned long)new),
309 "c"((unsigned long)(new >> 32)),
310 "m"(*__xg(ptr)),
311 "0"(old)
312 : "memory");
313 return prev;
314}
315
316#define cmpxchg64(ptr,o,n)\
317 ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
318 (unsigned long long)(n)))
319
320#endif
294 321
295#ifdef __KERNEL__ 322#ifdef __KERNEL__
296struct alt_instr { 323struct alt_instr {
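The asm-i386/system.h hunk above adds __cmpxchg64()/cmpxchg64(), a 64-bit compare-and-swap built on the cmpxchg8b instruction and compiled only when CONFIG_X86_CMPXCHG64 is set. The usual calling pattern is a read-modify-write retry loop. The sketch below shows that pattern with the portable GCC __atomic builtins so it compiles outside the kernel; in kernel code the loop body would use cmpxchg64(&ctr, old, old + 1) instead.

#include <stdint.h>

static uint64_t demo_counter;

/* Lock-free 64-bit increment: retry until the compare-and-swap succeeds;
 * on failure 'old' is refreshed with the freshly observed value. */
static void demo_inc64(void)
{
	uint64_t old = __atomic_load_n(&demo_counter, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(&demo_counter, &old, old + 1,
					    0, __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		; /* retry with the updated 'old' */
}
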
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index fbaf90a3968c..0f92e78dfea1 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -448,7 +448,6 @@ asmlinkage int sys_clone(struct pt_regs regs);
448asmlinkage int sys_fork(struct pt_regs regs); 448asmlinkage int sys_fork(struct pt_regs regs);
449asmlinkage int sys_vfork(struct pt_regs regs); 449asmlinkage int sys_vfork(struct pt_regs regs);
450asmlinkage int sys_pipe(unsigned long __user *fildes); 450asmlinkage int sys_pipe(unsigned long __user *fildes);
451asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
452asmlinkage long sys_iopl(unsigned long unused); 451asmlinkage long sys_iopl(unsigned long unused);
453struct sigaction; 452struct sigaction;
454asmlinkage long sys_rt_sigaction(int sig, 453asmlinkage long sys_rt_sigaction(int sig,
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 3339c7b55a6f..21e32a06bc82 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -236,9 +236,6 @@ ia64_phys_addr_valid (unsigned long addr)
236#define pte_modify(_pte, newprot) \ 236#define pte_modify(_pte, newprot) \
237 (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK))) 237 (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
238 238
239#define page_pte_prot(page,prot) mk_pte(page, prot)
240#define page_pte(page) page_pte_prot(page, __pgprot(0))
241
242#define pte_none(pte) (!pte_val(pte)) 239#define pte_none(pte) (!pte_val(pte))
243#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE)) 240#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
244#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL) 241#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL)
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index 3a2f0f3f78f3..bb8906285fab 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -24,8 +24,6 @@ struct semaphore {
24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
25} 25}
26 26
27#define __MUTEX_INITIALIZER(name) __SEMAPHORE_INITIALIZER(name,1)
28
29#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 27#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) 28 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
31 29
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 3a0c69524656..6d96a67439be 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -383,8 +383,6 @@ struct sigaction;
383long sys_execve(char __user *filename, char __user * __user *argv, 383long sys_execve(char __user *filename, char __user * __user *argv,
384 char __user * __user *envp, struct pt_regs *regs); 384 char __user * __user *envp, struct pt_regs *regs);
385asmlinkage long sys_pipe(void); 385asmlinkage long sys_pipe(void);
386asmlinkage long sys_ptrace(long request, pid_t pid,
387 unsigned long addr, unsigned long data);
388asmlinkage long sys_rt_sigaction(int sig, 386asmlinkage long sys_rt_sigaction(int sig,
389 const struct sigaction __user *act, 387 const struct sigaction __user *act,
390 struct sigaction __user *oact, 388 struct sigaction __user *oact,
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 388e5ee9fa27..1cd5fd4a5b2c 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -324,8 +324,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
324 return pte; 324 return pte;
325} 325}
326 326
327#define page_pte(page) page_pte_prot(page, __pgprot(0))
328
329/* 327/*
330 * Conversion functions: convert a page and protection to a page entry, 328 * Conversion functions: convert a page and protection to a page entry,
331 * and a page entry and page directory to the page they refer to. 329 * and a page entry and page directory to the page they refer to.
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index 53e3c60f21ec..bf447c52a0a1 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -32,9 +32,6 @@ struct semaphore {
32 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 32 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
33} 33}
34 34
35#define __MUTEX_INITIALIZER(name) \
36 __SEMAPHORE_INITIALIZER(name,1)
37
38#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 35#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
39 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 36 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
40 37
diff --git a/include/asm-m32r/thread_info.h b/include/asm-m32r/thread_info.h
index 7a6be7727a92..0f589363f619 100644
--- a/include/asm-m32r/thread_info.h
+++ b/include/asm-m32r/thread_info.h
@@ -95,7 +95,7 @@ static inline struct thread_info *current_thread_info(void)
95} 95}
96 96
97/* thread information allocation */ 97/* thread information allocation */
98#if CONFIG_DEBUG_STACK_USAGE 98#ifdef CONFIG_DEBUG_STACK_USAGE
99#define alloc_thread_info(tsk) \ 99#define alloc_thread_info(tsk) \
100 ({ \ 100 ({ \
101 struct thread_info *ret; \ 101 struct thread_info *ret; \
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index 8552d8f45ab1..ac399e1f7bc0 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -452,7 +452,6 @@ asmlinkage int sys_clone(struct pt_regs regs);
452asmlinkage int sys_fork(struct pt_regs regs); 452asmlinkage int sys_fork(struct pt_regs regs);
453asmlinkage int sys_vfork(struct pt_regs regs); 453asmlinkage int sys_vfork(struct pt_regs regs);
454asmlinkage int sys_pipe(unsigned long __user *fildes); 454asmlinkage int sys_pipe(unsigned long __user *fildes);
455asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
456struct sigaction; 455struct sigaction;
457asmlinkage long sys_rt_sigaction(int sig, 456asmlinkage long sys_rt_sigaction(int sig,
458 const struct sigaction __user *act, 457 const struct sigaction __user *act,
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h
index ab94cf3ed447..fd4c7cc3d3be 100644
--- a/include/asm-m68k/semaphore.h
+++ b/include/asm-m68k/semaphore.h
@@ -36,9 +36,6 @@ struct semaphore {
36 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 36 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
37} 37}
38 38
39#define __MUTEX_INITIALIZER(name) \
40 __SEMAPHORE_INITIALIZER(name,1)
41
42#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 39#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
43 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 40 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
44 41
diff --git a/include/asm-m68k/sun3xflop.h b/include/asm-m68k/sun3xflop.h
index 1ed3b787ee05..fda1eccf10aa 100644
--- a/include/asm-m68k/sun3xflop.h
+++ b/include/asm-m68k/sun3xflop.h
@@ -27,10 +27,8 @@
27 27
28/* We don't need no stinkin' I/O port allocation crap. */ 28/* We don't need no stinkin' I/O port allocation crap. */
29#undef release_region 29#undef release_region
30#undef check_region
31#undef request_region 30#undef request_region
32#define release_region(X, Y) do { } while(0) 31#define release_region(X, Y) do { } while(0)
33#define check_region(X, Y) (0)
34#define request_region(X, Y, Z) (1) 32#define request_region(X, Y, Z) (1)
35 33
36struct sun3xflop_private { 34struct sun3xflop_private {
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index cbabde4f8a45..c2554bcd1747 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -444,7 +444,6 @@ asmlinkage long sys_mmap2(
444 unsigned long fd, unsigned long pgoff); 444 unsigned long fd, unsigned long pgoff);
445asmlinkage int sys_execve(char *name, char **argv, char **envp); 445asmlinkage int sys_execve(char *name, char **argv, char **envp);
446asmlinkage int sys_pipe(unsigned long *fildes); 446asmlinkage int sys_pipe(unsigned long *fildes);
447asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
448struct pt_regs; 447struct pt_regs;
449struct sigaction; 448struct sigaction;
450asmlinkage long sys_rt_sigaction(int sig, 449asmlinkage long sys_rt_sigaction(int sig,
diff --git a/include/asm-m68knommu/ide.h b/include/asm-m68knommu/ide.h
index b1cbf8bb9232..836f0721ecf9 100644
--- a/include/asm-m68knommu/ide.h
+++ b/include/asm-m68knommu/ide.h
@@ -163,13 +163,6 @@ ide_free_irq(unsigned int irq, void *dev_id)
163} 163}
164 164
165 165
166static IDE_INLINE int
167ide_check_region(ide_ioreg_t from, unsigned int extent)
168{
169 return 0;
170}
171
172
173static IDE_INLINE void 166static IDE_INLINE void
174ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name) 167ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
175{ 168{
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
index febe85add509..17aee15906a6 100644
--- a/include/asm-m68knommu/semaphore.h
+++ b/include/asm-m68knommu/semaphore.h
@@ -35,9 +35,6 @@ struct semaphore {
35 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 35 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
36} 36}
37 37
38#define __MUTEX_INITIALIZER(name) \
39 __SEMAPHORE_INITIALIZER(name,1)
40
41#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 38#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
42 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 39 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
43 40
diff --git a/include/asm-m68knommu/unistd.h b/include/asm-m68knommu/unistd.h
index 84b6fa14459f..5373988a7e51 100644
--- a/include/asm-m68knommu/unistd.h
+++ b/include/asm-m68knommu/unistd.h
@@ -504,7 +504,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
504 unsigned long fd, unsigned long pgoff); 504 unsigned long fd, unsigned long pgoff);
505asmlinkage int sys_execve(char *name, char **argv, char **envp); 505asmlinkage int sys_execve(char *name, char **argv, char **envp);
506asmlinkage int sys_pipe(unsigned long *fildes); 506asmlinkage int sys_pipe(unsigned long *fildes);
507asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
508struct pt_regs; 507struct pt_regs;
509int sys_request_irq(unsigned int, 508int sys_request_irq(unsigned int,
510 irqreturn_t (*)(int, void *, struct pt_regs *), 509 irqreturn_t (*)(int, void *, struct pt_regs *),
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index 3e0a522c0f0e..82166b254b27 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -169,7 +169,6 @@ static inline void pud_clear(pud_t *pudp)
169#define __pgd_offset(address) pgd_index(address) 169#define __pgd_offset(address) pgd_index(address)
170#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 170#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
171#define __pmd_offset(address) pmd_index(address) 171#define __pmd_offset(address) pmd_index(address)
172#define page_pte(page) page_pte_prot(page, __pgprot(0))
173 172
174/* to find an entry in a kernel page-table-directory */ 173/* to find an entry in a kernel page-table-directory */
175#define pgd_offset_k(address) pgd_offset(&init_mm, 0) 174#define pgd_offset_k(address) pgd_offset(&init_mm, 0)
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 1e8ae2723be4..34facd996503 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -75,7 +75,6 @@ extern void paging_init(void);
75 * Conversion functions: convert a page and protection to a page entry, 75 * Conversion functions: convert a page and protection to a page entry,
76 * and a page entry and page directory to the page they refer to. 76 * and a page entry and page directory to the page they refer to.
77 */ 77 */
78#define page_pte(page) page_pte_prot(page, __pgprot(0))
79#define pmd_phys(pmd) (pmd_val(pmd) - PAGE_OFFSET) 78#define pmd_phys(pmd) (pmd_val(pmd) - PAGE_OFFSET)
80#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) 79#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
81#define pmd_page_kernel(pmd) pmd_val(pmd) 80#define pmd_page_kernel(pmd) pmd_val(pmd)
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index c2c97dec661b..3d6aa7c7ea81 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -45,9 +45,6 @@ struct semaphore {
45 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 45 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
46} 46}
47 47
48#define __MUTEX_INITIALIZER(name) \
49 __SEMAPHORE_INITIALIZER(name, 1)
50
51#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ 48#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
52 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 49 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
53 50
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index c9eaf4c104de..89ea8b60e945 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -1177,7 +1177,6 @@ asmlinkage long sys_mmap2(
1177 unsigned long fd, unsigned long pgoff); 1177 unsigned long fd, unsigned long pgoff);
1178asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs); 1178asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs);
1179asmlinkage int sys_pipe(nabi_no_regargs struct pt_regs regs); 1179asmlinkage int sys_pipe(nabi_no_regargs struct pt_regs regs);
1180asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
1181struct sigaction; 1180struct sigaction;
1182asmlinkage long sys_rt_sigaction(int sig, 1181asmlinkage long sys_rt_sigaction(int sig,
1183 const struct sigaction __user *act, 1182 const struct sigaction __user *act,
diff --git a/include/asm-parisc/ide.h b/include/asm-parisc/ide.h
index 3243cf2cd227..b27bf7aeb256 100644
--- a/include/asm-parisc/ide.h
+++ b/include/asm-parisc/ide.h
@@ -22,7 +22,6 @@
22 22
23#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id)) 23#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
24#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id)) 24#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
25#define ide_check_region(from,extent) check_region((from), (extent))
26#define ide_request_region(from,extent,name) request_region((from), (extent), (name)) 25#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
27#define ide_release_region(from,extent) release_region((from), (extent)) 26#define ide_release_region(from,extent) release_region((from), (extent))
28/* Generic I/O and MEMIO string operations. */ 27/* Generic I/O and MEMIO string operations. */
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index f78bb2e34538..c9ee41cd0707 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -49,9 +49,6 @@ struct semaphore {
49 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 49 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
50} 50}
51 51
52#define __MUTEX_INITIALIZER(name) \
53 __SEMAPHORE_INITIALIZER(name,1)
54
55#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 52#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
56 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 53 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
57 54
diff --git a/include/asm-parisc/unistd.h b/include/asm-parisc/unistd.h
index e7a620c5c5e6..80b7b98c70a1 100644
--- a/include/asm-parisc/unistd.h
+++ b/include/asm-parisc/unistd.h
@@ -1011,7 +1011,6 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
1011 struct pt_regs *regs); 1011 struct pt_regs *regs);
1012int sys_vfork(struct pt_regs *regs); 1012int sys_vfork(struct pt_regs *regs);
1013int sys_pipe(int *fildes); 1013int sys_pipe(int *fildes);
1014long sys_ptrace(long request, pid_t pid, long addr, long data);
1015struct sigaction; 1014struct sigaction;
1016asmlinkage long sys_rt_sigaction(int sig, 1015asmlinkage long sys_rt_sigaction(int sig,
1017 const struct sigaction __user *act, 1016 const struct sigaction __user *act,
diff --git a/include/asm-ppc64/a.out.h b/include/asm-powerpc/a.out.h
index 3871e252a6f1..c7393a977364 100644
--- a/include/asm-ppc64/a.out.h
+++ b/include/asm-powerpc/a.out.h
@@ -1,14 +1,5 @@
1#ifndef __PPC64_A_OUT_H__ 1#ifndef _ASM_POWERPC_A_OUT_H
2#define __PPC64_A_OUT_H__ 2#define _ASM_POWERPC_A_OUT_H
3
4/*
5 * c 2001 PPC 64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12 3
13struct exec 4struct exec
14{ 5{
@@ -27,6 +18,7 @@ struct exec
27#define N_SYMSIZE(a) ((a).a_syms) 18#define N_SYMSIZE(a) ((a).a_syms)
28 19
29#ifdef __KERNEL__ 20#ifdef __KERNEL__
21#ifdef __powerpc64__
30 22
31#define STACK_TOP_USER64 TASK_SIZE_USER64 23#define STACK_TOP_USER64 TASK_SIZE_USER64
32#define STACK_TOP_USER32 TASK_SIZE_USER32 24#define STACK_TOP_USER32 TASK_SIZE_USER32
@@ -34,6 +26,11 @@ struct exec
34#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ 26#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
35 STACK_TOP_USER32 : STACK_TOP_USER64) 27 STACK_TOP_USER32 : STACK_TOP_USER64)
36 28
29#else /* __powerpc64__ */
30
31#define STACK_TOP TASK_SIZE
32
33#endif /* __powerpc64__ */
37#endif /* __KERNEL__ */ 34#endif /* __KERNEL__ */
38 35
39#endif /* __PPC64_A_OUT_H__ */ 36#endif /* _ASM_POWERPC_A_OUT_H */
diff --git a/include/asm-ppc/atomic.h b/include/asm-powerpc/atomic.h
index eeafd505836e..ed4b345ed75d 100644
--- a/include/asm-ppc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -1,29 +1,20 @@
1#ifndef _ASM_POWERPC_ATOMIC_H_
2#define _ASM_POWERPC_ATOMIC_H_
3
1/* 4/*
2 * PowerPC atomic operations 5 * PowerPC atomic operations
3 */ 6 */
4 7
5#ifndef _ASM_PPC_ATOMIC_H_
6#define _ASM_PPC_ATOMIC_H_
7
8typedef struct { volatile int counter; } atomic_t; 8typedef struct { volatile int counter; } atomic_t;
9 9
10#ifdef __KERNEL__ 10#ifdef __KERNEL__
11#include <asm/synch.h>
11 12
12#define ATOMIC_INIT(i) { (i) } 13#define ATOMIC_INIT(i) { (i) }
13 14
14#define atomic_read(v) ((v)->counter) 15#define atomic_read(v) ((v)->counter)
15#define atomic_set(v,i) (((v)->counter) = (i)) 16#define atomic_set(v,i) (((v)->counter) = (i))
16 17
17extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
18
19#ifdef CONFIG_SMP
20#define SMP_SYNC "sync"
21#define SMP_ISYNC "\n\tisync"
22#else
23#define SMP_SYNC ""
24#define SMP_ISYNC
25#endif
26
27/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx. 18/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
28 * The old ATOMIC_SYNC_FIX covered some but not all of this. 19 * The old ATOMIC_SYNC_FIX covered some but not all of this.
29 */ 20 */
@@ -53,12 +44,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
53 int t; 44 int t;
54 45
55 __asm__ __volatile__( 46 __asm__ __volatile__(
47 EIEIO_ON_SMP
56"1: lwarx %0,0,%2 # atomic_add_return\n\ 48"1: lwarx %0,0,%2 # atomic_add_return\n\
57 add %0,%1,%0\n" 49 add %0,%1,%0\n"
58 PPC405_ERR77(0,%2) 50 PPC405_ERR77(0,%2)
59" stwcx. %0,0,%2 \n\ 51" stwcx. %0,0,%2 \n\
60 bne- 1b" 52 bne- 1b"
61 SMP_ISYNC 53 ISYNC_ON_SMP
62 : "=&r" (t) 54 : "=&r" (t)
63 : "r" (a), "r" (&v->counter) 55 : "r" (a), "r" (&v->counter)
64 : "cc", "memory"); 56 : "cc", "memory");
@@ -88,12 +80,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
88 int t; 80 int t;
89 81
90 __asm__ __volatile__( 82 __asm__ __volatile__(
83 EIEIO_ON_SMP
91"1: lwarx %0,0,%2 # atomic_sub_return\n\ 84"1: lwarx %0,0,%2 # atomic_sub_return\n\
92 subf %0,%1,%0\n" 85 subf %0,%1,%0\n"
93 PPC405_ERR77(0,%2) 86 PPC405_ERR77(0,%2)
94" stwcx. %0,0,%2 \n\ 87" stwcx. %0,0,%2 \n\
95 bne- 1b" 88 bne- 1b"
96 SMP_ISYNC 89 ISYNC_ON_SMP
97 : "=&r" (t) 90 : "=&r" (t)
98 : "r" (a), "r" (&v->counter) 91 : "r" (a), "r" (&v->counter)
99 : "cc", "memory"); 92 : "cc", "memory");
@@ -121,12 +114,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
121 int t; 114 int t;
122 115
123 __asm__ __volatile__( 116 __asm__ __volatile__(
117 EIEIO_ON_SMP
124"1: lwarx %0,0,%1 # atomic_inc_return\n\ 118"1: lwarx %0,0,%1 # atomic_inc_return\n\
125 addic %0,%0,1\n" 119 addic %0,%0,1\n"
126 PPC405_ERR77(0,%1) 120 PPC405_ERR77(0,%1)
127" stwcx. %0,0,%1 \n\ 121" stwcx. %0,0,%1 \n\
128 bne- 1b" 122 bne- 1b"
129 SMP_ISYNC 123 ISYNC_ON_SMP
130 : "=&r" (t) 124 : "=&r" (t)
131 : "r" (&v->counter) 125 : "r" (&v->counter)
132 : "cc", "memory"); 126 : "cc", "memory");
@@ -164,12 +158,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
164 int t; 158 int t;
165 159
166 __asm__ __volatile__( 160 __asm__ __volatile__(
161 EIEIO_ON_SMP
167"1: lwarx %0,0,%1 # atomic_dec_return\n\ 162"1: lwarx %0,0,%1 # atomic_dec_return\n\
168 addic %0,%0,-1\n" 163 addic %0,%0,-1\n"
169 PPC405_ERR77(0,%1) 164 PPC405_ERR77(0,%1)
170" stwcx. %0,0,%1\n\ 165" stwcx. %0,0,%1\n\
171 bne- 1b" 166 bne- 1b"
172 SMP_ISYNC 167 ISYNC_ON_SMP
173 : "=&r" (t) 168 : "=&r" (t)
174 : "r" (&v->counter) 169 : "r" (&v->counter)
175 : "cc", "memory"); 170 : "cc", "memory");
@@ -189,13 +184,14 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
189 int t; 184 int t;
190 185
191 __asm__ __volatile__( 186 __asm__ __volatile__(
187 EIEIO_ON_SMP
192"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ 188"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
193 addic. %0,%0,-1\n\ 189 addic. %0,%0,-1\n\
194 blt- 2f\n" 190 blt- 2f\n"
195 PPC405_ERR77(0,%1) 191 PPC405_ERR77(0,%1)
196" stwcx. %0,0,%1\n\ 192" stwcx. %0,0,%1\n\
197 bne- 1b" 193 bne- 1b"
198 SMP_ISYNC 194 ISYNC_ON_SMP
199 "\n\ 195 "\n\
2002:" : "=&r" (t) 1962:" : "=&r" (t)
201 : "r" (&v->counter) 197 : "r" (&v->counter)
@@ -204,11 +200,10 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
204 return t; 200 return t;
205} 201}
206 202
207#define __MB __asm__ __volatile__ (SMP_SYNC : : : "memory") 203#define smp_mb__before_atomic_dec() smp_mb()
208#define smp_mb__before_atomic_dec() __MB 204#define smp_mb__after_atomic_dec() smp_mb()
209#define smp_mb__after_atomic_dec() __MB 205#define smp_mb__before_atomic_inc() smp_mb()
210#define smp_mb__before_atomic_inc() __MB 206#define smp_mb__after_atomic_inc() smp_mb()
211#define smp_mb__after_atomic_inc() __MB
212 207
213#endif /* __KERNEL__ */ 208#endif /* __KERNEL__ */
214#endif /* _ASM_PPC_ATOMIC_H_ */ 209#endif /* _ASM_POWERPC_ATOMIC_H_ */
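
The atomic_*_return routines above keep their lwarx/stwcx. reservation loop and only replace the open-coded SMP_SYNC/SMP_ISYNC barriers with the shared EIEIO_ON_SMP/ISYNC_ON_SMP macros from <asm/synch.h>. As a rough, purely illustrative sketch of the semantics (not the kernel's implementation), the same fully ordered atomic read-modify-write can be expressed with GCC's atomic builtins; the my_atomic_* names below are hypothetical:

/* Illustrative only: models atomic_add_return() semantics with GCC
 * __atomic builtins instead of the hand-written lwarx/stwcx. loop.
 * The kernel does not build its primitives this way; the point is
 * only that the addition is atomic and fully ordered, which is what
 * the eieio ... isync bracketing in the patch provides on SMP. */
#include <stdio.h>

typedef struct { volatile int counter; } my_atomic_t;   /* hypothetical type */

static inline int my_atomic_add_return(int a, my_atomic_t *v)
{
        /* SEQ_CST gives at least the ordering the barrier pair provides. */
        return __atomic_add_fetch(&v->counter, a, __ATOMIC_SEQ_CST);
}

int main(void)
{
        my_atomic_t v = { 0 };

        my_atomic_add_return(5, &v);
        my_atomic_add_return(-2, &v);
        printf("counter = %d\n", v.counter);    /* prints 3 */
        return 0;
}
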
diff --git a/include/asm-ppc64/auxvec.h b/include/asm-powerpc/auxvec.h
index ac6381a106e1..79d8c4732309 100644
--- a/include/asm-ppc64/auxvec.h
+++ b/include/asm-powerpc/auxvec.h
@@ -1,5 +1,5 @@
1#ifndef __PPC64_AUXVEC_H 1#ifndef _ASM_POWERPC_AUXVEC_H
2#define __PPC64_AUXVEC_H 2#define _ASM_POWERPC_AUXVEC_H
3 3
4/* 4/*
5 * We need to put in some extra aux table entries to tell glibc what 5 * We need to put in some extra aux table entries to tell glibc what
@@ -14,6 +14,8 @@
14/* The vDSO location. We have to use the same value as x86 for glibc's 14/* The vDSO location. We have to use the same value as x86 for glibc's
15 * sake :-) 15 * sake :-)
16 */ 16 */
17#ifdef __powerpc64__
17#define AT_SYSINFO_EHDR 33 18#define AT_SYSINFO_EHDR 33
19#endif
18 20
19#endif /* __PPC64_AUXVEC_H */ 21#endif
diff --git a/include/asm-ppc/backlight.h b/include/asm-powerpc/backlight.h
index 3a1c3dede2a0..1ba1f27a0b63 100644
--- a/include/asm-ppc/backlight.h
+++ b/include/asm-powerpc/backlight.h
@@ -1,12 +1,13 @@
1/* 1/*
2 * Routines for handling backlight control on PowerBooks 2 * Routines for handling backlight control on PowerBooks
3 * 3 *
4 * For now, implementation resides in arch/ppc/kernel/pmac_support.c 4 * For now, implementation resides in
5 * arch/powerpc/platforms/powermac/pmac_support.c
5 * 6 *
6 */ 7 */
8#ifndef __ASM_POWERPC_BACKLIGHT_H
9#define __ASM_POWERPC_BACKLIGHT_H
7#ifdef __KERNEL__ 10#ifdef __KERNEL__
8#ifndef __ASM_PPC_BACKLIGHT_H
9#define __ASM_PPC_BACKLIGHT_H
10 11
11/* Abstract values */ 12/* Abstract values */
12#define BACKLIGHT_OFF 0 13#define BACKLIGHT_OFF 0
@@ -26,5 +27,5 @@ extern int get_backlight_enable(void);
26extern int set_backlight_level(int level); 27extern int set_backlight_level(int level);
27extern int get_backlight_level(void); 28extern int get_backlight_level(void);
28 29
29#endif
30#endif /* __KERNEL__ */ 30#endif /* __KERNEL__ */
31#endif
diff --git a/include/asm-ppc64/bug.h b/include/asm-powerpc/bug.h
index 160178278861..e4d028e87020 100644
--- a/include/asm-ppc64/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_BUG_H 1#ifndef _ASM_POWERPC_BUG_H
2#define _PPC64_BUG_H 2#define _ASM_POWERPC_BUG_H
3 3
4/* 4/*
5 * Define an illegal instr to trap on the bug. 5 * Define an illegal instr to trap on the bug.
@@ -11,9 +11,21 @@
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13 13
14#ifdef __powerpc64__
15#define BUG_TABLE_ENTRY(label, line, file, func) \
16 ".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
17#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
18#define DATA_TYPE long long
19#else
20#define BUG_TABLE_ENTRY(label, line, file, func) \
21 ".long " #label ", " #line ", " #file ", " #func "\n"
22#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
23#define DATA_TYPE int
24#endif /* __powerpc64__ */
25
14struct bug_entry { 26struct bug_entry {
15 unsigned long bug_addr; 27 unsigned long bug_addr;
16 long line; 28 int line;
17 const char *file; 29 const char *file;
18 const char *function; 30 const char *function;
19}; 31};
@@ -32,28 +44,28 @@ struct bug_entry *find_bug(unsigned long bugaddr);
32 __asm__ __volatile__( \ 44 __asm__ __volatile__( \
33 "1: twi 31,0,0\n" \ 45 "1: twi 31,0,0\n" \
34 ".section __bug_table,\"a\"\n\t" \ 46 ".section __bug_table,\"a\"\n\t" \
35 " .llong 1b,%0,%1,%2\n" \ 47 BUG_TABLE_ENTRY(1b,%0,%1,%2) \
36 ".previous" \ 48 ".previous" \
37 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \ 49 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
38} while (0) 50} while (0)
39 51
40#define BUG_ON(x) do { \ 52#define BUG_ON(x) do { \
41 __asm__ __volatile__( \ 53 __asm__ __volatile__( \
42 "1: tdnei %0,0\n" \ 54 TRAP_OP(%0,0) \
43 ".section __bug_table,\"a\"\n\t" \ 55 ".section __bug_table,\"a\"\n\t" \
44 " .llong 1b,%1,%2,%3\n" \ 56 BUG_TABLE_ENTRY(1b,%1,%2,%3) \
45 ".previous" \ 57 ".previous" \
46 : : "r" ((long long)(x)), "i" (__LINE__), \ 58 : : "r" ((DATA_TYPE)(x)), "i" (__LINE__), \
47 "i" (__FILE__), "i" (__FUNCTION__)); \ 59 "i" (__FILE__), "i" (__FUNCTION__)); \
48} while (0) 60} while (0)
49 61
50#define WARN_ON(x) do { \ 62#define WARN_ON(x) do { \
51 __asm__ __volatile__( \ 63 __asm__ __volatile__( \
52 "1: tdnei %0,0\n" \ 64 TRAP_OP(%0,0) \
53 ".section __bug_table,\"a\"\n\t" \ 65 ".section __bug_table,\"a\"\n\t" \
54 " .llong 1b,%1,%2,%3\n" \ 66 BUG_TABLE_ENTRY(1b,%1,%2,%3) \
55 ".previous" \ 67 ".previous" \
56 : : "r" ((long long)(x)), \ 68 : : "r" ((DATA_TYPE)(x)), \
57 "i" (__LINE__ + BUG_WARNING_TRAP), \ 69 "i" (__LINE__ + BUG_WARNING_TRAP), \
58 "i" (__FILE__), "i" (__FUNCTION__)); \ 70 "i" (__FILE__), "i" (__FUNCTION__)); \
59} while (0) 71} while (0)
@@ -61,9 +73,9 @@ struct bug_entry *find_bug(unsigned long bugaddr);
61#define HAVE_ARCH_BUG 73#define HAVE_ARCH_BUG
62#define HAVE_ARCH_BUG_ON 74#define HAVE_ARCH_BUG_ON
63#define HAVE_ARCH_WARN_ON 75#define HAVE_ARCH_WARN_ON
64#endif 76#endif /* CONFIG_BUG */
65#endif 77#endif /* __ASSEMBLY __ */
66 78
67#include <asm-generic/bug.h> 79#include <asm-generic/bug.h>
68 80
69#endif 81#endif /* _ASM_POWERPC_BUG_H */
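
BUG_TABLE_ENTRY above emits one record per BUG/BUG_ON/WARN_ON site into the __bug_table section, using .long entries and twnei on 32-bit and .llong entries and tdnei on 64-bit. The sketch below models, in plain C with a static array instead of a linker section, the kind of record that find_bug() resolves back into file/line/function; all demo_* names are invented for illustration:

/* Illustrative sketch of what the __bug_table mechanism records.  The
 * real table is assembled by the inline asm in the patch, one entry
 * per trap site; here it is a static array so the lookup can be shown
 * in portable C. */
#include <stdio.h>

struct demo_bug_entry {
        unsigned long bug_addr;     /* address of the trap instruction */
        int line;                   /* __LINE__ at the BUG site */
        const char *file;           /* __FILE__ */
        const char *function;       /* __FUNCTION__ */
};

static const struct demo_bug_entry demo_bug_table[] = {
        { 0x1000, 42, "drivers/foo.c", "foo_probe" },
        { 0x2000, 99, "fs/bar.c",      "bar_write" },
};

static const struct demo_bug_entry *demo_find_bug(unsigned long addr)
{
        size_t i;

        for (i = 0; i < sizeof(demo_bug_table) / sizeof(demo_bug_table[0]); i++)
                if (demo_bug_table[i].bug_addr == addr)
                        return &demo_bug_table[i];
        return NULL;
}

int main(void)
{
        const struct demo_bug_entry *b = demo_find_bug(0x2000);

        if (b)
                printf("BUG at %s:%d in %s()\n", b->file, b->line, b->function);
        return 0;
}
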
diff --git a/include/asm-ppc64/byteorder.h b/include/asm-powerpc/byteorder.h
index 8b57da62b674..b37752214a16 100644
--- a/include/asm-ppc64/byteorder.h
+++ b/include/asm-powerpc/byteorder.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_BYTEORDER_H 1#ifndef _ASM_POWERPC_BYTEORDER_H
2#define _PPC64_BYTEORDER_H 2#define _ASM_POWERPC_BYTEORDER_H
3 3
4/* 4/*
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -77,10 +77,13 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
77 77
78#ifndef __STRICT_ANSI__ 78#ifndef __STRICT_ANSI__
79#define __BYTEORDER_HAS_U64__ 79#define __BYTEORDER_HAS_U64__
80#endif 80#ifndef __powerpc64__
81#define __SWAB_64_THRU_32__
82#endif /* __powerpc64__ */
83#endif /* __STRICT_ANSI__ */
81 84
82#endif /* __GNUC__ */ 85#endif /* __GNUC__ */
83 86
84#include <linux/byteorder/big_endian.h> 87#include <linux/byteorder/big_endian.h>
85 88
86#endif /* _PPC64_BYTEORDER_H */ 89#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/include/asm-ppc64/checksum.h b/include/asm-powerpc/checksum.h
index d22d4469de43..d8354d8a49ce 100644
--- a/include/asm-ppc64/checksum.h
+++ b/include/asm-powerpc/checksum.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_CHECKSUM_H 1#ifndef _ASM_POWERPC_CHECKSUM_H
2#define _PPC64_CHECKSUM_H 2#define _ASM_POWERPC_CHECKSUM_H
3 3
4/* 4/*
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -41,8 +41,14 @@ extern unsigned int csum_partial(const unsigned char * buff, int len,
41 unsigned int sum); 41 unsigned int sum);
42 42
43/* 43/*
44 * the same as csum_partial, but copies from src to dst while it 44 * Computes the checksum of a memory block at src, length len,
45 * checksums 45 * and adds in "sum" (32-bit), while copying the block to dst.
46 * If an access exception occurs on src or dst, it stores -EFAULT
47 * to *src_err or *dst_err respectively (if that pointer is not
48 * NULL), and, for an error on src, zeroes the rest of dst.
49 *
50 * Like csum_partial, this must be called with even lengths,
51 * except for the last fragment.
46 */ 52 */
47extern unsigned int csum_partial_copy_generic(const char *src, char *dst, 53extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
48 int len, unsigned int sum, 54 int len, unsigned int sum,
@@ -51,12 +57,18 @@ extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
51 * the same as csum_partial, but copies from src to dst while it 57 * the same as csum_partial, but copies from src to dst while it
52 * checksums. 58 * checksums.
53 */ 59 */
54
55unsigned int csum_partial_copy_nocheck(const char *src, 60unsigned int csum_partial_copy_nocheck(const char *src,
56 char *dst, 61 char *dst,
57 int len, 62 int len,
58 unsigned int sum); 63 unsigned int sum);
59 64
65#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
66 csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
67
68#define csum_partial_copy_nocheck(src, dst, len, sum) \
69 csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
70
71
60/* 72/*
61 * turns a 32-bit partial checksum (e.g. from csum_partial) into a 73 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
62 * 1's complement 16-bit checksum. 74 * 1's complement 16-bit checksum.
@@ -83,12 +95,7 @@ static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
83 return csum_fold(csum_partial(buff, len, 0)); 95 return csum_fold(csum_partial(buff, len, 0));
84} 96}
85 97
86#define csum_partial_copy_from_user(src, dst, len, sum, errp) \ 98#ifdef __powerpc64__
87 csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
88
89#define csum_partial_copy_nocheck(src, dst, len, sum) \
90 csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
91
92static inline u32 csum_tcpudp_nofold(u32 saddr, 99static inline u32 csum_tcpudp_nofold(u32 saddr,
93 u32 daddr, 100 u32 daddr,
94 unsigned short len, 101 unsigned short len,
@@ -103,5 +110,23 @@ static inline u32 csum_tcpudp_nofold(u32 saddr,
103 s += (s >> 32); 110 s += (s >> 32);
104 return (u32) s; 111 return (u32) s;
105} 112}
113#else
114static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
115 unsigned long daddr,
116 unsigned short len,
117 unsigned short proto,
118 unsigned int sum)
119{
120 __asm__("\n\
121 addc %0,%0,%1 \n\
122 adde %0,%0,%2 \n\
123 adde %0,%0,%3 \n\
124 addze %0,%0 \n\
125 "
126 : "=r" (sum)
127 : "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
128 return sum;
129}
106 130
107#endif 131#endif
132#endif
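
The csum_partial/csum_fold family documented above computes the usual 16-bit one's-complement (Internet) checksum: the data is summed into a 32-bit accumulator (optionally while copying, optionally adding the TCP/UDP pseudo-header) and folded to 16 bits at the end. A minimal portable sketch of that arithmetic, using hypothetical demo_* helper names rather than the kernel's optimized assembly, is:

/* Portable illustration of the one's-complement checksum that
 * csum_partial and csum_fold compute with carry-chain assembly. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t demo_csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
        /* Add the buffer as 16-bit big-endian words; carries pile up
         * in the top half of the 32-bit accumulator. */
        while (len > 1) {
                sum += (uint32_t)buf[0] << 8 | buf[1];
                buf += 2;
                len -= 2;
        }
        if (len)                        /* odd trailing byte */
                sum += (uint32_t)buf[0] << 8;
        return sum;
}

static uint16_t demo_csum_fold(uint32_t sum)
{
        /* Fold the carries back in twice, then take the complement. */
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        const uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };

        printf("checksum = 0x%04x\n",
               demo_csum_fold(demo_csum_partial(data, sizeof(data), 0)));
        return 0;
}
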
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
new file mode 100644
index 000000000000..c019501daceb
--- /dev/null
+++ b/include/asm-powerpc/cputable.h
@@ -0,0 +1,427 @@
1#ifndef __ASM_POWERPC_CPUTABLE_H
2#define __ASM_POWERPC_CPUTABLE_H
3
4#include <linux/config.h>
5#include <asm/ppc_asm.h> /* for ASM_CONST */
6
7#define PPC_FEATURE_32 0x80000000
8#define PPC_FEATURE_64 0x40000000
9#define PPC_FEATURE_601_INSTR 0x20000000
10#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
11#define PPC_FEATURE_HAS_FPU 0x08000000
12#define PPC_FEATURE_HAS_MMU 0x04000000
13#define PPC_FEATURE_HAS_4xxMAC 0x02000000
14#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
15#define PPC_FEATURE_HAS_SPE 0x00800000
16#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
17#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
18#define PPC_FEATURE_NO_TB 0x00100000
19
20#ifdef __KERNEL__
21#ifndef __ASSEMBLY__
22
23/* This structure can grow, its real size is used by head.S code

24 * via the mkdefs mechanism.
25 */
26struct cpu_spec;
27struct op_powerpc_model;
28
29typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
30
31struct cpu_spec {
32 /* CPU is matched via (PVR & pvr_mask) == pvr_value */
33 unsigned int pvr_mask;
34 unsigned int pvr_value;
35
36 char *cpu_name;
37 unsigned long cpu_features; /* Kernel features */
38 unsigned int cpu_user_features; /* Userland features */
39
40 /* cache line sizes */
41 unsigned int icache_bsize;
42 unsigned int dcache_bsize;
43
44 /* number of performance monitor counters */
45 unsigned int num_pmcs;
46
47 /* this is called to initialize various CPU bits like L1 cache,
48 * BHT, SPD, etc... from head.S before branching to identify_machine
49 */
50 cpu_setup_t cpu_setup;
51
52 /* Used by oprofile userspace to select the right counters */
53 char *oprofile_cpu_type;
54
55 /* Processor specific oprofile operations */
56 struct op_powerpc_model *oprofile_model;
57};
58
59extern struct cpu_spec *cur_cpu_spec;
60
61extern void identify_cpu(unsigned long offset, unsigned long cpu);
62extern void do_cpu_ftr_fixups(unsigned long offset);
63
64#endif /* __ASSEMBLY__ */
65
66/* CPU kernel features */
67
68/* Retain the 32b definitions all use bottom half of word */
69#define CPU_FTR_SPLIT_ID_CACHE ASM_CONST(0x0000000000000001)
70#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
71#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
72#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
73#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
74#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
75#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
76#define CPU_FTR_604_PERF_MON ASM_CONST(0x0000000000000080)
77#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
78#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
79#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
80#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
81#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
82#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
83#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
84#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
85#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000)
86#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
87#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
88#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
89
90#ifdef __powerpc64__
91/* Add the 64b processor unique features in the top half of the word */
92#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
93#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
94#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
95#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
96#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000001000000000)
97#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
98#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
99#define CPU_FTR_CTRL ASM_CONST(0x0000008000000000)
100#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
101#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
102#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
103#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
104#else
105/* ensure on 32b processors the flags are available for compiling but
106 * don't do anything */
107#define CPU_FTR_SLB ASM_CONST(0x0)
108#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
109#define CPU_FTR_TLBIEL ASM_CONST(0x0)
110#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
111#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0)
112#define CPU_FTR_IABR ASM_CONST(0x0)
113#define CPU_FTR_MMCRA ASM_CONST(0x0)
114#define CPU_FTR_CTRL ASM_CONST(0x0)
115#define CPU_FTR_SMT ASM_CONST(0x0)
116#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0)
117#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0)
118#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0)
119#endif
120
121#ifndef __ASSEMBLY__
122
123#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
124 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
125 CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
126
127/* iSeries doesn't support large pages */
128#ifdef CONFIG_PPC_ISERIES
129#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE)
130#else
131#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
132#endif /* CONFIG_PPC_ISERIES */
133
134/* We only set the altivec features if the kernel was compiled with altivec
135 * support
136 */
137#ifdef CONFIG_ALTIVEC
138#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
139#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
140#else
141#define CPU_FTR_ALTIVEC_COMP 0
142#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
143#endif
144
145/* We need to mark all pages as being coherent if we're SMP or we
146 * have a 74[45]x and an MPC107 host bridge.
147 */
148#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
149#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
150#else
151#define CPU_FTR_COMMON 0
152#endif
153
154/* The powersave features NAP & DOZE seem to confuse BDI when
155 debugging. So if a BDI is used, disable these
156 */
157#ifndef CONFIG_BDI_SWITCH
158#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE
159#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP
160#else
161#define CPU_FTR_MAYBE_CAN_DOZE 0
162#define CPU_FTR_MAYBE_CAN_NAP 0
163#endif
164
165#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
166 !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
167 !defined(CONFIG_BOOKE))
168
169enum {
170 CPU_FTRS_PPC601 = CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE,
171 CPU_FTRS_603 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
172 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
173 CPU_FTR_MAYBE_CAN_NAP,
174 CPU_FTRS_604 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
175 CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
176 CPU_FTRS_740_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
177 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
178 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
179 CPU_FTRS_740 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
180 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
181 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
182 CPU_FTRS_750 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
183 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
184 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
185 CPU_FTRS_750FX1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
186 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
187 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
188 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
189 CPU_FTRS_750FX2 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
190 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
191 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
192 CPU_FTR_NO_DPM,
193 CPU_FTRS_750FX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
194 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
195 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
196 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
197 CPU_FTRS_750GX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
198 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
199 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
200 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
201 CPU_FTRS_7400_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
202 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
203 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
204 CPU_FTR_MAYBE_CAN_NAP,
205 CPU_FTRS_7400 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
206 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
207 CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
208 CPU_FTR_MAYBE_CAN_NAP,
209 CPU_FTRS_7450_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
210 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
211 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
212 CPU_FTR_NEED_COHERENT,
213 CPU_FTRS_7450_21 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
214 CPU_FTR_USE_TB |
215 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
216 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
217 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
218 CPU_FTR_NEED_COHERENT,
219 CPU_FTRS_7450_23 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
220 CPU_FTR_USE_TB |
221 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
222 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
223 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
224 CPU_FTRS_7455_1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
225 CPU_FTR_USE_TB |
226 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
227 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
228 CPU_FTR_NEED_COHERENT,
229 CPU_FTRS_7455_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
230 CPU_FTR_USE_TB |
231 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
232 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
233 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
234 CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
235 CPU_FTRS_7455 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
236 CPU_FTR_USE_TB |
237 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
238 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
239 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
240 CPU_FTR_NEED_COHERENT,
241 CPU_FTRS_7447_10 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
242 CPU_FTR_USE_TB |
243 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
244 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
245 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
246 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
247 CPU_FTRS_7447 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
248 CPU_FTR_USE_TB |
249 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
250 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
251 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
252 CPU_FTR_NEED_COHERENT,
253 CPU_FTRS_7447A = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
254 CPU_FTR_USE_TB |
255 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
256 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
257 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
258 CPU_FTR_NEED_COHERENT,
259 CPU_FTRS_82XX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
260 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB,
261 CPU_FTRS_G2_LE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
262 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
263 CPU_FTRS_E300 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
264 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
265 CPU_FTRS_CLASSIC32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
266 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
267 CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
268 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
269 CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
270 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
271 CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
272 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP |
273 CPU_FTR_MAYBE_CAN_NAP,
274 CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
275 CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
276 CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
277 CPU_FTRS_E200 = CPU_FTR_USE_TB,
278 CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
279 CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
280 CPU_FTR_BIG_PHYS,
281 CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON,
282#ifdef __powerpc64__
283 CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
284 CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
285 CPU_FTRS_RS64 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
286 CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
287 CPU_FTR_MMCRA | CPU_FTR_CTRL,
288 CPU_FTRS_POWER4 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
289 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
290 CPU_FTRS_PPC970 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
291 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
292 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
293 CPU_FTRS_POWER5 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
294 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
295 CPU_FTR_MMCRA | CPU_FTR_SMT |
296 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
297 CPU_FTR_MMCRA_SIHV,
298 CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
299 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
300 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT,
301 CPU_FTRS_COMPATIBLE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
302 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2,
303#endif
304
305 CPU_FTRS_POSSIBLE =
306#if CLASSIC_PPC
307 CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
308 CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
309 CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
310 CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
311 CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
312 CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
313 CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
314 CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 |
315#else
316 CPU_FTRS_GENERIC_32 |
317#endif
318#ifdef CONFIG_PPC64BRIDGE
319 CPU_FTRS_POWER3_32 |
320#endif
321#ifdef CONFIG_POWER4
322 CPU_FTRS_POWER4_32 | CPU_FTRS_970_32 |
323#endif
324#ifdef CONFIG_8xx
325 CPU_FTRS_8XX |
326#endif
327#ifdef CONFIG_40x
328 CPU_FTRS_40X |
329#endif
330#ifdef CONFIG_44x
331 CPU_FTRS_44X |
332#endif
333#ifdef CONFIG_E200
334 CPU_FTRS_E200 |
335#endif
336#ifdef CONFIG_E500
337 CPU_FTRS_E500 | CPU_FTRS_E500_2 |
338#endif
339#ifdef __powerpc64__
340 CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |
341 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL |
342#endif
343 0,
344
345 CPU_FTRS_ALWAYS =
346#if CLASSIC_PPC
347 CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
348 CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
349 CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
350 CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
351 CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
352 CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
353 CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
354 CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 &
355#else
356 CPU_FTRS_GENERIC_32 &
357#endif
358#ifdef CONFIG_PPC64BRIDGE
359 CPU_FTRS_POWER3_32 &
360#endif
361#ifdef CONFIG_POWER4
362 CPU_FTRS_POWER4_32 & CPU_FTRS_970_32 &
363#endif
364#ifdef CONFIG_8xx
365 CPU_FTRS_8XX &
366#endif
367#ifdef CONFIG_40x
368 CPU_FTRS_40X &
369#endif
370#ifdef CONFIG_44x
371 CPU_FTRS_44X &
372#endif
373#ifdef CONFIG_E200
374 CPU_FTRS_E200 &
375#endif
376#ifdef CONFIG_E500
377 CPU_FTRS_E500 & CPU_FTRS_E500_2 &
378#endif
379#ifdef __powerpc64__
380 CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &
381 CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL &
382#endif
383 CPU_FTRS_POSSIBLE,
384};
385
386static inline int cpu_has_feature(unsigned long feature)
387{
388 return (CPU_FTRS_ALWAYS & feature) ||
389 (CPU_FTRS_POSSIBLE
390 & cur_cpu_spec->cpu_features
391 & feature);
392}
393
394#endif /* !__ASSEMBLY__ */
395
396#ifdef __ASSEMBLY__
397
398#define BEGIN_FTR_SECTION 98:
399
400#ifndef __powerpc64__
401#define END_FTR_SECTION(msk, val) \
40299: \
403 .section __ftr_fixup,"a"; \
404 .align 2; \
405 .long msk; \
406 .long val; \
407 .long 98b; \
408 .long 99b; \
409 .previous
410#else /* __powerpc64__ */
411#define END_FTR_SECTION(msk, val) \
41299: \
413 .section __ftr_fixup,"a"; \
414 .align 3; \
415 .llong msk; \
416 .llong val; \
417 .llong 98b; \
418 .llong 99b; \
419 .previous
420#endif /* __powerpc64__ */
421
422#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
423#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
424#endif /* __ASSEMBLY__ */
425
426#endif /* __KERNEL__ */
427#endif /* __ASM_POWERPC_CPUTABLE_H */
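
cpu_has_feature() above relies on CPU_FTRS_ALWAYS (the AND of the feature words of every CPU the kernel is configured for) and CPU_FTRS_POSSIBLE (their OR): checks that every configured CPU satisfies, or that none can satisfy, fold to constants at compile time, and only the remaining checks read cur_cpu_spec at run time. A self-contained sketch of that trick follows; all DEMO_* names and mask values are made up for illustration:

/* Minimal sketch of the constant-folding idea behind cpu_has_feature(). */
#include <stdio.h>

#define DEMO_FTR_USE_TB      0x01ul
#define DEMO_FTR_ALTIVEC     0x02ul
#define DEMO_FTR_SMT         0x04ul

#define DEMO_FTRS_ALWAYS     (DEMO_FTR_USE_TB)                     /* in every supported CPU */
#define DEMO_FTRS_POSSIBLE   (DEMO_FTR_USE_TB | DEMO_FTR_ALTIVEC)  /* in at least one CPU */

static unsigned long demo_cur_cpu_features = DEMO_FTR_USE_TB;      /* runtime-detected word */

static inline int demo_cpu_has_feature(unsigned long feature)
{
        return (DEMO_FTRS_ALWAYS & feature) ||
               (DEMO_FTRS_POSSIBLE & demo_cur_cpu_features & feature);
}

int main(void)
{
        printf("USE_TB:  %d\n", demo_cpu_has_feature(DEMO_FTR_USE_TB));   /* folds to 1 */
        printf("ALTIVEC: %d\n", demo_cpu_has_feature(DEMO_FTR_ALTIVEC));  /* runtime test, 0 here */
        printf("SMT:     %d\n", demo_cpu_has_feature(DEMO_FTR_SMT));      /* folds to 0 */
        return 0;
}
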
diff --git a/include/asm-ppc/dbdma.h b/include/asm-powerpc/dbdma.h
index 8973565f95d3..8973565f95d3 100644
--- a/include/asm-ppc/dbdma.h
+++ b/include/asm-powerpc/dbdma.h
diff --git a/include/asm-ppc/dma.h b/include/asm-powerpc/dma.h
index cc8e5cd8c9d2..926378d2cd94 100644
--- a/include/asm-ppc/dma.h
+++ b/include/asm-powerpc/dma.h
@@ -1,18 +1,14 @@
1#ifndef _ASM_POWERPC_DMA_H
2#define _ASM_POWERPC_DMA_H
3
1/* 4/*
2 * include/asm-ppc/dma.h: Defines for using and allocating dma channels. 5 * Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992. 6 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen 7 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992. 8 * and John Boyd, Nov. 1992.
6 * Changes for ppc sound by Christoph Nadig 9 * Changes for ppc sound by Christoph Nadig
7 */ 10 */
8 11
9#ifdef __KERNEL__
10
11#include <linux/config.h>
12#include <asm/io.h>
13#include <linux/spinlock.h>
14#include <asm/system.h>
15
16/* 12/*
17 * Note: Adapted for PowerPC by Gary Thomas 13 * Note: Adapted for PowerPC by Gary Thomas
18 * Modified by Cort Dougan <cort@cs.nmt.edu> 14 * Modified by Cort Dougan <cort@cs.nmt.edu>
@@ -25,8 +21,10 @@
25 * with a grain of salt. 21 * with a grain of salt.
26 */ 22 */
27 23
28#ifndef _ASM_DMA_H 24#include <linux/config.h>
29#define _ASM_DMA_H 25#include <asm/io.h>
26#include <linux/spinlock.h>
27#include <asm/system.h>
30 28
31#ifndef MAX_DMA_CHANNELS 29#ifndef MAX_DMA_CHANNELS
32#define MAX_DMA_CHANNELS 8 30#define MAX_DMA_CHANNELS 8
@@ -34,11 +32,9 @@
34 32
35/* The maximum address that we can perform a DMA transfer to on this platform */ 33/* The maximum address that we can perform a DMA transfer to on this platform */
36/* Doesn't really apply... */ 34/* Doesn't really apply... */
37#define MAX_DMA_ADDRESS 0xFFFFFFFF 35#define MAX_DMA_ADDRESS (~0UL)
38 36
39/* in arch/ppc/kernel/setup.c -- Cort */ 37#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
40extern unsigned long DMA_MODE_WRITE, DMA_MODE_READ;
41extern unsigned long ISA_DMA_THRESHOLD;
42 38
43#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER 39#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
44#define dma_outb outb_p 40#define dma_outb outb_p
@@ -171,7 +167,18 @@ extern long ppc_cs4232_dma, ppc_cs4232_dma2;
171#define DMA1_EXT_REG 0x40B 167#define DMA1_EXT_REG 0x40B
172#define DMA2_EXT_REG 0x4D6 168#define DMA2_EXT_REG 0x4D6
173 169
170#ifndef __powerpc64__
171 /* in arch/ppc/kernel/setup.c -- Cort */
172 extern unsigned int DMA_MODE_WRITE;
173 extern unsigned int DMA_MODE_READ;
174 extern unsigned long ISA_DMA_THRESHOLD;
175#else
176 #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
177 #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
178#endif
179
174#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ 180#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
181
175#define DMA_AUTOINIT 0x10 182#define DMA_AUTOINIT 0x10
176 183
177extern spinlock_t dma_spin_lock; 184extern spinlock_t dma_spin_lock;
@@ -200,8 +207,9 @@ static __inline__ void enable_dma(unsigned int dmanr)
200 if (dmanr <= 3) { 207 if (dmanr <= 3) {
201 dma_outb(dmanr, DMA1_MASK_REG); 208 dma_outb(dmanr, DMA1_MASK_REG);
202 dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */ 209 dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */
203 } else 210 } else {
204 dma_outb(dmanr & 3, DMA2_MASK_REG); 211 dma_outb(dmanr & 3, DMA2_MASK_REG);
212 }
205} 213}
206 214
207static __inline__ void disable_dma(unsigned int dmanr) 215static __inline__ void disable_dma(unsigned int dmanr)
@@ -290,19 +298,26 @@ static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
290static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys) 298static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
291{ 299{
292 if (dmanr <= 3) { 300 if (dmanr <= 3) {
293 dma_outb(phys & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); 301 dma_outb(phys & 0xff,
294 dma_outb((phys >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); 302 ((dmanr & 3) << 1) + IO_DMA1_BASE);
303 dma_outb((phys >> 8) & 0xff,
304 ((dmanr & 3) << 1) + IO_DMA1_BASE);
295 } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) { 305 } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
296 dma_outb(phys & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); 306 dma_outb(phys & 0xff,
297 dma_outb((phys >> 8) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); 307 ((dmanr & 3) << 2) + IO_DMA2_BASE);
308 dma_outb((phys >> 8) & 0xff,
309 ((dmanr & 3) << 2) + IO_DMA2_BASE);
298 dma_outb((dmanr & 3), DMA2_EXT_REG); 310 dma_outb((dmanr & 3), DMA2_EXT_REG);
299 } else { 311 } else {
300 dma_outb((phys >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); 312 dma_outb((phys >> 1) & 0xff,
301 dma_outb((phys >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); 313 ((dmanr & 3) << 2) + IO_DMA2_BASE);
314 dma_outb((phys >> 9) & 0xff,
315 ((dmanr & 3) << 2) + IO_DMA2_BASE);
302 } 316 }
303 set_dma_page(dmanr, phys >> 16); 317 set_dma_page(dmanr, phys >> 16);
304} 318}
305 319
320
306/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for 321/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
307 * a specific DMA channel. 322 * a specific DMA channel.
308 * You must ensure the parameters are valid. 323 * You must ensure the parameters are valid.
@@ -315,21 +330,24 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
315{ 330{
316 count--; 331 count--;
317 if (dmanr <= 3) { 332 if (dmanr <= 3) {
318 dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); 333 dma_outb(count & 0xff,
319 dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + 334 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
320 IO_DMA1_BASE); 335 dma_outb((count >> 8) & 0xff,
336 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
321 } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) { 337 } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
322 dma_outb(count & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); 338 dma_outb(count & 0xff,
323 dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 2) + 2 + 339 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
324 IO_DMA2_BASE); 340 dma_outb((count >> 8) & 0xff,
341 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
325 } else { 342 } else {
326 dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + 343 dma_outb((count >> 1) & 0xff,
327 IO_DMA2_BASE); 344 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
328 dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + 345 dma_outb((count >> 9) & 0xff,
329 IO_DMA2_BASE); 346 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
330 } 347 }
331} 348}
332 349
350
333/* Get DMA residue count. After a DMA transfer, this 351/* Get DMA residue count. After a DMA transfer, this
334 * should return zero. Reading this while a DMA transfer is 352 * should return zero. Reading this while a DMA transfer is
335 * still in progress will return unpredictable results. 353 * still in progress will return unpredictable results.
@@ -340,8 +358,8 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
340 */ 358 */
341static __inline__ int get_dma_residue(unsigned int dmanr) 359static __inline__ int get_dma_residue(unsigned int dmanr)
342{ 360{
343 unsigned int io_port = (dmanr <= 3) ? 361 unsigned int io_port = (dmanr <= 3)
344 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE 362 ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
345 : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; 363 : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
346 364
347 /* using short to get 16-bit wrap around */ 365 /* using short to get 16-bit wrap around */
@@ -352,7 +370,6 @@ static __inline__ int get_dma_residue(unsigned int dmanr)
352 370
353 return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2) 371 return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2)
354 ? count : (count << 1); 372 ? count : (count << 1);
355
356} 373}
357 374
358/* These are in kernel/dma.c: */ 375/* These are in kernel/dma.c: */
@@ -367,5 +384,7 @@ extern int isa_dma_bridge_buggy;
367#else 384#else
368#define isa_dma_bridge_buggy (0) 385#define isa_dma_bridge_buggy (0)
369#endif 386#endif
370#endif /* _ASM_DMA_H */ 387
371#endif /* __KERNEL__ */ 388#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
389
390#endif /* _ASM_POWERPC_DMA_H */
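
set_dma_addr() and set_dma_count() above program the legacy 8237-style controller by splitting a value into successive byte writes, shifting by one for the 16-bit channels. The following sketch only computes the register bytes such a split would produce, with no port I/O; demo_split_dma_addr() is a hypothetical helper and the sound-DMA special cases are omitted:

/* Illustration of how a physical address is split across the 8237
 * registers: bits 0-7 and 8-15 go to the channel's address register
 * (two byte writes), bits 16-23 to the page register; word channels
 * program the address shifted right by one. */
#include <stdio.h>

struct demo_dma_prog {
        unsigned char addr_lo;   /* first write to the address register  */
        unsigned char addr_hi;   /* second write to the address register */
        unsigned char page;      /* write to the page register           */
};

static struct demo_dma_prog demo_split_dma_addr(unsigned int dmanr, unsigned int phys)
{
        struct demo_dma_prog p;
        unsigned int a = (dmanr <= 3) ? phys : phys >> 1;   /* word channels shift */

        p.addr_lo = a & 0xff;
        p.addr_hi = (a >> 8) & 0xff;
        p.page    = (phys >> 16) & 0xff;
        return p;
}

int main(void)
{
        struct demo_dma_prog p = demo_split_dma_addr(1, 0x345678);

        printf("lo=0x%02x hi=0x%02x page=0x%02x\n", p.addr_lo, p.addr_hi, p.page);
        return 0;
}
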
diff --git a/include/asm-ppc64/elf.h b/include/asm-powerpc/elf.h
index c919a89343db..d22b10021b5d 100644
--- a/include/asm-ppc64/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -1,10 +1,11 @@
1#ifndef __PPC64_ELF_H 1#ifndef _ASM_POWERPC_ELF_H
2#define __PPC64_ELF_H 2#define _ASM_POWERPC_ELF_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <asm/ptrace.h> 5#include <asm/ptrace.h>
6#include <asm/cputable.h> 6#include <asm/cputable.h>
7#include <asm/auxvec.h> 7#include <asm/auxvec.h>
8#include <asm/page.h>
8 9
9/* PowerPC relocations defined by the ABIs */ 10/* PowerPC relocations defined by the ABIs */
10#define R_PPC_NONE 0 11#define R_PPC_NONE 0
@@ -75,7 +76,7 @@
75#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */ 76#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
76#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */ 77#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
77 78
78/* Keep this the last entry. */ 79/* keep this the last entry. */
79#define R_PPC_NUM 95 80#define R_PPC_NUM 95
80 81
81/* 82/*
@@ -90,8 +91,6 @@
90 91
91#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */ 92#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
92#define ELF_NFPREG 33 /* includes fpscr */ 93#define ELF_NFPREG 33 /* includes fpscr */
93#define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
94#define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
95 94
96typedef unsigned long elf_greg_t64; 95typedef unsigned long elf_greg_t64;
97typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG]; 96typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
@@ -100,8 +99,21 @@ typedef unsigned int elf_greg_t32;
100typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG]; 99typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
101 100
102/* 101/*
103 * These are used to set parameters in the core dumps. 102 * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps.
104 */ 103 */
104#ifdef __powerpc64__
105# define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
106# define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
107# define ELF_GREG_TYPE elf_greg_t64
108#else
109# define ELF_NEVRREG 34 /* includes acc (as 2) */
110# define ELF_NVRREG 33 /* includes vscr */
111# define ELF_GREG_TYPE elf_greg_t32
112# define ELF_ARCH EM_PPC
113# define ELF_CLASS ELFCLASS32
114# define ELF_DATA ELFDATA2MSB
115#endif /* __powerpc64__ */
116
105#ifndef ELF_ARCH 117#ifndef ELF_ARCH
106# define ELF_ARCH EM_PPC64 118# define ELF_ARCH EM_PPC64
107# define ELF_CLASS ELFCLASS64 119# define ELF_CLASS ELFCLASS64
@@ -114,8 +126,9 @@ typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
114 typedef elf_greg_t32 elf_greg_t; 126 typedef elf_greg_t32 elf_greg_t;
115 typedef elf_gregset_t32 elf_gregset_t; 127 typedef elf_gregset_t32 elf_gregset_t;
116# define elf_addr_t u32 128# define elf_addr_t u32
117#endif 129#endif /* ELF_ARCH */
118 130
131/* Floating point registers */
119typedef double elf_fpreg_t; 132typedef double elf_fpreg_t;
120typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; 133typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
121 134
@@ -125,7 +138,9 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
125 * The entry with index 32 contains the vscr as the last word (offset 12) 138 * The entry with index 32 contains the vscr as the last word (offset 12)
126 * within the quadword. This allows the vscr to be stored as either a 139 * within the quadword. This allows the vscr to be stored as either a
127 * quadword (since it must be copied via a vector register to/from storage) 140 * quadword (since it must be copied via a vector register to/from storage)
128 * or as a word. The entry with index 33 contains the vrsave as the first 141 * or as a word.
142 *
143 * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first
129 * word (offset 0) within the quadword. 144 * word (offset 0) within the quadword.
130 * 145 *
131 * This definition of the VMX state is compatible with the current PPC32 146 * This definition of the VMX state is compatible with the current PPC32
@@ -138,7 +153,9 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
138 */ 153 */
139typedef __vector128 elf_vrreg_t; 154typedef __vector128 elf_vrreg_t;
140typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG]; 155typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
156#ifdef __powerpc64__
141typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32]; 157typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
158#endif
142 159
143/* 160/*
144 * This is used to ensure we don't load something for the wrong architecture. 161 * This is used to ensure we don't load something for the wrong architecture.
@@ -146,7 +163,7 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
146#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH) 163#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
147 164
148#define USE_ELF_CORE_DUMP 165#define USE_ELF_CORE_DUMP
149#define ELF_EXEC_PAGESIZE 4096 166#define ELF_EXEC_PAGESIZE PAGE_SIZE
150 167
151/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 168/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
152 use of this is to invoke "./ld.so someprog" to test out a new version of 169 use of this is to invoke "./ld.so someprog" to test out a new version of
@@ -158,26 +175,30 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
158#ifdef __KERNEL__ 175#ifdef __KERNEL__
159 176
160/* Common routine for both 32-bit and 64-bit processes */ 177/* Common routine for both 32-bit and 64-bit processes */
161static inline void ppc64_elf_core_copy_regs(elf_gregset_t elf_regs, 178static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
162 struct pt_regs *regs) 179 struct pt_regs *regs)
163{ 180{
164 int i; 181 int i;
165 int gprs = sizeof(struct pt_regs)/sizeof(elf_greg_t64); 182 int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE);
166 183
167 if (gprs > ELF_NGREG) 184 if (gprs > ELF_NGREG)
168 gprs = ELF_NGREG; 185 gprs = ELF_NGREG;
169 186
170 for (i=0; i < gprs; i++) 187 for (i=0; i < gprs; i++)
171 elf_regs[i] = (elf_greg_t)((elf_greg_t64 *)regs)[i]; 188 elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
189
190 memset((char *)(elf_regs) + sizeof(struct pt_regs), 0, \
191 sizeof(elf_gregset_t) - sizeof(struct pt_regs));
192
172} 193}
173#define ELF_CORE_COPY_REGS(gregs, regs) ppc64_elf_core_copy_regs(gregs, regs); 194#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
174 195
175static inline int dump_task_regs(struct task_struct *tsk, 196static inline int dump_task_regs(struct task_struct *tsk,
176 elf_gregset_t *elf_regs) 197 elf_gregset_t *elf_regs)
177{ 198{
178 struct pt_regs *regs = tsk->thread.regs; 199 struct pt_regs *regs = tsk->thread.regs;
179 if (regs) 200 if (regs)
180 ppc64_elf_core_copy_regs(*elf_regs, regs); 201 ppc_elf_core_copy_regs(*elf_regs, regs);
181 202
182 return 1; 203 return 1;
183} 204}
@@ -186,15 +207,17 @@ static inline int dump_task_regs(struct task_struct *tsk,
186extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); 207extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
187#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 208#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
188 209
189/* XXX Should we define the XFPREGS using altivec ??? */ 210#endif /* __KERNEL__ */
190
191#endif
192 211
193/* This yields a mask that user programs can use to figure out what 212/* ELF_HWCAP yields a mask that user programs can use to figure out what
194 instruction set this cpu supports. This could be done in userspace, 213 instruction set this cpu supports. This could be done in userspace,
195 but it's not easy, and we've already done it here. */ 214 but it's not easy, and we've already done it here. */
196 215# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
197#define ELF_HWCAP (cur_cpu_spec->cpu_user_features) 216#ifdef __powerpc64__
217# define ELF_PLAT_INIT(_r, load_addr) do { \
218 _r->gpr[2] = load_addr; \
219} while (0)
220#endif /* __powerpc64__ */
198 221
199/* This yields a string that ld.so will use to load implementation 222/* This yields a string that ld.so will use to load implementation
200 specific libraries for optimization. This is more specific in 223 specific libraries for optimization. This is more specific in
@@ -205,14 +228,10 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
205 228
206#define ELF_PLATFORM (NULL) 229#define ELF_PLATFORM (NULL)
207 230
208#define ELF_PLAT_INIT(_r, load_addr) do { \
209 memset(_r->gpr, 0, sizeof(_r->gpr)); \
210 _r->ctr = _r->link = _r->xer = _r->ccr = 0; \
211 _r->gpr[2] = load_addr; \
212} while (0)
213
214#ifdef __KERNEL__ 231#ifdef __KERNEL__
215#define SET_PERSONALITY(ex, ibcs2) \ 232
233#ifdef __powerpc64__
234# define SET_PERSONALITY(ex, ibcs2) \
216do { \ 235do { \
217 unsigned long new_flags = 0; \ 236 unsigned long new_flags = 0; \
218 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 237 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
@@ -225,7 +244,6 @@ do { \
225 if (personality(current->personality) != PER_LINUX32) \ 244 if (personality(current->personality) != PER_LINUX32) \
226 set_personality(PER_LINUX); \ 245 set_personality(PER_LINUX); \
227} while (0) 246} while (0)
228
229/* 247/*
230 * An executable for which elf_read_implies_exec() returns TRUE will 248 * An executable for which elf_read_implies_exec() returns TRUE will
231 * have the READ_IMPLIES_EXEC personality flag set automatically. This 249 * have the READ_IMPLIES_EXEC personality flag set automatically. This
@@ -233,19 +251,26 @@ do { \
233 * the 64bit ABI has never had these issues don't enable the workaround 251 * the 64bit ABI has never had these issues don't enable the workaround
234 * even if we have an executable stack. 252 * even if we have an executable stack.
235 */ 253 */
236#define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \ 254# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
237 (exec_stk != EXSTACK_DISABLE_X) : 0) 255 (exec_stk != EXSTACK_DISABLE_X) : 0)
256#else
257# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
258#endif /* __powerpc64__ */
238 259
239#endif 260#endif /* __KERNEL__ */
240 261
241extern int dcache_bsize; 262extern int dcache_bsize;
242extern int icache_bsize; 263extern int icache_bsize;
243extern int ucache_bsize; 264extern int ucache_bsize;
244 265
245/* We do have an arch_setup_additional_pages for vDSO matters */ 266#ifdef __powerpc64__
246#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
247struct linux_binprm; 267struct linux_binprm;
268#define ARCH_HAS_SETUP_ADDITIONAL_PAGES /* vDSO has arch_setup_additional_pages */
248extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); 269extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack);
270#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
271#else
272#define VDSO_AUX_ENT(a,b)
273#endif /* __powerpc64__ */
249 274
250/* 275/*
251 * The requirements here are: 276 * The requirements here are:
@@ -265,9 +290,8 @@ do { \
265 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ 290 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
266 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ 291 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
267 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ 292 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
268 /* vDSO base */ \ 293 VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base) \
269 NEW_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base); \ 294} while (0)
270 } while (0)
271 295
272/* PowerPC64 relocations defined by the ABIs */ 296/* PowerPC64 relocations defined by the ABIs */
273#define R_PPC64_NONE R_PPC_NONE 297#define R_PPC64_NONE R_PPC_NONE
@@ -384,4 +408,4 @@ do { \
384/* Keep this the last entry. */ 408/* Keep this the last entry. */
385#define R_PPC64_NUM 107 409#define R_PPC64_NUM 107
386 410
387#endif /* __PPC64_ELF_H */ 411#endif /* _ASM_POWERPC_ELF_H */
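
The merged elf.h picks ELF_GREG_TYPE, ELF_ARCH and friends per architecture with __powerpc64__, and the renamed ppc_elf_core_copy_regs() copies only as many registers as pt_regs provides, then zeroes the rest of the elf_gregset_t so core dumps carry no stale data. A minimal user-space sketch of that copy-and-zero pattern (illustrative only; the 48-slot size mirrors ELF_NGREG, but none of these names are taken from the patch):

#include <string.h>

#define NGREG 48                        /* stands in for ELF_NGREG */
typedef unsigned long greg_t;           /* stands in for ELF_GREG_TYPE */

static void copy_and_zero(greg_t dst[NGREG], const greg_t *src, int navail)
{
        int i, n = navail < NGREG ? navail : NGREG;

        for (i = 0; i < n; i++)         /* copy what the register frame provides */
                dst[i] = src[i];
        /* zero the tail, as the added memset() in the hunk above does */
        memset(dst + n, 0, (NGREG - n) * sizeof(greg_t));
}
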
diff --git a/include/asm-ppc64/firmware.h b/include/asm-powerpc/firmware.h
index 22bb85cf60af..806c142ae9ea 100644
--- a/include/asm-ppc64/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -1,8 +1,4 @@
1/* 1/*
2 * include/asm-ppc64/firmware.h
3 *
4 * Extracted from include/asm-ppc64/cputable.h
5 *
6 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) 2 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
7 * 3 *
8 * Modifications for ppc64: 4 * Modifications for ppc64:
@@ -13,8 +9,8 @@
13 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
15 */ 11 */
16#ifndef __ASM_PPC_FIRMWARE_H 12#ifndef __ASM_POWERPC_FIRMWARE_H
17#define __ASM_PPC_FIRMWARE_H 13#define __ASM_POWERPC_FIRMWARE_H
18 14
19#ifdef __KERNEL__ 15#ifdef __KERNEL__
20 16
@@ -98,4 +94,4 @@ extern firmware_feature_t firmware_features_table[];
98 94
99#endif /* __ASSEMBLY__ */ 95#endif /* __ASSEMBLY__ */
100#endif /* __KERNEL__ */ 96#endif /* __KERNEL__ */
101#endif /* __ASM_PPC_FIRMWARE_H */ 97#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/include/asm-powerpc/grackle.h b/include/asm-powerpc/grackle.h
new file mode 100644
index 000000000000..563c7a5e64c9
--- /dev/null
+++ b/include/asm-powerpc/grackle.h
@@ -0,0 +1,7 @@
1/*
2 * Functions for setting up and using a MPC106 northbridge
3 */
4
5#include <asm/pci-bridge.h>
6
7extern void setup_grackle(struct pci_controller *hose);
diff --git a/include/asm-ppc/hardirq.h b/include/asm-powerpc/hardirq.h
index 94f1411b1a93..3b3e3b49ec12 100644
--- a/include/asm-ppc/hardirq.h
+++ b/include/asm-powerpc/hardirq.h
@@ -1,11 +1,8 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_POWERPC_HARDIRQ_H
2#ifndef __ASM_HARDIRQ_H 2#define _ASM_POWERPC_HARDIRQ_H
3#define __ASM_HARDIRQ_H
4 3
5#include <linux/config.h>
6#include <linux/cache.h>
7#include <linux/smp_lock.h>
8#include <asm/irq.h> 4#include <asm/irq.h>
5#include <asm/bug.h>
9 6
10/* The __last_jiffy_stamp field is needed to ensure that no decrementer 7/* The __last_jiffy_stamp field is needed to ensure that no decrementer
11 * interrupt is lost on SMP machines. Since on most CPUs it is in the same 8 * interrupt is lost on SMP machines. Since on most CPUs it is in the same
@@ -13,7 +10,7 @@
13 * for uniformity. 10 * for uniformity.
14 */ 11 */
15typedef struct { 12typedef struct {
16 unsigned long __softirq_pending; /* set_bit is used on this */ 13 unsigned int __softirq_pending; /* set_bit is used on this */
17 unsigned int __last_jiffy_stamp; 14 unsigned int __last_jiffy_stamp;
18} ____cacheline_aligned irq_cpustat_t; 15} ____cacheline_aligned irq_cpustat_t;
19 16
@@ -27,5 +24,4 @@ static inline void ack_bad_irq(int irq)
27 BUG(); 24 BUG();
28} 25}
29 26
30#endif /* __ASM_HARDIRQ_H */ 27#endif /* _ASM_POWERPC_HARDIRQ_H */
31#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/heathrow.h b/include/asm-powerpc/heathrow.h
index 22ac179856b9..22ac179856b9 100644
--- a/include/asm-ppc/heathrow.h
+++ b/include/asm-powerpc/heathrow.h
diff --git a/include/asm-ppc64/hw_irq.h b/include/asm-powerpc/hw_irq.h
index baea40e695ec..c37b31b96337 100644
--- a/include/asm-ppc64/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -1,22 +1,17 @@
1/* 1/*
2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> 2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3 *
4 * Use inline IRQs where possible - Anton Blanchard <anton@au.ibm.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */ 3 */
4#ifndef _ASM_POWERPC_HW_IRQ_H
5#define _ASM_POWERPC_HW_IRQ_H
6
11#ifdef __KERNEL__ 7#ifdef __KERNEL__
12#ifndef _PPC64_HW_IRQ_H
13#define _PPC64_HW_IRQ_H
14 8
15#include <linux/config.h> 9#include <linux/config.h>
16#include <linux/errno.h> 10#include <linux/errno.h>
17#include <asm/irq.h> 11#include <asm/ptrace.h>
12#include <asm/processor.h>
18 13
19int timer_interrupt(struct pt_regs *); 14extern void timer_interrupt(struct pt_regs *);
20extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq); 15extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
21 16
22#ifdef CONFIG_PPC_ISERIES 17#ifdef CONFIG_PPC_ISERIES
@@ -33,45 +28,60 @@ extern void local_irq_restore(unsigned long);
33 28
34#else 29#else
35 30
36#define local_save_flags(flags) ((flags) = mfmsr()) 31#if defined(CONFIG_BOOKE)
32#define SET_MSR_EE(x) mtmsr(x)
33#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
34#elif defined(__powerpc64__)
35#define SET_MSR_EE(x) __mtmsrd(x, 1)
37#define local_irq_restore(flags) do { \ 36#define local_irq_restore(flags) do { \
38 __asm__ __volatile__("": : :"memory"); \ 37 __asm__ __volatile__("": : :"memory"); \
39 __mtmsrd((flags), 1); \ 38 __mtmsrd((flags), 1); \
40} while(0) 39} while(0)
40#else
41#define SET_MSR_EE(x) mtmsr(x)
42#define local_irq_restore(flags) mtmsr(flags)
43#endif
41 44
42static inline void local_irq_disable(void) 45static inline void local_irq_disable(void)
43{ 46{
47#ifdef CONFIG_BOOKE
48 __asm__ __volatile__("wrteei 0": : :"memory");
49#else
44 unsigned long msr; 50 unsigned long msr;
45 msr = mfmsr();
46 __mtmsrd(msr & ~MSR_EE, 1);
47 __asm__ __volatile__("": : :"memory"); 51 __asm__ __volatile__("": : :"memory");
52 msr = mfmsr();
53 SET_MSR_EE(msr & ~MSR_EE);
54#endif
48} 55}
49 56
50static inline void local_irq_enable(void) 57static inline void local_irq_enable(void)
51{ 58{
59#ifdef CONFIG_BOOKE
60 __asm__ __volatile__("wrteei 1": : :"memory");
61#else
52 unsigned long msr; 62 unsigned long msr;
53 __asm__ __volatile__("": : :"memory"); 63 __asm__ __volatile__("": : :"memory");
54 msr = mfmsr(); 64 msr = mfmsr();
55 __mtmsrd(msr | MSR_EE, 1); 65 SET_MSR_EE(msr | MSR_EE);
66#endif
56} 67}
57 68
58static inline void __do_save_and_cli(unsigned long *flags) 69static inline void local_irq_save_ptr(unsigned long *flags)
59{ 70{
60 unsigned long msr; 71 unsigned long msr;
61 msr = mfmsr(); 72 msr = mfmsr();
62 *flags = msr; 73 *flags = msr;
63 __mtmsrd(msr & ~MSR_EE, 1); 74#ifdef CONFIG_BOOKE
75 __asm__ __volatile__("wrteei 0": : :"memory");
76#else
77 SET_MSR_EE(msr & ~MSR_EE);
78#endif
64 __asm__ __volatile__("": : :"memory"); 79 __asm__ __volatile__("": : :"memory");
65} 80}
66 81
67#define local_irq_save(flags) __do_save_and_cli(&flags) 82#define local_save_flags(flags) ((flags) = mfmsr())
68 83#define local_irq_save(flags) local_irq_save_ptr(&flags)
69#define irqs_disabled() \ 84#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
70({ \
71 unsigned long flags; \
72 local_save_flags(flags); \
73 !(flags & MSR_EE); \
74})
75 85
76#endif /* CONFIG_PPC_ISERIES */ 86#endif /* CONFIG_PPC_ISERIES */
77 87
@@ -99,6 +109,6 @@ static inline void __do_save_and_cli(unsigned long *flags)
99 */ 109 */
100struct hw_interrupt_type; 110struct hw_interrupt_type;
101static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} 111static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
102 112
103#endif /* _PPC64_HW_IRQ_H */ 113#endif /* __KERNEL__ */
104#endif /* __KERNEL__ */ 114#endif /* _ASM_POWERPC_HW_IRQ_H */
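
The rewritten local_irq_* helpers pick the right MSR[EE] mechanism per core family (wrteei on Book E, mtmsr on classic 32-bit, __mtmsrd on 64-bit), while callers keep the usual save/restore idiom. A hedged sketch of that caller-side pattern (the include path and the counter are illustrative; in a real kernel these macros normally arrive via the generic interrupt headers):

#include <asm/hw_irq.h>

static unsigned long shared_count;

static void bump_shared_count(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* save MSR, clear MSR[EE] */
        shared_count++;                 /* short critical section  */
        local_irq_restore(flags);       /* restore MSR[EE] as it was */
}
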
diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h
new file mode 100644
index 000000000000..fc4bfee124d7
--- /dev/null
+++ b/include/asm-powerpc/i8259.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_POWERPC_I8259_H
2#define _ASM_POWERPC_I8259_H
3
4#include <linux/irq.h>
5
6extern struct hw_interrupt_type i8259_pic;
7
8extern void i8259_init(unsigned long intack_addr, int offset);
9extern int i8259_irq(struct pt_regs *regs);
10extern int i8259_irq_cascade(struct pt_regs *regs, void *unused);
11
12#endif /* _ASM_POWERPC_I8259_H */
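
The new i8259.h simply exports the cascaded-8259 driver. A hedged sketch of how board code might use these prototypes (the interrupt-acknowledge address and offset below are placeholders, not values from the patch):

#include <linux/init.h>
#include <asm/i8259.h>

#define BOARD_8259_INTACK 0xfef00000UL  /* placeholder; real boards get this from firmware */
#define BOARD_8259_OFFSET 0             /* first irq number handed to the 8259 */

static void __init board_init_IRQ(void)
{
        i8259_init(BOARD_8259_INTACK, BOARD_8259_OFFSET);
}

static int board_get_irq(struct pt_regs *regs)
{
        return i8259_irq(regs);         /* ask the 8259 which line fired */
}
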
diff --git a/include/asm-ppc64/iommu.h b/include/asm-powerpc/iommu.h
index c2f3b6e8a42f..9d91bdd667ae 100644
--- a/include/asm-ppc64/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * iommu.h
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 * Rewrite, cleanup: 3 * Rewrite, cleanup:
5 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation 4 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
@@ -22,6 +21,7 @@
22#ifndef _ASM_IOMMU_H 21#ifndef _ASM_IOMMU_H
23#define _ASM_IOMMU_H 22#define _ASM_IOMMU_H
24 23
24#include <linux/config.h>
25#include <asm/types.h> 25#include <asm/types.h>
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/device.h> 27#include <linux/device.h>
@@ -29,44 +29,11 @@
29 29
30/* 30/*
31 * IOMAP_MAX_ORDER defines the largest contiguous block 31 * IOMAP_MAX_ORDER defines the largest contiguous block
32 * of dma (tce) space we can get. IOMAP_MAX_ORDER = 13 32 * of dma space we can get. IOMAP_MAX_ORDER = 13
33 * allows up to 2**12 pages (4096 * 4096) = 16 MB 33 * allows up to 2**12 pages (4096 * 4096) = 16 MB
34 */ 34 */
35#define IOMAP_MAX_ORDER 13 35#define IOMAP_MAX_ORDER 13
36 36
37/*
38 * Tces come in two formats, one for the virtual bus and a different
39 * format for PCI
40 */
41#define TCE_VB 0
42#define TCE_PCI 1
43
44/* tce_entry
45 * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
46 * abstracted so layout is irrelevant.
47 */
48union tce_entry {
49 unsigned long te_word;
50 struct {
51 unsigned int tb_cacheBits :6; /* Cache hash bits - not used */
52 unsigned int tb_rsvd :6;
53 unsigned long tb_rpn :40; /* Real page number */
54 unsigned int tb_valid :1; /* Tce is valid (vb only) */
55 unsigned int tb_allio :1; /* Tce is valid for all lps (vb only) */
56 unsigned int tb_lpindex :8; /* LpIndex for user of TCE (vb only) */
57 unsigned int tb_pciwr :1; /* Write allowed (pci only) */
58 unsigned int tb_rdwr :1; /* Read allowed (pci), Write allowed (vb) */
59 } te_bits;
60#define te_cacheBits te_bits.tb_cacheBits
61#define te_rpn te_bits.tb_rpn
62#define te_valid te_bits.tb_valid
63#define te_allio te_bits.tb_allio
64#define te_lpindex te_bits.tb_lpindex
65#define te_pciwr te_bits.tb_pciwr
66#define te_rdwr te_bits.tb_rdwr
67};
68
69
70struct iommu_table { 37struct iommu_table {
71 unsigned long it_busno; /* Bus number this table belongs to */ 38 unsigned long it_busno; /* Bus number this table belongs to */
72 unsigned long it_size; /* Size of iommu table in entries */ 39 unsigned long it_size; /* Size of iommu table in entries */
@@ -83,6 +50,7 @@ struct iommu_table {
83}; 50};
84 51
85struct scatterlist; 52struct scatterlist;
53struct device_node;
86 54
87#ifdef CONFIG_PPC_MULTIPLATFORM 55#ifdef CONFIG_PPC_MULTIPLATFORM
88 56
@@ -104,9 +72,8 @@ extern void iommu_devnode_init_pSeries(struct device_node *dn);
104 72
105#ifdef CONFIG_PPC_ISERIES 73#ifdef CONFIG_PPC_ISERIES
106 74
107struct iSeries_Device_Node;
108/* Creates table for an individual device node */ 75/* Creates table for an individual device node */
109extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn); 76extern void iommu_devnode_init_iSeries(struct device_node *dn);
110 77
111#endif /* CONFIG_PPC_ISERIES */ 78#endif /* CONFIG_PPC_ISERIES */
112 79
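
For reference, the size quoted in the IOMAP_MAX_ORDER comment works out as 2^12 pages x 4096 bytes per page = 4096 x 4096 = 16,777,216 bytes = 16 MB (assuming 4 KB pages, as the comment does), i.e. the largest single contiguous DMA mapping the table-based IOMMU will hand out.
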
diff --git a/include/asm-ppc/irq.h b/include/asm-powerpc/irq.h
index bd9674807f05..c7c3f912a3c2 100644
--- a/include/asm-ppc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -1,11 +1,23 @@
1#ifdef __KERNEL__ 1#ifdef __KERNEL__
2#ifndef _ASM_IRQ_H 2#ifndef _ASM_POWERPC_IRQ_H
3#define _ASM_IRQ_H 3#define _ASM_POWERPC_IRQ_H
4
5/*
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
4 11
5#include <linux/config.h> 12#include <linux/config.h>
6#include <asm/machdep.h> /* ppc_md */ 13#include <linux/threads.h>
14
15#include <asm/types.h>
7#include <asm/atomic.h> 16#include <asm/atomic.h>
8 17
18/* this number is used when no interrupt has been assigned */
19#define NO_IRQ (-1)
20
9/* 21/*
10 * These constants are used for passing information about interrupt 22 * These constants are used for passing information about interrupt
11 * signal polarity and level/edge sensing to the low-level PIC chip 23 * signal polarity and level/edge sensing to the low-level PIC chip
@@ -24,6 +36,50 @@
24 */ 36 */
25#define ARCH_HAS_IRQ_PER_CPU 37#define ARCH_HAS_IRQ_PER_CPU
26 38
39#define get_irq_desc(irq) (&irq_desc[(irq)])
40
41/* Define a way to iterate across irqs. */
42#define for_each_irq(i) \
43 for ((i) = 0; (i) < NR_IRQS; ++(i))
44
45#ifdef CONFIG_PPC64
46
47/*
48 * Maximum number of interrupt sources that we can handle.
49 */
50#define NR_IRQS 512
51
52/* Interrupt numbers are virtual in case they are sparsely
53 * distributed by the hardware.
54 */
55extern unsigned int virt_irq_to_real_map[NR_IRQS];
56
57/* Create a mapping for a real_irq if it doesn't already exist.
58 * Return the virtual irq as a convenience.
59 */
60int virt_irq_create_mapping(unsigned int real_irq);
61void virt_irq_init(void);
62
63static inline unsigned int virt_irq_to_real(unsigned int virt_irq)
64{
65 return virt_irq_to_real_map[virt_irq];
66}
67
68extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
69
70/*
71 * List of interrupt controllers.
72 */
73#define IC_INVALID 0
74#define IC_OPEN_PIC 1
75#define IC_PPC_XIC 2
76#define IC_BPA_IIC 3
77#define IC_ISERIES 4
78
79extern u64 ppc64_interrupt_controller;
80
81#else /* 32-bit */
82
27#if defined(CONFIG_40x) 83#if defined(CONFIG_40x)
28#include <asm/ibm4xx.h> 84#include <asm/ibm4xx.h>
29 85
@@ -66,11 +122,6 @@
66#define NR_UIC_IRQS UIC_WIDTH 122#define NR_UIC_IRQS UIC_WIDTH
67#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) 123#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
68#endif 124#endif
69static __inline__ int
70irq_canonicalize(int irq)
71{
72 return (irq);
73}
74 125
75#elif defined(CONFIG_44x) 126#elif defined(CONFIG_44x)
76#include <asm/ibm44x.h> 127#include <asm/ibm44x.h>
@@ -78,12 +129,6 @@ irq_canonicalize(int irq)
78#define NR_UIC_IRQS 32 129#define NR_UIC_IRQS 32
79#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) 130#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
80 131
81static __inline__ int
82irq_canonicalize(int irq)
83{
84 return (irq);
85}
86
87#elif defined(CONFIG_8xx) 132#elif defined(CONFIG_8xx)
88 133
89/* Now include the board configuration specific associations. 134/* Now include the board configuration specific associations.
@@ -170,20 +215,9 @@ irq_canonicalize(int irq)
170 */ 215 */
171#define mk_int_int_mask(IL) (1 << (7 - (IL/2))) 216#define mk_int_int_mask(IL) (1 << (7 - (IL/2)))
172 217
173/* always the same on 8xx -- Cort */
174static __inline__ int irq_canonicalize(int irq)
175{
176 return irq;
177}
178
179#elif defined(CONFIG_83xx) 218#elif defined(CONFIG_83xx)
180#include <asm/mpc83xx.h> 219#include <asm/mpc83xx.h>
181 220
182static __inline__ int irq_canonicalize(int irq)
183{
184 return irq;
185}
186
187#define NR_IRQS (NR_IPIC_INTS) 221#define NR_IRQS (NR_IPIC_INTS)
188 222
189#elif defined(CONFIG_85xx) 223#elif defined(CONFIG_85xx)
@@ -307,17 +341,13 @@ static __inline__ int irq_canonicalize(int irq)
307#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET) 341#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET)
308#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET) 342#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET)
309 343
310static __inline__ int irq_canonicalize(int irq)
311{
312 return irq;
313}
314
315#else /* CONFIG_40x + CONFIG_8xx */ 344#else /* CONFIG_40x + CONFIG_8xx */
316/* 345/*
317 * this is the # irq's for all ppc arch's (pmac/chrp/prep) 346 * this is the # irq's for all ppc arch's (pmac/chrp/prep)
318 * so it is the max of them all 347 * so it is the max of them all
319 */ 348 */
320#define NR_IRQS 256 349#define NR_IRQS 256
350#define __DO_IRQ_CANON 1
321 351
322#ifndef CONFIG_8260 352#ifndef CONFIG_8260
323 353
@@ -394,25 +424,81 @@ static __inline__ int irq_canonicalize(int irq)
394 424
395#endif /* CONFIG_8260 */ 425#endif /* CONFIG_8260 */
396 426
427#endif
428
429#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
430/* pedantic: these are long because they are used with set_bit --RR */
431extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
432extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
433extern atomic_t ppc_n_lost_interrupts;
434
435#define virt_irq_create_mapping(x) (x)
436
437#endif
438
397/* 439/*
398 * This gets called from serial.c, which is now used on 440 * Because many systems have two overlapping name spaces for

399 * powermacs as well as prep/chrp boxes. 441 * interrupts (ISA and XICS for example), and the ISA interrupts
400 * Prep and chrp both have cascaded 8259 PICs. 442 * have historically not been easy to renumber, we allow ISA
443 * interrupts to take values 0 - 15, and shift up the remaining
444 * interrupts by 0x10.
401 */ 445 */
446#define NUM_ISA_INTERRUPTS 0x10
447extern int __irq_offset_value;
448
449static inline int irq_offset_up(int irq)
450{
451 return(irq + __irq_offset_value);
452}
453
454static inline int irq_offset_down(int irq)
455{
456 return(irq - __irq_offset_value);
457}
458
459static inline int irq_offset_value(void)
460{
461 return __irq_offset_value;
462}
463
464#ifdef __DO_IRQ_CANON
465extern int ppc_do_canonicalize_irqs;
466#else
467#define ppc_do_canonicalize_irqs 0
468#endif
469
402static __inline__ int irq_canonicalize(int irq) 470static __inline__ int irq_canonicalize(int irq)
403{ 471{
404 if (ppc_md.irq_canonicalize) 472 if (ppc_do_canonicalize_irqs && irq == 2)
405 return ppc_md.irq_canonicalize(irq); 473 irq = 9;
406 return irq; 474 return irq;
407} 475}
408 476
409#endif 477extern int distribute_irqs;
410 478
411#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 479struct irqaction;
412/* pedantic: these are long because they are used with set_bit --RR */ 480struct pt_regs;
413extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 481
414extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; 482#ifdef CONFIG_IRQSTACKS
415extern atomic_t ppc_n_lost_interrupts; 483/*
484 * Per-cpu stacks for handling hard and soft interrupts.
485 */
486extern struct thread_info *hardirq_ctx[NR_CPUS];
487extern struct thread_info *softirq_ctx[NR_CPUS];
488
489extern void irq_ctx_init(void);
490extern void call_do_softirq(struct thread_info *tp);
491extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
492 struct irqaction *action, struct thread_info *tp);
493
494#define __ARCH_HAS_DO_SOFTIRQ
495
496#else
497#define irq_ctx_init()
498
499#endif /* CONFIG_IRQSTACKS */
500
501extern void do_IRQ(struct pt_regs *regs);
416 502
417#endif /* _ASM_IRQ_H */ 503#endif /* _ASM_IRQ_H */
418#endif /* __KERNEL__ */ 504#endif /* __KERNEL__ */
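
The merged irq.h drops the per-platform irq_canonicalize() copies: the single inline above remaps IRQ 2 to 9 only when ppc_do_canonicalize_irqs is set (the classic cascaded-8259 quirk), and the ISA name-space comment is backed by the irq_offset_up/down helpers. A hedged sketch of the offset scheme in use (it assumes the platform set __irq_offset_value to NUM_ISA_INTERRUPTS, i.e. 0x10; nothing here is from the patch itself):

#include <asm/irq.h>

static int example_offsets(void)
{
        int isa_line = 7;                        /* legacy ISA irqs keep 0..15   */
        int pic_src  = 3;                        /* source 3 on the platform PIC */
        int pic_virq = irq_offset_up(pic_src);   /* 0x13, clear of the ISA range */

        /* PIC drivers map back down before touching their registers */
        return irq_offset_down(pic_virq) == pic_src && isa_line < NUM_ISA_INTERRUPTS;
}
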
diff --git a/include/asm-ppc64/kdebug.h b/include/asm-powerpc/kdebug.h
index d383d161cf8d..9dcbac674811 100644
--- a/include/asm-ppc64/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_KDEBUG_H 1#ifndef _ASM_POWERPC_KDEBUG_H
2#define _PPC64_KDEBUG_H 1 2#define _ASM_POWERPC_KDEBUG_H
3 3
4/* nearly identical to x86_64/i386 code */ 4/* nearly identical to x86_64/i386 code */
5 5
@@ -21,7 +21,7 @@ struct die_args {
21 then free. 21 then free.
22 */ 22 */
23int register_die_notifier(struct notifier_block *nb); 23int register_die_notifier(struct notifier_block *nb);
24extern struct notifier_block *ppc64_die_chain; 24extern struct notifier_block *powerpc_die_chain;
25 25
26/* Grossly misnamed. */ 26/* Grossly misnamed. */
27enum die_val { 27enum die_val {
@@ -30,14 +30,13 @@ enum die_val {
30 DIE_DABR_MATCH, 30 DIE_DABR_MATCH,
31 DIE_BPT, 31 DIE_BPT,
32 DIE_SSTEP, 32 DIE_SSTEP,
33 DIE_GPF,
34 DIE_PAGE_FAULT, 33 DIE_PAGE_FAULT,
35}; 34};
36 35
37static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) 36static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
38{ 37{
39 struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; 38 struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
40 return notifier_call_chain(&ppc64_die_chain, val, &args); 39 return notifier_call_chain(&powerpc_die_chain, val, &args);
41} 40}
42 41
43#endif 42#endif /* _ASM_POWERPC_KDEBUG_H */
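
Functionally this is just the ppc64_die_chain to powerpc_die_chain rename; clients still hook in through register_die_notifier(). A hedged sketch of a minimal client of that interface (the handler and message are invented):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

static int my_die_event(struct notifier_block *nb, unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val == DIE_PAGE_FAULT)
                printk(KERN_DEBUG "die: %s, trap %d\n", args->str, args->trapnr);
        return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
        .notifier_call = my_die_event,
};

static int __init my_debugger_init(void)
{
        return register_die_notifier(&my_die_nb);
}
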
diff --git a/include/asm-ppc/keylargo.h b/include/asm-powerpc/keylargo.h
index a669a3f0f5a2..a669a3f0f5a2 100644
--- a/include/asm-ppc/keylargo.h
+++ b/include/asm-powerpc/keylargo.h
diff --git a/include/asm-powerpc/kmap_types.h b/include/asm-powerpc/kmap_types.h
new file mode 100644
index 000000000000..b6bac6f61c16
--- /dev/null
+++ b/include/asm-powerpc/kmap_types.h
@@ -0,0 +1,33 @@
1#ifndef _ASM_POWERPC_KMAP_TYPES_H
2#define _ASM_POWERPC_KMAP_TYPES_H
3
4#ifdef __KERNEL__
5
6/*
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13enum km_type {
14 KM_BOUNCE_READ,
15 KM_SKB_SUNRPC_DATA,
16 KM_SKB_DATA_SOFTIRQ,
17 KM_USER0,
18 KM_USER1,
19 KM_BIO_SRC_IRQ,
20 KM_BIO_DST_IRQ,
21 KM_PTE0,
22 KM_PTE1,
23 KM_IRQ0,
24 KM_IRQ1,
25 KM_SOFTIRQ0,
26 KM_SOFTIRQ1,
27 KM_PPC_SYNC_PAGE,
28 KM_PPC_SYNC_ICACHE,
29 KM_TYPE_NR
30};
31
32#endif /* __KERNEL__ */
33#endif /* _ASM_POWERPC_KMAP_TYPES_H */
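
The KM_PPC_SYNC_PAGE and KM_PPC_SYNC_ICACHE slots exist for the PowerPC cache-flush paths on highmem pages. A hedged sketch of the usual atomic-kmap pairing that consumes such a slot (illustrative only; the real flush code lives in the mm layer):

#include <linux/highmem.h>

static void touch_highmem_page(struct page *page)
{
        /* temporarily map the page into the per-CPU KM_PPC_SYNC_PAGE slot */
        char *vaddr = kmap_atomic(page, KM_PPC_SYNC_PAGE);

        vaddr[0] = 0;                           /* ... per-page work here ... */
        kunmap_atomic(vaddr, KM_PPC_SYNC_PAGE); /* drop the temporary mapping */
}
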
diff --git a/include/asm-ppc64/kprobes.h b/include/asm-powerpc/kprobes.h
index d9129d2b038e..b2f09f17fbe0 100644
--- a/include/asm-ppc64/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -1,8 +1,7 @@
1#ifndef _ASM_KPROBES_H 1#ifndef _ASM_POWERPC_KPROBES_H
2#define _ASM_KPROBES_H 2#define _ASM_POWERPC_KPROBES_H
3/* 3/*
4 * Kernel Probes (KProbes) 4 * Kernel Probes (KProbes)
5 * include/asm-ppc64/kprobes.h
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -64,4 +63,4 @@ static inline int kprobe_exceptions_notify(struct notifier_block *self,
64 return 0; 63 return 0;
65} 64}
66#endif 65#endif
67#endif /* _ASM_KPROBES_H */ 66#endif /* _ASM_POWERPC_KPROBES_H */
diff --git a/include/asm-ppc64/lmb.h b/include/asm-powerpc/lmb.h
index de91e034bd98..ea0afe343545 100644
--- a/include/asm-ppc64/lmb.h
+++ b/include/asm-powerpc/lmb.h
@@ -50,7 +50,7 @@ extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
50extern unsigned long __init lmb_phys_mem_size(void); 50extern unsigned long __init lmb_phys_mem_size(void);
51extern unsigned long __init lmb_end_of_DRAM(void); 51extern unsigned long __init lmb_end_of_DRAM(void);
52extern unsigned long __init lmb_abs_to_phys(unsigned long); 52extern unsigned long __init lmb_abs_to_phys(unsigned long);
53extern void __init lmb_enforce_memory_limit(void); 53extern void __init lmb_enforce_memory_limit(unsigned long);
54 54
55extern void lmb_dump_all(void); 55extern void lmb_dump_all(void);
56 56
diff --git a/include/asm-ppc64/machdep.h b/include/asm-powerpc/machdep.h
index 8027160ec96d..451b345cfc78 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -1,6 +1,6 @@
1#ifndef _ASM_POWERPC_MACHDEP_H
2#define _ASM_POWERPC_MACHDEP_H
1#ifdef __KERNEL__ 3#ifdef __KERNEL__
2#ifndef _PPC64_MACHDEP_H
3#define _PPC64_MACHDEP_H
4 4
5/* 5/*
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -16,6 +16,11 @@
16 16
17#include <asm/setup.h> 17#include <asm/setup.h>
18 18
19/* We export this macro for external modules like Alsa to know if
20 * ppc_md.feature_call is implemented or not
21 */
22#define CONFIG_PPC_HAS_FEATURE_CALLS
23
19struct pt_regs; 24struct pt_regs;
20struct pci_bus; 25struct pci_bus;
21struct device_node; 26struct device_node;
@@ -39,6 +44,7 @@ struct smp_ops_t {
39#endif 44#endif
40 45
41struct machdep_calls { 46struct machdep_calls {
47#ifdef CONFIG_PPC64
42 void (*hpte_invalidate)(unsigned long slot, 48 void (*hpte_invalidate)(unsigned long slot,
43 unsigned long va, 49 unsigned long va,
44 int large, 50 int large,
@@ -56,9 +62,8 @@ struct machdep_calls {
56 unsigned long vflags, 62 unsigned long vflags,
57 unsigned long rflags); 63 unsigned long rflags);
58 long (*hpte_remove)(unsigned long hpte_group); 64 long (*hpte_remove)(unsigned long hpte_group);
59 void (*flush_hash_range)(unsigned long context, 65 void (*flush_hash_range)(unsigned long number, int local);
60 unsigned long number, 66
61 int local);
62 /* special for kexec, to be called in real mode, linear mapping is 67 /* special for kexec, to be called in real mode, linear mapping is
63 * destroyed as well */ 68 * destroyed as well */
64 void (*hpte_clear_all)(void); 69 void (*hpte_clear_all)(void);
@@ -75,18 +80,21 @@ struct machdep_calls {
75 void (*iommu_dev_setup)(struct pci_dev *dev); 80 void (*iommu_dev_setup)(struct pci_dev *dev);
76 void (*iommu_bus_setup)(struct pci_bus *bus); 81 void (*iommu_bus_setup)(struct pci_bus *bus);
77 void (*irq_bus_setup)(struct pci_bus *bus); 82 void (*irq_bus_setup)(struct pci_bus *bus);
83#endif
78 84
79 int (*probe)(int platform); 85 int (*probe)(int platform);
80 void (*setup_arch)(void); 86 void (*setup_arch)(void);
81 void (*init_early)(void); 87 void (*init_early)(void);
82 /* Optional, may be NULL. */ 88 /* Optional, may be NULL. */
83 void (*get_cpuinfo)(struct seq_file *m); 89 void (*show_cpuinfo)(struct seq_file *m);
90 void (*show_percpuinfo)(struct seq_file *m, int i);
84 91
85 void (*init_IRQ)(void); 92 void (*init_IRQ)(void);
86 int (*get_irq)(struct pt_regs *); 93 int (*get_irq)(struct pt_regs *);
87 void (*cpu_irq_down)(int secondary); 94 void (*cpu_irq_down)(int secondary);
88 95
89 /* PCI stuff */ 96 /* PCI stuff */
97 /* Called after scanning the bus, before allocating resources */
90 void (*pcibios_fixup)(void); 98 void (*pcibios_fixup)(void);
91 int (*pci_probe_mode)(struct pci_bus *); 99 int (*pci_probe_mode)(struct pci_bus *);
92 100
@@ -96,9 +104,13 @@ struct machdep_calls {
96 void (*panic)(char *str); 104 void (*panic)(char *str);
97 void (*cpu_die)(void); 105 void (*cpu_die)(void);
98 106
107 long (*time_init)(void); /* Optional, may be NULL */
108
99 int (*set_rtc_time)(struct rtc_time *); 109 int (*set_rtc_time)(struct rtc_time *);
100 void (*get_rtc_time)(struct rtc_time *); 110 void (*get_rtc_time)(struct rtc_time *);
101 void (*get_boot_time)(struct rtc_time *); 111 unsigned long (*get_boot_time)(void);
112 unsigned char (*rtc_read_val)(int addr);
113 void (*rtc_write_val)(int addr, unsigned char val);
102 114
103 void (*calibrate_decr)(void); 115 void (*calibrate_decr)(void);
104 116
@@ -107,10 +119,12 @@ struct machdep_calls {
107 /* Interface for platform error logging */ 119 /* Interface for platform error logging */
108 void (*log_error)(char *buf, unsigned int err_type, int fatal); 120 void (*log_error)(char *buf, unsigned int err_type, int fatal);
109 121
122 unsigned char (*nvram_read_val)(int addr);
123 void (*nvram_write_val)(int addr, unsigned char val);
110 ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index); 124 ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
111 ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index); 125 ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
112 ssize_t (*nvram_size)(void); 126 ssize_t (*nvram_size)(void);
113 int (*nvram_sync)(void); 127 void (*nvram_sync)(void);
114 128
115 /* Exception handlers */ 129 /* Exception handlers */
116 void (*system_reset_exception)(struct pt_regs *regs); 130 void (*system_reset_exception)(struct pt_regs *regs);
@@ -130,19 +144,97 @@ struct machdep_calls {
130 144
131 /* Get access protection for /dev/mem */ 145 /* Get access protection for /dev/mem */
132 pgprot_t (*phys_mem_access_prot)(struct file *file, 146 pgprot_t (*phys_mem_access_prot)(struct file *file,
133 unsigned long offset, 147 unsigned long pfn,
134 unsigned long size, 148 unsigned long size,
135 pgprot_t vma_prot); 149 pgprot_t vma_prot);
136 150
137 /* Idle loop for this platform, leave empty for default idle loop */ 151 /* Idle loop for this platform, leave empty for default idle loop */
138 int (*idle_loop)(void); 152 void (*idle_loop)(void);
139 153
140 /* Function to enable pmcs for this platform, called once per cpu. */ 154 /* Function to enable performance monitor counters for this
155 platform, called once per cpu. */
141 void (*enable_pmcs)(void); 156 void (*enable_pmcs)(void);
157
158#ifdef CONFIG_PPC32 /* XXX for now */
159 /* A general init function, called by ppc_init in init/main.c.
160 May be NULL. */
161 void (*init)(void);
162
163 void (*idle)(void);
164 void (*power_save)(void);
165
166 void (*heartbeat)(void);
167 unsigned long heartbeat_reset;
168 unsigned long heartbeat_count;
169
170 void (*setup_io_mappings)(void);
171
172 void (*early_serial_map)(void);
173 void (*kgdb_map_scc)(void);
174
175 /*
176 * optional PCI "hooks"
177 */
178
179 /* Called after PPC generic resource fixup to perform
180 machine specific fixups */
181 void (*pcibios_fixup_resources)(struct pci_dev *);
182
183 /* Called for each PCI bus in the system when it's probed */
184 void (*pcibios_fixup_bus)(struct pci_bus *);
185
186 /* Called when pci_enable_device() is called (initial=0) or
187 * when a device with no assigned resource is found (initial=1).
188 * Returns 0 to allow assignment/enabling of the device. */
189 int (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
190
191 /* For interrupt routing */
192 unsigned char (*pci_swizzle)(struct pci_dev *, unsigned char *);
193 int (*pci_map_irq)(struct pci_dev *, unsigned char, unsigned char);
194
195 /* Called in indirect_* to avoid touching devices */
196 int (*pci_exclude_device)(unsigned char, unsigned char);
197
198 /* Called at the very end of pcibios_init() */
199 void (*pcibios_after_init)(void);
200
201 /* this is for modules, since _machine can be a define -- Cort */
202 int ppc_machine;
203
204#ifdef CONFIG_KEXEC
205 /* Called to shutdown machine specific hardware not already controlled
206 * by other drivers.
207 * XXX Should we move this one out of kexec scope?
208 */
209 void (*machine_shutdown)(void);
210
211 /* Called to do the minimal shutdown needed for a kexec'd kernel
212 * to run successfully.
213 * XXX Should we move this one out of kexec scope?
214 */
215 void (*machine_crash_shutdown)(void);
216
217 /* Called to do whatever setup is needed on image and the
218 * reboot code buffer. Returns 0 on success.
219 * Provide your own (maybe dummy) implementation if your platform
220 * claims to support kexec.
221 */
222 int (*machine_kexec_prepare)(struct kimage *image);
223
224 /* Called to handle any machine specific cleanup on image */
225 void (*machine_kexec_cleanup)(struct kimage *image);
226
227 /* Called to perform the _real_ kexec.
228 * Do NOT allocate memory or fail here. We are past the point of
229 * no return.
230 */
231 void (*machine_kexec)(struct kimage *image);
232#endif /* CONFIG_KEXEC */
233#endif /* CONFIG_PPC32 */
142}; 234};
143 235
144extern int default_idle(void); 236extern void default_idle(void);
145extern int native_idle(void); 237extern void native_idle(void);
146 238
147extern struct machdep_calls ppc_md; 239extern struct machdep_calls ppc_md;
148extern char cmd_line[COMMAND_LINE_SIZE]; 240extern char cmd_line[COMMAND_LINE_SIZE];
@@ -162,6 +254,13 @@ extern sys_ctrler_t sys_ctrler;
162 254
163#endif /* CONFIG_PPC_PMAC */ 255#endif /* CONFIG_PPC_PMAC */
164 256
257extern void setup_pci_ptrs(void);
258
259#ifdef CONFIG_SMP
260/* Poor default implementations */
261extern void __devinit smp_generic_give_timebase(void);
262extern void __devinit smp_generic_take_timebase(void);
263#endif /* CONFIG_SMP */
165 264
166 265
167/* Functions to produce codes on the leds. 266/* Functions to produce codes on the leds.
@@ -181,5 +280,5 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
181 ppc_md.log_error(buf, err_type, fatal); 280 ppc_md.log_error(buf, err_type, fatal);
182} 281}
183 282
184#endif /* _PPC64_MACHDEP_H */
185#endif /* __KERNEL__ */ 283#endif /* __KERNEL__ */
284#endif /* _ASM_POWERPC_MACHDEP_H */
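
machdep_calls is now shared: the hash-table and iommu hooks sit under CONFIG_PPC64, the legacy platform hooks under CONFIG_PPC32, and a platform fills in only what it implements. A hedged sketch of that pattern (the board name and hooks are invented; real platforms assign ppc_md members from their probe/init code):

#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/machdep.h>

static void __init myboard_setup_arch(void)
{
        /* board bring-up: map I/O, register buses, ... */
}

static void myboard_show_cpuinfo(struct seq_file *m)
{
        seq_printf(m, "machine\t\t: MyBoard\n");        /* /proc/cpuinfo line */
}

void __init myboard_init(void)
{
        /* hooks this board does not implement simply stay NULL */
        ppc_md.setup_arch   = myboard_setup_arch;
        ppc_md.show_cpuinfo = myboard_show_cpuinfo;
}
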
diff --git a/include/asm-ppc/macio.h b/include/asm-powerpc/macio.h
index b553dd4b139e..b553dd4b139e 100644
--- a/include/asm-ppc/macio.h
+++ b/include/asm-powerpc/macio.h
diff --git a/include/asm-ppc/mediabay.h b/include/asm-powerpc/mediabay.h
index 9daa3252d7b6..9daa3252d7b6 100644
--- a/include/asm-ppc/mediabay.h
+++ b/include/asm-powerpc/mediabay.h
diff --git a/arch/ppc64/kernel/mpic.h b/include/asm-powerpc/mpic.h
index ca78a7f10528..7083d1f74260 100644
--- a/arch/ppc64/kernel/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -1,3 +1,6 @@
1#ifndef _ASM_POWERPC_MPIC_H
2#define _ASM_POWERPC_MPIC_H
3
1#include <linux/irq.h> 4#include <linux/irq.h>
2 5
3/* 6/*
@@ -258,12 +261,21 @@ extern void mpic_setup_this_cpu(void);
258/* Clean up for kexec (or cpu offline or ...) */ 261/* Clean up for kexec (or cpu offline or ...) */
259extern void mpic_teardown_this_cpu(int secondary); 262extern void mpic_teardown_this_cpu(int secondary);
260 263
264/* Get the current cpu priority for this cpu (0..15) */
265extern int mpic_cpu_get_priority(void);
266
267/* Set the current cpu priority for this cpu */
268extern void mpic_cpu_set_priority(int prio);
269
261/* Request IPIs on primary mpic */ 270/* Request IPIs on primary mpic */
262extern void mpic_request_ipis(void); 271extern void mpic_request_ipis(void);
263 272
264/* Send an IPI (non offseted number 0..3) */ 273/* Send an IPI (non offseted number 0..3) */
265extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask); 274extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
266 275
276/* Send a message (IPI) to a given target (cpu number or MSG_*) */
277void smp_mpic_message_pass(int target, int msg);
278
267/* Fetch interrupt from a given mpic */ 279/* Fetch interrupt from a given mpic */
268extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); 280extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
269/* This one gets to the primary mpic */ 281/* This one gets to the primary mpic */
@@ -271,3 +283,5 @@ extern int mpic_get_irq(struct pt_regs *regs);
271 283
272/* global mpic for pSeries */ 284/* global mpic for pSeries */
273extern struct mpic *pSeries_mpic; 285extern struct mpic *pSeries_mpic;
286
287#endif /* _ASM_POWERPC_MPIC_H */
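
Besides the move, the header gains per-CPU task-priority accessors and smp_mpic_message_pass(). A hedged sketch of the priority pair (whether raising the priority like this is appropriate is entirely platform-specific):

#include <asm/mpic.h>

static void hold_off_low_priority_sources(void)
{
        int old = mpic_cpu_get_priority();      /* current task priority, 0..15 */

        mpic_cpu_set_priority(15);              /* hold off lower-priority sources */
        /* ... short window of work ... */
        mpic_cpu_set_priority(old);             /* restore the previous level */
}
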
diff --git a/include/asm-ppc/of_device.h b/include/asm-powerpc/of_device.h
index 575bce418f80..ddb16aae0bd6 100644
--- a/include/asm-ppc/of_device.h
+++ b/include/asm-powerpc/of_device.h
@@ -1,5 +1,5 @@
1#ifndef __OF_DEVICE_H__ 1#ifndef _ASM_POWERPC_OF_DEVICE_H
2#define __OF_DEVICE_H__ 2#define _ASM_POWERPC_OF_DEVICE_H
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/mod_devicetable.h> 5#include <linux/mod_devicetable.h>
@@ -61,5 +61,4 @@ extern struct of_device *of_platform_device_create(struct device_node *np,
61 struct device *parent); 61 struct device *parent);
62extern void of_release_dev(struct device *dev); 62extern void of_release_dev(struct device *dev);
63 63
64#endif /* __OF_DEVICE_H__ */ 64#endif /* _ASM_POWERPC_OF_DEVICE_H */
65
diff --git a/include/asm-ppc/ohare.h b/include/asm-powerpc/ohare.h
index 023b59772231..023b59772231 100644
--- a/include/asm-ppc/ohare.h
+++ b/include/asm-powerpc/ohare.h
diff --git a/include/asm-ppc64/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index b04f1dfb1421..8013cd273ced 100644
--- a/include/asm-ppc64/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -9,39 +9,49 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef OP_IMPL_H 12#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
13#define OP_IMPL_H 1 13#define _ASM_POWERPC_OPROFILE_IMPL_H
14 14
15#define OP_MAX_COUNTER 8 15#define OP_MAX_COUNTER 8
16 16
17/* Per-counter configuration as set via oprofilefs. */ 17/* Per-counter configuration as set via oprofilefs. */
18struct op_counter_config { 18struct op_counter_config {
19#ifdef __powerpc64__
19 unsigned long valid; 20 unsigned long valid;
21#endif
20 unsigned long enabled; 22 unsigned long enabled;
21 unsigned long event; 23 unsigned long event;
22 unsigned long count; 24 unsigned long count;
23 unsigned long kernel; 25 unsigned long kernel;
26#ifdef __powerpc64__
24 /* We dont support per counter user/kernel selection */ 27 /* We dont support per counter user/kernel selection */
28#endif
25 unsigned long user; 29 unsigned long user;
26 unsigned long unit_mask; 30 unsigned long unit_mask;
27}; 31};
28 32
29/* System-wide configuration as set via oprofilefs. */ 33/* System-wide configuration as set via oprofilefs. */
30struct op_system_config { 34struct op_system_config {
35#ifdef __powerpc64__
31 unsigned long mmcr0; 36 unsigned long mmcr0;
32 unsigned long mmcr1; 37 unsigned long mmcr1;
33 unsigned long mmcra; 38 unsigned long mmcra;
39#endif
34 unsigned long enable_kernel; 40 unsigned long enable_kernel;
35 unsigned long enable_user; 41 unsigned long enable_user;
42#ifdef __powerpc64__
36 unsigned long backtrace_spinlocks; 43 unsigned long backtrace_spinlocks;
44#endif
37}; 45};
38 46
39/* Per-arch configuration */ 47/* Per-arch configuration */
40struct op_ppc64_model { 48struct op_powerpc_model {
41 void (*reg_setup) (struct op_counter_config *, 49 void (*reg_setup) (struct op_counter_config *,
42 struct op_system_config *, 50 struct op_system_config *,
43 int num_counters); 51 int num_counters);
52#ifdef __powerpc64__
44 void (*cpu_setup) (void *); 53 void (*cpu_setup) (void *);
54#endif
45 void (*start) (struct op_counter_config *); 55 void (*start) (struct op_counter_config *);
46 void (*stop) (void); 56 void (*stop) (void);
47 void (*handle_interrupt) (struct pt_regs *, 57 void (*handle_interrupt) (struct pt_regs *,
@@ -49,8 +59,9 @@ struct op_ppc64_model {
49 int num_counters; 59 int num_counters;
50}; 60};
51 61
52extern struct op_ppc64_model op_model_rs64; 62#ifdef __powerpc64__
53extern struct op_ppc64_model op_model_power4; 63extern struct op_powerpc_model op_model_rs64;
64extern struct op_powerpc_model op_model_power4;
54 65
55static inline unsigned int ctr_read(unsigned int i) 66static inline unsigned int ctr_read(unsigned int i)
56{ 67{
@@ -107,5 +118,6 @@ static inline void ctr_write(unsigned int i, unsigned int val)
107 break; 118 break;
108 } 119 }
109} 120}
121#endif /* __powerpc64__ */
110 122
111#endif 123#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
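
op_ppc64_model becomes op_powerpc_model, with the 64-bit-only members and the rs64/power4 instances kept under __powerpc64__. A hedged sketch of what a model definition looks like against that ops table (the chip name, handlers and counter count are placeholders):

#include <asm/ptrace.h>
#include <asm/oprofile_impl.h>

static void mychip_reg_setup(struct op_counter_config *ctr,
                             struct op_system_config *sys, int num_ctrs)
{
        /* translate the oprofilefs settings into PMU register values */
}

static void mychip_start(struct op_counter_config *ctr) { }
static void mychip_stop(void) { }
static void mychip_handle_interrupt(struct pt_regs *regs,
                                    struct op_counter_config *ctr) { }

struct op_powerpc_model op_model_mychip = {
        .reg_setup        = mychip_reg_setup,
        .start            = mychip_start,
        .stop             = mychip_stop,
        .handle_interrupt = mychip_handle_interrupt,
        .num_counters     = 4,                  /* placeholder */
};
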
diff --git a/include/asm-ppc64/pSeries_reconfig.h b/include/asm-powerpc/pSeries_reconfig.h
index c0db1ea7f7d1..c0db1ea7f7d1 100644
--- a/include/asm-ppc64/pSeries_reconfig.h
+++ b/include/asm-powerpc/pSeries_reconfig.h
diff --git a/include/asm-ppc/parport.h b/include/asm-powerpc/parport.h
index 11f96d3de5b6..d86b410a6f8b 100644
--- a/include/asm-ppc/parport.h
+++ b/include/asm-powerpc/parport.h
@@ -6,8 +6,8 @@
6 * This file should only be included by drivers/parport/parport_pc.c. 6 * This file should only be included by drivers/parport/parport_pc.c.
7 */ 7 */
8 8
9#ifndef _ASM_PPC_PARPORT_H 9#ifndef _ASM_POWERPC_PARPORT_H
10#define _ASM_PPC_PARPORT_H 10#define _ASM_POWERPC_PARPORT_H
11 11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); 12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) 13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
@@ -15,4 +15,4 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
15 return parport_pc_find_isa_ports (autoirq, autodma); 15 return parport_pc_find_isa_ports (autoirq, autodma);
16} 16}
17 17
18#endif /* !(_ASM_PPC_PARPORT_H) */ 18#endif /* !(_ASM_POWERPC_PARPORT_H) */
diff --git a/include/asm-ppc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h
index e9683bcff19b..e9683bcff19b 100644
--- a/include/asm-ppc/pmac_feature.h
+++ b/include/asm-powerpc/pmac_feature.h
diff --git a/include/asm-ppc/pmac_low_i2c.h b/include/asm-powerpc/pmac_low_i2c.h
index 809a5963d5e7..809a5963d5e7 100644
--- a/include/asm-ppc/pmac_low_i2c.h
+++ b/include/asm-powerpc/pmac_low_i2c.h
diff --git a/include/asm-ppc64/pmc.h b/include/asm-powerpc/pmc.h
index d1d297dbccfe..2f3c3fc2b796 100644
--- a/include/asm-ppc64/pmc.h
+++ b/include/asm-powerpc/pmc.h
@@ -16,8 +16,8 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19#ifndef _PPC64_PMC_H 19#ifndef _POWERPC_PMC_H
20#define _PPC64_PMC_H 20#define _POWERPC_PMC_H
21 21
22#include <asm/ptrace.h> 22#include <asm/ptrace.h>
23 23
@@ -26,6 +26,21 @@ typedef void (*perf_irq_t)(struct pt_regs *);
26int reserve_pmc_hardware(perf_irq_t new_perf_irq); 26int reserve_pmc_hardware(perf_irq_t new_perf_irq);
27void release_pmc_hardware(void); 27void release_pmc_hardware(void);
28 28
29#ifdef CONFIG_PPC64
29void power4_enable_pmcs(void); 30void power4_enable_pmcs(void);
31#endif
30 32
31#endif /* _PPC64_PMC_H */ 33#ifdef CONFIG_FSL_BOOKE
34void init_pmc_stop(int ctr);
35void set_pmc_event(int ctr, int event);
36void set_pmc_user_kernel(int ctr, int user, int kernel);
37void set_pmc_marked(int ctr, int mark0, int mark1);
38void pmc_start_ctr(int ctr, int enable);
39void pmc_start_ctrs(int enable);
40void pmc_stop_ctrs(void);
41void dump_pmcs(void);
42
43extern struct op_powerpc_model op_model_fsl_booke;
44#endif
45
46#endif /* _POWERPC_PMC_H */
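
The reserve/release pair is the arbitration point for the performance monitor interrupt; the FSL Book E counter helpers are new here. A hedged sketch of the reservation pattern (the handler and callers are invented):

#include <asm/pmc.h>

static void my_perf_irq(struct pt_regs *regs)
{
        /* read, log and re-arm the counters here */
}

static int my_profiler_start(void)
{
        int err = reserve_pmc_hardware(my_perf_irq);    /* claim the PMU */

        if (err)
                return err;
        /* ... program and start the counters ... */
        return 0;
}

static void my_profiler_stop(void)
{
        release_pmc_hardware();                         /* hand the PMU back */
}
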
diff --git a/include/asm-ppc64/posix_types.h b/include/asm-powerpc/posix_types.h
index 516de7201b5d..c6391077224f 100644
--- a/include/asm-ppc64/posix_types.h
+++ b/include/asm-powerpc/posix_types.h
@@ -1,44 +1,54 @@
1#ifndef _PPC64_POSIX_TYPES_H 1#ifndef _ASM_POWERPC_POSIX_TYPES_H
2#define _PPC64_POSIX_TYPES_H 2#define _ASM_POWERPC_POSIX_TYPES_H
3 3
4/* 4/*
5 * This file is generally used by user-level software, so you need to 5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot 6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used. 7 * assume GCC is being used.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */ 8 */
14 9
15typedef unsigned long __kernel_ino_t; 10typedef unsigned long __kernel_ino_t;
16typedef unsigned long __kernel_nlink_t;
17typedef unsigned int __kernel_mode_t; 11typedef unsigned int __kernel_mode_t;
18typedef long __kernel_off_t; 12typedef long __kernel_off_t;
19typedef long long __kernel_loff_t;
20typedef int __kernel_pid_t; 13typedef int __kernel_pid_t;
21typedef int __kernel_ipc_pid_t;
22typedef unsigned int __kernel_uid_t; 14typedef unsigned int __kernel_uid_t;
23typedef unsigned int __kernel_gid_t; 15typedef unsigned int __kernel_gid_t;
24typedef unsigned long __kernel_size_t;
25typedef long __kernel_ssize_t;
26typedef long __kernel_ptrdiff_t; 16typedef long __kernel_ptrdiff_t;
27typedef long __kernel_time_t; 17typedef long __kernel_time_t;
18typedef long __kernel_clock_t;
28typedef int __kernel_timer_t; 19typedef int __kernel_timer_t;
29typedef int __kernel_clockid_t; 20typedef int __kernel_clockid_t;
30typedef long __kernel_suseconds_t; 21typedef long __kernel_suseconds_t;
31typedef long __kernel_clock_t;
32typedef int __kernel_daddr_t; 22typedef int __kernel_daddr_t;
33typedef char * __kernel_caddr_t; 23typedef char * __kernel_caddr_t;
34typedef unsigned short __kernel_uid16_t; 24typedef unsigned short __kernel_uid16_t;
35typedef unsigned short __kernel_gid16_t; 25typedef unsigned short __kernel_gid16_t;
36typedef unsigned int __kernel_uid32_t; 26typedef unsigned int __kernel_uid32_t;
37typedef unsigned int __kernel_gid32_t; 27typedef unsigned int __kernel_gid32_t;
38
39typedef unsigned int __kernel_old_uid_t; 28typedef unsigned int __kernel_old_uid_t;
40typedef unsigned int __kernel_old_gid_t; 29typedef unsigned int __kernel_old_gid_t;
30
31#ifdef __powerpc64__
32typedef unsigned long __kernel_nlink_t;
33typedef int __kernel_ipc_pid_t;
34typedef unsigned long __kernel_size_t;
35typedef long __kernel_ssize_t;
41typedef unsigned long __kernel_old_dev_t; 36typedef unsigned long __kernel_old_dev_t;
37#else
38typedef unsigned short __kernel_nlink_t;
39typedef short __kernel_ipc_pid_t;
40typedef unsigned int __kernel_size_t;
41typedef int __kernel_ssize_t;
42typedef unsigned int __kernel_old_dev_t;
43#endif
44
45#ifdef __powerpc64__
46typedef long long __kernel_loff_t;
47#else
48#ifdef __GNUC__
49typedef long long __kernel_loff_t;
50#endif
51#endif
42 52
43typedef struct { 53typedef struct {
44 int val[2]; 54 int val[2];
@@ -116,4 +126,4 @@ static __inline__ void __FD_ZERO(__kernel_fd_set *p)
116 126
117#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ 127#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
118#endif /* __GNUC__ */ 128#endif /* __GNUC__ */
119#endif /* _PPC64_POSIX_TYPES_H */ 129#endif /* _ASM_POWERPC_POSIX_TYPES_H */
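
The merged file keeps one copy of the common typedefs and switches the width-sensitive ones (__kernel_nlink_t, __kernel_ipc_pid_t, __kernel_size_t, __kernel_ssize_t, __kernel_old_dev_t, __kernel_loff_t) on __powerpc64__. The same pattern in isolation, as a sketch (example_size_t is a made-up name):

#ifdef __powerpc64__
typedef unsigned long example_size_t;   /* 8 bytes on the 64-bit ABI */
#else
typedef unsigned int  example_size_t;   /* 4 bytes on the 32-bit ABI */
#endif
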
diff --git a/arch/ppc64/kernel/pci.h b/include/asm-powerpc/ppc-pci.h
index 5eb2cc320566..a88728fba8f6 100644
--- a/arch/ppc64/kernel/pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -6,8 +6,8 @@
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9#ifndef __PPC_KERNEL_PCI_H__ 9#ifndef _ASM_POWERPC_PPC_PCI_H
10#define __PPC_KERNEL_PCI_H__ 10#define _ASM_POWERPC_PPC_PCI_H
11 11
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <asm/pci-bridge.h> 13#include <asm/pci-bridge.h>
@@ -51,4 +51,4 @@ extern unsigned long pci_probe_only;
51extern unsigned long pci_assign_all_buses; 51extern unsigned long pci_assign_all_buses;
52extern int pci_read_irq_line(struct pci_dev *pci_dev); 52extern int pci_read_irq_line(struct pci_dev *pci_dev);
53 53
54#endif /* __PPC_KERNEL_PCI_H__ */ 54#endif /* _ASM_POWERPC_PPC_PCI_H */
diff --git a/include/asm-ppc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index bb53e2def363..f99f2af82ca5 100644
--- a/include/asm-ppc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -1,38 +1,42 @@
1/* 1/*
2 * include/asm-ppc/ppc_asm.h
3 *
4 * Definitions used by various bits of low-level assembly code on PowerPC.
5 *
6 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan. 2 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */ 3 */
4#ifndef _ASM_POWERPC_PPC_ASM_H
5#define _ASM_POWERPC_PPC_ASM_H
13 6
7#include <linux/stringify.h>
14#include <linux/config.h> 8#include <linux/config.h>
15 9
10#ifdef __ASSEMBLY__
11
16/* 12/*
17 * Macros for storing registers into and loading registers from 13 * Macros for storing registers into and loading registers from
18 * exception frames. 14 * exception frames.
19 */ 15 */
16#ifdef __powerpc64__
17#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
18#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
19#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
20#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
21#else
20#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base) 22#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
23#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
24#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
25 SAVE_10GPRS(22, base)
26#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
27 REST_10GPRS(22, base)
28#endif
29
30
21#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) 31#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
22#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) 32#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
23#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base) 33#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
24#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base) 34#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
25#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
26#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base) 35#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
27#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base) 36#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
28#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) 37#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
29#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) 38#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
30 39
31#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
32 SAVE_10GPRS(22, base)
33#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
34 REST_10GPRS(22, base)
35
36#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base) 40#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
37#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) 41#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
38#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) 42#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
@@ -47,32 +51,170 @@
47#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base) 51#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
48 52
49#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base 53#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
50#define SAVE_2VR(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base) 54#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
51#define SAVE_4VR(n,b,base) SAVE_2VR(n,b,base); SAVE_2VR(n+2,b,base) 55#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
52#define SAVE_8VR(n,b,base) SAVE_4VR(n,b,base); SAVE_4VR(n+4,b,base) 56#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
53#define SAVE_16VR(n,b,base) SAVE_8VR(n,b,base); SAVE_8VR(n+8,b,base) 57#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
54#define SAVE_32VR(n,b,base) SAVE_16VR(n,b,base); SAVE_16VR(n+16,b,base) 58#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
55#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base 59#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
56#define REST_2VR(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base) 60#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
57#define REST_4VR(n,b,base) REST_2VR(n,b,base); REST_2VR(n+2,b,base) 61#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
58#define REST_8VR(n,b,base) REST_4VR(n,b,base); REST_4VR(n+4,b,base) 62#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
59#define REST_16VR(n,b,base) REST_8VR(n,b,base); REST_8VR(n+8,b,base) 63#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
60#define REST_32VR(n,b,base) REST_16VR(n,b,base); REST_16VR(n+16,b,base) 64#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
61 65
62#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base) 66#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
63#define SAVE_2EVR(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base) 67#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
64#define SAVE_4EVR(n,s,base) SAVE_2EVR(n,s,base); SAVE_2EVR(n+2,s,base) 68#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
65#define SAVE_8EVR(n,s,base) SAVE_4EVR(n,s,base); SAVE_4EVR(n+4,s,base) 69#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
66#define SAVE_16EVR(n,s,base) SAVE_8EVR(n,s,base); SAVE_8EVR(n+8,s,base) 70#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
67#define SAVE_32EVR(n,s,base) SAVE_16EVR(n,s,base); SAVE_16EVR(n+16,s,base) 71#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
68
69#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n 72#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
70#define REST_2EVR(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base) 73#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
71#define REST_4EVR(n,s,base) REST_2EVR(n,s,base); REST_2EVR(n+2,s,base) 74#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
72#define REST_8EVR(n,s,base) REST_4EVR(n,s,base); REST_4EVR(n+4,s,base) 75#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
73#define REST_16EVR(n,s,base) REST_8EVR(n,s,base); REST_8EVR(n+8,s,base) 76#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
74#define REST_32EVR(n,s,base) REST_16EVR(n,s,base); REST_16EVR(n+16,s,base) 77#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
78
79/* Macros to adjust thread priority for hardware multithreading */
80#define HMT_VERY_LOW or 31,31,31 # very low priority
81#define HMT_LOW or 1,1,1
82#define HMT_MEDIUM_LOW or 6,6,6 # medium low priority
83#define HMT_MEDIUM or 2,2,2
84#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority
85#define HMT_HIGH or 3,3,3
86
87/* handle instructions that older assemblers may not know */
88#define RFCI .long 0x4c000066 /* rfci instruction */
89#define RFDI .long 0x4c00004e /* rfdi instruction */
90#define RFMCI .long 0x4c00004c /* rfmci instruction */
91
92#ifdef CONFIG_PPC64
93
94#define XGLUE(a,b) a##b
95#define GLUE(a,b) XGLUE(a,b)
96
97#define _GLOBAL(name) \
98 .section ".text"; \
99 .align 2 ; \
100 .globl name; \
101 .globl GLUE(.,name); \
102 .section ".opd","aw"; \
103name: \
104 .quad GLUE(.,name); \
105 .quad .TOC.@tocbase; \
106 .quad 0; \
107 .previous; \
108 .type GLUE(.,name),@function; \
109GLUE(.,name):
110
111#define _KPROBE(name) \
112 .section ".kprobes.text","a"; \
113 .align 2 ; \
114 .globl name; \
115 .globl GLUE(.,name); \
116 .section ".opd","aw"; \
117name: \
118 .quad GLUE(.,name); \
119 .quad .TOC.@tocbase; \
120 .quad 0; \
121 .previous; \
122 .type GLUE(.,name),@function; \
123GLUE(.,name):
124
125#define _STATIC(name) \
126 .section ".text"; \
127 .align 2 ; \
128 .section ".opd","aw"; \
129name: \
130 .quad GLUE(.,name); \
131 .quad .TOC.@tocbase; \
132 .quad 0; \
133 .previous; \
134 .type GLUE(.,name),@function; \
135GLUE(.,name):
136
137#else /* 32-bit */
138
139#define _GLOBAL(n) \
140 .text; \
141 .stabs __stringify(n:F-1),N_FUN,0,0,n;\
142 .globl n; \
143n:
144
145#define _KPROBE(n) \
146 .section ".kprobes.text","a"; \
147 .globl n; \
148n:
149
150#endif
151
152/*
153 * LOADADDR( rn, name )
154 * loads the address of 'name' into 'rn'
155 *
156 * LOADBASE( rn, name )
157 * loads the address (possibly without the low 16 bits) of 'name' into 'rn'
158 * suitable for base+disp addressing
159 */
160#ifdef __powerpc64__
161#define LOADADDR(rn,name) \
162 lis rn,name##@highest; \
163 ori rn,rn,name##@higher; \
164 rldicr rn,rn,32,31; \
165 oris rn,rn,name##@h; \
166 ori rn,rn,name##@l
167
168#define LOADBASE(rn,name) \
169 ld rn,name@got(r2)
170
171#define OFF(name) 0
172
173#define SET_REG_TO_CONST(reg, value) \
174 lis reg,(((value)>>48)&0xFFFF); \
175 ori reg,reg,(((value)>>32)&0xFFFF); \
176 rldicr reg,reg,32,31; \
177 oris reg,reg,(((value)>>16)&0xFFFF); \
178 ori reg,reg,((value)&0xFFFF);
179
180#define SET_REG_TO_LABEL(reg, label) \
181 lis reg,(label)@highest; \
182 ori reg,reg,(label)@higher; \
183 rldicr reg,reg,32,31; \
184 oris reg,reg,(label)@h; \
185 ori reg,reg,(label)@l;
186
187/* operations for longs and pointers */
188#define LDL ld
189#define STL std
190#define CMPI cmpdi
191#define SZL 8
192
193/* offsets for stack frame layout */
194#define LRSAVE 16
195
196#else /* 32-bit */
197#define LOADADDR(rn,name) \
198 lis rn,name@ha; \
199 addi rn,rn,name@l
200
201#define LOADBASE(rn,name) \
202 lis rn,name@ha
203
204#define OFF(name) name@l
205
206/* operations for longs and pointers */
207#define LDL lwz
208#define STL stw
209#define CMPI cmpwi
210#define SZL 4
211
212/* offsets for stack frame layout */
213#define LRSAVE 4
214
215#endif
75 216
217/* various errata or part fixups */
76#ifdef CONFIG_PPC601_SYNC_FIX 218#ifdef CONFIG_PPC601_SYNC_FIX
77#define SYNC \ 219#define SYNC \
78BEGIN_FTR_SECTION \ 220BEGIN_FTR_SECTION \
@@ -93,6 +235,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
93#define ISYNC_601 235#define ISYNC_601
94#endif 236#endif
95 237
238
96#ifndef CONFIG_SMP 239#ifndef CONFIG_SMP
97#define TLBSYNC 240#define TLBSYNC
98#else /* CONFIG_SMP */ 241#else /* CONFIG_SMP */
@@ -104,6 +247,7 @@ BEGIN_FTR_SECTION \
104END_FTR_SECTION_IFCLR(CPU_FTR_601) 247END_FTR_SECTION_IFCLR(CPU_FTR_601)
105#endif 248#endif
106 249
250
107/* 251/*
108 * This instruction is not implemented on the PPC 603 or 601; however, on 252 * This instruction is not implemented on the PPC 603 or 601; however, on
109 * the 403GCX and 405GP tlbia IS defined and tlbie is not. 253 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
@@ -121,18 +265,52 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
121 bdnz 0b 265 bdnz 0b
122#endif 266#endif
123 267
124#ifdef CONFIG_BOOKE 268
269#ifdef CONFIG_IBM405_ERR77
270#define PPC405_ERR77(ra,rb) dcbt ra, rb;
271#define PPC405_ERR77_SYNC sync;
272#else
273#define PPC405_ERR77(ra,rb)
274#define PPC405_ERR77_SYNC
275#endif
276
277
278#ifdef CONFIG_IBM440EP_ERR42
279#define PPC440EP_ERR42 isync
280#else
281#define PPC440EP_ERR42
282#endif
283
284
285#if defined(CONFIG_BOOKE)
286#define toreal(rd)
287#define fromreal(rd)
288
125#define tophys(rd,rs) \ 289#define tophys(rd,rs) \
126 addis rd,rs,0 290 addis rd,rs,0
127 291
128#define tovirt(rd,rs) \ 292#define tovirt(rd,rs) \
129 addis rd,rs,0 293 addis rd,rs,0
130 294
131#else /* CONFIG_BOOKE */ 295#elif defined(CONFIG_PPC64)
296#define toreal(rd) /* we can access c000... in real mode */
297#define fromreal(rd)
298
299#define tophys(rd,rs) \
300 clrldi rd,rs,2
301
302#define tovirt(rd,rs) \
303 rotldi rd,rs,16; \
304 ori rd,rd,((KERNELBASE>>48)&0xFFFF);\
305 rotldi rd,rd,48
306#else
132/* 307/*
133 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the 308 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
134 * physical base address of RAM at compile time. 309 * physical base address of RAM at compile time.
135 */ 310 */
311#define toreal(rd) tophys(rd,rd)
312#define fromreal(rd) tovirt(rd,rd)
313
136#define tophys(rd,rs) \ 314#define tophys(rd,rs) \
1370: addis rd,rs,-KERNELBASE@h; \ 3150: addis rd,rs,-KERNELBASE@h; \
138 .section ".vtop_fixup","aw"; \ 316 .section ".vtop_fixup","aw"; \
@@ -146,22 +324,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
146 .align 1; \ 324 .align 1; \
147 .long 0b; \ 325 .long 0b; \
148 .previous 326 .previous
149#endif /* CONFIG_BOOKE */ 327#endif
150 328
151/* 329#ifdef CONFIG_PPC64
152 * On 64-bit cpus, we use the rfid instruction instead of rfi, but 330#define RFI rfid
153 * we then have to make sure we preserve the top 32 bits except for 331#define MTMSRD(r) mtmsrd r
154 * the 64-bit mode bit, which we clear.
155 */
156#ifdef CONFIG_PPC64BRIDGE
157#define FIX_SRR1(ra, rb) \
158 mr rb,ra; \
159 mfmsr ra; \
160 clrldi ra,ra,1; /* turn off 64-bit mode */ \
161 rldimi ra,rb,0,32
162#define RFI .long 0x4c000024 /* rfid instruction */
163#define MTMSRD(r) .long (0x7c000164 + ((r) << 21)) /* mtmsrd */
164#define CLR_TOP32(r) rlwinm (r),(r),0,0,31 /* clear top 32 bits */
165 332
166#else 333#else
167#define FIX_SRR1(ra, rb) 334#define FIX_SRR1(ra, rb)
@@ -172,24 +339,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
172#endif 339#endif
173#define MTMSRD(r) mtmsr r 340#define MTMSRD(r) mtmsr r
174#define CLR_TOP32(r) 341#define CLR_TOP32(r)
175#endif /* CONFIG_PPC64BRIDGE */
176
177#define RFCI .long 0x4c000066 /* rfci instruction */
178#define RFDI .long 0x4c00004e /* rfdi instruction */
179#define RFMCI .long 0x4c00004c /* rfmci instruction */
180
181#ifdef CONFIG_IBM405_ERR77
182#define PPC405_ERR77(ra,rb) dcbt ra, rb;
183#define PPC405_ERR77_SYNC sync;
184#else
185#define PPC405_ERR77(ra,rb)
186#define PPC405_ERR77_SYNC
187#endif
188
189#ifdef CONFIG_IBM440EP_ERR42
190#define PPC440EP_ERR42 isync
191#else
192#define PPC440EP_ERR42
193#endif 342#endif
194 343
195/* The boring bits... */ 344/* The boring bits... */
@@ -277,6 +426,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
277#define fr30 30 426#define fr30 30
278#define fr31 31 427#define fr31 31
279 428
429/* AltiVec Registers (VPRs) */
430
280#define vr0 0 431#define vr0 0
281#define vr1 1 432#define vr1 1
282#define vr2 2 433#define vr2 2
@@ -310,6 +461,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
310#define vr30 30 461#define vr30 30
311#define vr31 31 462#define vr31 31
312 463
464/* SPE Registers (EVPRs) */
465
313#define evr0 0 466#define evr0 0
314#define evr1 1 467#define evr1 1
315#define evr2 2 468#define evr2 2
@@ -348,3 +501,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
348#define N_RSYM 64 501#define N_RSYM 64
349#define N_SLINE 68 502#define N_SLINE 68
350#define N_SO 100 503#define N_SO 100
504
505#define ASM_CONST(x) x
506#else
507 #define __ASM_CONST(x) x##UL
508 #define ASM_CONST(x) __ASM_CONST(x)
509#endif /* __ASSEMBLY__ */
510
511#endif /* _ASM_POWERPC_PPC_ASM_H */
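
The tail of this header makes ASM_CONST usable from C as well as assembly: outside __ASSEMBLY__ the literal gains a UL suffix, inside it stays bare for gas. A minimal sketch of the idea follows; MY_REGION_BASE and my_in_region() are made-up names for illustration, not part of the header, and the base value is assumed.

#include <asm/ppc_asm.h>

#define MY_REGION_BASE	ASM_CONST(0xc0000000)	/* assumed base address */

/* Returns non-zero when addr falls at or above the assumed region base. */
static inline int my_in_region(unsigned long addr)
{
	return addr >= MY_REGION_BASE;
}
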
diff --git a/include/asm-ppc/processor.h b/include/asm-powerpc/processor.h
index b05b5d9cae20..eee954a001fd 100644
--- a/include/asm-ppc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -1,21 +1,28 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_POWERPC_PROCESSOR_H
2#ifndef __ASM_PPC_PROCESSOR_H 2#define _ASM_POWERPC_PROCESSOR_H
3#define __ASM_PPC_PROCESSOR_H
4 3
5/* 4/*
6 * Default implementation of macro that returns current 5 * Copyright (C) 2001 PPC 64 Team, IBM Corp
7 * instruction pointer ("program counter"). 6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
8 */ 11 */
9#define current_text_addr() ({ __label__ _l; _l: &&_l;})
10 12
11#include <linux/config.h> 13#include <linux/config.h>
12#include <linux/stringify.h> 14#include <asm/reg.h>
13 15
16#ifndef __ASSEMBLY__
17#include <linux/compiler.h>
14#include <asm/ptrace.h> 18#include <asm/ptrace.h>
15#include <asm/types.h> 19#include <asm/types.h>
16#include <asm/mpc8xx.h> 20#ifdef CONFIG_PPC64
17#include <asm/reg.h> 21#include <asm/systemcfg.h>
22#endif
18 23
24#ifdef CONFIG_PPC32
25/* 32-bit platform types */
19/* We only need to define a new _MACH_xxx for machines which are part of 26/* We only need to define a new _MACH_xxx for machines which are part of
20 * a configuration which supports more than one type of different machine. 27 * a configuration which supports more than one type of different machine.
21 * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac. 28 * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac.
@@ -36,20 +43,6 @@
36#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */ 43#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
37#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */ 44#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
38 45
39#define _GLOBAL(n)\
40 .stabs __stringify(n:F-1),N_FUN,0,0,n;\
41 .globl n;\
42n:
43
44/*
45 * this is the minimum allowable io space due to the location
46 * of the io areas on prep (first one at 0x80000000) but
47 * as soon as I get around to remapping the io areas with the BATs
48 * to match the mac we can raise this. -- Cort
49 */
50#define TASK_SIZE (CONFIG_TASK_SIZE)
51
52#ifndef __ASSEMBLY__
53#ifdef CONFIG_PPC_MULTIPLATFORM 46#ifdef CONFIG_PPC_MULTIPLATFORM
54extern int _machine; 47extern int _machine;
55 48
@@ -67,17 +60,49 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
67#else 60#else
68#define _machine 0 61#define _machine 0
69#endif /* CONFIG_PPC_MULTIPLATFORM */ 62#endif /* CONFIG_PPC_MULTIPLATFORM */
63#endif /* CONFIG_PPC32 */
64
65#ifdef CONFIG_PPC64
66/* Platforms supported by PPC64 */
67#define PLATFORM_PSERIES 0x0100
68#define PLATFORM_PSERIES_LPAR 0x0101
69#define PLATFORM_ISERIES_LPAR 0x0201
70#define PLATFORM_LPAR 0x0001
71#define PLATFORM_POWERMAC 0x0400
72#define PLATFORM_MAPLE 0x0500
73#define PLATFORM_BPA 0x1000
74
75/* Compatibility with drivers coming from PPC32 world */
76#define _machine (systemcfg->platform)
77#define _MACH_Pmac PLATFORM_POWERMAC
78#endif
79
80/*
81 * Default implementation of macro that returns current
82 * instruction pointer ("program counter").
83 */
84#define current_text_addr() ({ __label__ _l; _l: &&_l;})
85
86/* Macros for adjusting thread priority (hardware multi-threading) */
87#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
88#define HMT_low() asm volatile("or 1,1,1 # low priority")
89#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
90#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
91#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
92#define HMT_high() asm volatile("or 3,3,3 # high priority")
93
94#ifdef __KERNEL__
95
96extern int have_of;
70 97
71struct task_struct; 98struct task_struct;
72void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp); 99void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
73void release_thread(struct task_struct *); 100void release_thread(struct task_struct *);
74 101
75/* Prepare to copy thread state - unlazy all lazy status */ 102/* Prepare to copy thread state - unlazy all lazy status */
76extern void prepare_to_copy(struct task_struct *tsk); 103extern void prepare_to_copy(struct task_struct *tsk);
77 104
78/* 105/* Create a new kernel thread. */
79 * Create a new kernel thread.
80 */
81extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 106extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
82 107
83/* Lazy FPU handling on uni-processor */ 108/* Lazy FPU handling on uni-processor */
@@ -85,10 +110,37 @@ extern struct task_struct *last_task_used_math;
85extern struct task_struct *last_task_used_altivec; 110extern struct task_struct *last_task_used_altivec;
86extern struct task_struct *last_task_used_spe; 111extern struct task_struct *last_task_used_spe;
87 112
113#ifdef CONFIG_PPC32
114#define TASK_SIZE (CONFIG_TASK_SIZE)
115
88/* This decides where the kernel will search for a free chunk of vm 116/* This decides where the kernel will search for a free chunk of vm
89 * space during mmap's. 117 * space during mmap's.
90 */ 118 */
91#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3) 119#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
120#endif
121
122#ifdef CONFIG_PPC64
123/* 64-bit user address space is 44-bits (16TB user VM) */
124#define TASK_SIZE_USER64 (0x0000100000000000UL)
125
126/*
127 * 32-bit user address space is 4GB - 1 page
128 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
129 */
130#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
131
132#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
133 TASK_SIZE_USER32 : TASK_SIZE_USER64)
134
135/* This decides where the kernel will search for a free chunk of vm
136 * space during mmap's.
137 */
138#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
139#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
140
141#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
142 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
143#endif
92 144
93typedef struct { 145typedef struct {
94 unsigned long seg; 146 unsigned long seg;
@@ -96,18 +148,32 @@ typedef struct {
96 148
97struct thread_struct { 149struct thread_struct {
98 unsigned long ksp; /* Kernel stack pointer */ 150 unsigned long ksp; /* Kernel stack pointer */
151#ifdef CONFIG_PPC64
152 unsigned long ksp_vsid;
153#endif
99 struct pt_regs *regs; /* Pointer to saved register state */ 154 struct pt_regs *regs; /* Pointer to saved register state */
100 mm_segment_t fs; /* for get_fs() validation */ 155 mm_segment_t fs; /* for get_fs() validation */
156#ifdef CONFIG_PPC32
101 void *pgdir; /* root of page-table tree */ 157 void *pgdir; /* root of page-table tree */
102 int fpexc_mode; /* floating-point exception mode */
103 signed long last_syscall; 158 signed long last_syscall;
159#endif
104#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE) 160#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
105 unsigned long dbcr0; /* debug control register values */ 161 unsigned long dbcr0; /* debug control register values */
106 unsigned long dbcr1; 162 unsigned long dbcr1;
107#endif 163#endif
108 double fpr[32]; /* Complete floating point set */ 164 double fpr[32]; /* Complete floating point set */
109 unsigned long fpscr_pad; /* fpr ... fpscr must be contiguous */ 165 struct { /* fpr ... fpscr must be contiguous */
110 unsigned long fpscr; /* Floating point status */ 166
167 unsigned int pad;
168 unsigned int val; /* Floating point status */
169 } fpscr;
170 int fpexc_mode; /* floating-point exception mode */
171#ifdef CONFIG_PPC64
172 unsigned long start_tb; /* Start purr when proc switched in */
 173 unsigned long accum_tb; /* Total accumulated purr for process */
174 unsigned long vdso_base; /* base of the vDSO library */
175#endif
176 unsigned long dabr; /* Data address breakpoint register */
111#ifdef CONFIG_ALTIVEC 177#ifdef CONFIG_ALTIVEC
112 /* Complete AltiVec register set */ 178 /* Complete AltiVec register set */
113 vector128 vr[32] __attribute((aligned(16))); 179 vector128 vr[32] __attribute((aligned(16)));
@@ -128,51 +194,58 @@ struct thread_struct {
128 194
129#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack) 195#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
130 196
197
198#ifdef CONFIG_PPC32
131#define INIT_THREAD { \ 199#define INIT_THREAD { \
132 .ksp = INIT_SP, \ 200 .ksp = INIT_SP, \
133 .fs = KERNEL_DS, \ 201 .fs = KERNEL_DS, \
134 .pgdir = swapper_pg_dir, \ 202 .pgdir = swapper_pg_dir, \
135 .fpexc_mode = MSR_FE0 | MSR_FE1, \ 203 .fpexc_mode = MSR_FE0 | MSR_FE1, \
136} 204}
205#else
206#define INIT_THREAD { \
207 .ksp = INIT_SP, \
208 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
209 .fs = KERNEL_DS, \
210 .fpr = {0}, \
211 .fpscr = { .val = 0, }, \
212 .fpexc_mode = MSR_FE0|MSR_FE1, \
213}
214#endif
137 215
138/* 216/*
139 * Return saved PC of a blocked thread. For now, this is the "user" PC 217 * Return saved PC of a blocked thread. For now, this is the "user" PC
140 */ 218 */
141#define thread_saved_pc(tsk) \ 219#define thread_saved_pc(tsk) \
142 ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) 220 ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
143 221
144unsigned long get_wchan(struct task_struct *p); 222unsigned long get_wchan(struct task_struct *p);
145 223
146#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) 224#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
147#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0) 225#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
148 226
149/* Get/set floating-point exception mode */ 227/* Get/set floating-point exception mode */
150#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr)) 228#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
151#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val)) 229#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
152 230
153extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr); 231extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
154extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val); 232extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
155 233
156static inline unsigned int __unpack_fe01(unsigned int msr_bits) 234static inline unsigned int __unpack_fe01(unsigned long msr_bits)
157{ 235{
158 return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8); 236 return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
159} 237}
160 238
161static inline unsigned int __pack_fe01(unsigned int fpmode) 239static inline unsigned long __pack_fe01(unsigned int fpmode)
162{ 240{
163 return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1); 241 return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
164} 242}
165 243
166/* in process.c - for early bootup debug -- Cort */ 244#ifdef CONFIG_PPC64
167int ll_printk(const char *, ...); 245#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
168void ll_puts(const char *); 246#else
169
170/* In misc.c */
171void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
172
173#define have_of (_machine == _MACH_chrp || _machine == _MACH_Pmac)
174
175#define cpu_relax() barrier() 247#define cpu_relax() barrier()
248#endif
176 249
177/* 250/*
178 * Prefetch macros. 251 * Prefetch macros.
@@ -181,21 +254,28 @@ void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
181#define ARCH_HAS_PREFETCHW 254#define ARCH_HAS_PREFETCHW
182#define ARCH_HAS_SPINLOCK_PREFETCH 255#define ARCH_HAS_SPINLOCK_PREFETCH
183 256
184extern inline void prefetch(const void *x) 257static inline void prefetch(const void *x)
185{ 258{
186 __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x)); 259 if (unlikely(!x))
260 return;
261
262 __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
187} 263}
188 264
189extern inline void prefetchw(const void *x) 265static inline void prefetchw(const void *x)
190{ 266{
191 __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x)); 267 if (unlikely(!x))
268 return;
269
270 __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
192} 271}
193 272
194#define spin_lock_prefetch(x) prefetchw(x) 273#define spin_lock_prefetch(x) prefetchw(x)
195 274
196extern int emulate_altivec(struct pt_regs *regs); 275#ifdef CONFIG_PPC64
197 276#define HAVE_ARCH_PICK_MMAP_LAYOUT
198#endif /* !__ASSEMBLY__ */ 277#endif
199 278
200#endif /* __ASM_PPC_PROCESSOR_H */
201#endif /* __KERNEL__ */ 279#endif /* __KERNEL__ */
280#endif /* __ASSEMBLY__ */
281#endif /* _ASM_POWERPC_PROCESSOR_H */
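
The new cpu_relax() drops SMT thread priority (HMT_low) while spinning and restores it (HMT_medium) on 64-bit, and degenerates to a plain compiler barrier on 32-bit. A hedged sketch of the usual spin-wait idiom; wait_for_flag() is an illustrative helper, not something defined by this header.

#include <asm/processor.h>

/* Spin until *flag becomes non-zero, lowering SMT priority while we wait. */
static void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}
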
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
new file mode 100644
index 000000000000..3a0104fa0462
--- /dev/null
+++ b/include/asm-powerpc/prom.h
@@ -0,0 +1,219 @@
1#ifndef _POWERPC_PROM_H
2#define _POWERPC_PROM_H
3#ifdef __KERNEL__
4
5/*
6 * Definitions for talking to the Open Firmware PROM on
7 * Power Macintosh computers.
8 *
9 * Copyright (C) 1996-2005 Paul Mackerras.
10 *
11 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#include <linux/config.h>
19#include <linux/types.h>
20#include <linux/proc_fs.h>
21#include <asm/atomic.h>
22
23/* Definitions used by the flattened device tree */
24#define OF_DT_HEADER 0xd00dfeed /* marker */
25#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
26#define OF_DT_END_NODE 0x2 /* End node */
27#define OF_DT_PROP 0x3 /* Property: name off, size,
28 * content */
29#define OF_DT_NOP 0x4 /* nop */
30#define OF_DT_END 0x9
31
32#define OF_DT_VERSION 0x10
33
34/*
35 * This is what gets passed to the kernel by prom_init or kexec
36 *
 37 * The dt struct contains the device tree structure, full paths and
38 * property contents. The dt strings contain a separate block with just
39 * the strings for the property names, and is fully page aligned and
40 * self contained in a page, so that it can be kept around by the kernel,
41 * each property name appears only once in this page (cheap compression)
42 *
43 * the mem_rsvmap contains a map of reserved ranges of physical memory,
44 * passing it here instead of in the device-tree itself greatly simplifies
45 * the job of everybody. It's just a list of u64 pairs (base/size) that
46 * ends when size is 0
47 */
48struct boot_param_header
49{
50 u32 magic; /* magic word OF_DT_HEADER */
51 u32 totalsize; /* total size of DT block */
52 u32 off_dt_struct; /* offset to structure */
53 u32 off_dt_strings; /* offset to strings */
54 u32 off_mem_rsvmap; /* offset to memory reserve map */
55 u32 version; /* format version */
56 u32 last_comp_version; /* last compatible version */
57 /* version 2 fields below */
58 u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
59 /* version 3 fields below */
60 u32 dt_strings_size; /* size of the DT strings block */
61};
62
63
64
65typedef u32 phandle;
66typedef u32 ihandle;
67
68struct address_range {
69 unsigned long space;
70 unsigned long address;
71 unsigned long size;
72};
73
74struct interrupt_info {
75 int line;
76 int sense; /* +ve/-ve logic, edge or level, etc. */
77};
78
79struct pci_address {
80 u32 a_hi;
81 u32 a_mid;
82 u32 a_lo;
83};
84
85struct isa_address {
86 u32 a_hi;
87 u32 a_lo;
88};
89
90struct isa_range {
91 struct isa_address isa_addr;
92 struct pci_address pci_addr;
93 unsigned int size;
94};
95
96struct reg_property {
97 unsigned long address;
98 unsigned long size;
99};
100
101struct reg_property32 {
102 unsigned int address;
103 unsigned int size;
104};
105
106struct reg_property64 {
107 u64 address;
108 u64 size;
109};
110
111struct property {
112 char *name;
113 int length;
114 unsigned char *value;
115 struct property *next;
116};
117
118struct device_node {
119 char *name;
120 char *type;
121 phandle node;
122 phandle linux_phandle;
123 int n_addrs;
124 struct address_range *addrs;
125 int n_intrs;
126 struct interrupt_info *intrs;
127 char *full_name;
128
129 struct property *properties;
130 struct device_node *parent;
131 struct device_node *child;
132 struct device_node *sibling;
133 struct device_node *next; /* next device of same type */
134 struct device_node *allnext; /* next in list of all nodes */
135 struct proc_dir_entry *pde; /* this node's proc directory */
136 struct kref kref;
137 unsigned long _flags;
138 void *data;
139};
140
141extern struct device_node *of_chosen;
142
143/* flag descriptions */
144#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
145
146#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
147#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
148
149#define HAVE_ARCH_DEVTREE_FIXUPS
150
151static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
152{
153 dn->pde = de;
154}
155
156
157/* OBSOLETE: Old style node lookup */
158extern struct device_node *find_devices(const char *name);
159extern struct device_node *find_type_devices(const char *type);
160extern struct device_node *find_path_device(const char *path);
161extern struct device_node *find_compatible_devices(const char *type,
162 const char *compat);
163extern struct device_node *find_all_nodes(void);
164
165/* New style node lookup */
166extern struct device_node *of_find_node_by_name(struct device_node *from,
167 const char *name);
168extern struct device_node *of_find_node_by_type(struct device_node *from,
169 const char *type);
170extern struct device_node *of_find_compatible_node(struct device_node *from,
171 const char *type, const char *compat);
172extern struct device_node *of_find_node_by_path(const char *path);
173extern struct device_node *of_find_node_by_phandle(phandle handle);
174extern struct device_node *of_find_all_nodes(struct device_node *prev);
175extern struct device_node *of_get_parent(const struct device_node *node);
176extern struct device_node *of_get_next_child(const struct device_node *node,
177 struct device_node *prev);
178extern struct device_node *of_node_get(struct device_node *node);
179extern void of_node_put(struct device_node *node);
180
181/* For updating the device tree at runtime */
182extern void of_attach_node(struct device_node *);
183extern void of_detach_node(const struct device_node *);
184
185/* Other Prototypes */
186extern void finish_device_tree(void);
187extern void unflatten_device_tree(void);
188extern void early_init_devtree(void *);
189extern int device_is_compatible(struct device_node *device, const char *);
190extern int machine_is_compatible(const char *compat);
191extern unsigned char *get_property(struct device_node *node, const char *name,
192 int *lenp);
193extern void print_properties(struct device_node *node);
194extern int prom_n_addr_cells(struct device_node* np);
195extern int prom_n_size_cells(struct device_node* np);
196extern int prom_n_intr_cells(struct device_node* np);
197extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
198extern void prom_add_property(struct device_node* np, struct property* prop);
199
200#ifdef CONFIG_PPC32
201/*
202 * PCI <-> OF matching functions
203 * (XXX should these be here?)
204 */
205struct pci_bus;
206struct pci_dev;
207extern int pci_device_from_OF_node(struct device_node *node,
208 u8* bus, u8* devfn);
209extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
210extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
211extern void pci_create_OF_bus_map(void);
212#endif
213
214extern struct resource *request_OF_resource(struct device_node* node,
215 int index, const char* name_postfix);
216extern int release_OF_resource(struct device_node* node, int index);
217
218#endif /* __KERNEL__ */
219#endif /* _POWERPC_PROM_H */
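
The lookup and property accessors declared above are normally used together: iterate nodes of a given type, read a property, and let the iterator manage node refcounts. A sketch under the assumption that the platform's device tree carries a "clock-frequency" property on its cpu nodes; print_cpu_clocks() is illustrative only.

#include <linux/kernel.h>
#include <asm/prom.h>

/* Walk every node of type "cpu" and print its clock-frequency property. */
static void print_cpu_clocks(void)
{
	struct device_node *np = NULL;
	unsigned char *prop;
	int len;

	while ((np = of_find_node_by_type(np, "cpu")) != NULL) {
		prop = get_property(np, "clock-frequency", &len);
		if (prop != NULL && len >= 4)
			printk("%s: %u Hz\n", np->full_name,
			       *(unsigned int *)prop);
	}
}
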
diff --git a/include/asm-ppc/reg.h b/include/asm-powerpc/reg.h
index 73c33e3ef9c6..da848412f11b 100644
--- a/include/asm-ppc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -6,53 +6,107 @@
6 * Implementations of the PowerPC Architecture (a.k.a. Green Book) here. 6 * Implementations of the PowerPC Architecture (a.k.a. Green Book) here.
7 */ 7 */
8 8
9#ifndef _ASM_POWERPC_REG_H
10#define _ASM_POWERPC_REG_H
9#ifdef __KERNEL__ 11#ifdef __KERNEL__
10#ifndef __ASM_PPC_REGS_H__
11#define __ASM_PPC_REGS_H__
12 12
13#include <linux/stringify.h> 13#include <linux/stringify.h>
14#include <asm/cputable.h>
14 15
15/* Pickup Book E specific registers. */ 16/* Pickup Book E specific registers. */
16#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 17#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
17#include <asm/reg_booke.h> 18#include <asm/reg_booke.h>
18#endif 19#endif
19 20
20/* Machine State Register (MSR) Fields */ 21#define MSR_SF_LG 63 /* Enable 64 bit mode */
21#define MSR_SF (1<<63) 22#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
22#define MSR_ISF (1<<61) 23#define MSR_HV_LG 60 /* Hypervisor state */
23#define MSR_VEC (1<<25) /* Enable AltiVec */ 24#define MSR_VEC_LG 25 /* Enable AltiVec */
24#define MSR_POW (1<<18) /* Enable Power Management */ 25#define MSR_POW_LG 18 /* Enable Power Management */
25#define MSR_WE (1<<18) /* Wait State Enable */ 26#define MSR_WE_LG 18 /* Wait State Enable */
26#define MSR_TGPR (1<<17) /* TLB Update registers in use */ 27#define MSR_TGPR_LG 17 /* TLB Update registers in use */
27#define MSR_CE (1<<17) /* Critical Interrupt Enable */ 28#define MSR_CE_LG 17 /* Critical Interrupt Enable */
28#define MSR_ILE (1<<16) /* Interrupt Little Endian */ 29#define MSR_ILE_LG 16 /* Interrupt Little Endian */
29#define MSR_EE (1<<15) /* External Interrupt Enable */ 30#define MSR_EE_LG 15 /* External Interrupt Enable */
30#define MSR_PR (1<<14) /* Problem State / Privilege Level */ 31#define MSR_PR_LG 14 /* Problem State / Privilege Level */
31#define MSR_FP (1<<13) /* Floating Point enable */ 32#define MSR_FP_LG 13 /* Floating Point enable */
32#define MSR_ME (1<<12) /* Machine Check Enable */ 33#define MSR_ME_LG 12 /* Machine Check Enable */
33#define MSR_FE0 (1<<11) /* Floating Exception mode 0 */ 34#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
34#define MSR_SE (1<<10) /* Single Step */ 35#define MSR_SE_LG 10 /* Single Step */
35#define MSR_BE (1<<9) /* Branch Trace */ 36#define MSR_BE_LG 9 /* Branch Trace */
36#define MSR_DE (1<<9) /* Debug Exception Enable */ 37#define MSR_DE_LG 9 /* Debug Exception Enable */
37#define MSR_FE1 (1<<8) /* Floating Exception mode 1 */ 38#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
38#define MSR_IP (1<<6) /* Exception prefix 0x000/0xFFF */ 39#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
39#define MSR_IR (1<<5) /* Instruction Relocate */ 40#define MSR_IR_LG 5 /* Instruction Relocate */
40#define MSR_DR (1<<4) /* Data Relocate */ 41#define MSR_DR_LG 4 /* Data Relocate */
41#define MSR_PE (1<<3) /* Protection Enable */ 42#define MSR_PE_LG 3 /* Protection Enable */
42#define MSR_PX (1<<2) /* Protection Exclusive Mode */ 43#define MSR_PX_LG 2 /* Protection Exclusive Mode */
43#define MSR_RI (1<<1) /* Recoverable Exception */ 44#define MSR_PMM_LG 2 /* Performance monitor */
44#define MSR_LE (1<<0) /* Little Endian */ 45#define MSR_RI_LG 1 /* Recoverable Exception */
46#define MSR_LE_LG 0 /* Little Endian */
45 47
48#ifdef __ASSEMBLY__
49#define __MASK(X) (1<<(X))
50#else
51#define __MASK(X) (1UL<<(X))
52#endif
53
54#ifdef CONFIG_PPC64
55#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
56#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
57#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
58#else
59/* so tests for these bits fail on 32-bit */
60#define MSR_SF 0
61#define MSR_ISF 0
62#define MSR_HV 0
63#endif
64
65#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
66#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
67#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
68#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
69#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
70#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
71#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
72#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
73#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
74#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
75#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
76#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
77#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
78#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
79#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
80#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
81#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
82#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
83#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
84#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
85#ifndef MSR_PMM
86#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
87#endif
88#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
89#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
90
91#ifdef CONFIG_PPC64
92#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
93#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
94
95#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
96#define MSR_USER64 MSR_USER32 | MSR_SF
97
98#else /* 32-bit */
46/* Default MSR for kernel mode. */ 99/* Default MSR for kernel mode. */
100#ifndef MSR_KERNEL /* reg_booke.h also defines this */
47#ifdef CONFIG_APUS_FAST_EXCEPT 101#ifdef CONFIG_APUS_FAST_EXCEPT
48#define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR) 102#define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR)
49#endif 103#else
50
51#ifndef MSR_KERNEL
52#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) 104#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
53#endif 105#endif
106#endif
54 107
55#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) 108#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
109#endif
56 110
57/* Floating Point Status and Control Register (FPSCR) Fields */ 111/* Floating Point Status and Control Register (FPSCR) Fields */
58#define FPSCR_FX 0x80000000 /* FPU exception summary */ 112#define FPSCR_FX 0x80000000 /* FPU exception summary */
@@ -60,7 +114,7 @@
60#define FPSCR_VX 0x20000000 /* Invalid operation summary */ 114#define FPSCR_VX 0x20000000 /* Invalid operation summary */
61#define FPSCR_OX 0x10000000 /* Overflow exception summary */ 115#define FPSCR_OX 0x10000000 /* Overflow exception summary */
62#define FPSCR_UX 0x08000000 /* Underflow exception summary */ 116#define FPSCR_UX 0x08000000 /* Underflow exception summary */
63#define FPSCR_ZX 0x04000000 /* Zero-devide exception summary */ 117#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
64#define FPSCR_XX 0x02000000 /* Inexact exception summary */ 118#define FPSCR_XX 0x02000000 /* Inexact exception summary */
65#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */ 119#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
66#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */ 120#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
@@ -85,8 +139,18 @@
85 139
86/* Special Purpose Registers (SPRNs)*/ 140/* Special Purpose Registers (SPRNs)*/
87#define SPRN_CTR 0x009 /* Count Register */ 141#define SPRN_CTR 0x009 /* Count Register */
142#define SPRN_CTRLF 0x088
143#define SPRN_CTRLT 0x098
144#define CTRL_RUNLATCH 0x1
88#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ 145#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
146#define DABR_TRANSLATION (1UL << 2)
89#define SPRN_DAR 0x013 /* Data Address Register */ 147#define SPRN_DAR 0x013 /* Data Address Register */
148#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
149#define DSISR_NOHPTE 0x40000000 /* no translation found */
150#define DSISR_PROTFAULT 0x08000000 /* protection fault */
151#define DSISR_ISSTORE 0x02000000 /* access was a store */
152#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
153#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
90#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ 154#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
91#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ 155#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
92#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ 156#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
@@ -131,7 +195,6 @@
131#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */ 195#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
132#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */ 196#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
133#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ 197#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
134#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
135#define SPRN_EAR 0x11A /* External Address Register */ 198#define SPRN_EAR 0x11A /* External Address Register */
136#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ 199#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
137#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */ 200#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
@@ -187,6 +250,16 @@
187#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ 250#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
188#define SPRN_HID4 0x3F4 /* 970 HID4 */ 251#define SPRN_HID4 0x3F4 /* 970 HID4 */
189#define SPRN_HID5 0x3F6 /* 970 HID5 */ 252#define SPRN_HID5 0x3F6 /* 970 HID5 */
253#define SPRN_HID6 0x3F9 /* BE HID 6 */
254#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
255#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
256#define SPRN_TSCR 0x399 /* Thread switch control on BE */
257#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
258#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
259#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
260#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
261#define SPRN_TSC 0x3FD /* Thread switch control on others */
262#define SPRN_TST 0x3FC /* Thread switch timeout on others */
190#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2) 263#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
191#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */ 264#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
192#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */ 265#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
@@ -270,22 +343,18 @@
270#define L3CR_L3DO 0x00000040 /* L3 data only mode */ 343#define L3CR_L3DO 0x00000040 /* L3 data only mode */
271#define L3CR_PMEN 0x00000004 /* L3 private memory enable */ 344#define L3CR_PMEN 0x00000004 /* L3 private memory enable */
272#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */ 345#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */
346
273#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */ 347#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */
274#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 1 */ 348#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 1 */
275#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */ 349#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */
276#define SPRN_LDSTDB 0x3f4 /* */ 350#define SPRN_LDSTDB 0x3f4 /* */
277#define SPRN_LR 0x008 /* Link Register */ 351#define SPRN_LR 0x008 /* Link Register */
278#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
279#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
280#ifndef SPRN_PIR 352#ifndef SPRN_PIR
281#define SPRN_PIR 0x3FF /* Processor Identification Register */ 353#define SPRN_PIR 0x3FF /* Processor Identification Register */
282#endif 354#endif
283#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
284#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
285#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
286#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
287#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ 355#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
288#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ 356#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
357#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
289#define SPRN_PVR 0x11F /* Processor Version Register */ 358#define SPRN_PVR 0x11F /* Processor Version Register */
290#define SPRN_RPA 0x3D6 /* Required Physical Address Register */ 359#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
291#define SPRN_SDA 0x3BF /* Sampled Data Address Register */ 360#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
@@ -327,6 +396,52 @@
327#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */ 396#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
328#define SPRN_XER 0x001 /* Fixed Point Exception Register */ 397#define SPRN_XER 0x001 /* Fixed Point Exception Register */
329 398
399/* Performance monitor SPRs */
400#ifdef CONFIG_PPC64
401#define SPRN_MMCR0 795
402#define MMCR0_FC 0x80000000UL /* freeze counters */
403#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
404#define MMCR0_KERNEL_DISABLE MMCR0_FCS
405#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
406#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
407#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
408#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
409#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
410#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
411#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
412#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
413#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
414#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
415#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
416#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
417#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
418#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
419#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
420#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
421#define SPRN_MMCR1 798
422#define SPRN_MMCRA 0x312
423#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
424#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
425#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
426#define SPRN_PMC1 787
427#define SPRN_PMC2 788
428#define SPRN_PMC3 789
429#define SPRN_PMC4 790
430#define SPRN_PMC5 791
431#define SPRN_PMC6 792
432#define SPRN_PMC7 793
433#define SPRN_PMC8 794
434#define SPRN_SIAR 780
435#define SPRN_SDAR 781
436
437#else /* 32-bit */
438#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
439#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
440#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
441#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
442#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
443#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
444
330/* Bit definitions for MMCR0 and PMC1 / PMC2. */ 445/* Bit definitions for MMCR0 and PMC1 / PMC2. */
331#define MMCR0_PMC1_CYCLES (1 << 7) 446#define MMCR0_PMC1_CYCLES (1 << 7)
332#define MMCR0_PMC1_ICACHEMISS (5 << 7) 447#define MMCR0_PMC1_ICACHEMISS (5 << 7)
@@ -336,14 +451,15 @@
336#define MMCR0_PMC2_ITLB 0x7 451#define MMCR0_PMC2_ITLB 0x7
337#define MMCR0_PMC2_LOADMISSTIME 0x5 452#define MMCR0_PMC2_LOADMISSTIME 0x5
338#define MMCR0_PMXE (1 << 26) 453#define MMCR0_PMXE (1 << 26)
339 454#endif
340/* Processor Version Register */
341 455
342/* Processor Version Register (PVR) field extraction */ 456/* Processor Version Register (PVR) field extraction */
343 457
344#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ 458#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
345#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */ 459#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
346 460
461#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv))
462
347/* 463/*
348 * IBM has further subdivided the standard PowerPC 16-bit version and 464 * IBM has further subdivided the standard PowerPC 16-bit version and
349 * revision subfields of the PVR for the PowerPC 403s into the following: 465 * revision subfields of the PVR for the PowerPC 403s into the following:
@@ -399,42 +515,99 @@
399#define PVR_8245 0x80811014 515#define PVR_8245 0x80811014
400#define PVR_8260 PVR_8240 516#define PVR_8260 PVR_8240
401 517
402#if 0 518/* 64-bit processors */
403/* Segment Registers */ 519/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
404#define SR0 0 520#define PV_NORTHSTAR 0x0033
405#define SR1 1 521#define PV_PULSAR 0x0034
406#define SR2 2 522#define PV_POWER4 0x0035
407#define SR3 3 523#define PV_ICESTAR 0x0036
408#define SR4 4 524#define PV_SSTAR 0x0037
409#define SR5 5 525#define PV_POWER4p 0x0038
410#define SR6 6 526#define PV_970 0x0039
411#define SR7 7 527#define PV_POWER5 0x003A
412#define SR8 8 528#define PV_POWER5p 0x003B
413#define SR9 9 529#define PV_970FX 0x003C
414#define SR10 10 530#define PV_630 0x0040
415#define SR11 11 531#define PV_630p 0x0041
416#define SR12 12 532#define PV_970MP 0x0044
417#define SR13 13 533#define PV_BE 0x0070
418#define SR14 14 534
419#define SR15 15 535/*
420#endif 536 * Number of entries in the SLB. If this ever changes we should handle
537 * it with a use a cpu feature fixup.
538 */
539#define SLB_NUM_ENTRIES 64
421 540
422/* Macros for setting and retrieving special purpose registers */ 541/* Macros for setting and retrieving special purpose registers */
423#ifndef __ASSEMBLY__ 542#ifndef __ASSEMBLY__
424#define mfmsr() ({unsigned int rval; \ 543#define mfmsr() ({unsigned long rval; \
425 asm volatile("mfmsr %0" : "=r" (rval)); rval;}) 544 asm volatile("mfmsr %0" : "=r" (rval)); rval;})
545#ifdef CONFIG_PPC64
546#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
547 : : "r" (v))
548#define mtmsrd(v) __mtmsrd((v), 0)
549#define mtmsr(v) mtmsrd(v)
550#else
426#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v)) 551#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
552#endif
427 553
428#define mfspr(rn) ({unsigned int rval; \ 554#define mfspr(rn) ({unsigned long rval; \
429 asm volatile("mfspr %0," __stringify(rn) \ 555 asm volatile("mfspr %0," __stringify(rn) \
430 : "=r" (rval)); rval;}) 556 : "=r" (rval)); rval;})
431#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)) 557#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
432 558
559#define mftb() ({unsigned long rval; \
560 asm volatile("mftb %0" : "=r" (rval)); rval;})
561#define mftbl() ({unsigned long rval; \
562 asm volatile("mftbl %0" : "=r" (rval)); rval;})
563
564#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
565#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
566
567#ifdef CONFIG_PPC32
433#define mfsrin(v) ({unsigned int rval; \ 568#define mfsrin(v) ({unsigned int rval; \
434 asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ 569 asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
435 rval;}) 570 rval;})
571#endif
436 572
437#define proc_trap() asm volatile("trap") 573#define proc_trap() asm volatile("trap")
574
575#ifdef CONFIG_PPC64
576static inline void ppc64_runlatch_on(void)
577{
578 unsigned long ctrl;
579
580 if (cpu_has_feature(CPU_FTR_CTRL)) {
581 ctrl = mfspr(SPRN_CTRLF);
582 ctrl |= CTRL_RUNLATCH;
583 mtspr(SPRN_CTRLT, ctrl);
584 }
585}
586
587static inline void ppc64_runlatch_off(void)
588{
589 unsigned long ctrl;
590
591 if (cpu_has_feature(CPU_FTR_CTRL)) {
592 ctrl = mfspr(SPRN_CTRLF);
593 ctrl &= ~CTRL_RUNLATCH;
594 mtspr(SPRN_CTRLT, ctrl);
595 }
596}
597#endif
598
599#define __get_SP() ({unsigned long sp; \
600 asm volatile("mr %0,1": "=r" (sp)); sp;})
601
602#else /* __ASSEMBLY__ */
603
604#define RUNLATCH_ON(REG) \
605BEGIN_FTR_SECTION \
606 mfspr (REG),SPRN_CTRLF; \
607 ori (REG),(REG),CTRL_RUNLATCH; \
608 mtspr SPRN_CTRLT,(REG); \
609END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
610
438#endif /* __ASSEMBLY__ */ 611#endif /* __ASSEMBLY__ */
439#endif /* __ASM_PPC_REGS_H__ */
440#endif /* __KERNEL__ */ 612#endif /* __KERNEL__ */
613#endif /* _ASM_POWERPC_REG_H */
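
mfspr()/mtspr() together with the PVR_VER/PVR_REV extractors and __is_processor() let C code identify the CPU. A small sketch; report_pvr() is a hypothetical helper, and the PV_970FX check is only meaningful where the 64-bit PV_ constants apply.

#include <linux/kernel.h>
#include <asm/reg.h>

/* Read the PVR and report its version and revision fields. */
static void report_pvr(void)
{
	unsigned long pvr = mfspr(SPRN_PVR);

	printk("PVR: version 0x%lx revision 0x%lx\n",
	       PVR_VER(pvr), PVR_REV(pvr));

	if (__is_processor(PV_970FX))
		printk("PowerPC 970FX detected\n");
}
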
diff --git a/include/asm-ppc64/rtas.h b/include/asm-powerpc/rtas.h
index e7d1b5222802..2c050332471d 100644
--- a/include/asm-ppc64/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_RTAS_H 1#ifndef _POWERPC_RTAS_H
2#define _PPC64_RTAS_H 2#define _POWERPC_RTAS_H
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <asm/page.h> 5#include <asm/page.h>
@@ -190,7 +190,7 @@ extern void rtas_progress(char *s, unsigned short hex);
190extern void rtas_initialize(void); 190extern void rtas_initialize(void);
191 191
192struct rtc_time; 192struct rtc_time;
193extern void rtas_get_boot_time(struct rtc_time *rtc_time); 193extern unsigned long rtas_get_boot_time(void);
194extern void rtas_get_rtc_time(struct rtc_time *rtc_time); 194extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
195extern int rtas_set_rtc_time(struct rtc_time *rtc_time); 195extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
196 196
@@ -246,4 +246,4 @@ extern unsigned long rtas_rmo_buf;
246 246
247#define GLOBAL_INTERRUPT_QUEUE 9005 247#define GLOBAL_INTERRUPT_QUEUE 9005
248 248
249#endif /* _PPC64_RTAS_H */ 249#endif /* _POWERPC_RTAS_H */
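
Beyond the header-guard rename, this file keeps the RTAS RTC entry points (rtas_get_rtc_time/rtas_set_rtc_time). A sketch of how they pair up, assuming an RTAS-capable platform; bump_rtc_minute() is made up for illustration.

#include <linux/rtc.h>
#include <asm/rtas.h>

/* Read the RTC through RTAS, advance the minute field, and write it back. */
static int bump_rtc_minute(void)
{
	struct rtc_time tm;

	rtas_get_rtc_time(&tm);
	tm.tm_min = (tm.tm_min + 1) % 60;
	return rtas_set_rtc_time(&tm);
}
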
diff --git a/include/asm-powerpc/rtc.h b/include/asm-powerpc/rtc.h
new file mode 100644
index 000000000000..f5802926b6c0
--- /dev/null
+++ b/include/asm-powerpc/rtc.h
@@ -0,0 +1,78 @@
1/*
2 * Real-time clock definitions and interfaces
3 *
4 * Author: Tom Rini <trini@mvista.com>
5 *
6 * 2002 (c) MontaVista, Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 *
11 * Based on:
12 * include/asm-m68k/rtc.h
13 *
14 * Copyright Richard Zidlicky
15 * implementation details for genrtc/q40rtc driver
16 *
17 * And the old drivers/macintosh/rtc.c which was heavily based on:
18 * Linux/SPARC Real Time Clock Driver
19 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
20 *
21 * With additional work by Paul Mackerras and Franz Sirl.
22 */
23
24#ifndef __ASM_POWERPC_RTC_H__
25#define __ASM_POWERPC_RTC_H__
26
27#ifdef __KERNEL__
28
29#include <linux/rtc.h>
30
31#include <asm/machdep.h>
32#include <asm/time.h>
33
34#define RTC_PIE 0x40 /* periodic interrupt enable */
35#define RTC_AIE 0x20 /* alarm interrupt enable */
36#define RTC_UIE 0x10 /* update-finished interrupt enable */
37
38/* some dummy definitions */
39#define RTC_BATT_BAD 0x100 /* battery bad */
40#define RTC_SQWE 0x08 /* enable square-wave output */
41#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
42#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
43#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
44
45static inline unsigned int get_rtc_time(struct rtc_time *time)
46{
47 if (ppc_md.get_rtc_time)
48 ppc_md.get_rtc_time(time);
49 return RTC_24H;
50}
51
52/* Set the current date and time in the real time clock. */
53static inline int set_rtc_time(struct rtc_time *time)
54{
55 if (ppc_md.set_rtc_time)
56 return ppc_md.set_rtc_time(time);
57 return -EINVAL;
58}
59
60static inline unsigned int get_rtc_ss(void)
61{
62 struct rtc_time h;
63
64 get_rtc_time(&h);
65 return h.tm_sec;
66}
67
68static inline int get_rtc_pll(struct rtc_pll_info *pll)
69{
70 return -EINVAL;
71}
72static inline int set_rtc_pll(struct rtc_pll_info *pll)
73{
74 return -EINVAL;
75}
76
77#endif /* __KERNEL__ */
78#endif /* __ASM_POWERPC_RTC_H__ */
diff --git a/include/asm-ppc64/rwsem.h b/include/asm-powerpc/rwsem.h
index 7a647fae3765..79bae4933b73 100644
--- a/include/asm-ppc64/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -1,18 +1,14 @@
1#ifndef _ASM_POWERPC_RWSEM_H
2#define _ASM_POWERPC_RWSEM_H
3
4#ifdef __KERNEL__
5
1/* 6/*
2 * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff 7 * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
3 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h 8 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
4 * by Paul Mackerras <paulus@samba.org>. 9 * by Paul Mackerras <paulus@samba.org>.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef _PPC64_RWSEM_H
13#define _PPC64_RWSEM_H
14
15#ifdef __KERNEL__
16#include <linux/list.h> 12#include <linux/list.h>
17#include <linux/spinlock.h> 13#include <linux/spinlock.h>
18#include <asm/atomic.h> 14#include <asm/atomic.h>
@@ -168,5 +164,5 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
168 return (sem->count != 0); 164 return (sem->count != 0);
169} 165}
170 166
171#endif /* __KERNEL__ */ 167#endif /* __KERNEL__ */
172#endif /* _PPC_RWSEM_XADD_H */ 168#endif /* _ASM_POWERPC_RWSEM_H */
diff --git a/include/asm-ppc64/scatterlist.h b/include/asm-powerpc/scatterlist.h
index cecce6c6dfbb..8c992d1491d4 100644
--- a/include/asm-ppc64/scatterlist.h
+++ b/include/asm-powerpc/scatterlist.h
@@ -1,6 +1,5 @@
1#ifndef _PPC64_SCATTERLIST_H 1#ifndef _ASM_POWERPC_SCATTERLIST_H
2#define _PPC64_SCATTERLIST_H 2#define _ASM_POWERPC_SCATTERLIST_H
3
4/* 3/*
5 * Copyright (C) 2001 PPC64 Team, IBM Corp 4 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 * 5 *
@@ -10,6 +9,7 @@
10 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
11 */ 10 */
12 11
12#ifdef __KERNEL__
13#include <linux/types.h> 13#include <linux/types.h>
14#include <asm/dma.h> 14#include <asm/dma.h>
15 15
@@ -19,13 +19,27 @@ struct scatterlist {
19 unsigned int length; 19 unsigned int length;
20 20
21 /* For TCE support */ 21 /* For TCE support */
22 u32 dma_address; 22 dma_addr_t dma_address;
23 u32 dma_length; 23 u32 dma_length;
24}; 24};
25 25
26/*
27 * These macros should be used after a dma_map_sg call has been done
28 * to get bus addresses of each of the SG entries and their lengths.
29 * You should only work with the number of sg entries pci_map_sg
30 * returns, or alternatively stop on the first sg_dma_len(sg) which
31 * is 0.
32 */
26#define sg_dma_address(sg) ((sg)->dma_address) 33#define sg_dma_address(sg) ((sg)->dma_address)
34#ifdef __powerpc64__
27#define sg_dma_len(sg) ((sg)->dma_length) 35#define sg_dma_len(sg) ((sg)->dma_length)
36#else
37#define sg_dma_len(sg) ((sg)->length)
38#endif
28 39
40#ifdef __powerpc64__
29#define ISA_DMA_THRESHOLD (~0UL) 41#define ISA_DMA_THRESHOLD (~0UL)
42#endif
30 43
31#endif /* !(_PPC64_SCATTERLIST_H) */ 44#endif /* __KERNEL__ */
45#endif /* _ASM_POWERPC_SCATTERLIST_H */
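
The comment added above spells out the contract for these accessors: iterate only over the count that dma_map_sg() returned, and treat a zero sg_dma_len() as the end of the mapped list. A minimal sketch of that usage, assuming only the generic DMA mapping API of this kernel; the device-programming step is just a placeholder and the function name is invented for the example:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <asm/scatterlist.h>

static int program_sg_list(struct device *dev, struct scatterlist *sg, int nents)
{
	int i, mapped;

	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -EIO;

	for (i = 0; i < mapped; i++) {
		if (sg_dma_len(&sg[i]) == 0)
			break;	/* end of the mapped entries */
		/* hand sg_dma_address(&sg[i]) / sg_dma_len(&sg[i]) to the device here */
	}

	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}
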
diff --git a/include/asm-ppc64/seccomp.h b/include/asm-powerpc/seccomp.h
index c130c334bda1..1e1cfe12882b 100644
--- a/include/asm-ppc64/seccomp.h
+++ b/include/asm-powerpc/seccomp.h
@@ -1,11 +1,6 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef _ASM_POWERPC_SECCOMP_H
2
3#include <linux/thread_info.h> /* already defines TIF_32BIT */
4
5#ifndef TIF_32BIT
6#error "unexpected TIF_32BIT on ppc64"
7#endif
8 2
3#include <linux/thread_info.h>
9#include <linux/unistd.h> 4#include <linux/unistd.h>
10 5
11#define __NR_seccomp_read __NR_read 6#define __NR_seccomp_read __NR_read
@@ -18,4 +13,4 @@
18#define __NR_seccomp_exit_32 __NR_exit 13#define __NR_seccomp_exit_32 __NR_exit
19#define __NR_seccomp_sigreturn_32 __NR_sigreturn 14#define __NR_seccomp_sigreturn_32 __NR_sigreturn
20 15
21#endif /* _ASM_SECCOMP_H */ 16#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/include/asm-ppc64/sections.h b/include/asm-powerpc/sections.h
index 308ca6f5ced2..47be2ac2a925 100644
--- a/include/asm-ppc64/sections.h
+++ b/include/asm-powerpc/sections.h
@@ -1,22 +1,11 @@
1#ifndef _PPC64_SECTIONS_H 1#ifndef _ASM_POWERPC_SECTIONS_H
2#define _PPC64_SECTIONS_H 2#define _ASM_POWERPC_SECTIONS_H
3
4extern char _end[];
5 3
6#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
7 5
8#define __pmac 6#ifdef __powerpc64__
9#define __pmacdata
10
11#define __prep
12#define __prepdata
13
14#define __chrp
15#define __chrpdata
16
17#define __openfirmware
18#define __openfirmwaredata
19 7
8extern char _end[];
20 9
21static inline int in_kernel_text(unsigned long addr) 10static inline int in_kernel_text(unsigned long addr)
22{ 11{
@@ -27,3 +16,5 @@ static inline int in_kernel_text(unsigned long addr)
27} 16}
28 17
29#endif 18#endif
19
20#endif /* _ASM_POWERPC_SECTIONS_H */
diff --git a/include/asm-ppc64/semaphore.h b/include/asm-powerpc/semaphore.h
index aefe7753ea41..57369d2cadef 100644
--- a/include/asm-ppc64/semaphore.h
+++ b/include/asm-powerpc/semaphore.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_SEMAPHORE_H 1#ifndef _ASM_POWERPC_SEMAPHORE_H
2#define _PPC64_SEMAPHORE_H 2#define _ASM_POWERPC_SEMAPHORE_H
3 3
4/* 4/*
5 * Remove spinlock-based RW semaphores; RW semaphore definitions are 5 * Remove spinlock-based RW semaphores; RW semaphore definitions are
@@ -31,9 +31,6 @@ struct semaphore {
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32} 32}
33 33
34#define __MUTEX_INITIALIZER(name) \
35 __SEMAPHORE_INITIALIZER(name, 1)
36
37#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ 34#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
38 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
39 36
@@ -95,4 +92,4 @@ static inline void up(struct semaphore * sem)
95 92
96#endif /* __KERNEL__ */ 93#endif /* __KERNEL__ */
97 94
98#endif /* !(_PPC64_SEMAPHORE_H) */ 95#endif /* _ASM_POWERPC_SEMAPHORE_H */
diff --git a/include/asm-ppc64/smu.h b/include/asm-powerpc/smu.h
index dee8eefe47bc..dee8eefe47bc 100644
--- a/include/asm-ppc64/smu.h
+++ b/include/asm-powerpc/smu.h
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-powerpc/spinlock_types.h
index a37c8eabb9f2..74236c9f05b1 100644
--- a/include/asm-ppc64/spinlock_types.h
+++ b/include/asm-powerpc/spinlock_types.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_SPINLOCK_TYPES_H 1#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H 2#define _ASM_POWERPC_SPINLOCK_TYPES_H
3 3
4#ifndef __LINUX_SPINLOCK_TYPES_H 4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly" 5# error "please don't include this file directly"
diff --git a/include/asm-ppc64/sstep.h b/include/asm-powerpc/sstep.h
index 4a68db50ee6f..630a9889c07c 100644
--- a/include/asm-ppc64/sstep.h
+++ b/include/asm-powerpc/sstep.h
@@ -16,8 +16,10 @@ struct pt_regs;
16 * we don't allow putting a breakpoint on an mtmsrd instruction. 16 * we don't allow putting a breakpoint on an mtmsrd instruction.
17 * Similarly we don't allow breakpoints on rfid instructions. 17 * Similarly we don't allow breakpoints on rfid instructions.
18 * These macros tell us if an instruction is a mtmsrd or rfid. 18 * These macros tell us if an instruction is a mtmsrd or rfid.
19 * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
20 * and an mtmsrd (64-bit).
19 */ 21 */
20#define IS_MTMSRD(instr) (((instr) & 0xfc0007fe) == 0x7c000164) 22#define IS_MTMSRD(instr) (((instr) & 0xfc0007be) == 0x7c000124)
21#define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024) 23#define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024)
22 24
23/* Emulate instructions that cause a transfer of control. */ 25/* Emulate instructions that cause a transfer of control. */
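
The widened mask is what makes the new comment true: mtmsr is primary opcode 31 with extended opcode 146 (0x7c000124 when all register fields are zero), while mtmsrd uses extended opcode 178 (0x7c000164); the two encodings differ only in bit 0x40, which 0xfc0007be ignores. A standalone check of that arithmetic, with the instruction words hand-encoded for the example:

#include <assert.h>
#include <stdint.h>

#define IS_MTMSRD(instr)	(((instr) & 0xfc0007be) == 0x7c000124)

int main(void)
{
	uint32_t mtmsr  = 0x7c000124;	/* mtmsr  r0           */
	uint32_t mtmsrd = 0x7c000164;	/* mtmsrd r0 (64-bit)  */
	uint32_t rfid   = 0x4c000024;	/* rfid, for contrast  */

	assert(IS_MTMSRD(mtmsr));	/* 32-bit form matches */
	assert(IS_MTMSRD(mtmsrd));	/* 64-bit form matches */
	assert(!IS_MTMSRD(rfid));	/* unrelated opcode does not */
	return 0;
}
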
diff --git a/include/asm-ppc64/statfs.h b/include/asm-powerpc/statfs.h
index 3c985e5246a7..67024026c10d 100644
--- a/include/asm-ppc64/statfs.h
+++ b/include/asm-powerpc/statfs.h
@@ -1,12 +1,11 @@
1#ifndef _PPC64_STATFS_H 1#ifndef _ASM_POWERPC_STATFS_H
2#define _PPC64_STATFS_H 2#define _ASM_POWERPC_STATFS_H
3 3
4/* 4/* For ppc32 we just use the generic definitions, not so simple on ppc64 */
5 * This program is free software; you can redistribute it and/or 5
6 * modify it under the terms of the GNU General Public License 6#ifndef __powerpc64__
7 * as published by the Free Software Foundation; either version 7#include <asm-generic/statfs.h>
8 * 2 of the License, or (at your option) any later version. 8#else
9 */
10 9
11#ifndef __KERNEL_STRICT_NAMES 10#ifndef __KERNEL_STRICT_NAMES
12#include <linux/types.h> 11#include <linux/types.h>
@@ -57,5 +56,5 @@ struct compat_statfs64 {
57 __u32 f_frsize; 56 __u32 f_frsize;
58 __u32 f_spare[5]; 57 __u32 f_spare[5];
59}; 58};
60 59#endif /* ! __powerpc64__ */
61#endif /* _PPC64_STATFS_H */ 60#endif
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644
index 000000000000..4660c0394a77
--- /dev/null
+++ b/include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
1#ifndef _ASM_POWERPC_SYNCH_H
2#define _ASM_POWERPC_SYNCH_H
3
4#include <linux/config.h>
5
6#ifdef __powerpc64__
7#define __SUBARCH_HAS_LWSYNC
8#endif
9
10#ifdef __SUBARCH_HAS_LWSYNC
11# define LWSYNC lwsync
12#else
13# define LWSYNC sync
14#endif
15
16
17/*
18 * Arguably the bitops and *xchg operations don't imply any memory barrier
19 * or SMP ordering, but in fact a lot of drivers expect them to imply
20 * both, since they do on x86 cpus.
21 */
22#ifdef CONFIG_SMP
23#define EIEIO_ON_SMP "eieio\n"
24#define ISYNC_ON_SMP "\n\tisync"
25#define SYNC_ON_SMP __stringify(LWSYNC) "\n"
26#else
27#define EIEIO_ON_SMP
28#define ISYNC_ON_SMP
29#define SYNC_ON_SMP
30#endif
31
32static inline void eieio(void)
33{
34 __asm__ __volatile__ ("eieio" : : : "memory");
35}
36
37static inline void isync(void)
38{
39 __asm__ __volatile__ ("isync" : : : "memory");
40}
41
42#ifdef CONFIG_SMP
43#define eieio_on_smp() eieio()
44#define isync_on_smp() isync()
45#else
46#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
47#define isync_on_smp() __asm__ __volatile__("": : :"memory")
48#endif
49
50#endif /* _ASM_POWERPC_SYNCH_H */
51
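
eieio() is the primitive drivers reach for when two stores to non-cacheable (device) memory must stay in program order. A minimal, hypothetical sketch of that pattern; the register layout is invented for the example and is not part of this patch:

#define DMA_ADDR_REG	0	/* hypothetical register offsets */
#define DMA_START_REG	1

static void start_dma(volatile u32 *regs, u32 dma_addr)
{
	regs[DMA_ADDR_REG] = dma_addr;	/* program the address first...      */
	eieio();			/* ...and keep it ahead of the kick  */
	regs[DMA_START_REG] = 1;	/* then start the transfer           */
}
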
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
new file mode 100644
index 000000000000..5b2ecbc47907
--- /dev/null
+++ b/include/asm-powerpc/system.h
@@ -0,0 +1,363 @@
1/*
2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3 */
4#ifndef _ASM_POWERPC_SYSTEM_H
5#define _ASM_POWERPC_SYSTEM_H
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9
10#include <asm/hw_irq.h>
11#include <asm/ppc_asm.h>
12#include <asm/atomic.h>
13
14/*
15 * Memory barrier.
16 * The sync instruction guarantees that all memory accesses initiated
17 * by this processor have been performed (with respect to all other
18 * mechanisms that access memory). The eieio instruction is a barrier
19 * providing an ordering (separately) for (a) cacheable stores and (b)
20 * loads and stores to non-cacheable memory (e.g. I/O devices).
21 *
22 * mb() prevents loads and stores being reordered across this point.
23 * rmb() prevents loads being reordered across this point.
24 * wmb() prevents stores being reordered across this point.
25 * read_barrier_depends() prevents data-dependent loads being reordered
26 * across this point (nop on PPC).
27 *
28 * We have to use the sync instructions for mb(), since lwsync doesn't
29 * order loads with respect to previous stores. Lwsync is fine for
30 * rmb(), though. Note that lwsync is interpreted as sync by
31 * 32-bit and older 64-bit CPUs.
32 *
33 * For wmb(), we use sync since wmb is used in drivers to order
34 * stores to system memory with respect to writes to the device.
35 * However, smp_wmb() can be a lighter-weight eieio barrier on
36 * SMP since it is only used to order updates to system memory.
37 */
38#define mb() __asm__ __volatile__ ("sync" : : : "memory")
39#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
40#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
41#define read_barrier_depends() do { } while(0)
42
43#define set_mb(var, value) do { var = value; mb(); } while (0)
44#define set_wmb(var, value) do { var = value; wmb(); } while (0)
45
46#ifdef CONFIG_SMP
47#define smp_mb() mb()
48#define smp_rmb() rmb()
49#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
50#define smp_read_barrier_depends() read_barrier_depends()
51#else
52#define smp_mb() barrier()
53#define smp_rmb() barrier()
54#define smp_wmb() barrier()
55#define smp_read_barrier_depends() do { } while(0)
56#endif /* CONFIG_SMP */
57
58#ifdef __KERNEL__
59struct task_struct;
60struct pt_regs;
61
62#ifdef CONFIG_DEBUGGER
63
64extern int (*__debugger)(struct pt_regs *regs);
65extern int (*__debugger_ipi)(struct pt_regs *regs);
66extern int (*__debugger_bpt)(struct pt_regs *regs);
67extern int (*__debugger_sstep)(struct pt_regs *regs);
68extern int (*__debugger_iabr_match)(struct pt_regs *regs);
69extern int (*__debugger_dabr_match)(struct pt_regs *regs);
70extern int (*__debugger_fault_handler)(struct pt_regs *regs);
71
72#define DEBUGGER_BOILERPLATE(__NAME) \
73static inline int __NAME(struct pt_regs *regs) \
74{ \
75 if (unlikely(__ ## __NAME)) \
76 return __ ## __NAME(regs); \
77 return 0; \
78}
79
80DEBUGGER_BOILERPLATE(debugger)
81DEBUGGER_BOILERPLATE(debugger_ipi)
82DEBUGGER_BOILERPLATE(debugger_bpt)
83DEBUGGER_BOILERPLATE(debugger_sstep)
84DEBUGGER_BOILERPLATE(debugger_iabr_match)
85DEBUGGER_BOILERPLATE(debugger_dabr_match)
86DEBUGGER_BOILERPLATE(debugger_fault_handler)
87
88#ifdef CONFIG_XMON
89extern void xmon_init(int enable);
90#endif
91
92#else
93static inline int debugger(struct pt_regs *regs) { return 0; }
94static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
95static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
96static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
97static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
98static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
99static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
100#endif
101
102extern int set_dabr(unsigned long dabr);
103extern void print_backtrace(unsigned long *);
104extern void show_regs(struct pt_regs * regs);
105extern void flush_instruction_cache(void);
106extern void hard_reset_now(void);
107extern void poweroff_now(void);
108
109#ifdef CONFIG_6xx
110extern long _get_L2CR(void);
111extern long _get_L3CR(void);
112extern void _set_L2CR(unsigned long);
113extern void _set_L3CR(unsigned long);
114#else
115#define _get_L2CR() 0L
116#define _get_L3CR() 0L
117#define _set_L2CR(val) do { } while(0)
118#define _set_L3CR(val) do { } while(0)
119#endif
120
121extern void via_cuda_init(void);
122extern void read_rtc_time(void);
123extern void pmac_find_display(void);
124extern void giveup_fpu(struct task_struct *);
125extern void disable_kernel_fp(void);
126extern void enable_kernel_fp(void);
127extern void flush_fp_to_thread(struct task_struct *);
128extern void enable_kernel_altivec(void);
129extern void giveup_altivec(struct task_struct *);
130extern void load_up_altivec(struct task_struct *);
131extern int emulate_altivec(struct pt_regs *);
132extern void giveup_spe(struct task_struct *);
133extern void load_up_spe(struct task_struct *);
134extern int fix_alignment(struct pt_regs *);
135extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
136extern void cvt_df(double *from, float *to, struct thread_struct *thread);
137
138#ifdef CONFIG_ALTIVEC
139extern void flush_altivec_to_thread(struct task_struct *);
140#else
141static inline void flush_altivec_to_thread(struct task_struct *t)
142{
143}
144#endif
145
146#ifdef CONFIG_SPE
147extern void flush_spe_to_thread(struct task_struct *);
148#else
149static inline void flush_spe_to_thread(struct task_struct *t)
150{
151}
152#endif
153
154extern int call_rtas(const char *, int, int, unsigned long *, ...);
155extern void cacheable_memzero(void *p, unsigned int nb);
156extern void *cacheable_memcpy(void *, const void *, unsigned int);
157extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
158extern void bad_page_fault(struct pt_regs *, unsigned long, int);
159extern int die(const char *, struct pt_regs *, long);
160extern void _exception(int, struct pt_regs *, int, unsigned long);
161#ifdef CONFIG_BOOKE_WDT
162extern u32 booke_wdt_enabled;
163extern u32 booke_wdt_period;
164#endif /* CONFIG_BOOKE_WDT */
165
166/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
167extern unsigned char e2a(unsigned char);
168
169struct device_node;
170extern void note_scsi_host(struct device_node *, void *);
171
172extern struct task_struct *__switch_to(struct task_struct *,
173 struct task_struct *);
174#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
175
176struct thread_struct;
177extern struct task_struct *_switch(struct thread_struct *prev,
178 struct thread_struct *next);
179
180extern unsigned int rtas_data;
181extern int mem_init_done; /* set on boot once kmalloc can be called */
182extern unsigned long memory_limit;
183
184extern int powersave_nap; /* set if nap mode can be used in idle loop */
185
186/*
187 * Atomic exchange
188 *
189 * Changes the memory location '*ptr' to be val and returns
190 * the previous value stored there.
191 */
192static __inline__ unsigned long
193__xchg_u32(volatile void *p, unsigned long val)
194{
195 unsigned long prev;
196
197 __asm__ __volatile__(
198 EIEIO_ON_SMP
199"1: lwarx %0,0,%2 \n"
200 PPC405_ERR77(0,%2)
201" stwcx. %3,0,%2 \n\
202 bne- 1b"
203 ISYNC_ON_SMP
204 : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
205 : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
206 : "cc", "memory");
207
208 return prev;
209}
210
211#ifdef CONFIG_PPC64
212static __inline__ unsigned long
213__xchg_u64(volatile void *p, unsigned long val)
214{
215 unsigned long prev;
216
217 __asm__ __volatile__(
218 EIEIO_ON_SMP
219"1: ldarx %0,0,%2 \n"
220 PPC405_ERR77(0,%2)
221" stdcx. %3,0,%2 \n\
222 bne- 1b"
223 ISYNC_ON_SMP
224 : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
225 : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
226 : "cc", "memory");
227
228 return prev;
229}
230#endif
231
232/*
233 * This function doesn't exist, so you'll get a linker error
234 * if something tries to do an invalid xchg().
235 */
236extern void __xchg_called_with_bad_pointer(void);
237
238static __inline__ unsigned long
239__xchg(volatile void *ptr, unsigned long x, unsigned int size)
240{
241 switch (size) {
242 case 4:
243 return __xchg_u32(ptr, x);
244#ifdef CONFIG_PPC64
245 case 8:
246 return __xchg_u64(ptr, x);
247#endif
248 }
249 __xchg_called_with_bad_pointer();
250 return x;
251}
252
253#define xchg(ptr,x) \
254 ({ \
255 __typeof__(*(ptr)) _x_ = (x); \
256 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
257 })
258
259#define tas(ptr) (xchg((ptr),1))
260
261/*
262 * Compare and exchange - if *p == old, set it to new,
263 * and return the old value of *p.
264 */
265#define __HAVE_ARCH_CMPXCHG 1
266
267static __inline__ unsigned long
268__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
269{
270 unsigned int prev;
271
272 __asm__ __volatile__ (
273 EIEIO_ON_SMP
274"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
275 cmpw 0,%0,%3\n\
276 bne- 2f\n"
277 PPC405_ERR77(0,%2)
278" stwcx. %4,0,%2\n\
279 bne- 1b"
280 ISYNC_ON_SMP
281 "\n\
2822:"
283 : "=&r" (prev), "=m" (*p)
284 : "r" (p), "r" (old), "r" (new), "m" (*p)
285 : "cc", "memory");
286
287 return prev;
288}
289
290#ifdef CONFIG_PPC64
291static __inline__ unsigned long
292__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
293{
294 unsigned long prev;
295
296 __asm__ __volatile__ (
297 EIEIO_ON_SMP
298"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
299 cmpd 0,%0,%3\n\
300 bne- 2f\n\
301 stdcx. %4,0,%2\n\
302 bne- 1b"
303 ISYNC_ON_SMP
304 "\n\
3052:"
306 : "=&r" (prev), "=m" (*p)
307 : "r" (p), "r" (old), "r" (new), "m" (*p)
308 : "cc", "memory");
309
310 return prev;
311}
312#endif
313
314/* This function doesn't exist, so you'll get a linker error
315 if something tries to do an invalid cmpxchg(). */
316extern void __cmpxchg_called_with_bad_pointer(void);
317
318static __inline__ unsigned long
319__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
320 unsigned int size)
321{
322 switch (size) {
323 case 4:
324 return __cmpxchg_u32(ptr, old, new);
325#ifdef CONFIG_PPC64
326 case 8:
327 return __cmpxchg_u64(ptr, old, new);
328#endif
329 }
330 __cmpxchg_called_with_bad_pointer();
331 return old;
332}
333
334#define cmpxchg(ptr,o,n) \
335 ({ \
336 __typeof__(*(ptr)) _o_ = (o); \
337 __typeof__(*(ptr)) _n_ = (n); \
338 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
339 (unsigned long)_n_, sizeof(*(ptr))); \
340 })
341
342#ifdef CONFIG_PPC64
343/*
344 * We handle most unaligned accesses in hardware. On the other hand
345 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
346 * powers of 2 writes until it reaches sufficient alignment).
347 *
348 * Based on this we disable the IP header alignment in network drivers.
349 */
350#define NET_IP_ALIGN 0
351#endif
352
353#define arch_align_stack(x) (x)
354
355/* Used in very early kernel initialization. */
356extern unsigned long reloc_offset(void);
357extern unsigned long add_reloc_offset(unsigned long);
358extern void reloc_got2(unsigned long);
359
360#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
361
362#endif /* __KERNEL__ */
363#endif /* _ASM_POWERPC_SYSTEM_H */
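
Because the lwarx/stwcx. loops above are bracketed by EIEIO_ON_SMP and ISYNC_ON_SMP, xchg() and cmpxchg() act as full barriers on SMP and can carry ordinary lock-free update loops. A minimal sketch, assuming only the cmpxchg() defined in this header; the helper name and counter type are made up for the example:

static inline void atomic_add_ulong(unsigned long *counter, unsigned long n)
{
	unsigned long old;

	do {
		old = *counter;
		/* retry until no other cpu changed *counter under us */
	} while (cmpxchg(counter, old, old + n) != old);
}
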
diff --git a/include/asm-ppc64/thread_info.h b/include/asm-powerpc/thread_info.h
index 0494df6fca74..ab17db79f69d 100644
--- a/include/asm-ppc64/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -1,15 +1,25 @@
1/* thread_info.h: PPC low-level thread information 1/* thread_info.h: PowerPC low-level thread information
2 * adapted from the i386 version by Paul Mackerras 2 * adapted from the i386 version by Paul Mackerras
3 * 3 *
4 * Copyright (C) 2002 David Howells (dhowells@redhat.com) 4 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
5 * - Incorporating suggestions made by Linus Torvalds and Dave Miller 5 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
6 */ 6 */
7 7
8#ifndef _ASM_THREAD_INFO_H 8#ifndef _ASM_POWERPC_THREAD_INFO_H
9#define _ASM_THREAD_INFO_H 9#define _ASM_POWERPC_THREAD_INFO_H
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13/* We have 8k stacks on ppc32 and 16k on ppc64 */
14
15#ifdef CONFIG_PPC64
16#define THREAD_SHIFT 14
17#else
18#define THREAD_SHIFT 13
19#endif
20
21#define THREAD_SIZE (1 << THREAD_SHIFT)
22
13#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
14#include <linux/config.h> 24#include <linux/config.h>
15#include <linux/cache.h> 25#include <linux/cache.h>
@@ -24,7 +34,8 @@ struct thread_info {
24 struct task_struct *task; /* main task structure */ 34 struct task_struct *task; /* main task structure */
25 struct exec_domain *exec_domain; /* execution domain */ 35 struct exec_domain *exec_domain; /* execution domain */
26 int cpu; /* cpu we're on */ 36 int cpu; /* cpu we're on */
27 int preempt_count; /* 0 => preemptable, <0 => BUG */ 37 int preempt_count; /* 0 => preemptable,
38 <0 => BUG */
28 struct restart_block restart_block; 39 struct restart_block restart_block;
29 /* set by force_successful_syscall_return */ 40 /* set by force_successful_syscall_return */
30 unsigned char syscall_noerror; 41 unsigned char syscall_noerror;
@@ -54,32 +65,38 @@ struct thread_info {
54 65
55/* thread information allocation */ 66/* thread information allocation */
56 67
57#define THREAD_ORDER 2
58#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
59#define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)
60#ifdef CONFIG_DEBUG_STACK_USAGE 68#ifdef CONFIG_DEBUG_STACK_USAGE
61#define alloc_thread_info(tsk) \ 69#define THREAD_INFO_GFP GFP_KERNEL | __GFP_ZERO
62 ({ \
63 struct thread_info *ret; \
64 \
65 ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \
66 if (ret) \
67 memset(ret, 0, THREAD_SIZE); \
68 ret; \
69 })
70#else 70#else
71#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 71#define THREAD_INFO_GFP GFP_KERNEL
72#endif 72#endif
73
74#if THREAD_SHIFT >= PAGE_SHIFT
75
76#define THREAD_ORDER (THREAD_SHIFT - PAGE_SHIFT)
77
78#define alloc_thread_info(tsk) \
79 ((struct thread_info *)__get_free_pages(THREAD_INFO_GFP, THREAD_ORDER))
80#define free_thread_info(ti) free_pages((unsigned long)ti, THREAD_ORDER)
81
82#else /* THREAD_SHIFT < PAGE_SHIFT */
83
84#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, THREAD_INFO_GFP)
73#define free_thread_info(ti) kfree(ti) 85#define free_thread_info(ti) kfree(ti)
86
87#endif /* THREAD_SHIFT < PAGE_SHIFT */
88
74#define get_thread_info(ti) get_task_struct((ti)->task) 89#define get_thread_info(ti) get_task_struct((ti)->task)
75#define put_thread_info(ti) put_task_struct((ti)->task) 90#define put_thread_info(ti) put_task_struct((ti)->task)
76 91
77/* how to get the thread information struct from C */ 92/* how to get the thread information struct from C */
78static inline struct thread_info *current_thread_info(void) 93static inline struct thread_info *current_thread_info(void)
79{ 94{
80 struct thread_info *ti; 95 register unsigned long sp asm("r1");
81 __asm__("clrrdi %0,1,%1" : "=r"(ti) : "i" (THREAD_SHIFT)); 96
82 return ti; 97 /* gcc4, at least, is smart enough to turn this into a single
98 * rlwinm for ppc32 and clrrdi for ppc64 */
99 return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
83} 100}
84 101
85#endif /* __ASSEMBLY__ */ 102#endif /* __ASSEMBLY__ */
@@ -122,4 +139,4 @@ static inline struct thread_info *current_thread_info(void)
122 139
123#endif /* __KERNEL__ */ 140#endif /* __KERNEL__ */
124 141
125#endif /* _ASM_THREAD_INFO_H */ 142#endif /* _ASM_POWERPC_THREAD_INFO_H */
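
The new current_thread_info() relies on the kernel stack being THREAD_SIZE-aligned with the thread_info at its bottom, so clearing the low THREAD_SHIFT bits of r1 is enough. A standalone illustration of that arithmetic for the ppc64 case; the stack pointer value is invented for the example:

#include <assert.h>
#include <stdint.h>

#define THREAD_SHIFT	14			/* ppc64: 16KB stacks */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)

int main(void)
{
	uint64_t sp   = 0xc00000000f2c3d90ULL;	/* hypothetical kernel r1 value */
	uint64_t base = sp & ~(THREAD_SIZE - 1);	/* what current_thread_info() computes */

	assert(base == 0xc00000000f2c0000ULL);	/* bottom of the 16KB stack */
	return 0;
}
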
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
new file mode 100644
index 000000000000..410e795f7d43
--- /dev/null
+++ b/include/asm-powerpc/time.h
@@ -0,0 +1,226 @@
1/*
2 * Common time prototypes and such for all ppc machines.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef __POWERPC_TIME_H
14#define __POWERPC_TIME_H
15
16#ifdef __KERNEL__
17#include <linux/config.h>
18#include <linux/types.h>
19#include <linux/percpu.h>
20
21#include <asm/processor.h>
22#ifdef CONFIG_PPC64
23#include <asm/paca.h>
24#include <asm/iSeries/HvCall.h>
25#endif
26
27/* time.c */
28extern unsigned long tb_ticks_per_jiffy;
29extern unsigned long tb_ticks_per_usec;
30extern unsigned long tb_ticks_per_sec;
31extern u64 tb_to_xs;
32extern unsigned tb_to_us;
33extern unsigned long tb_last_stamp;
34extern u64 tb_last_jiffy;
35
36DECLARE_PER_CPU(unsigned long, last_jiffy);
37
38struct rtc_time;
39extern void to_tm(int tim, struct rtc_time * tm);
40extern time_t last_rtc_update;
41
42extern void generic_calibrate_decr(void);
43extern void wakeup_decrementer(void);
44
45/* Some sane defaults: 125 MHz timebase, 1GHz processor */
46extern unsigned long ppc_proc_freq;
47#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
48extern unsigned long ppc_tb_freq;
49#define DEFAULT_TB_FREQ 125000000UL
50
51/*
52 * By putting all of this stuff into a single struct we
53 * reduce the number of cache lines touched by do_gettimeofday.
54 * Both by collecting all of the data in one cache line and
55 * by touching only one TOC entry on ppc64.
56 */
57struct gettimeofday_vars {
58 u64 tb_to_xs;
59 u64 stamp_xsec;
60 u64 tb_orig_stamp;
61};
62
63struct gettimeofday_struct {
64 unsigned long tb_ticks_per_sec;
65 struct gettimeofday_vars vars[2];
66 struct gettimeofday_vars * volatile varp;
67 unsigned var_idx;
68 unsigned tb_to_us;
69};
70
71struct div_result {
72 u64 result_high;
73 u64 result_low;
74};
75
76/* Accessor functions for the timebase (RTC on 601) registers. */
77/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
78#ifdef CONFIG_6xx
79#define __USE_RTC() (!cpu_has_feature(CPU_FTR_USE_TB))
80#else
81#define __USE_RTC() 0
82#endif
83
84/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
85static inline unsigned long get_tbl(void)
86{
87 unsigned long tbl;
88
89#if defined(CONFIG_403GCX)
90 asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
91#else
92 asm volatile("mftb %0" : "=r" (tbl));
93#endif
94 return tbl;
95}
96
97static inline unsigned int get_tbu(void)
98{
99 unsigned int tbu;
100
101#if defined(CONFIG_403GCX)
102 asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
103#else
104 asm volatile("mftbu %0" : "=r" (tbu));
105#endif
106 return tbu;
107}
108
109static inline unsigned int get_rtcl(void)
110{
111 unsigned int rtcl;
112
113 asm volatile("mfrtcl %0" : "=r" (rtcl));
114 return rtcl;
115}
116
117static inline u64 get_rtc(void)
118{
119 unsigned int hi, lo, hi2;
120
121 do {
122 asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
123 : "=r" (hi), "=r" (lo), "=r" (hi2));
124 } while (hi2 != hi);
125 return (u64)hi * 1000000000 + lo;
126}
127
128#ifdef CONFIG_PPC64
129static inline u64 get_tb(void)
130{
131 return mftb();
132}
133#else
134static inline u64 get_tb(void)
135{
136 unsigned int tbhi, tblo, tbhi2;
137
138 do {
139 tbhi = get_tbu();
140 tblo = get_tbl();
141 tbhi2 = get_tbu();
142 } while (tbhi != tbhi2);
143
144 return ((u64)tbhi << 32) | tblo;
145}
146#endif
147
148static inline void set_tb(unsigned int upper, unsigned int lower)
149{
150 mtspr(SPRN_TBWL, 0);
151 mtspr(SPRN_TBWU, upper);
152 mtspr(SPRN_TBWL, lower);
153}
154
155/* Accessor functions for the decrementer register.
156 * The 4xx doesn't even have a decrementer. I tried to use the
157 * generic timer interrupt code, which seems OK, with the 4xx PIT
158 * in auto-reload mode. The problem is PIT stops counting when it
159 * hits zero. If it would wrap, we could use it just like a decrementer.
160 */
161static inline unsigned int get_dec(void)
162{
163#if defined(CONFIG_40x)
164 return (mfspr(SPRN_PIT));
165#else
166 return (mfspr(SPRN_DEC));
167#endif
168}
169
170static inline void set_dec(int val)
171{
172#if defined(CONFIG_40x)
173 return; /* Have to let it auto-reload */
174#elif defined(CONFIG_8xx_CPU6)
175 set_dec_cpu6(val);
176#else
177#ifdef CONFIG_PPC_ISERIES
178 struct paca_struct *lpaca = get_paca();
179 int cur_dec;
180
181 if (lpaca->lppaca.shared_proc) {
182 lpaca->lppaca.virtual_decr = val;
183 cur_dec = get_dec();
184 if (cur_dec > val)
185 HvCall_setVirtualDecr();
186 } else
187#endif
188 mtspr(SPRN_DEC, val);
189#endif /* not 40x or 8xx_CPU6 */
190}
191
192static inline unsigned long tb_ticks_since(unsigned long tstamp)
193{
194 if (__USE_RTC()) {
195 int delta = get_rtcl() - (unsigned int) tstamp;
196 return delta < 0 ? delta + 1000000000 : delta;
197 }
198 return get_tbl() - tstamp;
199}
200
201#define mulhwu(x,y) \
202({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
203
204#ifdef CONFIG_PPC64
205#define mulhdu(x,y) \
206({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
207#else
208extern u64 mulhdu(u64, u64);
209#endif
210
211extern void smp_space_timers(unsigned int);
212
213extern unsigned mulhwu_scale_factor(unsigned, unsigned);
214extern void div128_by_32(u64 dividend_high, u64 dividend_low,
215 unsigned divisor, struct div_result *dr);
216
217/* Used to store Processor Utilization register (purr) values */
218
219struct cpu_usage {
220 u64 current_tb; /* Holds the current purr register values */
221};
222
223DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
224
225#endif /* __KERNEL__ */
 226#endif /* __POWERPC_TIME_H */
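
tb_ticks_since() together with tb_ticks_per_usec is the usual way to turn two timebase samples into an elapsed time, and it also hides the RTC wrap handling on 601-class machines. A minimal sketch, assuming the platform's calibration code has already filled in tb_ticks_per_usec; the helper name is made up for the example:

static inline unsigned long tb_elapsed_us(unsigned long start_tbl)
{
	/* start_tbl should come from an earlier get_tbl() */
	return tb_ticks_since(start_tbl) / tb_ticks_per_usec;
}
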
diff --git a/include/asm-ppc64/types.h b/include/asm-powerpc/types.h
index bf294c1761b2..ec3c2ee8bf86 100644
--- a/include/asm-ppc64/types.h
+++ b/include/asm-powerpc/types.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_TYPES_H 1#ifndef _ASM_POWERPC_TYPES_H
2#define _PPC64_TYPES_H 2#define _ASM_POWERPC_TYPES_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
@@ -16,7 +16,11 @@
16 * 2 of the License, or (at your option) any later version. 16 * 2 of the License, or (at your option) any later version.
17 */ 17 */
18 18
19#ifdef __powerpc64__
19typedef unsigned int umode_t; 20typedef unsigned int umode_t;
21#else
22typedef unsigned short umode_t;
23#endif
20 24
21/* 25/*
22 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the 26 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
@@ -32,8 +36,15 @@ typedef unsigned short __u16;
32typedef __signed__ int __s32; 36typedef __signed__ int __s32;
33typedef unsigned int __u32; 37typedef unsigned int __u32;
34 38
39#ifdef __powerpc64__
35typedef __signed__ long __s64; 40typedef __signed__ long __s64;
36typedef unsigned long __u64; 41typedef unsigned long __u64;
42#else
43#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
44typedef __signed__ long long __s64;
45typedef unsigned long long __u64;
46#endif
47#endif /* __powerpc64__ */
37 48
38typedef struct { 49typedef struct {
39 __u32 u[4]; 50 __u32 u[4];
@@ -45,10 +56,16 @@ typedef struct {
45/* 56/*
46 * These aren't exported outside the kernel to avoid name space clashes 57 * These aren't exported outside the kernel to avoid name space clashes
47 */ 58 */
59#ifdef __powerpc64__
48#define BITS_PER_LONG 64 60#define BITS_PER_LONG 64
61#else
62#define BITS_PER_LONG 32
63#endif
49 64
50#ifndef __ASSEMBLY__ 65#ifndef __ASSEMBLY__
51 66
67#include <linux/config.h>
68
52typedef signed char s8; 69typedef signed char s8;
53typedef unsigned char u8; 70typedef unsigned char u8;
54 71
@@ -58,12 +75,21 @@ typedef unsigned short u16;
58typedef signed int s32; 75typedef signed int s32;
59typedef unsigned int u32; 76typedef unsigned int u32;
60 77
78#ifdef __powerpc64__
61typedef signed long s64; 79typedef signed long s64;
62typedef unsigned long u64; 80typedef unsigned long u64;
81#else
82typedef signed long long s64;
83typedef unsigned long long u64;
84#endif
63 85
64typedef __vector128 vector128; 86typedef __vector128 vector128;
65 87
88#ifdef __powerpc64__
89typedef u64 dma_addr_t;
90#else
66typedef u32 dma_addr_t; 91typedef u32 dma_addr_t;
92#endif
67typedef u64 dma64_addr_t; 93typedef u64 dma64_addr_t;
68 94
69typedef struct { 95typedef struct {
@@ -72,8 +98,13 @@ typedef struct {
72 unsigned long env; 98 unsigned long env;
73} func_descr_t; 99} func_descr_t;
74 100
101#ifdef CONFIG_LBD
102typedef u64 sector_t;
103#define HAVE_SECTOR_T
104#endif
105
75#endif /* __ASSEMBLY__ */ 106#endif /* __ASSEMBLY__ */
76 107
77#endif /* __KERNEL__ */ 108#endif /* __KERNEL__ */
78 109
79#endif /* _PPC64_TYPES_H */ 110#endif /* _ASM_POWERPC_TYPES_H */
diff --git a/include/asm-ppc/uninorth.h b/include/asm-powerpc/uninorth.h
index f737732c3861..f737732c3861 100644
--- a/include/asm-ppc/uninorth.h
+++ b/include/asm-powerpc/uninorth.h
diff --git a/include/asm-ppc/unistd.h b/include/asm-powerpc/unistd.h
index 3173ab3d2eb9..0991dfceef1d 100644
--- a/include/asm-ppc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -3,7 +3,13 @@
3 3
4/* 4/*
5 * This file contains the system call numbers. 5 * This file contains the system call numbers.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
6 */ 11 */
12
7#define __NR_restart_syscall 0 13#define __NR_restart_syscall 0
8#define __NR_exit 1 14#define __NR_exit 1
9#define __NR_fork 2 15#define __NR_fork 2
@@ -196,19 +202,23 @@
196#define __NR_vfork 189 202#define __NR_vfork 189
197#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */ 203#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
198#define __NR_readahead 191 204#define __NR_readahead 191
205#ifndef __powerpc64__ /* these are 32-bit only */
199#define __NR_mmap2 192 206#define __NR_mmap2 192
200#define __NR_truncate64 193 207#define __NR_truncate64 193
201#define __NR_ftruncate64 194 208#define __NR_ftruncate64 194
202#define __NR_stat64 195 209#define __NR_stat64 195
203#define __NR_lstat64 196 210#define __NR_lstat64 196
204#define __NR_fstat64 197 211#define __NR_fstat64 197
212#endif
205#define __NR_pciconfig_read 198 213#define __NR_pciconfig_read 198
206#define __NR_pciconfig_write 199 214#define __NR_pciconfig_write 199
207#define __NR_pciconfig_iobase 200 215#define __NR_pciconfig_iobase 200
208#define __NR_multiplexer 201 216#define __NR_multiplexer 201
209#define __NR_getdents64 202 217#define __NR_getdents64 202
210#define __NR_pivot_root 203 218#define __NR_pivot_root 203
219#ifndef __powerpc64__
211#define __NR_fcntl64 204 220#define __NR_fcntl64 204
221#endif
212#define __NR_madvise 205 222#define __NR_madvise 205
213#define __NR_mincore 206 223#define __NR_mincore 206
214#define __NR_gettid 207 224#define __NR_gettid 207
@@ -230,7 +240,9 @@
230#define __NR_sched_getaffinity 223 240#define __NR_sched_getaffinity 223
231/* 224 currently unused */ 241/* 224 currently unused */
232#define __NR_tuxcall 225 242#define __NR_tuxcall 225
243#ifndef __powerpc64__
233#define __NR_sendfile64 226 244#define __NR_sendfile64 226
245#endif
234#define __NR_io_setup 227 246#define __NR_io_setup 227
235#define __NR_io_destroy 228 247#define __NR_io_destroy 228
236#define __NR_io_getevents 229 248#define __NR_io_getevents 229
@@ -258,14 +270,16 @@
258#define __NR_utimes 251 270#define __NR_utimes 251
259#define __NR_statfs64 252 271#define __NR_statfs64 252
260#define __NR_fstatfs64 253 272#define __NR_fstatfs64 253
273#ifndef __powerpc64__
261#define __NR_fadvise64_64 254 274#define __NR_fadvise64_64 254
275#endif
262#define __NR_rtas 255 276#define __NR_rtas 255
263#define __NR_sys_debug_setcontext 256 277#define __NR_sys_debug_setcontext 256
264/* Number 257 is reserved for vserver */ 278/* Number 257 is reserved for vserver */
265/* 258 currently unused */ 279/* 258 currently unused */
266/* Number 259 is reserved for new sys_mbind */ 280#define __NR_mbind 259
267/* Number 260 is reserved for new sys_get_mempolicy */ 281#define __NR_get_mempolicy 260
268/* Number 261 is reserved for new sys_set_mempolicy */ 282#define __NR_set_mempolicy 261
269#define __NR_mq_open 262 283#define __NR_mq_open 262
270#define __NR_mq_unlink 263 284#define __NR_mq_unlink 263
271#define __NR_mq_timedsend 264 285#define __NR_mq_timedsend 264
@@ -285,7 +299,12 @@
285 299
286#define __NR_syscalls 278 300#define __NR_syscalls 278
287 301
288#define __NR(n) #n 302#ifdef __KERNEL__
303#define __NR__exit __NR_exit
304#define NR_syscalls __NR_syscalls
305#endif
306
307#ifndef __ASSEMBLY__
289 308
290/* On powerpc a system call basically clobbers the same registers like a 309/* On powerpc a system call basically clobbers the same registers like a
291 * function call, with the exception of LR (which is needed for the 310 * function call, with the exception of LR (which is needed for the
@@ -389,7 +408,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
389{ \ 408{ \
390 __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \ 409 __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
391} 410}
392
393#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \ 411#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
394type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \ 412type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
395{ \ 413{ \
@@ -398,12 +416,13 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
398 416
399#ifdef __KERNEL__ 417#ifdef __KERNEL__
400 418
401#define __NR__exit __NR_exit 419#include <linux/config.h>
402#define NR_syscalls __NR_syscalls 420#include <linux/types.h>
421#include <linux/compiler.h>
422#include <linux/linkage.h>
403 423
404#define __ARCH_WANT_IPC_PARSE_VERSION 424#define __ARCH_WANT_IPC_PARSE_VERSION
405#define __ARCH_WANT_OLD_READDIR 425#define __ARCH_WANT_OLD_READDIR
406#define __ARCH_WANT_OLD_STAT
407#define __ARCH_WANT_STAT64 426#define __ARCH_WANT_STAT64
408#define __ARCH_WANT_SYS_ALARM 427#define __ARCH_WANT_SYS_ALARM
409#define __ARCH_WANT_SYS_GETHOSTNAME 428#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -423,23 +442,17 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
423#define __ARCH_WANT_SYS_SIGPENDING 442#define __ARCH_WANT_SYS_SIGPENDING
424#define __ARCH_WANT_SYS_SIGPROCMASK 443#define __ARCH_WANT_SYS_SIGPROCMASK
425#define __ARCH_WANT_SYS_RT_SIGACTION 444#define __ARCH_WANT_SYS_RT_SIGACTION
426 445#ifdef CONFIG_PPC32
427/* 446#define __ARCH_WANT_OLD_STAT
428 * Forking from kernel space will result in the child getting a new, 447#endif
429 * empty kernel stack area. Thus the child cannot access automatic 448#ifdef CONFIG_PPC64
430 * variables set in the parent unless they are in registers, and the 449#define __ARCH_WANT_COMPAT_SYS_TIME
431 * procedure where the fork was done cannot return to its caller in 450#endif
432 * the child.
433 */
434
435#ifdef __KERNEL_SYSCALLS__
436
437#include <linux/compiler.h>
438#include <linux/types.h>
439 451
440/* 452/*
441 * System call prototypes. 453 * System call prototypes.
442 */ 454 */
455#ifdef __KERNEL_SYSCALLS__
443extern pid_t setsid(void); 456extern pid_t setsid(void);
444extern int write(int fd, const char *buf, off_t count); 457extern int write(int fd, const char *buf, off_t count);
445extern int read(int fd, char *buf, off_t count); 458extern int read(int fd, char *buf, off_t count);
@@ -449,10 +462,13 @@ extern int execve(const char *file, char **argv, char **envp);
449extern int open(const char *file, int flag, int mode); 462extern int open(const char *file, int flag, int mode);
450extern int close(int fd); 463extern int close(int fd);
451extern pid_t waitpid(pid_t pid, int *wait_stat, int options); 464extern pid_t waitpid(pid_t pid, int *wait_stat, int options);
465#endif /* __KERNEL_SYSCALLS__ */
452 466
453unsigned long sys_mmap(unsigned long addr, size_t len, 467/*
454 unsigned long prot, unsigned long flags, 468 * Functions that implement syscalls.
455 unsigned long fd, off_t offset); 469 */
470unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
471 unsigned long flags, unsigned long fd, off_t offset);
456unsigned long sys_mmap2(unsigned long addr, size_t len, 472unsigned long sys_mmap2(unsigned long addr, size_t len,
457 unsigned long prot, unsigned long flags, 473 unsigned long prot, unsigned long flags,
458 unsigned long fd, unsigned long pgoff); 474 unsigned long fd, unsigned long pgoff);
@@ -461,22 +477,18 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
461 unsigned long a3, unsigned long a4, unsigned long a5, 477 unsigned long a3, unsigned long a4, unsigned long a5,
462 struct pt_regs *regs); 478 struct pt_regs *regs);
463int sys_clone(unsigned long clone_flags, unsigned long usp, 479int sys_clone(unsigned long clone_flags, unsigned long usp,
464 int __user *parent_tidp, void __user *child_threadptr, 480 int __user *parent_tidp, void __user *child_threadptr,
465 int __user *child_tidp, int p6, 481 int __user *child_tidp, int p6, struct pt_regs *regs);
466 struct pt_regs *regs); 482int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
467int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6, 483 unsigned long p4, unsigned long p5, unsigned long p6,
468 struct pt_regs *regs); 484 struct pt_regs *regs);
469int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6, 485int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
486 unsigned long p4, unsigned long p5, unsigned long p6,
470 struct pt_regs *regs); 487 struct pt_regs *regs);
471int sys_pipe(int __user *fildes); 488int sys_pipe(int __user *fildes);
472int sys_ptrace(long request, long pid, long addr, long data);
473struct sigaction; 489struct sigaction;
474long sys_rt_sigaction(int sig, 490long sys_rt_sigaction(int sig, const struct sigaction __user *act,
475 const struct sigaction __user *act, 491 struct sigaction __user *oact, size_t sigsetsize);
476 struct sigaction __user *oact,
477 size_t sigsetsize);
478
479#endif /* __KERNEL_SYSCALLS__ */
480 492
481/* 493/*
482 * "Conditional" syscalls 494 * "Conditional" syscalls
@@ -484,10 +496,14 @@ long sys_rt_sigaction(int sig,
484 * What we want is __attribute__((weak,alias("sys_ni_syscall"))), 496 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
485 * but it doesn't work on all toolchains, so we just do it by hand 497 * but it doesn't work on all toolchains, so we just do it by hand
486 */ 498 */
487#ifndef cond_syscall 499#ifdef CONFIG_PPC32
488#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 500#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
501#else
502#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
489#endif 503#endif
490 504
491#endif /* __KERNEL__ */ 505#endif /* __KERNEL__ */
506
507#endif /* __ASSEMBLY__ */
492 508
493#endif /* _ASM_PPC_UNISTD_H_ */ 509#endif /* _ASM_PPC_UNISTD_H_ */
diff --git a/include/asm-ppc64/vga.h b/include/asm-powerpc/vga.h
index c09849743f45..f8d350aabf1a 100644
--- a/include/asm-ppc64/vga.h
+++ b/include/asm-powerpc/vga.h
@@ -1,16 +1,14 @@
1#ifndef _ASM_POWERPC_VGA_H_
2#define _ASM_POWERPC_VGA_H_
3
4#ifdef __KERNEL__
5
1/* 6/*
2 * Access to VGA videoram 7 * Access to VGA videoram
3 * 8 *
4 * (c) 1998 Martin Mares <mj@ucw.cz> 9 * (c) 1998 Martin Mares <mj@ucw.cz>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef _LINUX_ASM_VGA_H_
13#define _LINUX_ASM_VGA_H_
14 12
15#include <asm/io.h> 13#include <asm/io.h>
16 14
@@ -42,9 +40,15 @@ static inline u16 scr_readw(volatile const u16 *addr)
42#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */ 40#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
43 41
44extern unsigned long vgacon_remap_base; 42extern unsigned long vgacon_remap_base;
43
44#ifdef __powerpc64__
45#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0)) 45#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
46#else
47#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
48#endif
46 49
47#define vga_readb(x) (*(x)) 50#define vga_readb(x) (*(x))
48#define vga_writeb(x,y) (*(y) = (x)) 51#define vga_writeb(x,y) (*(y) = (x))
49 52
50#endif 53#endif /* __KERNEL__ */
54#endif /* _ASM_POWERPC_VGA_H_ */
diff --git a/include/asm-ppc64/vio.h b/include/asm-powerpc/vio.h
index 03f1b95f433b..e0ccf108277c 100644
--- a/include/asm-ppc64/vio.h
+++ b/include/asm-powerpc/vio.h
@@ -1,18 +1,18 @@
1/* 1/*
2 * IBM PowerPC Virtual I/O Infrastructure Support. 2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 * 3 *
4 * Copyright (c) 2003 IBM Corp. 4 * Copyright (c) 2003 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com 5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com 6 * Santiago Leon santil@us.ibm.com
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14#ifndef _ASM_VIO_H 14#ifndef _ASM_POWERPC_VIO_H
15#define _ASM_VIO_H 15#define _ASM_POWERPC_VIO_H
16 16
17#include <linux/config.h> 17#include <linux/config.h>
18#include <linux/init.h> 18#include <linux/init.h>
@@ -55,10 +55,10 @@ struct vio_dev {
55 55
56struct vio_driver { 56struct vio_driver {
57 struct list_head node; 57 struct list_head node;
58 char *name;
59 const struct vio_device_id *id_table; 58 const struct vio_device_id *id_table;
60 int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); 59 int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
61 int (*remove)(struct vio_dev *dev); 60 int (*remove)(struct vio_dev *dev);
61 void (*shutdown)(struct vio_dev *dev);
62 unsigned long driver_data; 62 unsigned long driver_data;
63 struct device_driver driver; 63 struct device_driver driver;
64}; 64};
@@ -103,4 +103,4 @@ static inline struct vio_dev *to_vio_dev(struct device *dev)
103 return container_of(dev, struct vio_dev, dev); 103 return container_of(dev, struct vio_dev, dev);
104} 104}
105 105
106#endif /* _ASM_VIO_H */ 106#endif /* _ASM_POWERPC_VIO_H */
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h
new file mode 100644
index 000000000000..43f7129984c7
--- /dev/null
+++ b/include/asm-powerpc/xmon.h
@@ -0,0 +1,12 @@
1#ifndef __PPC_XMON_H
2#define __PPC_XMON_H
3#ifdef __KERNEL__
4
5struct pt_regs;
6
7extern int xmon(struct pt_regs *excp);
8extern void xmon_printf(const char *fmt, ...);
9extern void xmon_init(int);
10
11#endif
12#endif
diff --git a/include/asm-ppc/a.out.h b/include/asm-ppc/a.out.h
deleted file mode 100644
index 8979a94c4a81..000000000000
--- a/include/asm-ppc/a.out.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef __PPC_A_OUT_H__
2#define __PPC_A_OUT_H__
3
4/* grabbed from the intel stuff */
5#define STACK_TOP TASK_SIZE
6
7
8struct exec
9{
10 unsigned long a_info; /* Use macros N_MAGIC, etc for access */
11 unsigned a_text; /* length of text, in bytes */
12 unsigned a_data; /* length of data, in bytes */
13 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
14 unsigned a_syms; /* length of symbol table data in file, in bytes */
15 unsigned a_entry; /* start address */
16 unsigned a_trsize; /* length of relocation info for text, in bytes */
17 unsigned a_drsize; /* length of relocation info for data, in bytes */
18};
19
20
21#define N_TRSIZE(a) ((a).a_trsize)
22#define N_DRSIZE(a) ((a).a_drsize)
23#define N_SYMSIZE(a) ((a).a_syms)
24
25
26#endif
diff --git a/include/asm-ppc/auxvec.h b/include/asm-ppc/auxvec.h
deleted file mode 100644
index 172358df29c8..000000000000
--- a/include/asm-ppc/auxvec.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __PPC_AUXVEC_H
2#define __PPC_AUXVEC_H
3
4/*
5 * We need to put in some extra aux table entries to tell glibc what
6 * the cache block size is, so it can use the dcbz instruction safely.
7 */
8#define AT_DCACHEBSIZE 19
9#define AT_ICACHEBSIZE 20
10#define AT_UCACHEBSIZE 21
11/* A special ignored type value for PPC, for glibc compatibility. */
12#define AT_IGNOREPPC 22
13
14#endif
diff --git a/include/asm-ppc/bug.h b/include/asm-ppc/bug.h
deleted file mode 100644
index 8b34fd682b0d..000000000000
--- a/include/asm-ppc/bug.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _PPC_BUG_H
2#define _PPC_BUG_H
3
4struct bug_entry {
5 unsigned long bug_addr;
6 int line;
7 const char *file;
8 const char *function;
9};
10
11/*
12 * If this bit is set in the line number it means that the trap
13 * is for WARN_ON rather than BUG or BUG_ON.
14 */
15#define BUG_WARNING_TRAP 0x1000000
16
17#ifdef CONFIG_BUG
18#define BUG() do { \
19 __asm__ __volatile__( \
20 "1: twi 31,0,0\n" \
21 ".section __bug_table,\"a\"\n\t" \
22 " .long 1b,%0,%1,%2\n" \
23 ".previous" \
24 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
25} while (0)
26
27#define BUG_ON(x) do { \
28 if (!__builtin_constant_p(x) || (x)) { \
29 __asm__ __volatile__( \
30 "1: twnei %0,0\n" \
31 ".section __bug_table,\"a\"\n\t" \
32 " .long 1b,%1,%2,%3\n" \
33 ".previous" \
34 : : "r" (x), "i" (__LINE__), "i" (__FILE__), \
35 "i" (__FUNCTION__)); \
36 } \
37} while (0)
38
39#define WARN_ON(x) do { \
40 if (!__builtin_constant_p(x) || (x)) { \
41 __asm__ __volatile__( \
42 "1: twnei %0,0\n" \
43 ".section __bug_table,\"a\"\n\t" \
44 " .long 1b,%1,%2,%3\n" \
45 ".previous" \
46 : : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP), \
47 "i" (__FILE__), "i" (__FUNCTION__)); \
48 } \
49} while (0)
50
51#define HAVE_ARCH_BUG
52#define HAVE_ARCH_BUG_ON
53#define HAVE_ARCH_WARN_ON
54#endif
55
56#include <asm-generic/bug.h>
57
58#endif
diff --git a/include/asm-ppc/byteorder.h b/include/asm-ppc/byteorder.h
deleted file mode 100644
index c63c81ec7968..000000000000
--- a/include/asm-ppc/byteorder.h
+++ /dev/null
@@ -1,76 +0,0 @@
1#ifndef _PPC_BYTEORDER_H
2#define _PPC_BYTEORDER_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7#ifdef __GNUC__
8#ifdef __KERNEL__
9
10extern __inline__ unsigned ld_le16(const volatile unsigned short *addr)
11{
12 unsigned val;
13
14 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
15 return val;
16}
17
18extern __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
19{
20 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
21}
22
23extern __inline__ unsigned ld_le32(const volatile unsigned *addr)
24{
25 unsigned val;
26
27 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
28 return val;
29}
30
31extern __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
32{
33 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
34}
35
36static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
37{
38 __u16 result;
39
40 __asm__("rlwimi %0,%2,8,16,23" : "=&r" (result) : "0" (value >> 8), "r" (value));
41 return result;
42}
43
44static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
45{
46 __u32 result;
47
48 __asm__("rlwimi %0,%2,24,16,23" : "=&r" (result) : "0" (value>>24), "r" (value));
49 __asm__("rlwimi %0,%2,8,8,15" : "=&r" (result) : "0" (result), "r" (value));
50 __asm__("rlwimi %0,%2,24,0,7" : "=&r" (result) : "0" (result), "r" (value));
51
52 return result;
53}
54#define __arch__swab32(x) ___arch__swab32(x)
55#define __arch__swab16(x) ___arch__swab16(x)
56
57/* The same, but returns converted value from the location pointer by addr. */
58#define __arch__swab16p(addr) ld_le16(addr)
59#define __arch__swab32p(addr) ld_le32(addr)
60
61/* The same, but do the conversion in situ, ie. put the value back to addr. */
62#define __arch__swab16s(addr) st_le16(addr,*addr)
63#define __arch__swab32s(addr) st_le32(addr,*addr)
64
65#endif /* __KERNEL__ */
66
67#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
68# define __BYTEORDER_HAS_U64__
69# define __SWAB_64_THRU_32__
70#endif
71
72#endif /* __GNUC__ */
73
74#include <linux/byteorder/big_endian.h>
75
76#endif /* _PPC_BYTEORDER_H */
diff --git a/include/asm-ppc/cache.h b/include/asm-ppc/cache.h
index 38f2f1be4a87..7a157d0f4b5f 100644
--- a/include/asm-ppc/cache.h
+++ b/include/asm-ppc/cache.h
@@ -9,21 +9,18 @@
9 9
10/* bytes per L1 cache line */ 10/* bytes per L1 cache line */
11#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) 11#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
12#define L1_CACHE_LINE_SIZE 16 12#define L1_CACHE_SHIFT 4
13#define LG_L1_CACHE_LINE_SIZE 4
14#define MAX_COPY_PREFETCH 1 13#define MAX_COPY_PREFETCH 1
15#elif defined(CONFIG_PPC64BRIDGE) 14#elif defined(CONFIG_PPC64BRIDGE)
16#define L1_CACHE_LINE_SIZE 128 15#define L1_CACHE_SHIFT 7
17#define LG_L1_CACHE_LINE_SIZE 7
18#define MAX_COPY_PREFETCH 1 16#define MAX_COPY_PREFETCH 1
19#else 17#else
20#define L1_CACHE_LINE_SIZE 32 18#define L1_CACHE_SHIFT 5
21#define LG_L1_CACHE_LINE_SIZE 5
22#define MAX_COPY_PREFETCH 4 19#define MAX_COPY_PREFETCH 4
23#endif 20#endif
24 21
25#define L1_CACHE_BYTES L1_CACHE_LINE_SIZE 22#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
26#define L1_CACHE_SHIFT LG_L1_CACHE_LINE_SIZE 23
27#define SMP_CACHE_BYTES L1_CACHE_BYTES 24#define SMP_CACHE_BYTES L1_CACHE_BYTES
28#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */ 25#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
29 26
diff --git a/include/asm-ppc/checksum.h b/include/asm-ppc/checksum.h
deleted file mode 100644
index cf953a92c7ab..000000000000
--- a/include/asm-ppc/checksum.h
+++ /dev/null
@@ -1,107 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _PPC_CHECKSUM_H
3#define _PPC_CHECKSUM_H
4
5
6/*
7 * computes the checksum of a memory block at buff, length len,
8 * and adds in "sum" (32-bit)
9 *
10 * returns a 32-bit number suitable for feeding into itself
11 * or csum_tcpudp_magic
12 *
13 * this function must be called with even lengths, except
14 * for the last fragment, which may be odd
15 *
16 * it's best to have buff aligned on a 32-bit boundary
17 */
18extern unsigned int csum_partial(const unsigned char * buff, int len,
19 unsigned int sum);
20
21/*
22 * Computes the checksum of a memory block at src, length len,
23 * and adds in "sum" (32-bit), while copying the block to dst.
24 * If an access exception occurs on src or dst, it stores -EFAULT
25 * to *src_err or *dst_err respectively (if that pointer is not
26 * NULL), and, for an error on src, zeroes the rest of dst.
27 *
28 * Like csum_partial, this must be called with even lengths,
29 * except for the last fragment.
30 */
31extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
32 int len, unsigned int sum,
33 int *src_err, int *dst_err);
34
35#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
36 csum_partial_copy_generic((__force void *)(src), (dst), (len), (sum), (errp), NULL)
37
38/* FIXME: this needs to be written to really do no check -- Cort */
39#define csum_partial_copy_nocheck(src, dst, len, sum) \
40 csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
41
42/*
43 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
44 * 1's complement 16-bit checksum.
45 */
46static inline unsigned int csum_fold(unsigned int sum)
47{
48 unsigned int tmp;
49
50 /* swap the two 16-bit halves of sum */
51 __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
52 /* if there is a carry from adding the two 16-bit halves,
53 it will carry from the lower half into the upper half,
54 giving us the correct sum in the upper half. */
55 sum = ~(sum + tmp) >> 16;
56 return sum;
57}
58
59/*
60 * this routine is used for miscellaneous IP-like checksums, mainly
61 * in icmp.c
62 */
63static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
64{
65 return csum_fold(csum_partial(buff, len, 0));
66}
67
68/*
69 * FIXME: I swiped this one from the sparc and made minor modifications.
70 * It may not be correct. -- Cort
71 */
72static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
73 unsigned long daddr,
74 unsigned short len,
75 unsigned short proto,
76 unsigned int sum)
77{
78 __asm__("\n\
79 addc %0,%0,%1 \n\
80 adde %0,%0,%2 \n\
81 adde %0,%0,%3 \n\
82 addze %0,%0 \n\
83 "
84 : "=r" (sum)
85 : "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
86 return sum;
87}
88
89/*
90 * This is a version of ip_compute_csum() optimized for IP headers,
91 * which always checksum on 4 octet boundaries. ihl is the number
92 * of 32-bit words and is always >= 5.
93 */
94extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
95
96/*
97 * computes the checksum of the TCP/UDP pseudo-header
98 * returns a 16-bit checksum, already complemented
99 */
100extern unsigned short csum_tcpudp_magic(unsigned long saddr,
101 unsigned long daddr,
102 unsigned short len,
103 unsigned short proto,
104 unsigned int sum);
105
106#endif
107#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/cpm2.h b/include/asm-ppc/cpm2.h
index 9483d4bfacf7..43d2ebbc7748 100644
--- a/include/asm-ppc/cpm2.h
+++ b/include/asm-ppc/cpm2.h
@@ -1087,6 +1087,9 @@ typedef struct im_idma {
1087#define SCCR_PCIDF_MSK 0x00000078 /* PCI division factor */ 1087#define SCCR_PCIDF_MSK 0x00000078 /* PCI division factor */
1088#define SCCR_PCIDF_SHIFT 3 1088#define SCCR_PCIDF_SHIFT 3
1089 1089
1090#ifndef CPM_IMMR_OFFSET
1091#define CPM_IMMR_OFFSET 0x101a8
1092#endif
1090 1093
1091#endif /* __CPM2__ */ 1094#endif /* __CPM2__ */
1092#endif /* __KERNEL__ */ 1095#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/cputable.h b/include/asm-ppc/cputable.h
deleted file mode 100644
index e17c492c870b..000000000000
--- a/include/asm-ppc/cputable.h
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * include/asm-ppc/cputable.h
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef __ASM_PPC_CPUTABLE_H
13#define __ASM_PPC_CPUTABLE_H
14
15/* Exposed to userland CPU features */
16#define PPC_FEATURE_32 0x80000000
17#define PPC_FEATURE_64 0x40000000
18#define PPC_FEATURE_601_INSTR 0x20000000
19#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
20#define PPC_FEATURE_HAS_FPU 0x08000000
21#define PPC_FEATURE_HAS_MMU 0x04000000
22#define PPC_FEATURE_HAS_4xxMAC 0x02000000
23#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
24#define PPC_FEATURE_HAS_SPE 0x00800000
25#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
26#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
27#define PPC_FEATURE_NO_TB 0x00100000
28
29#ifdef __KERNEL__
30
31#ifndef __ASSEMBLY__
32
33/* This structure can grow, it's real size is used by head.S code
34 * via the mkdefs mecanism.
35 */
36struct cpu_spec;
37
38typedef void (*cpu_setup_t)(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
39
40struct cpu_spec {
41 /* CPU is matched via (PVR & pvr_mask) == pvr_value */
42 unsigned int pvr_mask;
43 unsigned int pvr_value;
44
45 char *cpu_name;
46 unsigned int cpu_features; /* Kernel features */
47 unsigned int cpu_user_features; /* Userland features */
48
49 /* cache line sizes */
50 unsigned int icache_bsize;
51 unsigned int dcache_bsize;
52
53 /* number of performance monitor counters */
54 unsigned int num_pmcs;
55
56 /* this is called to initialize various CPU bits like L1 cache,
57 * BHT, SPD, etc... from head.S before branching to identify_machine
58 */
59 cpu_setup_t cpu_setup;
60};
61
62extern struct cpu_spec cpu_specs[];
63extern struct cpu_spec *cur_cpu_spec[];
64
65static inline unsigned int cpu_has_feature(unsigned int feature)
66{
67 return cur_cpu_spec[0]->cpu_features & feature;
68}
69
70#endif /* __ASSEMBLY__ */
71
72/* CPU kernel features */
73#define CPU_FTR_SPLIT_ID_CACHE 0x00000001
74#define CPU_FTR_L2CR 0x00000002
75#define CPU_FTR_SPEC7450 0x00000004
76#define CPU_FTR_ALTIVEC 0x00000008
77#define CPU_FTR_TAU 0x00000010
78#define CPU_FTR_CAN_DOZE 0x00000020
79#define CPU_FTR_USE_TB 0x00000040
80#define CPU_FTR_604_PERF_MON 0x00000080
81#define CPU_FTR_601 0x00000100
82#define CPU_FTR_HPTE_TABLE 0x00000200
83#define CPU_FTR_CAN_NAP 0x00000400
84#define CPU_FTR_L3CR 0x00000800
85#define CPU_FTR_L3_DISABLE_NAP 0x00001000
86#define CPU_FTR_NAP_DISABLE_L2_PR 0x00002000
87#define CPU_FTR_DUAL_PLL_750FX 0x00004000
88#define CPU_FTR_NO_DPM 0x00008000
89#define CPU_FTR_HAS_HIGH_BATS 0x00010000
90#define CPU_FTR_NEED_COHERENT 0x00020000
91#define CPU_FTR_NO_BTIC 0x00040000
92#define CPU_FTR_BIG_PHYS 0x00080000
93
94#ifdef __ASSEMBLY__
95
96#define BEGIN_FTR_SECTION 98:
97
98#define END_FTR_SECTION(msk, val) \
9999: \
100 .section __ftr_fixup,"a"; \
101 .align 2; \
102 .long msk; \
103 .long val; \
104 .long 98b; \
105 .long 99b; \
106 .previous
107
108#else
109
110#define BEGIN_FTR_SECTION "98:\n"
111#define END_FTR_SECTION(msk, val) \
112"99:\n" \
113" .section __ftr_fixup,\"a\";\n" \
114" .align 2;\n" \
115" .long "#msk";\n" \
116" .long "#val";\n" \
117" .long 98b;\n" \
118" .long 99b;\n" \
119" .previous\n"
120
121
122#endif /* __ASSEMBLY__ */
123
124#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
125#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
126
127#endif /* __ASM_PPC_CPUTABLE_H */
128#endif /* __KERNEL__ */
129
diff --git a/include/asm-ppc/elf.h b/include/asm-ppc/elf.h
deleted file mode 100644
index c25cc35e6ab5..000000000000
--- a/include/asm-ppc/elf.h
+++ /dev/null
@@ -1,151 +0,0 @@
1#ifndef __PPC_ELF_H
2#define __PPC_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7#include <asm/types.h>
8#include <asm/ptrace.h>
9#include <asm/cputable.h>
10#include <asm/auxvec.h>
11
12/* PowerPC relocations defined by the ABIs */
13#define R_PPC_NONE 0
14#define R_PPC_ADDR32 1 /* 32bit absolute address */
15#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
16#define R_PPC_ADDR16 3 /* 16bit absolute address */
17#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
18#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
19#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
20#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
21#define R_PPC_ADDR14_BRTAKEN 8
22#define R_PPC_ADDR14_BRNTAKEN 9
23#define R_PPC_REL24 10 /* PC relative 26 bit */
24#define R_PPC_REL14 11 /* PC relative 16 bit */
25#define R_PPC_REL14_BRTAKEN 12
26#define R_PPC_REL14_BRNTAKEN 13
27#define R_PPC_GOT16 14
28#define R_PPC_GOT16_LO 15
29#define R_PPC_GOT16_HI 16
30#define R_PPC_GOT16_HA 17
31#define R_PPC_PLTREL24 18
32#define R_PPC_COPY 19
33#define R_PPC_GLOB_DAT 20
34#define R_PPC_JMP_SLOT 21
35#define R_PPC_RELATIVE 22
36#define R_PPC_LOCAL24PC 23
37#define R_PPC_UADDR32 24
38#define R_PPC_UADDR16 25
39#define R_PPC_REL32 26
40#define R_PPC_PLT32 27
41#define R_PPC_PLTREL32 28
42#define R_PPC_PLT16_LO 29
43#define R_PPC_PLT16_HI 30
44#define R_PPC_PLT16_HA 31
45#define R_PPC_SDAREL16 32
46#define R_PPC_SECTOFF 33
47#define R_PPC_SECTOFF_LO 34
48#define R_PPC_SECTOFF_HI 35
49#define R_PPC_SECTOFF_HA 36
50/* Keep this the last entry. */
51#define R_PPC_NUM 37
52
53#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
54#define ELF_NFPREG 33 /* includes fpscr */
55#define ELF_NVRREG 33 /* includes vscr */
56#define ELF_NEVRREG 34 /* includes acc (as 2) */
57
58/*
59 * These are used to set parameters in the core dumps.
60 */
61#define ELF_ARCH EM_PPC
62#define ELF_CLASS ELFCLASS32
63#define ELF_DATA ELFDATA2MSB
64
65/* General registers */
66typedef unsigned long elf_greg_t;
67typedef elf_greg_t elf_gregset_t[ELF_NGREG];
68
69/* Floating point registers */
70typedef double elf_fpreg_t;
71typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
72
73/* Altivec registers */
74typedef __vector128 elf_vrreg_t;
75typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
76
77#ifdef __KERNEL__
78
79struct task_struct;
80
81/*
82 * This is used to ensure we don't load something for the wrong architecture.
83 */
84
85#define elf_check_arch(x) ((x)->e_machine == EM_PPC)
86
87/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
88 use of this is to invoke "./ld.so someprog" to test out a new version of
89 the loader. We need to make sure that it is out of the way of the program
90 that it will "exec", and that there is sufficient room for the brk. */
91
92#define ELF_ET_DYN_BASE (0x08000000)
93
94#define USE_ELF_CORE_DUMP
95#define ELF_EXEC_PAGESIZE 4096
96
97#define ELF_CORE_COPY_REGS(gregs, regs) \
98 memcpy((gregs), (regs), sizeof(struct pt_regs)); \
99 memset((char *)(gregs) + sizeof(struct pt_regs), 0, \
100 sizeof(elf_gregset_t) - sizeof(struct pt_regs));
101
102#define ELF_CORE_COPY_TASK_REGS(t, elfregs) \
103 ((t)->thread.regs? \
104 ({ ELF_CORE_COPY_REGS((elfregs), (t)->thread.regs); 1; }): 0)
105
106extern int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpu);
107#define ELF_CORE_COPY_FPREGS(t, fpu) dump_task_fpu((t), (fpu))
108
109/* This yields a mask that user programs can use to figure out what
110 instruction set this cpu supports. This could be done in userspace,
111 but it's not easy, and we've already done it here. */
112
113#define ELF_HWCAP (cur_cpu_spec[0]->cpu_user_features)
114
115/* This yields a string that ld.so will use to load implementation
116 specific libraries for optimization. This is more specific in
117 intent than poking at uname or /proc/cpuinfo.
118
119 For the moment, we have only optimizations for the Intel generations,
120 but that could change... */
121
122#define ELF_PLATFORM (NULL)
123
124#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
125
126extern int dcache_bsize;
127extern int icache_bsize;
128extern int ucache_bsize;
129
130/*
131 * The requirements here are:
132 * - keep the final alignment of sp (sp & 0xf)
133 * - make sure the 32-bit value at the first 16 byte aligned position of
134 * AUXV is greater than 16 for glibc compatibility.
135 * AT_IGNOREPPC is used for that.
136 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
137 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
138 */
139#define ARCH_DLINFO \
140do { \
141 /* Handle glibc compatibility. */ \
142 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
143 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
144 /* Cache size items */ \
145 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
146 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
147 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
148 } while (0)
149
150#endif /* __KERNEL__ */
151#endif
diff --git a/include/asm-ppc/hw_irq.h b/include/asm-ppc/hw_irq.h
deleted file mode 100644
index 47dc7990fb26..000000000000
--- a/include/asm-ppc/hw_irq.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3 */
4#ifdef __KERNEL__
5#ifndef _PPC_HW_IRQ_H
6#define _PPC_HW_IRQ_H
7
8#include <asm/ptrace.h>
9#include <asm/reg.h>
10
11extern void timer_interrupt(struct pt_regs *);
12
13#define INLINE_IRQS
14
15#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
16
17#ifdef INLINE_IRQS
18
19static inline void local_irq_disable(void)
20{
21 unsigned long msr;
22 msr = mfmsr();
23 mtmsr(msr & ~MSR_EE);
24 __asm__ __volatile__("": : :"memory");
25}
26
27static inline void local_irq_enable(void)
28{
29 unsigned long msr;
30 __asm__ __volatile__("": : :"memory");
31 msr = mfmsr();
32 mtmsr(msr | MSR_EE);
33}
34
35static inline void local_irq_save_ptr(unsigned long *flags)
36{
37 unsigned long msr;
38 msr = mfmsr();
39 *flags = msr;
40 mtmsr(msr & ~MSR_EE);
41 __asm__ __volatile__("": : :"memory");
42}
43
44#define local_save_flags(flags) ((flags) = mfmsr())
45#define local_irq_save(flags) local_irq_save_ptr(&flags)
46#define local_irq_restore(flags) mtmsr(flags)
47
48#else
49
50extern void local_irq_enable(void);
51extern void local_irq_disable(void);
52extern void local_irq_restore(unsigned long);
53extern void local_save_flags_ptr(unsigned long *);
54
55#define local_save_flags(flags) local_save_flags_ptr(&flags)
56#define local_irq_save(flags) ({local_save_flags(flags);local_irq_disable();})
57
58#endif
59
60extern void do_lost_interrupts(unsigned long);
61
62#define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
63#define unmask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->enable) irq_desc[irq].handler->enable(irq);})
64#define ack_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->ack) irq_desc[irq].handler->ack(irq);})
65
66/* Should we handle this via lost interrupts and IPIs or should we don't care like
67 * we do now ? --BenH.
68 */
69struct hw_interrupt_type;
70static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
71
72
73#endif /* _PPC_HW_IRQ_H */
74#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/i8259.h b/include/asm-ppc/i8259.h
deleted file mode 100644
index 091b71295de4..000000000000
--- a/include/asm-ppc/i8259.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _PPC_KERNEL_i8259_H
2#define _PPC_KERNEL_i8259_H
3
4#include <linux/irq.h>
5
6extern struct hw_interrupt_type i8259_pic;
7
8extern void i8259_init(long intack_addr);
9extern int i8259_irq(struct pt_regs *regs);
10
11#endif /* _PPC_KERNEL_i8259_H */
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 94d83998a759..f7f614dfc648 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -8,6 +8,7 @@
8 8
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/byteorder.h> 10#include <asm/byteorder.h>
11#include <asm/synch.h>
11#include <asm/mmu.h> 12#include <asm/mmu.h>
12 13
13#define SIO_CONFIG_RA 0x398 14#define SIO_CONFIG_RA 0x398
@@ -440,16 +441,6 @@ extern inline void * phys_to_virt(unsigned long address)
440#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 441#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
441#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET) 442#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)
442 443
443/*
444 * Enforce In-order Execution of I/O:
445 * Acts as a barrier to ensure all previous I/O accesses have
446 * completed before any further ones are issued.
447 */
448extern inline void eieio(void)
449{
450 __asm__ __volatile__ ("eieio" : : : "memory");
451}
452
453/* Enforce in-order execution of data I/O. 444/* Enforce in-order execution of data I/O.
454 * No distinction between read/write on PPC; use eieio for all three. 445 * No distinction between read/write on PPC; use eieio for all three.
455 */ 446 */
diff --git a/include/asm-ppc/kmap_types.h b/include/asm-ppc/kmap_types.h
deleted file mode 100644
index 6d6fc78731e5..000000000000
--- a/include/asm-ppc/kmap_types.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _ASM_KMAP_TYPES_H
3#define _ASM_KMAP_TYPES_H
4
5enum km_type {
6 KM_BOUNCE_READ,
7 KM_SKB_SUNRPC_DATA,
8 KM_SKB_DATA_SOFTIRQ,
9 KM_USER0,
10 KM_USER1,
11 KM_BIO_SRC_IRQ,
12 KM_BIO_DST_IRQ,
13 KM_PTE0,
14 KM_PTE1,
15 KM_IRQ0,
16 KM_IRQ1,
17 KM_SOFTIRQ0,
18 KM_SOFTIRQ1,
19 KM_PPC_SYNC_PAGE,
20 KM_PPC_SYNC_ICACHE,
21 KM_TYPE_NR
22};
23
24#endif
25#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h
index 1d4ab70a56f3..f01255bd1dc3 100644
--- a/include/asm-ppc/machdep.h
+++ b/include/asm-ppc/machdep.h
@@ -98,7 +98,7 @@ struct machdep_calls {
98 98
99 /* Get access protection for /dev/mem */ 99 /* Get access protection for /dev/mem */
100 pgprot_t (*phys_mem_access_prot)(struct file *file, 100 pgprot_t (*phys_mem_access_prot)(struct file *file,
101 unsigned long offset, 101 unsigned long pfn,
102 unsigned long size, 102 unsigned long size,
103 pgprot_t vma_prot); 103 pgprot_t vma_prot);
104 104
@@ -167,7 +167,7 @@ extern sys_ctrler_t sys_ctrler;
167 167
168#ifdef CONFIG_SMP 168#ifdef CONFIG_SMP
169struct smp_ops_t { 169struct smp_ops_t {
170 void (*message_pass)(int target, int msg, unsigned long data, int wait); 170 void (*message_pass)(int target, int msg);
171 int (*probe)(void); 171 int (*probe)(void);
172 void (*kick_cpu)(int nr); 172 void (*kick_cpu)(int nr);
173 void (*setup_cpu)(int nr); 173 void (*setup_cpu)(int nr);
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index afe26ffc2e2d..4f152cca13c1 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -164,13 +164,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
164 struct task_struct *tsk) 164 struct task_struct *tsk)
165{ 165{
166#ifdef CONFIG_ALTIVEC 166#ifdef CONFIG_ALTIVEC
167 asm volatile ( 167 if (cpu_has_feature(CPU_FTR_ALTIVEC))
168 BEGIN_FTR_SECTION 168 asm volatile ("dssall;\n"
169 "dssall;\n"
170#ifndef CONFIG_POWER4 169#ifndef CONFIG_POWER4
171 "sync;\n" /* G4 needs a sync here, G5 apparently not */ 170 "sync;\n" /* G4 needs a sync here, G5 apparently not */
172#endif 171#endif
173 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
174 : : ); 172 : : );
175#endif /* CONFIG_ALTIVEC */ 173#endif /* CONFIG_ALTIVEC */
176 174
diff --git a/include/asm-ppc/mpc8260.h b/include/asm-ppc/mpc8260.h
index 9694eca16e92..321452695039 100644
--- a/include/asm-ppc/mpc8260.h
+++ b/include/asm-ppc/mpc8260.h
@@ -92,6 +92,10 @@ enum ppc_sys_devices {
92extern unsigned char __res[]; 92extern unsigned char __res[];
93#endif 93#endif
94 94
95#ifndef BOARD_CHIP_NAME
96#define BOARD_CHIP_NAME ""
97#endif
98
95#endif /* CONFIG_8260 */ 99#endif /* CONFIG_8260 */
96#endif /* !__ASM_PPC_MPC8260_H__ */ 100#endif /* !__ASM_PPC_MPC8260_H__ */
97#endif /* __KERNEL__ */ 101#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
index 516984ee14b5..d98db980cd49 100644
--- a/include/asm-ppc/mpc85xx.h
+++ b/include/asm-ppc/mpc85xx.h
@@ -67,6 +67,8 @@ extern unsigned char __res[];
67#define MPC85xx_DMA3_SIZE (0x00080) 67#define MPC85xx_DMA3_SIZE (0x00080)
68#define MPC85xx_ENET1_OFFSET (0x24000) 68#define MPC85xx_ENET1_OFFSET (0x24000)
69#define MPC85xx_ENET1_SIZE (0x01000) 69#define MPC85xx_ENET1_SIZE (0x01000)
70#define MPC85xx_MIIM_OFFSET (0x24520)
71#define MPC85xx_MIIM_SIZE (0x00018)
70#define MPC85xx_ENET2_OFFSET (0x25000) 72#define MPC85xx_ENET2_OFFSET (0x25000)
71#define MPC85xx_ENET2_SIZE (0x01000) 73#define MPC85xx_ENET2_SIZE (0x01000)
72#define MPC85xx_ENET3_OFFSET (0x26000) 74#define MPC85xx_ENET3_OFFSET (0x26000)
@@ -132,6 +134,7 @@ enum ppc_sys_devices {
132 MPC85xx_eTSEC3, 134 MPC85xx_eTSEC3,
133 MPC85xx_eTSEC4, 135 MPC85xx_eTSEC4,
134 MPC85xx_IIC2, 136 MPC85xx_IIC2,
137 MPC85xx_MDIO,
135}; 138};
136 139
137/* Internal interrupts are all Level Sensitive, and Positive Polarity */ 140/* Internal interrupts are all Level Sensitive, and Positive Polarity */
diff --git a/include/asm-ppc/mpc8xx.h b/include/asm-ppc/mpc8xx.h
index 208a2e11daee..46f159cf589e 100644
--- a/include/asm-ppc/mpc8xx.h
+++ b/include/asm-ppc/mpc8xx.h
@@ -113,6 +113,10 @@ enum ppc_sys_devices {
113 MPC8xx_CPM_USB, 113 MPC8xx_CPM_USB,
114}; 114};
115 115
116#ifndef BOARD_CHIP_NAME
117#define BOARD_CHIP_NAME ""
118#endif
119
116#endif /* !__ASSEMBLY__ */ 120#endif /* !__ASSEMBLY__ */
117#endif /* CONFIG_8xx */ 121#endif /* CONFIG_8xx */
118#endif /* __CONFIG_8xx_DEFS */ 122#endif /* __CONFIG_8xx_DEFS */
diff --git a/include/asm-ppc/open_pic.h b/include/asm-ppc/open_pic.h
index 7848aa610c05..ec2f46629ca2 100644
--- a/include/asm-ppc/open_pic.h
+++ b/include/asm-ppc/open_pic.h
@@ -58,8 +58,7 @@ extern int openpic_get_irq(struct pt_regs *regs);
58extern void openpic_reset_processor_phys(u_int cpumask); 58extern void openpic_reset_processor_phys(u_int cpumask);
59extern void openpic_setup_ISU(int isu_num, unsigned long addr); 59extern void openpic_setup_ISU(int isu_num, unsigned long addr);
60extern void openpic_cause_IPI(u_int ipi, cpumask_t cpumask); 60extern void openpic_cause_IPI(u_int ipi, cpumask_t cpumask);
61extern void smp_openpic_message_pass(int target, int msg, unsigned long data, 61extern void smp_openpic_message_pass(int target, int msg);
62 int wait);
63extern void openpic_set_k2_cascade(int irq); 62extern void openpic_set_k2_cascade(int irq);
64extern void openpic_set_priority(u_int pri); 63extern void openpic_set_priority(u_int pri);
65extern u_int openpic_get_priority(void); 64extern u_int openpic_get_priority(void);
diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h
index 4789dc024240..fc44f7ca62d7 100644
--- a/include/asm-ppc/page.h
+++ b/include/asm-ppc/page.h
@@ -34,6 +34,17 @@ typedef unsigned long pte_basic_t;
34#define PTE_FMT "%.8lx" 34#define PTE_FMT "%.8lx"
35#endif 35#endif
36 36
37/* align addr on a size boundary - adjust address up/down if needed */
38#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
39#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
40
41/* align addr on a size boundary - adjust address up if needed */
42#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
43
44/* to align the pointer to the (next) page boundary */
45#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
46
47
37#undef STRICT_MM_TYPECHECKS 48#undef STRICT_MM_TYPECHECKS
38 49
39#ifdef STRICT_MM_TYPECHECKS 50#ifdef STRICT_MM_TYPECHECKS
@@ -76,13 +87,6 @@ typedef unsigned long pgprot_t;
76 87
77#endif 88#endif
78 89
79
80/* align addr on a size boundary - adjust address up if needed -- Cort */
81#define _ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
82
83/* to align the pointer to the (next) page boundary */
84#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
85
86struct page; 90struct page;
87extern void clear_pages(void *page, int order); 91extern void clear_pages(void *page, int order);
88static inline void clear_page(void *page) { clear_pages(page, 0); } 92static inline void clear_page(void *page) { clear_pages(page, 0); }
diff --git a/include/asm-ppc/pci-bridge.h b/include/asm-ppc/pci-bridge.h
index ffa423456c2b..e58c78f90a5a 100644
--- a/include/asm-ppc/pci-bridge.h
+++ b/include/asm-ppc/pci-bridge.h
@@ -79,6 +79,11 @@ struct pci_controller {
79 struct resource mem_space; 79 struct resource mem_space;
80}; 80};
81 81
82static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
83{
84 return bus->sysdata;
85}
86
82/* These are used for config access before all the PCI probing 87/* These are used for config access before all the PCI probing
83 has been done. */ 88 has been done. */
84int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn, 89int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn,
diff --git a/include/asm-ppc/pci.h b/include/asm-ppc/pci.h
index 9dd06cd40096..61434edbad7b 100644
--- a/include/asm-ppc/pci.h
+++ b/include/asm-ppc/pci.h
@@ -24,9 +24,9 @@ struct pci_dev;
24 * Set this to 1 if you want the kernel to re-assign all PCI 24 * Set this to 1 if you want the kernel to re-assign all PCI
25 * bus numbers 25 * bus numbers
26 */ 26 */
27extern int pci_assign_all_busses; 27extern int pci_assign_all_buses;
28 28
29#define pcibios_assign_all_busses() (pci_assign_all_busses) 29#define pcibios_assign_all_busses() (pci_assign_all_buses)
30#define pcibios_scan_all_fns(a, b) 0 30#define pcibios_scan_all_fns(a, b) 0
31 31
32#define PCIBIOS_MIN_IO 0x1000 32#define PCIBIOS_MIN_IO 0x1000
@@ -126,7 +126,7 @@ extern void pcibios_add_platform_entries(struct pci_dev *dev);
126 126
127struct file; 127struct file;
128extern pgprot_t pci_phys_mem_access_prot(struct file *file, 128extern pgprot_t pci_phys_mem_access_prot(struct file *file,
129 unsigned long offset, 129 unsigned long pfn,
130 unsigned long size, 130 unsigned long size,
131 pgprot_t prot); 131 pgprot_t prot);
132 132
diff --git a/include/asm-ppc/perfmon.h b/include/asm-ppc/perfmon.h
deleted file mode 100644
index 5e7a89c47b5b..000000000000
--- a/include/asm-ppc/perfmon.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __PERFMON_H
2#define __PERFMON_H
3
4extern void (*perf_irq)(struct pt_regs *);
5
6int request_perfmon_irq(void (*handler)(struct pt_regs *));
7void free_perfmon_irq(void);
8
9#ifdef CONFIG_FSL_BOOKE
10void init_pmc_stop(int ctr);
11void set_pmc_event(int ctr, int event);
12void set_pmc_user_kernel(int ctr, int user, int kernel);
13void set_pmc_marked(int ctr, int mark0, int mark1);
14void pmc_start_ctr(int ctr, int enable);
15void pmc_start_ctrs(int enable);
16void pmc_stop_ctrs(void);
17void dump_pmcs(void);
18
19extern struct op_ppc32_model op_model_fsl_booke;
20#endif
21
22#endif /* __PERFMON_H */
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index eee601bb9ada..b28a713ba862 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -705,7 +705,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
705#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) 705#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
706 706
707struct file; 707struct file;
708extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, 708extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
709 unsigned long size, pgprot_t vma_prot); 709 unsigned long size, pgprot_t vma_prot);
710#define __HAVE_PHYS_MEM_ACCESS_PROT 710#define __HAVE_PHYS_MEM_ACCESS_PROT
711 711
diff --git a/include/asm-ppc/posix_types.h b/include/asm-ppc/posix_types.h
deleted file mode 100644
index a14a82abe8d2..000000000000
--- a/include/asm-ppc/posix_types.h
+++ /dev/null
@@ -1,111 +0,0 @@
1#ifndef _PPC_POSIX_TYPES_H
2#define _PPC_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned int __kernel_mode_t;
12typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef unsigned int __kernel_uid_t;
16typedef unsigned int __kernel_gid_t;
17typedef unsigned int __kernel_size_t;
18typedef int __kernel_ssize_t;
19typedef long __kernel_ptrdiff_t;
20typedef long __kernel_time_t;
21typedef long __kernel_suseconds_t;
22typedef long __kernel_clock_t;
23typedef int __kernel_timer_t;
24typedef int __kernel_clockid_t;
25typedef int __kernel_daddr_t;
26typedef char * __kernel_caddr_t;
27typedef short __kernel_ipc_pid_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32
33typedef unsigned int __kernel_old_uid_t;
34typedef unsigned int __kernel_old_gid_t;
35typedef unsigned int __kernel_old_dev_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#ifndef __GNUC__
46
47#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
48#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
49#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
50#define __FD_ZERO(set) \
51 ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
52
53#else /* __GNUC__ */
54
55#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \
56 || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
57/* With GNU C, use inline functions instead so args are evaluated only once: */
58
59#undef __FD_SET
60static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
61{
62 unsigned long _tmp = fd / __NFDBITS;
63 unsigned long _rem = fd % __NFDBITS;
64 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
65}
66
67#undef __FD_CLR
68static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
69{
70 unsigned long _tmp = fd / __NFDBITS;
71 unsigned long _rem = fd % __NFDBITS;
72 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
73}
74
75#undef __FD_ISSET
76static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
77{
78 unsigned long _tmp = fd / __NFDBITS;
79 unsigned long _rem = fd % __NFDBITS;
80 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
81}
82
83/*
84 * This will unroll the loop for the normal constant case (8 ints,
85 * for a 256-bit fd_set)
86 */
87#undef __FD_ZERO
88static __inline__ void __FD_ZERO(__kernel_fd_set *p)
89{
90 unsigned int *tmp = (unsigned int *)p->fds_bits;
91 int i;
92
93 if (__builtin_constant_p(__FDSET_LONGS)) {
94 switch (__FDSET_LONGS) {
95 case 8:
96 tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
97 tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
98 return;
99 }
100 }
101 i = __FDSET_LONGS;
102 while (i) {
103 i--;
104 *tmp = 0;
105 tmp++;
106 }
107}
108
109#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
110#endif /* __GNUC__ */
111#endif /* _PPC_POSIX_TYPES_H */
diff --git a/include/asm-ppc/ptrace.h b/include/asm-ppc/ptrace.h
index 7043c164b537..c34fb4e37a97 100644
--- a/include/asm-ppc/ptrace.h
+++ b/include/asm-ppc/ptrace.h
@@ -57,7 +57,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
57 57
58#define force_successful_syscall_return() \ 58#define force_successful_syscall_return() \
59 do { \ 59 do { \
60 current_thread_info()->local_flags |= _TIFL_FORCE_NOERROR; \ 60 current_thread_info()->syscall_noerror = 1; \
61 } while(0) 61 } while(0)
62 62
63/* 63/*
diff --git a/include/asm-ppc/rwsem.h b/include/asm-ppc/rwsem.h
deleted file mode 100644
index 3501ea72f88c..000000000000
--- a/include/asm-ppc/rwsem.h
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
3 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
4 * by Paul Mackerras <paulus@samba.org>.
5 */
6
7#ifndef _PPC_RWSEM_H
8#define _PPC_RWSEM_H
9
10#ifdef __KERNEL__
11#include <linux/list.h>
12#include <linux/spinlock.h>
13#include <asm/atomic.h>
14#include <asm/system.h>
15
16/*
17 * the semaphore definition
18 */
19struct rw_semaphore {
20 /* XXX this should be able to be an atomic_t -- paulus */
21 signed long count;
22#define RWSEM_UNLOCKED_VALUE 0x00000000
23#define RWSEM_ACTIVE_BIAS 0x00000001
24#define RWSEM_ACTIVE_MASK 0x0000ffff
25#define RWSEM_WAITING_BIAS (-0x00010000)
26#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
27#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
28 spinlock_t wait_lock;
29 struct list_head wait_list;
30#if RWSEM_DEBUG
31 int debug;
32#endif
33};
34
35/*
36 * initialisation
37 */
38#if RWSEM_DEBUG
39#define __RWSEM_DEBUG_INIT , 0
40#else
41#define __RWSEM_DEBUG_INIT /* */
42#endif
43
44#define __RWSEM_INITIALIZER(name) \
45 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
46 LIST_HEAD_INIT((name).wait_list) \
47 __RWSEM_DEBUG_INIT }
48
49#define DECLARE_RWSEM(name) \
50 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
51
52extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
53extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
54extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
55extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
56
57static inline void init_rwsem(struct rw_semaphore *sem)
58{
59 sem->count = RWSEM_UNLOCKED_VALUE;
60 spin_lock_init(&sem->wait_lock);
61 INIT_LIST_HEAD(&sem->wait_list);
62#if RWSEM_DEBUG
63 sem->debug = 0;
64#endif
65}
66
67/*
68 * lock for reading
69 */
70static inline void __down_read(struct rw_semaphore *sem)
71{
72 if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
73 smp_wmb();
74 else
75 rwsem_down_read_failed(sem);
76}
77
78static inline int __down_read_trylock(struct rw_semaphore *sem)
79{
80 int tmp;
81
82 while ((tmp = sem->count) >= 0) {
83 if (tmp == cmpxchg(&sem->count, tmp,
84 tmp + RWSEM_ACTIVE_READ_BIAS)) {
85 smp_wmb();
86 return 1;
87 }
88 }
89 return 0;
90}
91
92/*
93 * lock for writing
94 */
95static inline void __down_write(struct rw_semaphore *sem)
96{
97 int tmp;
98
99 tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
100 (atomic_t *)(&sem->count));
101 if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
102 smp_wmb();
103 else
104 rwsem_down_write_failed(sem);
105}
106
107static inline int __down_write_trylock(struct rw_semaphore *sem)
108{
109 int tmp;
110
111 tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
112 RWSEM_ACTIVE_WRITE_BIAS);
113 smp_wmb();
114 return tmp == RWSEM_UNLOCKED_VALUE;
115}
116
117/*
118 * unlock after reading
119 */
120static inline void __up_read(struct rw_semaphore *sem)
121{
122 int tmp;
123
124 smp_wmb();
125 tmp = atomic_dec_return((atomic_t *)(&sem->count));
126 if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
127 rwsem_wake(sem);
128}
129
130/*
131 * unlock after writing
132 */
133static inline void __up_write(struct rw_semaphore *sem)
134{
135 smp_wmb();
136 if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
137 (atomic_t *)(&sem->count)) < 0)
138 rwsem_wake(sem);
139}
140
141/*
142 * implement atomic add functionality
143 */
144static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
145{
146 atomic_add(delta, (atomic_t *)(&sem->count));
147}
148
149/*
150 * downgrade write lock to read lock
151 */
152static inline void __downgrade_write(struct rw_semaphore *sem)
153{
154 int tmp;
155
156 smp_wmb();
157 tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
158 if (tmp < 0)
159 rwsem_downgrade_wake(sem);
160}
161
162/*
163 * implement exchange and add functionality
164 */
165static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
166{
167 smp_mb();
168 return atomic_add_return(delta, (atomic_t *)(&sem->count));
169}
170
171static inline int rwsem_is_locked(struct rw_semaphore *sem)
172{
173 return (sem->count != 0);
174}
175
176#endif /* __KERNEL__ */
177#endif /* _PPC_RWSEM_XADD_H */
diff --git a/include/asm-ppc/scatterlist.h b/include/asm-ppc/scatterlist.h
deleted file mode 100644
index f21f18f56548..000000000000
--- a/include/asm-ppc/scatterlist.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _PPC_SCATTERLIST_H
3#define _PPC_SCATTERLIST_H
4
5#include <asm/dma.h>
6
7struct scatterlist {
8 struct page *page;
9 unsigned int offset;
10 dma_addr_t dma_address;
11 unsigned int length;
12};
13
14/*
15 * These macros should be used after a pci_map_sg call has been done
16 * to get bus addresses of each of the SG entries and their lengths.
17 * You should only work with the number of sg entries pci_map_sg
18 * returns, or alternatively stop on the first sg_dma_len(sg) which
19 * is 0.
20 */
21#define sg_dma_address(sg) ((sg)->dma_address)
22#define sg_dma_len(sg) ((sg)->length)
23
24#endif /* !(_PPC_SCATTERLIST_H) */
25#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/seccomp.h b/include/asm-ppc/seccomp.h
deleted file mode 100644
index 666c4da96d87..000000000000
--- a/include/asm-ppc/seccomp.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _ASM_SECCOMP_H
2
3#include <linux/unistd.h>
4
5#define __NR_seccomp_read __NR_read
6#define __NR_seccomp_write __NR_write
7#define __NR_seccomp_exit __NR_exit
8#define __NR_seccomp_sigreturn __NR_rt_sigreturn
9
10#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-ppc/sections.h b/include/asm-ppc/sections.h
deleted file mode 100644
index ba8f43ac9bf3..000000000000
--- a/include/asm-ppc/sections.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _PPC_SECTIONS_H
3#define _PPC_SECTIONS_H
4
5#include <asm-generic/sections.h>
6
7#define __pmac __attribute__ ((__section__ (".pmac.text")))
8#define __pmacdata __attribute__ ((__section__ (".pmac.data")))
9#define __pmacfunc(__argpmac) \
10 __argpmac __pmac; \
11 __argpmac
12
13#define __prep __attribute__ ((__section__ (".prep.text")))
14#define __prepdata __attribute__ ((__section__ (".prep.data")))
15#define __prepfunc(__argprep) \
16 __argprep __prep; \
17 __argprep
18
19#define __chrp __attribute__ ((__section__ (".chrp.text")))
20#define __chrpdata __attribute__ ((__section__ (".chrp.data")))
21#define __chrpfunc(__argchrp) \
22 __argchrp __chrp; \
23 __argchrp
24
25/* this is actually just common chrp/pmac code, not OF code -- Cort */
26#define __openfirmware __attribute__ ((__section__ (".openfirmware.text")))
27#define __openfirmwaredata __attribute__ ((__section__ (".openfirmware.data")))
28#define __openfirmwarefunc(__argopenfirmware) \
29 __argopenfirmware __openfirmware; \
30 __argopenfirmware
31
32#endif /* _PPC_SECTIONS_H */
33#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/semaphore.h b/include/asm-ppc/semaphore.h
deleted file mode 100644
index 89e6e73be08c..000000000000
--- a/include/asm-ppc/semaphore.h
+++ /dev/null
@@ -1,111 +0,0 @@
1#ifndef _PPC_SEMAPHORE_H
2#define _PPC_SEMAPHORE_H
3
4/*
5 * Swiped from asm-sparc/semaphore.h and modified
6 * -- Cort (cort@cs.nmt.edu)
7 *
8 * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h
9 * -- Ani Joshi (ajoshi@unixbox.com)
10 *
11 * Remove spinlock-based RW semaphores; RW semaphore definitions are
12 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
13 * Rework semaphores to use atomic_dec_if_positive.
14 * -- Paul Mackerras (paulus@samba.org)
15 */
16
17#ifdef __KERNEL__
18
19#include <asm/atomic.h>
20#include <asm/system.h>
21#include <linux/wait.h>
22#include <linux/rwsem.h>
23
24struct semaphore {
25 /*
26 * Note that any negative value of count is equivalent to 0,
27 * but additionally indicates that some process(es) might be
28 * sleeping on `wait'.
29 */
30 atomic_t count;
31 wait_queue_head_t wait;
32};
33
34#define __SEMAPHORE_INITIALIZER(name, n) \
35{ \
36 .count = ATOMIC_INIT(n), \
37 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
38}
39
40#define __MUTEX_INITIALIZER(name) \
41 __SEMAPHORE_INITIALIZER(name, 1)
42
43#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
44 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
45
46#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
47#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
48
49static inline void sema_init (struct semaphore *sem, int val)
50{
51 atomic_set(&sem->count, val);
52 init_waitqueue_head(&sem->wait);
53}
54
55static inline void init_MUTEX (struct semaphore *sem)
56{
57 sema_init(sem, 1);
58}
59
60static inline void init_MUTEX_LOCKED (struct semaphore *sem)
61{
62 sema_init(sem, 0);
63}
64
65extern void __down(struct semaphore * sem);
66extern int __down_interruptible(struct semaphore * sem);
67extern void __up(struct semaphore * sem);
68
69extern inline void down(struct semaphore * sem)
70{
71 might_sleep();
72
73 /*
74 * Try to get the semaphore, take the slow path if we fail.
75 */
76 if (atomic_dec_return(&sem->count) < 0)
77 __down(sem);
78 smp_wmb();
79}
80
81extern inline int down_interruptible(struct semaphore * sem)
82{
83 int ret = 0;
84
85 might_sleep();
86
87 if (atomic_dec_return(&sem->count) < 0)
88 ret = __down_interruptible(sem);
89 smp_wmb();
90 return ret;
91}
92
93extern inline int down_trylock(struct semaphore * sem)
94{
95 int ret;
96
97 ret = atomic_dec_if_positive(&sem->count) < 0;
98 smp_wmb();
99 return ret;
100}
101
102extern inline void up(struct semaphore * sem)
103{
104 smp_wmb();
105 if (atomic_inc_return(&sem->count) <= 0)
106 __up(sem);
107}
108
109#endif /* __KERNEL__ */
110
111#endif /* !(_PPC_SEMAPHORE_H) */
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h
index 829481c0a9dc..30e9268a888c 100644
--- a/include/asm-ppc/smp.h
+++ b/include/asm-ppc/smp.h
@@ -35,6 +35,7 @@ extern cpumask_t cpu_possible_map;
35extern unsigned long smp_proc_in_lock[]; 35extern unsigned long smp_proc_in_lock[];
36extern volatile unsigned long cpu_callin_map[]; 36extern volatile unsigned long cpu_callin_map[];
37extern int smp_tb_synchronized; 37extern int smp_tb_synchronized;
38extern struct smp_ops_t *smp_ops;
38 39
39extern void smp_send_tlb_invalidate(int); 40extern void smp_send_tlb_invalidate(int);
40extern void smp_send_xmon_break(int cpu); 41extern void smp_send_xmon_break(int cpu);
@@ -45,32 +46,31 @@ extern int __cpu_disable(void);
45extern void __cpu_die(unsigned int cpu); 46extern void __cpu_die(unsigned int cpu);
46extern void cpu_die(void) __attribute__((noreturn)); 47extern void cpu_die(void) __attribute__((noreturn));
47 48
48#define NO_PROC_ID 0xFF /* No processor magic marker */
49#define PROC_CHANGE_PENALTY 20
50
51#define raw_smp_processor_id() (current_thread_info()->cpu) 49#define raw_smp_processor_id() (current_thread_info()->cpu)
52 50
53extern int __cpu_up(unsigned int cpu); 51extern int __cpu_up(unsigned int cpu);
54 52
55extern int smp_hw_index[]; 53extern int smp_hw_index[];
56#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()]) 54#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
57 55#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
58struct klock_info_struct { 56#define set_hard_smp_processor_id(cpu, phys)\
59 unsigned long kernel_flag; 57 (smp_hw_index[(cpu)] = (phys))
60 unsigned char akp; 58
61};
62
63extern struct klock_info_struct klock_info;
64#define KLOCK_HELD 0xffffffff
65#define KLOCK_CLEAR 0x0
66
67#endif /* __ASSEMBLY__ */ 59#endif /* __ASSEMBLY__ */
68 60
69#else /* !(CONFIG_SMP) */ 61#else /* !(CONFIG_SMP) */
70 62
71static inline void cpu_die(void) { } 63static inline void cpu_die(void) { }
64#define get_hard_smp_processor_id(cpu) 0
65#define set_hard_smp_processor_id(cpu, phys)
66#define hard_smp_processor_id() 0
72 67
73#endif /* !(CONFIG_SMP) */ 68#endif /* !(CONFIG_SMP) */
74 69
70#ifndef __ASSEMBLY__
71extern int boot_cpuid;
72extern int boot_cpuid_phys;
73#endif
74
75#endif /* !(_PPC_SMP_H) */ 75#endif /* !(_PPC_SMP_H) */
76#endif /* __KERNEL__ */ 76#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h
index 20edcf2a6e0c..5c64b75f0295 100644
--- a/include/asm-ppc/spinlock.h
+++ b/include/asm-ppc/spinlock.h
@@ -9,7 +9,7 @@
9 * (the type definitions are in asm/raw_spinlock_types.h) 9 * (the type definitions are in asm/raw_spinlock_types.h)
10 */ 10 */
11 11
12#define __raw_spin_is_locked(x) ((x)->lock != 0) 12#define __raw_spin_is_locked(x) ((x)->slock != 0)
13#define __raw_spin_unlock_wait(lock) \ 13#define __raw_spin_unlock_wait(lock) \
14 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 14 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
15#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 15#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -31,17 +31,17 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
31 bne- 2b\n\ 31 bne- 2b\n\
32 isync" 32 isync"
33 : "=&r"(tmp) 33 : "=&r"(tmp)
34 : "r"(&lock->lock), "r"(1) 34 : "r"(&lock->slock), "r"(1)
35 : "cr0", "memory"); 35 : "cr0", "memory");
36} 36}
37 37
38static inline void __raw_spin_unlock(raw_spinlock_t *lock) 38static inline void __raw_spin_unlock(raw_spinlock_t *lock)
39{ 39{
40 __asm__ __volatile__("eieio # __raw_spin_unlock": : :"memory"); 40 __asm__ __volatile__("eieio # __raw_spin_unlock": : :"memory");
41 lock->lock = 0; 41 lock->slock = 0;
42} 42}
43 43
44#define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) 44#define __raw_spin_trylock(l) (!test_and_set_bit(0,(volatile unsigned long *)(&(l)->slock)))
45 45
46/* 46/*
47 * Read-write spinlocks, allowing multiple readers 47 * Read-write spinlocks, allowing multiple readers
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h
deleted file mode 100644
index 7919ccc75b8a..000000000000
--- a/include/asm-ppc/spinlock_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef __ASM_SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H
3
4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly"
6#endif
7
8typedef struct {
9 volatile unsigned long lock;
10} raw_spinlock_t;
11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
13
14typedef struct {
15 volatile signed int lock;
16} raw_rwlock_t;
17
18#define __RAW_RW_LOCK_UNLOCKED { 0 }
19
20#endif
diff --git a/include/asm-ppc/statfs.h b/include/asm-ppc/statfs.h
deleted file mode 100644
index 807c69954a1b..000000000000
--- a/include/asm-ppc/statfs.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _PPC_STATFS_H
2#define _PPC_STATFS_H
3
4#include <asm-generic/statfs.h>
5#endif
6
7
8
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index d754ab570fe0..bd99cb53a19f 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -70,25 +70,47 @@ extern void _set_L3CR(unsigned long);
70#endif 70#endif
71extern void via_cuda_init(void); 71extern void via_cuda_init(void);
72extern void pmac_nvram_init(void); 72extern void pmac_nvram_init(void);
73extern void chrp_nvram_init(void);
73extern void read_rtc_time(void); 74extern void read_rtc_time(void);
74extern void pmac_find_display(void); 75extern void pmac_find_display(void);
75extern void giveup_fpu(struct task_struct *); 76extern void giveup_fpu(struct task_struct *);
76extern void enable_kernel_fp(void); 77extern void enable_kernel_fp(void);
78extern void flush_fp_to_thread(struct task_struct *);
77extern void enable_kernel_altivec(void); 79extern void enable_kernel_altivec(void);
78extern void giveup_altivec(struct task_struct *); 80extern void giveup_altivec(struct task_struct *);
79extern void load_up_altivec(struct task_struct *); 81extern void load_up_altivec(struct task_struct *);
82extern int emulate_altivec(struct pt_regs *);
80extern void giveup_spe(struct task_struct *); 83extern void giveup_spe(struct task_struct *);
81extern void load_up_spe(struct task_struct *); 84extern void load_up_spe(struct task_struct *);
82extern int fix_alignment(struct pt_regs *); 85extern int fix_alignment(struct pt_regs *);
83extern void cvt_fd(float *from, double *to, unsigned long *fpscr); 86extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
84extern void cvt_df(double *from, float *to, unsigned long *fpscr); 87extern void cvt_df(double *from, float *to, struct thread_struct *thread);
88
89#ifdef CONFIG_ALTIVEC
90extern void flush_altivec_to_thread(struct task_struct *);
91#else
92static inline void flush_altivec_to_thread(struct task_struct *t)
93{
94}
95#endif
96
97#ifdef CONFIG_SPE
98extern void flush_spe_to_thread(struct task_struct *);
99#else
100static inline void flush_spe_to_thread(struct task_struct *t)
101{
102}
103#endif
104
85extern int call_rtas(const char *, int, int, unsigned long *, ...); 105extern int call_rtas(const char *, int, int, unsigned long *, ...);
86extern void cacheable_memzero(void *p, unsigned int nb); 106extern void cacheable_memzero(void *p, unsigned int nb);
87extern void *cacheable_memcpy(void *, const void *, unsigned int); 107extern void *cacheable_memcpy(void *, const void *, unsigned int);
88extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); 108extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
89extern void bad_page_fault(struct pt_regs *, unsigned long, int); 109extern void bad_page_fault(struct pt_regs *, unsigned long, int);
90extern void die(const char *, struct pt_regs *, long); 110extern int die(const char *, struct pt_regs *, long);
91extern void _exception(int, struct pt_regs *, int, unsigned long); 111extern void _exception(int, struct pt_regs *, int, unsigned long);
112void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
113
92#ifdef CONFIG_BOOKE_WDT 114#ifdef CONFIG_BOOKE_WDT
93extern u32 booke_wdt_enabled; 115extern u32 booke_wdt_enabled;
94extern u32 booke_wdt_period; 116extern u32 booke_wdt_period;
diff --git a/include/asm-ppc/thread_info.h b/include/asm-ppc/thread_info.h
deleted file mode 100644
index 27903db42efc..000000000000
--- a/include/asm-ppc/thread_info.h
+++ /dev/null
@@ -1,107 +0,0 @@
1/* thread_info.h: PPC low-level thread information
2 * adapted from the i386 version by Paul Mackerras
3 *
4 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
5 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
6 */
7
8#ifndef _ASM_THREAD_INFO_H
9#define _ASM_THREAD_INFO_H
10
11#ifdef __KERNEL__
12#ifndef __ASSEMBLY__
13/*
14 * low level task data.
15 * If you change this, change the TI_* offsets below to match.
16 */
17struct thread_info {
18 struct task_struct *task; /* main task structure */
19 struct exec_domain *exec_domain; /* execution domain */
20 unsigned long flags; /* low level flags */
21 unsigned long local_flags; /* non-racy flags */
22 int cpu; /* cpu we're on */
23 int preempt_count; /* 0 => preemptable,
24 <0 => BUG */
25 struct restart_block restart_block;
26};
27
28#define INIT_THREAD_INFO(tsk) \
29{ \
30 .task = &tsk, \
31 .exec_domain = &default_exec_domain, \
32 .flags = 0, \
33 .local_flags = 0, \
34 .cpu = 0, \
35 .preempt_count = 1, \
36 .restart_block = { \
37 .fn = do_no_restart_syscall, \
38 }, \
39}
40
41#define init_thread_info (init_thread_union.thread_info)
42#define init_stack (init_thread_union.stack)
43
44/*
45 * macros/functions for gaining access to the thread information structure
46 */
47
48/* how to get the thread information struct from C */
49static inline struct thread_info *current_thread_info(void)
50{
51 struct thread_info *ti;
52 __asm__("rlwinm %0,1,0,0,18" : "=r"(ti));
53 return ti;
54}
55
56/* thread information allocation */
57#define alloc_thread_info(tsk) ((struct thread_info *) \
58 __get_free_pages(GFP_KERNEL, 1))
59#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
60#define get_thread_info(ti) get_task_struct((ti)->task)
61#define put_thread_info(ti) put_task_struct((ti)->task)
62#endif /* __ASSEMBLY__ */
63
64/*
65 * Size of kernel stack for each process.
66 */
67#define THREAD_SIZE 8192 /* 2 pages */
68
69#define PREEMPT_ACTIVE 0x10000000
70
71/*
72 * thread information flag bit numbers
73 */
74#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
75#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
76#define TIF_SIGPENDING 2 /* signal pending */
77#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
78#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling
79 TIF_NEED_RESCHED */
80#define TIF_MEMDIE 5
81#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
82#define TIF_SECCOMP 7 /* secure computing */
83
84/* as above, but as bit values */
85#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
86#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
87#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
88#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
89#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
90#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
91#define _TIF_SECCOMP (1<<TIF_SECCOMP)
92
93#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
94
95/*
96 * Non racy (local) flags bit numbers
97 */
98#define TIFL_FORCE_NOERROR 0 /* don't return error from current
99 syscall even if result < 0 */
100
101/* as above, but as bit values */
102#define _TIFL_FORCE_NOERROR (1<<TIFL_FORCE_NOERROR)
103
104
105#endif /* __KERNEL__ */
106
107#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-ppc/types.h b/include/asm-ppc/types.h
deleted file mode 100644
index 77dc24d7d2ad..000000000000
--- a/include/asm-ppc/types.h
+++ /dev/null
@@ -1,69 +0,0 @@
1#ifndef _PPC_TYPES_H
2#define _PPC_TYPES_H
3
4#ifndef __ASSEMBLY__
5
6typedef __signed__ char __s8;
7typedef unsigned char __u8;
8
9typedef __signed__ short __s16;
10typedef unsigned short __u16;
11
12typedef __signed__ int __s32;
13typedef unsigned int __u32;
14
15#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
16typedef __signed__ long long __s64;
17typedef unsigned long long __u64;
18#endif
19
20typedef struct {
21 __u32 u[4];
22} __vector128;
23
24/*
25 * XXX allowed outside of __KERNEL__ for now, until glibc gets
26 * a proper set of asm headers of its own. -- paulus
27 */
28typedef unsigned short umode_t;
29
30#endif /* __ASSEMBLY__ */
31
32#ifdef __KERNEL__
33/*
34 * These aren't exported outside the kernel to avoid name space clashes
35 */
36#define BITS_PER_LONG 32
37
38#ifndef __ASSEMBLY__
39
40#include <linux/config.h>
41
42typedef signed char s8;
43typedef unsigned char u8;
44
45typedef signed short s16;
46typedef unsigned short u16;
47
48typedef signed int s32;
49typedef unsigned int u32;
50
51typedef signed long long s64;
52typedef unsigned long long u64;
53
54typedef __vector128 vector128;
55
56/* DMA addresses are 32-bits wide */
57typedef u32 dma_addr_t;
58typedef u64 dma64_addr_t;
59
60#ifdef CONFIG_LBD
61typedef u64 sector_t;
62#define HAVE_SECTOR_T
63#endif
64
65#endif /* __ASSEMBLY__ */
66
67#endif /* __KERNEL__ */
68
69#endif
diff --git a/include/asm-ppc/vga.h b/include/asm-ppc/vga.h
deleted file mode 100644
index c5864734e3e1..000000000000
--- a/include/asm-ppc/vga.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */
6
7#ifdef __KERNEL__
8#ifndef _LINUX_ASM_VGA_H_
9#define _LINUX_ASM_VGA_H_
10
11#include <asm/io.h>
12
13#include <linux/config.h>
14
15#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
16
17#define VT_BUF_HAVE_RW
18/*
19 * These are only needed for supporting VGA or MDA text mode, which use little
20 * endian byte ordering.
21 * In other cases, we can optimize by using native byte ordering and
22 * <linux/vt_buffer.h> has already done the right job for us.
23 */
24
25extern inline void scr_writew(u16 val, volatile u16 *addr)
26{
27 st_le16(addr, val);
28}
29
30extern inline u16 scr_readw(volatile const u16 *addr)
31{
32 return ld_le16(addr);
33}
34
35#define VT_BUF_HAVE_MEMCPYW
36#define scr_memcpyw memcpy
37
38#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
39
40extern unsigned long vgacon_remap_base;
41#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
42#define vga_readb(x) (*(x))
43#define vga_writeb(x,y) (*(y) = (x))
44
45#endif
46#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/xmon.h b/include/asm-ppc/xmon.h
deleted file mode 100644
index 042b83e6680d..000000000000
--- a/include/asm-ppc/xmon.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef __PPC_XMON_H
2#define __PPC_XMON_H
3#ifdef __KERNEL__
4
5struct pt_regs;
6
7extern void xmon(struct pt_regs *excp);
8extern void xmon_printf(const char *fmt, ...);
9extern void xmon_map_scc(void);
10extern int xmon_bpt(struct pt_regs *regs);
11extern int xmon_sstep(struct pt_regs *regs);
12extern int xmon_iabr_match(struct pt_regs *regs);
13extern int xmon_dabr_match(struct pt_regs *regs);
14extern void (*xmon_fault_handler)(struct pt_regs *regs);
15
16#endif
17#endif
diff --git a/include/asm-ppc64/abs_addr.h b/include/asm-ppc64/abs_addr.h
index 84c24d4cdb71..dc3fc3fefef2 100644
--- a/include/asm-ppc64/abs_addr.h
+++ b/include/asm-ppc64/abs_addr.h
@@ -63,4 +63,11 @@ static inline unsigned long phys_to_abs(unsigned long pa)
63#define virt_to_abs(va) phys_to_abs(__pa(va)) 63#define virt_to_abs(va) phys_to_abs(__pa(va))
64#define abs_to_virt(aa) __va(aa) 64#define abs_to_virt(aa) __va(aa)
65 65
66/*
67 * Converts Virtual Address to Real Address for
68 * Legacy iSeries Hypervisor calls
69 */
70#define iseries_hv_addr(virtaddr) \
71 (0x8000000000000000 | virt_to_abs(virtaddr))
72
66#endif /* _ABS_ADDR_H */ 73#endif /* _ABS_ADDR_H */
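
The new iseries_hv_addr() macro simply tags the absolute address with the top bit that legacy iSeries hypervisor calls expect. A hedged standalone sketch of the same transformation is below; virt_to_abs() is replaced by an identity stand-in since its real definition (via __pa() and phys_to_abs()) lives elsewhere in this header.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for virt_to_abs(), purely for illustration. */
static uint64_t virt_to_abs(uint64_t va)
{
	return va;
}

static uint64_t iseries_hv_addr(uint64_t virtaddr)
{
	/* Same top-bit tag the new macro applies. */
	return 0x8000000000000000ULL | virt_to_abs(virtaddr);
}

int main(void)
{
	printf("hv address: 0x%016llx\n",
	       (unsigned long long)iseries_hv_addr(0x12345678ULL));
	return 0;
}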
diff --git a/include/asm-ppc64/atomic.h b/include/asm-ppc64/atomic.h
deleted file mode 100644
index 0e5f25e83bc0..000000000000
--- a/include/asm-ppc64/atomic.h
+++ /dev/null
@@ -1,197 +0,0 @@
1/*
2 * PowerPC64 atomic operations
3 *
4 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef _ASM_PPC64_ATOMIC_H_
14#define _ASM_PPC64_ATOMIC_H_
15
16#include <asm/memory.h>
17
18typedef struct { volatile int counter; } atomic_t;
19
20#define ATOMIC_INIT(i) { (i) }
21
22#define atomic_read(v) ((v)->counter)
23#define atomic_set(v,i) (((v)->counter) = (i))
24
25static __inline__ void atomic_add(int a, atomic_t *v)
26{
27 int t;
28
29 __asm__ __volatile__(
30"1: lwarx %0,0,%3 # atomic_add\n\
31 add %0,%2,%0\n\
32 stwcx. %0,0,%3\n\
33 bne- 1b"
34 : "=&r" (t), "=m" (v->counter)
35 : "r" (a), "r" (&v->counter), "m" (v->counter)
36 : "cc");
37}
38
39static __inline__ int atomic_add_return(int a, atomic_t *v)
40{
41 int t;
42
43 __asm__ __volatile__(
44 EIEIO_ON_SMP
45"1: lwarx %0,0,%2 # atomic_add_return\n\
46 add %0,%1,%0\n\
47 stwcx. %0,0,%2\n\
48 bne- 1b"
49 ISYNC_ON_SMP
50 : "=&r" (t)
51 : "r" (a), "r" (&v->counter)
52 : "cc", "memory");
53
54 return t;
55}
56
57#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
58
59static __inline__ void atomic_sub(int a, atomic_t *v)
60{
61 int t;
62
63 __asm__ __volatile__(
64"1: lwarx %0,0,%3 # atomic_sub\n\
65 subf %0,%2,%0\n\
66 stwcx. %0,0,%3\n\
67 bne- 1b"
68 : "=&r" (t), "=m" (v->counter)
69 : "r" (a), "r" (&v->counter), "m" (v->counter)
70 : "cc");
71}
72
73static __inline__ int atomic_sub_return(int a, atomic_t *v)
74{
75 int t;
76
77 __asm__ __volatile__(
78 EIEIO_ON_SMP
79"1: lwarx %0,0,%2 # atomic_sub_return\n\
80 subf %0,%1,%0\n\
81 stwcx. %0,0,%2\n\
82 bne- 1b"
83 ISYNC_ON_SMP
84 : "=&r" (t)
85 : "r" (a), "r" (&v->counter)
86 : "cc", "memory");
87
88 return t;
89}
90
91static __inline__ void atomic_inc(atomic_t *v)
92{
93 int t;
94
95 __asm__ __volatile__(
96"1: lwarx %0,0,%2 # atomic_inc\n\
97 addic %0,%0,1\n\
98 stwcx. %0,0,%2\n\
99 bne- 1b"
100 : "=&r" (t), "=m" (v->counter)
101 : "r" (&v->counter), "m" (v->counter)
102 : "cc");
103}
104
105static __inline__ int atomic_inc_return(atomic_t *v)
106{
107 int t;
108
109 __asm__ __volatile__(
110 EIEIO_ON_SMP
111"1: lwarx %0,0,%1 # atomic_inc_return\n\
112 addic %0,%0,1\n\
113 stwcx. %0,0,%1\n\
114 bne- 1b"
115 ISYNC_ON_SMP
116 : "=&r" (t)
117 : "r" (&v->counter)
118 : "cc", "memory");
119
120 return t;
121}
122
123/*
124 * atomic_inc_and_test - increment and test
125 * @v: pointer of type atomic_t
126 *
127 * Atomically increments @v by 1
128 * and returns true if the result is zero, or false for all
129 * other cases.
130 */
131#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
132
133static __inline__ void atomic_dec(atomic_t *v)
134{
135 int t;
136
137 __asm__ __volatile__(
138"1: lwarx %0,0,%2 # atomic_dec\n\
139 addic %0,%0,-1\n\
140 stwcx. %0,0,%2\n\
141 bne- 1b"
142 : "=&r" (t), "=m" (v->counter)
143 : "r" (&v->counter), "m" (v->counter)
144 : "cc");
145}
146
147static __inline__ int atomic_dec_return(atomic_t *v)
148{
149 int t;
150
151 __asm__ __volatile__(
152 EIEIO_ON_SMP
153"1: lwarx %0,0,%1 # atomic_dec_return\n\
154 addic %0,%0,-1\n\
155 stwcx. %0,0,%1\n\
156 bne- 1b"
157 ISYNC_ON_SMP
158 : "=&r" (t)
159 : "r" (&v->counter)
160 : "cc", "memory");
161
162 return t;
163}
164
165#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
166#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
167
168/*
169 * Atomically test *v and decrement if it is greater than 0.
170 * The function returns the old value of *v minus 1.
171 */
172static __inline__ int atomic_dec_if_positive(atomic_t *v)
173{
174 int t;
175
176 __asm__ __volatile__(
177 EIEIO_ON_SMP
178"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
179 addic. %0,%0,-1\n\
180 blt- 2f\n\
181 stwcx. %0,0,%1\n\
182 bne- 1b"
183 ISYNC_ON_SMP
184 "\n\
1852:" : "=&r" (t)
186 : "r" (&v->counter)
187 : "cc", "memory");
188
189 return t;
190}
191
192#define smp_mb__before_atomic_dec() smp_mb()
193#define smp_mb__after_atomic_dec() smp_mb()
194#define smp_mb__before_atomic_inc() smp_mb()
195#define smp_mb__after_atomic_inc() smp_mb()
196
197#endif /* _ASM_PPC64_ATOMIC_H_ */
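
atomic_dec_if_positive() above is the least familiar primitive in this file: it decrements only when the counter is above zero and reports the would-be result either way. The following is a userspace analogue built on GCC's compare-and-swap builtin instead of the kernel's lwarx/stwcx. loop, shown only to make the intended semantics concrete.

#include <stdio.h>

/* Analogue of atomic_dec_if_positive(): decrement only if the current
 * value is > 0, and return the old value minus 1 in every case. */
static int dec_if_positive(volatile int *v)
{
	int old, new;

	do {
		old = *v;
		new = old - 1;
		if (new < 0)
			return new;	/* counter was 0 or negative: no store */
	} while (__sync_val_compare_and_swap(v, old, new) != old);

	return new;
}

int main(void)
{
	volatile int count = 1;

	printf("%d\n", dec_if_positive(&count));	/* 0: took the last reference */
	printf("%d\n", dec_if_positive(&count));	/* -1: nothing left */
	return 0;
}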
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
index a0f831224f96..dbfa42ef4a99 100644
--- a/include/asm-ppc64/bitops.h
+++ b/include/asm-ppc64/bitops.h
@@ -42,7 +42,7 @@
42 42
43#ifdef __KERNEL__ 43#ifdef __KERNEL__
44 44
45#include <asm/memory.h> 45#include <asm/synch.h>
46 46
47/* 47/*
48 * clear_bit doesn't imply a memory barrier 48 * clear_bit doesn't imply a memory barrier
diff --git a/include/asm-ppc64/bootinfo.h b/include/asm-ppc64/bootinfo.h
deleted file mode 100644
index f55e7cb48f46..000000000000
--- a/include/asm-ppc64/bootinfo.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * Non-machine dependent bootinfo structure. Basic idea
3 * borrowed from the m68k.
4 *
5 * Copyright (C) 1999 Cort Dougan <cort@ppc.kernel.org>
6 * Copyright (c) 2001 PPC64 Team, IBM Corp
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14
15#ifndef _PPC64_BOOTINFO_H
16#define _PPC64_BOOTINFO_H
17
18#include <asm/types.h>
19
20/* We use a u32 for the type of the fields since they're written by
21 * the bootloader which is a 32-bit process and read by the kernel
22 * which is a 64-bit process. This way they can both agree on the
23 * size of the type.
24 */
25typedef u32 bi_rec_field;
26
27struct bi_record {
28 bi_rec_field tag; /* tag ID */
29 bi_rec_field size; /* size of record (in bytes) */
30 bi_rec_field data[0]; /* data */
31};
32
33#define BI_FIRST 0x1010 /* first record - marker */
34#define BI_LAST 0x1011 /* last record - marker */
35#define BI_CMD_LINE 0x1012
36#define BI_BOOTLOADER_ID 0x1013
37#define BI_INITRD 0x1014
38#define BI_SYSMAP 0x1015
39#define BI_MACHTYPE 0x1016
40
41static __inline__ struct bi_record * bi_rec_init(unsigned long addr)
42{
43 struct bi_record *bi_recs;
44 bi_recs = (struct bi_record *)_ALIGN(addr, PAGE_SIZE);
45 bi_recs->size = 0;
46 return bi_recs;
47}
48
49static __inline__ struct bi_record * bi_rec_alloc(struct bi_record *rec,
50 unsigned long args)
51{
52 rec = (struct bi_record *)((unsigned long)rec + rec->size);
53 rec->size = sizeof(struct bi_record) + args*sizeof(bi_rec_field);
54 return rec;
55}
56
57static __inline__ struct bi_record * bi_rec_alloc_bytes(struct bi_record *rec,
58 unsigned long bytes)
59{
60 rec = (struct bi_record *)((unsigned long)rec + rec->size);
61 rec->size = sizeof(struct bi_record) + bytes;
62 return rec;
63}
64
65static __inline__ struct bi_record * bi_rec_next(struct bi_record *rec)
66{
67 return (struct bi_record *)((unsigned long)rec + rec->size);
68}
69
70#endif /* _PPC64_BOOTINFO_H */
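
The bi_record helpers above build and walk a packed chain of tag/size/data records handed from a 32-bit bootloader to the 64-bit kernel. A standalone sketch that walks such a chain with the same pointer arithmetic as bi_rec_next() follows; the tag values are copied from the header, and the records are fabricated in a local buffer purely for the walk.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef uint32_t bi_rec_field;

struct bi_record {
	bi_rec_field tag;	/* tag ID */
	bi_rec_field size;	/* size of record (in bytes) */
	bi_rec_field data[];	/* payload */
};

#define BI_FIRST	0x1010
#define BI_LAST		0x1011
#define BI_CMD_LINE	0x1012

/* Same arithmetic as bi_rec_next(). */
static struct bi_record *bi_next(struct bi_record *rec)
{
	return (struct bi_record *)((unsigned long)rec + rec->size);
}

int main(void)
{
	static uint32_t buf[64];
	struct bi_record *rec = (struct bi_record *)buf;

	/* Fabricate FIRST -> CMD_LINE("root=/dev/sda1") -> LAST. */
	rec->tag = BI_FIRST;
	rec->size = sizeof(*rec);
	rec = bi_next(rec);
	rec->tag = BI_CMD_LINE;
	rec->size = sizeof(*rec) + 16;	/* header plus 16 payload bytes */
	memcpy(rec->data, "root=/dev/sda1", 15);
	rec = bi_next(rec);
	rec->tag = BI_LAST;
	rec->size = sizeof(*rec);

	for (rec = (struct bi_record *)buf; ; rec = bi_next(rec)) {
		printf("tag 0x%x, size %u\n", (unsigned)rec->tag, (unsigned)rec->size);
		if (rec->tag == BI_LAST)
			break;
	}
	return 0;
}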
diff --git a/include/asm-ppc64/btext.h b/include/asm-ppc64/btext.h
index 67aef0cc72c0..71cce36bc630 100644
--- a/include/asm-ppc64/btext.h
+++ b/include/asm-ppc64/btext.h
@@ -15,6 +15,7 @@ extern int boot_text_mapped;
15extern int btext_initialize(struct device_node *np); 15extern int btext_initialize(struct device_node *np);
16 16
17extern void map_boot_text(void); 17extern void map_boot_text(void);
18extern void init_boot_display(void);
18extern void btext_update_display(unsigned long phys, int width, int height, 19extern void btext_update_display(unsigned long phys, int width, int height,
19 int depth, int pitch); 20 int depth, int pitch);
20 21
diff --git a/include/asm-ppc64/cputable.h b/include/asm-ppc64/cputable.h
deleted file mode 100644
index acc9b4d6c168..000000000000
--- a/include/asm-ppc64/cputable.h
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * include/asm-ppc64/cputable.h
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef __ASM_PPC_CPUTABLE_H
16#define __ASM_PPC_CPUTABLE_H
17
18#include <linux/config.h>
19#include <asm/page.h> /* for ASM_CONST */
20
21/* Exposed to userland CPU features - Must match ppc32 definitions */
22#define PPC_FEATURE_32 0x80000000
23#define PPC_FEATURE_64 0x40000000
24#define PPC_FEATURE_601_INSTR 0x20000000
25#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
26#define PPC_FEATURE_HAS_FPU 0x08000000
27#define PPC_FEATURE_HAS_MMU 0x04000000
28#define PPC_FEATURE_HAS_4xxMAC 0x02000000
29#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
30
31#ifdef __KERNEL__
32
33#ifndef __ASSEMBLY__
34
35/* This structure can grow, its real size is used by head.S code
36 * via the mkdefs mechanism.
37 */
38struct cpu_spec;
39struct op_ppc64_model;
40
41typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
42
43struct cpu_spec {
44 /* CPU is matched via (PVR & pvr_mask) == pvr_value */
45 unsigned int pvr_mask;
46 unsigned int pvr_value;
47
48 char *cpu_name;
49 unsigned long cpu_features; /* Kernel features */
50 unsigned int cpu_user_features; /* Userland features */
51
52 /* cache line sizes */
53 unsigned int icache_bsize;
54 unsigned int dcache_bsize;
55
56 /* number of performance monitor counters */
57 unsigned int num_pmcs;
58
59 /* this is called to initialize various CPU bits like L1 cache,
60 * BHT, SPD, etc... from head.S before branching to identify_machine
61 */
62 cpu_setup_t cpu_setup;
63
64 /* Used by oprofile userspace to select the right counters */
65 char *oprofile_cpu_type;
66
67 /* Processor specific oprofile operations */
68 struct op_ppc64_model *oprofile_model;
69};
70
71extern struct cpu_spec cpu_specs[];
72extern struct cpu_spec *cur_cpu_spec;
73
74static inline unsigned long cpu_has_feature(unsigned long feature)
75{
76 return cur_cpu_spec->cpu_features & feature;
77}
78
79#endif /* __ASSEMBLY__ */
80
81/* CPU kernel features */
82
83/* Retain the 32b definitions for the time being - use bottom half of word */
84#define CPU_FTR_SPLIT_ID_CACHE ASM_CONST(0x0000000000000001)
85#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
86#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
87#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
88#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
89#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
90#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
91#define CPU_FTR_604_PERF_MON ASM_CONST(0x0000000000000080)
92#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
93#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
94#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
95#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
96#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
97#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
98#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
99
100/* Add the 64b processor unique features in the top half of the word */
101#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
102#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
103#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
104#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
105#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000001000000000)
106#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
107#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
108/* unused ASM_CONST(0x0000008000000000) */
109#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
110#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
111#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
112#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
113#define CPU_FTR_CTRL ASM_CONST(0x0000100000000000)
114
115#ifndef __ASSEMBLY__
116
117#define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_64 | \
118 PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)
119
120#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
121 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
122 CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
123
124/* iSeries doesn't support large pages */
125#ifdef CONFIG_PPC_ISERIES
126#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE)
127#else
128#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
129#endif /* CONFIG_PPC_ISERIES */
130
131#endif /* __ASSEMBLY__ */
132
133#ifdef __ASSEMBLY__
134
135#define BEGIN_FTR_SECTION 98:
136
137#define END_FTR_SECTION(msk, val) \
13899: \
139 .section __ftr_fixup,"a"; \
140 .align 3; \
141 .llong msk; \
142 .llong val; \
143 .llong 98b; \
144 .llong 99b; \
145 .previous
146
147#else
148
149#define BEGIN_FTR_SECTION "98:\n"
150#define END_FTR_SECTION(msk, val) \
151"99:\n" \
152" .section __ftr_fixup,\"a\";\n" \
153" .align 3;\n" \
154" .llong "#msk";\n" \
155" .llong "#val";\n" \
156" .llong 98b;\n" \
157" .llong 99b;\n" \
158" .previous\n"
159
160#endif /* __ASSEMBLY__ */
161
162#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
163#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
164
165#endif /* __ASM_PPC_CPUTABLE_H */
166#endif /* __KERNEL__ */
167
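
cpu_has_feature() above is just a mask test against cur_cpu_spec->cpu_features; the BEGIN/END_FTR_SECTION macros do the equivalent patching for assembly at boot. Here is a small standalone sketch of the C-side test; the feature bit values are copied from the header, but the cpu_spec entry is a fabricated example rather than a real PVR entry.

#include <stdio.h>

#define CPU_FTR_ALTIVEC		0x0000000000000008ULL
#define CPU_FTR_SLB		0x0000000100000000ULL
#define CPU_FTR_SMT		0x0000010000000000ULL

struct cpu_spec {
	const char *cpu_name;
	unsigned long long cpu_features;
};

/* Fabricated entry, for illustration only. */
static struct cpu_spec example_cpu = {
	.cpu_name	= "example-cpu",
	.cpu_features	= CPU_FTR_ALTIVEC | CPU_FTR_SLB,
};

static struct cpu_spec *cur_cpu_spec = &example_cpu;

/* Same shape as the header's cpu_has_feature(). */
static unsigned long long cpu_has_feature(unsigned long long feature)
{
	return cur_cpu_spec->cpu_features & feature;
}

int main(void)
{
	printf("%s: altivec=%d smt=%d\n", cur_cpu_spec->cpu_name,
	       cpu_has_feature(CPU_FTR_ALTIVEC) != 0,
	       cpu_has_feature(CPU_FTR_SMT) != 0);
	return 0;
}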
diff --git a/include/asm-ppc64/dart.h b/include/asm-ppc64/dart.h
new file mode 100644
index 000000000000..cdf8a2dec05f
--- /dev/null
+++ b/include/asm-ppc64/dart.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_DART_H
20#define _ASM_DART_H
21
22
23/* physical base of DART registers */
24#define DART_BASE 0xf8033000UL
25
26/* Offset from base to control register */
27#define DARTCNTL 0
28/* Offset from base to exception register */
29#define DARTEXCP 0x10
30/* Offset from base to TLB tag registers */
31#define DARTTAG 0x1000
32
33
34/* Control Register fields */
35
36/* base address of table (pfn) */
37#define DARTCNTL_BASE_MASK 0xfffff
38#define DARTCNTL_BASE_SHIFT 12
39
40#define DARTCNTL_FLUSHTLB 0x400
41#define DARTCNTL_ENABLE 0x200
42
43/* size of table in pages */
44#define DARTCNTL_SIZE_MASK 0x1ff
45#define DARTCNTL_SIZE_SHIFT 0
46
47
48/* DART table fields */
49
50#define DARTMAP_VALID 0x80000000
51#define DARTMAP_RPNMASK 0x00ffffff
52
53
54#define DART_PAGE_SHIFT 12
55#define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT)
56#define DART_PAGE_FACTOR (PAGE_SHIFT - DART_PAGE_SHIFT)
57
58
59#endif
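
The DART control register packs the table's base page frame and its size in DART pages into one word using the shift/mask pairs defined above. The standalone sketch below shows how such a control value could be composed; the constants are copied from the new header, while the base address and table size are made-up example values.

#include <stdio.h>
#include <stdint.h>

#define DARTCNTL_BASE_MASK	0xfffff
#define DARTCNTL_BASE_SHIFT	12
#define DARTCNTL_ENABLE		0x200
#define DARTCNTL_SIZE_MASK	0x1ff
#define DARTCNTL_SIZE_SHIFT	0
#define DART_PAGE_SHIFT		12

int main(void)
{
	uint64_t table_phys = 0x7f000000;	/* example table base address */
	uint32_t size_pages = 0x80;		/* example size in DART pages */
	uint32_t cntl;

	/* Base field holds the page frame number of the table. */
	cntl  = ((table_phys >> DART_PAGE_SHIFT) & DARTCNTL_BASE_MASK)
			<< DARTCNTL_BASE_SHIFT;
	cntl |= (size_pages & DARTCNTL_SIZE_MASK) << DARTCNTL_SIZE_SHIFT;
	cntl |= DARTCNTL_ENABLE;

	printf("DARTCNTL = 0x%08x\n", cntl);
	return 0;
}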
diff --git a/include/asm-ppc64/dbdma.h b/include/asm-ppc64/dbdma.h
deleted file mode 100644
index f2d5d5dc3377..000000000000
--- a/include/asm-ppc64/dbdma.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/dbdma.h>
2
diff --git a/include/asm-ppc64/dma.h b/include/asm-ppc64/dma.h
deleted file mode 100644
index dfd1f69059ba..000000000000
--- a/include/asm-ppc64/dma.h
+++ /dev/null
@@ -1,329 +0,0 @@
1/*
2 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992.
6 * Changes for ppc sound by Christoph Nadig
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef _ASM_DMA_H
15#define _ASM_DMA_H
16
17#include <linux/config.h>
18#include <asm/io.h>
19#include <linux/spinlock.h>
20#include <asm/system.h>
21
22#ifndef MAX_DMA_CHANNELS
23#define MAX_DMA_CHANNELS 8
24#endif
25
26/* The maximum address that we can perform a DMA transfer to on this platform */
27/* Doesn't really apply... */
28#define MAX_DMA_ADDRESS (~0UL)
29
30#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
31
32#define dma_outb outb
33#define dma_inb inb
34
35/*
36 * NOTES about DMA transfers:
37 *
38 * controller 1: channels 0-3, byte operations, ports 00-1F
39 * controller 2: channels 4-7, word operations, ports C0-DF
40 *
41 * - ALL registers are 8 bits only, regardless of transfer size
42 * - channel 4 is not used - cascades 1 into 2.
43 * - channels 0-3 are byte - addresses/counts are for physical bytes
44 * - channels 5-7 are word - addresses/counts are for physical words
45 * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
46 * - transfer count loaded to registers is 1 less than actual count
47 * - controller 2 offsets are all even (2x offsets for controller 1)
48 * - page registers for 5-7 don't use data bit 0, represent 128K pages
49 * - page registers for 0-3 use bit 0, represent 64K pages
50 *
51 * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
52 * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
53 * Note that addresses loaded into registers must be _physical_ addresses,
54 * not logical addresses (which may differ if paging is active).
55 *
56 * Address mapping for channels 0-3:
57 *
58 * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
59 * | ... | | ... | | ... |
60 * | ... | | ... | | ... |
61 * | ... | | ... | | ... |
62 * P7 ... P0 A7 ... A0 A7 ... A0
63 * | Page | Addr MSB | Addr LSB | (DMA registers)
64 *
65 * Address mapping for channels 5-7:
66 *
67 * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
68 * | ... | \ \ ... \ \ \ ... \ \
69 * | ... | \ \ ... \ \ \ ... \ (not used)
70 * | ... | \ \ ... \ \ \ ... \
71 * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
72 * | Page | Addr MSB | Addr LSB | (DMA registers)
73 *
74 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
75 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
76 * the hardware level, so odd-byte transfers aren't possible).
77 *
78 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
79 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
80 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
81 *
82 */
83
84/* 8237 DMA controllers */
85#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
86#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
87
88/* DMA controller registers */
89#define DMA1_CMD_REG 0x08 /* command register (w) */
90#define DMA1_STAT_REG 0x08 /* status register (r) */
91#define DMA1_REQ_REG 0x09 /* request register (w) */
92#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
93#define DMA1_MODE_REG 0x0B /* mode register (w) */
94#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
95#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
96#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
97#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
98#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
99
100#define DMA2_CMD_REG 0xD0 /* command register (w) */
101#define DMA2_STAT_REG 0xD0 /* status register (r) */
102#define DMA2_REQ_REG 0xD2 /* request register (w) */
103#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
104#define DMA2_MODE_REG 0xD6 /* mode register (w) */
105#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
106#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
107#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
108#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
109#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
110
111#define DMA_ADDR_0 0x00 /* DMA address registers */
112#define DMA_ADDR_1 0x02
113#define DMA_ADDR_2 0x04
114#define DMA_ADDR_3 0x06
115#define DMA_ADDR_4 0xC0
116#define DMA_ADDR_5 0xC4
117#define DMA_ADDR_6 0xC8
118#define DMA_ADDR_7 0xCC
119
120#define DMA_CNT_0 0x01 /* DMA count registers */
121#define DMA_CNT_1 0x03
122#define DMA_CNT_2 0x05
123#define DMA_CNT_3 0x07
124#define DMA_CNT_4 0xC2
125#define DMA_CNT_5 0xC6
126#define DMA_CNT_6 0xCA
127#define DMA_CNT_7 0xCE
128
129#define DMA_LO_PAGE_0 0x87 /* DMA page registers */
130#define DMA_LO_PAGE_1 0x83
131#define DMA_LO_PAGE_2 0x81
132#define DMA_LO_PAGE_3 0x82
133#define DMA_LO_PAGE_5 0x8B
134#define DMA_LO_PAGE_6 0x89
135#define DMA_LO_PAGE_7 0x8A
136
137#define DMA_HI_PAGE_0 0x487 /* DMA page registers */
138#define DMA_HI_PAGE_1 0x483
139#define DMA_HI_PAGE_2 0x481
140#define DMA_HI_PAGE_3 0x482
141#define DMA_HI_PAGE_5 0x48B
142#define DMA_HI_PAGE_6 0x489
143#define DMA_HI_PAGE_7 0x48A
144
145#define DMA1_EXT_REG 0x40B
146#define DMA2_EXT_REG 0x4D6
147
148#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
149#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
150#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
151
152#define DMA_AUTOINIT 0x10
153
154extern spinlock_t dma_spin_lock;
155
156static __inline__ unsigned long claim_dma_lock(void)
157{
158 unsigned long flags;
159 spin_lock_irqsave(&dma_spin_lock, flags);
160 return flags;
161}
162
163static __inline__ void release_dma_lock(unsigned long flags)
164{
165 spin_unlock_irqrestore(&dma_spin_lock, flags);
166}
167
168/* enable/disable a specific DMA channel */
169static __inline__ void enable_dma(unsigned int dmanr)
170{
171 unsigned char ucDmaCmd=0x00;
172
173 if (dmanr != 4)
174 {
175 dma_outb(0, DMA2_MASK_REG); /* This may not be enabled */
176 dma_outb(ucDmaCmd, DMA2_CMD_REG); /* Enable group */
177 }
178 if (dmanr<=3)
179 {
180 dma_outb(dmanr, DMA1_MASK_REG);
181 dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */
182 } else
183 {
184 dma_outb(dmanr & 3, DMA2_MASK_REG);
185 }
186}
187
188static __inline__ void disable_dma(unsigned int dmanr)
189{
190 if (dmanr<=3)
191 dma_outb(dmanr | 4, DMA1_MASK_REG);
192 else
193 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
194}
195
196/* Clear the 'DMA Pointer Flip Flop'.
197 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
198 * Use this once to initialize the FF to a known state.
199 * After that, keep track of it. :-)
200 * --- In order to do that, the DMA routines below should ---
201 * --- only be used while interrupts are disabled! ---
202 */
203static __inline__ void clear_dma_ff(unsigned int dmanr)
204{
205 if (dmanr<=3)
206 dma_outb(0, DMA1_CLEAR_FF_REG);
207 else
208 dma_outb(0, DMA2_CLEAR_FF_REG);
209}
210
211/* set mode (above) for a specific DMA channel */
212static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
213{
214 if (dmanr<=3)
215 dma_outb(mode | dmanr, DMA1_MODE_REG);
216 else
217 dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
218}
219
220/* Set only the page register bits of the transfer address.
221 * This is used for successive transfers when we know the contents of
222 * the lower 16 bits of the DMA current address register, but a 64k boundary
223 * may have been crossed.
224 */
225static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
226{
227 switch(dmanr) {
228 case 0:
229 dma_outb(pagenr, DMA_LO_PAGE_0);
230 dma_outb(pagenr>>8, DMA_HI_PAGE_0);
231 break;
232 case 1:
233 dma_outb(pagenr, DMA_LO_PAGE_1);
234 dma_outb(pagenr>>8, DMA_HI_PAGE_1);
235 break;
236 case 2:
237 dma_outb(pagenr, DMA_LO_PAGE_2);
238 dma_outb(pagenr>>8, DMA_HI_PAGE_2);
239 break;
240 case 3:
241 dma_outb(pagenr, DMA_LO_PAGE_3);
242 dma_outb(pagenr>>8, DMA_HI_PAGE_3);
243 break;
244 case 5:
245 dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
246 dma_outb(pagenr>>8, DMA_HI_PAGE_5);
247 break;
248 case 6:
249 dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
250 dma_outb(pagenr>>8, DMA_HI_PAGE_6);
251 break;
252 case 7:
253 dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
254 dma_outb(pagenr>>8, DMA_HI_PAGE_7);
255 break;
256 }
257}
258
259
260/* Set transfer address & page bits for specific DMA channel.
261 * Assumes dma flipflop is clear.
262 */
263static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
264{
265 if (dmanr <= 3) {
266 dma_outb( phys & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
267 dma_outb( (phys>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
268 } else {
269 dma_outb( (phys>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
270 dma_outb( (phys>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
271 }
272 set_dma_page(dmanr, phys>>16);
273}
274
275
276/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
277 * a specific DMA channel.
278 * You must ensure the parameters are valid.
279 * NOTE: from a manual: "the number of transfers is one more
280 * than the initial word count"! This is taken into account.
281 * Assumes dma flip-flop is clear.
282 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
283 */
284static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
285{
286 count--;
287 if (dmanr <= 3) {
288 dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
289 dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
290 } else {
291 dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
292 dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
293 }
294}
295
296
297/* Get DMA residue count. After a DMA transfer, this
298 * should return zero. Reading this while a DMA transfer is
299 * still in progress will return unpredictable results.
300 * If called before the channel has been used, it may return 1.
301 * Otherwise, it returns the number of _bytes_ left to transfer.
302 *
303 * Assumes DMA flip-flop is clear.
304 */
305static __inline__ int get_dma_residue(unsigned int dmanr)
306{
307 unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
308 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
309
310 /* using short to get 16-bit wrap around */
311 unsigned short count;
312
313 count = 1 + dma_inb(io_port);
314 count += dma_inb(io_port) << 8;
315
316 return (dmanr <= 3)? count : (count<<1);
317}
318
319/* These are in kernel/dma.c: */
320extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
321extern void free_dma(unsigned int dmanr); /* release it again */
322
323#ifdef CONFIG_PCI
324extern int isa_dma_bridge_buggy;
325#else
326#define isa_dma_bridge_buggy (0)
327#endif
328#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
329#endif /* _ASM_DMA_H */
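
The long comment in this file describes how a physical address is split across the page register and two 8-bit writes to the address register. The standalone sketch below performs exactly that split, mirroring what set_dma_addr() and set_dma_page() do without touching any I/O ports; the address is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t phys = 0x123456;	/* example physical address < 16MB */

	/* Channels 0-3 (byte transfers): LSB then MSB go to the address
	 * register (flip-flop clear), bits 23..16 go to the page register. */
	uint8_t addr_lsb = phys & 0xff;
	uint8_t addr_msb = (phys >> 8) & 0xff;
	uint8_t page     = phys >> 16;

	printf("byte channel: lsb=0x%02x msb=0x%02x page=0x%02x\n",
	       addr_lsb, addr_msb, page);

	/* Channels 5-7 transfer words, so the address is written shifted
	 * right by one and bit 0 of the page register is unused. */
	printf("word channel: lsb=0x%02x msb=0x%02x page=0x%02x\n",
	       (phys >> 1) & 0xff, (phys >> 9) & 0xff,
	       (uint8_t)(phys >> 16) & 0xfe);
	return 0;
}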
diff --git a/include/asm-ppc64/futex.h b/include/asm-ppc64/futex.h
index cb2640b3a408..266b460de44e 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-ppc64/futex.h
@@ -5,7 +5,7 @@
5 5
6#include <linux/futex.h> 6#include <linux/futex.h>
7#include <asm/errno.h> 7#include <asm/errno.h>
8#include <asm/memory.h> 8#include <asm/synch.h>
9#include <asm/uaccess.h> 9#include <asm/uaccess.h>
10 10
11#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 11#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
diff --git a/include/asm-ppc64/hardirq.h b/include/asm-ppc64/hardirq.h
deleted file mode 100644
index 4ee72bb1fd48..000000000000
--- a/include/asm-ppc64/hardirq.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef __ASM_HARDIRQ_H
2#define __ASM_HARDIRQ_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/config.h>
12#include <linux/cache.h>
13#include <linux/preempt.h>
14
15typedef struct {
16 unsigned int __softirq_pending;
17} ____cacheline_aligned irq_cpustat_t;
18
19#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20
21static inline void ack_bad_irq(int irq)
22{
23 printk(KERN_CRIT "illegal vector %d received!\n", irq);
24 BUG();
25}
26
27#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
index 59c958aea4db..bd7c9532d77b 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-ppc64/io.h
@@ -15,7 +15,7 @@
15#ifdef CONFIG_PPC_ISERIES 15#ifdef CONFIG_PPC_ISERIES
16#include <asm/iSeries/iSeries_io.h> 16#include <asm/iSeries/iSeries_io.h>
17#endif 17#endif
18#include <asm/memory.h> 18#include <asm/synch.h>
19#include <asm/delay.h> 19#include <asm/delay.h>
20 20
21#include <asm-generic/iomap.h> 21#include <asm-generic/iomap.h>
diff --git a/include/asm-ppc64/irq.h b/include/asm-ppc64/irq.h
deleted file mode 100644
index 99782afb4cde..000000000000
--- a/include/asm-ppc64/irq.h
+++ /dev/null
@@ -1,120 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _ASM_IRQ_H
3#define _ASM_IRQ_H
4
5/*
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/threads.h>
14
15/*
16 * Maximum number of interrupt sources that we can handle.
17 */
18#define NR_IRQS 512
19
20/* this number is used when no interrupt has been assigned */
21#define NO_IRQ (-1)
22
23/*
24 * These constants are used for passing information about interrupt
25 * signal polarity and level/edge sensing to the low-level PIC chip
26 * drivers.
27 */
28#define IRQ_SENSE_MASK 0x1
29#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
30#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
31
32#define IRQ_POLARITY_MASK 0x2
33#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
34#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
35
36/*
37 * IRQ line status macro IRQ_PER_CPU is used
38 */
39#define ARCH_HAS_IRQ_PER_CPU
40
41#define get_irq_desc(irq) (&irq_desc[(irq)])
42
43/* Define a way to iterate across irqs. */
44#define for_each_irq(i) \
45 for ((i) = 0; (i) < NR_IRQS; ++(i))
46
47/* Interrupt numbers are virtual in case they are sparsely
48 * distributed by the hardware.
49 */
50extern unsigned int virt_irq_to_real_map[NR_IRQS];
51
52/* Create a mapping for a real_irq if it doesn't already exist.
53 * Return the virtual irq as a convenience.
54 */
55int virt_irq_create_mapping(unsigned int real_irq);
56void virt_irq_init(void);
57
58static inline unsigned int virt_irq_to_real(unsigned int virt_irq)
59{
60 return virt_irq_to_real_map[virt_irq];
61}
62
63extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
64
65/*
66 * Because many systems have two overlapping name spaces for
67 * interrupts (ISA and XICS for example), and the ISA interrupts
68 * have historically not been easy to renumber, we allow ISA
69 * interrupts to take values 0 - 15, and shift up the remaining
70 * interrupts by 0x10.
71 */
72#define NUM_ISA_INTERRUPTS 0x10
73extern int __irq_offset_value;
74
75static inline int irq_offset_up(int irq)
76{
77 return(irq + __irq_offset_value);
78}
79
80static inline int irq_offset_down(int irq)
81{
82 return(irq - __irq_offset_value);
83}
84
85static inline int irq_offset_value(void)
86{
87 return __irq_offset_value;
88}
89
90static __inline__ int irq_canonicalize(int irq)
91{
92 return irq;
93}
94
95extern int distribute_irqs;
96
97struct irqaction;
98struct pt_regs;
99
100#ifdef CONFIG_IRQSTACKS
101/*
102 * Per-cpu stacks for handling hard and soft interrupts.
103 */
104extern struct thread_info *hardirq_ctx[NR_CPUS];
105extern struct thread_info *softirq_ctx[NR_CPUS];
106
107extern void irq_ctx_init(void);
108extern void call_do_softirq(struct thread_info *tp);
109extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
110 struct irqaction *action, struct thread_info *tp);
111
112#define __ARCH_HAS_DO_SOFTIRQ
113
114#else
115#define irq_ctx_init()
116
117#endif /* CONFIG_IRQSTACKS */
118
119#endif /* _ASM_IRQ_H */
120#endif /* __KERNEL__ */
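
The comments in this file describe two layers of remapping: sparse hardware interrupt numbers are packed into a dense virtual range, and everything outside the 16 legacy ISA sources is then shifted up by NUM_ISA_INTERRUPTS. The sketch below covers only the offset arithmetic; the virtual-irq table handling is omitted and the sample numbers are invented.

#include <stdio.h>

#define NUM_ISA_INTERRUPTS	0x10

/* Stand-in for the kernel's __irq_offset_value, which a platform sets to
 * NUM_ISA_INTERRUPTS when it must coexist with legacy ISA interrupts. */
static int __irq_offset_value = NUM_ISA_INTERRUPTS;

static int irq_offset_up(int irq)
{
	return irq + __irq_offset_value;
}

static int irq_offset_down(int irq)
{
	return irq - __irq_offset_value;
}

int main(void)
{
	int virt_irq = 5;	/* example virtual irq */
	int linux_irq = irq_offset_up(virt_irq);

	printf("virtual %d -> linux %d -> back to %d\n",
	       virt_irq, linux_irq, irq_offset_down(linux_irq));
	return 0;
}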
diff --git a/include/asm-ppc64/keylargo.h b/include/asm-ppc64/keylargo.h
deleted file mode 100644
index 4d78e3d0314c..000000000000
--- a/include/asm-ppc64/keylargo.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/keylargo.h>
2
diff --git a/include/asm-ppc64/kmap_types.h b/include/asm-ppc64/kmap_types.h
deleted file mode 100644
index fd1574648223..000000000000
--- a/include/asm-ppc64/kmap_types.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _ASM_KMAP_TYPES_H
3#define _ASM_KMAP_TYPES_H
4
5enum km_type {
6 KM_BOUNCE_READ,
7 KM_SKB_SUNRPC_DATA,
8 KM_SKB_DATA_SOFTIRQ,
9 KM_USER0,
10 KM_USER1,
11 KM_BIO_SRC_IRQ,
12 KM_BIO_DST_IRQ,
13 KM_PTE0,
14 KM_PTE1,
15 KM_IRQ0,
16 KM_IRQ1,
17 KM_SOFTIRQ0,
18 KM_SOFTIRQ1,
19 KM_TYPE_NR
20};
21
22#endif
23#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/macio.h b/include/asm-ppc64/macio.h
deleted file mode 100644
index a3028b364f70..000000000000
--- a/include/asm-ppc64/macio.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/macio.h>
2
diff --git a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
deleted file mode 100644
index af53ffb55726..000000000000
--- a/include/asm-ppc64/memory.h
+++ /dev/null
@@ -1,61 +0,0 @@
1#ifndef _ASM_PPC64_MEMORY_H_
2#define _ASM_PPC64_MEMORY_H_
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/config.h>
12
13/*
14 * Arguably the bitops and *xchg operations don't imply any memory barrier
15 * or SMP ordering, but in fact a lot of drivers expect them to imply
16 * both, since they do on x86 cpus.
17 */
18#ifdef CONFIG_SMP
19#define EIEIO_ON_SMP "eieio\n"
20#define ISYNC_ON_SMP "\n\tisync"
21#define SYNC_ON_SMP "lwsync\n\t"
22#else
23#define EIEIO_ON_SMP
24#define ISYNC_ON_SMP
25#define SYNC_ON_SMP
26#endif
27
28static inline void eieio(void)
29{
30 __asm__ __volatile__ ("eieio" : : : "memory");
31}
32
33static inline void isync(void)
34{
35 __asm__ __volatile__ ("isync" : : : "memory");
36}
37
38#ifdef CONFIG_SMP
39#define eieio_on_smp() eieio()
40#define isync_on_smp() isync()
41#else
42#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
43#define isync_on_smp() __asm__ __volatile__("": : :"memory")
44#endif
45
46/* Macros for adjusting thread priority (hardware multi-threading) */
47#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
48#define HMT_low() asm volatile("or 1,1,1 # low priority")
49#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
50#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
51#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
52#define HMT_high() asm volatile("or 3,3,3 # high priority")
53
54#define HMT_VERY_LOW "\tor 31,31,31 # very low priority\n"
55#define HMT_LOW "\tor 1,1,1 # low priority\n"
56#define HMT_MEDIUM_LOW "\tor 6,6,6 # medium low priority\n"
57#define HMT_MEDIUM "\tor 2,2,2 # medium priority\n"
58#define HMT_MEDIUM_HIGH "\tor 5,5,5 # medium high priority\n"
59#define HMT_HIGH "\tor 3,3,3 # high priority\n"
60
61#endif
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 7bc42eb087ad..e0505acb77d9 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -14,6 +14,7 @@
14#define _PPC64_MMU_H_ 14#define _PPC64_MMU_H_
15 15
16#include <linux/config.h> 16#include <linux/config.h>
17#include <asm/ppc_asm.h> /* for ASM_CONST */
17#include <asm/page.h> 18#include <asm/page.h>
18 19
19/* 20/*
@@ -29,7 +30,7 @@
29 30
30/* Location of cpu0's segment table */ 31/* Location of cpu0's segment table */
31#define STAB0_PAGE 0x6 32#define STAB0_PAGE 0x6
32#define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT) 33#define STAB0_PHYS_ADDR (STAB0_PAGE<<12)
33 34
34#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
35extern char initial_stab[]; 36extern char initial_stab[];
@@ -205,6 +206,10 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
205 unsigned long prpn, 206 unsigned long prpn,
206 unsigned long vflags, unsigned long rflags); 207 unsigned long vflags, unsigned long rflags);
207 208
209extern long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
210 unsigned long va, unsigned long prpn,
211 unsigned long vflags, unsigned long rflags);
212
208extern void stabs_alloc(void); 213extern void stabs_alloc(void);
209 214
210#endif /* __ASSEMBLY__ */ 215#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/of_device.h b/include/asm-ppc64/of_device.h
deleted file mode 100644
index 7bc136e22590..000000000000
--- a/include/asm-ppc64/of_device.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/of_device.h>
2
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index a15422bcf30d..d404431f0a9a 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -11,13 +11,7 @@
11 */ 11 */
12 12
13#include <linux/config.h> 13#include <linux/config.h>
14 14#include <asm/ppc_asm.h> /* for ASM_CONST */
15#ifdef __ASSEMBLY__
16 #define ASM_CONST(x) x
17#else
18 #define __ASM_CONST(x) x##UL
19 #define ASM_CONST(x) __ASM_CONST(x)
20#endif
21 15
22/* PAGE_SHIFT determines the page size */ 16/* PAGE_SHIFT determines the page size */
23#define PAGE_SHIFT 12 17#define PAGE_SHIFT 12
diff --git a/include/asm-ppc64/parport.h b/include/asm-ppc64/parport.h
deleted file mode 100644
index 2f8874c581cc..000000000000
--- a/include/asm-ppc64/parport.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * parport.h: platform-specific PC-style parport initialisation
3 *
4 * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
5 *
6 * This file should only be included by drivers/parport/parport_pc.c.
7 */
8
9#ifndef _ASM_PPC64_PARPORT_H
10#define _ASM_PPC64_PARPORT_H
11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
14{
15 return parport_pc_find_isa_ports (autoirq, autodma);
16}
17
18#endif /* !(_ASM_PPC_PARPORT_H) */
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h
index d8991389ab39..60cf8c838af0 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-ppc64/pci-bridge.h
@@ -2,7 +2,9 @@
2#ifndef _ASM_PCI_BRIDGE_H 2#ifndef _ASM_PCI_BRIDGE_H
3#define _ASM_PCI_BRIDGE_H 3#define _ASM_PCI_BRIDGE_H
4 4
5#include <linux/config.h>
5#include <linux/pci.h> 6#include <linux/pci.h>
7#include <linux/list.h>
6 8
7/* 9/*
8 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -34,7 +36,7 @@ struct pci_controller {
34 36
35 struct pci_ops *ops; 37 struct pci_ops *ops;
36 volatile unsigned int __iomem *cfg_addr; 38 volatile unsigned int __iomem *cfg_addr;
37 volatile unsigned char __iomem *cfg_data; 39 volatile void __iomem *cfg_data;
38 40
39 /* Currently, we limit ourselves to 1 IO range and 3 mem 41 /* Currently, we limit ourselves to 1 IO range and 3 mem
40 * ranges since the common pci_bus structure can't handle more 42 * ranges since the common pci_bus structure can't handle more
@@ -71,6 +73,12 @@ struct pci_dn {
71 struct iommu_table *iommu_table; /* for phb's or bridges */ 73 struct iommu_table *iommu_table; /* for phb's or bridges */
72 struct pci_dev *pcidev; /* back-pointer to the pci device */ 74 struct pci_dev *pcidev; /* back-pointer to the pci device */
73 struct device_node *node; /* back-pointer to the device_node */ 75 struct device_node *node; /* back-pointer to the device_node */
76#ifdef CONFIG_PPC_ISERIES
77 struct list_head Device_List;
78 int Irq; /* Assigned IRQ */
79 int Flags; /* Possible flags(disable/bist)*/
80 u8 LogicalSlot; /* Hv Slot Index for Tces */
81#endif
74 u32 config_space[16]; /* saved PCI config space */ 82 u32 config_space[16]; /* saved PCI config space */
75}; 83};
76 84
@@ -96,6 +104,16 @@ static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
96 return fetch_dev_dn(dev); 104 return fetch_dev_dn(dev);
97} 105}
98 106
107static inline int pci_device_from_OF_node(struct device_node *np,
108 u8 *bus, u8 *devfn)
109{
110 if (!PCI_DN(np))
111 return -ENODEV;
112 *bus = PCI_DN(np)->busno;
113 *devfn = PCI_DN(np)->devfn;
114 return 0;
115}
116
99static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) 117static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
100{ 118{
101 if (bus->self) 119 if (bus->self)
@@ -105,7 +123,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
105} 123}
106 124
107extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, 125extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
108 struct device_node *dev); 126 struct device_node *dev, int primary);
109 127
110extern int pcibios_remove_root_bus(struct pci_controller *phb); 128extern int pcibios_remove_root_bus(struct pci_controller *phb);
111 129
diff --git a/include/asm-ppc64/pci.h b/include/asm-ppc64/pci.h
index a88bbfc26967..342e2d755550 100644
--- a/include/asm-ppc64/pci.h
+++ b/include/asm-ppc64/pci.h
@@ -168,7 +168,7 @@ extern void pcibios_add_platform_entries(struct pci_dev *dev);
168 168
169struct file; 169struct file;
170extern pgprot_t pci_phys_mem_access_prot(struct file *file, 170extern pgprot_t pci_phys_mem_access_prot(struct file *file,
171 unsigned long offset, 171 unsigned long pfn,
172 unsigned long size, 172 unsigned long size,
173 pgprot_t prot); 173 pgprot_t prot);
174 174
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 2eb1778a3a15..8c3f574046b6 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -471,7 +471,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
471#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) 471#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
472 472
473struct file; 473struct file;
474extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, 474extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
475 unsigned long size, pgprot_t vma_prot); 475 unsigned long size, pgprot_t vma_prot);
476#define __HAVE_PHYS_MEM_ACCESS_PROT 476#define __HAVE_PHYS_MEM_ACCESS_PROT
477 477
diff --git a/include/asm-ppc64/pmac_feature.h b/include/asm-ppc64/pmac_feature.h
deleted file mode 100644
index e07e36c4cbb2..000000000000
--- a/include/asm-ppc64/pmac_feature.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/pmac_feature.h>
2
diff --git a/include/asm-ppc64/pmac_low_i2c.h b/include/asm-ppc64/pmac_low_i2c.h
deleted file mode 100644
index 7bcfc72c5c8a..000000000000
--- a/include/asm-ppc64/pmac_low_i2c.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/pmac_low_i2c.h>
2
diff --git a/include/asm-ppc64/ppc32.h b/include/asm-ppc64/ppc32.h
index 6b44a8caf395..3945a55d112a 100644
--- a/include/asm-ppc64/ppc32.h
+++ b/include/asm-ppc64/ppc32.h
@@ -70,18 +70,18 @@ typedef struct compat_siginfo {
70#define __old_sigaction32 old_sigaction32 70#define __old_sigaction32 old_sigaction32
71 71
72struct __old_sigaction32 { 72struct __old_sigaction32 {
73 unsigned sa_handler; 73 compat_uptr_t sa_handler;
74 compat_old_sigset_t sa_mask; 74 compat_old_sigset_t sa_mask;
75 unsigned int sa_flags; 75 unsigned int sa_flags;
76 unsigned sa_restorer; /* not used by Linux/SPARC yet */ 76 compat_uptr_t sa_restorer; /* not used by Linux/SPARC yet */
77}; 77};
78 78
79 79
80 80
81struct sigaction32 { 81struct sigaction32 {
82 unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */ 82 compat_uptr_t sa_handler; /* Really a pointer, but need to deal with 32 bits */
83 unsigned int sa_flags; 83 unsigned int sa_flags;
84 unsigned int sa_restorer; /* Another 32 bit pointer */ 84 compat_uptr_t sa_restorer; /* Another 32 bit pointer */
85 compat_sigset_t sa_mask; /* A 32 bit mask */ 85 compat_sigset_t sa_mask; /* A 32 bit mask */
86}; 86};
87 87
@@ -94,9 +94,9 @@ typedef struct sigaltstack_32 {
94struct sigcontext32 { 94struct sigcontext32 {
95 unsigned int _unused[4]; 95 unsigned int _unused[4];
96 int signal; 96 int signal;
97 unsigned int handler; 97 compat_uptr_t handler;
98 unsigned int oldmask; 98 unsigned int oldmask;
99 u32 regs; /* 4 byte pointer to the pt_regs32 structure. */ 99 compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */
100}; 100};
101 101
102struct mcontext32 { 102struct mcontext32 {
@@ -111,7 +111,7 @@ struct ucontext32 {
111 unsigned int uc_link; 111 unsigned int uc_link;
112 stack_32_t uc_stack; 112 stack_32_t uc_stack;
113 int uc_pad[7]; 113 int uc_pad[7];
114 u32 uc_regs; /* points to uc_mcontext field */ 114 compat_uptr_t uc_regs; /* points to uc_mcontext field */
115 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 115 compat_sigset_t uc_sigmask; /* mask last for extensibility */
116 /* glibc has 1024-bit signal masks, ours are 64-bit */ 116 /* glibc has 1024-bit signal masks, ours are 64-bit */
117 int uc_maskext[30]; 117 int uc_maskext[30];
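
Switching these fields from plain unsigned int to compat_uptr_t makes explicit that they are 32-bit user pointers, which the 64-bit kernel widens before use. Below is a standalone analogue of that conversion; the typedef, compat_ptr()/ptr_to_compat() helpers, and the simplified sigaction32 are re-declared locally for illustration, so this is not the kernel's own header.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t compat_uptr_t;

/* Local analogue of widening a 32-bit user pointer to native width. */
static void *compat_ptr(compat_uptr_t uptr)
{
	return (void *)(unsigned long)uptr;
}

static compat_uptr_t ptr_to_compat(void *ptr)
{
	return (compat_uptr_t)(unsigned long)ptr;
}

struct sigaction32 {			/* simplified: sa_mask omitted */
	compat_uptr_t sa_handler;	/* really a 32-bit user pointer */
	unsigned int sa_flags;
	compat_uptr_t sa_restorer;
};

int main(void)
{
	struct sigaction32 sa = { .sa_handler = 0x10001234u };

	printf("handler widens to %p\n", compat_ptr(sa.sa_handler));
	printf("and narrows back to 0x%x\n",
	       ptr_to_compat(compat_ptr(sa.sa_handler)));
	return 0;
}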
diff --git a/include/asm-ppc64/ppc_asm.h b/include/asm-ppc64/ppc_asm.h
deleted file mode 100644
index 9031d8a29aca..000000000000
--- a/include/asm-ppc64/ppc_asm.h
+++ /dev/null
@@ -1,242 +0,0 @@
1/*
2 * arch/ppc64/kernel/ppc_asm.h
3 *
4 * Definitions used by various bits of low-level assembly code on PowerPC.
5 *
6 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef _PPC64_PPC_ASM_H
15#define _PPC64_PPC_ASM_H
16/*
17 * Macros for storing registers into and loading registers from
18 * exception frames.
19 */
20#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
21#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
22#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
23#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
24#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
25#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
26#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
27#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
28#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
29#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
30
31#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
32#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
33
34#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
35#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
36#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
37#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
38#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
39#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
40#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*(n)(base)
41#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
42#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
43#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
44#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
45#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
46
47#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
48#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
49#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
50#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
51#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
52#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
53#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
54#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
55#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
56#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
57#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
58#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
59
60/* Macros to adjust thread priority for iSeries hardware multithreading */
61#define HMT_LOW or 1,1,1
62#define HMT_MEDIUM or 2,2,2
63#define HMT_HIGH or 3,3,3
64
65/* Insert the high 32 bits of the MSR into what will be the new
66 MSR (via SRR1 and rfid) This preserves the MSR.SF and MSR.ISF
67 bits. */
68
69#define FIX_SRR1(ra, rb) \
70 mr rb,ra; \
71 mfmsr ra; \
72 rldimi ra,rb,0,32
73
74#define CLR_TOP32(r) rlwinm (r),(r),0,0,31 /* clear top 32 bits */
75
76/*
77 * LOADADDR( rn, name )
78 * loads the address of 'name' into 'rn'
79 *
80 * LOADBASE( rn, name )
81 * loads the address (less the low 16 bits) of 'name' into 'rn'
82 * suitable for base+disp addressing
83 */
84#define LOADADDR(rn,name) \
85 lis rn,name##@highest; \
86 ori rn,rn,name##@higher; \
87 rldicr rn,rn,32,31; \
88 oris rn,rn,name##@h; \
89 ori rn,rn,name##@l
90
91#define LOADBASE(rn,name) \
92 lis rn,name@highest; \
93 ori rn,rn,name@higher; \
94 rldicr rn,rn,32,31; \
95 oris rn,rn,name@ha
96
97
98#define SET_REG_TO_CONST(reg, value) \
99 lis reg,(((value)>>48)&0xFFFF); \
100 ori reg,reg,(((value)>>32)&0xFFFF); \
101 rldicr reg,reg,32,31; \
102 oris reg,reg,(((value)>>16)&0xFFFF); \
103 ori reg,reg,((value)&0xFFFF);
104
105#define SET_REG_TO_LABEL(reg, label) \
106 lis reg,(label)@highest; \
107 ori reg,reg,(label)@higher; \
108 rldicr reg,reg,32,31; \
109 oris reg,reg,(label)@h; \
110 ori reg,reg,(label)@l;
111
112
113/* PPPBBB - DRENG If KERNELBASE is always 0xC0...,
114 * Then we can easily do this with one asm insn. -Peter
115 */
116#define tophys(rd,rs) \
117 lis rd,((KERNELBASE>>48)&0xFFFF); \
118 rldicr rd,rd,32,31; \
119 sub rd,rs,rd
120
121#define tovirt(rd,rs) \
122 lis rd,((KERNELBASE>>48)&0xFFFF); \
123 rldicr rd,rd,32,31; \
124 add rd,rs,rd
125
126/* Condition Register Bit Fields */
127
128#define cr0 0
129#define cr1 1
130#define cr2 2
131#define cr3 3
132#define cr4 4
133#define cr5 5
134#define cr6 6
135#define cr7 7
136
137
138/* General Purpose Registers (GPRs) */
139
140#define r0 0
141#define r1 1
142#define r2 2
143#define r3 3
144#define r4 4
145#define r5 5
146#define r6 6
147#define r7 7
148#define r8 8
149#define r9 9
150#define r10 10
151#define r11 11
152#define r12 12
153#define r13 13
154#define r14 14
155#define r15 15
156#define r16 16
157#define r17 17
158#define r18 18
159#define r19 19
160#define r20 20
161#define r21 21
162#define r22 22
163#define r23 23
164#define r24 24
165#define r25 25
166#define r26 26
167#define r27 27
168#define r28 28
169#define r29 29
170#define r30 30
171#define r31 31
172
173
174/* Floating Point Registers (FPRs) */
175
176#define fr0 0
177#define fr1 1
178#define fr2 2
179#define fr3 3
180#define fr4 4
181#define fr5 5
182#define fr6 6
183#define fr7 7
184#define fr8 8
185#define fr9 9
186#define fr10 10
187#define fr11 11
188#define fr12 12
189#define fr13 13
190#define fr14 14
191#define fr15 15
192#define fr16 16
193#define fr17 17
194#define fr18 18
195#define fr19 19
196#define fr20 20
197#define fr21 21
198#define fr22 22
199#define fr23 23
200#define fr24 24
201#define fr25 25
202#define fr26 26
203#define fr27 27
204#define fr28 28
205#define fr29 29
206#define fr30 30
207#define fr31 31
208
209#define vr0 0
210#define vr1 1
211#define vr2 2
212#define vr3 3
213#define vr4 4
214#define vr5 5
215#define vr6 6
216#define vr7 7
217#define vr8 8
218#define vr9 9
219#define vr10 10
220#define vr11 11
221#define vr12 12
222#define vr13 13
223#define vr14 14
224#define vr15 15
225#define vr16 16
226#define vr17 17
227#define vr18 18
228#define vr19 19
229#define vr20 20
230#define vr21 21
231#define vr22 22
232#define vr23 23
233#define vr24 24
234#define vr25 25
235#define vr26 26
236#define vr27 27
237#define vr28 28
238#define vr29 29
239#define vr30 30
240#define vr31 31
241
242#endif /* _PPC64_PPC_ASM_H */
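
LOADADDR and SET_REG_TO_CONST above assemble a 64-bit value from four 16-bit pieces (@highest, @higher, @h, @l) because PPC immediates are only 16 bits wide. The same decomposition in C, as a standalone check of the arithmetic; the constant is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t value = 0xc000000001234567ULL;	/* arbitrary example */

	/* The four 16-bit pieces the relocations name @highest, @higher,
	 * @h and @l. */
	uint16_t highest = (value >> 48) & 0xffff;
	uint16_t higher  = (value >> 32) & 0xffff;
	uint16_t high    = (value >> 16) & 0xffff;
	uint16_t low     =  value        & 0xffff;

	/* Rebuild it the way SET_REG_TO_CONST does: load the top two
	 * halves, shift left 32 (rldicr reg,reg,32,31), then OR in the
	 * bottom two halves. */
	uint64_t rebuilt = ((uint64_t)highest << 16) | higher;
	rebuilt <<= 32;
	rebuilt |= ((uint64_t)high << 16) | low;

	printf("0x%016llx -> 0x%016llx\n",
	       (unsigned long long)value, (unsigned long long)rebuilt);
	return 0;
}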
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
deleted file mode 100644
index 4146189006e3..000000000000
--- a/include/asm-ppc64/processor.h
+++ /dev/null
@@ -1,558 +0,0 @@
1#ifndef __ASM_PPC64_PROCESSOR_H
2#define __ASM_PPC64_PROCESSOR_H
3
4/*
5 * Copyright (C) 2001 PPC 64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/stringify.h>
14#ifndef __ASSEMBLY__
15#include <linux/config.h>
16#include <asm/atomic.h>
17#include <asm/ppcdebug.h>
18#include <asm/a.out.h>
19#endif
20#include <asm/ptrace.h>
21#include <asm/types.h>
22#include <asm/systemcfg.h>
23#include <asm/cputable.h>
24
25/* Machine State Register (MSR) Fields */
26#define MSR_SF_LG 63 /* Enable 64 bit mode */
27#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
28#define MSR_HV_LG 60 /* Hypervisor state */
29#define MSR_VEC_LG 25 /* Enable AltiVec */
30#define MSR_POW_LG 18 /* Enable Power Management */
31#define MSR_WE_LG 18 /* Wait State Enable */
32#define MSR_TGPR_LG 17 /* TLB Update registers in use */
33#define MSR_CE_LG 17 /* Critical Interrupt Enable */
34#define MSR_ILE_LG 16 /* Interrupt Little Endian */
35#define MSR_EE_LG 15 /* External Interrupt Enable */
36#define MSR_PR_LG 14 /* Problem State / Privilege Level */
37#define MSR_FP_LG 13 /* Floating Point enable */
38#define MSR_ME_LG 12 /* Machine Check Enable */
39#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
40#define MSR_SE_LG 10 /* Single Step */
41#define MSR_BE_LG 9 /* Branch Trace */
42#define MSR_DE_LG 9 /* Debug Exception Enable */
43#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
44#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
45#define MSR_IR_LG 5 /* Instruction Relocate */
46#define MSR_DR_LG 4 /* Data Relocate */
47#define MSR_PE_LG 3 /* Protection Enable */
48#define MSR_PX_LG 2 /* Protection Exclusive Mode */
49#define MSR_PMM_LG 2 /* Performance monitor */
50#define MSR_RI_LG 1 /* Recoverable Exception */
51#define MSR_LE_LG 0 /* Little Endian */
52
53#ifdef __ASSEMBLY__
54#define __MASK(X) (1<<(X))
55#else
56#define __MASK(X) (1UL<<(X))
57#endif
58
59#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
60#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
61#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
62#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
63#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
64#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
65#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
66#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
67#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
68#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
69#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
70#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
71#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
72#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
73#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
74#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
75#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
76#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
77#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
78#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
79#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
80#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
81#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
82#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
83#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
84#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
85
86#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
87#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
88
89#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
90#define MSR_USER64 MSR_USER32 | MSR_SF
91
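The _LG values above are bit positions; __MASK() turns each into a single-bit mask, so an MSR word can be built and tested with ordinary bitwise operations. A minimal sketch (helper names are illustrative, not part of this header):

static inline int msr_in_user_mode(unsigned long msr)
{
	return (msr & MSR_PR) != 0;		/* MSR_PR set => problem (user) state */
}

static inline unsigned long msr_kernel_base(void)
{
	/* Same composition idea as MSR_KERNEL above: 64-bit mode plus relocate/recover bits. */
	return MSR_SF | MSR_ME | MSR_RI | MSR_IR | MSR_DR;
}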
92/* Floating Point Status and Control Register (FPSCR) Fields */
93
94#define FPSCR_FX 0x80000000 /* FPU exception summary */
95#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
96#define FPSCR_VX 0x20000000 /* Invalid operation summary */
97#define FPSCR_OX 0x10000000 /* Overflow exception summary */
98#define FPSCR_UX 0x08000000 /* Underflow exception summary */
99#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
100#define FPSCR_XX 0x02000000 /* Inexact exception summary */
101#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
102#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
103#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
104#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
105#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
106#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
107#define FPSCR_FR 0x00040000 /* Fraction rounded */
108#define FPSCR_FI 0x00020000 /* Fraction inexact */
109#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
110#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
111#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
112#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
113#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
114#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
115#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
116#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
117#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
118#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
119#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
120#define FPSCR_RN 0x00000003 /* FPU rounding control */
121
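A saved FPSCR value is decoded with the masks above; the low two bits (FPSCR_RN) select the rounding mode and FPSCR_FX summarises any raised exception. A minimal sketch (helpers are illustrative only):

static inline unsigned int fpscr_rounding_mode(unsigned long fpscr)
{
	return fpscr & FPSCR_RN;	/* 0 nearest, 1 toward zero, 2 toward +inf, 3 toward -inf */
}

static inline int fpscr_exception_pending(unsigned long fpscr)
{
	return (fpscr & FPSCR_FX) != 0;	/* FPU exception summary bit */
}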
122/* Special Purpose Registers (SPRNs)*/
123
124#define SPRN_CTR 0x009 /* Count Register */
125#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
126#define DABR_TRANSLATION (1UL << 2)
127#define SPRN_DAR 0x013 /* Data Address Register */
128#define SPRN_DEC 0x016 /* Decrement Register */
129#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
130#define DSISR_NOHPTE 0x40000000 /* no translation found */
131#define DSISR_PROTFAULT 0x08000000 /* protection fault */
132#define DSISR_ISSTORE 0x02000000 /* access was a store */
133#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
134#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
135#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
136#define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */
137#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
138#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
139#define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */
140#define SPRN_HID4 0x3F4 /* 970 HID4 */
141#define SPRN_HID5 0x3F6 /* 970 HID5 */
142#define SPRN_HID6 0x3F9 /* BE HID 6 */
143#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
144#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
145#define SPRN_TSCR 0x399 /* Thread switch control on BE */
146#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
147#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
148#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
149#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
150#define SPRN_TSC 0x3FD /* Thread switch control on others */
151#define SPRN_TST 0x3FC /* Thread switch timeout on others */
152#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
153#define SPRN_LR 0x008 /* Link Register */
154#define SPRN_PIR 0x3FF /* Processor Identification Register */
155#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
156#define SPRN_PURR 0x135 /* Processor Utilization of Resources Register */
157#define SPRN_PVR 0x11F /* Processor Version Register */
158#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
159#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
160#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
161#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
162#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
163#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
164#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
165#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
166#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
167#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
168#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
169#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
170#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, W/O) */
171#define SPRN_TBWU 0x11D /* Time Base Write Upper Register (super, W/O) */
172#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
173#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
174#define SPRN_XER 0x001 /* Fixed Point Exception Register */
175#define SPRN_VRSAVE 0x100 /* Vector save */
176#define SPRN_CTRLF 0x088
177#define SPRN_CTRLT 0x098
178#define CTRL_RUNLATCH 0x1
179
180/* Performance monitor SPRs */
181#define SPRN_SIAR 780
182#define SPRN_SDAR 781
183#define SPRN_MMCRA 786
184#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
185#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
186#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
187#define SPRN_PMC1 787
188#define SPRN_PMC2 788
189#define SPRN_PMC3 789
190#define SPRN_PMC4 790
191#define SPRN_PMC5 791
192#define SPRN_PMC6 792
193#define SPRN_PMC7 793
194#define SPRN_PMC8 794
195#define SPRN_MMCR0 795
196#define MMCR0_FC 0x80000000UL /* freeze counters. set to 1 on a perfmon exception */
197#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
198#define MMCR0_KERNEL_DISABLE MMCR0_FCS
199#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
200#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
201#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
202#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
203#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
204#define MMCR0_FCECE 0x02000000UL /* freeze counters on enabled condition or event */
205/* time base exception enable */
206#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
207#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
208#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
209#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
210#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
211#define MMCR0_SHRFC 0x00000040UL /* share freeze conditions between threads */
212#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
213#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
214#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
215#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
216#define SPRN_MMCR1 798
217
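A typical perfmon setup composes several of the MMCR0_* bits above before writing the register. A hedged sketch (the particular bit combination is only an example):

static inline unsigned long mmcr0_user_only_sampling(void)
{
	/* Freeze counters in supervisor state, enable PMC1 overflow conditions
	 * and the performance monitor exception itself. */
	return MMCR0_FCS | MMCR0_PMC1CE | MMCR0_PMXE;
}
/* The value would then be installed with mtspr(SPRN_MMCR0, mmcr0_user_only_sampling()). */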
218/* Short-hand versions for a number of the above SPRNs */
219
220#define CTR SPRN_CTR /* Counter Register */
221#define DAR SPRN_DAR /* Data Address Register */
222#define DABR SPRN_DABR /* Data Address Breakpoint Register */
223#define DEC SPRN_DEC /* Decrement Register */
224#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
225#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
226#define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */
227#define NIADORM SPRN_NIADORM /* NIA Dormant Register */
228#define TSC SPRN_TSC /* Thread switch control */
229#define TST SPRN_TST /* Thread switch timeout */
230#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
231#define L2CR SPRN_L2CR /* PPC 750 L2 control register */
232#define __LR SPRN_LR
233#define PVR SPRN_PVR /* Processor Version */
234#define PIR SPRN_PIR /* Processor ID */
235#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
236#define SDR1 SPRN_SDR1 /* MMU hash base register */
237#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
238#define SPR1 SPRN_SPRG1
239#define SPR2 SPRN_SPRG2
240#define SPR3 SPRN_SPRG3
241#define SPRG0 SPRN_SPRG0
242#define SPRG1 SPRN_SPRG1
243#define SPRG2 SPRN_SPRG2
244#define SPRG3 SPRN_SPRG3
245#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
246#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
247#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
248#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
249#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
250#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
251#define XER SPRN_XER
252
253/* Processor Version Register (PVR) field extraction */
254
255#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
256#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
257
258/* Processor Version Numbers */
259#define PV_NORTHSTAR 0x0033
260#define PV_PULSAR 0x0034
261#define PV_POWER4 0x0035
262#define PV_ICESTAR 0x0036
263#define PV_SSTAR 0x0037
264#define PV_POWER4p 0x0038
265#define PV_970 0x0039
266#define PV_POWER5 0x003A
267#define PV_POWER5p 0x003B
268#define PV_970FX 0x003C
269#define PV_630 0x0040
270#define PV_630p 0x0041
271#define PV_970MP 0x0044
272#define PV_BE 0x0070
273
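PVR_VER() extracts the version field that these PV_* numbers correspond to, which is how code identifies the running processor (the __is_processor() helper removed from system.h below did exactly this). A minimal sketch using the mfspr() macro defined later in this header:

static inline int cpu_is_power5(void)
{
	unsigned long pvr = mfspr(SPRN_PVR);

	return PVR_VER(pvr) == PV_POWER5;
}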
274/* Platforms supported by PPC64 */
275#define PLATFORM_PSERIES 0x0100
276#define PLATFORM_PSERIES_LPAR 0x0101
277#define PLATFORM_ISERIES_LPAR 0x0201
278#define PLATFORM_LPAR 0x0001
279#define PLATFORM_POWERMAC 0x0400
280#define PLATFORM_MAPLE 0x0500
281#define PLATFORM_BPA 0x1000
282
283/* Compatibility with drivers coming from PPC32 world */
284#define _machine (systemcfg->platform)
285#define _MACH_Pmac PLATFORM_POWERMAC
286
287/*
288 * List of interrupt controllers.
289 */
290#define IC_INVALID 0
291#define IC_OPEN_PIC 1
292#define IC_PPC_XIC 2
293#define IC_BPA_IIC 3
294
295#define XGLUE(a,b) a##b
296#define GLUE(a,b) XGLUE(a,b)
297
298#ifdef __ASSEMBLY__
299
300#define _GLOBAL(name) \
301 .section ".text"; \
302 .align 2 ; \
303 .globl name; \
304 .globl GLUE(.,name); \
305 .section ".opd","aw"; \
306name: \
307 .quad GLUE(.,name); \
308 .quad .TOC.@tocbase; \
309 .quad 0; \
310 .previous; \
311 .type GLUE(.,name),@function; \
312GLUE(.,name):
313
314#define _KPROBE(name) \
315 .section ".kprobes.text","a"; \
316 .align 2 ; \
317 .globl name; \
318 .globl GLUE(.,name); \
319 .section ".opd","aw"; \
320name: \
321 .quad GLUE(.,name); \
322 .quad .TOC.@tocbase; \
323 .quad 0; \
324 .previous; \
325 .type GLUE(.,name),@function; \
326GLUE(.,name):
327
328#define _STATIC(name) \
329 .section ".text"; \
330 .align 2 ; \
331 .section ".opd","aw"; \
332name: \
333 .quad GLUE(.,name); \
334 .quad .TOC.@tocbase; \
335 .quad 0; \
336 .previous; \
337 .type GLUE(.,name),@function; \
338GLUE(.,name):
339
340#else /* __ASSEMBLY__ */
341
342/*
343 * Default implementation of macro that returns current
344 * instruction pointer ("program counter").
345 */
346#define current_text_addr() ({ __label__ _l; _l: &&_l;})
347
348/* Macros for setting and retrieving special purpose registers */
349
350#define mfmsr() ({unsigned long rval; \
351 asm volatile("mfmsr %0" : "=r" (rval)); rval;})
352
353#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
354 : : "r" (v))
355#define mtmsrd(v) __mtmsrd((v), 0)
356
357#define mfspr(rn) ({unsigned long rval; \
358 asm volatile("mfspr %0," __stringify(rn) \
359 : "=r" (rval)); rval;})
360#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
361
362#define mftb() ({unsigned long rval; \
363 asm volatile("mftb %0" : "=r" (rval)); rval;})
364
365#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
366#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
367
368#define mfasr() ({unsigned long rval; \
369 asm volatile("mfasr %0" : "=r" (rval)); rval;})
370
371static inline void set_tb(unsigned int upper, unsigned int lower)
372{
373 mttbl(0);
374 mttbu(upper);
375 mttbl(lower);
376}
377
378#define __get_SP() ({unsigned long sp; \
379 asm volatile("mr %0,1": "=r" (sp)); sp;})
380
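set_tb() above clears the low half first so that a carry out of the low word cannot corrupt the upper half while both are rewritten. The accessor macros themselves read like ordinary functions; a minimal sketch (helper names are illustrative):

static inline unsigned long snapshot_timebase(void)
{
	return mftb();			/* full 64-bit timebase read on ppc64 */
}

static inline void arm_decrementer(unsigned long ticks)
{
	mtspr(SPRN_DEC, ticks);		/* next decrementer interrupt after 'ticks' */
}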
381#ifdef __KERNEL__
382
383extern int have_of;
384extern u64 ppc64_interrupt_controller;
385
386struct task_struct;
387void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
388void release_thread(struct task_struct *);
389
390/* Prepare to copy thread state - unlazy all lazy status */
391extern void prepare_to_copy(struct task_struct *tsk);
392
393/* Create a new kernel thread. */
394extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
395
396/* Lazy FPU handling on uni-processor */
397extern struct task_struct *last_task_used_math;
398extern struct task_struct *last_task_used_altivec;
399
400/* 64-bit user address space is 44-bits (16TB user VM) */
401#define TASK_SIZE_USER64 (0x0000100000000000UL)
402
403/*
404 * 32-bit user address space is 4GB - 1 page
405 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
406 */
407#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
408
409#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
410 TASK_SIZE_USER32 : TASK_SIZE_USER64)
411
412/* This decides where the kernel will search for a free chunk of vm
413 * space during mmap's.
414 */
415#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
416#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
417
418#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
419 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
420
421typedef struct {
422 unsigned long seg;
423} mm_segment_t;
424
425struct thread_struct {
426 unsigned long ksp; /* Kernel stack pointer */
427 unsigned long ksp_vsid;
428 struct pt_regs *regs; /* Pointer to saved register state */
429 mm_segment_t fs; /* for get_fs() validation */
430 double fpr[32]; /* Complete floating point set */
431 unsigned long fpscr; /* Floating point status (plus pad) */
432 unsigned long fpexc_mode; /* Floating-point exception mode */
433 unsigned long start_tb; /* Start purr when proc switched in */
434 unsigned long accum_tb; /* Total accumulated purr for process */
435 unsigned long vdso_base; /* base of the vDSO library */
436 unsigned long dabr; /* Data address breakpoint register */
437#ifdef CONFIG_ALTIVEC
438 /* Complete AltiVec register set */
439 vector128 vr[32] __attribute((aligned(16)));
440 /* AltiVec status */
441 vector128 vscr __attribute((aligned(16)));
442 unsigned long vrsave;
443 int used_vr; /* set if process has used altivec */
444#endif /* CONFIG_ALTIVEC */
445};
446
447#define ARCH_MIN_TASKALIGN 16
448
449#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
450
451#define INIT_THREAD { \
452 .ksp = INIT_SP, \
453 .regs = (struct pt_regs *)INIT_SP - 1, \
454 .fs = KERNEL_DS, \
455 .fpr = {0}, \
456 .fpscr = 0, \
457 .fpexc_mode = MSR_FE0|MSR_FE1, \
458}
459
460/*
461 * Return saved PC of a blocked thread. For now, this is the "user" PC
462 */
463#define thread_saved_pc(tsk) \
464 ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
465
466unsigned long get_wchan(struct task_struct *p);
467
468#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
469#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
470
471/* Get/set floating-point exception mode */
472#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
473#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
474
475extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
476extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
477
478static inline unsigned int __unpack_fe01(unsigned long msr_bits)
479{
480 return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
481}
482
483static inline unsigned long __pack_fe01(unsigned int fpmode)
484{
485 return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
486}
487
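Since MSR_FE0 is bit 11 and MSR_FE1 is bit 8, shifting by 10 and 8 packs the two enable bits into a two-bit mode value (FE0 in bit 1, FE1 in bit 0), and __pack_fe01() is the exact inverse. A minimal sketch (mode 3, both bits set, selects precise FP exceptions):

static inline unsigned long msr_set_precise_fp(unsigned long msr)
{
	return (msr & ~(MSR_FE0 | MSR_FE1)) | __pack_fe01(3);
}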
488#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
489
490/*
491 * Prefetch macros.
492 */
493#define ARCH_HAS_PREFETCH
494#define ARCH_HAS_PREFETCHW
495#define ARCH_HAS_SPINLOCK_PREFETCH
496
497static inline void prefetch(const void *x)
498{
499 if (unlikely(!x))
500 return;
501
502 __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
503}
504
505static inline void prefetchw(const void *x)
506{
507 if (unlikely(!x))
508 return;
509
510 __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
511}
512
513#define spin_lock_prefetch(x) prefetchw(x)
514
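prefetch()/prefetchw() issue dcbt/dcbtst hints, so they are cheap to drop into pointer-chasing loops; the explicit NULL check above simply skips pointless prefetches of a NULL pointer. A hedged usage sketch (the list type is invented for illustration):

struct demo_node { struct demo_node *next; int payload; };

static inline int demo_sum_list(struct demo_node *n)
{
	int sum = 0;

	while (n) {
		prefetch(n->next);	/* start pulling the next node into cache early */
		sum += n->payload;
		n = n->next;
	}
	return sum;
}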
515#define HAVE_ARCH_PICK_MMAP_LAYOUT
516
517static inline void ppc64_runlatch_on(void)
518{
519 unsigned long ctrl;
520
521 if (cpu_has_feature(CPU_FTR_CTRL)) {
522 ctrl = mfspr(SPRN_CTRLF);
523 ctrl |= CTRL_RUNLATCH;
524 mtspr(SPRN_CTRLT, ctrl);
525 }
526}
527
528static inline void ppc64_runlatch_off(void)
529{
530 unsigned long ctrl;
531
532 if (cpu_has_feature(CPU_FTR_CTRL)) {
533 ctrl = mfspr(SPRN_CTRLF);
534 ctrl &= ~CTRL_RUNLATCH;
535 mtspr(SPRN_CTRLT, ctrl);
536 }
537}
538
539#endif /* __KERNEL__ */
540
541#endif /* __ASSEMBLY__ */
542
543#ifdef __KERNEL__
544#define RUNLATCH_ON(REG) \
545BEGIN_FTR_SECTION \
546 mfspr (REG),SPRN_CTRLF; \
547 ori (REG),(REG),CTRL_RUNLATCH; \
548 mtspr SPRN_CTRLT,(REG); \
549END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
550#endif
551
552/*
553 * Number of entries in the SLB. If this ever changes we should handle
554 * it with a cpu feature fixup.
555 */
556#define SLB_NUM_ENTRIES 64
557
558#endif /* __ASM_PPC64_PROCESSOR_H */
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
index c02ec1d6b909..e8d0d2ab4c0f 100644
--- a/include/asm-ppc64/prom.h
+++ b/include/asm-ppc64/prom.h
@@ -14,6 +14,7 @@
14 * as published by the Free Software Foundation; either version 14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 */ 16 */
17#include <linux/config.h>
17#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
18#include <asm/atomic.h> 19#include <asm/atomic.h>
19 20
@@ -137,6 +138,9 @@ struct device_node {
137 struct kref kref; 138 struct kref kref;
138 unsigned long _flags; 139 unsigned long _flags;
139 void *data; 140 void *data;
141#ifdef CONFIG_PPC_ISERIES
142 struct list_head Device_List;
143#endif
140}; 144};
141 145
142extern struct device_node *of_chosen; 146extern struct device_node *of_chosen;
diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h
index d86f742e9a21..c5e9052e7967 100644
--- a/include/asm-ppc64/smp.h
+++ b/include/asm-ppc64/smp.h
@@ -77,7 +77,6 @@ extern int smt_enabled_at_boot;
77 77
78extern int smp_mpic_probe(void); 78extern int smp_mpic_probe(void);
79extern void smp_mpic_setup_cpu(int cpu); 79extern void smp_mpic_setup_cpu(int cpu);
80extern void smp_mpic_message_pass(int target, int msg);
81extern void smp_generic_kick_cpu(int nr); 80extern void smp_generic_kick_cpu(int nr);
82 81
83extern void smp_generic_give_timebase(void); 82extern void smp_generic_give_timebase(void);
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 375015c62f20..99b8ca52f101 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -13,7 +13,7 @@
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/hw_irq.h> 15#include <asm/hw_irq.h>
16#include <asm/memory.h> 16#include <asm/synch.h>
17 17
18/* 18/*
19 * Memory barrier. 19 * Memory barrier.
@@ -48,7 +48,7 @@
48#ifdef CONFIG_SMP 48#ifdef CONFIG_SMP
49#define smp_mb() mb() 49#define smp_mb() mb()
50#define smp_rmb() rmb() 50#define smp_rmb() rmb()
51#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory") 51#define smp_wmb() eieio()
52#define smp_read_barrier_depends() read_barrier_depends() 52#define smp_read_barrier_depends() read_barrier_depends()
53#else 53#else
54#define smp_mb() __asm__ __volatile__("": : :"memory") 54#define smp_mb() __asm__ __volatile__("": : :"memory")
@@ -120,8 +120,8 @@ extern void giveup_altivec(struct task_struct *);
120extern void disable_kernel_altivec(void); 120extern void disable_kernel_altivec(void);
121extern void enable_kernel_altivec(void); 121extern void enable_kernel_altivec(void);
122extern int emulate_altivec(struct pt_regs *); 122extern int emulate_altivec(struct pt_regs *);
123extern void cvt_fd(float *from, double *to, unsigned long *fpscr); 123extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
124extern void cvt_df(double *from, float *to, unsigned long *fpscr); 124extern void cvt_df(double *from, float *to, struct thread_struct *thread);
125 125
126#ifdef CONFIG_ALTIVEC 126#ifdef CONFIG_ALTIVEC
127extern void flush_altivec_to_thread(struct task_struct *); 127extern void flush_altivec_to_thread(struct task_struct *);
@@ -131,7 +131,12 @@ static inline void flush_altivec_to_thread(struct task_struct *t)
131} 131}
132#endif 132#endif
133 133
134static inline void flush_spe_to_thread(struct task_struct *t)
135{
136}
137
134extern int mem_init_done; /* set on boot once kmalloc can be called */ 138extern int mem_init_done; /* set on boot once kmalloc can be called */
139extern unsigned long memory_limit;
135 140
136/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */ 141/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
137extern unsigned char e2a(unsigned char); 142extern unsigned char e2a(unsigned char);
@@ -144,12 +149,7 @@ struct thread_struct;
144extern struct task_struct * _switch(struct thread_struct *prev, 149extern struct task_struct * _switch(struct thread_struct *prev,
145 struct thread_struct *next); 150 struct thread_struct *next);
146 151
147static inline int __is_processor(unsigned long pv) 152extern int powersave_nap; /* set if nap mode can be used in idle loop */
148{
149 unsigned long pvr;
150 asm("mfspr %0, 0x11F" : "=r" (pvr));
151 return(PVR_VER(pvr) == pv);
152}
153 153
154/* 154/*
155 * Atomic exchange 155 * Atomic exchange
diff --git a/include/asm-ppc64/tce.h b/include/asm-ppc64/tce.h
new file mode 100644
index 000000000000..d40b6b42ab35
--- /dev/null
+++ b/include/asm-ppc64/tce.h
@@ -0,0 +1,64 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
3 * Rewrite, cleanup:
4 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _ASM_TCE_H
22#define _ASM_TCE_H
23
24/*
25 * Tces come in two formats, one for the virtual bus and a different
26 * format for PCI
27 */
28#define TCE_VB 0
29#define TCE_PCI 1
30
31/* TCE page size is 4096 bytes (1 << 12) */
32
33#define TCE_SHIFT 12
34#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
35#define TCE_PAGE_FACTOR (PAGE_SHIFT - TCE_SHIFT)
36
37
38/* tce_entry
39 * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
40 * abstracted so layout is irrelevant.
41 */
42union tce_entry {
43 unsigned long te_word;
44 struct {
45 unsigned int tb_cacheBits :6; /* Cache hash bits - not used */
46 unsigned int tb_rsvd :6;
47 unsigned long tb_rpn :40; /* Real page number */
48 unsigned int tb_valid :1; /* Tce is valid (vb only) */
49 unsigned int tb_allio :1; /* Tce is valid for all lps (vb only) */
50 unsigned int tb_lpindex :8; /* LpIndex for user of TCE (vb only) */
51 unsigned int tb_pciwr :1; /* Write allowed (pci only) */
52 unsigned int tb_rdwr :1; /* Read allowed (pci), Write allowed (vb) */
53 } te_bits;
54#define te_cacheBits te_bits.tb_cacheBits
55#define te_rpn te_bits.tb_rpn
56#define te_valid te_bits.tb_valid
57#define te_allio te_bits.tb_allio
58#define te_lpindex te_bits.tb_lpindex
59#define te_pciwr te_bits.tb_pciwr
60#define te_rdwr te_bits.tb_rdwr
61};
62
63
64#endif
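The bitfield union above mirrors the hardware TCE layout; real code goes through the iommu layer, but a hypothetical helper shows how the fields compose for a read/write PCI mapping:

static inline unsigned long demo_make_pci_tce(unsigned long rpn, int writable)
{
	union tce_entry tce;

	tce.te_word  = 0;
	tce.te_rpn   = rpn;		/* real page number of the target page */
	tce.te_rdwr  = 1;		/* read allowed for PCI */
	tce.te_pciwr = writable;	/* write allowed for PCI */
	return tce.te_word;
}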
diff --git a/include/asm-ppc64/time.h b/include/asm-ppc64/time.h
deleted file mode 100644
index c6c762cad8b0..000000000000
--- a/include/asm-ppc64/time.h
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * Common time prototypes and such for all ppc machines.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef __PPC64_TIME_H
14#define __PPC64_TIME_H
15
16#ifdef __KERNEL__
17#include <linux/config.h>
18#include <linux/types.h>
19#include <linux/mc146818rtc.h>
20
21#include <asm/processor.h>
22#include <asm/paca.h>
23#include <asm/iSeries/HvCall.h>
24
25/* time.c */
26extern unsigned long tb_ticks_per_jiffy;
27extern unsigned long tb_ticks_per_usec;
28extern unsigned long tb_ticks_per_sec;
29extern unsigned long tb_to_xs;
30extern unsigned tb_to_us;
31extern unsigned long tb_last_stamp;
32
33struct rtc_time;
34extern void to_tm(int tim, struct rtc_time * tm);
35extern time_t last_rtc_update;
36
37void generic_calibrate_decr(void);
38void setup_default_decr(void);
39
40/* Some sane defaults: 125 MHz timebase, 1GHz processor */
41extern unsigned long ppc_proc_freq;
42#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
43extern unsigned long ppc_tb_freq;
44#define DEFAULT_TB_FREQ 125000000UL
45
46/*
47 * By putting all of this stuff into a single struct we
48 * reduce the number of cache lines touched by do_gettimeofday,
49 * both by collecting all of the data in one cache line and
50 * by touching only one TOC entry.
51 */
52struct gettimeofday_vars {
53 unsigned long tb_to_xs;
54 unsigned long stamp_xsec;
55 unsigned long tb_orig_stamp;
56};
57
58struct gettimeofday_struct {
59 unsigned long tb_ticks_per_sec;
60 struct gettimeofday_vars vars[2];
61 struct gettimeofday_vars * volatile varp;
62 unsigned var_idx;
63 unsigned tb_to_us;
64};
65
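The two-element vars[] array lets an updater prepare the inactive element and then switch varp, so a reader taking one snapshot through the volatile pointer never sees a half-written set. A hedged sketch of the reader side (mulhdu() is the high-multiply helper defined further down in this header):

static inline unsigned long demo_tb_to_xsec(struct gettimeofday_struct *g,
					    unsigned long tb)
{
	struct gettimeofday_vars *v = g->varp;	/* one consistent snapshot */

	return mulhdu(tb - v->tb_orig_stamp, v->tb_to_xs) + v->stamp_xsec;
}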
66struct div_result {
67 unsigned long result_high;
68 unsigned long result_low;
69};
70
71int via_calibrate_decr(void);
72
73static __inline__ unsigned long get_tb(void)
74{
75 return mftb();
76}
77
78/* Accessor functions for the decrementer register. */
79static __inline__ unsigned int get_dec(void)
80{
81 return (mfspr(SPRN_DEC));
82}
83
84static __inline__ void set_dec(int val)
85{
86#ifdef CONFIG_PPC_ISERIES
87 struct paca_struct *lpaca = get_paca();
88 int cur_dec;
89
90 if (lpaca->lppaca.shared_proc) {
91 lpaca->lppaca.virtual_decr = val;
92 cur_dec = get_dec();
93 if (cur_dec > val)
94 HvCall_setVirtualDecr();
95 } else
96#endif
97 mtspr(SPRN_DEC, val);
98}
99
100static inline unsigned long tb_ticks_since(unsigned long tstamp)
101{
102 return get_tb() - tstamp;
103}
104
105#define mulhwu(x,y) \
106({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
107#define mulhdu(x,y) \
108({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
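mulhwu()/mulhdu() return the upper half of a 32x32 or 64x64 unsigned multiply, which is how fixed-point scale factors such as tb_to_xs are applied without 128-bit arithmetic. A minimal sketch:

static inline unsigned long demo_scale_64(unsigned long ticks, unsigned long scale_0_64)
{
	/* (ticks * scale) >> 64: treat 'scale_0_64' as a 0.64 fixed-point factor. */
	return mulhdu(ticks, scale_0_64);
}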
109
110
111unsigned mulhwu_scale_factor(unsigned, unsigned);
112void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
113 unsigned divisor, struct div_result *dr );
114
115/* Used to store Processor Utilization register (purr) values */
116
117struct cpu_usage {
118 u64 current_tb; /* Holds the current purr register values */
119};
120
121DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
122
123#endif /* __KERNEL__ */
124#endif /* __PPC64_TIME_H */
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 74271d7c1d16..626f505c6ee3 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -20,10 +20,8 @@
20struct mm_struct; 20struct mm_struct;
21struct ppc64_tlb_batch { 21struct ppc64_tlb_batch {
22 unsigned long index; 22 unsigned long index;
23 unsigned long context;
24 struct mm_struct *mm; 23 struct mm_struct *mm;
25 pte_t pte[PPC64_TLB_BATCH_NR]; 24 pte_t pte[PPC64_TLB_BATCH_NR];
26 unsigned long addr[PPC64_TLB_BATCH_NR];
27 unsigned long vaddr[PPC64_TLB_BATCH_NR]; 25 unsigned long vaddr[PPC64_TLB_BATCH_NR];
28 unsigned int large; 26 unsigned int large;
29}; 27};
@@ -48,8 +46,7 @@ static inline void flush_tlb_pending(void)
48#define flush_tlb_kernel_range(start, end) flush_tlb_pending() 46#define flush_tlb_kernel_range(start, end) flush_tlb_pending()
49#define flush_tlb_pgtables(mm, start, end) do { } while (0) 47#define flush_tlb_pgtables(mm, start, end) do { } while (0)
50 48
51extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte, 49extern void flush_hash_page(unsigned long va, pte_t pte, int local);
52 int local); 50void flush_hash_range(unsigned long number, int local);
53void flush_hash_range(unsigned long context, unsigned long number, int local);
54 51
55#endif /* _PPC64_TLBFLUSH_H */ 52#endif /* _PPC64_TLBFLUSH_H */
diff --git a/include/asm-ppc64/udbg.h b/include/asm-ppc64/udbg.h
index c786604aef02..8192fb8541cc 100644
--- a/include/asm-ppc64/udbg.h
+++ b/include/asm-ppc64/udbg.h
@@ -28,4 +28,7 @@ extern unsigned long udbg_ifdebug(unsigned long flags);
28extern void __init ppcdbg_initialize(void); 28extern void __init ppcdbg_initialize(void);
29 29
30extern void udbg_init_uart(void __iomem *comport, unsigned int speed); 30extern void udbg_init_uart(void __iomem *comport, unsigned int speed);
31
32struct device_node;
33extern void udbg_init_scc(struct device_node *np);
31#endif 34#endif
diff --git a/include/asm-ppc64/uninorth.h b/include/asm-ppc64/uninorth.h
deleted file mode 100644
index 7ad7059f2c80..000000000000
--- a/include/asm-ppc64/uninorth.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/uninorth.h>
2
diff --git a/include/asm-ppc64/unistd.h b/include/asm-ppc64/unistd.h
deleted file mode 100644
index 977bc980c1af..000000000000
--- a/include/asm-ppc64/unistd.h
+++ /dev/null
@@ -1,487 +0,0 @@
1#ifndef _ASM_PPC_UNISTD_H_
2#define _ASM_PPC_UNISTD_H_
3
4/*
5 * This file contains the system call numbers.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#define __NR_restart_syscall 0
14#define __NR_exit 1
15#define __NR_fork 2
16#define __NR_read 3
17#define __NR_write 4
18#define __NR_open 5
19#define __NR_close 6
20#define __NR_waitpid 7
21#define __NR_creat 8
22#define __NR_link 9
23#define __NR_unlink 10
24#define __NR_execve 11
25#define __NR_chdir 12
26#define __NR_time 13
27#define __NR_mknod 14
28#define __NR_chmod 15
29#define __NR_lchown 16
30#define __NR_break 17
31#define __NR_oldstat 18
32#define __NR_lseek 19
33#define __NR_getpid 20
34#define __NR_mount 21
35#define __NR_umount 22
36#define __NR_setuid 23
37#define __NR_getuid 24
38#define __NR_stime 25
39#define __NR_ptrace 26
40#define __NR_alarm 27
41#define __NR_oldfstat 28
42#define __NR_pause 29
43#define __NR_utime 30
44#define __NR_stty 31
45#define __NR_gtty 32
46#define __NR_access 33
47#define __NR_nice 34
48#define __NR_ftime 35
49#define __NR_sync 36
50#define __NR_kill 37
51#define __NR_rename 38
52#define __NR_mkdir 39
53#define __NR_rmdir 40
54#define __NR_dup 41
55#define __NR_pipe 42
56#define __NR_times 43
57#define __NR_prof 44
58#define __NR_brk 45
59#define __NR_setgid 46
60#define __NR_getgid 47
61#define __NR_signal 48
62#define __NR_geteuid 49
63#define __NR_getegid 50
64#define __NR_acct 51
65#define __NR_umount2 52
66#define __NR_lock 53
67#define __NR_ioctl 54
68#define __NR_fcntl 55
69#define __NR_mpx 56
70#define __NR_setpgid 57
71#define __NR_ulimit 58
72#define __NR_oldolduname 59
73#define __NR_umask 60
74#define __NR_chroot 61
75#define __NR_ustat 62
76#define __NR_dup2 63
77#define __NR_getppid 64
78#define __NR_getpgrp 65
79#define __NR_setsid 66
80#define __NR_sigaction 67
81#define __NR_sgetmask 68
82#define __NR_ssetmask 69
83#define __NR_setreuid 70
84#define __NR_setregid 71
85#define __NR_sigsuspend 72
86#define __NR_sigpending 73
87#define __NR_sethostname 74
88#define __NR_setrlimit 75
89#define __NR_getrlimit 76
90#define __NR_getrusage 77
91#define __NR_gettimeofday 78
92#define __NR_settimeofday 79
93#define __NR_getgroups 80
94#define __NR_setgroups 81
95#define __NR_select 82
96#define __NR_symlink 83
97#define __NR_oldlstat 84
98#define __NR_readlink 85
99#define __NR_uselib 86
100#define __NR_swapon 87
101#define __NR_reboot 88
102#define __NR_readdir 89
103#define __NR_mmap 90
104#define __NR_munmap 91
105#define __NR_truncate 92
106#define __NR_ftruncate 93
107#define __NR_fchmod 94
108#define __NR_fchown 95
109#define __NR_getpriority 96
110#define __NR_setpriority 97
111#define __NR_profil 98
112#define __NR_statfs 99
113#define __NR_fstatfs 100
114#define __NR_ioperm 101
115#define __NR_socketcall 102
116#define __NR_syslog 103
117#define __NR_setitimer 104
118#define __NR_getitimer 105
119#define __NR_stat 106
120#define __NR_lstat 107
121#define __NR_fstat 108
122#define __NR_olduname 109
123#define __NR_iopl 110
124#define __NR_vhangup 111
125#define __NR_idle 112
126#define __NR_vm86 113
127#define __NR_wait4 114
128#define __NR_swapoff 115
129#define __NR_sysinfo 116
130#define __NR_ipc 117
131#define __NR_fsync 118
132#define __NR_sigreturn 119
133#define __NR_clone 120
134#define __NR_setdomainname 121
135#define __NR_uname 122
136#define __NR_modify_ldt 123
137#define __NR_adjtimex 124
138#define __NR_mprotect 125
139#define __NR_sigprocmask 126
140#define __NR_create_module 127
141#define __NR_init_module 128
142#define __NR_delete_module 129
143#define __NR_get_kernel_syms 130
144#define __NR_quotactl 131
145#define __NR_getpgid 132
146#define __NR_fchdir 133
147#define __NR_bdflush 134
148#define __NR_sysfs 135
149#define __NR_personality 136
150#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
151#define __NR_setfsuid 138
152#define __NR_setfsgid 139
153#define __NR__llseek 140
154#define __NR_getdents 141
155#define __NR__newselect 142
156#define __NR_flock 143
157#define __NR_msync 144
158#define __NR_readv 145
159#define __NR_writev 146
160#define __NR_getsid 147
161#define __NR_fdatasync 148
162#define __NR__sysctl 149
163#define __NR_mlock 150
164#define __NR_munlock 151
165#define __NR_mlockall 152
166#define __NR_munlockall 153
167#define __NR_sched_setparam 154
168#define __NR_sched_getparam 155
169#define __NR_sched_setscheduler 156
170#define __NR_sched_getscheduler 157
171#define __NR_sched_yield 158
172#define __NR_sched_get_priority_max 159
173#define __NR_sched_get_priority_min 160
174#define __NR_sched_rr_get_interval 161
175#define __NR_nanosleep 162
176#define __NR_mremap 163
177#define __NR_setresuid 164
178#define __NR_getresuid 165
179#define __NR_query_module 166
180#define __NR_poll 167
181#define __NR_nfsservctl 168
182#define __NR_setresgid 169
183#define __NR_getresgid 170
184#define __NR_prctl 171
185#define __NR_rt_sigreturn 172
186#define __NR_rt_sigaction 173
187#define __NR_rt_sigprocmask 174
188#define __NR_rt_sigpending 175
189#define __NR_rt_sigtimedwait 176
190#define __NR_rt_sigqueueinfo 177
191#define __NR_rt_sigsuspend 178
192#define __NR_pread64 179
193#define __NR_pwrite64 180
194#define __NR_chown 181
195#define __NR_getcwd 182
196#define __NR_capget 183
197#define __NR_capset 184
198#define __NR_sigaltstack 185
199#define __NR_sendfile 186
200#define __NR_getpmsg 187 /* some people actually want streams */
201#define __NR_putpmsg 188 /* some people actually want streams */
202#define __NR_vfork 189
203#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
204#define __NR_readahead 191
205/* #define __NR_mmap2 192 32bit only */
206/* #define __NR_truncate64 193 32bit only */
207/* #define __NR_ftruncate64 194 32bit only */
208/* #define __NR_stat64 195 32bit only */
209/* #define __NR_lstat64 196 32bit only */
210/* #define __NR_fstat64 197 32bit only */
211#define __NR_pciconfig_read 198
212#define __NR_pciconfig_write 199
213#define __NR_pciconfig_iobase 200
214#define __NR_multiplexer 201
215#define __NR_getdents64 202
216#define __NR_pivot_root 203
217/* #define __NR_fcntl64 204 32bit only */
218#define __NR_madvise 205
219#define __NR_mincore 206
220#define __NR_gettid 207
221#define __NR_tkill 208
222#define __NR_setxattr 209
223#define __NR_lsetxattr 210
224#define __NR_fsetxattr 211
225#define __NR_getxattr 212
226#define __NR_lgetxattr 213
227#define __NR_fgetxattr 214
228#define __NR_listxattr 215
229#define __NR_llistxattr 216
230#define __NR_flistxattr 217
231#define __NR_removexattr 218
232#define __NR_lremovexattr 219
233#define __NR_fremovexattr 220
234#define __NR_futex 221
235#define __NR_sched_setaffinity 222
236#define __NR_sched_getaffinity 223
237/* 224 currently unused */
238#define __NR_tuxcall 225
239/* #define __NR_sendfile64 226 32bit only */
240#define __NR_io_setup 227
241#define __NR_io_destroy 228
242#define __NR_io_getevents 229
243#define __NR_io_submit 230
244#define __NR_io_cancel 231
245#define __NR_set_tid_address 232
246#define __NR_fadvise64 233
247#define __NR_exit_group 234
248#define __NR_lookup_dcookie 235
249#define __NR_epoll_create 236
250#define __NR_epoll_ctl 237
251#define __NR_epoll_wait 238
252#define __NR_remap_file_pages 239
253#define __NR_timer_create 240
254#define __NR_timer_settime 241
255#define __NR_timer_gettime 242
256#define __NR_timer_getoverrun 243
257#define __NR_timer_delete 244
258#define __NR_clock_settime 245
259#define __NR_clock_gettime 246
260#define __NR_clock_getres 247
261#define __NR_clock_nanosleep 248
262#define __NR_swapcontext 249
263#define __NR_tgkill 250
264#define __NR_utimes 251
265#define __NR_statfs64 252
266#define __NR_fstatfs64 253
267/* #define __NR_fadvise64_64 254 32bit only */
268#define __NR_rtas 255
269/* Number 256 is reserved for sys_debug_setcontext */
270/* Number 257 is reserved for vserver */
271/* 258 currently unused */
272#define __NR_mbind 259
273#define __NR_get_mempolicy 260
274#define __NR_set_mempolicy 261
275#define __NR_mq_open 262
276#define __NR_mq_unlink 263
277#define __NR_mq_timedsend 264
278#define __NR_mq_timedreceive 265
279#define __NR_mq_notify 266
280#define __NR_mq_getsetattr 267
281#define __NR_kexec_load 268
282#define __NR_add_key 269
283#define __NR_request_key 270
284#define __NR_keyctl 271
285#define __NR_waitid 272
286#define __NR_ioprio_set 273
287#define __NR_ioprio_get 274
288#define __NR_inotify_init 275
289#define __NR_inotify_add_watch 276
290#define __NR_inotify_rm_watch 277
291
292#define __NR_syscalls 278
293#ifdef __KERNEL__
294#define NR_syscalls __NR_syscalls
295#endif
296
297#ifndef __ASSEMBLY__
298
299/* On powerpc a system call basically clobbers the same registers as a
300 * function call, with the exception of LR (which is needed for the
301 * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
302 * an error return status).
303 */
304
305#define __syscall_nr(nr, type, name, args...) \
306 unsigned long __sc_ret, __sc_err; \
307 { \
308 register unsigned long __sc_0 __asm__ ("r0"); \
309 register unsigned long __sc_3 __asm__ ("r3"); \
310 register unsigned long __sc_4 __asm__ ("r4"); \
311 register unsigned long __sc_5 __asm__ ("r5"); \
312 register unsigned long __sc_6 __asm__ ("r6"); \
313 register unsigned long __sc_7 __asm__ ("r7"); \
314 register unsigned long __sc_8 __asm__ ("r8"); \
315 \
316 __sc_loadargs_##nr(name, args); \
317 __asm__ __volatile__ \
318 ("sc \n\t" \
319 "mfcr %0 " \
320 : "=&r" (__sc_0), \
321 "=&r" (__sc_3), "=&r" (__sc_4), \
322 "=&r" (__sc_5), "=&r" (__sc_6), \
323 "=&r" (__sc_7), "=&r" (__sc_8) \
324 : __sc_asm_input_##nr \
325 : "cr0", "ctr", "memory", \
326 "r9", "r10","r11", "r12"); \
327 __sc_ret = __sc_3; \
328 __sc_err = __sc_0; \
329 } \
330 if (__sc_err & 0x10000000) \
331 { \
332 errno = __sc_ret; \
333 __sc_ret = -1; \
334 } \
335 return (type) __sc_ret
336
337#define __sc_loadargs_0(name, dummy...) \
338 __sc_0 = __NR_##name
339#define __sc_loadargs_1(name, arg1) \
340 __sc_loadargs_0(name); \
341 __sc_3 = (unsigned long) (arg1)
342#define __sc_loadargs_2(name, arg1, arg2) \
343 __sc_loadargs_1(name, arg1); \
344 __sc_4 = (unsigned long) (arg2)
345#define __sc_loadargs_3(name, arg1, arg2, arg3) \
346 __sc_loadargs_2(name, arg1, arg2); \
347 __sc_5 = (unsigned long) (arg3)
348#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \
349 __sc_loadargs_3(name, arg1, arg2, arg3); \
350 __sc_6 = (unsigned long) (arg4)
351#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \
352 __sc_loadargs_4(name, arg1, arg2, arg3, arg4); \
353 __sc_7 = (unsigned long) (arg5)
354#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
355 __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5); \
356 __sc_8 = (unsigned long) (arg6)
357
358#define __sc_asm_input_0 "0" (__sc_0)
359#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
360#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
361#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
362#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
363#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
364#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
365
366#define _syscall0(type,name) \
367type name(void) \
368{ \
369 __syscall_nr(0, type, name); \
370}
371
372#define _syscall1(type,name,type1,arg1) \
373type name(type1 arg1) \
374{ \
375 __syscall_nr(1, type, name, arg1); \
376}
377
378#define _syscall2(type,name,type1,arg1,type2,arg2) \
379type name(type1 arg1, type2 arg2) \
380{ \
381 __syscall_nr(2, type, name, arg1, arg2); \
382}
383
384#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
385type name(type1 arg1, type2 arg2, type3 arg3) \
386{ \
387 __syscall_nr(3, type, name, arg1, arg2, arg3); \
388}
389
390#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
391type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
392{ \
393 __syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \
394}
395
396#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
397type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
398{ \
399 __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
400}
401#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
402type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
403{ \
404 __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
405}
406
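Each _syscallN() macro expands to a complete stub that loads r0 and r3-r8, executes sc, and turns the CR0.SO error indication (the 0x10000000 test above) into errno/-1; the execve stub further down uses _syscall3 the same way. A hedged sketch of userspace-style usage (the chosen calls are examples only):

#include <errno.h>	/* the expansion assigns to errno on failure */

_syscall0(long, gettid)	/* defines: long gettid(void) issuing __NR_gettid via "sc" */
_syscall3(long, write, int, fd, const void *, buf, unsigned long, count)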
407#ifdef __KERNEL_SYSCALLS__
408
409/*
410 * Forking from kernel space will result in the child getting a new,
411 * empty kernel stack area. Thus the child cannot access automatic
412 * variables set in the parent unless they are in registers, and the
413 * procedure where the fork was done cannot return to its caller in
414 * the child.
415 */
416
417/*
418 * System call prototypes.
419 */
420static inline _syscall3(int, execve, __const__ char *, file, char **, argv,
421 char **,envp)
422
423#endif /* __KERNEL_SYSCALLS__ */
424
425#ifdef __KERNEL__
426
427#include <linux/types.h>
428#include <linux/compiler.h>
429#include <linux/linkage.h>
430
431#define __ARCH_WANT_IPC_PARSE_VERSION
432#define __ARCH_WANT_OLD_READDIR
433#define __ARCH_WANT_STAT64
434#define __ARCH_WANT_SYS_ALARM
435#define __ARCH_WANT_SYS_GETHOSTNAME
436#define __ARCH_WANT_SYS_PAUSE
437#define __ARCH_WANT_SYS_SGETMASK
438#define __ARCH_WANT_SYS_SIGNAL
439#define __ARCH_WANT_SYS_TIME
440#define __ARCH_WANT_COMPAT_SYS_TIME
441#define __ARCH_WANT_SYS_UTIME
442#define __ARCH_WANT_SYS_WAITPID
443#define __ARCH_WANT_SYS_SOCKETCALL
444#define __ARCH_WANT_SYS_FADVISE64
445#define __ARCH_WANT_SYS_GETPGRP
446#define __ARCH_WANT_SYS_LLSEEK
447#define __ARCH_WANT_SYS_NICE
448#define __ARCH_WANT_SYS_OLD_GETRLIMIT
449#define __ARCH_WANT_SYS_OLDUMOUNT
450#define __ARCH_WANT_SYS_SIGPENDING
451#define __ARCH_WANT_SYS_SIGPROCMASK
452#define __ARCH_WANT_SYS_RT_SIGACTION
453
454unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
455 unsigned long flags, unsigned long fd, off_t offset);
456struct pt_regs;
457int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
458 unsigned long a3, unsigned long a4, unsigned long a5,
459 struct pt_regs *regs);
460int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3,
461 unsigned long p4, unsigned long p5, unsigned long p6,
462 struct pt_regs *regs);
463int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
464 unsigned long p4, unsigned long p5, unsigned long p6,
465 struct pt_regs *regs);
466int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
467 unsigned long p4, unsigned long p5, unsigned long p6,
468 struct pt_regs *regs);
469int sys_pipe(int __user *fildes);
470int sys_ptrace(long request, long pid, long addr, long data);
471struct sigaction;
472long sys_rt_sigaction(int sig, const struct sigaction __user *act,
473 struct sigaction __user *oact, size_t sigsetsize);
474
475/*
476 * "Conditional" syscalls
477 *
478 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
479 * but it doesn't work on all toolchains, so we just do it by hand
480 */
481#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
482
483#endif /* __KERNEL__ */
484
485#endif /* __ASSEMBLY__ */
486
487#endif /* _ASM_PPC_UNISTD_H_ */
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 873def6f363a..702cf436698c 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -29,9 +29,6 @@ struct semaphore {
29#define __SEMAPHORE_INITIALIZER(name,count) \ 29#define __SEMAPHORE_INITIALIZER(name,count) \
30 { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) } 30 { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
31 31
32#define __MUTEX_INITIALIZER(name) \
33 __SEMAPHORE_INITIALIZER(name,1)
34
35#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 32#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
36 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 33 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
37 34
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 0d51c484c2ea..348a88137445 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -8,11 +8,14 @@
8#ifndef _ASM_S390_SETUP_H 8#ifndef _ASM_S390_SETUP_H
9#define _ASM_S390_SETUP_H 9#define _ASM_S390_SETUP_H
10 10
11#include <asm/types.h>
12
11#define PARMAREA 0x10400 13#define PARMAREA 0x10400
12#define COMMAND_LINE_SIZE 896 14#define COMMAND_LINE_SIZE 896
13#define RAMDISK_ORIGIN 0x800000 15#define RAMDISK_ORIGIN 0x800000
14#define RAMDISK_SIZE 0x800000 16#define RAMDISK_SIZE 0x800000
15#define MEMORY_CHUNKS 16 /* max 0x7fff */ 17#define MEMORY_CHUNKS 16 /* max 0x7fff */
18#define IPL_PARMBLOCK_ORIGIN 0x2000
16 19
17#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
18 21
@@ -64,6 +67,53 @@ extern unsigned int console_irq;
64#define SET_CONSOLE_3215 do { console_mode = 2; } while (0) 67#define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
65#define SET_CONSOLE_3270 do { console_mode = 3; } while (0) 68#define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
66 69
70struct ipl_list_header {
71 u32 length;
72 u8 reserved[3];
73 u8 version;
74} __attribute__((packed));
75
76struct ipl_block_fcp {
77 u32 length;
78 u8 pbt;
79 u8 reserved1[322-1];
80 u16 devno;
81 u8 reserved2[4];
82 u64 wwpn;
83 u64 lun;
84 u32 bootprog;
85 u8 reserved3[12];
86 u64 br_lba;
87 u32 scp_data_len;
88 u8 reserved4[260];
89 u8 scp_data[];
90} __attribute__((packed));
91
92struct ipl_parameter_block {
93 union {
94 u32 length;
95 struct ipl_list_header header;
96 } hdr;
97 struct ipl_block_fcp fcp;
98} __attribute__((packed));
99
100#define IPL_MAX_SUPPORTED_VERSION (0)
101
102#define IPL_TYPE_FCP (0)
103
104/*
105 * IPL validity flags and parameters as detected in head.S
106 */
107extern u32 ipl_parameter_flags;
108extern u16 ipl_devno;
109
110#define IPL_DEVNO_VALID (ipl_parameter_flags & 1)
111#define IPL_PARMBLOCK_VALID (ipl_parameter_flags & 2)
112
113#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
114 IPL_PARMBLOCK_ORIGIN)
115#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.length)
116
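head.S is expected to set ipl_parameter_flags, so consumers test the validity bits before dereferencing the fixed-address parameter block. A hypothetical helper, just to show the intended access pattern:

static inline int demo_ipl_boot_lun(u64 *lun)
{
	struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;

	if (!IPL_PARMBLOCK_VALID || ipl->fcp.pbt != IPL_TYPE_FCP)
		return -1;
	*lun = ipl->fcp.lun;
	return 0;
}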
67#else 117#else
68 118
69#ifndef __s390x__ 119#ifndef __s390x__
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index 221e965da924..f97d92691f17 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -590,7 +590,6 @@ asmlinkage long sys_clone(struct pt_regs regs);
590asmlinkage long sys_fork(struct pt_regs regs); 590asmlinkage long sys_fork(struct pt_regs regs);
591asmlinkage long sys_vfork(struct pt_regs regs); 591asmlinkage long sys_vfork(struct pt_regs regs);
592asmlinkage long sys_pipe(unsigned long __user *fildes); 592asmlinkage long sys_pipe(unsigned long __user *fildes);
593asmlinkage long sys_ptrace(long request, long pid, long addr, long data);
594struct sigaction; 593struct sigaction;
595asmlinkage long sys_rt_sigaction(int sig, 594asmlinkage long sys_rt_sigaction(int sig,
596 const struct sigaction __user *act, 595 const struct sigaction __user *act,
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 0f4bcaae61bd..aef8ae43de13 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -224,8 +224,6 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
224static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 224static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
225{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; } 225{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
226 226
227#define page_pte(page) page_pte_prot(page, __pgprot(0))
228
229#define pmd_page_kernel(pmd) \ 227#define pmd_page_kernel(pmd) \
230((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) 228((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
231 229
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index b923a77a8a7e..489f7847c5d9 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -33,9 +33,6 @@ struct semaphore {
33 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 33 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
34} 34}
35 35
36#define __MUTEX_INITIALIZER(name) \
37 __SEMAPHORE_INITIALIZER(name,1)
38
39#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 36#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
40 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 37 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
41 38
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index ea89e8f223ea..f2c8e14d1fd9 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -503,7 +503,6 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
503asmlinkage int sys_pipe(unsigned long r4, unsigned long r5, 503asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
504 unsigned long r6, unsigned long r7, 504 unsigned long r6, unsigned long r7,
505 struct pt_regs regs); 505 struct pt_regs regs);
506asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
507asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char *buf, 506asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char *buf,
508 size_t count, long dummy, loff_t pos); 507 size_t count, long dummy, loff_t pos);
509asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char *buf, 508asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char *buf,
diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h
index 51db4307bfaf..51b05818e4eb 100644
--- a/include/asm-sh64/pgtable.h
+++ b/include/asm-sh64/pgtable.h
@@ -457,9 +457,6 @@ extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _
457extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 457extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
458{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; } 458{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
459 459
460#define page_pte_prot(page, prot) mk_pte(page, prot)
461#define page_pte(page) page_pte_prot(page, __pgprot(0))
462
463typedef pte_t *pte_addr_t; 460typedef pte_t *pte_addr_t;
464#define pgtable_cache_init() do { } while (0) 461#define pgtable_cache_init() do { } while (0)
465 462
diff --git a/include/asm-sh64/semaphore.h b/include/asm-sh64/semaphore.h
index fce22bb9a546..469526459149 100644
--- a/include/asm-sh64/semaphore.h
+++ b/include/asm-sh64/semaphore.h
@@ -40,9 +40,6 @@ struct semaphore {
40 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 40 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
41} 41}
42 42
43#define __MUTEX_INITIALIZER(name) \
44 __SEMAPHORE_INITIALIZER(name,1)
45
46#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 43#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
47 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 44 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
48 45
diff --git a/include/asm-sparc/floppy.h b/include/asm-sparc/floppy.h
index caf926116506..7a941b800b6b 100644
--- a/include/asm-sparc/floppy.h
+++ b/include/asm-sparc/floppy.h
@@ -17,10 +17,8 @@
17 17
18/* We don't need no stinkin' I/O port allocation crap. */ 18/* We don't need no stinkin' I/O port allocation crap. */
19#undef release_region 19#undef release_region
20#undef check_region
21#undef request_region 20#undef request_region
22#define release_region(X, Y) do { } while(0) 21#define release_region(X, Y) do { } while(0)
23#define check_region(X, Y) (0)
24#define request_region(X, Y, Z) (1) 22#define request_region(X, Y, Z) (1)
25 23
26/* References: 24/* References:
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index a14e98677500..b33c35411e82 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -255,8 +255,6 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
255#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte) 255#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
256#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte) 256#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
257 257
258#define page_pte_prot(page, prot) mk_pte(page, prot)
259#define page_pte(page) mk_pte(page, __pgprot(0))
260#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot) 258#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
261 259
262BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t) 260BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index 60ac5fd9eb48..f74ba31e265b 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -22,9 +22,6 @@ struct semaphore {
22 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 22 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
23} 23}
24 24
25#define __MUTEX_INITIALIZER(name) \
26 __SEMAPHORE_INITIALIZER(name,1)
27
28#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 25#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
29 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 26 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
30 27
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 8c6dfc6c7af6..9a02879b235d 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -231,9 +231,6 @@ extern struct page *mem_map_zero;
231#define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT) 231#define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT)
232#define pte_page(x) pfn_to_page(pte_pfn(x)) 232#define pte_page(x) pfn_to_page(pte_pfn(x))
233 233
234#define page_pte_prot(page, prot) mk_pte(page, prot)
235#define page_pte(page) page_pte_prot(page, __pgprot(0))
236
237static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) 234static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
238{ 235{
239 pte_t __pte; 236 pte_t __pte;
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
index 7419dd88b49e..093dcc6788db 100644
--- a/include/asm-sparc64/semaphore.h
+++ b/include/asm-sparc64/semaphore.h
@@ -22,9 +22,6 @@ struct semaphore {
22 { ATOMIC_INIT(count), \ 22 { ATOMIC_INIT(count), \
23 __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) } 23 __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
24 24
25#define __MUTEX_INITIALIZER(name) \
26 __SEMAPHORE_INITIALIZER(name, 1)
27
28#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ 25#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
29 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 26 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
30 27
diff --git a/include/asm-um/cache.h b/include/asm-um/cache.h
index 4b134fe8504e..a10602a5b2d6 100644
--- a/include/asm-um/cache.h
+++ b/include/asm-um/cache.h
@@ -1,10 +1,21 @@
1#ifndef __UM_CACHE_H 1#ifndef __UM_CACHE_H
2#define __UM_CACHE_H 2#define __UM_CACHE_H
3 3
4/* These are x86 numbers */ 4#include <linux/config.h>
5#define L1_CACHE_SHIFT 5
6#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7 5
8#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */ 6#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8#elif defined(CONFIG_UML_X86) /* 64-bit */
9# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
10#else
11/* XXX: this was taken from x86, now it's completely random. Luckily only
12 * affects SMP padding. */
13# define L1_CACHE_SHIFT 5
14#endif
15
16/* XXX: this is valid for x86 and x86_64. */
17#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
18
19#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9 20
10#endif 21#endif
diff --git a/include/asm-um/linkage.h b/include/asm-um/linkage.h
index 7dfce37adc8b..e3d62dcbd356 100644
--- a/include/asm-um/linkage.h
+++ b/include/asm-um/linkage.h
@@ -3,4 +3,12 @@
3 3
4#include "asm/arch/linkage.h" 4#include "asm/arch/linkage.h"
5 5
6#include <linux/config.h>
7
8/* <linux/linkage.h> will pick sane defaults */
9#ifdef CONFIG_GPROF
10#undef FASTCALL
11#undef fastcall
12#endif
13
6#endif 14#endif
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h
index c514062bb69e..df6cdecf6c1f 100644
--- a/include/asm-v850/semaphore.h
+++ b/include/asm-v850/semaphore.h
@@ -18,9 +18,6 @@ struct semaphore {
18 { ATOMIC_INIT (count), 0, \ 18 { ATOMIC_INIT (count), 0, \
19 __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) } 19 __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) }
20 20
21#define __MUTEX_INITIALIZER(name) \
22 __SEMAPHORE_INITIALIZER (name,1)
23
24#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 21#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
25 struct semaphore name = __SEMAPHORE_INITIALIZER (name,count) 22 struct semaphore name = __SEMAPHORE_INITIALIZER (name,count)
26 23
diff --git a/include/asm-v850/unistd.h b/include/asm-v850/unistd.h
index 3b552096c0e8..5a86f8e976ec 100644
--- a/include/asm-v850/unistd.h
+++ b/include/asm-v850/unistd.h
@@ -452,7 +452,6 @@ unsigned long sys_mmap2(unsigned long addr, size_t len,
452struct pt_regs; 452struct pt_regs;
453int sys_execve (char *name, char **argv, char **envp, struct pt_regs *regs); 453int sys_execve (char *name, char **argv, char **envp, struct pt_regs *regs);
454int sys_pipe (int *fildes); 454int sys_pipe (int *fildes);
455int sys_ptrace(long request, long pid, long addr, long data);
456struct sigaction; 455struct sigaction;
457asmlinkage long sys_rt_sigaction(int sig, 456asmlinkage long sys_rt_sigaction(int sig,
458 const struct sigaction __user *act, 457 const struct sigaction __user *act,
diff --git a/include/asm-x86_64/mtrr.h b/include/asm-x86_64/mtrr.h
index c5959d6418bb..66ac1c0f27e1 100644
--- a/include/asm-x86_64/mtrr.h
+++ b/include/asm-x86_64/mtrr.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/config.h> 26#include <linux/config.h>
27#include <linux/ioctl.h> 27#include <linux/ioctl.h>
28#include <linux/compat.h>
28 29
29#define MTRR_IOCTL_BASE 'M' 30#define MTRR_IOCTL_BASE 'M'
30 31
@@ -105,4 +106,36 @@ static __inline__ int mtrr_del_page (int reg, unsigned long base,
105 106
106#endif 107#endif
107 108
109#ifdef CONFIG_COMPAT
110
111struct mtrr_sentry32
112{
113 compat_ulong_t base; /* Base address */
114 compat_uint_t size; /* Size of region */
115 compat_uint_t type; /* Type of region */
116};
117
118struct mtrr_gentry32
119{
120 compat_ulong_t regnum; /* Register number */
121 compat_uint_t base; /* Base address */
122 compat_uint_t size; /* Size of region */
123 compat_uint_t type; /* Type of region */
124};
125
126#define MTRR_IOCTL_BASE 'M'
127
128#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
129#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
130#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
131#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
132#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
133#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
134#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
135#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
136#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
137#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
138
139#endif /* CONFIG_COMPAT */
140
108#endif /* _LINUX_MTRR_H */ 141#endif /* _LINUX_MTRR_H */
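
The mtrr_sentry32/mtrr_gentry32 layouts above exist so a 32-bit process on a 64-bit kernel can issue the same MTRR ioctls. As a rough, hypothetical sketch (the handler name and hand-off are invented; only the 32-bit struct layout comes from this header), widening such a request into the native mtrr_sentry would look like:

    /* Hypothetical compat-ioctl fragment; only struct mtrr_sentry32 is
     * taken from the header above, the rest is illustrative. */
    static int example_compat_add_entry(void __user *argp)
    {
        struct mtrr_sentry32 s32;
        struct mtrr_sentry sentry;

        if (copy_from_user(&s32, argp, sizeof(s32)))
            return -EFAULT;

        /* widen the compat fields into the native structure */
        sentry.base = s32.base;
        sentry.size = s32.size;
        sentry.type = s32.type;

        /* ... hand sentry to the existing native MTRRIOC_ADD_ENTRY path ... */
        return 0;
    }
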
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index dd8711ecaf2f..7a07196a7202 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -318,8 +318,6 @@ static inline int pmd_large(pmd_t pte) {
318 * and a page entry and page directory to the page they refer to. 318 * and a page entry and page directory to the page they refer to.
319 */ 319 */
320 320
321#define page_pte(page) page_pte_prot(page, __pgprot(0))
322
323/* 321/*
324 * Level 4 access. 322 * Level 4 access.
325 */ 323 */
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
index f325e39bf3b9..a389aa6fe80f 100644
--- a/include/asm-x86_64/semaphore.h
+++ b/include/asm-x86_64/semaphore.h
@@ -56,9 +56,6 @@ struct semaphore {
56 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 56 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
57} 57}
58 58
59#define __MUTEX_INITIALIZER(name) \
60 __SEMAPHORE_INITIALIZER(name,1)
61
62#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 59#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
63 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 60 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
64 61
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 11ba931cf82f..3c494b65d33a 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -780,8 +780,6 @@ asmlinkage long sys_pipe(int *fildes);
780#include <linux/types.h> 780#include <linux/types.h>
781#include <asm/ptrace.h> 781#include <asm/ptrace.h>
782 782
783asmlinkage long sys_ptrace(long request, long pid,
784 unsigned long addr, long data);
785asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs); 783asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs);
786asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on); 784asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on);
787struct sigaction; 785struct sigaction;
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index 09e89ab3eb61..2a10e193b929 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -29,9 +29,6 @@ struct semaphore {
29 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 29 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
30} 30}
31 31
32#define __MUTEX_INITIALIZER(name) \
33 __SEMAPHORE_INITIALIZER(name, 1)
34
35#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ 32#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
36 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 33 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
37 34
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
new file mode 100644
index 000000000000..26f6ec38577a
--- /dev/null
+++ b/include/keys/user-type.h
@@ -0,0 +1,47 @@
1/* user-type.h: User-defined key type
2 *
3 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _KEYS_USER_TYPE_H
13#define _KEYS_USER_TYPE_H
14
15#include <linux/key.h>
16#include <linux/rcupdate.h>
17
18/*****************************************************************************/
19/*
20 * the payload for a key of type "user"
21 * - once filled in and attached to a key:
 22 * - the payload struct is invariant and may not be changed, only replaced
23 * - the payload must be read with RCU procedures or with the key semaphore
24 * held
25 * - the payload may only be replaced with the key semaphore write-locked
26 * - the key's data length is the size of the actual data, not including the
27 * payload wrapper
28 */
29struct user_key_payload {
30 struct rcu_head rcu; /* RCU destructor */
31 unsigned short datalen; /* length of this data */
32 char data[0]; /* actual data */
33};
34
35extern struct key_type key_type_user;
36
37extern int user_instantiate(struct key *key, const void *data, size_t datalen);
38extern int user_duplicate(struct key *key, const struct key *source);
39extern int user_update(struct key *key, const void *data, size_t datalen);
40extern int user_match(const struct key *key, const void *criterion);
41extern void user_destroy(struct key *key);
42extern void user_describe(const struct key *user, struct seq_file *m);
43extern long user_read(const struct key *key,
44 char __user *buffer, size_t buflen);
45
46
47#endif /* _KEYS_USER_TYPE_H */
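
Following the locking rules in the comment block (payload readable under RCU or with the key semaphore, replaceable only with the semaphore write-locked), a reader might look roughly like the fragment below; this is an illustrative sketch, not the in-tree user_read().

    /* Illustrative sketch: inspect a "user" key payload under RCU. */
    const struct user_key_payload *upayload;

    rcu_read_lock();
    upayload = rcu_dereference(key->payload.data);
    if (upayload)
        printk(KERN_DEBUG "key %d carries %hu payload bytes\n",
               key->serial, upayload->datalen);
    rcu_read_unlock();
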
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 86dd5502b05c..7d8ff97b3e92 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -40,6 +40,8 @@
40 * bitmap_weight(src, nbits) Hamming Weight: number set bits 40 * bitmap_weight(src, nbits) Hamming Weight: number set bits
41 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n 41 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
42 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n 42 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
43 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
44 * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
43 * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf 45 * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
44 * bitmap_parse(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf 46 * bitmap_parse(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
45 * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf 47 * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
@@ -104,6 +106,10 @@ extern int bitmap_scnlistprintf(char *buf, unsigned int len,
104 const unsigned long *src, int nbits); 106 const unsigned long *src, int nbits);
105extern int bitmap_parselist(const char *buf, unsigned long *maskp, 107extern int bitmap_parselist(const char *buf, unsigned long *maskp,
106 int nmaskbits); 108 int nmaskbits);
109extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
110 const unsigned long *old, const unsigned long *new, int bits);
111extern int bitmap_bitremap(int oldbit,
112 const unsigned long *old, const unsigned long *new, int bits);
107extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); 113extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
108extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); 114extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
109extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); 115extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c937d6e65502..1db061bb6b08 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -190,6 +190,7 @@ extern int buffer_heads_over_limit;
190 */ 190 */
191int try_to_release_page(struct page * page, gfp_t gfp_mask); 191int try_to_release_page(struct page * page, gfp_t gfp_mask);
192int block_invalidatepage(struct page *page, unsigned long offset); 192int block_invalidatepage(struct page *page, unsigned long offset);
193int do_invalidatepage(struct page *page, unsigned long offset);
193int block_write_full_page(struct page *page, get_block_t *get_block, 194int block_write_full_page(struct page *page, get_block_t *get_block,
194 struct writeback_control *wbc); 195 struct writeback_control *wbc);
195int block_read_full_page(struct page*, get_block_t*); 196int block_read_full_page(struct page*, get_block_t*);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 86980c68234a..1f7b2c097503 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -32,6 +32,7 @@ struct cpu {
32}; 32};
33 33
34extern int register_cpu(struct cpu *, int, struct node *); 34extern int register_cpu(struct cpu *, int, struct node *);
35extern struct sys_device *get_cpu_sysdev(int cpu);
35#ifdef CONFIG_HOTPLUG_CPU 36#ifdef CONFIG_HOTPLUG_CPU
36extern void unregister_cpu(struct cpu *, struct node *); 37extern void unregister_cpu(struct cpu *, struct node *);
37#endif 38#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ff7f80f48df1..d068176b7ad7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -23,6 +23,7 @@
23#include <linux/completion.h> 23#include <linux/completion.h>
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25#include <linux/cpumask.h> 25#include <linux/cpumask.h>
26#include <asm/div64.h>
26 27
27#define CPUFREQ_NAME_LEN 16 28#define CPUFREQ_NAME_LEN 16
28 29
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 9bdba8169b41..13e9f4a3ab26 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -12,6 +12,8 @@
12 * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c. 12 * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
13 * For details of cpulist_scnprintf() and cpulist_parse(), see 13 * For details of cpulist_scnprintf() and cpulist_parse(), see
14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. 14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
15 * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
16 * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
15 * 17 *
16 * The available cpumask operations are: 18 * The available cpumask operations are:
17 * 19 *
@@ -50,6 +52,8 @@
50 * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask 52 * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
51 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing 53 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
52 * int cpulist_parse(buf, map) Parse ascii string as cpulist 54 * int cpulist_parse(buf, map) Parse ascii string as cpulist
55 * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
56 * int cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
53 * 57 *
54 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask 58 * for_each_cpu_mask(cpu, mask) for-loop cpu over mask
55 * 59 *
@@ -294,6 +298,22 @@ static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
294 return bitmap_parselist(buf, dstp->bits, nbits); 298 return bitmap_parselist(buf, dstp->bits, nbits);
295} 299}
296 300
301#define cpu_remap(oldbit, old, new) \
302 __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
303static inline int __cpu_remap(int oldbit,
304 const cpumask_t *oldp, const cpumask_t *newp, int nbits)
305{
306 return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
307}
308
309#define cpus_remap(dst, src, old, new) \
310 __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
311static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
312 const cpumask_t *oldp, const cpumask_t *newp, int nbits)
313{
314 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
315}
316
297#if NR_CPUS > 1 317#if NR_CPUS > 1
298#define for_each_cpu_mask(cpu, mask) \ 318#define for_each_cpu_mask(cpu, mask) \
299 for ((cpu) = first_cpu(mask); \ 319 for ((cpu) = first_cpu(mask); \
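
cpu_remap() and cpus_remap() are thin wrappers that hand NR_CPUS-sized masks to the new bitmap helpers: whatever position a bit held relative to the old mask is re-expressed relative to the new one. A hypothetical fragment (all variable names invented):

    /* Hypothetical: 'placement' was chosen relative to 'old_allowed';
     * re-express it relative to 'new_allowed'. */
    cpumask_t old_allowed, new_allowed, placement, rebound;
    int oldcpu, newcpu;

    /* ... old_allowed, new_allowed and placement filled in elsewhere ... */

    cpus_remap(rebound, placement, old_allowed, new_allowed);

    /* single-bit form: where does one CPU of the old placement land? */
    oldcpu = first_cpu(placement);
    newcpu = cpu_remap(oldcpu, old_allowed, new_allowed);
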
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index a415f1d93e9a..05f4132622fc 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -60,7 +60,7 @@ struct dmi_device {
60 void *device_data; /* Type specific data */ 60 void *device_data; /* Type specific data */
61}; 61};
62 62
63#if defined(CONFIG_X86) && !defined(CONFIG_X86_64) 63#if defined(CONFIG_X86_32)
64 64
65extern int dmi_check_system(struct dmi_system_id *list); 65extern int dmi_check_system(struct dmi_system_id *list);
66extern char * dmi_get_system_info(int field); 66extern char * dmi_get_system_info(int field);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f83d997c5582..6d6226732c93 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -574,7 +574,14 @@ struct file_ra_state {
574#define RA_FLAG_INCACHE 0x02 /* file is already in cache */ 574#define RA_FLAG_INCACHE 0x02 /* file is already in cache */
575 575
576struct file { 576struct file {
577 struct list_head f_list; 577 /*
578 * fu_list becomes invalid after file_free is called and queued via
579 * fu_rcuhead for RCU freeing
580 */
581 union {
582 struct list_head fu_list;
583 struct rcu_head fu_rcuhead;
584 } f_u;
578 struct dentry *f_dentry; 585 struct dentry *f_dentry;
579 struct vfsmount *f_vfsmnt; 586 struct vfsmount *f_vfsmnt;
580 struct file_operations *f_op; 587 struct file_operations *f_op;
@@ -598,7 +605,6 @@ struct file {
598 spinlock_t f_ep_lock; 605 spinlock_t f_ep_lock;
599#endif /* #ifdef CONFIG_EPOLL */ 606#endif /* #ifdef CONFIG_EPOLL */
600 struct address_space *f_mapping; 607 struct address_space *f_mapping;
601 struct rcu_head f_rcuhead;
602}; 608};
603extern spinlock_t files_lock; 609extern spinlock_t files_lock;
604#define file_list_lock() spin_lock(&files_lock); 610#define file_list_lock() spin_lock(&files_lock);
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 70f54af87b9f..114d5d59f695 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -47,16 +47,21 @@
47struct gianfar_platform_data { 47struct gianfar_platform_data {
48 /* device specific information */ 48 /* device specific information */
49 u32 device_flags; 49 u32 device_flags;
50 u32 phy_reg_addr;
51 50
52 /* board specific information */ 51 /* board specific information */
53 u32 board_flags; 52 u32 board_flags;
54 u32 phy_flags; 53 const char *bus_id;
55 u32 phyid;
56 u32 interruptPHY;
57 u8 mac_addr[6]; 54 u8 mac_addr[6];
58}; 55};
59 56
57struct gianfar_mdio_data {
58 /* device specific information */
59 u32 paddr;
60
61 /* board specific information */
62 int irq[32];
63};
64
60/* Flags related to gianfar device features */ 65/* Flags related to gianfar device features */
61#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001 66#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001
62#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002 67#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index acbeb96a3353..f98854c2abd7 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -61,7 +61,6 @@ struct fuse_kstatfs {
61#define FATTR_SIZE (1 << 3) 61#define FATTR_SIZE (1 << 3)
62#define FATTR_ATIME (1 << 4) 62#define FATTR_ATIME (1 << 4)
63#define FATTR_MTIME (1 << 5) 63#define FATTR_MTIME (1 << 5)
64#define FATTR_CTIME (1 << 6)
65 64
66/** 65/**
67 * Flags returned by the OPEN request 66 * Flags returned by the OPEN request
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index cd623eccdbea..2401dea2b867 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -12,6 +12,7 @@
12#include <asm/io.h> 12#include <asm/io.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/timer.h>
15 16
16struct gameport { 17struct gameport {
17 18
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index f88577ca3b3a..5e19a7ba69b2 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -31,6 +31,7 @@
31#include <linux/i2c-id.h> 31#include <linux/i2c-id.h>
32#include <linux/mod_devicetable.h> 32#include <linux/mod_devicetable.h>
33#include <linux/device.h> /* for struct device */ 33#include <linux/device.h> /* for struct device */
34#include <linux/sched.h> /* for completion */
34#include <asm/semaphore.h> 35#include <asm/semaphore.h>
35 36
36/* --- For i2c-isa ---------------------------------------------------- */ 37/* --- For i2c-isa ---------------------------------------------------- */
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 92300325dbcd..d79c8a4bc4f8 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -25,10 +25,14 @@
25/* How many different OSM's are we allowing */ 25/* How many different OSM's are we allowing */
26#define I2O_MAX_DRIVERS 8 26#define I2O_MAX_DRIVERS 8
27 27
28#include <asm/io.h>
29#include <asm/semaphore.h> /* Needed for MUTEX init macros */
30#include <linux/pci.h> 28#include <linux/pci.h>
31#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/string.h>
31#include <linux/slab.h>
32#include <linux/workqueue.h> /* work_struct */
33
34#include <asm/io.h>
35#include <asm/semaphore.h> /* Needed for MUTEX init macros */
32 36
33/* message queue empty */ 37/* message queue empty */
34#define I2O_QUEUE_EMPTY 0xffffffff 38#define I2O_QUEUE_EMPTY 0xffffffff
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 4367ce4db52a..f1925ccc9fe1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -307,7 +307,7 @@ struct sysinfo {
307 char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */ 307 char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
308}; 308};
309 309
310/* Force a compilation error if condition is false */ 310/* Force a compilation error if condition is true */
311#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) 311#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
312 312
313#ifdef CONFIG_SYSCTL 313#ifdef CONFIG_SYSCTL
diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h
index 7a2e332067c3..e8b8a7a5c496 100644
--- a/include/linux/key-ui.h
+++ b/include/linux/key-ui.h
@@ -24,7 +24,8 @@ extern spinlock_t key_serial_lock;
24#define KEY_WRITE 0x04 /* require permission to update / modify */ 24#define KEY_WRITE 0x04 /* require permission to update / modify */
25#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */ 25#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */
26#define KEY_LINK 0x10 /* require permission to link */ 26#define KEY_LINK 0x10 /* require permission to link */
27#define KEY_ALL 0x1f /* all the above permissions */ 27#define KEY_SETATTR 0x20 /* require permission to change attributes */
28#define KEY_ALL 0x3f /* all the above permissions */
28 29
29/* 30/*
30 * the keyring payload contains a list of the keys to which the keyring is 31 * the keyring payload contains a list of the keys to which the keyring is
diff --git a/include/linux/key.h b/include/linux/key.h
index f1efa016dbf3..53513a3be53b 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -40,28 +40,32 @@ struct key;
40#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */ 40#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */
41#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */ 41#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */
42#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */ 42#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */
43#define KEY_POS_ALL 0x1f000000 43#define KEY_POS_SETATTR 0x20000000 /* possessor can set key attributes */
44#define KEY_POS_ALL 0x3f000000
44 45
45#define KEY_USR_VIEW 0x00010000 /* user permissions... */ 46#define KEY_USR_VIEW 0x00010000 /* user permissions... */
46#define KEY_USR_READ 0x00020000 47#define KEY_USR_READ 0x00020000
47#define KEY_USR_WRITE 0x00040000 48#define KEY_USR_WRITE 0x00040000
48#define KEY_USR_SEARCH 0x00080000 49#define KEY_USR_SEARCH 0x00080000
49#define KEY_USR_LINK 0x00100000 50#define KEY_USR_LINK 0x00100000
50#define KEY_USR_ALL 0x001f0000 51#define KEY_USR_SETATTR 0x00200000
52#define KEY_USR_ALL 0x003f0000
51 53
52#define KEY_GRP_VIEW 0x00000100 /* group permissions... */ 54#define KEY_GRP_VIEW 0x00000100 /* group permissions... */
53#define KEY_GRP_READ 0x00000200 55#define KEY_GRP_READ 0x00000200
54#define KEY_GRP_WRITE 0x00000400 56#define KEY_GRP_WRITE 0x00000400
55#define KEY_GRP_SEARCH 0x00000800 57#define KEY_GRP_SEARCH 0x00000800
56#define KEY_GRP_LINK 0x00001000 58#define KEY_GRP_LINK 0x00001000
57#define KEY_GRP_ALL 0x00001f00 59#define KEY_GRP_SETATTR 0x00002000
60#define KEY_GRP_ALL 0x00003f00
58 61
59#define KEY_OTH_VIEW 0x00000001 /* third party permissions... */ 62#define KEY_OTH_VIEW 0x00000001 /* third party permissions... */
60#define KEY_OTH_READ 0x00000002 63#define KEY_OTH_READ 0x00000002
61#define KEY_OTH_WRITE 0x00000004 64#define KEY_OTH_WRITE 0x00000004
62#define KEY_OTH_SEARCH 0x00000008 65#define KEY_OTH_SEARCH 0x00000008
63#define KEY_OTH_LINK 0x00000010 66#define KEY_OTH_LINK 0x00000010
64#define KEY_OTH_ALL 0x0000001f 67#define KEY_OTH_SETATTR 0x00000020
68#define KEY_OTH_ALL 0x0000003f
65 69
66struct seq_file; 70struct seq_file;
67struct user_struct; 71struct user_struct;
@@ -119,6 +123,7 @@ struct key {
119 struct key_type *type; /* type of key */ 123 struct key_type *type; /* type of key */
120 struct rw_semaphore sem; /* change vs change sem */ 124 struct rw_semaphore sem; /* change vs change sem */
121 struct key_user *user; /* owner of this key */ 125 struct key_user *user; /* owner of this key */
126 void *security; /* security data for this key */
122 time_t expiry; /* time at which key expires (or 0) */ 127 time_t expiry; /* time at which key expires (or 0) */
123 uid_t uid; 128 uid_t uid;
124 gid_t gid; 129 gid_t gid;
diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h
index b6cc10bf8dfc..cbe7d8008042 100644
--- a/include/linux/kobj_map.h
+++ b/include/linux/kobj_map.h
@@ -1,5 +1,7 @@
1#ifdef __KERNEL__ 1#ifdef __KERNEL__
2 2
3#include <asm/semaphore.h>
4
3typedef struct kobject *kobj_probe_t(dev_t, int *, void *); 5typedef struct kobject *kobj_probe_t(dev_t, int *, void *);
4struct kobj_map; 6struct kobj_map;
5 7
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 3fa786448db3..ebdd41fd1082 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -70,6 +70,18 @@ void kthread_bind(struct task_struct *k, unsigned int cpu);
70int kthread_stop(struct task_struct *k); 70int kthread_stop(struct task_struct *k);
71 71
72/** 72/**
73 * kthread_stop_sem: stop a thread created by kthread_create().
74 * @k: thread created by kthread_create().
75 * @s: semaphore that @k waits on while idle.
76 *
77 * Does essentially the same thing as kthread_stop() above, but wakes
78 * @k by calling up(@s).
79 *
80 * Returns the result of threadfn(), or -EINTR if wake_up_process()
81 * was never called. */
82int kthread_stop_sem(struct task_struct *k, struct semaphore *s);
83
84/**
73 * kthread_should_stop: should this kthread return now? 85 * kthread_should_stop: should this kthread return now?
74 * 86 *
75 * When someone calls kthread_stop on your kthread, it will be woken 87 * When someone calls kthread_stop on your kthread, it will be woken
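
The new kthread_stop_sem() exists for threads that idle on a semaphore rather than in a plain sleep, so the stopper can wake them with up() on that same semaphore. A hedged sketch of the pairing (names invented):

    /* Illustrative only: a worker that sleeps on 'work_sem' while idle
     * and is torn down with kthread_stop_sem(). */
    static DECLARE_MUTEX_LOCKED(work_sem);

    static int worker_fn(void *unused)
    {
        while (!kthread_should_stop()) {
            if (down_interruptible(&work_sem))
                continue;
            if (kthread_should_stop())
                break;
            /* ... handle one unit of queued work ... */
        }
        return 0;
    }

    /* shutdown path: kthread_stop_sem(worker_task, &work_sem); */
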
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 00a8a5738858..0ba3af7a1236 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -172,6 +172,13 @@ enum hsm_task_states {
172 HSM_ST_ERR, 172 HSM_ST_ERR,
173}; 173};
174 174
175enum ata_completion_errors {
176 AC_ERR_OTHER = (1 << 0),
177 AC_ERR_DEV = (1 << 1),
178 AC_ERR_ATA_BUS = (1 << 2),
179 AC_ERR_HOST_BUS = (1 << 3),
180};
181
175/* forward declarations */ 182/* forward declarations */
176struct scsi_device; 183struct scsi_device;
177struct ata_port_operations; 184struct ata_port_operations;
@@ -179,7 +186,7 @@ struct ata_port;
179struct ata_queued_cmd; 186struct ata_queued_cmd;
180 187
181/* typedefs */ 188/* typedefs */
182typedef int (*ata_qc_cb_t) (struct ata_queued_cmd *qc, u8 drv_stat); 189typedef int (*ata_qc_cb_t) (struct ata_queued_cmd *qc, unsigned int err_mask);
183 190
184struct ata_ioports { 191struct ata_ioports {
185 unsigned long cmd_addr; 192 unsigned long cmd_addr;
@@ -347,7 +354,6 @@ struct ata_port_operations {
347 void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); 354 void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
348 u8 (*check_status)(struct ata_port *ap); 355 u8 (*check_status)(struct ata_port *ap);
349 u8 (*check_altstatus)(struct ata_port *ap); 356 u8 (*check_altstatus)(struct ata_port *ap);
350 u8 (*check_err)(struct ata_port *ap);
351 void (*dev_select)(struct ata_port *ap, unsigned int device); 357 void (*dev_select)(struct ata_port *ap, unsigned int device);
352 358
353 void (*phy_reset) (struct ata_port *ap); 359 void (*phy_reset) (struct ata_port *ap);
@@ -434,7 +440,6 @@ extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
434extern void ata_std_dev_select (struct ata_port *ap, unsigned int device); 440extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
435extern u8 ata_check_status(struct ata_port *ap); 441extern u8 ata_check_status(struct ata_port *ap);
436extern u8 ata_altstatus(struct ata_port *ap); 442extern u8 ata_altstatus(struct ata_port *ap);
437extern u8 ata_chk_err(struct ata_port *ap);
438extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 443extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
439extern int ata_port_start (struct ata_port *ap); 444extern int ata_port_start (struct ata_port *ap);
440extern void ata_port_stop (struct ata_port *ap); 445extern void ata_port_stop (struct ata_port *ap);
@@ -455,7 +460,7 @@ extern void ata_bmdma_start (struct ata_queued_cmd *qc);
455extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 460extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
456extern u8 ata_bmdma_status(struct ata_port *ap); 461extern u8 ata_bmdma_status(struct ata_port *ap);
457extern void ata_bmdma_irq_clear(struct ata_port *ap); 462extern void ata_bmdma_irq_clear(struct ata_port *ap);
458extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat); 463extern void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask);
459extern void ata_eng_timeout(struct ata_port *ap); 464extern void ata_eng_timeout(struct ata_port *ap);
460extern void ata_scsi_simulate(u16 *id, struct scsi_cmnd *cmd, 465extern void ata_scsi_simulate(u16 *id, struct scsi_cmnd *cmd,
461 void (*done)(struct scsi_cmnd *)); 466 void (*done)(struct scsi_cmnd *));
@@ -718,4 +723,21 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
718 ata_id_has_flush_ext(dev->id); 723 ata_id_has_flush_ext(dev->id);
719} 724}
720 725
726static inline unsigned int ac_err_mask(u8 status)
727{
728 if (status & ATA_BUSY)
729 return AC_ERR_ATA_BUS;
730 if (status & (ATA_ERR | ATA_DF))
731 return AC_ERR_DEV;
732 return 0;
733}
734
735static inline unsigned int __ac_err_mask(u8 status)
736{
737 unsigned int mask = ac_err_mask(status);
738 if (mask == 0)
739 return AC_ERR_OTHER;
740 return mask;
741}
742
721#endif /* __LINUX_LIBATA_H__ */ 743#endif /* __LINUX_LIBATA_H__ */
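
ac_err_mask() folds an ATA status byte into the new error-mask bits (BSY means the bus is wedged, ERR/DF means the device complained), and __ac_err_mask() guarantees a non-zero result for callers that already know the command failed. A standalone check of that mapping; the ATA_* values below are the standard ATA status-register bits, restated here so the fragment builds on its own:

    #include <stdio.h>

    /* Standard ATA status-register bits, restated for a standalone build. */
    #define ATA_BUSY 0x80   /* BSY */
    #define ATA_DF   0x20   /* device fault */
    #define ATA_ERR  0x01   /* error */

    #define AC_ERR_OTHER   (1 << 0)
    #define AC_ERR_DEV     (1 << 1)
    #define AC_ERR_ATA_BUS (1 << 2)

    static unsigned int ac_err_mask(unsigned char status)
    {
        if (status & ATA_BUSY)
            return AC_ERR_ATA_BUS;
        if (status & (ATA_ERR | ATA_DF))
            return AC_ERR_DEV;
        return 0;
    }

    int main(void)
    {
        printf("BSY set      -> mask %#x\n", ac_err_mask(0x80));
        printf("ERR set      -> mask %#x\n", ac_err_mask(0x51));
        printf("clean status -> mask %#x\n", ac_err_mask(0x50));
        return 0;
    }
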
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 7af8cb836e78..8b67cf837ca9 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -154,6 +154,7 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
154 154
155extern void numa_default_policy(void); 155extern void numa_default_policy(void);
156extern void numa_policy_init(void); 156extern void numa_policy_init(void);
157extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
157extern struct mempolicy default_policy; 158extern struct mempolicy default_policy;
158 159
159#else 160#else
@@ -226,6 +227,11 @@ static inline void numa_default_policy(void)
226{ 227{
227} 228}
228 229
230static inline void numa_policy_rebind(const nodemask_t *old,
231 const nodemask_t *new)
232{
233}
234
229#endif /* CONFIG_NUMA */ 235#endif /* CONFIG_NUMA */
230#endif /* __KERNEL__ */ 236#endif /* __KERNEL__ */
231 237
diff --git a/include/linux/module.h b/include/linux/module.h
index f05372b7fe77..84d75f3a8aca 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -554,7 +554,9 @@ static inline void MODULE_PARM_(void) { }
554#ifdef MODULE 554#ifdef MODULE
555/* DEPRECATED: Do not use. */ 555/* DEPRECATED: Do not use. */
556#define MODULE_PARM(var,type) \ 556#define MODULE_PARM(var,type) \
557struct obsolete_modparm __parm_##var __attribute__((section("__obsparm"))) = \ 557extern struct obsolete_modparm __parm_##var \
558__attribute__((section("__obsparm"))); \
559struct obsolete_modparm __parm_##var = \
558{ __stringify(var), type, &MODULE_PARM_ }; \ 560{ __stringify(var), type, &MODULE_PARM_ }; \
559__MODULE_PARM_TYPE(var, type); 561__MODULE_PARM_TYPE(var, type);
560#else 562#else
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 9a3d27257984..941da5c016a0 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -282,6 +282,17 @@ static inline u8 fat_attr(struct inode *inode)
282 MSDOS_I(inode)->i_attrs; 282 MSDOS_I(inode)->i_attrs;
283} 283}
284 284
285static inline unsigned char fat_checksum(const __u8 *name)
286{
287 unsigned char s = name[0];
288 s = (s<<7) + (s>>1) + name[1]; s = (s<<7) + (s>>1) + name[2];
289 s = (s<<7) + (s>>1) + name[3]; s = (s<<7) + (s>>1) + name[4];
290 s = (s<<7) + (s>>1) + name[5]; s = (s<<7) + (s>>1) + name[6];
291 s = (s<<7) + (s>>1) + name[7]; s = (s<<7) + (s>>1) + name[8];
292 s = (s<<7) + (s>>1) + name[9]; s = (s<<7) + (s>>1) + name[10];
293 return s;
294}
295
285static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus) 296static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus)
286{ 297{
287 return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus 298 return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus
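
fat_checksum() is the usual FAT rotate-right-and-add over the 11-byte 8.3 name, the value long-name directory entries carry to tie them to their short entry. The unrolled kernel version is equivalent to the small standalone loop below:

    #include <stdio.h>

    /* Same rotate-right-by-one-then-add over the 11-byte 8.3 name as the
     * kernel helper, written as a loop for a standalone test. */
    static unsigned char fat_checksum(const unsigned char *name)
    {
        unsigned char s = 0;
        int i;

        for (i = 0; i < 11; i++)
            s = (unsigned char)((s << 7) + (s >> 1) + name[i]);
        return s;
    }

    int main(void)
    {
        /* "HELLO.TXT" stored as the padded 8.3 name "HELLO   TXT" */
        const unsigned char name[] = "HELLO   TXT";

        printf("checksum = 0x%02x\n", fat_checksum(name));
        return 0;
    }
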
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 142963f01d29..fc28841f3409 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -8,7 +8,10 @@
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/string.h>
12
11#include <linux/mtd/compatmac.h> 13#include <linux/mtd/compatmac.h>
14
12#include <asm/unaligned.h> 15#include <asm/unaligned.h>
13#include <asm/system.h> 16#include <asm/system.h>
14#include <asm/io.h> 17#include <asm/io.h>
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index e96fe9062500..4726ef7ba8e8 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -12,6 +12,8 @@
12 * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c. 12 * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
13 * For details of nodelist_scnprintf() and nodelist_parse(), see 13 * For details of nodelist_scnprintf() and nodelist_parse(), see
14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. 14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
15 * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c.
16 * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c.
15 * 17 *
16 * The available nodemask operations are: 18 * The available nodemask operations are:
17 * 19 *
@@ -52,6 +54,8 @@
52 * int nodemask_parse(ubuf, ulen, mask) Parse ascii string as nodemask 54 * int nodemask_parse(ubuf, ulen, mask) Parse ascii string as nodemask
53 * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing 55 * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
54 * int nodelist_parse(buf, map) Parse ascii string as nodelist 56 * int nodelist_parse(buf, map) Parse ascii string as nodelist
57 * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
 58 * int nodes_remap(dst, src, old, new)	*dst = map(old, new)(src)
55 * 59 *
56 * for_each_node_mask(node, mask) for-loop node over mask 60 * for_each_node_mask(node, mask) for-loop node over mask
57 * 61 *
@@ -307,6 +311,22 @@ static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
307 return bitmap_parselist(buf, dstp->bits, nbits); 311 return bitmap_parselist(buf, dstp->bits, nbits);
308} 312}
309 313
314#define node_remap(oldbit, old, new) \
315 __node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
316static inline int __node_remap(int oldbit,
317 const nodemask_t *oldp, const nodemask_t *newp, int nbits)
318{
319 return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
320}
321
322#define nodes_remap(dst, src, old, new) \
323 __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
324static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
325 const nodemask_t *oldp, const nodemask_t *newp, int nbits)
326{
327 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
328}
329
310#if MAX_NUMNODES > 1 330#if MAX_NUMNODES > 1
311#define for_each_node_mask(node, mask) \ 331#define for_each_node_mask(node, mask) \
312 for ((node) = first_node(mask); \ 332 for ((node) = first_node(mask); \
diff --git a/include/linux/pm.h b/include/linux/pm.h
index c61d5de837ef..1514098d156d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -170,6 +170,7 @@ typedef int __bitwise suspend_disk_method_t;
170 170
171struct pm_ops { 171struct pm_ops {
172 suspend_disk_method_t pm_disk_mode; 172 suspend_disk_method_t pm_disk_mode;
173 int (*valid)(suspend_state_t state);
173 int (*prepare)(suspend_state_t state); 174 int (*prepare)(suspend_state_t state);
174 int (*enter)(suspend_state_t state); 175 int (*enter)(suspend_state_t state);
175 int (*finish)(suspend_state_t state); 176 int (*finish)(suspend_state_t state);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 70191a5a148f..cce25591eec2 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -275,6 +275,7 @@ static inline int rcu_pending(int cpu)
275extern void rcu_init(void); 275extern void rcu_init(void);
276extern void rcu_check_callbacks(int cpu, int user); 276extern void rcu_check_callbacks(int cpu, int user);
277extern void rcu_restart_cpu(int cpu); 277extern void rcu_restart_cpu(int cpu);
278extern long rcu_batches_completed(void);
278 279
279/* Exported interfaces */ 280/* Exported interfaces */
280extern void FASTCALL(call_rcu(struct rcu_head *head, 281extern void FASTCALL(call_rcu(struct rcu_head *head,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c30bc308ef1..03b68a7b4b82 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -940,7 +940,7 @@ extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
940#else 940#else
941static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) 941static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
942{ 942{
943 if (!cpus_intersects(new_mask, cpu_online_map)) 943 if (!cpu_isset(0, new_mask))
944 return -EINVAL; 944 return -EINVAL;
945 return 0; 945 return 0;
946} 946}
@@ -1084,6 +1084,11 @@ extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned lon
1084#define SEND_SIG_PRIV ((struct siginfo *) 1) 1084#define SEND_SIG_PRIV ((struct siginfo *) 1)
1085#define SEND_SIG_FORCED ((struct siginfo *) 2) 1085#define SEND_SIG_FORCED ((struct siginfo *) 2)
1086 1086
1087static inline int is_si_special(const struct siginfo *info)
1088{
1089 return info <= SEND_SIG_FORCED;
1090}
1091
1087/* True if we are on the alternate signal stack. */ 1092/* True if we are on the alternate signal stack. */
1088 1093
1089static inline int on_sig_stack(unsigned long sp) 1094static inline int on_sig_stack(unsigned long sp)
@@ -1211,7 +1216,7 @@ extern void unhash_process(struct task_struct *p);
1211/* 1216/*
1212 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring 1217 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
1213 * subscriptions and synchronises with wait4(). Also used in procfs. Also 1218 * subscriptions and synchronises with wait4(). Also used in procfs. Also
1214 * pins the final release of task.io_context. 1219 * pins the final release of task.io_context. Also protects ->cpuset.
1215 * 1220 *
1216 * Nests both inside and outside of read_lock(&tasklist_lock). 1221 * Nests both inside and outside of read_lock(&tasklist_lock).
1217 * It must not be nested with write_lock_irq(&tasklist_lock), 1222 * It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/linux/security.h b/include/linux/security.h
index dac956ed98f0..f7e0ae018712 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -30,6 +30,7 @@
30#include <linux/shm.h> 30#include <linux/shm.h>
31#include <linux/msg.h> 31#include <linux/msg.h>
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/key.h>
33 34
34struct ctl_table; 35struct ctl_table;
35 36
@@ -385,6 +386,9 @@ struct swap_info_struct;
385 * NULL to request the size of the buffer required. @size indicates 386 * NULL to request the size of the buffer required. @size indicates
386 * the size of @buffer in bytes. Note that @name is the remainder 387 * the size of @buffer in bytes. Note that @name is the remainder
387 * of the attribute name after the security. prefix has been removed. 388 * of the attribute name after the security. prefix has been removed.
389 * @err is the return value from the preceding fs getxattr call,
390 * and can be used by the security module to determine whether it
391 * should try and canonicalize the attribute value.
388 * Return number of bytes used/required on success. 392 * Return number of bytes used/required on success.
389 * @inode_setsecurity: 393 * @inode_setsecurity:
390 * Set the security label associated with @name for @inode from the 394 * Set the security label associated with @name for @inode from the
@@ -785,6 +789,27 @@ struct swap_info_struct;
785 * @sk_free_security: 789 * @sk_free_security:
786 * Deallocate security structure. 790 * Deallocate security structure.
787 * 791 *
792 * Security hooks affecting all Key Management operations
793 *
794 * @key_alloc:
795 * Permit allocation of a key and assign security data. Note that key does
796 * not have a serial number assigned at this point.
797 * @key points to the key.
798 * Return 0 if permission is granted, -ve error otherwise.
799 * @key_free:
800 * Notification of destruction; free security data.
801 * @key points to the key.
802 * No return value.
803 * @key_permission:
804 * See whether a specific operational right is granted to a process on a
805 * key.
806 * @key_ref refers to the key (key pointer + possession attribute bit).
807 * @context points to the process to provide the context against which to
808 * evaluate the security data on the key.
809 * @perm describes the combination of permissions required of this key.
810 *	Return 1 if permission granted, 0 if permission denied and -ve if the
811 * normal permissions model should be effected.
812 *
788 * Security hooks affecting all System V IPC operations. 813 * Security hooks affecting all System V IPC operations.
789 * 814 *
790 * @ipc_permission: 815 * @ipc_permission:
@@ -1091,7 +1116,7 @@ struct security_operations {
1091 int (*inode_getxattr) (struct dentry *dentry, char *name); 1116 int (*inode_getxattr) (struct dentry *dentry, char *name);
1092 int (*inode_listxattr) (struct dentry *dentry); 1117 int (*inode_listxattr) (struct dentry *dentry);
1093 int (*inode_removexattr) (struct dentry *dentry, char *name); 1118 int (*inode_removexattr) (struct dentry *dentry, char *name);
1094 int (*inode_getsecurity)(struct inode *inode, const char *name, void *buffer, size_t size); 1119 int (*inode_getsecurity)(struct inode *inode, const char *name, void *buffer, size_t size, int err);
1095 int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags); 1120 int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags);
1096 int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size); 1121 int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size);
1097 1122
@@ -1213,6 +1238,17 @@ struct security_operations {
1213 int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); 1238 int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
1214 void (*sk_free_security) (struct sock *sk); 1239 void (*sk_free_security) (struct sock *sk);
1215#endif /* CONFIG_SECURITY_NETWORK */ 1240#endif /* CONFIG_SECURITY_NETWORK */
1241
1242 /* key management security hooks */
1243#ifdef CONFIG_KEYS
1244 int (*key_alloc)(struct key *key);
1245 void (*key_free)(struct key *key);
1246 int (*key_permission)(key_ref_t key_ref,
1247 struct task_struct *context,
1248 key_perm_t perm);
1249
1250#endif /* CONFIG_KEYS */
1251
1216}; 1252};
1217 1253
1218/* global variables */ 1254/* global variables */
@@ -1580,11 +1616,11 @@ static inline int security_inode_removexattr (struct dentry *dentry, char *name)
1580 return security_ops->inode_removexattr (dentry, name); 1616 return security_ops->inode_removexattr (dentry, name);
1581} 1617}
1582 1618
1583static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size) 1619static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size, int err)
1584{ 1620{
1585 if (unlikely (IS_PRIVATE (inode))) 1621 if (unlikely (IS_PRIVATE (inode)))
1586 return 0; 1622 return 0;
1587 return security_ops->inode_getsecurity(inode, name, buffer, size); 1623 return security_ops->inode_getsecurity(inode, name, buffer, size, err);
1588} 1624}
1589 1625
1590static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) 1626static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
@@ -2222,7 +2258,7 @@ static inline int security_inode_removexattr (struct dentry *dentry, char *name)
2222 return cap_inode_removexattr(dentry, name); 2258 return cap_inode_removexattr(dentry, name);
2223} 2259}
2224 2260
2225static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size) 2261static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size, int err)
2226{ 2262{
2227 return -EOPNOTSUPP; 2263 return -EOPNOTSUPP;
2228} 2264}
@@ -2761,5 +2797,45 @@ static inline void security_sk_free(struct sock *sk)
2761} 2797}
2762#endif /* CONFIG_SECURITY_NETWORK */ 2798#endif /* CONFIG_SECURITY_NETWORK */
2763 2799
2800#ifdef CONFIG_KEYS
2801#ifdef CONFIG_SECURITY
2802static inline int security_key_alloc(struct key *key)
2803{
2804 return security_ops->key_alloc(key);
2805}
2806
2807static inline void security_key_free(struct key *key)
2808{
2809 security_ops->key_free(key);
2810}
2811
2812static inline int security_key_permission(key_ref_t key_ref,
2813 struct task_struct *context,
2814 key_perm_t perm)
2815{
2816 return security_ops->key_permission(key_ref, context, perm);
2817}
2818
2819#else
2820
2821static inline int security_key_alloc(struct key *key)
2822{
2823 return 0;
2824}
2825
2826static inline void security_key_free(struct key *key)
2827{
2828}
2829
2830static inline int security_key_permission(key_ref_t key_ref,
2831 struct task_struct *context,
2832 key_perm_t perm)
2833{
2834 return 0;
2835}
2836
2837#endif
2838#endif /* CONFIG_KEYS */
2839
2764#endif /* ! __LINUX_SECURITY_H */ 2840#endif /* ! __LINUX_SECURITY_H */
2765 2841
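
Putting the three new hook members next to their documentation above, a do-nothing security module would supply stubs along these lines (purely illustrative; when CONFIG_SECURITY is off the inline fallbacks above apply instead):

    /* Illustrative stubs matching the new security_operations members. */
    static int example_key_alloc(struct key *key)
    {
        return 0;    /* permit allocation, attach no security data */
    }

    static void example_key_free(struct key *key)
    {
    }

    static int example_key_permission(key_ref_t key_ref,
                                      struct task_struct *context,
                                      key_perm_t perm)
    {
        return 1;    /* 1 == grant, per the hook description above */
    }
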
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 12cd9cf65e8f..33fc8cb8ddfb 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -11,6 +11,7 @@
11#define _LINUX_SERIAL_H 11#define _LINUX_SERIAL_H
12 12
13#ifdef __KERNEL__ 13#ifdef __KERNEL__
14#include <linux/types.h>
14#include <asm/page.h> 15#include <asm/page.h>
15 16
16/* 17/*
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 7be18b5e2fb4..5dd5f02c5c5f 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -25,7 +25,6 @@
25 25
26struct sigqueue { 26struct sigqueue {
27 struct list_head list; 27 struct list_head list;
28 spinlock_t *lock;
29 int flags; 28 int flags;
30 siginfo_t info; 29 siginfo_t info;
31 struct user_struct *user; 30 struct user_struct *user;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index cdc99a27840d..0e9682c9def5 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -171,23 +171,42 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
171#define write_lock_irq(lock) _write_lock_irq(lock) 171#define write_lock_irq(lock) _write_lock_irq(lock)
172#define write_lock_bh(lock) _write_lock_bh(lock) 172#define write_lock_bh(lock) _write_lock_bh(lock)
173 173
174#define spin_unlock(lock) _spin_unlock(lock) 174/*
175#define write_unlock(lock) _write_unlock(lock) 175 * We inline the unlock functions in the nondebug case:
176#define read_unlock(lock) _read_unlock(lock) 176 */
177#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
178# define spin_unlock(lock) _spin_unlock(lock)
179# define read_unlock(lock) _read_unlock(lock)
180# define write_unlock(lock) _write_unlock(lock)
181#else
182# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
183# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
184# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
185#endif
186
187#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
188# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
189# define read_unlock_irq(lock) _read_unlock_irq(lock)
190# define write_unlock_irq(lock) _write_unlock_irq(lock)
191#else
192# define spin_unlock_irq(lock) \
193 do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
194# define read_unlock_irq(lock) \
195 do { __raw_read_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
196# define write_unlock_irq(lock) \
197 do { __raw_write_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
198#endif
177 199
178#define spin_unlock_irqrestore(lock, flags) \ 200#define spin_unlock_irqrestore(lock, flags) \
179 _spin_unlock_irqrestore(lock, flags) 201 _spin_unlock_irqrestore(lock, flags)
180#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
181#define spin_unlock_bh(lock) _spin_unlock_bh(lock) 202#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
182 203
183#define read_unlock_irqrestore(lock, flags) \ 204#define read_unlock_irqrestore(lock, flags) \
184 _read_unlock_irqrestore(lock, flags) 205 _read_unlock_irqrestore(lock, flags)
185#define read_unlock_irq(lock) _read_unlock_irq(lock)
186#define read_unlock_bh(lock) _read_unlock_bh(lock) 206#define read_unlock_bh(lock) _read_unlock_bh(lock)
187 207
188#define write_unlock_irqrestore(lock, flags) \ 208#define write_unlock_irqrestore(lock, flags) \
189 _write_unlock_irqrestore(lock, flags) 209 _write_unlock_irqrestore(lock, flags)
190#define write_unlock_irq(lock) _write_unlock_irq(lock)
191#define write_unlock_bh(lock) _write_unlock_bh(lock) 210#define write_unlock_bh(lock) _write_unlock_bh(lock)
192 211
193#define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) 212#define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock))
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index ba448c760168..a61c04f804b2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -71,7 +71,12 @@ void restore_processor_state(void);
71struct saved_context; 71struct saved_context;
72void __save_processor_state(struct saved_context *ctxt); 72void __save_processor_state(struct saved_context *ctxt);
73void __restore_processor_state(struct saved_context *ctxt); 73void __restore_processor_state(struct saved_context *ctxt);
74extern unsigned long get_usable_page(gfp_t gfp_mask); 74unsigned long get_safe_page(gfp_t gfp_mask);
75extern void free_eaten_memory(void); 75
76/*
77 * XXX: We try to keep some more pages free so that I/O operations succeed
78 * without paging. Might this be more?
79 */
80#define PAGES_FOR_IO 512
76 81
77#endif /* _LINUX_SWSUSP_H */ 82#endif /* _LINUX_SWSUSP_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a6f03e473737..c7007b1db91d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -491,6 +491,7 @@ asmlinkage long sys_nfsservctl(int cmd,
491asmlinkage long sys_syslog(int type, char __user *buf, int len); 491asmlinkage long sys_syslog(int type, char __user *buf, int len);
492asmlinkage long sys_uselib(const char __user *library); 492asmlinkage long sys_uselib(const char __user *library);
493asmlinkage long sys_ni_syscall(void); 493asmlinkage long sys_ni_syscall(void);
494asmlinkage long sys_ptrace(long request, long pid, long addr, long data);
494 495
495asmlinkage long sys_add_key(const char __user *_type, 496asmlinkage long sys_add_key(const char __user *_type,
496 const char __user *_description, 497 const char __user *_description,
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index fc5bb4e91a58..7dac8f04d28e 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -8,6 +8,7 @@
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/err.h> 10#include <linux/err.h>
11#include <linux/slab.h>
11 12
12struct ts_config; 13struct ts_config;
13 14
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 3340f3bd135d..72f3a7781106 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -12,16 +12,12 @@ struct timer_list {
12 struct list_head entry; 12 struct list_head entry;
13 unsigned long expires; 13 unsigned long expires;
14 14
15 unsigned long magic;
16
17 void (*function)(unsigned long); 15 void (*function)(unsigned long);
18 unsigned long data; 16 unsigned long data;
19 17
20 struct timer_base_s *base; 18 struct timer_base_s *base;
21}; 19};
22 20
23#define TIMER_MAGIC 0x4b87ad6e
24
25extern struct timer_base_s __init_timer_base; 21extern struct timer_base_s __init_timer_base;
26 22
27#define TIMER_INITIALIZER(_function, _expires, _data) { \ 23#define TIMER_INITIALIZER(_function, _expires, _data) { \
@@ -29,7 +25,6 @@ extern struct timer_base_s __init_timer_base;
29 .expires = (_expires), \ 25 .expires = (_expires), \
30 .data = (_data), \ 26 .data = (_data), \
31 .base = &__init_timer_base, \ 27 .base = &__init_timer_base, \
32 .magic = TIMER_MAGIC, \
33 } 28 }
34 29
35#define DEFINE_TIMER(_name, _function, _expires, _data) \ 30#define DEFINE_TIMER(_name, _function, _expires, _data) \
@@ -38,6 +33,15 @@ extern struct timer_base_s __init_timer_base;
38 33
39void fastcall init_timer(struct timer_list * timer); 34void fastcall init_timer(struct timer_list * timer);
40 35
36static inline void setup_timer(struct timer_list * timer,
37 void (*function)(unsigned long),
38 unsigned long data)
39{
40 timer->function = function;
41 timer->data = data;
42 init_timer(timer);
43}
44
41/*** 45/***
42 * timer_pending - is a timer pending? 46 * timer_pending - is a timer pending?
43 * @timer: the timer in question 47 * @timer: the timer in question
@@ -74,8 +78,9 @@ extern unsigned long next_timer_interrupt(void);
74 * Timers with an ->expired field in the past will be executed in the next 78 * Timers with an ->expired field in the past will be executed in the next
75 * timer tick. 79 * timer tick.
76 */ 80 */
77static inline void add_timer(struct timer_list * timer) 81static inline void add_timer(struct timer_list *timer)
78{ 82{
83 BUG_ON(timer_pending(timer));
79 __mod_timer(timer, timer->expires); 84 __mod_timer(timer, timer->expires);
80} 85}
81 86
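A minimal usage sketch for the setup_timer() helper introduced in this hunk, against the 2.6-era timer API (the handler takes an unsigned long and the data field carries the context pointer). The my_dev structure and my_timeout handler are hypothetical names, not part of this patch:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list poll_timer;
};

/* hypothetical timeout handler: 'data' carries the device pointer */
static void my_timeout(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* ... do the periodic work, then re-arm ... */
	mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void my_dev_start(struct my_dev *dev)
{
	/* one call replaces init_timer() plus the two field assignments */
	setup_timer(&dev->poll_timer, my_timeout, (unsigned long)dev);
	dev->poll_timer.expires = jiffies + HZ;
	add_timer(&dev->poll_timer);	/* the new BUG_ON fires if it is already pending */
}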
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 7e050a2cc35b..04a4a8cb4ed3 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -282,6 +282,13 @@ static inline int ntp_synced(void)
282 return !(time_status & STA_UNSYNC); 282 return !(time_status & STA_UNSYNC);
283} 283}
284 284
285/* Required to safely shift negative values */
286#define shift_right(x, s) ({ \
287 __typeof__(x) __x = (x); \
288 __typeof__(s) __s = (s); \
289 __x < 0 ? -(-__x >> __s) : __x >> __s; \
290})
291
285 292
286#ifdef CONFIG_TIME_INTERPOLATION 293#ifdef CONFIG_TIME_INTERPOLATION
287 294
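The shift_right() macro exists because right-shifting a negative signed integer is implementation-defined in C, and even on two's-complement machines an arithmetic shift rounds toward negative infinity rather than toward zero. An illustrative sketch of the difference (not from the patch):

static long shift_demo(void)
{
	long adj = -7;
	long plain   = adj >> 1;		/* typically -4: arithmetic shift rounds down */
	long rounded = shift_right(adj, 1);	/* -(7 >> 1) == -3: rounds toward zero */

	return plain + rounded;
}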
diff --git a/include/linux/zutil.h b/include/linux/zutil.h
index fdfd5ed41ec4..ee0c59cf2136 100644
--- a/include/linux/zutil.h
+++ b/include/linux/zutil.h
@@ -15,7 +15,6 @@
15 15
16#include <linux/zlib.h> 16#include <linux/zlib.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/errno.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20 19
21typedef unsigned char uch; 20typedef unsigned char uch;
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index c8592c7e8eaa..e788bbc5657d 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -17,6 +17,7 @@
17 17
18#include <linux/config.h> 18#include <linux/config.h>
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/sched.h> /* task_struct, completion */
20 21
21#include <pcmcia/cs_types.h> 22#include <pcmcia/cs_types.h>
22#include <pcmcia/cs.h> 23#include <pcmcia/cs.h>
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index e6b61fab66dd..7529f4388bb4 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -4,6 +4,7 @@
4#include <linux/dma-mapping.h> 4#include <linux/dma-mapping.h>
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/timer.h>
7 8
8struct request; 9struct request;
9struct scatterlist; 10struct scatterlist;
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index b0d445437372..c04405bead2d 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -28,6 +28,7 @@
28#define SCSI_TRANSPORT_FC_H 28#define SCSI_TRANSPORT_FC_H
29 29
30#include <linux/config.h> 30#include <linux/config.h>
31#include <linux/sched.h>
31 32
32struct scsi_transport_template; 33struct scsi_transport_template;
33 34
diff --git a/init/Kconfig b/init/Kconfig
index d5a1a1228fab..3dcbd5bfd498 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -60,8 +60,8 @@ config INIT_ENV_ARG_LIMIT
60 default 32 if !USERMODE 60 default 32 if !USERMODE
61 default 128 if USERMODE 61 default 128 if USERMODE
62 help 62 help
63 This is the value of the two limits on the number of argument and of 63 Maximum of each of the number of arguments and environment
64 env.var passed to init from the kernel command line. 64 variables passed to init from the kernel command line.
65 65
66endmenu 66endmenu
67 67
diff --git a/init/main.c b/init/main.c
index f142d4035341..4075d97e94b1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -64,10 +64,6 @@
64#endif 64#endif
65#endif 65#endif
66 66
67#ifdef CONFIG_X86_LOCAL_APIC
68#include <asm/smp.h>
69#endif
70
71/* 67/*
72 * Versions of gcc older than that listed below may actually compile 68 * Versions of gcc older than that listed below may actually compile
73 * and link okay, but the end product can have subtle run time bugs. 69 * and link okay, but the end product can have subtle run time bugs.
@@ -314,14 +310,7 @@ extern void setup_arch(char **);
314 310
315#ifndef CONFIG_SMP 311#ifndef CONFIG_SMP
316 312
317#ifdef CONFIG_X86_LOCAL_APIC
318static void __init smp_init(void)
319{
320 APIC_init_uniprocessor();
321}
322#else
323#define smp_init() do { } while (0) 313#define smp_init() do { } while (0)
324#endif
325 314
326static inline void setup_per_cpu_areas(void) { } 315static inline void setup_per_cpu_areas(void) { }
327static inline void smp_prepare_cpus(unsigned int maxcpus) { } 316static inline void smp_prepare_cpus(unsigned int maxcpus) { }
diff --git a/kernel/Makefile b/kernel/Makefile
index ff4dc02ce170..4f5a1453093a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_KEXEC) += kexec.o
22obj-$(CONFIG_COMPAT) += compat.o 22obj-$(CONFIG_COMPAT) += compat.o
23obj-$(CONFIG_CPUSETS) += cpuset.o 23obj-$(CONFIG_CPUSETS) += cpuset.o
24obj-$(CONFIG_IKCONFIG) += configs.o 24obj-$(CONFIG_IKCONFIG) += configs.o
25obj-$(CONFIG_IKCONFIG_PROC) += configs.o
26obj-$(CONFIG_STOP_MACHINE) += stop_machine.o 25obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
27obj-$(CONFIG_AUDIT) += audit.o 26obj-$(CONFIG_AUDIT) += audit.o
28obj-$(CONFIG_AUDITSYSCALL) += auditsc.o 27obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
@@ -32,6 +31,7 @@ obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
32obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ 31obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
33obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 32obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
34obj-$(CONFIG_SECCOMP) += seccomp.o 33obj-$(CONFIG_SECCOMP) += seccomp.o
34obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
35 35
36ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) 36ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
37# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is 37# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 53d8263ae12e..3619e939182e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -17,6 +17,7 @@
17 17
18/* This protects CPUs going up and down... */ 18/* This protects CPUs going up and down... */
19DECLARE_MUTEX(cpucontrol); 19DECLARE_MUTEX(cpucontrol);
20EXPORT_SYMBOL_GPL(cpucontrol);
20 21
21static struct notifier_block *cpu_chain; 22static struct notifier_block *cpu_chain;
22 23
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 28176d083f7b..5a737ed9dac7 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -32,6 +32,7 @@
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/kmod.h> 33#include <linux/kmod.h>
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/mempolicy.h>
35#include <linux/mm.h> 36#include <linux/mm.h>
36#include <linux/module.h> 37#include <linux/module.h>
37#include <linux/mount.h> 38#include <linux/mount.h>
@@ -60,6 +61,9 @@ struct cpuset {
60 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ 61 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
61 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */ 62 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
62 63
64 /*
65 * Count is atomic so can incr (fork) or decr (exit) without a lock.
66 */
63 atomic_t count; /* count tasks using this cpuset */ 67 atomic_t count; /* count tasks using this cpuset */
64 68
65 /* 69 /*
@@ -142,80 +146,91 @@ static struct vfsmount *cpuset_mount;
142static struct super_block *cpuset_sb = NULL; 146static struct super_block *cpuset_sb = NULL;
143 147
144/* 148/*
145 * cpuset_sem should be held by anyone who is depending on the children 149 * We have two global cpuset semaphores below. They can nest.
146 * or sibling lists of any cpuset, or performing non-atomic operations 150 * It is ok to first take manage_sem, then nest callback_sem. We also
147 * on the flags or *_allowed values of a cpuset, such as raising the 151 * require taking task_lock() when dereferencing a tasks cpuset pointer.
148 * CS_REMOVED flag bit iff it is not already raised, or reading and 152 * See "The task_lock() exception", at the end of this comment.
149 * conditionally modifying the *_allowed values. One kernel global 153 *
150 * cpuset semaphore should be sufficient - these things don't change 154 * A task must hold both semaphores to modify cpusets. If a task
151 * that much. 155 * holds manage_sem, then it blocks others wanting that semaphore,
152 * 156 * ensuring that it is the only task able to also acquire callback_sem
153 * The code that modifies cpusets holds cpuset_sem across the entire 157 * and be able to modify cpusets. It can perform various checks on
154 * operation, from cpuset_common_file_write() down, single threading 158 * the cpuset structure first, knowing nothing will change. It can
155 * all cpuset modifications (except for counter manipulations from 159 * also allocate memory while just holding manage_sem. While it is
156 * fork and exit) across the system. This presumes that cpuset 160 * performing these checks, various callback routines can briefly
157 * modifications are rare - better kept simple and safe, even if slow. 161 * acquire callback_sem to query cpusets. Once it is ready to make
158 * 162 * the changes, it takes callback_sem, blocking everyone else.
159 * The code that reads cpusets, such as in cpuset_common_file_read() 163 *
160 * and below, only holds cpuset_sem across small pieces of code, such 164 * Calls to the kernel memory allocator can not be made while holding
161 * as when reading out possibly multi-word cpumasks and nodemasks, as 165 * callback_sem, as that would risk double tripping on callback_sem
162 * the risks are less, and the desire for performance a little greater. 166 * from one of the callbacks into the cpuset code from within
163 * The proc_cpuset_show() routine needs to hold cpuset_sem to insure 167 * __alloc_pages().
164 * that no cs->dentry is NULL, as it walks up the cpuset tree to root. 168 *
165 * 169 * If a task is only holding callback_sem, then it has read-only
166 * The hooks from fork and exit, cpuset_fork() and cpuset_exit(), don't 170 * access to cpusets.
167 * (usually) grab cpuset_sem. These are the two most performance 171 *
168 * critical pieces of code here. The exception occurs on exit(), 172 * The task_struct fields mems_allowed and mems_generation may only
169 * when a task in a notify_on_release cpuset exits. Then cpuset_sem 173 * be accessed in the context of that task, so require no locks.
174 *
175 * Any task can increment and decrement the count field without lock.
176 * So in general, code holding manage_sem or callback_sem can't rely
177 * on the count field not changing. However, if the count goes to
178 * zero, then only attach_task(), which holds both semaphores, can
179 * increment it again. Because a count of zero means that no tasks
180 * are currently attached, therefore there is no way a task attached
181 * to that cpuset can fork (the other way to increment the count).
182 * So code holding manage_sem or callback_sem can safely assume that
183 * if the count is zero, it will stay zero. Similarly, if a task
184 * holds manage_sem or callback_sem on a cpuset with zero count, it
185 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
186 * both of those semaphores.
187 *
188 * A possible optimization to improve parallelism would be to make
189 * callback_sem a R/W semaphore (rwsem), allowing the callback routines
190 * to proceed in parallel, with read access, until the holder of
191 * manage_sem needed to take this rwsem for exclusive write access
192 * and modify some cpusets.
193 *
194 * The cpuset_common_file_write handler for operations that modify
195 * the cpuset hierarchy holds manage_sem across the entire operation,
196 * single threading all such cpuset modifications across the system.
197 *
198 * The cpuset_common_file_read() handlers only hold callback_sem across
199 * small pieces of code, such as when reading out possibly multi-word
200 * cpumasks and nodemasks.
201 *
202 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
203 * (usually) take either semaphore. These are the two most performance
204 * critical pieces of code here. The exception occurs on cpuset_exit(),
205 * when a task in a notify_on_release cpuset exits. Then manage_sem
170 * is taken, and if the cpuset count is zero, a usermode call made 206 * is taken, and if the cpuset count is zero, a usermode call made
171 * to /sbin/cpuset_release_agent with the name of the cpuset (path 207 * to /sbin/cpuset_release_agent with the name of the cpuset (path
172 * relative to the root of cpuset file system) as the argument. 208 * relative to the root of cpuset file system) as the argument.
173 * 209 *
174 * A cpuset can only be deleted if both its 'count' of using tasks is 210 * A cpuset can only be deleted if both its 'count' of using tasks
175 * zero, and its list of 'children' cpusets is empty. Since all tasks 211 * is zero, and its list of 'children' cpusets is empty. Since all
176 * in the system use _some_ cpuset, and since there is always at least 212 * tasks in the system use _some_ cpuset, and since there is always at
177 * one task in the system (init, pid == 1), therefore, top_cpuset 213 * least one task in the system (init, pid == 1), therefore, top_cpuset
178 * always has either children cpusets and/or using tasks. So no need 214 * always has either children cpusets and/or using tasks. So we don't
179 * for any special hack to ensure that top_cpuset cannot be deleted. 215 * need a special hack to ensure that top_cpuset cannot be deleted.
216 *
217 * The above "Tale of Two Semaphores" would be complete, but for:
218 *
219 * The task_lock() exception
220 *
221 * The need for this exception arises from the action of attach_task(),
222 * which overwrites one tasks cpuset pointer with another. It does
223 * so using both semaphores, however there are several performance
224 * critical places that need to reference task->cpuset without the
225 * expense of grabbing a system global semaphore. Therefore except as
226 * noted below, when dereferencing or, as in attach_task(), modifying
227 * a tasks cpuset pointer we use task_lock(), which acts on a spinlock
228 * (task->alloc_lock) already in the task_struct routinely used for
229 * such matters.
180 */ 230 */
181 231
182static DECLARE_MUTEX(cpuset_sem); 232static DECLARE_MUTEX(manage_sem);
183static struct task_struct *cpuset_sem_owner; 233static DECLARE_MUTEX(callback_sem);
184static int cpuset_sem_depth;
185
186/*
187 * The global cpuset semaphore cpuset_sem can be needed by the
188 * memory allocator to update a tasks mems_allowed (see the calls
189 * to cpuset_update_current_mems_allowed()) or to walk up the
190 * cpuset hierarchy to find a mem_exclusive cpuset see the calls
191 * to cpuset_excl_nodes_overlap()).
192 *
193 * But if the memory allocation is being done by cpuset.c code, it
194 * usually already holds cpuset_sem. Double tripping on a kernel
195 * semaphore deadlocks the current task, and any other task that
196 * subsequently tries to obtain the lock.
197 *
198 * Run all up's and down's on cpuset_sem through the following
199 * wrappers, which will detect this nested locking, and avoid
200 * deadlocking.
201 */
202
203static inline void cpuset_down(struct semaphore *psem)
204{
205 if (cpuset_sem_owner != current) {
206 down(psem);
207 cpuset_sem_owner = current;
208 }
209 cpuset_sem_depth++;
210}
211
212static inline void cpuset_up(struct semaphore *psem)
213{
214 if (--cpuset_sem_depth == 0) {
215 cpuset_sem_owner = NULL;
216 up(psem);
217 }
218}
219 234
220/* 235/*
221 * A couple of forward declarations required, due to cyclic reference loop: 236 * A couple of forward declarations required, due to cyclic reference loop:
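A compressed sketch of the discipline the comment above describes (the helper and field below are hypothetical, not code from this patch): a writer holds manage_sem for the whole operation, may sleep and allocate while holding only that, and nests callback_sem just around the store that readers could race with.

static int update_some_field(struct cpuset *cs, int newval)
{
	int err;

	down(&manage_sem);			/* single-threads all cpuset modifications */
	err = validate_new_value(cs, newval);	/* hypothetical check; may allocate memory */
	if (err == 0) {
		down(&callback_sem);		/* brief window: exclude read-side callbacks */
		cs->some_field = newval;	/* hypothetical field */
		up(&callback_sem);
	}
	up(&manage_sem);
	return err;
}

Readers such as cpuset_sprintf_cpulist() further down take only callback_sem, so they are blocked just for the duration of the store.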
@@ -390,7 +405,7 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
390} 405}
391 406
392/* 407/*
393 * Call with cpuset_sem held. Writes path of cpuset into buf. 408 * Call with manage_sem held. Writes path of cpuset into buf.
394 * Returns 0 on success, -errno on error. 409 * Returns 0 on success, -errno on error.
395 */ 410 */
396 411
@@ -442,10 +457,11 @@ static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
442 * status of the /sbin/cpuset_release_agent task, so no sense holding 457 * status of the /sbin/cpuset_release_agent task, so no sense holding
443 * our caller up for that. 458 * our caller up for that.
444 * 459 *
445 * The simple act of forking that task might require more memory, 460 * When we had only one cpuset semaphore, we had to call this
446 * which might need cpuset_sem. So this routine must be called while 461 * without holding it, to avoid deadlock when call_usermodehelper()
447 * cpuset_sem is not held, to avoid a possible deadlock. See also 462 * allocated memory. With two locks, we could now call this while
448 * comments for check_for_release(), below. 463 * holding manage_sem, but we still don't, so as to minimize
464 * the time manage_sem is held.
449 */ 465 */
450 466
451static void cpuset_release_agent(const char *pathbuf) 467static void cpuset_release_agent(const char *pathbuf)
@@ -477,15 +493,15 @@ static void cpuset_release_agent(const char *pathbuf)
477 * cs is notify_on_release() and now both the user count is zero and 493 * cs is notify_on_release() and now both the user count is zero and
478 * the list of children is empty, prepare cpuset path in a kmalloc'd 494 * the list of children is empty, prepare cpuset path in a kmalloc'd
479 * buffer, to be returned via ppathbuf, so that the caller can invoke 495 * buffer, to be returned via ppathbuf, so that the caller can invoke
480 * cpuset_release_agent() with it later on, once cpuset_sem is dropped. 496 * cpuset_release_agent() with it later on, once manage_sem is dropped.
481 * Call here with cpuset_sem held. 497 * Call here with manage_sem held.
482 * 498 *
483 * This check_for_release() routine is responsible for kmalloc'ing 499 * This check_for_release() routine is responsible for kmalloc'ing
484 * pathbuf. The above cpuset_release_agent() is responsible for 500 * pathbuf. The above cpuset_release_agent() is responsible for
485 * kfree'ing pathbuf. The caller of these routines is responsible 501 * kfree'ing pathbuf. The caller of these routines is responsible
486 * for providing a pathbuf pointer, initialized to NULL, then 502 * for providing a pathbuf pointer, initialized to NULL, then
487 * calling check_for_release() with cpuset_sem held and the address 503 * calling check_for_release() with manage_sem held and the address
488 * of the pathbuf pointer, then dropping cpuset_sem, then calling 504 * of the pathbuf pointer, then dropping manage_sem, then calling
489 * cpuset_release_agent() with pathbuf, as set by check_for_release(). 505 * cpuset_release_agent() with pathbuf, as set by check_for_release().
490 */ 506 */
491 507
@@ -516,7 +532,7 @@ static void check_for_release(struct cpuset *cs, char **ppathbuf)
516 * One way or another, we guarantee to return some non-empty subset 532 * One way or another, we guarantee to return some non-empty subset
517 * of cpu_online_map. 533 * of cpu_online_map.
518 * 534 *
519 * Call with cpuset_sem held. 535 * Call with callback_sem held.
520 */ 536 */
521 537
522static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) 538static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
@@ -540,7 +556,7 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
540 * One way or another, we guarantee to return some non-empty subset 556 * One way or another, we guarantee to return some non-empty subset
541 * of node_online_map. 557 * of node_online_map.
542 * 558 *
543 * Call with cpuset_sem held. 559 * Call with callback_sem held.
544 */ 560 */
545 561
546static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) 562static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
@@ -555,22 +571,47 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
555} 571}
556 572
557/* 573/*
558 * Refresh current tasks mems_allowed and mems_generation from 574 * Refresh current tasks mems_allowed and mems_generation from current
559 * current tasks cpuset. Call with cpuset_sem held. 575 * tasks cpuset.
560 * 576 *
561 * This routine is needed to update the per-task mems_allowed 577 * Call without callback_sem or task_lock() held. May be called with
562 * data, within the tasks context, when it is trying to allocate 578 * or without manage_sem held. Will acquire task_lock() and might
563 * memory (in various mm/mempolicy.c routines) and notices 579 * acquire callback_sem during call.
564 * that some other task has been modifying its cpuset. 580 *
581 * The task_lock() is required to dereference current->cpuset safely.
582 * Without it, we could pick up the pointer value of current->cpuset
583 * in one instruction, and then attach_task could give us a different
584 * cpuset, and then the cpuset we had could be removed and freed,
585 * and then on our next instruction, we could dereference a no longer
586 * valid cpuset pointer to get its mems_generation field.
587 *
588 * This routine is needed to update the per-task mems_allowed data,
589 * within the tasks context, when it is trying to allocate memory
590 * (in various mm/mempolicy.c routines) and notices that some other
591 * task has been modifying its cpuset.
565 */ 592 */
566 593
567static void refresh_mems(void) 594static void refresh_mems(void)
568{ 595{
569 struct cpuset *cs = current->cpuset; 596 int my_cpusets_mem_gen;
597
598 task_lock(current);
599 my_cpusets_mem_gen = current->cpuset->mems_generation;
600 task_unlock(current);
570 601
571 if (current->cpuset_mems_generation != cs->mems_generation) { 602 if (current->cpuset_mems_generation != my_cpusets_mem_gen) {
603 struct cpuset *cs;
604 nodemask_t oldmem = current->mems_allowed;
605
606 down(&callback_sem);
607 task_lock(current);
608 cs = current->cpuset;
572 guarantee_online_mems(cs, &current->mems_allowed); 609 guarantee_online_mems(cs, &current->mems_allowed);
573 current->cpuset_mems_generation = cs->mems_generation; 610 current->cpuset_mems_generation = cs->mems_generation;
611 task_unlock(current);
612 up(&callback_sem);
613 if (!nodes_equal(oldmem, current->mems_allowed))
614 numa_policy_rebind(&oldmem, &current->mems_allowed);
574 } 615 }
575} 616}
576 617
@@ -579,7 +620,7 @@ static void refresh_mems(void)
579 * 620 *
580 * One cpuset is a subset of another if all its allowed CPUs and 621 * One cpuset is a subset of another if all its allowed CPUs and
581 * Memory Nodes are a subset of the other, and its exclusive flags 622 * Memory Nodes are a subset of the other, and its exclusive flags
582 * are only set if the other's are set. 623 * are only set if the other's are set. Call holding manage_sem.
583 */ 624 */
584 625
585static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) 626static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -597,7 +638,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
597 * If we replaced the flag and mask values of the current cpuset 638 * If we replaced the flag and mask values of the current cpuset
598 * (cur) with those values in the trial cpuset (trial), would 639 * (cur) with those values in the trial cpuset (trial), would
599 * our various subset and exclusive rules still be valid? Presumes 640 * our various subset and exclusive rules still be valid? Presumes
600 * cpuset_sem held. 641 * manage_sem held.
601 * 642 *
602 * 'cur' is the address of an actual, in-use cpuset. Operations 643 * 'cur' is the address of an actual, in-use cpuset. Operations
603 * such as list traversal that depend on the actual address of the 644 * such as list traversal that depend on the actual address of the
@@ -651,7 +692,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
651 * exclusive child cpusets 692 * exclusive child cpusets
652 * Build these two partitions by calling partition_sched_domains 693 * Build these two partitions by calling partition_sched_domains
653 * 694 *
654 * Call with cpuset_sem held. May nest a call to the 695 * Call with manage_sem held. May nest a call to the
655 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 696 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
656 */ 697 */
657 698
@@ -696,6 +737,10 @@ static void update_cpu_domains(struct cpuset *cur)
696 unlock_cpu_hotplug(); 737 unlock_cpu_hotplug();
697} 738}
698 739
740/*
741 * Call with manage_sem held. May take callback_sem during call.
742 */
743
699static int update_cpumask(struct cpuset *cs, char *buf) 744static int update_cpumask(struct cpuset *cs, char *buf)
700{ 745{
701 struct cpuset trialcs; 746 struct cpuset trialcs;
@@ -712,12 +757,18 @@ static int update_cpumask(struct cpuset *cs, char *buf)
712 if (retval < 0) 757 if (retval < 0)
713 return retval; 758 return retval;
714 cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed); 759 cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
760 down(&callback_sem);
715 cs->cpus_allowed = trialcs.cpus_allowed; 761 cs->cpus_allowed = trialcs.cpus_allowed;
762 up(&callback_sem);
716 if (is_cpu_exclusive(cs) && !cpus_unchanged) 763 if (is_cpu_exclusive(cs) && !cpus_unchanged)
717 update_cpu_domains(cs); 764 update_cpu_domains(cs);
718 return 0; 765 return 0;
719} 766}
720 767
768/*
769 * Call with manage_sem held. May take callback_sem during call.
770 */
771
721static int update_nodemask(struct cpuset *cs, char *buf) 772static int update_nodemask(struct cpuset *cs, char *buf)
722{ 773{
723 struct cpuset trialcs; 774 struct cpuset trialcs;
@@ -732,9 +783,11 @@ static int update_nodemask(struct cpuset *cs, char *buf)
732 return -ENOSPC; 783 return -ENOSPC;
733 retval = validate_change(cs, &trialcs); 784 retval = validate_change(cs, &trialcs);
734 if (retval == 0) { 785 if (retval == 0) {
786 down(&callback_sem);
735 cs->mems_allowed = trialcs.mems_allowed; 787 cs->mems_allowed = trialcs.mems_allowed;
736 atomic_inc(&cpuset_mems_generation); 788 atomic_inc(&cpuset_mems_generation);
737 cs->mems_generation = atomic_read(&cpuset_mems_generation); 789 cs->mems_generation = atomic_read(&cpuset_mems_generation);
790 up(&callback_sem);
738 } 791 }
739 return retval; 792 return retval;
740} 793}
@@ -745,6 +798,8 @@ static int update_nodemask(struct cpuset *cs, char *buf)
745 * CS_NOTIFY_ON_RELEASE) 798 * CS_NOTIFY_ON_RELEASE)
746 * cs: the cpuset to update 799 * cs: the cpuset to update
747 * buf: the buffer where we read the 0 or 1 800 * buf: the buffer where we read the 0 or 1
801 *
802 * Call with manage_sem held.
748 */ 803 */
749 804
750static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) 805static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
@@ -766,16 +821,27 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
766 return err; 821 return err;
767 cpu_exclusive_changed = 822 cpu_exclusive_changed =
768 (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); 823 (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
824 down(&callback_sem);
769 if (turning_on) 825 if (turning_on)
770 set_bit(bit, &cs->flags); 826 set_bit(bit, &cs->flags);
771 else 827 else
772 clear_bit(bit, &cs->flags); 828 clear_bit(bit, &cs->flags);
829 up(&callback_sem);
773 830
774 if (cpu_exclusive_changed) 831 if (cpu_exclusive_changed)
775 update_cpu_domains(cs); 832 update_cpu_domains(cs);
776 return 0; 833 return 0;
777} 834}
778 835
836/*
 837 * Attach task specified by pid in 'pidbuf' to cpuset 'cs', possibly
838 * writing the path of the old cpuset in 'ppathbuf' if it needs to be
839 * notified on release.
840 *
841 * Call holding manage_sem. May take callback_sem and task_lock of
842 * the task 'pid' during call.
843 */
844
779static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) 845static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
780{ 846{
781 pid_t pid; 847 pid_t pid;
@@ -792,7 +858,7 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
792 read_lock(&tasklist_lock); 858 read_lock(&tasklist_lock);
793 859
794 tsk = find_task_by_pid(pid); 860 tsk = find_task_by_pid(pid);
795 if (!tsk) { 861 if (!tsk || tsk->flags & PF_EXITING) {
796 read_unlock(&tasklist_lock); 862 read_unlock(&tasklist_lock);
797 return -ESRCH; 863 return -ESRCH;
798 } 864 }
@@ -810,10 +876,13 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
810 get_task_struct(tsk); 876 get_task_struct(tsk);
811 } 877 }
812 878
879 down(&callback_sem);
880
813 task_lock(tsk); 881 task_lock(tsk);
814 oldcs = tsk->cpuset; 882 oldcs = tsk->cpuset;
815 if (!oldcs) { 883 if (!oldcs) {
816 task_unlock(tsk); 884 task_unlock(tsk);
885 up(&callback_sem);
817 put_task_struct(tsk); 886 put_task_struct(tsk);
818 return -ESRCH; 887 return -ESRCH;
819 } 888 }
@@ -824,6 +893,7 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
824 guarantee_online_cpus(cs, &cpus); 893 guarantee_online_cpus(cs, &cpus);
825 set_cpus_allowed(tsk, cpus); 894 set_cpus_allowed(tsk, cpus);
826 895
896 up(&callback_sem);
827 put_task_struct(tsk); 897 put_task_struct(tsk);
828 if (atomic_dec_and_test(&oldcs->count)) 898 if (atomic_dec_and_test(&oldcs->count))
829 check_for_release(oldcs, ppathbuf); 899 check_for_release(oldcs, ppathbuf);
@@ -867,7 +937,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
867 } 937 }
868 buffer[nbytes] = 0; /* nul-terminate */ 938 buffer[nbytes] = 0; /* nul-terminate */
869 939
870 cpuset_down(&cpuset_sem); 940 down(&manage_sem);
871 941
872 if (is_removed(cs)) { 942 if (is_removed(cs)) {
873 retval = -ENODEV; 943 retval = -ENODEV;
@@ -901,7 +971,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
901 if (retval == 0) 971 if (retval == 0)
902 retval = nbytes; 972 retval = nbytes;
903out2: 973out2:
904 cpuset_up(&cpuset_sem); 974 up(&manage_sem);
905 cpuset_release_agent(pathbuf); 975 cpuset_release_agent(pathbuf);
906out1: 976out1:
907 kfree(buffer); 977 kfree(buffer);
@@ -941,9 +1011,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
941{ 1011{
942 cpumask_t mask; 1012 cpumask_t mask;
943 1013
944 cpuset_down(&cpuset_sem); 1014 down(&callback_sem);
945 mask = cs->cpus_allowed; 1015 mask = cs->cpus_allowed;
946 cpuset_up(&cpuset_sem); 1016 up(&callback_sem);
947 1017
948 return cpulist_scnprintf(page, PAGE_SIZE, mask); 1018 return cpulist_scnprintf(page, PAGE_SIZE, mask);
949} 1019}
@@ -952,9 +1022,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
952{ 1022{
953 nodemask_t mask; 1023 nodemask_t mask;
954 1024
955 cpuset_down(&cpuset_sem); 1025 down(&callback_sem);
956 mask = cs->mems_allowed; 1026 mask = cs->mems_allowed;
957 cpuset_up(&cpuset_sem); 1027 up(&callback_sem);
958 1028
959 return nodelist_scnprintf(page, PAGE_SIZE, mask); 1029 return nodelist_scnprintf(page, PAGE_SIZE, mask);
960} 1030}
@@ -995,7 +1065,6 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
995 goto out; 1065 goto out;
996 } 1066 }
997 *s++ = '\n'; 1067 *s++ = '\n';
998 *s = '\0';
999 1068
1000 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); 1069 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1001out: 1070out:
@@ -1048,6 +1117,21 @@ static int cpuset_file_release(struct inode *inode, struct file *file)
1048 return 0; 1117 return 0;
1049} 1118}
1050 1119
1120/*
1121 * cpuset_rename - Only allow simple rename of directories in place.
1122 */
1123static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
1124 struct inode *new_dir, struct dentry *new_dentry)
1125{
1126 if (!S_ISDIR(old_dentry->d_inode->i_mode))
1127 return -ENOTDIR;
1128 if (new_dentry->d_inode)
1129 return -EEXIST;
1130 if (old_dir != new_dir)
1131 return -EIO;
1132 return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1133}
1134
1051static struct file_operations cpuset_file_operations = { 1135static struct file_operations cpuset_file_operations = {
1052 .read = cpuset_file_read, 1136 .read = cpuset_file_read,
1053 .write = cpuset_file_write, 1137 .write = cpuset_file_write,
@@ -1060,6 +1144,7 @@ static struct inode_operations cpuset_dir_inode_operations = {
1060 .lookup = simple_lookup, 1144 .lookup = simple_lookup,
1061 .mkdir = cpuset_mkdir, 1145 .mkdir = cpuset_mkdir,
1062 .rmdir = cpuset_rmdir, 1146 .rmdir = cpuset_rmdir,
1147 .rename = cpuset_rename,
1063}; 1148};
1064 1149
1065static int cpuset_create_file(struct dentry *dentry, int mode) 1150static int cpuset_create_file(struct dentry *dentry, int mode)
@@ -1163,7 +1248,9 @@ struct ctr_struct {
1163 1248
1164/* 1249/*
1165 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'. 1250 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
1166 * Return actual number of pids loaded. 1251 * Return actual number of pids loaded. No need to task_lock(p)
1252 * when reading out p->cpuset, as we don't really care if it changes
1253 * on the next cycle, and we are not going to try to dereference it.
1167 */ 1254 */
1168static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) 1255static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
1169{ 1256{
@@ -1205,6 +1292,12 @@ static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
1205 return cnt; 1292 return cnt;
1206} 1293}
1207 1294
1295/*
1296 * Handle an open on 'tasks' file. Prepare a buffer listing the
1297 * process id's of tasks currently attached to the cpuset being opened.
1298 *
1299 * Does not require any specific cpuset semaphores, and does not take any.
1300 */
1208static int cpuset_tasks_open(struct inode *unused, struct file *file) 1301static int cpuset_tasks_open(struct inode *unused, struct file *file)
1209{ 1302{
1210 struct cpuset *cs = __d_cs(file->f_dentry->d_parent); 1303 struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
@@ -1352,7 +1445,8 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
1352 if (!cs) 1445 if (!cs)
1353 return -ENOMEM; 1446 return -ENOMEM;
1354 1447
1355 cpuset_down(&cpuset_sem); 1448 down(&manage_sem);
1449 refresh_mems();
1356 cs->flags = 0; 1450 cs->flags = 0;
1357 if (notify_on_release(parent)) 1451 if (notify_on_release(parent))
1358 set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); 1452 set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
@@ -1366,25 +1460,27 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
1366 1460
1367 cs->parent = parent; 1461 cs->parent = parent;
1368 1462
1463 down(&callback_sem);
1369 list_add(&cs->sibling, &cs->parent->children); 1464 list_add(&cs->sibling, &cs->parent->children);
1465 up(&callback_sem);
1370 1466
1371 err = cpuset_create_dir(cs, name, mode); 1467 err = cpuset_create_dir(cs, name, mode);
1372 if (err < 0) 1468 if (err < 0)
1373 goto err; 1469 goto err;
1374 1470
1375 /* 1471 /*
1376 * Release cpuset_sem before cpuset_populate_dir() because it 1472 * Release manage_sem before cpuset_populate_dir() because it
1377 * will down() this new directory's i_sem and if we race with 1473 * will down() this new directory's i_sem and if we race with
1378 * another mkdir, we might deadlock. 1474 * another mkdir, we might deadlock.
1379 */ 1475 */
1380 cpuset_up(&cpuset_sem); 1476 up(&manage_sem);
1381 1477
1382 err = cpuset_populate_dir(cs->dentry); 1478 err = cpuset_populate_dir(cs->dentry);
1383 /* If err < 0, we have a half-filled directory - oh well ;) */ 1479 /* If err < 0, we have a half-filled directory - oh well ;) */
1384 return 0; 1480 return 0;
1385err: 1481err:
1386 list_del(&cs->sibling); 1482 list_del(&cs->sibling);
1387 cpuset_up(&cpuset_sem); 1483 up(&manage_sem);
1388 kfree(cs); 1484 kfree(cs);
1389 return err; 1485 return err;
1390} 1486}
@@ -1406,29 +1502,32 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1406 1502
1407 /* the vfs holds both inode->i_sem already */ 1503 /* the vfs holds both inode->i_sem already */
1408 1504
1409 cpuset_down(&cpuset_sem); 1505 down(&manage_sem);
1506 refresh_mems();
1410 if (atomic_read(&cs->count) > 0) { 1507 if (atomic_read(&cs->count) > 0) {
1411 cpuset_up(&cpuset_sem); 1508 up(&manage_sem);
1412 return -EBUSY; 1509 return -EBUSY;
1413 } 1510 }
1414 if (!list_empty(&cs->children)) { 1511 if (!list_empty(&cs->children)) {
1415 cpuset_up(&cpuset_sem); 1512 up(&manage_sem);
1416 return -EBUSY; 1513 return -EBUSY;
1417 } 1514 }
1418 parent = cs->parent; 1515 parent = cs->parent;
1516 down(&callback_sem);
1419 set_bit(CS_REMOVED, &cs->flags); 1517 set_bit(CS_REMOVED, &cs->flags);
1420 if (is_cpu_exclusive(cs)) 1518 if (is_cpu_exclusive(cs))
1421 update_cpu_domains(cs); 1519 update_cpu_domains(cs);
1422 list_del(&cs->sibling); /* delete my sibling from parent->children */ 1520 list_del(&cs->sibling); /* delete my sibling from parent->children */
1423 if (list_empty(&parent->children))
1424 check_for_release(parent, &pathbuf);
1425 spin_lock(&cs->dentry->d_lock); 1521 spin_lock(&cs->dentry->d_lock);
1426 d = dget(cs->dentry); 1522 d = dget(cs->dentry);
1427 cs->dentry = NULL; 1523 cs->dentry = NULL;
1428 spin_unlock(&d->d_lock); 1524 spin_unlock(&d->d_lock);
1429 cpuset_d_remove_dir(d); 1525 cpuset_d_remove_dir(d);
1430 dput(d); 1526 dput(d);
1431 cpuset_up(&cpuset_sem); 1527 up(&callback_sem);
1528 if (list_empty(&parent->children))
1529 check_for_release(parent, &pathbuf);
1530 up(&manage_sem);
1432 cpuset_release_agent(pathbuf); 1531 cpuset_release_agent(pathbuf);
1433 return 0; 1532 return 0;
1434} 1533}
@@ -1488,16 +1587,26 @@ void __init cpuset_init_smp(void)
1488 * cpuset_fork - attach newly forked task to its parents cpuset. 1587 * cpuset_fork - attach newly forked task to its parents cpuset.
1489 * @tsk: pointer to task_struct of forking parent process. 1588 * @tsk: pointer to task_struct of forking parent process.
1490 * 1589 *
1491 * Description: By default, on fork, a task inherits its 1590 * Description: A task inherits its parent's cpuset at fork().
1492 * parent's cpuset. The pointer to the shared cpuset is 1591 *
1493 * automatically copied in fork.c by dup_task_struct(). 1592 * A pointer to the shared cpuset was automatically copied in fork.c
1494 * This cpuset_fork() routine need only increment the usage 1593 * by dup_task_struct(). However, we ignore that copy, since it was
1495 * counter in that cpuset. 1594 * not made under the protection of task_lock(), so might no longer be
1595 * a valid cpuset pointer. attach_task() might have already changed
1596 * current->cpuset, allowing the previously referenced cpuset to
1597 * be removed and freed. Instead, we task_lock(current) and copy
1598 * its present value of current->cpuset for our freshly forked child.
1599 *
1600 * At the point that cpuset_fork() is called, 'current' is the parent
1601 * task, and the passed argument 'child' points to the child task.
1496 **/ 1602 **/
1497 1603
1498void cpuset_fork(struct task_struct *tsk) 1604void cpuset_fork(struct task_struct *child)
1499{ 1605{
1500 atomic_inc(&tsk->cpuset->count); 1606 task_lock(current);
1607 child->cpuset = current->cpuset;
1608 atomic_inc(&child->cpuset->count);
1609 task_unlock(current);
1501} 1610}
1502 1611
1503/** 1612/**
@@ -1506,35 +1615,42 @@ void cpuset_fork(struct task_struct *tsk)
1506 * 1615 *
1507 * Description: Detach cpuset from @tsk and release it. 1616 * Description: Detach cpuset from @tsk and release it.
1508 * 1617 *
1509 * Note that cpusets marked notify_on_release force every task 1618 * Note that cpusets marked notify_on_release force every task in
1510 * in them to take the global cpuset_sem semaphore when exiting. 1619 * them to take the global manage_sem semaphore when exiting.
1511 * This could impact scaling on very large systems. Be reluctant 1620 * This could impact scaling on very large systems. Be reluctant to
1512 * to use notify_on_release cpusets where very high task exit 1621 * use notify_on_release cpusets where very high task exit scaling
1513 * scaling is required on large systems. 1622 * is required on large systems.
1514 * 1623 *
1515 * Don't even think about dereferencing 'cs' after the cpuset use 1624 * Don't even think about dereferencing 'cs' after the cpuset use count
1516 * count goes to zero, except inside a critical section guarded 1625 * goes to zero, except inside a critical section guarded by manage_sem
1517 * by the cpuset_sem semaphore. If you don't hold cpuset_sem, 1626 * or callback_sem. Otherwise a zero cpuset use count is a license to
1518 * then a zero cpuset use count is a license to any other task to 1627 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
1519 * nuke the cpuset immediately. 1628 *
1629 * This routine has to take manage_sem, not callback_sem, because
1630 * it is holding that semaphore while calling check_for_release(),
1631 * which calls kmalloc(), so can't be called holding callback_sem.
1632 *
1633 * We don't need to task_lock() this reference to tsk->cpuset,
1634 * because tsk is already marked PF_EXITING, so attach_task() won't
1635 * mess with it.
1520 **/ 1636 **/
1521 1637
1522void cpuset_exit(struct task_struct *tsk) 1638void cpuset_exit(struct task_struct *tsk)
1523{ 1639{
1524 struct cpuset *cs; 1640 struct cpuset *cs;
1525 1641
1526 task_lock(tsk); 1642 BUG_ON(!(tsk->flags & PF_EXITING));
1643
1527 cs = tsk->cpuset; 1644 cs = tsk->cpuset;
1528 tsk->cpuset = NULL; 1645 tsk->cpuset = NULL;
1529 task_unlock(tsk);
1530 1646
1531 if (notify_on_release(cs)) { 1647 if (notify_on_release(cs)) {
1532 char *pathbuf = NULL; 1648 char *pathbuf = NULL;
1533 1649
1534 cpuset_down(&cpuset_sem); 1650 down(&manage_sem);
1535 if (atomic_dec_and_test(&cs->count)) 1651 if (atomic_dec_and_test(&cs->count))
1536 check_for_release(cs, &pathbuf); 1652 check_for_release(cs, &pathbuf);
1537 cpuset_up(&cpuset_sem); 1653 up(&manage_sem);
1538 cpuset_release_agent(pathbuf); 1654 cpuset_release_agent(pathbuf);
1539 } else { 1655 } else {
1540 atomic_dec(&cs->count); 1656 atomic_dec(&cs->count);
@@ -1555,11 +1671,11 @@ cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
1555{ 1671{
1556 cpumask_t mask; 1672 cpumask_t mask;
1557 1673
1558 cpuset_down(&cpuset_sem); 1674 down(&callback_sem);
1559 task_lock((struct task_struct *)tsk); 1675 task_lock((struct task_struct *)tsk);
1560 guarantee_online_cpus(tsk->cpuset, &mask); 1676 guarantee_online_cpus(tsk->cpuset, &mask);
1561 task_unlock((struct task_struct *)tsk); 1677 task_unlock((struct task_struct *)tsk);
1562 cpuset_up(&cpuset_sem); 1678 up(&callback_sem);
1563 1679
1564 return mask; 1680 return mask;
1565} 1681}
@@ -1575,19 +1691,28 @@ void cpuset_init_current_mems_allowed(void)
1575 * If the current tasks cpusets mems_allowed changed behind our backs, 1691 * If the current tasks cpusets mems_allowed changed behind our backs,
1576 * update current->mems_allowed and mems_generation to the new value. 1692 * update current->mems_allowed and mems_generation to the new value.
1577 * Do not call this routine if in_interrupt(). 1693 * Do not call this routine if in_interrupt().
1694 *
1695 * Call without callback_sem or task_lock() held. May be called
1696 * with or without manage_sem held. Unless exiting, it will acquire
1697 * task_lock(). Also might acquire callback_sem during call to
1698 * refresh_mems().
1578 */ 1699 */
1579 1700
1580void cpuset_update_current_mems_allowed(void) 1701void cpuset_update_current_mems_allowed(void)
1581{ 1702{
1582 struct cpuset *cs = current->cpuset; 1703 struct cpuset *cs;
1704 int need_to_refresh = 0;
1583 1705
1706 task_lock(current);
1707 cs = current->cpuset;
1584 if (!cs) 1708 if (!cs)
1585 return; /* task is exiting */ 1709 goto done;
1586 if (current->cpuset_mems_generation != cs->mems_generation) { 1710 if (current->cpuset_mems_generation != cs->mems_generation)
1587 cpuset_down(&cpuset_sem); 1711 need_to_refresh = 1;
1712done:
1713 task_unlock(current);
1714 if (need_to_refresh)
1588 refresh_mems(); 1715 refresh_mems();
1589 cpuset_up(&cpuset_sem);
1590 }
1591} 1716}
1592 1717
1593/** 1718/**
@@ -1621,7 +1746,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
1621 1746
1622/* 1747/*
1623 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive 1748 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
1624 * ancestor to the specified cpuset. Call while holding cpuset_sem. 1749 * ancestor to the specified cpuset. Call holding callback_sem.
1625 * If no ancestor is mem_exclusive (an unusual configuration), then 1750 * If no ancestor is mem_exclusive (an unusual configuration), then
1626 * returns the root cpuset. 1751 * returns the root cpuset.
1627 */ 1752 */
@@ -1648,12 +1773,12 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
1648 * GFP_KERNEL allocations are not so marked, so can escape to the 1773 * GFP_KERNEL allocations are not so marked, so can escape to the
1649 * nearest mem_exclusive ancestor cpuset. 1774 * nearest mem_exclusive ancestor cpuset.
1650 * 1775 *
1651 * Scanning up parent cpusets requires cpuset_sem. The __alloc_pages() 1776 * Scanning up parent cpusets requires callback_sem. The __alloc_pages()
1652 * routine only calls here with __GFP_HARDWALL bit _not_ set if 1777 * routine only calls here with __GFP_HARDWALL bit _not_ set if
1653 * it's a GFP_KERNEL allocation, and all nodes in the current tasks 1778 * it's a GFP_KERNEL allocation, and all nodes in the current tasks
1654 * mems_allowed came up empty on the first pass over the zonelist. 1779 * mems_allowed came up empty on the first pass over the zonelist.
1655 * So only GFP_KERNEL allocations, if all nodes in the cpuset are 1780 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
1656 * short of memory, might require taking the cpuset_sem semaphore. 1781 * short of memory, might require taking the callback_sem semaphore.
1657 * 1782 *
1658 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() 1783 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
1659 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing 1784 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
@@ -1685,14 +1810,16 @@ int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
1685 return 0; 1810 return 0;
1686 1811
1687 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 1812 /* Not hardwall and node outside mems_allowed: scan up cpusets */
1688 cpuset_down(&cpuset_sem); 1813 down(&callback_sem);
1689 cs = current->cpuset; 1814
1690 if (!cs) 1815 if (current->flags & PF_EXITING) /* Let dying task have memory */
1691 goto done; /* current task exiting */ 1816 return 1;
1692 cs = nearest_exclusive_ancestor(cs); 1817 task_lock(current);
1818 cs = nearest_exclusive_ancestor(current->cpuset);
1819 task_unlock(current);
1820
1693 allowed = node_isset(node, cs->mems_allowed); 1821 allowed = node_isset(node, cs->mems_allowed);
1694done: 1822 up(&callback_sem);
1695 cpuset_up(&cpuset_sem);
1696 return allowed; 1823 return allowed;
1697} 1824}
1698 1825
@@ -1705,7 +1832,7 @@ done:
1705 * determine if task @p's memory usage might impact the memory 1832 * determine if task @p's memory usage might impact the memory
1706 * available to the current task. 1833 * available to the current task.
1707 * 1834 *
1708 * Acquires cpuset_sem - not suitable for calling from a fast path. 1835 * Acquires callback_sem - not suitable for calling from a fast path.
1709 **/ 1836 **/
1710 1837
1711int cpuset_excl_nodes_overlap(const struct task_struct *p) 1838int cpuset_excl_nodes_overlap(const struct task_struct *p)
@@ -1713,18 +1840,27 @@ int cpuset_excl_nodes_overlap(const struct task_struct *p)
1713 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ 1840 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
1714 int overlap = 0; /* do cpusets overlap? */ 1841 int overlap = 0; /* do cpusets overlap? */
1715 1842
1716 cpuset_down(&cpuset_sem); 1843 down(&callback_sem);
1717 cs1 = current->cpuset; 1844
1718 if (!cs1) 1845 task_lock(current);
1719 goto done; /* current task exiting */ 1846 if (current->flags & PF_EXITING) {
1720 cs2 = p->cpuset; 1847 task_unlock(current);
1721 if (!cs2) 1848 goto done;
1722 goto done; /* task p is exiting */ 1849 }
1723 cs1 = nearest_exclusive_ancestor(cs1); 1850 cs1 = nearest_exclusive_ancestor(current->cpuset);
1724 cs2 = nearest_exclusive_ancestor(cs2); 1851 task_unlock(current);
1852
1853 task_lock((struct task_struct *)p);
1854 if (p->flags & PF_EXITING) {
1855 task_unlock((struct task_struct *)p);
1856 goto done;
1857 }
1858 cs2 = nearest_exclusive_ancestor(p->cpuset);
1859 task_unlock((struct task_struct *)p);
1860
1725 overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed); 1861 overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
1726done: 1862done:
1727 cpuset_up(&cpuset_sem); 1863 up(&callback_sem);
1728 1864
1729 return overlap; 1865 return overlap;
1730} 1866}
@@ -1733,6 +1869,10 @@ done:
1733 * proc_cpuset_show() 1869 * proc_cpuset_show()
1734 * - Print tasks cpuset path into seq_file. 1870 * - Print tasks cpuset path into seq_file.
1735 * - Used for /proc/<pid>/cpuset. 1871 * - Used for /proc/<pid>/cpuset.
1872 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
1873 * doesn't really matter if tsk->cpuset changes after we read it,
1874 * and we take manage_sem, keeping attach_task() from changing it
1875 * anyway.
1736 */ 1876 */
1737 1877
1738static int proc_cpuset_show(struct seq_file *m, void *v) 1878static int proc_cpuset_show(struct seq_file *m, void *v)
@@ -1747,10 +1887,8 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
1747 return -ENOMEM; 1887 return -ENOMEM;
1748 1888
1749 tsk = m->private; 1889 tsk = m->private;
1750 cpuset_down(&cpuset_sem); 1890 down(&manage_sem);
1751 task_lock(tsk);
1752 cs = tsk->cpuset; 1891 cs = tsk->cpuset;
1753 task_unlock(tsk);
1754 if (!cs) { 1892 if (!cs) {
1755 retval = -EINVAL; 1893 retval = -EINVAL;
1756 goto out; 1894 goto out;
@@ -1762,7 +1900,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
1762 seq_puts(m, buf); 1900 seq_puts(m, buf);
1763 seq_putc(m, '\n'); 1901 seq_putc(m, '\n');
1764out: 1902out:
1765 cpuset_up(&cpuset_sem); 1903 up(&manage_sem);
1766 kfree(buf); 1904 kfree(buf);
1767 return retval; 1905 return retval;
1768} 1906}
diff --git a/kernel/exit.c b/kernel/exit.c
index 79f52b85d6ed..537394b25e8d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -547,7 +547,7 @@ static inline void reparent_thread(task_t *p, task_t *father, int traced)
547 547
548 if (p->pdeath_signal) 548 if (p->pdeath_signal)
549 /* We already hold the tasklist_lock here. */ 549 /* We already hold the tasklist_lock here. */
550 group_send_sig_info(p->pdeath_signal, (void *) 0, p); 550 group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
551 551
552 /* Move the child from its dying parent to the new one. */ 552 /* Move the child from its dying parent to the new one. */
553 if (unlikely(traced)) { 553 if (unlikely(traced)) {
@@ -591,8 +591,8 @@ static inline void reparent_thread(task_t *p, task_t *father, int traced)
591 int pgrp = process_group(p); 591 int pgrp = process_group(p);
592 592
593 if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) { 593 if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
594 __kill_pg_info(SIGHUP, (void *)1, pgrp); 594 __kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
595 __kill_pg_info(SIGCONT, (void *)1, pgrp); 595 __kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
596 } 596 }
597 } 597 }
598} 598}
@@ -727,8 +727,8 @@ static void exit_notify(struct task_struct *tsk)
727 (t->signal->session == tsk->signal->session) && 727 (t->signal->session == tsk->signal->session) &&
728 will_become_orphaned_pgrp(process_group(tsk), tsk) && 728 will_become_orphaned_pgrp(process_group(tsk), tsk) &&
729 has_stopped_jobs(process_group(tsk))) { 729 has_stopped_jobs(process_group(tsk))) {
730 __kill_pg_info(SIGHUP, (void *)1, process_group(tsk)); 730 __kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
731 __kill_pg_info(SIGCONT, (void *)1, process_group(tsk)); 731 __kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
732 } 732 }
733 733
734 /* Let father know we died 734 /* Let father know we died
@@ -783,10 +783,6 @@ static void exit_notify(struct task_struct *tsk)
783 /* If the process is dead, release it - nobody will wait for it */ 783 /* If the process is dead, release it - nobody will wait for it */
784 if (state == EXIT_DEAD) 784 if (state == EXIT_DEAD)
785 release_task(tsk); 785 release_task(tsk);
786
787 /* PF_DEAD causes final put_task_struct after we schedule. */
788 preempt_disable();
789 tsk->flags |= PF_DEAD;
790} 786}
791 787
792fastcall NORET_TYPE void do_exit(long code) 788fastcall NORET_TYPE void do_exit(long code)
@@ -873,7 +869,11 @@ fastcall NORET_TYPE void do_exit(long code)
873 tsk->mempolicy = NULL; 869 tsk->mempolicy = NULL;
874#endif 870#endif
875 871
876 BUG_ON(!(current->flags & PF_DEAD)); 872 /* PF_DEAD causes final put_task_struct after we schedule. */
873 preempt_disable();
874 BUG_ON(tsk->flags & PF_DEAD);
875 tsk->flags |= PF_DEAD;
876
877 schedule(); 877 schedule();
878 BUG(); 878 BUG();
879 /* Avoid "noreturn function does return". */ 879 /* Avoid "noreturn function does return". */
@@ -1383,6 +1383,15 @@ repeat:
1383 1383
1384 switch (p->state) { 1384 switch (p->state) {
1385 case TASK_TRACED: 1385 case TASK_TRACED:
1386 /*
1387 * When we hit the race with PTRACE_ATTACH,
1388 * we will not report this child. But the
1389 * race means it has not yet been moved to
1390 * our ptrace_children list, so we need to
1391 * set the flag here to avoid a spurious ECHILD
1392 * when the race happens with the only child.
1393 */
1394 flag = 1;
1386 if (!my_ptrace_child(p)) 1395 if (!my_ptrace_child(p))
1387 continue; 1396 continue;
1388 /*FALLTHROUGH*/ 1397 /*FALLTHROUGH*/
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3ff7b925c387..51df337b37db 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -117,14 +117,16 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
117 /* 117 /*
118 * No locking required for CPU-local interrupts: 118 * No locking required for CPU-local interrupts:
119 */ 119 */
120 desc->handler->ack(irq); 120 if (desc->handler->ack)
121 desc->handler->ack(irq);
121 action_ret = handle_IRQ_event(irq, regs, desc->action); 122 action_ret = handle_IRQ_event(irq, regs, desc->action);
122 desc->handler->end(irq); 123 desc->handler->end(irq);
123 return 1; 124 return 1;
124 } 125 }
125 126
126 spin_lock(&desc->lock); 127 spin_lock(&desc->lock);
127 desc->handler->ack(irq); 128 if (desc->handler->ack)
129 desc->handler->ack(irq);
128 /* 130 /*
129 * REPLAY is when Linux resends an IRQ that was dropped earlier 131 * REPLAY is when Linux resends an IRQ that was dropped earlier
130 * WAITING is used by probe to mark irqs that are being tested 132 * WAITING is used by probe to mark irqs that are being tested
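The new NULL check matters for controllers that have no separate acknowledge cycle and therefore leave the hook unset. A hypothetical chip of that kind, using the hw_interrupt_type structure of this era (all my_pic_* names are invented for illustration):

static unsigned int my_pic_startup(unsigned int irq) { return 0; }
static void my_pic_noop(unsigned int irq) { }

static struct hw_interrupt_type my_pic = {
	.typename = "my-pic",
	.startup  = my_pic_startup,
	.shutdown = my_pic_noop,
	.enable   = my_pic_noop,
	.disable  = my_pic_noop,
	/* .ack deliberately left NULL: nothing to acknowledge */
	.end      = my_pic_noop,
};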
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 13bcec151b57..39277dd6bf90 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -18,6 +18,7 @@
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/sched.h> /* for cond_resched */
21#include <linux/mm.h> 22#include <linux/mm.h>
22 23
23#include <asm/sections.h> 24#include <asm/sections.h>
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 44166e3bb8af..51a892063aaa 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -131,14 +131,14 @@ struct subprocess_info {
131static int ____call_usermodehelper(void *data) 131static int ____call_usermodehelper(void *data)
132{ 132{
133 struct subprocess_info *sub_info = data; 133 struct subprocess_info *sub_info = data;
134 struct key *old_session; 134 struct key *new_session, *old_session;
135 int retval; 135 int retval;
136 136
137 /* Unblock all signals and set the session keyring. */ 137 /* Unblock all signals and set the session keyring. */
138 key_get(sub_info->ring); 138 new_session = key_get(sub_info->ring);
139 flush_signals(current); 139 flush_signals(current);
140 spin_lock_irq(&current->sighand->siglock); 140 spin_lock_irq(&current->sighand->siglock);
141 old_session = __install_session_keyring(current, sub_info->ring); 141 old_session = __install_session_keyring(current, new_session);
142 flush_signal_handlers(current, 1); 142 flush_signal_handlers(current, 1);
143 sigemptyset(&current->blocked); 143 sigemptyset(&current->blocked);
144 recalc_sigpending(); 144 recalc_sigpending();
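The kmod change keeps the pointer returned by key_get() in new_session and installs that reference, so the reference actually taken is the one __install_session_keyring() consumes, and old_session is handed back for later release. A toy refcount sketch of this install-and-return-old pattern, with invented names:

#include <stdio.h>
#include <stddef.h>

struct ref { int count; };

static struct ref *ref_get(struct ref *r) { if (r) r->count++; return r; }
static void ref_put(struct ref *r)        { if (r) r->count--; }

static struct ref *current_session;

/* returns the previously installed reference so the caller can drop it */
static struct ref *install_session(struct ref *new_ref)
{
    struct ref *old = current_session;

    current_session = new_ref;
    return old;
}

int main(void)
{
    struct ref ring = { .count = 1 };
    struct ref *new_session = ref_get(&ring);     /* like key_get(sub_info->ring) */
    struct ref *old_session = install_session(new_session);

    ref_put(old_session);                         /* drop whatever was there before */
    printf("ring refcount now %d\n", ring.count); /* 2: original owner + installed */
    return 0;
}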
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f3ea492ab44d..ce4915dd683a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -35,6 +35,7 @@
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/hash.h> 36#include <linux/hash.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/slab.h>
38#include <linux/module.h> 39#include <linux/module.h>
39#include <linux/moduleloader.h> 40#include <linux/moduleloader.h>
40#include <asm-generic/sections.h> 41#include <asm-generic/sections.h>
diff --git a/kernel/kthread.c b/kernel/kthread.c
index f50f174e92da..e75950a1092c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -165,6 +165,12 @@ EXPORT_SYMBOL(kthread_bind);
165 165
166int kthread_stop(struct task_struct *k) 166int kthread_stop(struct task_struct *k)
167{ 167{
168 return kthread_stop_sem(k, NULL);
169}
170EXPORT_SYMBOL(kthread_stop);
171
172int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
173{
168 int ret; 174 int ret;
169 175
170 down(&kthread_stop_lock); 176 down(&kthread_stop_lock);
@@ -178,7 +184,10 @@ int kthread_stop(struct task_struct *k)
178 184
179 /* Now set kthread_should_stop() to true, and wake it up. */ 185 /* Now set kthread_should_stop() to true, and wake it up. */
180 kthread_stop_info.k = k; 186 kthread_stop_info.k = k;
181 wake_up_process(k); 187 if (s)
188 up(s);
189 else
190 wake_up_process(k);
182 put_task_struct(k); 191 put_task_struct(k);
183 192
184 /* Once it dies, reset stop ptr, gather result and we're done. */ 193 /* Once it dies, reset stop ptr, gather result and we're done. */
@@ -189,7 +198,7 @@ int kthread_stop(struct task_struct *k)
189 198
190 return ret; 199 return ret;
191} 200}
192EXPORT_SYMBOL(kthread_stop); 201EXPORT_SYMBOL(kthread_stop_sem);
193 202
194static __init int helper_init(void) 203static __init int helper_init(void)
195{ 204{
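kthread_stop() becomes a thin wrapper around the new kthread_stop_sem(): when the target thread is known to sleep on a semaphore, the stopper calls up(s) on it rather than a plain wake_up_process(). A rough userspace analogue using POSIX threads and semaphores; the names are invented and this only mirrors the wake-the-sleeper idea, not the kernel API:

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

static sem_t work_sem;
static atomic_bool should_stop;

static void *worker(void *arg)
{
    (void)arg;
    for (;;) {
        sem_wait(&work_sem);            /* the thread normally sleeps here */
        if (atomic_load(&should_stop))
            break;
        puts("worker: handled one item");
    }
    return NULL;
}

/* pass stop_sem when the worker is known to block on that semaphore */
static void stop_worker(pthread_t t, sem_t *stop_sem)
{
    atomic_store(&should_stop, true);   /* like setting kthread_stop_info.k */
    if (stop_sem)
        sem_post(stop_sem);             /* like up(s): unblock the sleeper */
    /* else: we would need a direct wakeup, as wake_up_process() provides */
    pthread_join(t, NULL);              /* wait for the thread to exit */
}

int main(void)
{
    pthread_t t;

    sem_init(&work_sem, 0, 0);
    pthread_create(&t, NULL, worker, NULL);
    sem_post(&work_sem);                /* queue one unit of work */
    stop_worker(t, &work_sem);
    puts("worker stopped");
    return 0;
}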
diff --git a/kernel/params.c b/kernel/params.c
index 1a8614bac5d5..47ba69547945 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/slab.h>
26 27
27#if 0 28#if 0
28#define DEBUGP printk 29#define DEBUGP printk
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bf374fceb39c..91a894264941 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1225,7 +1225,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1225 /* 1225 /*
1226 * The task was cleaned up already, no future firings. 1226 * The task was cleaned up already, no future firings.
1227 */ 1227 */
1228 return; 1228 goto out;
1229 1229
1230 /* 1230 /*
1231 * Fetch the current sample and update the timer's expiry time. 1231 * Fetch the current sample and update the timer's expiry time.
@@ -1235,7 +1235,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1235 bump_cpu_timer(timer, now); 1235 bump_cpu_timer(timer, now);
1236 if (unlikely(p->exit_state)) { 1236 if (unlikely(p->exit_state)) {
1237 clear_dead_task(timer, now); 1237 clear_dead_task(timer, now);
1238 return; 1238 goto out;
1239 } 1239 }
1240 read_lock(&tasklist_lock); /* arm_timer needs it. */ 1240 read_lock(&tasklist_lock); /* arm_timer needs it. */
1241 } else { 1241 } else {
@@ -1248,8 +1248,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1248 put_task_struct(p); 1248 put_task_struct(p);
1249 timer->it.cpu.task = p = NULL; 1249 timer->it.cpu.task = p = NULL;
1250 timer->it.cpu.expires.sched = 0; 1250 timer->it.cpu.expires.sched = 0;
1251 read_unlock(&tasklist_lock); 1251 goto out_unlock;
1252 return;
1253 } else if (unlikely(p->exit_state) && thread_group_empty(p)) { 1252 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1254 /* 1253 /*
1255 * We've noticed that the thread is dead, but 1254 * We've noticed that the thread is dead, but
@@ -1257,8 +1256,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1257 * drop our task ref. 1256 * drop our task ref.
1258 */ 1257 */
1259 clear_dead_task(timer, now); 1258 clear_dead_task(timer, now);
1260 read_unlock(&tasklist_lock); 1259 goto out_unlock;
1261 return;
1262 } 1260 }
1263 cpu_clock_sample_group(timer->it_clock, p, &now); 1261 cpu_clock_sample_group(timer->it_clock, p, &now);
1264 bump_cpu_timer(timer, now); 1262 bump_cpu_timer(timer, now);
@@ -1270,7 +1268,13 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1270 */ 1268 */
1271 arm_timer(timer, now); 1269 arm_timer(timer, now);
1272 1270
1271out_unlock:
1273 read_unlock(&tasklist_lock); 1272 read_unlock(&tasklist_lock);
1273
1274out:
1275 timer->it_overrun_last = timer->it_overrun;
1276 timer->it_overrun = -1;
1277 ++timer->it_requeue_pending;
1274} 1278}
1275 1279
1276/* 1280/*
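The posix-cpu-timers hunks convert early returns into goto out and goto out_unlock so the overrun bookkeeping at the bottom (it_overrun_last, it_overrun, it_requeue_pending) runs on every exit path and the read_unlock is no longer duplicated. The same single-exit shape, reduced to a standalone sketch with invented types and fields:

#include <stdio.h>
#include <pthread.h>

struct demo_timer {
    int overrun;
    int overrun_last;
    int requeue_pending;
};

static pthread_rwlock_t tasklist = PTHREAD_RWLOCK_INITIALIZER;

static void reschedule(struct demo_timer *t, int task_alive)
{
    if (!task_alive)
        goto out;                         /* used to be a bare "return" */

    pthread_rwlock_rdlock(&tasklist);
    /* ... sample the clock and re-arm the timer here ... */
    if (t->overrun < 0)
        goto out_unlock;                  /* early exits funnel through the unlock */
    /* ... more work under the lock ... */

out_unlock:
    pthread_rwlock_unlock(&tasklist);
out:
    t->overrun_last = t->overrun;         /* bookkeeping now runs on all paths */
    t->overrun = -1;
    t->requeue_pending++;
}

int main(void)
{
    struct demo_timer t = { .overrun = 3 };

    reschedule(&t, 0);                    /* dead-task path */
    reschedule(&t, 1);                    /* normal path */
    printf("overrun_last=%d requeue_pending=%d\n",
           t.overrun_last, t.requeue_pending);
    return 0;
}

The point of the single exit is that anyone adding a new early return later cannot accidentally skip the overrun accounting or leave the lock held.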
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index dda3cda73c77..ea55c7a1cd75 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -1295,13 +1295,6 @@ sys_clock_getres(clockid_t which_clock, struct timespec __user *tp)
1295 return error; 1295 return error;
1296} 1296}
1297 1297
1298static void nanosleep_wake_up(unsigned long __data)
1299{
1300 struct task_struct *p = (struct task_struct *) __data;
1301
1302 wake_up_process(p);
1303}
1304
1305/* 1298/*
1306 * The standard says that an absolute nanosleep call MUST wake up at 1299 * The standard says that an absolute nanosleep call MUST wake up at
1307 * the requested time in spite of clock settings. Here is what we do: 1300 * the requested time in spite of clock settings. Here is what we do:
@@ -1442,7 +1435,6 @@ static int common_nsleep(clockid_t which_clock,
1442 int flags, struct timespec *tsave) 1435 int flags, struct timespec *tsave)
1443{ 1436{
1444 struct timespec t, dum; 1437 struct timespec t, dum;
1445 struct timer_list new_timer;
1446 DECLARE_WAITQUEUE(abs_wqueue, current); 1438 DECLARE_WAITQUEUE(abs_wqueue, current);
1447 u64 rq_time = (u64)0; 1439 u64 rq_time = (u64)0;
1448 s64 left; 1440 s64 left;
@@ -1451,10 +1443,6 @@ static int common_nsleep(clockid_t which_clock,
1451 &current_thread_info()->restart_block; 1443 &current_thread_info()->restart_block;
1452 1444
1453 abs_wqueue.flags = 0; 1445 abs_wqueue.flags = 0;
1454 init_timer(&new_timer);
1455 new_timer.expires = 0;
1456 new_timer.data = (unsigned long) current;
1457 new_timer.function = nanosleep_wake_up;
1458 abs = flags & TIMER_ABSTIME; 1446 abs = flags & TIMER_ABSTIME;
1459 1447
1460 if (restart_block->fn == clock_nanosleep_restart) { 1448 if (restart_block->fn == clock_nanosleep_restart) {
@@ -1490,13 +1478,8 @@ static int common_nsleep(clockid_t which_clock,
1490 if (left < (s64)0) 1478 if (left < (s64)0)
1491 break; 1479 break;
1492 1480
1493 new_timer.expires = jiffies + left; 1481 schedule_timeout_interruptible(left);
1494 __set_current_state(TASK_INTERRUPTIBLE);
1495 add_timer(&new_timer);
1496
1497 schedule();
1498 1482
1499 del_timer_sync(&new_timer);
1500 left = rq_time - get_jiffies_64(); 1483 left = rq_time - get_jiffies_64();
1501 } while (left > (s64)0 && !test_thread_flag(TIF_SIGPENDING)); 1484 } while (left > (s64)0 && !test_thread_flag(TIF_SIGPENDING));
1502 1485
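common_nsleep() drops its hand-rolled timer_list, __set_current_state(), schedule() and del_timer_sync() sequence in favour of a single schedule_timeout_interruptible(left). A userspace analogue of that consolidation is letting nanosleep() handle both the timing and the early wakeup on a signal; the helper below is a toy with invented names and a deliberately small tick:

#include <stdio.h>
#include <time.h>
#include <errno.h>

#define TICK_NS (10 * 1000 * 1000L)       /* pretend one "jiffy" is 10 ms */

/* sleep for "left" ticks (keep left < 100 in this toy so tv_nsec stays < 1s);
 * returns the number of ticks NOT slept, i.e. 0 after a full sleep */
static long sleep_ticks_interruptible(long left)
{
    struct timespec req = { .tv_sec = 0, .tv_nsec = left * TICK_NS };
    struct timespec rem = { 0, 0 };

    if (nanosleep(&req, &rem) == -1 && errno == EINTR)
        return rem.tv_nsec / TICK_NS;     /* woken early by a signal */
    return 0;
}

int main(void)
{
    long not_slept = sleep_ticks_interruptible(50);   /* roughly 500 ms */

    printf("ticks remaining after sleep: %ld\n", not_slept);
    return 0;
}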
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 2f438d0eaa13..c71eb4579c07 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -4,7 +4,7 @@ EXTRA_CFLAGS += -DDEBUG
4endif 4endif
5 5
6obj-y := main.o process.o console.o pm.o 6obj-y := main.o process.o console.o pm.o
7obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o 7obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o
8 8
9obj-$(CONFIG_SUSPEND_SMP) += smp.o 9obj-$(CONFIG_SUSPEND_SMP) += smp.o
10 10
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 761956e813f5..027322a564f4 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -30,7 +30,6 @@ extern int swsusp_check(void);
30extern int swsusp_read(void); 30extern int swsusp_read(void);
31extern void swsusp_close(void); 31extern void swsusp_close(void);
32extern int swsusp_resume(void); 32extern int swsusp_resume(void);
33extern int swsusp_free(void);
34 33
35 34
36static int noresume = 0; 35static int noresume = 0;
@@ -93,10 +92,7 @@ static void free_some_memory(void)
93 printk("Freeing memory... "); 92 printk("Freeing memory... ");
94 while ((tmp = shrink_all_memory(10000))) { 93 while ((tmp = shrink_all_memory(10000))) {
95 pages += tmp; 94 pages += tmp;
96 printk("\b%c", p[i]); 95 printk("\b%c", p[i++ % 4]);
97 i++;
98 if (i > 3)
99 i = 0;
100 } 96 }
101 printk("\bdone (%li pages freed)\n", pages); 97 printk("\bdone (%li pages freed)\n", pages);
102} 98}
@@ -178,13 +174,12 @@ int pm_suspend_disk(void)
178 goto Done; 174 goto Done;
179 175
180 if (in_suspend) { 176 if (in_suspend) {
177 device_resume();
181 pr_debug("PM: writing image.\n"); 178 pr_debug("PM: writing image.\n");
182 error = swsusp_write(); 179 error = swsusp_write();
183 if (!error) 180 if (!error)
184 power_down(pm_disk_mode); 181 power_down(pm_disk_mode);
185 else { 182 else {
186 /* swsusp_write can not fail in device_resume,
187 no need to do second device_resume */
188 swsusp_free(); 183 swsusp_free();
189 unprepare_processes(); 184 unprepare_processes();
190 return error; 185 return error;
@@ -252,14 +247,17 @@ static int software_resume(void)
252 247
253 pr_debug("PM: Reading swsusp image.\n"); 248 pr_debug("PM: Reading swsusp image.\n");
254 249
255 if ((error = swsusp_read())) 250 if ((error = swsusp_read())) {
256 goto Cleanup; 251 swsusp_free();
252 goto Thaw;
253 }
257 254
258 pr_debug("PM: Preparing devices for restore.\n"); 255 pr_debug("PM: Preparing devices for restore.\n");
259 256
260 if ((error = device_suspend(PMSG_FREEZE))) { 257 if ((error = device_suspend(PMSG_FREEZE))) {
261 printk("Some devices failed to suspend\n"); 258 printk("Some devices failed to suspend\n");
262 goto Free; 259 swsusp_free();
260 goto Thaw;
263 } 261 }
264 262
265 mb(); 263 mb();
@@ -268,9 +266,7 @@ static int software_resume(void)
268 swsusp_resume(); 266 swsusp_resume();
269 pr_debug("PM: Restore failed, recovering.n"); 267 pr_debug("PM: Restore failed, recovering.n");
270 device_resume(); 268 device_resume();
271 Free: 269 Thaw:
272 swsusp_free();
273 Cleanup:
274 unprepare_processes(); 270 unprepare_processes();
275 Done: 271 Done:
276 /* For success case, the suspend path will release the lock */ 272 /* For success case, the suspend path will release the lock */
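In free_some_memory() the manual wrap-around counter collapses into p[i++ % 4]. The same spinner as a standalone snippet; the spinner string and loop bound here are made up:

#include <stdio.h>

int main(void)
{
    const char *p = "|/-\\";              /* four-character spinner */
    unsigned int i = 0;
    int work;

    for (work = 0; work < 20; work++) {
        printf("\b%c", p[i++ % 4]);       /* % 4 replaces the manual "if (i > 3) i = 0" reset */
        fflush(stdout);
    }
    printf("\bdone\n");
    return 0;
}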
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 22bdc93cc038..18d7d693fbba 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -167,6 +167,8 @@ static int enter_state(suspend_state_t state)
167{ 167{
168 int error; 168 int error;
169 169
170 if (pm_ops->valid && !pm_ops->valid(state))
171 return -ENODEV;
170 if (down_trylock(&pm_sem)) 172 if (down_trylock(&pm_sem))
171 return -EBUSY; 173 return -EBUSY;
172 174
@@ -236,7 +238,8 @@ static ssize_t state_show(struct subsystem * subsys, char * buf)
236 char * s = buf; 238 char * s = buf;
237 239
238 for (i = 0; i < PM_SUSPEND_MAX; i++) { 240 for (i = 0; i < PM_SUSPEND_MAX; i++) {
239 if (pm_states[i]) 241 if (pm_states[i] && pm_ops && (!pm_ops->valid
242 ||(pm_ops->valid && pm_ops->valid(i))))
240 s += sprintf(s,"%s ",pm_states[i]); 243 s += sprintf(s,"%s ",pm_states[i]);
241 } 244 }
242 s += sprintf(s,"\n"); 245 s += sprintf(s,"\n");
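Both main.c hunks rely on an optional pm_ops->valid() hook: a sleep state is advertised or entered only if the platform provides no valid() callback at all, or provides one that accepts the state. Reduced to a standalone sketch with an invented enum and ops structure:

#include <stdio.h>
#include <stdbool.h>

typedef enum { STATE_STANDBY, STATE_MEM, STATE_MAX } pm_state_t;

struct platform_pm_ops {
    bool (*valid)(pm_state_t state);      /* NULL means "all states allowed" */
};

static bool mem_only(pm_state_t state)
{
    return state == STATE_MEM;
}

static bool state_supported(const struct platform_pm_ops *ops, pm_state_t s)
{
    return ops && (!ops->valid || ops->valid(s));
}

int main(void)
{
    struct platform_pm_ops ops = { .valid = mem_only };
    const char *names[STATE_MAX] = { "standby", "mem" };
    int s;

    for (s = 0; s < STATE_MAX; s++)
        if (state_supported(&ops, (pm_state_t)s))
            printf("%s ", names[s]);
    printf("\n");
    return 0;
}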
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 6748de23e83c..d4fd96a135ab 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -53,3 +53,20 @@ extern void thaw_processes(void);
53 53
54extern int pm_prepare_console(void); 54extern int pm_prepare_console(void);
55extern void pm_restore_console(void); 55extern void pm_restore_console(void);
56
57
58/* References to section boundaries */
59extern const void __nosave_begin, __nosave_end;
60
61extern unsigned int nr_copy_pages;
62extern suspend_pagedir_t *pagedir_nosave;
63extern suspend_pagedir_t *pagedir_save;
64
65extern asmlinkage int swsusp_arch_suspend(void);
66extern asmlinkage int swsusp_arch_resume(void);
67
68extern int restore_highmem(void);
69extern struct pbe * alloc_pagedir(unsigned nr_pages);
70extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages);
71extern void swsusp_free(void);
72extern int enough_swap(unsigned nr_pages);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
new file mode 100644
index 000000000000..42a628704398
--- /dev/null
+++ b/kernel/power/snapshot.c
@@ -0,0 +1,435 @@
1/*
2 * linux/kernel/power/snapshot.c
3 *
4 * This file provides system snapshot/restore functionality.
5 *
6 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
7 *
8 * This file is released under the GPLv2, and is based on swsusp.c.
9 *
10 */
11
12
13#include <linux/module.h>
14#include <linux/mm.h>
15#include <linux/suspend.h>
16#include <linux/smp_lock.h>
17#include <linux/delay.h>
18#include <linux/bitops.h>
19#include <linux/spinlock.h>
20#include <linux/kernel.h>
21#include <linux/pm.h>
22#include <linux/device.h>
23#include <linux/bootmem.h>
24#include <linux/syscalls.h>
25#include <linux/console.h>
26#include <linux/highmem.h>
27
28#include <asm/uaccess.h>
29#include <asm/mmu_context.h>
30#include <asm/pgtable.h>
31#include <asm/tlbflush.h>
32#include <asm/io.h>
33
34#include "power.h"
35
36#ifdef CONFIG_HIGHMEM
37struct highmem_page {
38 char *data;
39 struct page *page;
40 struct highmem_page *next;
41};
42
43static struct highmem_page *highmem_copy;
44
45static int save_highmem_zone(struct zone *zone)
46{
47 unsigned long zone_pfn;
48 mark_free_pages(zone);
49 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
50 struct page *page;
51 struct highmem_page *save;
52 void *kaddr;
53 unsigned long pfn = zone_pfn + zone->zone_start_pfn;
54
55 if (!(pfn%1000))
56 printk(".");
57 if (!pfn_valid(pfn))
58 continue;
59 page = pfn_to_page(pfn);
60 /*
61 * This condition results from rvmalloc() sans vmalloc_32()
62 * and architectural memory reservations. This should be
63 * corrected eventually when the cases giving rise to this
64 * are better understood.
65 */
66 if (PageReserved(page)) {
67 printk("highmem reserved page?!\n");
68 continue;
69 }
70 BUG_ON(PageNosave(page));
71 if (PageNosaveFree(page))
72 continue;
73 save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
74 if (!save)
75 return -ENOMEM;
76 save->next = highmem_copy;
77 save->page = page;
78 save->data = (void *) get_zeroed_page(GFP_ATOMIC);
79 if (!save->data) {
80 kfree(save);
81 return -ENOMEM;
82 }
83 kaddr = kmap_atomic(page, KM_USER0);
84 memcpy(save->data, kaddr, PAGE_SIZE);
85 kunmap_atomic(kaddr, KM_USER0);
86 highmem_copy = save;
87 }
88 return 0;
89}
90
91
92static int save_highmem(void)
93{
94 struct zone *zone;
95 int res = 0;
96
97 pr_debug("swsusp: Saving Highmem\n");
98 for_each_zone (zone) {
99 if (is_highmem(zone))
100 res = save_highmem_zone(zone);
101 if (res)
102 return res;
103 }
104 return 0;
105}
106
107int restore_highmem(void)
108{
109 printk("swsusp: Restoring Highmem\n");
110 while (highmem_copy) {
111 struct highmem_page *save = highmem_copy;
112 void *kaddr;
113 highmem_copy = save->next;
114
115 kaddr = kmap_atomic(save->page, KM_USER0);
116 memcpy(kaddr, save->data, PAGE_SIZE);
117 kunmap_atomic(kaddr, KM_USER0);
118 free_page((long) save->data);
119 kfree(save);
120 }
121 return 0;
122}
123#else
124static int save_highmem(void) { return 0; }
125int restore_highmem(void) { return 0; }
126#endif /* CONFIG_HIGHMEM */
127
128
129static int pfn_is_nosave(unsigned long pfn)
130{
131 unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
132 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
133 return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
134}
135
136/**
137 * saveable - Determine whether a page should be cloned or not.
138 * @pfn: The page
139 *
140 * We save a page if it's Reserved, and not in the range of pages
141 * statically defined as 'unsaveable', or if it isn't reserved, and
142 * isn't part of a free chunk of pages.
143 */
144
145static int saveable(struct zone *zone, unsigned long *zone_pfn)
146{
147 unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
148 struct page *page;
149
150 if (!pfn_valid(pfn))
151 return 0;
152
153 page = pfn_to_page(pfn);
154 BUG_ON(PageReserved(page) && PageNosave(page));
155 if (PageNosave(page))
156 return 0;
157 if (PageReserved(page) && pfn_is_nosave(pfn)) {
158 pr_debug("[nosave pfn 0x%lx]", pfn);
159 return 0;
160 }
161 if (PageNosaveFree(page))
162 return 0;
163
164 return 1;
165}
166
167static unsigned count_data_pages(void)
168{
169 struct zone *zone;
170 unsigned long zone_pfn;
171 unsigned n;
172
173 n = 0;
174 for_each_zone (zone) {
175 if (is_highmem(zone))
176 continue;
177 mark_free_pages(zone);
178 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
179 n += saveable(zone, &zone_pfn);
180 }
181 return n;
182}
183
184static void copy_data_pages(struct pbe *pblist)
185{
186 struct zone *zone;
187 unsigned long zone_pfn;
188 struct pbe *pbe, *p;
189
190 pbe = pblist;
191 for_each_zone (zone) {
192 if (is_highmem(zone))
193 continue;
194 mark_free_pages(zone);
195 /* This is necessary for swsusp_free() */
196 for_each_pb_page (p, pblist)
197 SetPageNosaveFree(virt_to_page(p));
198 for_each_pbe (p, pblist)
199 SetPageNosaveFree(virt_to_page(p->address));
200 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
201 if (saveable(zone, &zone_pfn)) {
202 struct page *page;
203 page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
204 BUG_ON(!pbe);
205 pbe->orig_address = (unsigned long)page_address(page);
206 /* copy_page is not usable for copying task structs. */
207 memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
208 pbe = pbe->next;
209 }
210 }
211 }
212 BUG_ON(pbe);
213}
214
215
216/**
217 * free_pagedir - free pages allocated with alloc_pagedir()
218 */
219
220static void free_pagedir(struct pbe *pblist)
221{
222 struct pbe *pbe;
223
224 while (pblist) {
225 pbe = (pblist + PB_PAGE_SKIP)->next;
226 ClearPageNosave(virt_to_page(pblist));
227 ClearPageNosaveFree(virt_to_page(pblist));
228 free_page((unsigned long)pblist);
229 pblist = pbe;
230 }
231}
232
233/**
234 * fill_pb_page - Create a list of PBEs on a given memory page
235 */
236
237static inline void fill_pb_page(struct pbe *pbpage)
238{
239 struct pbe *p;
240
241 p = pbpage;
242 pbpage += PB_PAGE_SKIP;
243 do
244 p->next = p + 1;
245 while (++p < pbpage);
246}
247
248/**
249 * create_pbe_list - Create a list of PBEs on top of a given chain
250 * of memory pages allocated with alloc_pagedir()
251 */
252
253void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
254{
255 struct pbe *pbpage, *p;
256 unsigned num = PBES_PER_PAGE;
257
258 for_each_pb_page (pbpage, pblist) {
259 if (num >= nr_pages)
260 break;
261
262 fill_pb_page(pbpage);
263 num += PBES_PER_PAGE;
264 }
265 if (pbpage) {
266 for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
267 p->next = p + 1;
268 p->next = NULL;
269 }
270 pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
271}
272
273static void *alloc_image_page(void)
274{
275 void *res = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
276 if (res) {
277 SetPageNosave(virt_to_page(res));
278 SetPageNosaveFree(virt_to_page(res));
279 }
280 return res;
281}
282
283/**
284 * alloc_pagedir - Allocate the page directory.
285 *
286 * First, determine exactly how many pages we need and
287 * allocate them.
288 *
289 * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
290 * struct pbe elements (pbes) and the last element in the page points
291 * to the next page.
292 *
293 * On each page we set up a list of struct_pbe elements.
294 */
295
296struct pbe *alloc_pagedir(unsigned nr_pages)
297{
298 unsigned num;
299 struct pbe *pblist, *pbe;
300
301 if (!nr_pages)
302 return NULL;
303
304 pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
305 pblist = alloc_image_page();
306 /* FIXME: rewrite this ugly loop */
307 for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
308 pbe = pbe->next, num += PBES_PER_PAGE) {
309 pbe += PB_PAGE_SKIP;
310 pbe->next = alloc_image_page();
311 }
312 if (!pbe) { /* get_zeroed_page() failed */
313 free_pagedir(pblist);
314 pblist = NULL;
315 }
316 return pblist;
317}
318
319/**
320 * Free pages we allocated for suspend. Suspend pages are allocated
321 * before atomic copy, so we need to free them after resume.
322 */
323
324void swsusp_free(void)
325{
326 struct zone *zone;
327 unsigned long zone_pfn;
328
329 for_each_zone(zone) {
330 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
331 if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
332 struct page * page;
333 page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
334 if (PageNosave(page) && PageNosaveFree(page)) {
335 ClearPageNosave(page);
336 ClearPageNosaveFree(page);
337 free_page((long) page_address(page));
338 }
339 }
340 }
341}
342
343
344/**
345 * enough_free_mem - Make sure we have enough free memory to snapshot.
346 *
347 * Returns TRUE or FALSE after checking the number of available
348 * free pages.
349 */
350
351static int enough_free_mem(unsigned nr_pages)
352{
353 pr_debug("swsusp: available memory: %u pages\n", nr_free_pages());
354 return nr_free_pages() > (nr_pages + PAGES_FOR_IO +
355 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
356}
357
358
359static struct pbe *swsusp_alloc(unsigned nr_pages)
360{
361 struct pbe *pblist, *p;
362
363 if (!(pblist = alloc_pagedir(nr_pages))) {
364 printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
365 return NULL;
366 }
367 create_pbe_list(pblist, nr_pages);
368
369 for_each_pbe (p, pblist) {
370 p->address = (unsigned long)alloc_image_page();
371 if (!p->address) {
372 printk(KERN_ERR "suspend: Allocating image pages failed.\n");
373 swsusp_free();
374 return NULL;
375 }
376 }
377
378 return pblist;
379}
380
381asmlinkage int swsusp_save(void)
382{
383 unsigned nr_pages;
384
385 pr_debug("swsusp: critical section: \n");
386 if (save_highmem()) {
387 printk(KERN_CRIT "swsusp: Not enough free pages for highmem\n");
388 restore_highmem();
389 return -ENOMEM;
390 }
391
392 drain_local_pages();
393 nr_pages = count_data_pages();
394 printk("swsusp: Need to copy %u pages\n", nr_pages);
395
396 pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
397 nr_pages,
398 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
399 PAGES_FOR_IO, nr_free_pages());
400
401 /* This is needed because of the fixed size of swsusp_info */
402 if (MAX_PBES < (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE)
403 return -ENOSPC;
404
405 if (!enough_free_mem(nr_pages)) {
406 printk(KERN_ERR "swsusp: Not enough free memory\n");
407 return -ENOMEM;
408 }
409
410 if (!enough_swap(nr_pages)) {
411 printk(KERN_ERR "swsusp: Not enough free swap\n");
412 return -ENOSPC;
413 }
414
415 pagedir_nosave = swsusp_alloc(nr_pages);
416 if (!pagedir_nosave)
417 return -ENOMEM;
418
419 /* During allocating of suspend pagedir, new cold pages may appear.
420 * Kill them.
421 */
422 drain_local_pages();
423 copy_data_pages(pagedir_nosave);
424
425 /*
426 * End of critical section. From now on, we can write to memory,
427 * but we should not touch disk. This specially means we must _not_
428 * touch swap space! Except we must write out our image of course.
429 */
430
431 nr_copy_pages = nr_pages;
432
433 printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages);
434 return 0;
435}
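alloc_pagedir() and create_pbe_list() build the page directory as a chain of pages, each holding PBES_PER_PAGE struct pbe slots, with the last slot of a page pointing at the next page; that is why enough_free_mem() adds roughly one extra page per PBES_PER_PAGE entries. A simplified userspace model of the layout, with malloc() standing in for get_zeroed_page() and deliberately tiny constants; all names and numbers are invented:

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_PAGE 4                /* kernel: PBES_PER_PAGE, far larger */
#define PAGE_SKIP        (ENTRIES_PER_PAGE - 1)

struct entry {
    unsigned long data;
    struct entry *next;                   /* only the slot at PAGE_SKIP uses it */
};

/* allocate enough "pages" to hold nr_entries data slots, chained through
 * the last slot of each page */
static struct entry *alloc_dir(unsigned nr_entries)
{
    unsigned nr_pages = (nr_entries + PAGE_SKIP - 1) / PAGE_SKIP;
    struct entry *head = NULL, *prev = NULL;
    unsigned i;

    for (i = 0; i < nr_pages; i++) {
        struct entry *page = calloc(ENTRIES_PER_PAGE, sizeof(*page));

        if (!page)
            exit(1);
        if (prev)
            prev[PAGE_SKIP].next = page;  /* link slot of the previous page */
        else
            head = page;
        prev = page;
    }
    return head;
}

int main(void)
{
    unsigned nr = 10;                     /* needs ceil(10 / 3) = 4 pages here */
    struct entry *dir = alloc_dir(nr), *page;
    unsigned filled = 0, pages = 0;

    for (page = dir; page; page = page[PAGE_SKIP].next) {
        int i;

        pages++;
        for (i = 0; i < PAGE_SKIP && filled < nr; i++, filled++)
            page[i].data = filled;        /* where the snapshot data would go */
    }
    printf("%u entries spread over %u chained pages\n", filled, pages);
    return 0;                             /* a real free_pagedir() walks the same links */
}

Chaining page-sized chunks this way lets the directory grow without ever needing one large contiguous allocation, which matters when memory is already tight at suspend time.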
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 016504ccfccf..12db1d2ad61f 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1,11 +1,10 @@
1/* 1/*
2 * linux/kernel/power/swsusp.c 2 * linux/kernel/power/swsusp.c
3 * 3 *
4 * This file is to realize architecture-independent 4 * This file provides code to write suspend image to swap and read it back.
5 * machine suspend feature using pretty near only high-level routines
6 * 5 *
7 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu> 6 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
8 * Copyright (C) 1998,2001-2004 Pavel Machek <pavel@suse.cz> 7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
9 * 8 *
10 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
11 * 10 *
@@ -47,11 +46,7 @@
47#include <linux/utsname.h> 46#include <linux/utsname.h>
48#include <linux/version.h> 47#include <linux/version.h>
49#include <linux/delay.h> 48#include <linux/delay.h>
50#include <linux/reboot.h>
51#include <linux/bitops.h> 49#include <linux/bitops.h>
52#include <linux/vt_kern.h>
53#include <linux/kbd_kern.h>
54#include <linux/keyboard.h>
55#include <linux/spinlock.h> 50#include <linux/spinlock.h>
56#include <linux/genhd.h> 51#include <linux/genhd.h>
57#include <linux/kernel.h> 52#include <linux/kernel.h>
@@ -63,10 +58,8 @@
63#include <linux/swapops.h> 58#include <linux/swapops.h>
64#include <linux/bootmem.h> 59#include <linux/bootmem.h>
65#include <linux/syscalls.h> 60#include <linux/syscalls.h>
66#include <linux/console.h>
67#include <linux/highmem.h> 61#include <linux/highmem.h>
68#include <linux/bio.h> 62#include <linux/bio.h>
69#include <linux/mount.h>
70 63
71#include <asm/uaccess.h> 64#include <asm/uaccess.h>
72#include <asm/mmu_context.h> 65#include <asm/mmu_context.h>
@@ -84,16 +77,10 @@
84#define MAXKEY 32 77#define MAXKEY 32
85#define MAXIV 32 78#define MAXIV 32
86 79
87/* References to section boundaries */
88extern const void __nosave_begin, __nosave_end;
89
90/* Variables to be preserved over suspend */
91static int nr_copy_pages_check;
92
93extern char resume_file[]; 80extern char resume_file[];
94 81
95/* Local variables that should not be affected by save */ 82/* Local variables that should not be affected by save */
96static unsigned int nr_copy_pages __nosavedata = 0; 83unsigned int nr_copy_pages __nosavedata = 0;
97 84
98/* Suspend pagedir is allocated before final copy, therefore it 85/* Suspend pagedir is allocated before final copy, therefore it
99 must be freed after resume 86 must be freed after resume
@@ -109,7 +96,7 @@ static unsigned int nr_copy_pages __nosavedata = 0;
109 MMU hardware. 96 MMU hardware.
110 */ 97 */
111suspend_pagedir_t *pagedir_nosave __nosavedata = NULL; 98suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
112static suspend_pagedir_t *pagedir_save; 99suspend_pagedir_t *pagedir_save;
113 100
114#define SWSUSP_SIG "S1SUSPEND" 101#define SWSUSP_SIG "S1SUSPEND"
115 102
@@ -124,12 +111,6 @@ static struct swsusp_header {
124static struct swsusp_info swsusp_info; 111static struct swsusp_info swsusp_info;
125 112
126/* 113/*
127 * XXX: We try to keep some more pages free so that I/O operations succeed
128 * without paging. Might this be more?
129 */
130#define PAGES_FOR_IO 512
131
132/*
133 * Saving part... 114 * Saving part...
134 */ 115 */
135 116
@@ -552,353 +533,6 @@ static int write_suspend_image(void)
552 goto Done; 533 goto Done;
553} 534}
554 535
555
556#ifdef CONFIG_HIGHMEM
557struct highmem_page {
558 char *data;
559 struct page *page;
560 struct highmem_page *next;
561};
562
563static struct highmem_page *highmem_copy;
564
565static int save_highmem_zone(struct zone *zone)
566{
567 unsigned long zone_pfn;
568 mark_free_pages(zone);
569 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
570 struct page *page;
571 struct highmem_page *save;
572 void *kaddr;
573 unsigned long pfn = zone_pfn + zone->zone_start_pfn;
574
575 if (!(pfn%1000))
576 printk(".");
577 if (!pfn_valid(pfn))
578 continue;
579 page = pfn_to_page(pfn);
580 /*
581 * PageReserved results from rvmalloc() sans vmalloc_32()
582 * and architectural memory reservations.
583 *
584 * rvmalloc should not cause this, because all implementations
585 * appear to always be using vmalloc_32 on architectures with
586 * highmem. This is a good thing, because we would like to save
587 * rvmalloc pages.
588 *
589 * It appears to be triggered by pages which do not point to
590 * valid memory (see arch/i386/mm/init.c:one_highpage_init(),
591 * which sets PageReserved if the page does not point to valid
592 * RAM.
593 *
594 * XXX: must remove usage of PageReserved!
595 */
596 if (PageReserved(page))
597 continue;
598 BUG_ON(PageNosave(page));
599 if (PageNosaveFree(page))
600 continue;
601 save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
602 if (!save)
603 return -ENOMEM;
604 save->next = highmem_copy;
605 save->page = page;
606 save->data = (void *) get_zeroed_page(GFP_ATOMIC);
607 if (!save->data) {
608 kfree(save);
609 return -ENOMEM;
610 }
611 kaddr = kmap_atomic(page, KM_USER0);
612 memcpy(save->data, kaddr, PAGE_SIZE);
613 kunmap_atomic(kaddr, KM_USER0);
614 highmem_copy = save;
615 }
616 return 0;
617}
618#endif /* CONFIG_HIGHMEM */
619
620
621static int save_highmem(void)
622{
623#ifdef CONFIG_HIGHMEM
624 struct zone *zone;
625 int res = 0;
626
627 pr_debug("swsusp: Saving Highmem\n");
628 for_each_zone (zone) {
629 if (is_highmem(zone))
630 res = save_highmem_zone(zone);
631 if (res)
632 return res;
633 }
634#endif
635 return 0;
636}
637
638static int restore_highmem(void)
639{
640#ifdef CONFIG_HIGHMEM
641 printk("swsusp: Restoring Highmem\n");
642 while (highmem_copy) {
643 struct highmem_page *save = highmem_copy;
644 void *kaddr;
645 highmem_copy = save->next;
646
647 kaddr = kmap_atomic(save->page, KM_USER0);
648 memcpy(kaddr, save->data, PAGE_SIZE);
649 kunmap_atomic(kaddr, KM_USER0);
650 free_page((long) save->data);
651 kfree(save);
652 }
653#endif
654 return 0;
655}
656
657
658static int pfn_is_nosave(unsigned long pfn)
659{
660 unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
661 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
662 return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
663}
664
665/**
666 * saveable - Determine whether a page should be cloned or not.
667 * @pfn: The page
668 *
669 * We save a page if it's Reserved, and not in the range of pages
670 * statically defined as 'unsaveable', or if it isn't reserved, and
671 * isn't part of a free chunk of pages.
672 */
673
674static int saveable(struct zone * zone, unsigned long * zone_pfn)
675{
676 unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
677 struct page * page;
678
679 if (!pfn_valid(pfn))
680 return 0;
681
682 page = pfn_to_page(pfn);
683 if (PageNosave(page))
684 return 0;
685 if (pfn_is_nosave(pfn)) {
686 pr_debug("[nosave pfn 0x%lx]", pfn);
687 return 0;
688 }
689 if (PageNosaveFree(page))
690 return 0;
691
692 return 1;
693}
694
695static void count_data_pages(void)
696{
697 struct zone *zone;
698 unsigned long zone_pfn;
699
700 nr_copy_pages = 0;
701
702 for_each_zone (zone) {
703 if (is_highmem(zone))
704 continue;
705 mark_free_pages(zone);
706 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
707 nr_copy_pages += saveable(zone, &zone_pfn);
708 }
709}
710
711
712static void copy_data_pages(void)
713{
714 struct zone *zone;
715 unsigned long zone_pfn;
716 struct pbe * pbe = pagedir_nosave;
717
718 pr_debug("copy_data_pages(): pages to copy: %d\n", nr_copy_pages);
719 for_each_zone (zone) {
720 if (is_highmem(zone))
721 continue;
722 mark_free_pages(zone);
723 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
724 if (saveable(zone, &zone_pfn)) {
725 struct page * page;
726 page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
727 BUG_ON(!pbe);
728 pbe->orig_address = (long) page_address(page);
729 /* copy_page is not usable for copying task structs. */
730 memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
731 pbe = pbe->next;
732 }
733 }
734 }
735 BUG_ON(pbe);
736}
737
738
739/**
740 * calc_nr - Determine the number of pages needed for a pbe list.
741 */
742
743static int calc_nr(int nr_copy)
744{
745 return nr_copy + (nr_copy+PBES_PER_PAGE-2)/(PBES_PER_PAGE-1);
746}
747
748/**
749 * free_pagedir - free pages allocated with alloc_pagedir()
750 */
751
752static inline void free_pagedir(struct pbe *pblist)
753{
754 struct pbe *pbe;
755
756 while (pblist) {
757 pbe = (pblist + PB_PAGE_SKIP)->next;
758 free_page((unsigned long)pblist);
759 pblist = pbe;
760 }
761}
762
763/**
764 * fill_pb_page - Create a list of PBEs on a given memory page
765 */
766
767static inline void fill_pb_page(struct pbe *pbpage)
768{
769 struct pbe *p;
770
771 p = pbpage;
772 pbpage += PB_PAGE_SKIP;
773 do
774 p->next = p + 1;
775 while (++p < pbpage);
776}
777
778/**
779 * create_pbe_list - Create a list of PBEs on top of a given chain
780 * of memory pages allocated with alloc_pagedir()
781 */
782
783static void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
784{
785 struct pbe *pbpage, *p;
786 unsigned num = PBES_PER_PAGE;
787
788 for_each_pb_page (pbpage, pblist) {
789 if (num >= nr_pages)
790 break;
791
792 fill_pb_page(pbpage);
793 num += PBES_PER_PAGE;
794 }
795 if (pbpage) {
796 for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
797 p->next = p + 1;
798 p->next = NULL;
799 }
800 pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
801}
802
803/**
804 * alloc_pagedir - Allocate the page directory.
805 *
806 * First, determine exactly how many pages we need and
807 * allocate them.
808 *
809 * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
810 * struct pbe elements (pbes) and the last element in the page points
811 * to the next page.
812 *
813 * On each page we set up a list of struct_pbe elements.
814 */
815
816static struct pbe * alloc_pagedir(unsigned nr_pages)
817{
818 unsigned num;
819 struct pbe *pblist, *pbe;
820
821 if (!nr_pages)
822 return NULL;
823
824 pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
825 pblist = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
826 for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
827 pbe = pbe->next, num += PBES_PER_PAGE) {
828 pbe += PB_PAGE_SKIP;
829 pbe->next = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
830 }
831 if (!pbe) { /* get_zeroed_page() failed */
832 free_pagedir(pblist);
833 pblist = NULL;
834 }
835 return pblist;
836}
837
838/**
839 * free_image_pages - Free pages allocated for snapshot
840 */
841
842static void free_image_pages(void)
843{
844 struct pbe * p;
845
846 for_each_pbe (p, pagedir_save) {
847 if (p->address) {
848 ClearPageNosave(virt_to_page(p->address));
849 free_page(p->address);
850 p->address = 0;
851 }
852 }
853}
854
855/**
856 * alloc_image_pages - Allocate pages for the snapshot.
857 */
858
859static int alloc_image_pages(void)
860{
861 struct pbe * p;
862
863 for_each_pbe (p, pagedir_save) {
864 p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
865 if (!p->address)
866 return -ENOMEM;
867 SetPageNosave(virt_to_page(p->address));
868 }
869 return 0;
870}
871
872/* Free pages we allocated for suspend. Suspend pages are alocated
873 * before atomic copy, so we need to free them after resume.
874 */
875void swsusp_free(void)
876{
877 BUG_ON(PageNosave(virt_to_page(pagedir_save)));
878 BUG_ON(PageNosaveFree(virt_to_page(pagedir_save)));
879 free_image_pages();
880 free_pagedir(pagedir_save);
881}
882
883
884/**
885 * enough_free_mem - Make sure we enough free memory to snapshot.
886 *
887 * Returns TRUE or FALSE after checking the number of available
888 * free pages.
889 */
890
891static int enough_free_mem(void)
892{
893 if (nr_free_pages() < (nr_copy_pages + PAGES_FOR_IO)) {
894 pr_debug("swsusp: Not enough free pages: Have %d\n",
895 nr_free_pages());
896 return 0;
897 }
898 return 1;
899}
900
901
902/** 536/**
903 * enough_swap - Make sure we have enough swap to save the image. 537 * enough_swap - Make sure we have enough swap to save the image.
904 * 538 *
@@ -909,87 +543,14 @@ static int enough_free_mem(void)
909 * We should only consider resume_device. 543 * We should only consider resume_device.
910 */ 544 */
911 545
912static int enough_swap(void) 546int enough_swap(unsigned nr_pages)
913{ 547{
914 struct sysinfo i; 548 struct sysinfo i;
915 549
916 si_swapinfo(&i); 550 si_swapinfo(&i);
917 if (i.freeswap < (nr_copy_pages + PAGES_FOR_IO)) { 551 pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
918 pr_debug("swsusp: Not enough swap. Need %ld\n",i.freeswap); 552 return i.freeswap > (nr_pages + PAGES_FOR_IO +
919 return 0; 553 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
920 }
921 return 1;
922}
923
924static int swsusp_alloc(void)
925{
926 int error;
927
928 pagedir_nosave = NULL;
929 nr_copy_pages = calc_nr(nr_copy_pages);
930 nr_copy_pages_check = nr_copy_pages;
931
932 pr_debug("suspend: (pages needed: %d + %d free: %d)\n",
933 nr_copy_pages, PAGES_FOR_IO, nr_free_pages());
934
935 if (!enough_free_mem())
936 return -ENOMEM;
937
938 if (!enough_swap())
939 return -ENOSPC;
940
941 if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
942 !!(nr_copy_pages % PBES_PER_PAGE))
943 return -ENOSPC;
944
945 if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
946 printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
947 return -ENOMEM;
948 }
949 create_pbe_list(pagedir_save, nr_copy_pages);
950 pagedir_nosave = pagedir_save;
951 if ((error = alloc_image_pages())) {
952 printk(KERN_ERR "suspend: Allocating image pages failed.\n");
953 swsusp_free();
954 return error;
955 }
956
957 return 0;
958}
959
960static int suspend_prepare_image(void)
961{
962 int error;
963
964 pr_debug("swsusp: critical section: \n");
965 if (save_highmem()) {
966 printk(KERN_CRIT "Suspend machine: Not enough free pages for highmem\n");
967 restore_highmem();
968 return -ENOMEM;
969 }
970
971 drain_local_pages();
972 count_data_pages();
973 printk("swsusp: Need to copy %u pages\n", nr_copy_pages);
974
975 error = swsusp_alloc();
976 if (error)
977 return error;
978
979 /* During allocating of suspend pagedir, new cold pages may appear.
980 * Kill them.
981 */
982 drain_local_pages();
983 copy_data_pages();
984
985 /*
986 * End of critical section. From now on, we can write to memory,
987 * but we should not touch disk. This specially means we must _not_
988 * touch swap space! Except we must write out our image of course.
989 */
990
991 printk("swsusp: critical section/: done (%d pages copied)\n", nr_copy_pages );
992 return 0;
993} 554}
994 555
995 556
@@ -1001,7 +562,7 @@ static int suspend_prepare_image(void)
1001int swsusp_write(void) 562int swsusp_write(void)
1002{ 563{
1003 int error; 564 int error;
1004 device_resume(); 565
1005 lock_swapdevices(); 566 lock_swapdevices();
1006 error = write_suspend_image(); 567 error = write_suspend_image();
1007 /* This will unlock ignored swap devices since writing is finished */ 568 /* This will unlock ignored swap devices since writing is finished */
@@ -1011,14 +572,6 @@ int swsusp_write(void)
1011} 572}
1012 573
1013 574
1014extern asmlinkage int swsusp_arch_suspend(void);
1015extern asmlinkage int swsusp_arch_resume(void);
1016
1017
1018asmlinkage int swsusp_save(void)
1019{
1020 return suspend_prepare_image();
1021}
1022 575
1023int swsusp_suspend(void) 576int swsusp_suspend(void)
1024{ 577{
@@ -1050,7 +603,6 @@ int swsusp_suspend(void)
1050 printk(KERN_ERR "Error %d suspending\n", error); 603 printk(KERN_ERR "Error %d suspending\n", error);
1051 /* Restore control flow magically appears here */ 604 /* Restore control flow magically appears here */
1052 restore_processor_state(); 605 restore_processor_state();
1053 BUG_ON (nr_copy_pages_check != nr_copy_pages);
1054 restore_highmem(); 606 restore_highmem();
1055 device_power_up(); 607 device_power_up();
1056 local_irq_enable(); 608 local_irq_enable();
@@ -1070,6 +622,11 @@ int swsusp_resume(void)
1070 * execution continues at place where swsusp_arch_suspend was called 622 * execution continues at place where swsusp_arch_suspend was called
1071 */ 623 */
1072 BUG_ON(!error); 624 BUG_ON(!error);
625 /* The only reason why swsusp_arch_resume() can fail is memory being
626 * very tight, so we have to free it as soon as we can to avoid
627 * subsequent failures
628 */
629 swsusp_free();
1073 restore_processor_state(); 630 restore_processor_state();
1074 restore_highmem(); 631 restore_highmem();
1075 touch_softlockup_watchdog(); 632 touch_softlockup_watchdog();
@@ -1085,54 +642,28 @@ int swsusp_resume(void)
1085 * 642 *
1086 * We don't know which pages are usable until we allocate them. 643 * We don't know which pages are usable until we allocate them.
1087 * 644 *
1088 * Allocated but unusable (ie eaten) memory pages are linked together 645 * Allocated but unusable (ie eaten) memory pages are marked so that
1089 * to create a list, so that we can free them easily 646 * swsusp_free() can release them
1090 *
1091 * We could have used a type other than (void *)
1092 * for this purpose, but ...
1093 */ 647 */
1094static void **eaten_memory = NULL;
1095
1096static inline void eat_page(void *page)
1097{
1098 void **c;
1099
1100 c = eaten_memory;
1101 eaten_memory = page;
1102 *eaten_memory = c;
1103}
1104 648
1105unsigned long get_usable_page(gfp_t gfp_mask) 649unsigned long get_safe_page(gfp_t gfp_mask)
1106{ 650{
1107 unsigned long m; 651 unsigned long m;
1108 652
1109 m = get_zeroed_page(gfp_mask); 653 do {
1110 while (!PageNosaveFree(virt_to_page(m))) {
1111 eat_page((void *)m);
1112 m = get_zeroed_page(gfp_mask); 654 m = get_zeroed_page(gfp_mask);
1113 if (!m) 655 if (m && PageNosaveFree(virt_to_page(m)))
1114 break; 656 /* This is for swsusp_free() */
657 SetPageNosave(virt_to_page(m));
658 } while (m && PageNosaveFree(virt_to_page(m)));
659 if (m) {
660 /* This is for swsusp_free() */
661 SetPageNosave(virt_to_page(m));
662 SetPageNosaveFree(virt_to_page(m));
1115 } 663 }
1116 return m; 664 return m;
1117} 665}
1118 666
1119void free_eaten_memory(void)
1120{
1121 unsigned long m;
1122 void **c;
1123 int i = 0;
1124
1125 c = eaten_memory;
1126 while (c) {
1127 m = (unsigned long)c;
1128 c = *c;
1129 free_page(m);
1130 i++;
1131 }
1132 eaten_memory = NULL;
1133 pr_debug("swsusp: %d unused pages freed\n", i);
1134}
1135
1136/** 667/**
1137 * check_pagedir - We ensure here that pages that the PBEs point to 668 * check_pagedir - We ensure here that pages that the PBEs point to
1138 * won't collide with pages where we're going to restore from the loaded 669 * won't collide with pages where we're going to restore from the loaded
@@ -1150,7 +681,7 @@ static int check_pagedir(struct pbe *pblist)
1150 p->address = 0UL; 681 p->address = 0UL;
1151 682
1152 for_each_pbe (p, pblist) { 683 for_each_pbe (p, pblist) {
1153 p->address = get_usable_page(GFP_ATOMIC); 684 p->address = get_safe_page(GFP_ATOMIC);
1154 if (!p->address) 685 if (!p->address)
1155 return -ENOMEM; 686 return -ENOMEM;
1156 } 687 }
@@ -1169,7 +700,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
1169 unsigned long zone_pfn; 700 unsigned long zone_pfn;
1170 struct pbe *pbpage, *tail, *p; 701 struct pbe *pbpage, *tail, *p;
1171 void *m; 702 void *m;
1172 int rel = 0, error = 0; 703 int rel = 0;
1173 704
1174 if (!pblist) /* a sanity check */ 705 if (!pblist) /* a sanity check */
1175 return NULL; 706 return NULL;
@@ -1177,41 +708,37 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
1177 pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n", 708 pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
1178 swsusp_info.pagedir_pages); 709 swsusp_info.pagedir_pages);
1179 710
1180 /* Set page flags */ 711 /* Clear page flags */
1181 712
1182 for_each_zone (zone) { 713 for_each_zone (zone) {
1183 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) 714 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
1184 SetPageNosaveFree(pfn_to_page(zone_pfn + 715 if (pfn_valid(zone_pfn + zone->zone_start_pfn))
716 ClearPageNosaveFree(pfn_to_page(zone_pfn +
1185 zone->zone_start_pfn)); 717 zone->zone_start_pfn));
1186 } 718 }
1187 719
1188 /* Clear orig addresses */ 720 /* Mark orig addresses */
1189 721
1190 for_each_pbe (p, pblist) 722 for_each_pbe (p, pblist)
1191 ClearPageNosaveFree(virt_to_page(p->orig_address)); 723 SetPageNosaveFree(virt_to_page(p->orig_address));
1192 724
1193 tail = pblist + PB_PAGE_SKIP; 725 tail = pblist + PB_PAGE_SKIP;
1194 726
1195 /* Relocate colliding pages */ 727 /* Relocate colliding pages */
1196 728
1197 for_each_pb_page (pbpage, pblist) { 729 for_each_pb_page (pbpage, pblist) {
1198 if (!PageNosaveFree(virt_to_page((unsigned long)pbpage))) { 730 if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
1199 m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD); 731 m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
1200 if (!m) { 732 if (!m)
1201 error = -ENOMEM; 733 return NULL;
1202 break;
1203 }
1204 memcpy(m, (void *)pbpage, PAGE_SIZE); 734 memcpy(m, (void *)pbpage, PAGE_SIZE);
1205 if (pbpage == pblist) 735 if (pbpage == pblist)
1206 pblist = (struct pbe *)m; 736 pblist = (struct pbe *)m;
1207 else 737 else
1208 tail->next = (struct pbe *)m; 738 tail->next = (struct pbe *)m;
1209
1210 eat_page((void *)pbpage);
1211 pbpage = (struct pbe *)m; 739 pbpage = (struct pbe *)m;
1212 740
1213 /* We have to link the PBEs again */ 741 /* We have to link the PBEs again */
1214
1215 for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++) 742 for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
1216 if (p->next) /* needed to save the end */ 743 if (p->next) /* needed to save the end */
1217 p->next = p + 1; 744 p->next = p + 1;
@@ -1221,15 +748,13 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
1221 tail = pbpage + PB_PAGE_SKIP; 748 tail = pbpage + PB_PAGE_SKIP;
1222 } 749 }
1223 750
1224 if (error) { 751 /* This is for swsusp_free() */
1225 printk("\nswsusp: Out of memory\n\n"); 752 for_each_pb_page (pbpage, pblist) {
1226 free_pagedir(pblist); 753 SetPageNosave(virt_to_page(pbpage));
1227 free_eaten_memory(); 754 SetPageNosaveFree(virt_to_page(pbpage));
1228 pblist = NULL; 755 }
1229 /* Is this even worth handling? It should never ever happen, and we 756
1230 have just lost user's state, anyway... */ 757 printk("swsusp: Relocated %d pages\n", rel);
1231 } else
1232 printk("swsusp: Relocated %d pages\n", rel);
1233 758
1234 return pblist; 759 return pblist;
1235} 760}
@@ -1447,9 +972,7 @@ static int read_pagedir(struct pbe *pblist)
1447 break; 972 break;
1448 } 973 }
1449 974
1450 if (error) 975 if (!error)
1451 free_pagedir(pblist);
1452 else
1453 BUG_ON(i != swsusp_info.pagedir_pages); 976 BUG_ON(i != swsusp_info.pagedir_pages);
1454 977
1455 return error; 978 return error;
@@ -1492,15 +1015,6 @@ static int read_suspend_image(void)
1492 if (!error) 1015 if (!error)
1493 error = data_read(pagedir_nosave); 1016 error = data_read(pagedir_nosave);
1494 1017
1495 if (error) { /* We fail cleanly */
1496 free_eaten_memory();
1497 for_each_pbe (p, pagedir_nosave)
1498 if (p->address) {
1499 free_page(p->address);
1500 p->address = 0UL;
1501 }
1502 free_pagedir(pagedir_nosave);
1503 }
1504 return error; 1018 return error;
1505} 1019}
1506 1020
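The reworked enough_swap(), like the new enough_free_mem() in snapshot.c, budgets for three things: the nr_pages data copies, one directory page per PBES_PER_PAGE entries rounded up, and a fixed PAGES_FOR_IO reserve so writing the image can still make progress. A standalone recap of that arithmetic; 512 is the value of the #define removed from this file, and PBES_PER_PAGE below is an illustrative stand-in:

#include <stdio.h>

#define PAGES_FOR_IO   512                /* value of the old define in this file */
#define PBES_PER_PAGE  340                /* illustrative; real value depends on PAGE_SIZE */

static unsigned long pages_needed(unsigned long nr_pages)
{
    unsigned long pagedir = (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE;

    return nr_pages + pagedir + PAGES_FOR_IO;
}

int main(void)
{
    unsigned long nr = 20000;             /* roughly 78 MB of data with 4 KB pages */

    printf("image of %lu pages needs %lu free pages or swap slots "
           "(%lu of them for the pagedir)\n",
           nr, pages_needed(nr),
           (nr + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
    return 0;
}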
diff --git a/kernel/printk.c b/kernel/printk.c
index 4b8f0f9230a4..3cb9708209bc 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -10,7 +10,7 @@
10 * elsewhere, in preparation for a serial line console (someday). 10 * elsewhere, in preparation for a serial line console (someday).
11 * Ted Ts'o, 2/11/93. 11 * Ted Ts'o, 2/11/93.
12 * Modified for sysctl support, 1/8/97, Chris Horn. 12 * Modified for sysctl support, 1/8/97, Chris Horn.
13 * Fixed SMP synchronization, 08/08/99, Manfred Spraul 13 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
14 * manfreds@colorfullife.com 14 * manfreds@colorfullife.com
15 * Rewrote bits to get rid of console_lock 15 * Rewrote bits to get rid of console_lock
16 * 01Mar01 Andrew Morton <andrewm@uow.edu.au> 16 * 01Mar01 Andrew Morton <andrewm@uow.edu.au>
@@ -148,7 +148,7 @@ static int __init console_setup(char *str)
148 if (!strcmp(str, "ttyb")) 148 if (!strcmp(str, "ttyb"))
149 strcpy(name, "ttyS1"); 149 strcpy(name, "ttyS1");
150#endif 150#endif
151 for(s = name; *s; s++) 151 for (s = name; *s; s++)
152 if ((*s >= '0' && *s <= '9') || *s == ',') 152 if ((*s >= '0' && *s <= '9') || *s == ',')
153 break; 153 break;
154 idx = simple_strtoul(s, NULL, 10); 154 idx = simple_strtoul(s, NULL, 10);
@@ -169,11 +169,11 @@ static int __init log_buf_len_setup(char *str)
169 size = roundup_pow_of_two(size); 169 size = roundup_pow_of_two(size);
170 if (size > log_buf_len) { 170 if (size > log_buf_len) {
171 unsigned long start, dest_idx, offset; 171 unsigned long start, dest_idx, offset;
172 char * new_log_buf; 172 char *new_log_buf;
173 173
174 new_log_buf = alloc_bootmem(size); 174 new_log_buf = alloc_bootmem(size);
175 if (!new_log_buf) { 175 if (!new_log_buf) {
176 printk("log_buf_len: allocation failed\n"); 176 printk(KERN_WARNING "log_buf_len: allocation failed\n");
177 goto out; 177 goto out;
178 } 178 }
179 179
@@ -193,10 +193,9 @@ static int __init log_buf_len_setup(char *str)
193 log_end -= offset; 193 log_end -= offset;
194 spin_unlock_irqrestore(&logbuf_lock, flags); 194 spin_unlock_irqrestore(&logbuf_lock, flags);
195 195
196 printk("log_buf_len: %d\n", log_buf_len); 196 printk(KERN_NOTICE "log_buf_len: %d\n", log_buf_len);
197 } 197 }
198out: 198out:
199
200 return 1; 199 return 1;
201} 200}
202 201
@@ -217,7 +216,7 @@ __setup("log_buf_len=", log_buf_len_setup);
217 * 9 -- Return number of unread characters in the log buffer 216 * 9 -- Return number of unread characters in the log buffer
218 * 10 -- Return size of the log buffer 217 * 10 -- Return size of the log buffer
219 */ 218 */
220int do_syslog(int type, char __user * buf, int len) 219int do_syslog(int type, char __user *buf, int len)
221{ 220{
222 unsigned long i, j, limit, count; 221 unsigned long i, j, limit, count;
223 int do_clear = 0; 222 int do_clear = 0;
@@ -244,7 +243,8 @@ int do_syslog(int type, char __user * buf, int len)
244 error = -EFAULT; 243 error = -EFAULT;
245 goto out; 244 goto out;
246 } 245 }
247 error = wait_event_interruptible(log_wait, (log_start - log_end)); 246 error = wait_event_interruptible(log_wait,
247 (log_start - log_end));
248 if (error) 248 if (error)
249 goto out; 249 goto out;
250 i = 0; 250 i = 0;
@@ -264,7 +264,7 @@ int do_syslog(int type, char __user * buf, int len)
264 error = i; 264 error = i;
265 break; 265 break;
266 case 4: /* Read/clear last kernel messages */ 266 case 4: /* Read/clear last kernel messages */
267 do_clear = 1; 267 do_clear = 1;
268 /* FALL THRU */ 268 /* FALL THRU */
269 case 3: /* Read last kernel messages */ 269 case 3: /* Read last kernel messages */
270 error = -EINVAL; 270 error = -EINVAL;
@@ -288,11 +288,11 @@ int do_syslog(int type, char __user * buf, int len)
288 limit = log_end; 288 limit = log_end;
289 /* 289 /*
290 * __put_user() could sleep, and while we sleep 290 * __put_user() could sleep, and while we sleep
291 * printk() could overwrite the messages 291 * printk() could overwrite the messages
292 * we try to copy to user space. Therefore 292 * we try to copy to user space. Therefore
293 * the messages are copied in reverse. <manfreds> 293 * the messages are copied in reverse. <manfreds>
294 */ 294 */
295 for(i = 0; i < count && !error; i++) { 295 for (i = 0; i < count && !error; i++) {
296 j = limit-1-i; 296 j = limit-1-i;
297 if (j + log_buf_len < log_end) 297 if (j + log_buf_len < log_end)
298 break; 298 break;
@@ -306,10 +306,10 @@ int do_syslog(int type, char __user * buf, int len)
306 if (error) 306 if (error)
307 break; 307 break;
308 error = i; 308 error = i;
309 if(i != count) { 309 if (i != count) {
310 int offset = count-error; 310 int offset = count-error;
311 /* buffer overflow during copy, correct user buffer. */ 311 /* buffer overflow during copy, correct user buffer. */
312 for(i=0;i<error;i++) { 312 for (i = 0; i < error; i++) {
313 if (__get_user(c,&buf[i+offset]) || 313 if (__get_user(c,&buf[i+offset]) ||
314 __put_user(c,&buf[i])) { 314 __put_user(c,&buf[i])) {
315 error = -EFAULT; 315 error = -EFAULT;
@@ -351,7 +351,7 @@ out:
351 return error; 351 return error;
352} 352}
353 353
354asmlinkage long sys_syslog(int type, char __user * buf, int len) 354asmlinkage long sys_syslog(int type, char __user *buf, int len)
355{ 355{
356 return do_syslog(type, buf, len); 356 return do_syslog(type, buf, len);
357} 357}
@@ -404,21 +404,19 @@ static void call_console_drivers(unsigned long start, unsigned long end)
404 cur_index = start; 404 cur_index = start;
405 start_print = start; 405 start_print = start;
406 while (cur_index != end) { 406 while (cur_index != end) {
407 if ( msg_level < 0 && 407 if (msg_level < 0 && ((end - cur_index) > 2) &&
408 ((end - cur_index) > 2) && 408 LOG_BUF(cur_index + 0) == '<' &&
409 LOG_BUF(cur_index + 0) == '<' && 409 LOG_BUF(cur_index + 1) >= '0' &&
410 LOG_BUF(cur_index + 1) >= '0' && 410 LOG_BUF(cur_index + 1) <= '7' &&
411 LOG_BUF(cur_index + 1) <= '7' && 411 LOG_BUF(cur_index + 2) == '>') {
412 LOG_BUF(cur_index + 2) == '>')
413 {
414 msg_level = LOG_BUF(cur_index + 1) - '0'; 412 msg_level = LOG_BUF(cur_index + 1) - '0';
415 cur_index += 3; 413 cur_index += 3;
416 start_print = cur_index; 414 start_print = cur_index;
417 } 415 }
418 while (cur_index != end) { 416 while (cur_index != end) {
419 char c = LOG_BUF(cur_index); 417 char c = LOG_BUF(cur_index);
420 cur_index++;
421 418
419 cur_index++;
422 if (c == '\n') { 420 if (c == '\n') {
423 if (msg_level < 0) { 421 if (msg_level < 0) {
424 /* 422 /*
@@ -461,7 +459,7 @@ static void zap_locks(void)
461 static unsigned long oops_timestamp; 459 static unsigned long oops_timestamp;
462 460
463 if (time_after_eq(jiffies, oops_timestamp) && 461 if (time_after_eq(jiffies, oops_timestamp) &&
464 !time_after(jiffies, oops_timestamp + 30*HZ)) 462 !time_after(jiffies, oops_timestamp + 30 * HZ))
465 return; 463 return;
466 464
467 oops_timestamp = jiffies; 465 oops_timestamp = jiffies;
@@ -495,7 +493,7 @@ __attribute__((weak)) unsigned long long printk_clock(void)
495 493
496/* 494/*
497 * This is printk. It can be called from any context. We want it to work. 495 * This is printk. It can be called from any context. We want it to work.
498 * 496 *
499 * We try to grab the console_sem. If we succeed, it's easy - we log the output and 497 * We try to grab the console_sem. If we succeed, it's easy - we log the output and
500 * call the console drivers. If we fail to get the semaphore we place the output 498 * call the console drivers. If we fail to get the semaphore we place the output
501 * into the log buffer and return. The current holder of the console_sem will 499 * into the log buffer and return. The current holder of the console_sem will
@@ -639,13 +637,19 @@ EXPORT_SYMBOL(vprintk);
639 637
640#else 638#else
641 639
642asmlinkage long sys_syslog(int type, char __user * buf, int len) 640asmlinkage long sys_syslog(int type, char __user *buf, int len)
643{ 641{
644 return 0; 642 return 0;
645} 643}
646 644
647int do_syslog(int type, char __user * buf, int len) { return 0; } 645int do_syslog(int type, char __user *buf, int len)
648static void call_console_drivers(unsigned long start, unsigned long end) {} 646{
647 return 0;
648}
649
650static void call_console_drivers(unsigned long start, unsigned long end)
651{
652}
649 653
650#endif 654#endif
651 655
@@ -851,9 +855,9 @@ EXPORT_SYMBOL(console_start);
851 * print any messages that were printed by the kernel before the 855 * print any messages that were printed by the kernel before the
852 * console driver was initialized. 856 * console driver was initialized.
853 */ 857 */
854void register_console(struct console * console) 858void register_console(struct console *console)
855{ 859{
856 int i; 860 int i;
857 unsigned long flags; 861 unsigned long flags;
858 862
859 if (preferred_console < 0) 863 if (preferred_console < 0)
@@ -878,7 +882,8 @@ void register_console(struct console * console)
878 * See if this console matches one we selected on 882 * See if this console matches one we selected on
879 * the command line. 883 * the command line.
880 */ 884 */
881 for(i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) { 885 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
886 i++) {
882 if (strcmp(console_cmdline[i].name, console->name) != 0) 887 if (strcmp(console_cmdline[i].name, console->name) != 0)
883 continue; 888 continue;
884 if (console->index >= 0 && 889 if (console->index >= 0 &&
@@ -933,9 +938,9 @@ void register_console(struct console * console)
933} 938}
934EXPORT_SYMBOL(register_console); 939EXPORT_SYMBOL(register_console);
935 940
936int unregister_console(struct console * console) 941int unregister_console(struct console *console)
937{ 942{
938 struct console *a,*b; 943 struct console *a, *b;
939 int res = 1; 944 int res = 1;
940 945
941 acquire_console_sem(); 946 acquire_console_sem();
@@ -949,10 +954,10 @@ int unregister_console(struct console * console)
949 b->next = a->next; 954 b->next = a->next;
950 res = 0; 955 res = 0;
951 break; 956 break;
952 } 957 }
953 } 958 }
954 } 959 }
955 960
956 /* If last console is removed, we re-enable picking the first 961 /* If last console is removed, we re-enable picking the first
957 * one that gets registered. Without that, pmac early boot console 962 * one that gets registered. Without that, pmac early boot console
958 * would prevent fbcon from taking over. 963 * would prevent fbcon from taking over.
@@ -994,7 +999,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
994int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst) 999int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
995{ 1000{
996 static DEFINE_SPINLOCK(ratelimit_lock); 1001 static DEFINE_SPINLOCK(ratelimit_lock);
997 static unsigned long toks = 10*5*HZ; 1002 static unsigned long toks = 10 * 5 * HZ;
998 static unsigned long last_msg; 1003 static unsigned long last_msg;
999 static int missed; 1004 static int missed;
1000 unsigned long flags; 1005 unsigned long flags;
@@ -1007,6 +1012,7 @@ int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
1007 toks = ratelimit_burst * ratelimit_jiffies; 1012 toks = ratelimit_burst * ratelimit_jiffies;
1008 if (toks >= ratelimit_jiffies) { 1013 if (toks >= ratelimit_jiffies) {
1009 int lost = missed; 1014 int lost = missed;
1015
1010 missed = 0; 1016 missed = 0;
1011 toks -= ratelimit_jiffies; 1017 toks -= ratelimit_jiffies;
1012 spin_unlock_irqrestore(&ratelimit_lock, flags); 1018 spin_unlock_irqrestore(&ratelimit_lock, flags);
@@ -1021,7 +1027,7 @@ int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
1021EXPORT_SYMBOL(__printk_ratelimit); 1027EXPORT_SYMBOL(__printk_ratelimit);
1022 1028
1023/* minimum time in jiffies between messages */ 1029/* minimum time in jiffies between messages */
1024int printk_ratelimit_jiffies = 5*HZ; 1030int printk_ratelimit_jiffies = 5 * HZ;
1025 1031
1026/* number of messages we send before ratelimiting */ 1032/* number of messages we send before ratelimiting */
1027int printk_ratelimit_burst = 10; 1033int printk_ratelimit_burst = 10;
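__printk_ratelimit() above is a token bucket: toks refills at one token per jiffy, is capped at ratelimit_burst * ratelimit_jiffies, and each message that gets through costs ratelimit_jiffies tokens, so the defaults of 5 * HZ and 10 allow a burst of ten messages and then roughly one message every five seconds. Callers normally go through the printk_ratelimit() wrapper, which passes printk_ratelimit_jiffies and printk_ratelimit_burst; a hedged usage sketch (the message and dev_name are illustrative):

	if (printk_ratelimit())
		printk(KERN_WARNING "%s: spurious interrupt\n", dev_name);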
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 019e04ec065a..863eee8bff47 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -56,6 +56,10 @@ void ptrace_untrace(task_t *child)
56 signal_wake_up(child, 1); 56 signal_wake_up(child, 1);
57 } 57 }
58 } 58 }
59 if (child->signal->flags & SIGNAL_GROUP_EXIT) {
60 sigaddset(&child->pending.signal, SIGKILL);
61 signal_wake_up(child, 1);
62 }
59 spin_unlock(&child->sighand->siglock); 63 spin_unlock(&child->sighand->siglock);
60} 64}
61 65
@@ -77,8 +81,7 @@ void __ptrace_unlink(task_t *child)
77 SET_LINKS(child); 81 SET_LINKS(child);
78 } 82 }
79 83
80 if (child->state == TASK_TRACED) 84 ptrace_untrace(child);
81 ptrace_untrace(child);
82} 85}
83 86
84/* 87/*
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2559d4b8f23f..c4d159a21e04 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -154,6 +154,15 @@ void fastcall call_rcu_bh(struct rcu_head *head,
154} 154}
155 155
156/* 156/*
157 * Return the number of RCU batches processed thus far. Useful
158 * for debug and statistics.
159 */
160long rcu_batches_completed(void)
161{
162 return rcu_ctrlblk.completed;
163}
164
165/*
157 * Invoke the completed RCU callbacks. They are expected to be in 166 * Invoke the completed RCU callbacks. They are expected to be in
158 * a per-cpu list. 167 * a per-cpu list.
159 */ 168 */
@@ -501,6 +510,7 @@ void synchronize_kernel(void)
501} 510}
502 511
503module_param(maxbatch, int, 0); 512module_param(maxbatch, int, 0);
513EXPORT_SYMBOL_GPL(rcu_batches_completed);
504EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */ 514EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */
505EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */ 515EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */
506EXPORT_SYMBOL_GPL(synchronize_rcu); 516EXPORT_SYMBOL_GPL(synchronize_rcu);
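rcu_batches_completed() is added and exported GPL-only so that test code (the new rcutorture module below) can observe grace-period progress without registering callbacks. A hedged sketch of the polling idiom the torture writer uses: sample the counter, and treat any change as evidence that at least one further batch has been processed since the sample.

	/* Hedged sketch of the polling pattern used by rcutorture below. */
	long snap = rcu_batches_completed();

	while (rcu_batches_completed() == snap)
		schedule_timeout_uninterruptible(1);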
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
new file mode 100644
index 000000000000..9b58f1eff3ca
--- /dev/null
+++ b/kernel/rcutorture.c
@@ -0,0 +1,492 @@
1/*
2 * Read-Copy Update /proc-based torture test facility
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2005
19 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 *
22 * See also: Documentation/RCU/torture.txt
23 */
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/kthread.h>
29#include <linux/err.h>
30#include <linux/spinlock.h>
31#include <linux/smp.h>
32#include <linux/rcupdate.h>
33#include <linux/interrupt.h>
34#include <linux/sched.h>
35#include <asm/atomic.h>
36#include <linux/bitops.h>
37#include <linux/module.h>
38#include <linux/completion.h>
39#include <linux/moduleparam.h>
40#include <linux/percpu.h>
41#include <linux/notifier.h>
42#include <linux/rcuref.h>
43#include <linux/cpu.h>
44#include <linux/random.h>
45#include <linux/delay.h>
46#include <linux/byteorder/swabb.h>
47#include <linux/stat.h>
48
49MODULE_LICENSE("GPL");
50
51static int nreaders = -1; /* # reader threads, defaults to 4*ncpus */
52static int stat_interval = 0; /* Interval between stats, in seconds. */
53 /* Defaults to "only at end of test". */
54static int verbose = 0; /* Print more debug info. */
55
56MODULE_PARM(nreaders, "i");
57MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
58MODULE_PARM(stat_interval, "i");
59MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
60MODULE_PARM(verbose, "i");
61MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
62#define TORTURE_FLAG "rcutorture: "
63#define PRINTK_STRING(s) \
64 do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
65#define VERBOSE_PRINTK_STRING(s) \
66 do { if (verbose) printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
67#define VERBOSE_PRINTK_ERRSTRING(s) \
68 do { if (verbose) printk(KERN_ALERT TORTURE_FLAG "!!! " s "\n"); } while (0)
69
70static char printk_buf[4096];
71
72static int nrealreaders;
73static struct task_struct *writer_task;
74static struct task_struct **reader_tasks;
75static struct task_struct *stats_task;
76
77#define RCU_TORTURE_PIPE_LEN 10
78
79struct rcu_torture {
80 struct rcu_head rtort_rcu;
81 int rtort_pipe_count;
82 struct list_head rtort_free;
83};
84
85static int fullstop = 0; /* stop generating callbacks at test end. */
86static LIST_HEAD(rcu_torture_freelist);
87static struct rcu_torture *rcu_torture_current = NULL;
88static long rcu_torture_current_version = 0;
89static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
90static DEFINE_SPINLOCK(rcu_torture_lock);
91static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
92 { 0 };
93static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
94 { 0 };
95static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
96atomic_t n_rcu_torture_alloc;
97atomic_t n_rcu_torture_alloc_fail;
98atomic_t n_rcu_torture_free;
99
100/*
101 * Allocate an element from the rcu_tortures pool.
102 */
103struct rcu_torture *
104rcu_torture_alloc(void)
105{
106 struct list_head *p;
107
108 spin_lock(&rcu_torture_lock);
109 if (list_empty(&rcu_torture_freelist)) {
110 atomic_inc(&n_rcu_torture_alloc_fail);
111 spin_unlock(&rcu_torture_lock);
112 return NULL;
113 }
114 atomic_inc(&n_rcu_torture_alloc);
115 p = rcu_torture_freelist.next;
116 list_del_init(p);
117 spin_unlock(&rcu_torture_lock);
118 return container_of(p, struct rcu_torture, rtort_free);
119}
120
121/*
122 * Free an element to the rcu_tortures pool.
123 */
124static void
125rcu_torture_free(struct rcu_torture *p)
126{
127 atomic_inc(&n_rcu_torture_free);
128 spin_lock(&rcu_torture_lock);
129 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
130 spin_unlock(&rcu_torture_lock);
131}
132
133static void
134rcu_torture_cb(struct rcu_head *p)
135{
136 int i;
137 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
138
139 if (fullstop) {
140 /* Test is ending, just drop callbacks on the floor. */
141 /* The next initialization will pick up the pieces. */
142 return;
143 }
144 i = rp->rtort_pipe_count;
145 if (i > RCU_TORTURE_PIPE_LEN)
146 i = RCU_TORTURE_PIPE_LEN;
147 atomic_inc(&rcu_torture_wcount[i]);
148 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN)
149 rcu_torture_free(rp);
150 else
151 call_rcu(p, rcu_torture_cb);
152}
153
154struct rcu_random_state {
155 unsigned long rrs_state;
156 unsigned long rrs_count;
157};
158
159#define RCU_RANDOM_MULT 39916801 /* prime */
160#define RCU_RANDOM_ADD 479001701 /* prime */
161#define RCU_RANDOM_REFRESH 10000
162
163#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
164
165/*
166 * Crude but fast random-number generator. Uses a linear congruential
167 * generator, with occasional help from get_random_bytes().
168 */
169static long
170rcu_random(struct rcu_random_state *rrsp)
171{
172 long refresh;
173
174 if (--rrsp->rrs_count < 0) {
175 get_random_bytes(&refresh, sizeof(refresh));
176 rrsp->rrs_state += refresh;
177 rrsp->rrs_count = RCU_RANDOM_REFRESH;
178 }
179 rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
180 return swahw32(rrsp->rrs_state);
181}
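rcu_random() above is an ordinary linear congruential generator, state = state * 39916801 + 479001701, reseeded from get_random_bytes() every RCU_RANDOM_REFRESH calls; swahw32() then exchanges the two 16-bit halves of the low 32 bits, so callers that mask off only a few low-order bits still see the better-mixed upper half. A hedged userspace equivalent of one step (reseeding omitted):

static unsigned long lcg_step(unsigned long *state)
{
	*state = *state * 39916801UL + 479001701UL;
	/* swahw32()-style half-word swap of the low 32 bits */
	return ((*state & 0xffffUL) << 16) | ((*state >> 16) & 0xffffUL);
}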
182
183/*
184 * RCU torture writer kthread. Repeatedly substitutes a new structure
185 * for that pointed to by rcu_torture_current, freeing the old structure
186 * after a series of grace periods (the "pipeline").
187 */
188static int
189rcu_torture_writer(void *arg)
190{
191 int i;
192 long oldbatch = rcu_batches_completed();
193 struct rcu_torture *rp;
194 struct rcu_torture *old_rp;
195 static DEFINE_RCU_RANDOM(rand);
196
197 VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
198 do {
199 schedule_timeout_uninterruptible(1);
200 if (rcu_batches_completed() == oldbatch)
201 continue;
202 if ((rp = rcu_torture_alloc()) == NULL)
203 continue;
204 rp->rtort_pipe_count = 0;
205 udelay(rcu_random(&rand) & 0x3ff);
206 old_rp = rcu_torture_current;
207 rcu_assign_pointer(rcu_torture_current, rp);
208 smp_wmb();
209 if (old_rp != NULL) {
210 i = old_rp->rtort_pipe_count;
211 if (i > RCU_TORTURE_PIPE_LEN)
212 i = RCU_TORTURE_PIPE_LEN;
213 atomic_inc(&rcu_torture_wcount[i]);
214 old_rp->rtort_pipe_count++;
215 call_rcu(&old_rp->rtort_rcu, rcu_torture_cb);
216 }
217 rcu_torture_current_version++;
218 oldbatch = rcu_batches_completed();
219 } while (!kthread_should_stop() && !fullstop);
220 VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
221 while (!kthread_should_stop())
222 schedule_timeout_uninterruptible(1);
223 return 0;
224}
225
226/*
227 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
228 * incrementing the corresponding element of the pipeline array. The
229 * counter in the element should never be greater than 1, otherwise, the
230 * RCU implementation is broken.
231 */
232static int
233rcu_torture_reader(void *arg)
234{
235 int completed;
236 DEFINE_RCU_RANDOM(rand);
237 struct rcu_torture *p;
238 int pipe_count;
239
240 VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
241 do {
242 rcu_read_lock();
243 completed = rcu_batches_completed();
244 p = rcu_dereference(rcu_torture_current);
245 if (p == NULL) {
246 /* Wait for rcu_torture_writer to get underway */
247 rcu_read_unlock();
248 schedule_timeout_interruptible(HZ);
249 continue;
250 }
251 udelay(rcu_random(&rand) & 0x7f);
252 preempt_disable();
253 pipe_count = p->rtort_pipe_count;
254 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
255 /* Should not happen, but... */
256 pipe_count = RCU_TORTURE_PIPE_LEN;
257 }
258 ++__get_cpu_var(rcu_torture_count)[pipe_count];
259 completed = rcu_batches_completed() - completed;
260 if (completed > RCU_TORTURE_PIPE_LEN) {
261 /* Should not happen, but... */
262 completed = RCU_TORTURE_PIPE_LEN;
263 }
264 ++__get_cpu_var(rcu_torture_batch)[completed];
265 preempt_enable();
266 rcu_read_unlock();
267 schedule();
268 } while (!kthread_should_stop() && !fullstop);
269 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
270 while (!kthread_should_stop())
271 schedule_timeout_uninterruptible(1);
272 return 0;
273}
274
275/*
276 * Create an RCU-torture statistics message in the specified buffer.
277 */
278static int
279rcu_torture_printk(char *page)
280{
281 int cnt = 0;
282 int cpu;
283 int i;
284 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
285 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
286
287 for_each_cpu(cpu) {
288 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
289 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
290 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
291 }
292 }
293 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
294 if (pipesummary[i] != 0)
295 break;
296 }
297 cnt += sprintf(&page[cnt], "rcutorture: ");
298 cnt += sprintf(&page[cnt],
299 "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d",
300 rcu_torture_current,
301 rcu_torture_current_version,
302 list_empty(&rcu_torture_freelist),
303 atomic_read(&n_rcu_torture_alloc),
304 atomic_read(&n_rcu_torture_alloc_fail),
305 atomic_read(&n_rcu_torture_free));
306 cnt += sprintf(&page[cnt], "\nrcutorture: ");
307 if (i > 1)
308 cnt += sprintf(&page[cnt], "!!! ");
309 cnt += sprintf(&page[cnt], "Reader Pipe: ");
310 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
311 cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
312 cnt += sprintf(&page[cnt], "\nrcutorture: ");
313 cnt += sprintf(&page[cnt], "Reader Batch: ");
314 for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
315 cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
316 cnt += sprintf(&page[cnt], "\nrcutorture: ");
317 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
318 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
319 cnt += sprintf(&page[cnt], " %d",
320 atomic_read(&rcu_torture_wcount[i]));
321 }
322 cnt += sprintf(&page[cnt], "\n");
323 return cnt;
324}
325
326/*
327 * Print torture statistics. Caller must ensure that there is only
328 * one call to this function at a given time!!! This is normally
329 * accomplished by relying on the module system to only have one copy
330 * of the module loaded, and then by giving the rcu_torture_stats
331 * kthread full control (or the init/cleanup functions when rcu_torture_stats
332 * thread is not running).
333 */
334static void
335rcu_torture_stats_print(void)
336{
337 int cnt;
338
339 cnt = rcu_torture_printk(printk_buf);
340 printk(KERN_ALERT "%s", printk_buf);
341}
342
343/*
344 * Periodically prints torture statistics, if periodic statistics printing
345 * was specified via the stat_interval module parameter.
346 *
347 * No need to worry about fullstop here, since this one doesn't reference
348 * volatile state or register callbacks.
349 */
350static int
351rcu_torture_stats(void *arg)
352{
353 VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
354 do {
355 schedule_timeout_interruptible(stat_interval * HZ);
356 rcu_torture_stats_print();
357 } while (!kthread_should_stop());
358 VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
359 return 0;
360}
361
362static void
363rcu_torture_cleanup(void)
364{
365 int i;
366
367 fullstop = 1;
368 if (writer_task != NULL) {
369 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
370 kthread_stop(writer_task);
371 }
372 writer_task = NULL;
373
374 if (reader_tasks != NULL) {
375 for (i = 0; i < nrealreaders; i++) {
376 if (reader_tasks[i] != NULL) {
377 VERBOSE_PRINTK_STRING(
378 "Stopping rcu_torture_reader task");
379 kthread_stop(reader_tasks[i]);
380 }
381 reader_tasks[i] = NULL;
382 }
383 kfree(reader_tasks);
384 reader_tasks = NULL;
385 }
386 rcu_torture_current = NULL;
387
388 if (stats_task != NULL) {
389 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
390 kthread_stop(stats_task);
391 }
392 stats_task = NULL;
393
394 /* Wait for all RCU callbacks to fire. */
395
396 for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
397 synchronize_rcu();
398 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
399 PRINTK_STRING("--- End of test");
400}
401
402static int
403rcu_torture_init(void)
404{
405 int i;
406 int cpu;
407 int firsterr = 0;
408
409 /* Process args and tell the world that the torturer is on the job. */
410
411 if (nreaders >= 0)
412 nrealreaders = nreaders;
413 else
414 nrealreaders = 2 * num_online_cpus();
415 printk(KERN_ALERT TORTURE_FLAG
416 "--- Start of test: nreaders=%d stat_interval=%d verbose=%d\n",
417 nrealreaders, stat_interval, verbose);
418 fullstop = 0;
419
420 /* Set up the freelist. */
421
422 INIT_LIST_HEAD(&rcu_torture_freelist);
423 for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) {
424 list_add_tail(&rcu_tortures[i].rtort_free,
425 &rcu_torture_freelist);
426 }
427
428 /* Initialize the statistics so that each run gets its own numbers. */
429
430 rcu_torture_current = NULL;
431 rcu_torture_current_version = 0;
432 atomic_set(&n_rcu_torture_alloc, 0);
433 atomic_set(&n_rcu_torture_alloc_fail, 0);
434 atomic_set(&n_rcu_torture_free, 0);
435 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
436 atomic_set(&rcu_torture_wcount[i], 0);
437 for_each_cpu(cpu) {
438 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
439 per_cpu(rcu_torture_count, cpu)[i] = 0;
440 per_cpu(rcu_torture_batch, cpu)[i] = 0;
441 }
442 }
443
444 /* Start up the kthreads. */
445
446 VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
447 writer_task = kthread_run(rcu_torture_writer, NULL,
448 "rcu_torture_writer");
449 if (IS_ERR(writer_task)) {
450 firsterr = PTR_ERR(writer_task);
451 VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
452 writer_task = NULL;
453 goto unwind;
454 }
455 reader_tasks = kmalloc(nrealreaders * sizeof(reader_tasks[0]),
456 GFP_KERNEL);
457 if (reader_tasks == NULL) {
458 VERBOSE_PRINTK_ERRSTRING("out of memory");
459 firsterr = -ENOMEM;
460 goto unwind;
461 }
462 for (i = 0; i < nrealreaders; i++) {
463 VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
464 reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
465 "rcu_torture_reader");
466 if (IS_ERR(reader_tasks[i])) {
467 firsterr = PTR_ERR(reader_tasks[i]);
468 VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
469 reader_tasks[i] = NULL;
470 goto unwind;
471 }
472 }
473 if (stat_interval > 0) {
474 VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
475 stats_task = kthread_run(rcu_torture_stats, NULL,
476 "rcu_torture_stats");
477 if (IS_ERR(stats_task)) {
478 firsterr = PTR_ERR(stats_task);
479 VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
480 stats_task = NULL;
481 goto unwind;
482 }
483 }
484 return 0;
485
486unwind:
487 rcu_torture_cleanup();
488 return firsterr;
489}
490
491module_init(rcu_torture_init);
492module_exit(rcu_torture_cleanup);
diff --git a/kernel/sched.c b/kernel/sched.c
index 4f26c544d02c..340dd238c16d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3877,7 +3877,6 @@ EXPORT_SYMBOL(cpu_present_map);
3877 3877
3878#ifndef CONFIG_SMP 3878#ifndef CONFIG_SMP
3879cpumask_t cpu_online_map = CPU_MASK_ALL; 3879cpumask_t cpu_online_map = CPU_MASK_ALL;
3880EXPORT_SYMBOL_GPL(cpu_online_map);
3881cpumask_t cpu_possible_map = CPU_MASK_ALL; 3880cpumask_t cpu_possible_map = CPU_MASK_ALL;
3882#endif 3881#endif
3883 3882
diff --git a/kernel/signal.c b/kernel/signal.c
index 6904bbbfe116..1bf3c39d6109 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -277,7 +277,6 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
277 } else { 277 } else {
278 INIT_LIST_HEAD(&q->list); 278 INIT_LIST_HEAD(&q->list);
279 q->flags = 0; 279 q->flags = 0;
280 q->lock = NULL;
281 q->user = get_uid(t->user); 280 q->user = get_uid(t->user);
282 } 281 }
283 return(q); 282 return(q);
@@ -652,8 +651,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
652 if (!valid_signal(sig)) 651 if (!valid_signal(sig))
653 return error; 652 return error;
654 error = -EPERM; 653 error = -EPERM;
655 if ((!info || ((unsigned long)info != 1 && 654 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
656 (unsigned long)info != 2 && SI_FROMUSER(info)))
657 && ((sig != SIGCONT) || 655 && ((sig != SIGCONT) ||
658 (current->signal->session != t->signal->session)) 656 (current->signal->session != t->signal->session))
659 && (current->euid ^ t->suid) && (current->euid ^ t->uid) 657 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
@@ -790,7 +788,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
790 * fast-pathed signals for kernel-internal things like SIGSTOP 788 * fast-pathed signals for kernel-internal things like SIGSTOP
791 * or SIGKILL. 789 * or SIGKILL.
792 */ 790 */
793 if ((unsigned long)info == 2) 791 if (info == SEND_SIG_FORCED)
794 goto out_set; 792 goto out_set;
795 793
796 /* Real-time signals must be queued if sent by sigqueue, or 794 /* Real-time signals must be queued if sent by sigqueue, or
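These signal.c hunks replace the old magic siginfo pointer values, where 0, 1 and 2 were cast to a pointer and tested with (unsigned long) comparisons, by the named constants SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED plus an is_si_special() predicate. Their definitions live in a header outside these hunks; a hedged reconstruction consistent with the casts still visible in the send_signal() switch below:

/* Hedged sketch of the header side (not shown in this diff): the special
 * "info" values remain small integers cast to a siginfo pointer, and
 * is_si_special() spots them so they are never dereferenced or passed
 * to SI_FROMUSER(). */
#define SEND_SIG_NOINFO	((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}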
@@ -802,19 +800,19 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
802 pass on the info struct. */ 800 pass on the info struct. */
803 801
804 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && 802 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
805 ((unsigned long) info < 2 || 803 (is_si_special(info) ||
806 info->si_code >= 0))); 804 info->si_code >= 0)));
807 if (q) { 805 if (q) {
808 list_add_tail(&q->list, &signals->list); 806 list_add_tail(&q->list, &signals->list);
809 switch ((unsigned long) info) { 807 switch ((unsigned long) info) {
810 case 0: 808 case (unsigned long) SEND_SIG_NOINFO:
811 q->info.si_signo = sig; 809 q->info.si_signo = sig;
812 q->info.si_errno = 0; 810 q->info.si_errno = 0;
813 q->info.si_code = SI_USER; 811 q->info.si_code = SI_USER;
814 q->info.si_pid = current->pid; 812 q->info.si_pid = current->pid;
815 q->info.si_uid = current->uid; 813 q->info.si_uid = current->uid;
816 break; 814 break;
817 case 1: 815 case (unsigned long) SEND_SIG_PRIV:
818 q->info.si_signo = sig; 816 q->info.si_signo = sig;
819 q->info.si_errno = 0; 817 q->info.si_errno = 0;
820 q->info.si_code = SI_KERNEL; 818 q->info.si_code = SI_KERNEL;
@@ -825,20 +823,13 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
825 copy_siginfo(&q->info, info); 823 copy_siginfo(&q->info, info);
826 break; 824 break;
827 } 825 }
828 } else { 826 } else if (!is_si_special(info)) {
829 if (sig >= SIGRTMIN && info && (unsigned long)info != 1 827 if (sig >= SIGRTMIN && info->si_code != SI_USER)
830 && info->si_code != SI_USER)
831 /* 828 /*
832 * Queue overflow, abort. We may abort if the signal was rt 829 * Queue overflow, abort. We may abort if the signal was rt
833 * and sent by user using something other than kill(). 830 * and sent by user using something other than kill().
834 */ 831 */
835 return -EAGAIN; 832 return -EAGAIN;
836 if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
837 /*
838 * Set up a return to indicate that we dropped
839 * the signal.
840 */
841 ret = info->si_sys_private;
842 } 833 }
843 834
844out_set: 835out_set:
@@ -859,12 +850,6 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
859 BUG(); 850 BUG();
860 assert_spin_locked(&t->sighand->siglock); 851 assert_spin_locked(&t->sighand->siglock);
861 852
862 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
863 /*
864 * Set up a return to indicate that we dropped the signal.
865 */
866 ret = info->si_sys_private;
867
868 /* Short-circuit ignored signals. */ 853 /* Short-circuit ignored signals. */
869 if (sig_ignored(t, sig)) 854 if (sig_ignored(t, sig))
870 goto out; 855 goto out;
@@ -894,11 +879,13 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
894 int ret; 879 int ret;
895 880
896 spin_lock_irqsave(&t->sighand->siglock, flags); 881 spin_lock_irqsave(&t->sighand->siglock, flags);
897 if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) { 882 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
898 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL; 883 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
884 }
885 if (sigismember(&t->blocked, sig)) {
899 sigdelset(&t->blocked, sig); 886 sigdelset(&t->blocked, sig);
900 recalc_sigpending_tsk(t);
901 } 887 }
888 recalc_sigpending_tsk(t);
902 ret = specific_send_sig_info(sig, info, t); 889 ret = specific_send_sig_info(sig, info, t);
903 spin_unlock_irqrestore(&t->sighand->siglock, flags); 890 spin_unlock_irqrestore(&t->sighand->siglock, flags);
904 891
@@ -908,15 +895,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
908void 895void
909force_sig_specific(int sig, struct task_struct *t) 896force_sig_specific(int sig, struct task_struct *t)
910{ 897{
911 unsigned long int flags; 898 force_sig_info(sig, SEND_SIG_FORCED, t);
912
913 spin_lock_irqsave(&t->sighand->siglock, flags);
914 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
915 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
916 sigdelset(&t->blocked, sig);
917 recalc_sigpending_tsk(t);
918 specific_send_sig_info(sig, (void *)2, t);
919 spin_unlock_irqrestore(&t->sighand->siglock, flags);
920} 899}
921 900
922/* 901/*
@@ -1051,12 +1030,6 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1051 assert_spin_locked(&p->sighand->siglock); 1030 assert_spin_locked(&p->sighand->siglock);
1052 handle_stop_signal(sig, p); 1031 handle_stop_signal(sig, p);
1053 1032
1054 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
1055 /*
1056 * Set up a return to indicate that we dropped the signal.
1057 */
1058 ret = info->si_sys_private;
1059
1060 /* Short-circuit ignored signals. */ 1033 /* Short-circuit ignored signals. */
1061 if (sig_ignored(p, sig)) 1034 if (sig_ignored(p, sig))
1062 return ret; 1035 return ret;
@@ -1109,8 +1082,8 @@ void zap_other_threads(struct task_struct *p)
1109 if (t != p->group_leader) 1082 if (t != p->group_leader)
1110 t->exit_signal = -1; 1083 t->exit_signal = -1;
1111 1084
1085 /* SIGKILL will be handled before any pending SIGSTOP */
1112 sigaddset(&t->pending.signal, SIGKILL); 1086 sigaddset(&t->pending.signal, SIGKILL);
1113 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1114 signal_wake_up(t, 1); 1087 signal_wake_up(t, 1);
1115 } 1088 }
1116} 1089}
@@ -1286,10 +1259,13 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1286 return ret; 1259 return ret;
1287} 1260}
1288 1261
1262#define __si_special(priv) \
1263 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1264
1289int 1265int
1290send_sig(int sig, struct task_struct *p, int priv) 1266send_sig(int sig, struct task_struct *p, int priv)
1291{ 1267{
1292 return send_sig_info(sig, (void*)(long)(priv != 0), p); 1268 return send_sig_info(sig, __si_special(priv), p);
1293} 1269}
1294 1270
1295/* 1271/*
@@ -1309,7 +1285,7 @@ send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1309void 1285void
1310force_sig(int sig, struct task_struct *p) 1286force_sig(int sig, struct task_struct *p)
1311{ 1287{
1312 force_sig_info(sig, (void*)1L, p); 1288 force_sig_info(sig, SEND_SIG_PRIV, p);
1313} 1289}
1314 1290
1315/* 1291/*
@@ -1334,13 +1310,13 @@ force_sigsegv(int sig, struct task_struct *p)
1334int 1310int
1335kill_pg(pid_t pgrp, int sig, int priv) 1311kill_pg(pid_t pgrp, int sig, int priv)
1336{ 1312{
1337 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp); 1313 return kill_pg_info(sig, __si_special(priv), pgrp);
1338} 1314}
1339 1315
1340int 1316int
1341kill_proc(pid_t pid, int sig, int priv) 1317kill_proc(pid_t pid, int sig, int priv)
1342{ 1318{
1343 return kill_proc_info(sig, (void *)(long)(priv != 0), pid); 1319 return kill_proc_info(sig, __si_special(priv), pid);
1344} 1320}
1345 1321
1346/* 1322/*
@@ -1371,11 +1347,12 @@ void sigqueue_free(struct sigqueue *q)
1371 * pending queue. 1347 * pending queue.
1372 */ 1348 */
1373 if (unlikely(!list_empty(&q->list))) { 1349 if (unlikely(!list_empty(&q->list))) {
1374 read_lock(&tasklist_lock); 1350 spinlock_t *lock = &current->sighand->siglock;
1375 spin_lock_irqsave(q->lock, flags); 1351 read_lock(&tasklist_lock);
1352 spin_lock_irqsave(lock, flags);
1376 if (!list_empty(&q->list)) 1353 if (!list_empty(&q->list))
1377 list_del_init(&q->list); 1354 list_del_init(&q->list);
1378 spin_unlock_irqrestore(q->lock, flags); 1355 spin_unlock_irqrestore(lock, flags);
1379 read_unlock(&tasklist_lock); 1356 read_unlock(&tasklist_lock);
1380 } 1357 }
1381 q->flags &= ~SIGQUEUE_PREALLOC; 1358 q->flags &= ~SIGQUEUE_PREALLOC;
@@ -1414,7 +1391,6 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1414 goto out; 1391 goto out;
1415 } 1392 }
1416 1393
1417 q->lock = &p->sighand->siglock;
1418 list_add_tail(&q->list, &p->pending.list); 1394 list_add_tail(&q->list, &p->pending.list);
1419 sigaddset(&p->pending.signal, sig); 1395 sigaddset(&p->pending.signal, sig);
1420 if (!sigismember(&p->blocked, sig)) 1396 if (!sigismember(&p->blocked, sig))
@@ -1462,7 +1438,6 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1462 * We always use the shared queue for process-wide signals, 1438 * We always use the shared queue for process-wide signals,
1463 * to avoid several races. 1439 * to avoid several races.
1464 */ 1440 */
1465 q->lock = &p->sighand->siglock;
1466 list_add_tail(&q->list, &p->signal->shared_pending.list); 1441 list_add_tail(&q->list, &p->signal->shared_pending.list);
1467 sigaddset(&p->signal->shared_pending.signal, sig); 1442 sigaddset(&p->signal->shared_pending.signal, sig);
1468 1443
@@ -1881,9 +1856,9 @@ relock:
1881 /* Let the debugger run. */ 1856 /* Let the debugger run. */
1882 ptrace_stop(signr, signr, info); 1857 ptrace_stop(signr, signr, info);
1883 1858
1884 /* We're back. Did the debugger cancel the sig? */ 1859 /* We're back. Did the debugger cancel the sig or group_exit? */
1885 signr = current->exit_code; 1860 signr = current->exit_code;
1886 if (signr == 0) 1861 if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1887 continue; 1862 continue;
1888 1863
1889 current->exit_code = 0; 1864 current->exit_code = 0;
@@ -2285,26 +2260,13 @@ sys_kill(int pid, int sig)
2285 return kill_something_info(sig, &info, pid); 2260 return kill_something_info(sig, &info, pid);
2286} 2261}
2287 2262
2288/** 2263static int do_tkill(int tgid, int pid, int sig)
2289 * sys_tgkill - send signal to one specific thread
2290 * @tgid: the thread group ID of the thread
2291 * @pid: the PID of the thread
2292 * @sig: signal to be sent
2293 *
2294 * This syscall also checks the tgid and returns -ESRCH even if the PID
2295 * exists but it's not belonging to the target process anymore. This
2296 * method solves the problem of threads exiting and PIDs getting reused.
2297 */
2298asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2299{ 2264{
2300 struct siginfo info;
2301 int error; 2265 int error;
2266 struct siginfo info;
2302 struct task_struct *p; 2267 struct task_struct *p;
2303 2268
2304 /* This is only valid for single tasks */ 2269 error = -ESRCH;
2305 if (pid <= 0 || tgid <= 0)
2306 return -EINVAL;
2307
2308 info.si_signo = sig; 2270 info.si_signo = sig;
2309 info.si_errno = 0; 2271 info.si_errno = 0;
2310 info.si_code = SI_TKILL; 2272 info.si_code = SI_TKILL;
@@ -2313,8 +2275,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2313 2275
2314 read_lock(&tasklist_lock); 2276 read_lock(&tasklist_lock);
2315 p = find_task_by_pid(pid); 2277 p = find_task_by_pid(pid);
2316 error = -ESRCH; 2278 if (p && (tgid <= 0 || p->tgid == tgid)) {
2317 if (p && (p->tgid == tgid)) {
2318 error = check_kill_permission(sig, &info, p); 2279 error = check_kill_permission(sig, &info, p);
2319 /* 2280 /*
2320 * The null signal is a permissions and process existence 2281 * The null signal is a permissions and process existence
@@ -2328,47 +2289,40 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2328 } 2289 }
2329 } 2290 }
2330 read_unlock(&tasklist_lock); 2291 read_unlock(&tasklist_lock);
2292
2331 return error; 2293 return error;
2332} 2294}
2333 2295
2296/**
2297 * sys_tgkill - send signal to one specific thread
2298 * @tgid: the thread group ID of the thread
2299 * @pid: the PID of the thread
2300 * @sig: signal to be sent
2301 *
2302 * This syscall also checks the tgid and returns -ESRCH even if the PID
2303 * exists but it's not belonging to the target process anymore. This
2304 * method solves the problem of threads exiting and PIDs getting reused.
2305 */
2306asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2307{
2308 /* This is only valid for single tasks */
2309 if (pid <= 0 || tgid <= 0)
2310 return -EINVAL;
2311
2312 return do_tkill(tgid, pid, sig);
2313}
2314
2334/* 2315/*
2335 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2316 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2336 */ 2317 */
2337asmlinkage long 2318asmlinkage long
2338sys_tkill(int pid, int sig) 2319sys_tkill(int pid, int sig)
2339{ 2320{
2340 struct siginfo info;
2341 int error;
2342 struct task_struct *p;
2343
2344 /* This is only valid for single tasks */ 2321 /* This is only valid for single tasks */
2345 if (pid <= 0) 2322 if (pid <= 0)
2346 return -EINVAL; 2323 return -EINVAL;
2347 2324
2348 info.si_signo = sig; 2325 return do_tkill(0, pid, sig);
2349 info.si_errno = 0;
2350 info.si_code = SI_TKILL;
2351 info.si_pid = current->tgid;
2352 info.si_uid = current->uid;
2353
2354 read_lock(&tasklist_lock);
2355 p = find_task_by_pid(pid);
2356 error = -ESRCH;
2357 if (p) {
2358 error = check_kill_permission(sig, &info, p);
2359 /*
2360 * The null signal is a permissions and process existence
2361 * probe. No signal is actually delivered.
2362 */
2363 if (!error && sig && p->sighand) {
2364 spin_lock_irq(&p->sighand->siglock);
2365 handle_stop_signal(sig, p);
2366 error = specific_send_sig_info(sig, &info, p);
2367 spin_unlock_irq(&p->sighand->siglock);
2368 }
2369 }
2370 read_unlock(&tasklist_lock);
2371 return error;
2372} 2326}
2373 2327
2374asmlinkage long 2328asmlinkage long
diff --git a/kernel/time.c b/kernel/time.c
index a3c2100470e1..245d595a13cb 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -338,30 +338,20 @@ int do_adjtimex(struct timex *txc)
338 if (mtemp >= MINSEC) { 338 if (mtemp >= MINSEC) {
339 ltemp = (time_offset / mtemp) << (SHIFT_USEC - 339 ltemp = (time_offset / mtemp) << (SHIFT_USEC -
340 SHIFT_UPDATE); 340 SHIFT_UPDATE);
341 if (ltemp < 0) 341 time_freq += shift_right(ltemp, SHIFT_KH);
342 time_freq -= -ltemp >> SHIFT_KH;
343 else
344 time_freq += ltemp >> SHIFT_KH;
345 } else /* calibration interval too short (p. 12) */ 342 } else /* calibration interval too short (p. 12) */
346 result = TIME_ERROR; 343 result = TIME_ERROR;
347 } else { /* PLL mode */ 344 } else { /* PLL mode */
348 if (mtemp < MAXSEC) { 345 if (mtemp < MAXSEC) {
349 ltemp *= mtemp; 346 ltemp *= mtemp;
350 if (ltemp < 0) 347 time_freq += shift_right(ltemp,(time_constant +
351 time_freq -= -ltemp >> (time_constant +
352 time_constant +
353 SHIFT_KF - SHIFT_USEC);
354 else
355 time_freq += ltemp >> (time_constant +
356 time_constant + 348 time_constant +
357 SHIFT_KF - SHIFT_USEC); 349 SHIFT_KF - SHIFT_USEC));
358 } else /* calibration interval too long (p. 12) */ 350 } else /* calibration interval too long (p. 12) */
359 result = TIME_ERROR; 351 result = TIME_ERROR;
360 } 352 }
361 if (time_freq > time_tolerance) 353 time_freq = min(time_freq, time_tolerance);
362 time_freq = time_tolerance; 354 time_freq = max(time_freq, -time_tolerance);
363 else if (time_freq < -time_tolerance)
364 time_freq = -time_tolerance;
365 } /* STA_PLL || STA_PPSTIME */ 355 } /* STA_PLL || STA_PPSTIME */
366 } /* txc->modes & ADJ_OFFSET */ 356 } /* txc->modes & ADJ_OFFSET */
367 if (txc->modes & ADJ_TICK) { 357 if (txc->modes & ADJ_TICK) {
@@ -384,10 +374,7 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0
384 if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) 374 if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
385 txc->offset = save_adjust; 375 txc->offset = save_adjust;
386 else { 376 else {
387 if (time_offset < 0) 377 txc->offset = shift_right(time_offset, SHIFT_UPDATE);
388 txc->offset = -(-time_offset >> SHIFT_UPDATE);
389 else
390 txc->offset = time_offset >> SHIFT_UPDATE;
391 } 378 }
392 txc->freq = time_freq + pps_freq; 379 txc->freq = time_freq + pps_freq;
393 txc->maxerror = time_maxerror; 380 txc->maxerror = time_maxerror;
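Throughout kernel/time.c (and kernel/timer.c below) the open-coded "if negative, shift the negated value and negate again" pairs are collapsed into shift_right(), and the symmetric clamps into min()/max(). shift_right() itself is introduced outside the hunks shown; a hedged sketch of a definition that matches the code it replaces, i.e. a right shift that rounds toward zero for negative inputs instead of toward minus infinity:

/* Hedged sketch of shift_right() (defined elsewhere in this series, not
 * in the hunks shown here). */
#define shift_right(x, s) ({			\
	__typeof__(x) __x = (x);		\
	__typeof__(s) __s = (s);		\
	__x < 0 ? -(-__x >> __s) : __x >> __s;	\
})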
diff --git a/kernel/timer.c b/kernel/timer.c
index 6a2e5f8dc725..fd74268d8663 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -46,6 +46,10 @@ static void time_interpolator_update(long delta_nsec);
46#define time_interpolator_update(x) 46#define time_interpolator_update(x)
47#endif 47#endif
48 48
49u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
50
51EXPORT_SYMBOL(jiffies_64);
52
49/* 53/*
50 * per-CPU timer vector definitions: 54 * per-CPU timer vector definitions:
51 */ 55 */
@@ -91,30 +95,6 @@ static inline void set_running_timer(tvec_base_t *base,
91#endif 95#endif
92} 96}
93 97
94static void check_timer_failed(struct timer_list *timer)
95{
96 static int whine_count;
97 if (whine_count < 16) {
98 whine_count++;
99 printk("Uninitialised timer!\n");
100 printk("This is just a warning. Your computer is OK\n");
101 printk("function=0x%p, data=0x%lx\n",
102 timer->function, timer->data);
103 dump_stack();
104 }
105 /*
106 * Now fix it up
107 */
108 timer->magic = TIMER_MAGIC;
109}
110
111static inline void check_timer(struct timer_list *timer)
112{
113 if (timer->magic != TIMER_MAGIC)
114 check_timer_failed(timer);
115}
116
117
118static void internal_add_timer(tvec_base_t *base, struct timer_list *timer) 98static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
119{ 99{
120 unsigned long expires = timer->expires; 100 unsigned long expires = timer->expires;
@@ -177,7 +157,6 @@ void fastcall init_timer(struct timer_list *timer)
177{ 157{
178 timer->entry.next = NULL; 158 timer->entry.next = NULL;
179 timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base; 159 timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base;
180 timer->magic = TIMER_MAGIC;
181} 160}
182EXPORT_SYMBOL(init_timer); 161EXPORT_SYMBOL(init_timer);
183 162
@@ -230,7 +209,6 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
230 int ret = 0; 209 int ret = 0;
231 210
232 BUG_ON(!timer->function); 211 BUG_ON(!timer->function);
233 check_timer(timer);
234 212
235 base = lock_timer_base(timer, &flags); 213 base = lock_timer_base(timer, &flags);
236 214
@@ -283,9 +261,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
283 unsigned long flags; 261 unsigned long flags;
284 262
285 BUG_ON(timer_pending(timer) || !timer->function); 263 BUG_ON(timer_pending(timer) || !timer->function);
286
287 check_timer(timer);
288
289 spin_lock_irqsave(&base->t_base.lock, flags); 264 spin_lock_irqsave(&base->t_base.lock, flags);
290 timer->base = &base->t_base; 265 timer->base = &base->t_base;
291 internal_add_timer(base, timer); 266 internal_add_timer(base, timer);
@@ -316,8 +291,6 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
316{ 291{
317 BUG_ON(!timer->function); 292 BUG_ON(!timer->function);
318 293
319 check_timer(timer);
320
321 /* 294 /*
322 * This is a common optimization triggered by the 295 * This is a common optimization triggered by the
323 * networking code - if the timer is re-modified 296 * networking code - if the timer is re-modified
@@ -348,8 +321,6 @@ int del_timer(struct timer_list *timer)
348 unsigned long flags; 321 unsigned long flags;
349 int ret = 0; 322 int ret = 0;
350 323
351 check_timer(timer);
352
353 if (timer_pending(timer)) { 324 if (timer_pending(timer)) {
354 base = lock_timer_base(timer, &flags); 325 base = lock_timer_base(timer, &flags);
355 if (timer_pending(timer)) { 326 if (timer_pending(timer)) {
@@ -412,8 +383,6 @@ out:
412 */ 383 */
413int del_timer_sync(struct timer_list *timer) 384int del_timer_sync(struct timer_list *timer)
414{ 385{
415 check_timer(timer);
416
417 for (;;) { 386 for (;;) {
418 int ret = try_to_del_timer_sync(timer); 387 int ret = try_to_del_timer_sync(timer);
419 if (ret >= 0) 388 if (ret >= 0)
@@ -632,143 +601,118 @@ long time_next_adjust;
632 */ 601 */
633static void second_overflow(void) 602static void second_overflow(void)
634{ 603{
635 long ltemp; 604 long ltemp;
636 605
637 /* Bump the maxerror field */ 606 /* Bump the maxerror field */
638 time_maxerror += time_tolerance >> SHIFT_USEC; 607 time_maxerror += time_tolerance >> SHIFT_USEC;
639 if ( time_maxerror > NTP_PHASE_LIMIT ) { 608 if (time_maxerror > NTP_PHASE_LIMIT) {
640 time_maxerror = NTP_PHASE_LIMIT; 609 time_maxerror = NTP_PHASE_LIMIT;
641 time_status |= STA_UNSYNC; 610 time_status |= STA_UNSYNC;
642 }
643
644 /*
645 * Leap second processing. If in leap-insert state at
646 * the end of the day, the system clock is set back one
647 * second; if in leap-delete state, the system clock is
648 * set ahead one second. The microtime() routine or
649 * external clock driver will insure that reported time
650 * is always monotonic. The ugly divides should be
651 * replaced.
652 */
653 switch (time_state) {
654
655 case TIME_OK:
656 if (time_status & STA_INS)
657 time_state = TIME_INS;
658 else if (time_status & STA_DEL)
659 time_state = TIME_DEL;
660 break;
661
662 case TIME_INS:
663 if (xtime.tv_sec % 86400 == 0) {
664 xtime.tv_sec--;
665 wall_to_monotonic.tv_sec++;
666 /* The timer interpolator will make time change gradually instead
667 * of an immediate jump by one second.
668 */
669 time_interpolator_update(-NSEC_PER_SEC);
670 time_state = TIME_OOP;
671 clock_was_set();
672 printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
673 } 611 }
674 break; 612
675 613 /*
676 case TIME_DEL: 614 * Leap second processing. If in leap-insert state at the end of the
677 if ((xtime.tv_sec + 1) % 86400 == 0) { 615 * day, the system clock is set back one second; if in leap-delete
678 xtime.tv_sec++; 616 * state, the system clock is set ahead one second. The microtime()
679 wall_to_monotonic.tv_sec--; 617 * routine or external clock driver will insure that reported time is
680 /* Use of time interpolator for a gradual change of time */ 618 * always monotonic. The ugly divides should be replaced.
681 time_interpolator_update(NSEC_PER_SEC); 619 */
682 time_state = TIME_WAIT; 620 switch (time_state) {
683 clock_was_set(); 621 case TIME_OK:
684 printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n"); 622 if (time_status & STA_INS)
623 time_state = TIME_INS;
624 else if (time_status & STA_DEL)
625 time_state = TIME_DEL;
626 break;
627 case TIME_INS:
628 if (xtime.tv_sec % 86400 == 0) {
629 xtime.tv_sec--;
630 wall_to_monotonic.tv_sec++;
631 /*
632 * The timer interpolator will make time change
633 * gradually instead of an immediate jump by one second
634 */
635 time_interpolator_update(-NSEC_PER_SEC);
636 time_state = TIME_OOP;
637 clock_was_set();
638 printk(KERN_NOTICE "Clock: inserting leap second "
639 "23:59:60 UTC\n");
640 }
641 break;
642 case TIME_DEL:
643 if ((xtime.tv_sec + 1) % 86400 == 0) {
644 xtime.tv_sec++;
645 wall_to_monotonic.tv_sec--;
646 /*
647 * Use of time interpolator for a gradual change of
648 * time
649 */
650 time_interpolator_update(NSEC_PER_SEC);
651 time_state = TIME_WAIT;
652 clock_was_set();
653 printk(KERN_NOTICE "Clock: deleting leap second "
654 "23:59:59 UTC\n");
655 }
656 break;
657 case TIME_OOP:
658 time_state = TIME_WAIT;
659 break;
660 case TIME_WAIT:
661 if (!(time_status & (STA_INS | STA_DEL)))
662 time_state = TIME_OK;
685 } 663 }
686 break; 664
687 665 /*
688 case TIME_OOP: 666 * Compute the phase adjustment for the next second. In PLL mode, the
689 time_state = TIME_WAIT; 667 * offset is reduced by a fixed factor times the time constant. In FLL
690 break; 668 * mode the offset is used directly. In either mode, the maximum phase
691 669 * adjustment for each second is clamped so as to spread the adjustment
692 case TIME_WAIT: 670 * over not more than the number of seconds between updates.
693 if (!(time_status & (STA_INS | STA_DEL))) 671 */
694 time_state = TIME_OK;
695 }
696
697 /*
698 * Compute the phase adjustment for the next second. In
699 * PLL mode, the offset is reduced by a fixed factor
700 * times the time constant. In FLL mode the offset is
701 * used directly. In either mode, the maximum phase
702 * adjustment for each second is clamped so as to spread
703 * the adjustment over not more than the number of
704 * seconds between updates.
705 */
706 if (time_offset < 0) {
707 ltemp = -time_offset;
708 if (!(time_status & STA_FLL))
709 ltemp >>= SHIFT_KG + time_constant;
710 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
711 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
712 time_offset += ltemp;
713 time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
714 } else {
715 ltemp = time_offset; 672 ltemp = time_offset;
716 if (!(time_status & STA_FLL)) 673 if (!(time_status & STA_FLL))
717 ltemp >>= SHIFT_KG + time_constant; 674 ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
718 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE) 675 ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
719 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE; 676 ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
720 time_offset -= ltemp; 677 time_offset -= ltemp;
721 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE); 678 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
722 } 679
723 680 /*
724 /* 681 * Compute the frequency estimate and additional phase adjustment due
725 * Compute the frequency estimate and additional phase 682 * to frequency error for the next second. When the PPS signal is
726 * adjustment due to frequency error for the next 683 * engaged, gnaw on the watchdog counter and update the frequency
727 * second. When the PPS signal is engaged, gnaw on the 684 * computed by the pll and the PPS signal.
728 * watchdog counter and update the frequency computed by 685 */
729 * the pll and the PPS signal. 686 pps_valid++;
730 */ 687 if (pps_valid == PPS_VALID) { /* PPS signal lost */
731 pps_valid++; 688 pps_jitter = MAXTIME;
732 if (pps_valid == PPS_VALID) { /* PPS signal lost */ 689 pps_stabil = MAXFREQ;
733 pps_jitter = MAXTIME; 690 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
734 pps_stabil = MAXFREQ; 691 STA_PPSWANDER | STA_PPSERROR);
735 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | 692 }
736 STA_PPSWANDER | STA_PPSERROR); 693 ltemp = time_freq + pps_freq;
737 } 694 time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));
738 ltemp = time_freq + pps_freq;
739 if (ltemp < 0)
740 time_adj -= -ltemp >>
741 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
742 else
743 time_adj += ltemp >>
744 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
745 695
746#if HZ == 100 696#if HZ == 100
747 /* Compensate for (HZ==100) != (1 << SHIFT_HZ). 697 /*
748 * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14) 698 * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
749 */ 699 * get 128.125; => only 0.125% error (p. 14)
750 if (time_adj < 0) 700 */
751 time_adj -= (-time_adj >> 2) + (-time_adj >> 5); 701 time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
752 else
753 time_adj += (time_adj >> 2) + (time_adj >> 5);
754#endif 702#endif
755#if HZ == 250 703#if HZ == 250
756 /* Compensate for (HZ==250) != (1 << SHIFT_HZ). 704 /*
757 * Add 1.5625% and 0.78125% to get 255.85938; => only 0.05% error (p. 14) 705 * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
758 */ 706 * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
759 if (time_adj < 0) 707 */
760 time_adj -= (-time_adj >> 6) + (-time_adj >> 7); 708 time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
761 else
762 time_adj += (time_adj >> 6) + (time_adj >> 7);
763#endif 709#endif
764#if HZ == 1000 710#if HZ == 1000
765 /* Compensate for (HZ==1000) != (1 << SHIFT_HZ). 711 /*
766 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14) 712 * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
767 */ 713 * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
768 if (time_adj < 0) 714 */
769 time_adj -= (-time_adj >> 6) + (-time_adj >> 7); 715 time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
770 else
771 time_adj += (time_adj >> 6) + (time_adj >> 7);
772#endif 716#endif
773} 717}
774 718
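The HZ-compensation blocks at the end of second_overflow() keep the same arithmetic in shift_right() form. As a worked check of the HZ == 100 case above: time_adj + (time_adj >> 2) + (time_adj >> 5) = time_adj * (1 + 1/4 + 1/32) = time_adj * 1.28125. The target factor is 128/100 = 1.28, needed because time_adj was scaled assuming 1 << SHIFT_HZ = 128 ticks per second while only 100 real ticks occur, so the scaling actually applied is 128.125/100 and the 0.125 excess over 128 is the small residual error the comment refers to.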
@@ -777,23 +721,20 @@ static void update_wall_time_one_tick(void)
777{ 721{
778 long time_adjust_step, delta_nsec; 722 long time_adjust_step, delta_nsec;
779 723
780 if ( (time_adjust_step = time_adjust) != 0 ) { 724 if ((time_adjust_step = time_adjust) != 0 ) {
781 /* We are doing an adjtime thing. 725 /*
782 * 726 * We are doing an adjtime thing. Prepare time_adjust_step to
783 * Prepare time_adjust_step to be within bounds. 727 * be within bounds. Note that a positive time_adjust means we
784 * Note that a positive time_adjust means we want the clock 728 * want the clock to run faster.
785 * to run faster. 729 *
786 * 730 * Limit the amount of the step to be in the range
787 * Limit the amount of the step to be in the range 731 * -tickadj .. +tickadj
788 * -tickadj .. +tickadj 732 */
789 */ 733 time_adjust_step = min(time_adjust_step, (long)tickadj);
790 if (time_adjust > tickadj) 734 time_adjust_step = max(time_adjust_step, (long)-tickadj);
791 time_adjust_step = tickadj; 735
792 else if (time_adjust < -tickadj) 736 /* Reduce by this step the amount of time left */
793 time_adjust_step = -tickadj; 737 time_adjust -= time_adjust_step;
794
795 /* Reduce by this step the amount of time left */
796 time_adjust -= time_adjust_step;
797 } 738 }
798 delta_nsec = tick_nsec + time_adjust_step * 1000; 739 delta_nsec = tick_nsec + time_adjust_step * 1000;
799 /* 740 /*
@@ -801,13 +742,8 @@ static void update_wall_time_one_tick(void)
801 * advance the tick more. 742 * advance the tick more.
802 */ 743 */
803 time_phase += time_adj; 744 time_phase += time_adj;
804 if (time_phase <= -FINENSEC) { 745 if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
805 long ltemp = -time_phase >> (SHIFT_SCALE - 10); 746 long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
806 time_phase += ltemp << (SHIFT_SCALE - 10);
807 delta_nsec -= ltemp;
808 }
809 else if (time_phase >= FINENSEC) {
810 long ltemp = time_phase >> (SHIFT_SCALE - 10);
811 time_phase -= ltemp << (SHIFT_SCALE - 10); 747 time_phase -= ltemp << (SHIFT_SCALE - 10);
812 delta_nsec += ltemp; 748 delta_nsec += ltemp;
813 } 749 }
@@ -1137,8 +1073,8 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
1137 if (timeout < 0) 1073 if (timeout < 0)
1138 { 1074 {
1139 printk(KERN_ERR "schedule_timeout: wrong timeout " 1075 printk(KERN_ERR "schedule_timeout: wrong timeout "
1140 "value %lx from %p\n", timeout, 1076 "value %lx from %p\n", timeout,
1141 __builtin_return_address(0)); 1077 __builtin_return_address(0));
1142 current->state = TASK_RUNNING; 1078 current->state = TASK_RUNNING;
1143 goto out; 1079 goto out;
1144 } 1080 }
@@ -1146,12 +1082,8 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
1146 1082
1147 expire = timeout + jiffies; 1083 expire = timeout + jiffies;
1148 1084
1149 init_timer(&timer); 1085 setup_timer(&timer, process_timeout, (unsigned long)current);
1150 timer.expires = expire; 1086 __mod_timer(&timer, expire);
1151 timer.data = (unsigned long) current;
1152 timer.function = process_timeout;
1153
1154 add_timer(&timer);
1155 schedule(); 1087 schedule();
1156 del_singleshot_timer_sync(&timer); 1088 del_singleshot_timer_sync(&timer);
1157 1089
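In schedule_timeout() the four open-coded assignments are folded into setup_timer(), and the expires/add_timer() pair becomes a direct __mod_timer() call. setup_timer() is introduced by this series outside the hunks shown; a hedged sketch of what it presumably amounts to, matching the lines it replaces:

/* Hedged sketch (helper not shown in these hunks): bind the callback and
 * its argument, then initialize the timer as init_timer() did before. */
static inline void setup_timer(struct timer_list *timer,
			       void (*function)(unsigned long),
			       unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer(timer);
}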
@@ -1168,15 +1100,15 @@ EXPORT_SYMBOL(schedule_timeout);
1168 */ 1100 */
1169signed long __sched schedule_timeout_interruptible(signed long timeout) 1101signed long __sched schedule_timeout_interruptible(signed long timeout)
1170{ 1102{
1171 __set_current_state(TASK_INTERRUPTIBLE); 1103 __set_current_state(TASK_INTERRUPTIBLE);
1172 return schedule_timeout(timeout); 1104 return schedule_timeout(timeout);
1173} 1105}
1174EXPORT_SYMBOL(schedule_timeout_interruptible); 1106EXPORT_SYMBOL(schedule_timeout_interruptible);
1175 1107
1176signed long __sched schedule_timeout_uninterruptible(signed long timeout) 1108signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1177{ 1109{
1178 __set_current_state(TASK_UNINTERRUPTIBLE); 1110 __set_current_state(TASK_UNINTERRUPTIBLE);
1179 return schedule_timeout(timeout); 1111 return schedule_timeout(timeout);
1180} 1112}
1181EXPORT_SYMBOL(schedule_timeout_uninterruptible); 1113EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1182 1114
@@ -1516,16 +1448,18 @@ static void time_interpolator_update(long delta_nsec)
1516 if (!time_interpolator) 1448 if (!time_interpolator)
1517 return; 1449 return;
1518 1450
1519 /* The interpolator compensates for late ticks by accumulating 1451 /*
1520 * the late time in time_interpolator->offset. A tick earlier than 1452 * The interpolator compensates for late ticks by accumulating the late
1521 * expected will lead to a reset of the offset and a corresponding 1453 * time in time_interpolator->offset. A tick earlier than expected will
1522 * jump of the clock forward. Again this only works if the 1454 * lead to a reset of the offset and a corresponding jump of the clock
1523 * interpolator clock is running slightly slower than the regular clock 1455 * forward. Again this only works if the interpolator clock is running
1524 * and the tuning logic insures that. 1456 * slightly slower than the regular clock and the tuning logic insures
1525 */ 1457 * that.
1458 */
1526 1459
1527 counter = time_interpolator_get_counter(1); 1460 counter = time_interpolator_get_counter(1);
1528 offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator); 1461 offset = time_interpolator->offset +
1462 GET_TI_NSECS(counter, time_interpolator);
1529 1463
1530 if (delta_nsec < 0 || (unsigned long) delta_nsec < offset) 1464 if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
1531 time_interpolator->offset = offset - delta_nsec; 1465 time_interpolator->offset = offset - delta_nsec;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 91bacb13a7e2..7cee222231bc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -12,6 +12,8 @@
12 * Andrew Morton <andrewm@uow.edu.au> 12 * Andrew Morton <andrewm@uow.edu.au>
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de> 13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu> 14 * Theodore Ts'o <tytso@mit.edu>
15 *
16 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
15 */ 17 */
16 18
17#include <linux/module.h> 19#include <linux/module.h>
@@ -57,7 +59,7 @@ struct cpu_workqueue_struct {
57 * per-CPU workqueues: 59 * per-CPU workqueues:
58 */ 60 */
59struct workqueue_struct { 61struct workqueue_struct {
60 struct cpu_workqueue_struct cpu_wq[NR_CPUS]; 62 struct cpu_workqueue_struct *cpu_wq;
61 const char *name; 63 const char *name;
62 struct list_head list; /* Empty if single thread */ 64 struct list_head list; /* Empty if single thread */
63}; 65};
@@ -102,7 +104,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
102 if (unlikely(is_single_threaded(wq))) 104 if (unlikely(is_single_threaded(wq)))
103 cpu = 0; 105 cpu = 0;
104 BUG_ON(!list_empty(&work->entry)); 106 BUG_ON(!list_empty(&work->entry));
105 __queue_work(wq->cpu_wq + cpu, work); 107 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
106 ret = 1; 108 ret = 1;
107 } 109 }
108 put_cpu(); 110 put_cpu();
@@ -118,7 +120,7 @@ static void delayed_work_timer_fn(unsigned long __data)
118 if (unlikely(is_single_threaded(wq))) 120 if (unlikely(is_single_threaded(wq)))
119 cpu = 0; 121 cpu = 0;
120 122
121 __queue_work(wq->cpu_wq + cpu, work); 123 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
122} 124}
123 125
124int fastcall queue_delayed_work(struct workqueue_struct *wq, 126int fastcall queue_delayed_work(struct workqueue_struct *wq,
@@ -265,13 +267,13 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
265 267
266 if (is_single_threaded(wq)) { 268 if (is_single_threaded(wq)) {
267 /* Always use cpu 0's area. */ 269 /* Always use cpu 0's area. */
268 flush_cpu_workqueue(wq->cpu_wq + 0); 270 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
269 } else { 271 } else {
270 int cpu; 272 int cpu;
271 273
272 lock_cpu_hotplug(); 274 lock_cpu_hotplug();
273 for_each_online_cpu(cpu) 275 for_each_online_cpu(cpu)
274 flush_cpu_workqueue(wq->cpu_wq + cpu); 276 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
275 unlock_cpu_hotplug(); 277 unlock_cpu_hotplug();
276 } 278 }
277} 279}
@@ -279,7 +281,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
279static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, 281static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
280 int cpu) 282 int cpu)
281{ 283{
282 struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu; 284 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
283 struct task_struct *p; 285 struct task_struct *p;
284 286
285 spin_lock_init(&cwq->lock); 287 spin_lock_init(&cwq->lock);
@@ -312,6 +314,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
312 if (!wq) 314 if (!wq)
313 return NULL; 315 return NULL;
314 316
317 wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
315 wq->name = name; 318 wq->name = name;
316 /* We don't need the distraction of CPUs appearing and vanishing. */ 319 /* We don't need the distraction of CPUs appearing and vanishing. */
317 lock_cpu_hotplug(); 320 lock_cpu_hotplug();
@@ -353,7 +356,7 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
353 unsigned long flags; 356 unsigned long flags;
354 struct task_struct *p; 357 struct task_struct *p;
355 358
356 cwq = wq->cpu_wq + cpu; 359 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
357 spin_lock_irqsave(&cwq->lock, flags); 360 spin_lock_irqsave(&cwq->lock, flags);
358 p = cwq->thread; 361 p = cwq->thread;
359 cwq->thread = NULL; 362 cwq->thread = NULL;
@@ -380,6 +383,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
380 spin_unlock(&workqueue_lock); 383 spin_unlock(&workqueue_lock);
381 } 384 }
382 unlock_cpu_hotplug(); 385 unlock_cpu_hotplug();
386 free_percpu(wq->cpu_wq);
383 kfree(wq); 387 kfree(wq);
384} 388}
385 389
@@ -458,7 +462,7 @@ int current_is_keventd(void)
458 462
459 BUG_ON(!keventd_wq); 463 BUG_ON(!keventd_wq);
460 464
461 cwq = keventd_wq->cpu_wq + cpu; 465 cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
462 if (current == cwq->thread) 466 if (current == cwq->thread)
463 ret = 1; 467 ret = 1;
464 468
@@ -470,7 +474,7 @@ int current_is_keventd(void)
470/* Take the work from this (downed) CPU. */ 474/* Take the work from this (downed) CPU. */
471static void take_over_work(struct workqueue_struct *wq, unsigned int cpu) 475static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
472{ 476{
473 struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu; 477 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
474 LIST_HEAD(list); 478 LIST_HEAD(list);
475 struct work_struct *work; 479 struct work_struct *work;
476 480
@@ -481,7 +485,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
481 printk("Taking work for %s\n", wq->name); 485 printk("Taking work for %s\n", wq->name);
482 work = list_entry(list.next,struct work_struct,entry); 486 work = list_entry(list.next,struct work_struct,entry);
483 list_del(&work->entry); 487 list_del(&work->entry);
484 __queue_work(wq->cpu_wq + smp_processor_id(), work); 488 __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
485 } 489 }
486 spin_unlock_irq(&cwq->lock); 490 spin_unlock_irq(&cwq->lock);
487} 491}
@@ -508,15 +512,18 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
508 case CPU_ONLINE: 512 case CPU_ONLINE:
509 /* Kick off worker threads. */ 513 /* Kick off worker threads. */
510 list_for_each_entry(wq, &workqueues, list) { 514 list_for_each_entry(wq, &workqueues, list) {
511 kthread_bind(wq->cpu_wq[hotcpu].thread, hotcpu); 515 struct cpu_workqueue_struct *cwq;
512 wake_up_process(wq->cpu_wq[hotcpu].thread); 516
517 cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
518 kthread_bind(cwq->thread, hotcpu);
519 wake_up_process(cwq->thread);
513 } 520 }
514 break; 521 break;
515 522
516 case CPU_UP_CANCELED: 523 case CPU_UP_CANCELED:
517 list_for_each_entry(wq, &workqueues, list) { 524 list_for_each_entry(wq, &workqueues, list) {
518 /* Unbind so it can run. */ 525 /* Unbind so it can run. */
519 kthread_bind(wq->cpu_wq[hotcpu].thread, 526 kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
520 smp_processor_id()); 527 smp_processor_id());
521 cleanup_workqueue_thread(wq, hotcpu); 528 cleanup_workqueue_thread(wq, hotcpu);
522 } 529 }
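
The workqueue change above swaps a statically sized cpu_wq[NR_CPUS] array for a dynamic per-CPU allocation. A minimal sketch of that pattern, with invented structure and function names, looks roughly like this:

#include <linux/errno.h>
#include <linux/percpu.h>

struct example_counter {
	unsigned long events;
};

struct example {
	struct example_counter *counters;	/* was: struct example_counter counters[NR_CPUS]; */
};

static int example_init(struct example *e)
{
	e->counters = alloc_percpu(struct example_counter);
	if (!e->counters)
		return -ENOMEM;
	return 0;
}

static void example_hit(struct example *e, int cpu)
{
	/* was: e->counters[cpu].events++ */
	per_cpu_ptr(e->counters, cpu)->events++;
}

static void example_exit(struct example *e)
{
	free_percpu(e->counters);
}
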
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 016e89a44ac8..156822e3cc79 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -128,7 +128,7 @@ config DEBUG_HIGHMEM
128config DEBUG_BUGVERBOSE 128config DEBUG_BUGVERBOSE
129 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED 129 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
130 depends on BUG 130 depends on BUG
131 depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || (X86 && !X86_64) || FRV 131 depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV
132 default !EMBEDDED 132 default !EMBEDDED
133 help 133 help
134 Say Y here to make BUG() panics output the file name and line number 134 Say Y here to make BUG() panics output the file name and line number
@@ -168,13 +168,34 @@ config DEBUG_FS
168 168
169 If unsure, say N. 169 If unsure, say N.
170 170
171config DEBUG_VM
172 bool "Debug VM"
173 depends on DEBUG_KERNEL
174 help
175 Enable this to debug the virtual-memory system.
176
177 If unsure, say N.
178
171config FRAME_POINTER 179config FRAME_POINTER
172 bool "Compile the kernel with frame pointers" 180 bool "Compile the kernel with frame pointers"
173 depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML) 181 depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML)
174 default y if DEBUG_INFO && UML 182 default y if DEBUG_INFO && UML
175 help 183 help
176 If you say Y here the resulting kernel image will be slightly larger 184 If you say Y here the resulting kernel image will be slightly larger
177 and slower, but it might give very useful debugging information 185 and slower, but it might give very useful debugging information on
178 on some architectures or you use external debuggers. 186 some architectures or if you use external debuggers.
179 If you don't debug the kernel, you can say N. 187 If you don't debug the kernel, you can say N.
180 188
189config RCU_TORTURE_TEST
190 tristate "torture tests for RCU"
191 depends on DEBUG_KERNEL
192 default n
193 help
194 This option provides a kernel module that runs torture tests
195 on the RCU infrastructure. The kernel module may be built
196 after the fact on the running kernel to be tested, if desired.
197
198 Say Y here if you want RCU torture tests to start automatically
199 at boot time (you probably don't).
200 Say M if you want the RCU torture tests to build as a module.
201 Say N if you are unsure.
diff --git a/lib/bitmap.c b/lib/bitmap.c
index fb9371fdd44a..23d3b1147fe9 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -511,6 +511,172 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
511} 511}
512EXPORT_SYMBOL(bitmap_parselist); 512EXPORT_SYMBOL(bitmap_parselist);
513 513
514/*
515 * bitmap_pos_to_ord(buf, pos, bits)
516 * @buf: pointer to a bitmap
517 * @pos: a bit position in @buf (0 <= @pos < @bits)
518 * @bits: number of valid bit positions in @buf
519 *
520 * Map the bit at position @pos in @buf (of length @bits) to the
521 * ordinal of which set bit it is. If it is not set or if @pos
522 * is not a valid bit position, map to zero (0).
523 *
524 * If for example, just bits 4 through 7 are set in @buf, then @pos
525 * values 4 through 7 will get mapped to 0 through 3, respectively,
526 * and other @pos values will get mapped to 0. When @pos value 7
527 * gets mapped to (returns) @ord value 3 in this example, that means
528 * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
529 *
 530 * The bit positions 0 through @bits - 1 are valid positions in @buf.
531 */
532static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
533{
534 int ord = 0;
535
536 if (pos >= 0 && pos < bits) {
537 int i;
538
539 for (i = find_first_bit(buf, bits);
540 i < pos;
541 i = find_next_bit(buf, bits, i + 1))
542 ord++;
543 if (i > pos)
544 ord = 0;
545 }
546 return ord;
547}
548
549/**
550 * bitmap_ord_to_pos(buf, ord, bits)
551 * @buf: pointer to bitmap
552 * @ord: ordinal bit position (n-th set bit, n >= 0)
553 * @bits: number of valid bit positions in @buf
554 *
555 * Map the ordinal offset of bit @ord in @buf to its position in @buf.
556 * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0).
557 *
558 * If for example, just bits 4 through 7 are set in @buf, then @ord
559 * values 0 through 3 will get mapped to 4 through 7, respectively,
 560 * and all other @ord values will get mapped to 0. When @ord value 3
561 * gets mapped to (returns) @pos value 7 in this example, that means
562 * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
563 *
 564 * The bit positions 0 through @bits - 1 are valid positions in @buf.
565 */
566static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
567{
568 int pos = 0;
569
570 if (ord >= 0 && ord < bits) {
571 int i;
572
573 for (i = find_first_bit(buf, bits);
574 i < bits && ord > 0;
575 i = find_next_bit(buf, bits, i + 1))
576 ord--;
577 if (i < bits && ord == 0)
578 pos = i;
579 }
580
581 return pos;
582}
583
584/**
585 * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
586 * @src: subset to be remapped
587 * @dst: remapped result
588 * @old: defines domain of map
589 * @new: defines range of map
590 * @bits: number of bits in each of these bitmaps
591 *
592 * Let @old and @new define a mapping of bit positions, such that
593 * whatever position is held by the n-th set bit in @old is mapped
594 * to the n-th set bit in @new. In the more general case, allowing
595 * for the possibility that the weight 'w' of @new is less than the
596 * weight of @old, map the position of the n-th set bit in @old to
597 * the position of the m-th set bit in @new, where m == n % w.
598 *
 599 * If either of the @old and @new bitmaps is empty, or if @src and @dst
600 * point to the same location, then this routine does nothing.
601 *
602 * The positions of unset bits in @old are mapped to the position of
603 * the first set bit in @new.
604 *
605 * Apply the above specified mapping to @src, placing the result in
606 * @dst, clearing any bits previously set in @dst.
607 *
608 * The resulting value of @dst will have either the same weight as
609 * @src, or less weight in the general case that the mapping wasn't
610 * injective due to the weight of @new being less than that of @old.
611 * The resulting value of @dst will never have greater weight than
612 * that of @src, except perhaps in the case that one of the above
613 * conditions was not met and this routine just returned.
614 *
 615 * For example, let's say that @old has bits 4 through 7 set, and
616 * @new has bits 12 through 15 set. This defines the mapping of bit
617 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 618 * bit positions to 12 (the first set bit in @new). So if, say, @src
619 * comes into this routine with bits 1, 5 and 7 set, then @dst should
620 * leave with bits 12, 13 and 15 set.
621 */
622void bitmap_remap(unsigned long *dst, const unsigned long *src,
623 const unsigned long *old, const unsigned long *new,
624 int bits)
625{
626 int s;
627
628 if (bitmap_weight(old, bits) == 0)
629 return;
630 if (bitmap_weight(new, bits) == 0)
631 return;
632 if (dst == src) /* following doesn't handle inplace remaps */
633 return;
634
635 bitmap_zero(dst, bits);
636 for (s = find_first_bit(src, bits);
637 s < bits;
638 s = find_next_bit(src, bits, s + 1)) {
639 int x = bitmap_pos_to_ord(old, s, bits);
640 int y = bitmap_ord_to_pos(new, x, bits);
641 set_bit(y, dst);
642 }
643}
644EXPORT_SYMBOL(bitmap_remap);
645
646/**
647 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
 648 * @oldbit: bit position to be mapped
649 * @old: defines domain of map
650 * @new: defines range of map
651 * @bits: number of bits in each of these bitmaps
652 *
653 * Let @old and @new define a mapping of bit positions, such that
654 * whatever position is held by the n-th set bit in @old is mapped
655 * to the n-th set bit in @new. In the more general case, allowing
656 * for the possibility that the weight 'w' of @new is less than the
657 * weight of @old, map the position of the n-th set bit in @old to
658 * the position of the m-th set bit in @new, where m == n % w.
659 *
660 * The positions of unset bits in @old are mapped to the position of
661 * the first set bit in @new.
662 *
663 * Apply the above specified mapping to bit position @oldbit, returning
664 * the new bit position.
665 *
 666 * For example, let's say that @old has bits 4 through 7 set, and
667 * @new has bits 12 through 15 set. This defines the mapping of bit
668 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 669 * bit positions to 12 (the first set bit in @new). So if, say, @oldbit
670 * is 5, then this routine returns 13.
671 */
672int bitmap_bitremap(int oldbit, const unsigned long *old,
673 const unsigned long *new, int bits)
674{
675 int x = bitmap_pos_to_ord(old, oldbit, bits);
676 return bitmap_ord_to_pos(new, x, bits);
677}
678EXPORT_SYMBOL(bitmap_bitremap);
679
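
A small usage sketch for the two exported helpers above, built around the 4-through-7 / 12-through-15 example from the comments; the surrounding function is hypothetical:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_BITS 32

static void bitmap_remap_example(void)
{
	DECLARE_BITMAP(old, EXAMPLE_BITS);
	DECLARE_BITMAP(new, EXAMPLE_BITS);
	DECLARE_BITMAP(src, EXAMPLE_BITS);
	DECLARE_BITMAP(dst, EXAMPLE_BITS);
	int i;

	bitmap_zero(old, EXAMPLE_BITS);
	bitmap_zero(new, EXAMPLE_BITS);
	bitmap_zero(src, EXAMPLE_BITS);

	for (i = 4; i <= 7; i++)	/* old = {4,5,6,7} */
		set_bit(i, old);
	for (i = 12; i <= 15; i++)	/* new = {12,13,14,15} */
		set_bit(i, new);
	set_bit(1, src);
	set_bit(5, src);
	set_bit(7, src);

	bitmap_remap(dst, src, old, new, EXAMPLE_BITS);
	/* dst now has bits 12, 13 and 15 set */

	i = bitmap_bitremap(5, old, new, EXAMPLE_BITS);
	/* i == 13: bit 5 is the 1st (0-based) set bit in old,
	 * and bit 13 is the 1st set bit in new */
}
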
514/** 680/**
515 * bitmap_find_free_region - find a contiguous aligned mem region 681 * bitmap_find_free_region - find a contiguous aligned mem region
516 * @bitmap: an array of unsigned longs corresponding to the bitmap 682 * @bitmap: an array of unsigned longs corresponding to the bitmap
diff --git a/lib/extable.c b/lib/extable.c
index 3f677a8f0c3c..18df57c029df 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -16,9 +16,6 @@
16#include <linux/sort.h> 16#include <linux/sort.h>
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18 18
19extern struct exception_table_entry __start___ex_table[];
20extern struct exception_table_entry __stop___ex_table[];
21
22#ifndef ARCH_HAS_SORT_EXTABLE 19#ifndef ARCH_HAS_SORT_EXTABLE
23/* 20/*
24 * The exception table needs to be sorted so that the binary 21 * The exception table needs to be sorted so that the binary
diff --git a/lib/idr.c b/lib/idr.c
index 6414b2fb482d..d226259c3c28 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,20 +6,20 @@
6 * Modified by George Anzinger to reuse immediately and to use 6 * Modified by George Anzinger to reuse immediately and to use
7 * find bit instructions. Also removed _irq on spinlocks. 7 * find bit instructions. Also removed _irq on spinlocks.
8 * 8 *
9 * Small id to pointer translation service. 9 * Small id to pointer translation service.
10 * 10 *
11 * It uses a radix tree like structure as a sparse array indexed 11 * It uses a radix tree like structure as a sparse array indexed
12 * by the id to obtain the pointer. The bitmap makes allocating 12 * by the id to obtain the pointer. The bitmap makes allocating
13 * a new id quick. 13 * a new id quick.
14 * 14 *
15 * You call it to allocate an id (an int) an associate with that id a 15 * You call it to allocate an id (an int) an associate with that id a
16 * pointer or what ever, we treat it as a (void *). You can pass this 16 * pointer or what ever, we treat it as a (void *). You can pass this
17 * id to a user for him to pass back at a later time. You then pass 17 * id to a user for him to pass back at a later time. You then pass
18 * that id to this code and it returns your pointer. 18 * that id to this code and it returns your pointer.
19 19
20 * You can release ids at any time. When all ids are released, most of 20 * You can release ids at any time. When all ids are released, most of
21 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we 21 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
22 * don't need to go to the memory "store" during an id allocate, just 22 * don't need to go to the memory "store" during an id allocate, just
23 * so you don't need to be too concerned about locking and conflicts 23 * so you don't need to be too concerned about locking and conflicts
24 * with the slab allocator. 24 * with the slab allocator.
25 */ 25 */
@@ -77,7 +77,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
77 while (idp->id_free_cnt < IDR_FREE_MAX) { 77 while (idp->id_free_cnt < IDR_FREE_MAX) {
78 struct idr_layer *new; 78 struct idr_layer *new;
79 new = kmem_cache_alloc(idr_layer_cache, gfp_mask); 79 new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
80 if(new == NULL) 80 if (new == NULL)
81 return (0); 81 return (0);
82 free_layer(idp, new); 82 free_layer(idp, new);
83 } 83 }
@@ -107,7 +107,7 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
107 if (m == IDR_SIZE) { 107 if (m == IDR_SIZE) {
108 /* no space available go back to previous layer. */ 108 /* no space available go back to previous layer. */
109 l++; 109 l++;
110 id = (id | ((1 << (IDR_BITS*l))-1)) + 1; 110 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
111 if (!(p = pa[l])) { 111 if (!(p = pa[l])) {
112 *starting_id = id; 112 *starting_id = id;
113 return -2; 113 return -2;
@@ -161,7 +161,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
161{ 161{
162 struct idr_layer *p, *new; 162 struct idr_layer *p, *new;
163 int layers, v, id; 163 int layers, v, id;
164 164
165 id = starting_id; 165 id = starting_id;
166build_up: 166build_up:
167 p = idp->top; 167 p = idp->top;
@@ -225,6 +225,7 @@ build_up:
225int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) 225int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
226{ 226{
227 int rv; 227 int rv;
228
228 rv = idr_get_new_above_int(idp, ptr, starting_id); 229 rv = idr_get_new_above_int(idp, ptr, starting_id);
229 /* 230 /*
230 * This is a cheap hack until the IDR code can be fixed to 231 * This is a cheap hack until the IDR code can be fixed to
@@ -259,6 +260,7 @@ EXPORT_SYMBOL(idr_get_new_above);
259int idr_get_new(struct idr *idp, void *ptr, int *id) 260int idr_get_new(struct idr *idp, void *ptr, int *id)
260{ 261{
261 int rv; 262 int rv;
263
262 rv = idr_get_new_above_int(idp, ptr, 0); 264 rv = idr_get_new_above_int(idp, ptr, 0);
263 /* 265 /*
264 * This is a cheap hack until the IDR code can be fixed to 266 * This is a cheap hack until the IDR code can be fixed to
@@ -306,11 +308,10 @@ static void sub_remove(struct idr *idp, int shift, int id)
306 free_layer(idp, **paa); 308 free_layer(idp, **paa);
307 **paa-- = NULL; 309 **paa-- = NULL;
308 } 310 }
309 if ( ! *paa ) 311 if (!*paa)
310 idp->layers = 0; 312 idp->layers = 0;
311 } else { 313 } else
312 idr_remove_warning(id); 314 idr_remove_warning(id);
313 }
314} 315}
315 316
316/** 317/**
@@ -326,9 +327,8 @@ void idr_remove(struct idr *idp, int id)
326 id &= MAX_ID_MASK; 327 id &= MAX_ID_MASK;
327 328
328 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 329 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
329 if ( idp->top && idp->top->count == 1 && 330 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
330 (idp->layers > 1) && 331 idp->top->ary[0]) { // We can drop a layer
331 idp->top->ary[0]){ // We can drop a layer
332 332
333 p = idp->top->ary[0]; 333 p = idp->top->ary[0];
334 idp->top->bitmap = idp->top->count = 0; 334 idp->top->bitmap = idp->top->count = 0;
@@ -337,7 +337,6 @@ void idr_remove(struct idr *idp, int id)
337 --idp->layers; 337 --idp->layers;
338 } 338 }
339 while (idp->id_free_cnt >= IDR_FREE_MAX) { 339 while (idp->id_free_cnt >= IDR_FREE_MAX) {
340
341 p = alloc_layer(idp); 340 p = alloc_layer(idp);
342 kmem_cache_free(idr_layer_cache, p); 341 kmem_cache_free(idr_layer_cache, p);
343 return; 342 return;
@@ -391,8 +390,8 @@ void *idr_find(struct idr *idp, int id)
391} 390}
392EXPORT_SYMBOL(idr_find); 391EXPORT_SYMBOL(idr_find);
393 392
394static void idr_cache_ctor(void * idr_layer, 393static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
395 kmem_cache_t *idr_layer_cache, unsigned long flags) 394 unsigned long flags)
396{ 395{
397 memset(idr_layer, 0, sizeof(struct idr_layer)); 396 memset(idr_layer, 0, sizeof(struct idr_layer));
398} 397}
@@ -400,7 +399,7 @@ static void idr_cache_ctor(void * idr_layer,
400static int init_id_cache(void) 399static int init_id_cache(void)
401{ 400{
402 if (!idr_layer_cache) 401 if (!idr_layer_cache)
403 idr_layer_cache = kmem_cache_create("idr_layer_cache", 402 idr_layer_cache = kmem_cache_create("idr_layer_cache",
404 sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL); 403 sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
405 return 0; 404 return 0;
406} 405}
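
The header comment of lib/idr.c summarises the allocate/look-up/release cycle; a hypothetical user of the interface of this era (names invented, error handling trimmed) might look like:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>

static struct idr example_idr;

static void example_setup(void)
{
	idr_init(&example_idr);
}

static int example_store(void *object, int *id)
{
	if (!idr_pre_get(&example_idr, GFP_KERNEL))
		return -ENOMEM;		/* could not preallocate a layer */

	/* returns 0 on success, -EAGAIN if the preallocation raced away */
	return idr_get_new(&example_idr, object, id);
}

static void *example_lookup(int id)
{
	return idr_find(&example_idr, id);
}

static void example_release(int id)
{
	idr_remove(&example_idr, id);
}
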
diff --git a/lib/kobject.c b/lib/kobject.c
index 253d3004ace9..a181abed89f6 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -14,6 +14,7 @@
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/stat.h> 16#include <linux/stat.h>
17#include <linux/slab.h>
17 18
18/** 19/**
19 * populate_dir - populate directory with attributes. 20 * populate_dir - populate directory with attributes.
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 42c08ef828c5..eddc9b3d3876 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -5,6 +5,7 @@
5 */ 5 */
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/kallsyms.h> 7#include <linux/kallsyms.h>
8#include <linux/sched.h>
8 9
9unsigned int debug_smp_processor_id(void) 10unsigned int debug_smp_processor_id(void)
10{ 11{
diff --git a/lib/sort.c b/lib/sort.c
index ddc4d35df289..5f3b51ffa1dc 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -7,6 +7,7 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/sort.h> 9#include <linux/sort.h>
10#include <linux/slab.h>
10 11
11static void u32_swap(void *a, void *b, int size) 12static void u32_swap(void *a, void *b, int size)
12{ 13{
diff --git a/lib/string.c b/lib/string.c
index d886ef157c12..037a48acedbb 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -36,11 +36,13 @@ int strnicmp(const char *s1, const char *s2, size_t len)
36 /* Yes, Virginia, it had better be unsigned */ 36 /* Yes, Virginia, it had better be unsigned */
37 unsigned char c1, c2; 37 unsigned char c1, c2;
38 38
39 c1 = 0; c2 = 0; 39 c1 = c2 = 0;
40 if (len) { 40 if (len) {
41 do { 41 do {
42 c1 = *s1; c2 = *s2; 42 c1 = *s1;
43 s1++; s2++; 43 c2 = *s2;
44 s1++;
45 s2++;
44 if (!c1) 46 if (!c1)
45 break; 47 break;
46 if (!c2) 48 if (!c2)
@@ -55,7 +57,6 @@ int strnicmp(const char *s1, const char *s2, size_t len)
55 } 57 }
56 return (int)c1 - (int)c2; 58 return (int)c1 - (int)c2;
57} 59}
58
59EXPORT_SYMBOL(strnicmp); 60EXPORT_SYMBOL(strnicmp);
60#endif 61#endif
61 62
@@ -66,7 +67,7 @@ EXPORT_SYMBOL(strnicmp);
66 * @src: Where to copy the string from 67 * @src: Where to copy the string from
67 */ 68 */
68#undef strcpy 69#undef strcpy
69char * strcpy(char * dest,const char *src) 70char *strcpy(char *dest, const char *src)
70{ 71{
71 char *tmp = dest; 72 char *tmp = dest;
72 73
@@ -91,12 +92,13 @@ EXPORT_SYMBOL(strcpy);
91 * count, the remainder of @dest will be padded with %NUL. 92 * count, the remainder of @dest will be padded with %NUL.
92 * 93 *
93 */ 94 */
94char * strncpy(char * dest,const char *src,size_t count) 95char *strncpy(char *dest, const char *src, size_t count)
95{ 96{
96 char *tmp = dest; 97 char *tmp = dest;
97 98
98 while (count) { 99 while (count) {
99 if ((*tmp = *src) != 0) src++; 100 if ((*tmp = *src) != 0)
101 src++;
100 tmp++; 102 tmp++;
101 count--; 103 count--;
102 } 104 }
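
As the strncpy() comment notes, the destination is padded with NUL but is not necessarily terminated when the source is too long; strlcpy(), touched in the next hunk, always terminates and reports the source length. A hedged sketch of the difference, with an invented helper:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int example_copy_name(char *dst, size_t dstlen, const char *src)
{
	/* strncpy() does not terminate when src fills the buffer,
	 * so the caller has to do it: */
	strncpy(dst, src, dstlen);
	dst[dstlen - 1] = '\0';

	/* strlcpy() always terminates and returns strlen(src), which
	 * makes truncation trivial to detect: */
	return strlcpy(dst, src, dstlen) >= dstlen ? -E2BIG : 0;
}
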
@@ -122,7 +124,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
122 size_t ret = strlen(src); 124 size_t ret = strlen(src);
123 125
124 if (size) { 126 if (size) {
125 size_t len = (ret >= size) ? size-1 : ret; 127 size_t len = (ret >= size) ? size - 1 : ret;
126 memcpy(dest, src, len); 128 memcpy(dest, src, len);
127 dest[len] = '\0'; 129 dest[len] = '\0';
128 } 130 }
@@ -138,7 +140,7 @@ EXPORT_SYMBOL(strlcpy);
138 * @src: The string to append to it 140 * @src: The string to append to it
139 */ 141 */
140#undef strcat 142#undef strcat
141char * strcat(char * dest, const char * src) 143char *strcat(char *dest, const char *src)
142{ 144{
143 char *tmp = dest; 145 char *tmp = dest;
144 146
@@ -146,7 +148,6 @@ char * strcat(char * dest, const char * src)
146 dest++; 148 dest++;
147 while ((*dest++ = *src++) != '\0') 149 while ((*dest++ = *src++) != '\0')
148 ; 150 ;
149
150 return tmp; 151 return tmp;
151} 152}
152EXPORT_SYMBOL(strcat); 153EXPORT_SYMBOL(strcat);
@@ -162,7 +163,7 @@ EXPORT_SYMBOL(strcat);
162 * Note that in contrast to strncpy, strncat ensures the result is 163 * Note that in contrast to strncpy, strncat ensures the result is
163 * terminated. 164 * terminated.
164 */ 165 */
165char * strncat(char *dest, const char *src, size_t count) 166char *strncat(char *dest, const char *src, size_t count)
166{ 167{
167 char *tmp = dest; 168 char *tmp = dest;
168 169
@@ -176,7 +177,6 @@ char * strncat(char *dest, const char *src, size_t count)
176 } 177 }
177 } 178 }
178 } 179 }
179
180 return tmp; 180 return tmp;
181} 181}
182EXPORT_SYMBOL(strncat); 182EXPORT_SYMBOL(strncat);
@@ -216,15 +216,14 @@ EXPORT_SYMBOL(strlcat);
216 * @ct: Another string 216 * @ct: Another string
217 */ 217 */
218#undef strcmp 218#undef strcmp
219int strcmp(const char * cs,const char * ct) 219int strcmp(const char *cs, const char *ct)
220{ 220{
221 register signed char __res; 221 signed char __res;
222 222
223 while (1) { 223 while (1) {
224 if ((__res = *cs - *ct++) != 0 || !*cs++) 224 if ((__res = *cs - *ct++) != 0 || !*cs++)
225 break; 225 break;
226 } 226 }
227
228 return __res; 227 return __res;
229} 228}
230EXPORT_SYMBOL(strcmp); 229EXPORT_SYMBOL(strcmp);
@@ -237,16 +236,15 @@ EXPORT_SYMBOL(strcmp);
237 * @ct: Another string 236 * @ct: Another string
238 * @count: The maximum number of bytes to compare 237 * @count: The maximum number of bytes to compare
239 */ 238 */
240int strncmp(const char * cs,const char * ct,size_t count) 239int strncmp(const char *cs, const char *ct, size_t count)
241{ 240{
242 register signed char __res = 0; 241 signed char __res = 0;
243 242
244 while (count) { 243 while (count) {
245 if ((__res = *cs - *ct++) != 0 || !*cs++) 244 if ((__res = *cs - *ct++) != 0 || !*cs++)
246 break; 245 break;
247 count--; 246 count--;
248 } 247 }
249
250 return __res; 248 return __res;
251} 249}
252EXPORT_SYMBOL(strncmp); 250EXPORT_SYMBOL(strncmp);
@@ -258,12 +256,12 @@ EXPORT_SYMBOL(strncmp);
258 * @s: The string to be searched 256 * @s: The string to be searched
259 * @c: The character to search for 257 * @c: The character to search for
260 */ 258 */
261char * strchr(const char * s, int c) 259char *strchr(const char *s, int c)
262{ 260{
263 for(; *s != (char) c; ++s) 261 for (; *s != (char)c; ++s)
264 if (*s == '\0') 262 if (*s == '\0')
265 return NULL; 263 return NULL;
266 return (char *) s; 264 return (char *)s;
267} 265}
268EXPORT_SYMBOL(strchr); 266EXPORT_SYMBOL(strchr);
269#endif 267#endif
@@ -274,7 +272,7 @@ EXPORT_SYMBOL(strchr);
274 * @s: The string to be searched 272 * @s: The string to be searched
275 * @c: The character to search for 273 * @c: The character to search for
276 */ 274 */
277char * strrchr(const char * s, int c) 275char *strrchr(const char *s, int c)
278{ 276{
279 const char *p = s + strlen(s); 277 const char *p = s + strlen(s);
280 do { 278 do {
@@ -296,8 +294,8 @@ EXPORT_SYMBOL(strrchr);
296char *strnchr(const char *s, size_t count, int c) 294char *strnchr(const char *s, size_t count, int c)
297{ 295{
298 for (; count-- && *s != '\0'; ++s) 296 for (; count-- && *s != '\0'; ++s)
299 if (*s == (char) c) 297 if (*s == (char)c)
300 return (char *) s; 298 return (char *)s;
301 return NULL; 299 return NULL;
302} 300}
303EXPORT_SYMBOL(strnchr); 301EXPORT_SYMBOL(strnchr);
@@ -308,7 +306,7 @@ EXPORT_SYMBOL(strnchr);
308 * strlen - Find the length of a string 306 * strlen - Find the length of a string
309 * @s: The string to be sized 307 * @s: The string to be sized
310 */ 308 */
311size_t strlen(const char * s) 309size_t strlen(const char *s)
312{ 310{
313 const char *sc; 311 const char *sc;
314 312
@@ -325,7 +323,7 @@ EXPORT_SYMBOL(strlen);
325 * @s: The string to be sized 323 * @s: The string to be sized
326 * @count: The maximum number of bytes to search 324 * @count: The maximum number of bytes to search
327 */ 325 */
328size_t strnlen(const char * s, size_t count) 326size_t strnlen(const char *s, size_t count)
329{ 327{
330 const char *sc; 328 const char *sc;
331 329
@@ -358,7 +356,6 @@ size_t strspn(const char *s, const char *accept)
358 return count; 356 return count;
359 ++count; 357 ++count;
360 } 358 }
361
362 return count; 359 return count;
363} 360}
364 361
@@ -384,9 +381,8 @@ size_t strcspn(const char *s, const char *reject)
384 } 381 }
385 ++count; 382 ++count;
386 } 383 }
387
388 return count; 384 return count;
389} 385}
390EXPORT_SYMBOL(strcspn); 386EXPORT_SYMBOL(strcspn);
391 387
392#ifndef __HAVE_ARCH_STRPBRK 388#ifndef __HAVE_ARCH_STRPBRK
@@ -395,14 +391,14 @@ EXPORT_SYMBOL(strcspn);
395 * @cs: The string to be searched 391 * @cs: The string to be searched
396 * @ct: The characters to search for 392 * @ct: The characters to search for
397 */ 393 */
398char * strpbrk(const char * cs,const char * ct) 394char *strpbrk(const char *cs, const char *ct)
399{ 395{
400 const char *sc1,*sc2; 396 const char *sc1, *sc2;
401 397
402 for( sc1 = cs; *sc1 != '\0'; ++sc1) { 398 for (sc1 = cs; *sc1 != '\0'; ++sc1) {
403 for( sc2 = ct; *sc2 != '\0'; ++sc2) { 399 for (sc2 = ct; *sc2 != '\0'; ++sc2) {
404 if (*sc1 == *sc2) 400 if (*sc1 == *sc2)
405 return (char *) sc1; 401 return (char *)sc1;
406 } 402 }
407 } 403 }
408 return NULL; 404 return NULL;
@@ -422,9 +418,10 @@ EXPORT_SYMBOL(strpbrk);
422 * of that name. In fact, it was stolen from glibc2 and de-fancy-fied. 418 * of that name. In fact, it was stolen from glibc2 and de-fancy-fied.
423 * Same semantics, slimmer shape. ;) 419 * Same semantics, slimmer shape. ;)
424 */ 420 */
425char * strsep(char **s, const char *ct) 421char *strsep(char **s, const char *ct)
426{ 422{
427 char *sbegin = *s, *end; 423 char *sbegin = *s;
424 char *end;
428 425
429 if (sbegin == NULL) 426 if (sbegin == NULL)
430 return NULL; 427 return NULL;
@@ -433,10 +430,8 @@ char * strsep(char **s, const char *ct)
433 if (end) 430 if (end)
434 *end++ = '\0'; 431 *end++ = '\0';
435 *s = end; 432 *s = end;
436
437 return sbegin; 433 return sbegin;
438} 434}
439
440EXPORT_SYMBOL(strsep); 435EXPORT_SYMBOL(strsep);
441#endif 436#endif
442 437
@@ -449,13 +444,12 @@ EXPORT_SYMBOL(strsep);
449 * 444 *
450 * Do not use memset() to access IO space, use memset_io() instead. 445 * Do not use memset() to access IO space, use memset_io() instead.
451 */ 446 */
452void * memset(void * s,int c,size_t count) 447void *memset(void *s, int c, size_t count)
453{ 448{
454 char *xs = (char *) s; 449 char *xs = s;
455 450
456 while (count--) 451 while (count--)
457 *xs++ = c; 452 *xs++ = c;
458
459 return s; 453 return s;
460} 454}
461EXPORT_SYMBOL(memset); 455EXPORT_SYMBOL(memset);
@@ -471,13 +465,13 @@ EXPORT_SYMBOL(memset);
471 * You should not use this function to access IO space, use memcpy_toio() 465 * You should not use this function to access IO space, use memcpy_toio()
472 * or memcpy_fromio() instead. 466 * or memcpy_fromio() instead.
473 */ 467 */
474void * memcpy(void * dest,const void *src,size_t count) 468void *memcpy(void *dest, const void *src, size_t count)
475{ 469{
476 char *tmp = (char *) dest, *s = (char *) src; 470 char *tmp = dest;
471 char *s = src;
477 472
478 while (count--) 473 while (count--)
479 *tmp++ = *s++; 474 *tmp++ = *s++;
480
481 return dest; 475 return dest;
482} 476}
483EXPORT_SYMBOL(memcpy); 477EXPORT_SYMBOL(memcpy);
@@ -492,23 +486,24 @@ EXPORT_SYMBOL(memcpy);
492 * 486 *
493 * Unlike memcpy(), memmove() copes with overlapping areas. 487 * Unlike memcpy(), memmove() copes with overlapping areas.
494 */ 488 */
495void * memmove(void * dest,const void *src,size_t count) 489void *memmove(void *dest, const void *src, size_t count)
496{ 490{
497 char *tmp, *s; 491 char *tmp;
492 const char *s;
498 493
499 if (dest <= src) { 494 if (dest <= src) {
500 tmp = (char *) dest; 495 tmp = dest;
501 s = (char *) src; 496 s = src;
502 while (count--) 497 while (count--)
503 *tmp++ = *s++; 498 *tmp++ = *s++;
504 } 499 } else {
505 else { 500 tmp = dest;
506 tmp = (char *) dest + count; 501 tmp += count;
507 s = (char *) src + count; 502 s = src;
503 s += count;
508 while (count--) 504 while (count--)
509 *--tmp = *--s; 505 *--tmp = *--s;
510 } 506 }
511
512 return dest; 507 return dest;
513} 508}
514EXPORT_SYMBOL(memmove); 509EXPORT_SYMBOL(memmove);
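
Since the memmove() comment stresses that it, unlike memcpy(), copes with overlapping areas, a short illustrative use (the function is invented) is:

#include <linux/string.h>
#include <linux/types.h>

/* Drop the first byte of buf by shifting the remainder down one place.
 * Source and destination overlap, so memmove() is required here;
 * memcpy() would be undefined for this copy. */
static void example_shift_down(char *buf, size_t len)
{
	memmove(buf, buf + 1, len - 1);
}
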
@@ -522,12 +517,12 @@ EXPORT_SYMBOL(memmove);
522 * @count: The size of the area. 517 * @count: The size of the area.
523 */ 518 */
524#undef memcmp 519#undef memcmp
525int memcmp(const void * cs,const void * ct,size_t count) 520int memcmp(const void *cs, const void *ct, size_t count)
526{ 521{
527 const unsigned char *su1, *su2; 522 const unsigned char *su1, *su2;
528 int res = 0; 523 int res = 0;
529 524
530 for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) 525 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
531 if ((res = *su1 - *su2) != 0) 526 if ((res = *su1 - *su2) != 0)
532 break; 527 break;
533 return res; 528 return res;
@@ -545,17 +540,17 @@ EXPORT_SYMBOL(memcmp);
545 * returns the address of the first occurrence of @c, or 1 byte past 540 * returns the address of the first occurrence of @c, or 1 byte past
546 * the area if @c is not found 541 * the area if @c is not found
547 */ 542 */
548void * memscan(void * addr, int c, size_t size) 543void *memscan(void *addr, int c, size_t size)
549{ 544{
550 unsigned char * p = (unsigned char *) addr; 545 unsigned char *p = addr;
551 546
552 while (size) { 547 while (size) {
553 if (*p == c) 548 if (*p == c)
554 return (void *) p; 549 return (void *)p;
555 p++; 550 p++;
556 size--; 551 size--;
557 } 552 }
558 return (void *) p; 553 return (void *)p;
559} 554}
560EXPORT_SYMBOL(memscan); 555EXPORT_SYMBOL(memscan);
561#endif 556#endif
@@ -566,18 +561,18 @@ EXPORT_SYMBOL(memscan);
566 * @s1: The string to be searched 561 * @s1: The string to be searched
567 * @s2: The string to search for 562 * @s2: The string to search for
568 */ 563 */
569char * strstr(const char * s1,const char * s2) 564char *strstr(const char *s1, const char *s2)
570{ 565{
571 int l1, l2; 566 int l1, l2;
572 567
573 l2 = strlen(s2); 568 l2 = strlen(s2);
574 if (!l2) 569 if (!l2)
575 return (char *) s1; 570 return (char *)s1;
576 l1 = strlen(s1); 571 l1 = strlen(s1);
577 while (l1 >= l2) { 572 while (l1 >= l2) {
578 l1--; 573 l1--;
579 if (!memcmp(s1,s2,l2)) 574 if (!memcmp(s1, s2, l2))
580 return (char *) s1; 575 return (char *)s1;
581 s1++; 576 s1++;
582 } 577 }
583 return NULL; 578 return NULL;
@@ -600,7 +595,7 @@ void *memchr(const void *s, int c, size_t n)
600 const unsigned char *p = s; 595 const unsigned char *p = s;
601 while (n-- != 0) { 596 while (n-- != 0) {
602 if ((unsigned char)c == *p++) { 597 if ((unsigned char)c == *p++) {
603 return (void *)(p-1); 598 return (void *)(p - 1);
604 } 599 }
605 } 600 }
606 return NULL; 601 return NULL;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e4e9031dd9c3..b07db5ca3f66 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -23,6 +23,7 @@
23#include <linux/ctype.h> 23#include <linux/ctype.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25 25
26#include <asm/page.h> /* for PAGE_SIZE */
26#include <asm/div64.h> 27#include <asm/div64.h>
27 28
28/** 29/**
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 3d94cb90c1d3..31b9e9054bf7 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -3,7 +3,6 @@
3 * For conditions of distribution and use, see copyright notice in zlib.h 3 * For conditions of distribution and use, see copyright notice in zlib.h
4 */ 4 */
5 5
6#include <linux/module.h>
7#include <linux/zutil.h> 6#include <linux/zutil.h>
8#include "infblock.h" 7#include "infblock.h"
9#include "infutil.h" 8#include "infutil.h"
diff --git a/mm/filemap.c b/mm/filemap.c
index 768687f1d46b..5d6e4c2000dc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1030,8 +1030,8 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1030 desc.error = 0; 1030 desc.error = 0;
1031 do_generic_file_read(filp,ppos,&desc,file_read_actor); 1031 do_generic_file_read(filp,ppos,&desc,file_read_actor);
1032 retval += desc.written; 1032 retval += desc.written;
1033 if (!retval) { 1033 if (desc.error) {
1034 retval = desc.error; 1034 retval = retval ?: desc.error;
1035 break; 1035 break;
1036 } 1036 }
1037 } 1037 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2076b1542b8a..5abc57c2b8bd 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -457,6 +457,7 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
457 struct vm_area_struct *vma = NULL; 457 struct vm_area_struct *vma = NULL;
458 struct mempolicy *pol = current->mempolicy; 458 struct mempolicy *pol = current->mempolicy;
459 459
460 cpuset_update_current_mems_allowed();
460 if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR)) 461 if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
461 return -EINVAL; 462 return -EINVAL;
462 if (flags & MPOL_F_ADDR) { 463 if (flags & MPOL_F_ADDR) {
@@ -1206,3 +1207,66 @@ void numa_default_policy(void)
1206{ 1207{
1207 do_set_mempolicy(MPOL_DEFAULT, NULL); 1208 do_set_mempolicy(MPOL_DEFAULT, NULL);
1208} 1209}
1210
1211/* Migrate a policy to a different set of nodes */
1212static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
1213 const nodemask_t *new)
1214{
1215 nodemask_t tmp;
1216
1217 if (!pol)
1218 return;
1219
1220 switch (pol->policy) {
1221 case MPOL_DEFAULT:
1222 break;
1223 case MPOL_INTERLEAVE:
1224 nodes_remap(tmp, pol->v.nodes, *old, *new);
1225 pol->v.nodes = tmp;
1226 current->il_next = node_remap(current->il_next, *old, *new);
1227 break;
1228 case MPOL_PREFERRED:
1229 pol->v.preferred_node = node_remap(pol->v.preferred_node,
1230 *old, *new);
1231 break;
1232 case MPOL_BIND: {
1233 nodemask_t nodes;
1234 struct zone **z;
1235 struct zonelist *zonelist;
1236
1237 nodes_clear(nodes);
1238 for (z = pol->v.zonelist->zones; *z; z++)
1239 node_set((*z)->zone_pgdat->node_id, nodes);
1240 nodes_remap(tmp, nodes, *old, *new);
1241 nodes = tmp;
1242
1243 zonelist = bind_zonelist(&nodes);
1244
1245 /* If no mem, then zonelist is NULL and we keep old zonelist.
1246 * If that old zonelist has no remaining mems_allowed nodes,
1247 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
1248 */
1249
1250 if (zonelist) {
1251 /* Good - got mem - substitute new zonelist */
1252 kfree(pol->v.zonelist);
1253 pol->v.zonelist = zonelist;
1254 }
1255 break;
1256 }
1257 default:
1258 BUG();
1259 break;
1260 }
1261}
1262
1263/*
1264 * Someone moved this task to different nodes. Fixup mempolicies.
1265 *
1266 * TODO - fixup current->mm->vma and shmfs/tmpfs/hugetlbfs policies as well,
1267 * once we have a cpuset mechanism to mark which cpuset subtree is migrating.
1268 */
1269void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new)
1270{
1271 rebind_policy(current->mempolicy, old, new);
1272}
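
The MPOL_INTERLEAVE branch above relies on nodes_remap() to translate node numbers from the old cpuset placement to the new one. A hedged illustration with made-up node numbers:

#include <linux/nodemask.h>

static void example_rebind(void)
{
	nodemask_t old = NODE_MASK_NONE, new = NODE_MASK_NONE;
	nodemask_t pol = NODE_MASK_NONE, tmp;

	node_set(0, old);	/* task used to be allowed nodes {0,1} */
	node_set(1, old);
	node_set(2, new);	/* ...and is now allowed nodes {2,3} */
	node_set(3, new);

	node_set(0, pol);	/* policy interleaved over {0,1} */
	node_set(1, pol);

	/* the n-th set bit of old maps to the n-th set bit of new */
	nodes_remap(tmp, pol, old, new);
	/* tmp == {2,3}: the policy now interleaves over the new nodes */
}
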
diff --git a/mm/mmap.c b/mm/mmap.c
index 5ecc2cf3e1d7..320dda1778c3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1840,7 +1840,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
1840 1840
1841static inline void verify_mm_writelocked(struct mm_struct *mm) 1841static inline void verify_mm_writelocked(struct mm_struct *mm)
1842{ 1842{
1843#ifdef CONFIG_DEBUG_KERNEL 1843#ifdef CONFIG_DEBUG_VM
1844 if (unlikely(down_read_trylock(&mm->mmap_sem))) { 1844 if (unlikely(down_read_trylock(&mm->mmap_sem))) {
1845 WARN_ON(1); 1845 WARN_ON(1);
1846 up_read(&mm->mmap_sem); 1846 up_read(&mm->mmap_sem);
diff --git a/mm/pdflush.c b/mm/pdflush.c
index d6781951267e..52822c98c489 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -20,6 +20,7 @@
20#include <linux/fs.h> // Needed by writeback.h 20#include <linux/fs.h> // Needed by writeback.h
21#include <linux/writeback.h> // Prototypes pdflush_operation() 21#include <linux/writeback.h> // Prototypes pdflush_operation()
22#include <linux/kthread.h> 22#include <linux/kthread.h>
23#include <linux/cpuset.h>
23 24
24 25
25/* 26/*
@@ -170,12 +171,24 @@ static int __pdflush(struct pdflush_work *my_work)
170static int pdflush(void *dummy) 171static int pdflush(void *dummy)
171{ 172{
172 struct pdflush_work my_work; 173 struct pdflush_work my_work;
174 cpumask_t cpus_allowed;
173 175
174 /* 176 /*
175 * pdflush can spend a lot of time doing encryption via dm-crypt. We 177 * pdflush can spend a lot of time doing encryption via dm-crypt. We
176 * don't want to do that at keventd's priority. 178 * don't want to do that at keventd's priority.
177 */ 179 */
178 set_user_nice(current, 0); 180 set_user_nice(current, 0);
181
182 /*
183 * Some configs put our parent kthread in a limited cpuset,
184 * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
 185 * Our needs are more modest - cut back to our cpuset's cpus_allowed.
 186 * This is needed as pdflush threads are dynamically created and destroyed.
 187 * The boot-time pdflush threads are easily placed w/o these 2 lines.
188 */
189 cpus_allowed = cpuset_cpus_allowed(current);
190 set_cpus_allowed(current, cpus_allowed);
191
179 return __pdflush(&my_work); 192 return __pdflush(&my_work);
180} 193}
181 194
diff --git a/mm/swap.c b/mm/swap.c
index b89512877ec2..96387e20184a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -270,7 +270,6 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
270 struct pagevec pages_to_free; 270 struct pagevec pages_to_free;
271 271
272 pagevec_init(&pages_to_free, pvec->cold); 272 pagevec_init(&pages_to_free, pvec->cold);
273 pages_to_free.cold = pvec->cold;
274 for (i = 0; i < pagevec_count(pvec); i++) { 273 for (i = 0; i < pagevec_count(pvec); i++) {
275 struct page *page = pvec->pages[i]; 274 struct page *page = pvec->pages[i];
276 275
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index c13a2161bca2..b58abcf44ed6 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -31,11 +31,14 @@ static struct vfsmount *shm_mnt;
31 31
32static int __init init_tmpfs(void) 32static int __init init_tmpfs(void)
33{ 33{
34 register_filesystem(&tmpfs_fs_type); 34 BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
35
35#ifdef CONFIG_TMPFS 36#ifdef CONFIG_TMPFS
36 devfs_mk_dir("shm"); 37 devfs_mk_dir("shm");
37#endif 38#endif
38 shm_mnt = kern_mount(&tmpfs_fs_type); 39 shm_mnt = kern_mount(&tmpfs_fs_type);
40 BUG_ON(IS_ERR(shm_mnt));
41
39 return 0; 42 return 0;
40} 43}
41module_init(init_tmpfs) 44module_init(init_tmpfs)
diff --git a/mm/truncate.c b/mm/truncate.c
index 60c8764bfac2..29c18f68dc35 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -13,18 +13,9 @@
13#include <linux/pagemap.h> 13#include <linux/pagemap.h>
14#include <linux/pagevec.h> 14#include <linux/pagevec.h>
15#include <linux/buffer_head.h> /* grr. try_to_release_page, 15#include <linux/buffer_head.h> /* grr. try_to_release_page,
16 block_invalidatepage */ 16 do_invalidatepage */
17 17
18 18
19static int do_invalidatepage(struct page *page, unsigned long offset)
20{
21 int (*invalidatepage)(struct page *, unsigned long);
22 invalidatepage = page->mapping->a_ops->invalidatepage;
23 if (invalidatepage == NULL)
24 invalidatepage = block_invalidatepage;
25 return (*invalidatepage)(page, offset);
26}
27
28static inline void truncate_partial_page(struct page *page, unsigned partial) 19static inline void truncate_partial_page(struct page *page, unsigned partial)
29{ 20{
30 memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial); 21 memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index f5909a4c3fc7..e19c2a52d00c 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -48,7 +48,7 @@ static int checkentry(const char *tablename, const struct ipt_ip *ip,
48 unsigned int hook_mask) 48 unsigned int hook_mask)
49{ 49{
50 if (matchsize != IPT_ALIGN(sizeof(struct ipt_addrtype_info))) { 50 if (matchsize != IPT_ALIGN(sizeof(struct ipt_addrtype_info))) {
51 printk(KERN_ERR "ipt_addrtype: invalid size (%u != %Zu)\n.", 51 printk(KERN_ERR "ipt_addrtype: invalid size (%u != %Zu)\n",
52 matchsize, IPT_ALIGN(sizeof(struct ipt_addrtype_info))); 52 matchsize, IPT_ALIGN(sizeof(struct ipt_addrtype_info)));
53 return 0; 53 return 0;
54 } 54 }
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 2fcb244a9e18..0dd96919de3e 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -116,6 +116,15 @@ endif
116clean-files := lkc_defs.h qconf.moc .tmp_qtcheck \ 116clean-files := lkc_defs.h qconf.moc .tmp_qtcheck \
117 .tmp_gtkcheck zconf.tab.c zconf.tab.h lex.zconf.c 117 .tmp_gtkcheck zconf.tab.c zconf.tab.h lex.zconf.c
118 118
119# Needed for systems without gettext
120KBUILD_HAVE_NLS := $(shell \
 121 if echo "\#include <libintl.h>" | $(HOSTCC) $(HOSTCFLAGS) -E - > /dev/null 2>&1 ; \
122 then echo yes ; \
123 else echo no ; fi)
124ifeq ($(KBUILD_HAVE_NLS),no)
125HOSTCFLAGS += -DKBUILD_NO_NLS
126endif
127
119# generated files seem to need this to find local include files 128# generated files seem to need this to find local include files
120HOSTCFLAGS_lex.zconf.o := -I$(src) 129HOSTCFLAGS_lex.zconf.o := -I$(src)
121HOSTCFLAGS_zconf.tab.o := -I$(src) 130HOSTCFLAGS_zconf.tab.o := -I$(src)
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index c3d25786a64d..5fba1feff2a8 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -8,7 +8,13 @@
8 8
9#include "expr.h" 9#include "expr.h"
10 10
11#include <libintl.h> 11#ifndef KBUILD_NO_NLS
12# include <libintl.h>
13#else
14# define gettext(Msgid) ((const char *) (Msgid))
15# define textdomain(Domainname) ((const char *) (Domainname))
16# define bindtextdomain(Domainname, Dirname) ((const char *) (Dirname))
17#endif
12 18
13#ifdef __cplusplus 19#ifdef __cplusplus
14extern "C" { 20extern "C" {
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 457bec29511d..d1ad40531ee5 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -219,6 +219,7 @@ save_config_help[] = N_(
219search_help[] = N_( 219search_help[] = N_(
220 "\n" 220 "\n"
221 "Search for CONFIG_ symbols and display their relations.\n" 221 "Search for CONFIG_ symbols and display their relations.\n"
222 "Regular expressions are allowed.\n"
222 "Example: search for \"^FOO\"\n" 223 "Example: search for \"^FOO\"\n"
223 "Result:\n" 224 "Result:\n"
224 "-----------------------------------------------------------------\n" 225 "-----------------------------------------------------------------\n"
@@ -531,7 +532,7 @@ again:
531 cprint("--title"); 532 cprint("--title");
532 cprint(_("Search Configuration Parameter")); 533 cprint(_("Search Configuration Parameter"));
533 cprint("--inputbox"); 534 cprint("--inputbox");
534 cprint(_("Enter Keyword")); 535 cprint(_("Enter CONFIG_ (sub)string to search for (omit CONFIG_)"));
535 cprint("10"); 536 cprint("10");
536 cprint("75"); 537 cprint("75");
537 cprint(""); 538 cprint("");
diff --git a/security/dummy.c b/security/dummy.c
index 3d34f3de7e82..3ca5f2b828a0 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -377,7 +377,7 @@ static int dummy_inode_removexattr (struct dentry *dentry, char *name)
377 return 0; 377 return 0;
378} 378}
379 379
380static int dummy_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size) 380static int dummy_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size, int err)
381{ 381{
382 return -EOPNOTSUPP; 382 return -EOPNOTSUPP;
383} 383}
@@ -803,6 +803,23 @@ static int dummy_setprocattr(struct task_struct *p, char *name, void *value, siz
803 return -EINVAL; 803 return -EINVAL;
804} 804}
805 805
806#ifdef CONFIG_KEYS
807static inline int dummy_key_alloc(struct key *key)
808{
809 return 0;
810}
811
812static inline void dummy_key_free(struct key *key)
813{
814}
815
816static inline int dummy_key_permission(key_ref_t key_ref,
817 struct task_struct *context,
818 key_perm_t perm)
819{
820 return 0;
821}
822#endif /* CONFIG_KEYS */
806 823
807struct security_operations dummy_security_ops; 824struct security_operations dummy_security_ops;
808 825
@@ -954,5 +971,11 @@ void security_fixup_ops (struct security_operations *ops)
954 set_to_dummy_if_null(ops, sk_alloc_security); 971 set_to_dummy_if_null(ops, sk_alloc_security);
955 set_to_dummy_if_null(ops, sk_free_security); 972 set_to_dummy_if_null(ops, sk_free_security);
956#endif /* CONFIG_SECURITY_NETWORK */ 973#endif /* CONFIG_SECURITY_NETWORK */
974#ifdef CONFIG_KEYS
975 set_to_dummy_if_null(ops, key_alloc);
976 set_to_dummy_if_null(ops, key_free);
977 set_to_dummy_if_null(ops, key_permission);
978#endif /* CONFIG_KEYS */
979
957} 980}
958 981
diff --git a/security/keys/key.c b/security/keys/key.c
index 2182be9e9309..ccde17aff616 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -1,6 +1,6 @@
1/* key.c: basic authentication token and access key management 1/* key.c: basic authentication token and access key management
2 * 2 *
3 * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/security.h>
16#include <linux/workqueue.h> 17#include <linux/workqueue.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include "internal.h" 19#include "internal.h"
@@ -253,6 +254,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
253 struct key_user *user = NULL; 254 struct key_user *user = NULL;
254 struct key *key; 255 struct key *key;
255 size_t desclen, quotalen; 256 size_t desclen, quotalen;
257 int ret;
256 258
257 key = ERR_PTR(-EINVAL); 259 key = ERR_PTR(-EINVAL);
258 if (!desc || !*desc) 260 if (!desc || !*desc)
@@ -305,6 +307,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
305 key->flags = 0; 307 key->flags = 0;
306 key->expiry = 0; 308 key->expiry = 0;
307 key->payload.data = NULL; 309 key->payload.data = NULL;
310 key->security = NULL;
308 311
309 if (!not_in_quota) 312 if (!not_in_quota)
310 key->flags |= 1 << KEY_FLAG_IN_QUOTA; 313 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
@@ -315,16 +318,34 @@ struct key *key_alloc(struct key_type *type, const char *desc,
315 key->magic = KEY_DEBUG_MAGIC; 318 key->magic = KEY_DEBUG_MAGIC;
316#endif 319#endif
317 320
321 /* let the security module know about the key */
322 ret = security_key_alloc(key);
323 if (ret < 0)
324 goto security_error;
325
318 /* publish the key by giving it a serial number */ 326 /* publish the key by giving it a serial number */
319 atomic_inc(&user->nkeys); 327 atomic_inc(&user->nkeys);
320 key_alloc_serial(key); 328 key_alloc_serial(key);
321 329
322 error: 330error:
323 return key; 331 return key;
324 332
325 no_memory_3: 333security_error:
334 kfree(key->description);
335 kmem_cache_free(key_jar, key);
336 if (!not_in_quota) {
337 spin_lock(&user->lock);
338 user->qnkeys--;
339 user->qnbytes -= quotalen;
340 spin_unlock(&user->lock);
341 }
342 key_user_put(user);
343 key = ERR_PTR(ret);
344 goto error;
345
346no_memory_3:
326 kmem_cache_free(key_jar, key); 347 kmem_cache_free(key_jar, key);
327 no_memory_2: 348no_memory_2:
328 if (!not_in_quota) { 349 if (!not_in_quota) {
329 spin_lock(&user->lock); 350 spin_lock(&user->lock);
330 user->qnkeys--; 351 user->qnkeys--;
@@ -332,11 +353,11 @@ struct key *key_alloc(struct key_type *type, const char *desc,
332 spin_unlock(&user->lock); 353 spin_unlock(&user->lock);
333 } 354 }
334 key_user_put(user); 355 key_user_put(user);
335 no_memory_1: 356no_memory_1:
336 key = ERR_PTR(-ENOMEM); 357 key = ERR_PTR(-ENOMEM);
337 goto error; 358 goto error;
338 359
339 no_quota: 360no_quota:
340 spin_unlock(&user->lock); 361 spin_unlock(&user->lock);
341 key_user_put(user); 362 key_user_put(user);
342 key = ERR_PTR(-EDQUOT); 363 key = ERR_PTR(-EDQUOT);
@@ -556,6 +577,8 @@ static void key_cleanup(void *data)
556 577
557 key_check(key); 578 key_check(key);
558 579
580 security_key_free(key);
581
559 /* deal with the user's key tracking and quota */ 582 /* deal with the user's key tracking and quota */
560 if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { 583 if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
561 spin_lock(&key->user->lock); 584 spin_lock(&key->user->lock);
@@ -700,8 +723,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
700 int ret; 723 int ret;
701 724
702 /* need write permission on the key to update it */ 725 /* need write permission on the key to update it */
703 ret = -EACCES; 726 ret = key_permission(key_ref, KEY_WRITE);
704 if (!key_permission(key_ref, KEY_WRITE)) 727 if (ret < 0)
705 goto error; 728 goto error;
706 729
707 ret = -EEXIST; 730 ret = -EEXIST;
@@ -711,7 +734,6 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
711 down_write(&key->sem); 734 down_write(&key->sem);
712 735
713 ret = key->type->update(key, payload, plen); 736 ret = key->type->update(key, payload, plen);
714
715 if (ret == 0) 737 if (ret == 0)
716 /* updating a negative key instantiates it */ 738 /* updating a negative key instantiates it */
717 clear_bit(KEY_FLAG_NEGATIVE, &key->flags); 739 clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
@@ -768,9 +790,11 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
768 790
769 /* if we're going to allocate a new key, we're going to have 791 /* if we're going to allocate a new key, we're going to have
770 * to modify the keyring */ 792 * to modify the keyring */
771 key_ref = ERR_PTR(-EACCES); 793 ret = key_permission(keyring_ref, KEY_WRITE);
772 if (!key_permission(keyring_ref, KEY_WRITE)) 794 if (ret < 0) {
795 key_ref = ERR_PTR(ret);
773 goto error_3; 796 goto error_3;
797 }
774 798
775 /* search for an existing key of the same type and description in the 799 /* search for an existing key of the same type and description in the
776 * destination keyring 800 * destination keyring
@@ -780,8 +804,8 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
780 goto found_matching_key; 804 goto found_matching_key;
781 805
782 /* decide on the permissions we want */ 806 /* decide on the permissions we want */
783 perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK; 807 perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
784 perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK; 808 perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
785 809
786 if (ktype->read) 810 if (ktype->read)
787 perm |= KEY_POS_READ | KEY_USR_READ; 811 perm |= KEY_POS_READ | KEY_USR_READ;
@@ -840,16 +864,16 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
840 key_check(key); 864 key_check(key);
841 865
842 /* the key must be writable */ 866 /* the key must be writable */
843 ret = -EACCES; 867 ret = key_permission(key_ref, KEY_WRITE);
844 if (!key_permission(key_ref, KEY_WRITE)) 868 if (ret < 0)
845 goto error; 869 goto error;
846 870
847 /* attempt to update it if supported */ 871 /* attempt to update it if supported */
848 ret = -EOPNOTSUPP; 872 ret = -EOPNOTSUPP;
849 if (key->type->update) { 873 if (key->type->update) {
850 down_write(&key->sem); 874 down_write(&key->sem);
851 ret = key->type->update(key, payload, plen);
852 875
876 ret = key->type->update(key, payload, plen);
853 if (ret == 0) 877 if (ret == 0)
854 /* updating a negative key instantiates it */ 878 /* updating a negative key instantiates it */
855 clear_bit(KEY_FLAG_NEGATIVE, &key->flags); 879 clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 4c670ee6acf9..b7a468fabdf9 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -624,8 +624,8 @@ long keyctl_keyring_search(key_serial_t ringid,
624 624
625 /* link the resulting key to the destination keyring if we can */ 625 /* link the resulting key to the destination keyring if we can */
626 if (dest_ref) { 626 if (dest_ref) {
627 ret = -EACCES; 627 ret = key_permission(key_ref, KEY_LINK);
628 if (!key_permission(key_ref, KEY_LINK)) 628 if (ret < 0)
629 goto error6; 629 goto error6;
630 630
631 ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref)); 631 ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
@@ -676,8 +676,11 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
676 key = key_ref_to_ptr(key_ref); 676 key = key_ref_to_ptr(key_ref);
677 677
678 /* see if we can read it directly */ 678 /* see if we can read it directly */
679 if (key_permission(key_ref, KEY_READ)) 679 ret = key_permission(key_ref, KEY_READ);
680 if (ret == 0)
680 goto can_read_key; 681 goto can_read_key;
682 if (ret != -EACCES)
683 goto error;
681 684
682 /* we can't; see if it's searchable from this process's keyrings 685 /* we can't; see if it's searchable from this process's keyrings
683 * - we automatically take account of the fact that it may be 686 * - we automatically take account of the fact that it may be
@@ -726,7 +729,7 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
726 if (uid == (uid_t) -1 && gid == (gid_t) -1) 729 if (uid == (uid_t) -1 && gid == (gid_t) -1)
727 goto error; 730 goto error;
728 731
729 key_ref = lookup_user_key(NULL, id, 1, 1, 0); 732 key_ref = lookup_user_key(NULL, id, 1, 1, KEY_SETATTR);
730 if (IS_ERR(key_ref)) { 733 if (IS_ERR(key_ref)) {
731 ret = PTR_ERR(key_ref); 734 ret = PTR_ERR(key_ref);
732 goto error; 735 goto error;
@@ -786,7 +789,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
786 if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) 789 if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
787 goto error; 790 goto error;
788 791
789 key_ref = lookup_user_key(NULL, id, 1, 1, 0); 792 key_ref = lookup_user_key(NULL, id, 1, 1, KEY_SETATTR);
790 if (IS_ERR(key_ref)) { 793 if (IS_ERR(key_ref)) {
791 ret = PTR_ERR(key_ref); 794 ret = PTR_ERR(key_ref);
792 goto error; 795 goto error;
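
Note also that lookup_user_key() now takes the permission it must verify as its final argument (previously 0, now a mask such as KEY_SETATTR), so the permission check happens inside the lookup and callers only deal with the IS_ERR() result. A hedged sketch of the new calling pattern; example_require_setattr() is hypothetical, the rest uses calls visible in this patch:

	/* fail unless the caller may change the key's attributes */
	static long example_require_setattr(key_serial_t id)
	{
		key_ref_t key_ref;

		key_ref = lookup_user_key(NULL, id, 1, 1, KEY_SETATTR);
		if (IS_ERR(key_ref))
			return PTR_ERR(key_ref);  /* includes permission failures */

		key_put(key_ref_to_ptr(key_ref));
		return 0;
	}
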
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 0639396dd441..e1cc4dd79012 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/security.h>
16#include <linux/seq_file.h> 17#include <linux/seq_file.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
@@ -309,7 +310,9 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
309 int ret; 310 int ret;
310 311
311 keyring = key_alloc(&key_type_keyring, description, 312 keyring = key_alloc(&key_type_keyring, description,
312 uid, gid, KEY_POS_ALL | KEY_USR_ALL, not_in_quota); 313 uid, gid,
314 (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
315 not_in_quota);
313 316
314 if (!IS_ERR(keyring)) { 317 if (!IS_ERR(keyring)) {
315 ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); 318 ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
@@ -359,9 +362,11 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
359 key_check(keyring); 362 key_check(keyring);
360 363
361 /* top keyring must have search permission to begin the search */ 364 /* top keyring must have search permission to begin the search */
362 key_ref = ERR_PTR(-EACCES); 365 err = key_task_permission(keyring_ref, context, KEY_SEARCH);
363 if (!key_task_permission(keyring_ref, context, KEY_SEARCH)) 366 if (err < 0) {
367 key_ref = ERR_PTR(err);
364 goto error; 368 goto error;
369 }
365 370
366 key_ref = ERR_PTR(-ENOTDIR); 371 key_ref = ERR_PTR(-ENOTDIR);
367 if (keyring->type != &key_type_keyring) 372 if (keyring->type != &key_type_keyring)
@@ -402,8 +407,8 @@ descend:
402 continue; 407 continue;
403 408
404 /* key must have search permissions */ 409 /* key must have search permissions */
405 if (!key_task_permission(make_key_ref(key, possessed), 410 if (key_task_permission(make_key_ref(key, possessed),
406 context, KEY_SEARCH)) 411 context, KEY_SEARCH) < 0)
407 continue; 412 continue;
408 413
409 /* we set a different error code if we find a negative key */ 414 /* we set a different error code if we find a negative key */
@@ -430,7 +435,7 @@ ascend:
430 continue; 435 continue;
431 436
432 if (!key_task_permission(make_key_ref(key, possessed), 437 if (!key_task_permission(make_key_ref(key, possessed),
433 context, KEY_SEARCH)) 438 context, KEY_SEARCH) < 0)
434 continue; 439 continue;
435 440
436 /* stack the current position */ 441 /* stack the current position */
@@ -521,7 +526,7 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
521 (!key->type->match || 526 (!key->type->match ||
522 key->type->match(key, description)) && 527 key->type->match(key, description)) &&
523 key_permission(make_key_ref(key, possessed), 528 key_permission(make_key_ref(key, possessed),
524 perm) && 529 perm) < 0 &&
525 !test_bit(KEY_FLAG_REVOKED, &key->flags) 530 !test_bit(KEY_FLAG_REVOKED, &key->flags)
526 ) 531 )
527 goto found; 532 goto found;
@@ -617,7 +622,7 @@ struct key *find_keyring_by_name(const char *name, key_serial_t bound)
617 continue; 622 continue;
618 623
619 if (!key_permission(make_key_ref(keyring, 0), 624 if (!key_permission(make_key_ref(keyring, 0),
620 KEY_SEARCH)) 625 KEY_SEARCH) < 0)
621 continue; 626 continue;
622 627
623 /* found a potential candidate, but we still need to 628 /* found a potential candidate, but we still need to
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 03db073ba45c..e7f579c0eaf5 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/security.h>
13#include "internal.h" 14#include "internal.h"
14 15
15/*****************************************************************************/ 16/*****************************************************************************/
@@ -63,7 +64,11 @@ use_these_perms:
63 64
64 kperm = kperm & perm & KEY_ALL; 65 kperm = kperm & perm & KEY_ALL;
65 66
66 return kperm == perm; 67 if (kperm != perm)
68 return -EACCES;
69
70 /* let LSM be the final arbiter */
71 return security_key_permission(key_ref, context, perm);
67 72
68} /* end key_task_permission() */ 73} /* end key_task_permission() */
69 74
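
With the mask comparison now returning -EACCES and the result of security_key_permission() returned directly, the LSM becomes the final arbiter even when the generic possessor/UID/GID mask check passes. A security module wanting the historical behaviour simply allows everything in its hook; a hypothetical minimal implementation (not taken from this patch, signature matching the call above) would be:

	static int example_key_permission(key_ref_t key_ref,
					  struct task_struct *context,
					  key_perm_t perm)
	{
		/* the generic mask check has already passed;
		 * grant the operation unconditionally */
		return 0;
	}
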
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index d42d2158ce13..566b1cc0118a 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -39,7 +39,7 @@ struct key root_user_keyring = {
39 .type = &key_type_keyring, 39 .type = &key_type_keyring,
40 .user = &root_key_user, 40 .user = &root_key_user,
41 .sem = __RWSEM_INITIALIZER(root_user_keyring.sem), 41 .sem = __RWSEM_INITIALIZER(root_user_keyring.sem),
42 .perm = KEY_POS_ALL | KEY_USR_ALL, 42 .perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
43 .flags = 1 << KEY_FLAG_INSTANTIATED, 43 .flags = 1 << KEY_FLAG_INSTANTIATED,
44 .description = "_uid.0", 44 .description = "_uid.0",
45#ifdef KEY_DEBUGGING 45#ifdef KEY_DEBUGGING
@@ -54,7 +54,7 @@ struct key root_session_keyring = {
54 .type = &key_type_keyring, 54 .type = &key_type_keyring,
55 .user = &root_key_user, 55 .user = &root_key_user,
56 .sem = __RWSEM_INITIALIZER(root_session_keyring.sem), 56 .sem = __RWSEM_INITIALIZER(root_session_keyring.sem),
57 .perm = KEY_POS_ALL | KEY_USR_ALL, 57 .perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
58 .flags = 1 << KEY_FLAG_INSTANTIATED, 58 .flags = 1 << KEY_FLAG_INSTANTIATED,
59 .description = "_uid_ses.0", 59 .description = "_uid_ses.0",
60#ifdef KEY_DEBUGGING 60#ifdef KEY_DEBUGGING
@@ -666,9 +666,8 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,
666 goto invalid_key; 666 goto invalid_key;
667 667
668 /* check the permissions */ 668 /* check the permissions */
669 ret = -EACCES; 669 ret = key_task_permission(key_ref, context, perm);
670 670 if (ret < 0)
671 if (!key_task_permission(key_ref, context, perm))
672 goto invalid_key; 671 goto invalid_key;
673 672
674error: 673error:
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index e446acba73d3..cbda3b2780a1 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -15,18 +15,10 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <keys/user-type.h>
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
19#include "internal.h" 20#include "internal.h"
20 21
21static int user_instantiate(struct key *key, const void *data, size_t datalen);
22static int user_duplicate(struct key *key, const struct key *source);
23static int user_update(struct key *key, const void *data, size_t datalen);
24static int user_match(const struct key *key, const void *criterion);
25static void user_destroy(struct key *key);
26static void user_describe(const struct key *user, struct seq_file *m);
27static long user_read(const struct key *key,
28 char __user *buffer, size_t buflen);
29
30/* 22/*
31 * user defined keys take an arbitrary string as the description and an 23 * user defined keys take an arbitrary string as the description and an
32 * arbitrary blob of data as the payload 24 * arbitrary blob of data as the payload
@@ -42,19 +34,13 @@ struct key_type key_type_user = {
42 .read = user_read, 34 .read = user_read,
43}; 35};
44 36
45struct user_key_payload {
46 struct rcu_head rcu; /* RCU destructor */
47 unsigned short datalen; /* length of this data */
48 char data[0]; /* actual data */
49};
50
51EXPORT_SYMBOL_GPL(key_type_user); 37EXPORT_SYMBOL_GPL(key_type_user);
52 38
53/*****************************************************************************/ 39/*****************************************************************************/
54/* 40/*
55 * instantiate a user defined key 41 * instantiate a user defined key
56 */ 42 */
57static int user_instantiate(struct key *key, const void *data, size_t datalen) 43int user_instantiate(struct key *key, const void *data, size_t datalen)
58{ 44{
59 struct user_key_payload *upayload; 45 struct user_key_payload *upayload;
60 int ret; 46 int ret;
@@ -78,18 +64,20 @@ static int user_instantiate(struct key *key, const void *data, size_t datalen)
78 rcu_assign_pointer(key->payload.data, upayload); 64 rcu_assign_pointer(key->payload.data, upayload);
79 ret = 0; 65 ret = 0;
80 66
81 error: 67error:
82 return ret; 68 return ret;
83 69
84} /* end user_instantiate() */ 70} /* end user_instantiate() */
85 71
72EXPORT_SYMBOL_GPL(user_instantiate);
73
86/*****************************************************************************/ 74/*****************************************************************************/
87/* 75/*
88 * duplicate a user defined key 76 * duplicate a user defined key
89 * - both keys' semaphores are locked against further modification 77 * - both keys' semaphores are locked against further modification
90 * - the new key cannot yet be accessed 78 * - the new key cannot yet be accessed
91 */ 79 */
92static int user_duplicate(struct key *key, const struct key *source) 80int user_duplicate(struct key *key, const struct key *source)
93{ 81{
94 struct user_key_payload *upayload, *spayload; 82 struct user_key_payload *upayload, *spayload;
95 int ret; 83 int ret;
@@ -112,6 +100,8 @@ static int user_duplicate(struct key *key, const struct key *source)
112 100
113} /* end user_duplicate() */ 101} /* end user_duplicate() */
114 102
103EXPORT_SYMBOL_GPL(user_duplicate);
104
115/*****************************************************************************/ 105/*****************************************************************************/
116/* 106/*
117 * dispose of the old data from an updated user defined key 107 * dispose of the old data from an updated user defined key
@@ -131,7 +121,7 @@ static void user_update_rcu_disposal(struct rcu_head *rcu)
131 * update a user defined key 121 * update a user defined key
132 * - the key's semaphore is write-locked 122 * - the key's semaphore is write-locked
133 */ 123 */
134static int user_update(struct key *key, const void *data, size_t datalen) 124int user_update(struct key *key, const void *data, size_t datalen)
135{ 125{
136 struct user_key_payload *upayload, *zap; 126 struct user_key_payload *upayload, *zap;
137 int ret; 127 int ret;
@@ -163,26 +153,30 @@ static int user_update(struct key *key, const void *data, size_t datalen)
163 153
164 call_rcu(&zap->rcu, user_update_rcu_disposal); 154 call_rcu(&zap->rcu, user_update_rcu_disposal);
165 155
166 error: 156error:
167 return ret; 157 return ret;
168 158
169} /* end user_update() */ 159} /* end user_update() */
170 160
161EXPORT_SYMBOL_GPL(user_update);
162
171/*****************************************************************************/ 163/*****************************************************************************/
172/* 164/*
173 * match users on their name 165 * match users on their name
174 */ 166 */
175static int user_match(const struct key *key, const void *description) 167int user_match(const struct key *key, const void *description)
176{ 168{
177 return strcmp(key->description, description) == 0; 169 return strcmp(key->description, description) == 0;
178 170
179} /* end user_match() */ 171} /* end user_match() */
180 172
173EXPORT_SYMBOL_GPL(user_match);
174
181/*****************************************************************************/ 175/*****************************************************************************/
182/* 176/*
183 * dispose of the data dangling from the corpse of a user 177 * dispose of the data dangling from the corpse of a user
184 */ 178 */
185static void user_destroy(struct key *key) 179void user_destroy(struct key *key)
186{ 180{
187 struct user_key_payload *upayload = key->payload.data; 181 struct user_key_payload *upayload = key->payload.data;
188 182
@@ -190,11 +184,13 @@ static void user_destroy(struct key *key)
190 184
191} /* end user_destroy() */ 185} /* end user_destroy() */
192 186
187EXPORT_SYMBOL_GPL(user_destroy);
188
193/*****************************************************************************/ 189/*****************************************************************************/
194/* 190/*
195 * describe the user key 191 * describe the user key
196 */ 192 */
197static void user_describe(const struct key *key, struct seq_file *m) 193void user_describe(const struct key *key, struct seq_file *m)
198{ 194{
199 seq_puts(m, key->description); 195 seq_puts(m, key->description);
200 196
@@ -202,13 +198,14 @@ static void user_describe(const struct key *key, struct seq_file *m)
202 198
203} /* end user_describe() */ 199} /* end user_describe() */
204 200
201EXPORT_SYMBOL_GPL(user_describe);
202
205/*****************************************************************************/ 203/*****************************************************************************/
206/* 204/*
207 * read the key data 205 * read the key data
208 * - the key's semaphore is read-locked 206 * - the key's semaphore is read-locked
209 */ 207 */
210static long user_read(const struct key *key, 208long user_read(const struct key *key, char __user *buffer, size_t buflen)
211 char __user *buffer, size_t buflen)
212{ 209{
213 struct user_key_payload *upayload; 210 struct user_key_payload *upayload;
214 long ret; 211 long ret;
@@ -228,3 +225,5 @@ static long user_read(const struct key *key,
228 return ret; 225 return ret;
229 226
230} /* end user_read() */ 227} /* end user_read() */
228
229EXPORT_SYMBOL_GPL(user_read);
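
Since user_instantiate(), user_update(), user_match(), user_destroy(), user_describe() and user_read() are now non-static, declared in <keys/user-type.h> and exported GPL-only, another key type can reuse them wholesale. An illustrative, entirely hypothetical key type built on top of them (field names follow key_type_user above):

	#include <linux/key.h>
	#include <keys/user-type.h>

	/* hypothetical key type storing an arbitrary blob, reusing the
	 * exported user-defined key operations */
	struct key_type key_type_example = {
		.name		= "example",
		.instantiate	= user_instantiate,
		.update		= user_update,
		.match		= user_match,
		.destroy	= user_destroy,
		.describe	= user_describe,
		.read		= user_read,
	};

This kind of reuse appears to be what the EXPORT_SYMBOL_GPL() additions are intended to enable.
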
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 447a1e0f48cb..45c41490d521 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -122,11 +122,10 @@ static int task_alloc_security(struct task_struct *task)
122{ 122{
123 struct task_security_struct *tsec; 123 struct task_security_struct *tsec;
124 124
125 tsec = kmalloc(sizeof(struct task_security_struct), GFP_KERNEL); 125 tsec = kzalloc(sizeof(struct task_security_struct), GFP_KERNEL);
126 if (!tsec) 126 if (!tsec)
127 return -ENOMEM; 127 return -ENOMEM;
128 128
129 memset(tsec, 0, sizeof(struct task_security_struct));
130 tsec->magic = SELINUX_MAGIC; 129 tsec->magic = SELINUX_MAGIC;
131 tsec->task = task; 130 tsec->task = task;
132 tsec->osid = tsec->sid = tsec->ptrace_sid = SECINITSID_UNLABELED; 131 tsec->osid = tsec->sid = tsec->ptrace_sid = SECINITSID_UNLABELED;
@@ -151,11 +150,10 @@ static int inode_alloc_security(struct inode *inode)
151 struct task_security_struct *tsec = current->security; 150 struct task_security_struct *tsec = current->security;
152 struct inode_security_struct *isec; 151 struct inode_security_struct *isec;
153 152
154 isec = kmalloc(sizeof(struct inode_security_struct), GFP_KERNEL); 153 isec = kzalloc(sizeof(struct inode_security_struct), GFP_KERNEL);
155 if (!isec) 154 if (!isec)
156 return -ENOMEM; 155 return -ENOMEM;
157 156
158 memset(isec, 0, sizeof(struct inode_security_struct));
159 init_MUTEX(&isec->sem); 157 init_MUTEX(&isec->sem);
160 INIT_LIST_HEAD(&isec->list); 158 INIT_LIST_HEAD(&isec->list);
161 isec->magic = SELINUX_MAGIC; 159 isec->magic = SELINUX_MAGIC;
@@ -193,11 +191,10 @@ static int file_alloc_security(struct file *file)
193 struct task_security_struct *tsec = current->security; 191 struct task_security_struct *tsec = current->security;
194 struct file_security_struct *fsec; 192 struct file_security_struct *fsec;
195 193
196 fsec = kmalloc(sizeof(struct file_security_struct), GFP_ATOMIC); 194 fsec = kzalloc(sizeof(struct file_security_struct), GFP_ATOMIC);
197 if (!fsec) 195 if (!fsec)
198 return -ENOMEM; 196 return -ENOMEM;
199 197
200 memset(fsec, 0, sizeof(struct file_security_struct));
201 fsec->magic = SELINUX_MAGIC; 198 fsec->magic = SELINUX_MAGIC;
202 fsec->file = file; 199 fsec->file = file;
203 if (tsec && tsec->magic == SELINUX_MAGIC) { 200 if (tsec && tsec->magic == SELINUX_MAGIC) {
@@ -227,11 +224,10 @@ static int superblock_alloc_security(struct super_block *sb)
227{ 224{
228 struct superblock_security_struct *sbsec; 225 struct superblock_security_struct *sbsec;
229 226
230 sbsec = kmalloc(sizeof(struct superblock_security_struct), GFP_KERNEL); 227 sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL);
231 if (!sbsec) 228 if (!sbsec)
232 return -ENOMEM; 229 return -ENOMEM;
233 230
234 memset(sbsec, 0, sizeof(struct superblock_security_struct));
235 init_MUTEX(&sbsec->sem); 231 init_MUTEX(&sbsec->sem);
236 INIT_LIST_HEAD(&sbsec->list); 232 INIT_LIST_HEAD(&sbsec->list);
237 INIT_LIST_HEAD(&sbsec->isec_head); 233 INIT_LIST_HEAD(&sbsec->isec_head);
@@ -269,11 +265,10 @@ static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
269 if (family != PF_UNIX) 265 if (family != PF_UNIX)
270 return 0; 266 return 0;
271 267
272 ssec = kmalloc(sizeof(*ssec), priority); 268 ssec = kzalloc(sizeof(*ssec), priority);
273 if (!ssec) 269 if (!ssec)
274 return -ENOMEM; 270 return -ENOMEM;
275 271
276 memset(ssec, 0, sizeof(*ssec));
277 ssec->magic = SELINUX_MAGIC; 272 ssec->magic = SELINUX_MAGIC;
278 ssec->sk = sk; 273 ssec->sk = sk;
279 ssec->peer_sid = SECINITSID_UNLABELED; 274 ssec->peer_sid = SECINITSID_UNLABELED;
@@ -1483,11 +1478,10 @@ static int selinux_bprm_alloc_security(struct linux_binprm *bprm)
1483{ 1478{
1484 struct bprm_security_struct *bsec; 1479 struct bprm_security_struct *bsec;
1485 1480
1486 bsec = kmalloc(sizeof(struct bprm_security_struct), GFP_KERNEL); 1481 bsec = kzalloc(sizeof(struct bprm_security_struct), GFP_KERNEL);
1487 if (!bsec) 1482 if (!bsec)
1488 return -ENOMEM; 1483 return -ENOMEM;
1489 1484
1490 memset(bsec, 0, sizeof *bsec);
1491 bsec->magic = SELINUX_MAGIC; 1485 bsec->magic = SELINUX_MAGIC;
1492 bsec->bprm = bprm; 1486 bsec->bprm = bprm;
1493 bsec->sid = SECINITSID_UNLABELED; 1487 bsec->sid = SECINITSID_UNLABELED;
@@ -1615,7 +1609,7 @@ static inline void flush_unauthorized_files(struct files_struct * files)
1615 1609
1616 if (tty) { 1610 if (tty) {
1617 file_list_lock(); 1611 file_list_lock();
1618 file = list_entry(tty->tty_files.next, typeof(*file), f_list); 1612 file = list_entry(tty->tty_files.next, typeof(*file), f_u.fu_list);
1619 if (file) { 1613 if (file) {
1620 /* Revalidate access to controlling tty. 1614 /* Revalidate access to controlling tty.
1621 Use inode_has_perm on the tty inode directly rather 1615 Use inode_has_perm on the tty inode directly rather
@@ -2211,12 +2205,6 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, char *name,
2211 2205
2212static int selinux_inode_getxattr (struct dentry *dentry, char *name) 2206static int selinux_inode_getxattr (struct dentry *dentry, char *name)
2213{ 2207{
2214 struct inode *inode = dentry->d_inode;
2215 struct superblock_security_struct *sbsec = inode->i_sb->s_security;
2216
2217 if (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)
2218 return -EOPNOTSUPP;
2219
2220 return dentry_has_perm(current, NULL, dentry, FILE__GETATTR); 2208 return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
2221} 2209}
2222 2210
@@ -2247,33 +2235,54 @@ static int selinux_inode_removexattr (struct dentry *dentry, char *name)
2247 return -EACCES; 2235 return -EACCES;
2248} 2236}
2249 2237
2250static int selinux_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size) 2238/*
2239 * Copy the in-core inode security context value to the user. If the
2240 * getxattr() prior to this succeeded, check to see if we need to
2241 * canonicalize the value to be finally returned to the user.
2242 *
2243 * Permission check is handled by selinux_inode_getxattr hook.
2244 */
2245static int selinux_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size, int err)
2251{ 2246{
2252 struct inode_security_struct *isec = inode->i_security; 2247 struct inode_security_struct *isec = inode->i_security;
2253 char *context; 2248 char *context;
2254 unsigned len; 2249 unsigned len;
2255 int rc; 2250 int rc;
2256 2251
2257 /* Permission check handled by selinux_inode_getxattr hook.*/ 2252 if (strcmp(name, XATTR_SELINUX_SUFFIX)) {
2258 2253 rc = -EOPNOTSUPP;
2259 if (strcmp(name, XATTR_SELINUX_SUFFIX)) 2254 goto out;
2260 return -EOPNOTSUPP; 2255 }
2261 2256
2262 rc = security_sid_to_context(isec->sid, &context, &len); 2257 rc = security_sid_to_context(isec->sid, &context, &len);
2263 if (rc) 2258 if (rc)
2264 return rc; 2259 goto out;
2265 2260
2261 /* Probe for required buffer size */
2266 if (!buffer || !size) { 2262 if (!buffer || !size) {
2267 kfree(context); 2263 rc = len;
2268 return len; 2264 goto out_free;
2269 } 2265 }
2266
2270 if (size < len) { 2267 if (size < len) {
2271 kfree(context); 2268 rc = -ERANGE;
2272 return -ERANGE; 2269 goto out_free;
2270 }
2271
2272 if (err > 0) {
2273 if ((len == err) && !(memcmp(context, buffer, len))) {
2274 /* Don't need to canonicalize value */
2275 rc = err;
2276 goto out_free;
2277 }
2278 memset(buffer, 0, size);
2273 } 2279 }
2274 memcpy(buffer, context, len); 2280 memcpy(buffer, context, len);
2281 rc = len;
2282out_free:
2275 kfree(context); 2283 kfree(context);
2276 return len; 2284out:
2285 return rc;
2277} 2286}
2278 2287
2279static int selinux_inode_setsecurity(struct inode *inode, const char *name, 2288static int selinux_inode_setsecurity(struct inode *inode, const char *name,
@@ -2704,8 +2713,7 @@ static int selinux_task_kill(struct task_struct *p, struct siginfo *info, int si
2704 if (rc) 2713 if (rc)
2705 return rc; 2714 return rc;
2706 2715
2707 if (info && ((unsigned long)info == 1 || 2716 if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
2708 (unsigned long)info == 2 || SI_FROMKERNEL(info)))
2709 return 0; 2717 return 0;
2710 2718
2711 if (!sig) 2719 if (!sig)
@@ -3599,11 +3607,10 @@ static int ipc_alloc_security(struct task_struct *task,
3599 struct task_security_struct *tsec = task->security; 3607 struct task_security_struct *tsec = task->security;
3600 struct ipc_security_struct *isec; 3608 struct ipc_security_struct *isec;
3601 3609
3602 isec = kmalloc(sizeof(struct ipc_security_struct), GFP_KERNEL); 3610 isec = kzalloc(sizeof(struct ipc_security_struct), GFP_KERNEL);
3603 if (!isec) 3611 if (!isec)
3604 return -ENOMEM; 3612 return -ENOMEM;
3605 3613
3606 memset(isec, 0, sizeof(struct ipc_security_struct));
3607 isec->magic = SELINUX_MAGIC; 3614 isec->magic = SELINUX_MAGIC;
3608 isec->sclass = sclass; 3615 isec->sclass = sclass;
3609 isec->ipc_perm = perm; 3616 isec->ipc_perm = perm;
@@ -3631,11 +3638,10 @@ static int msg_msg_alloc_security(struct msg_msg *msg)
3631{ 3638{
3632 struct msg_security_struct *msec; 3639 struct msg_security_struct *msec;
3633 3640
3634 msec = kmalloc(sizeof(struct msg_security_struct), GFP_KERNEL); 3641 msec = kzalloc(sizeof(struct msg_security_struct), GFP_KERNEL);
3635 if (!msec) 3642 if (!msec)
3636 return -ENOMEM; 3643 return -ENOMEM;
3637 3644
3638 memset(msec, 0, sizeof(struct msg_security_struct));
3639 msec->magic = SELINUX_MAGIC; 3645 msec->magic = SELINUX_MAGIC;
3640 msec->msg = msg; 3646 msec->msg = msg;
3641 msec->sid = SECINITSID_UNLABELED; 3647 msec->sid = SECINITSID_UNLABELED;
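
The recurring kmalloc()+memset() conversions in this file (and in the policy-database code below) all follow the same shape: kzalloc() returns memory that is already zeroed, so the explicit memset() disappears. A compressed sketch of the pattern, with a hypothetical structure and function name:

	#include <linux/types.h>
	#include <linux/slab.h>

	struct example_security {		/* hypothetical */
		unsigned long magic;
		u32 sid;
	};

	static struct example_security *example_alloc_security(gfp_t gfp)
	{
		/* previously: kmalloc() followed by memset(..., 0, sizeof(...)) */
		return kzalloc(sizeof(struct example_security), gfp);
	}
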
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 718d7be9f4dd..b10c34e8a743 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -114,13 +114,12 @@ static struct sel_netif *sel_netif_lookup(struct net_device *dev)
114 if (likely(netif != NULL)) 114 if (likely(netif != NULL))
115 goto out; 115 goto out;
116 116
117 new = kmalloc(sizeof(*new), GFP_ATOMIC); 117 new = kzalloc(sizeof(*new), GFP_ATOMIC);
118 if (!new) { 118 if (!new) {
119 netif = ERR_PTR(-ENOMEM); 119 netif = ERR_PTR(-ENOMEM);
120 goto out; 120 goto out;
121 } 121 }
122 122
123 memset(new, 0, sizeof(*new));
124 nsec = &new->nsec; 123 nsec = &new->nsec;
125 124
126 ret = security_netif_sid(dev->name, &nsec->if_sid, &nsec->msg_sid); 125 ret = security_netif_sid(dev->name, &nsec->if_sid, &nsec->msg_sid);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index a45cc971e735..fdc382389720 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -105,7 +105,7 @@ static ssize_t sel_write_enforce(struct file * file, const char __user * buf,
105 ssize_t length; 105 ssize_t length;
106 int new_value; 106 int new_value;
107 107
108 if (count < 0 || count >= PAGE_SIZE) 108 if (count >= PAGE_SIZE)
109 return -ENOMEM; 109 return -ENOMEM;
110 if (*ppos != 0) { 110 if (*ppos != 0) {
111 /* No partial writes. */ 111 /* No partial writes. */
@@ -155,7 +155,7 @@ static ssize_t sel_write_disable(struct file * file, const char __user * buf,
155 int new_value; 155 int new_value;
156 extern int selinux_disable(void); 156 extern int selinux_disable(void);
157 157
158 if (count < 0 || count >= PAGE_SIZE) 158 if (count >= PAGE_SIZE)
159 return -ENOMEM; 159 return -ENOMEM;
160 if (*ppos != 0) { 160 if (*ppos != 0) {
161 /* No partial writes. */ 161 /* No partial writes. */
@@ -242,7 +242,7 @@ static ssize_t sel_write_load(struct file * file, const char __user * buf,
242 goto out; 242 goto out;
243 } 243 }
244 244
245 if ((count < 0) || (count > 64 * 1024 * 1024) 245 if ((count > 64 * 1024 * 1024)
246 || (data = vmalloc(count)) == NULL) { 246 || (data = vmalloc(count)) == NULL) {
247 length = -ENOMEM; 247 length = -ENOMEM;
248 goto out; 248 goto out;
@@ -284,7 +284,7 @@ static ssize_t sel_write_context(struct file * file, const char __user * buf,
284 if (length) 284 if (length)
285 return length; 285 return length;
286 286
287 if (count < 0 || count >= PAGE_SIZE) 287 if (count >= PAGE_SIZE)
288 return -ENOMEM; 288 return -ENOMEM;
289 if (*ppos != 0) { 289 if (*ppos != 0) {
290 /* No partial writes. */ 290 /* No partial writes. */
@@ -332,7 +332,7 @@ static ssize_t sel_write_checkreqprot(struct file * file, const char __user * bu
332 if (length) 332 if (length)
333 return length; 333 return length;
334 334
335 if (count < 0 || count >= PAGE_SIZE) 335 if (count >= PAGE_SIZE)
336 return -ENOMEM; 336 return -ENOMEM;
337 if (*ppos != 0) { 337 if (*ppos != 0) {
338 /* No partial writes. */ 338 /* No partial writes. */
@@ -424,15 +424,13 @@ static ssize_t sel_write_access(struct file * file, char *buf, size_t size)
424 return length; 424 return length;
425 425
426 length = -ENOMEM; 426 length = -ENOMEM;
427 scon = kmalloc(size+1, GFP_KERNEL); 427 scon = kzalloc(size+1, GFP_KERNEL);
428 if (!scon) 428 if (!scon)
429 return length; 429 return length;
430 memset(scon, 0, size+1);
431 430
432 tcon = kmalloc(size+1, GFP_KERNEL); 431 tcon = kzalloc(size+1, GFP_KERNEL);
433 if (!tcon) 432 if (!tcon)
434 goto out; 433 goto out;
435 memset(tcon, 0, size+1);
436 434
437 length = -EINVAL; 435 length = -EINVAL;
438 if (sscanf(buf, "%s %s %hu %x", scon, tcon, &tclass, &req) != 4) 436 if (sscanf(buf, "%s %s %hu %x", scon, tcon, &tclass, &req) != 4)
@@ -475,15 +473,13 @@ static ssize_t sel_write_create(struct file * file, char *buf, size_t size)
475 return length; 473 return length;
476 474
477 length = -ENOMEM; 475 length = -ENOMEM;
478 scon = kmalloc(size+1, GFP_KERNEL); 476 scon = kzalloc(size+1, GFP_KERNEL);
479 if (!scon) 477 if (!scon)
480 return length; 478 return length;
481 memset(scon, 0, size+1);
482 479
483 tcon = kmalloc(size+1, GFP_KERNEL); 480 tcon = kzalloc(size+1, GFP_KERNEL);
484 if (!tcon) 481 if (!tcon)
485 goto out; 482 goto out;
486 memset(tcon, 0, size+1);
487 483
488 length = -EINVAL; 484 length = -EINVAL;
489 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) 485 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
@@ -536,15 +532,13 @@ static ssize_t sel_write_relabel(struct file * file, char *buf, size_t size)
536 return length; 532 return length;
537 533
538 length = -ENOMEM; 534 length = -ENOMEM;
539 scon = kmalloc(size+1, GFP_KERNEL); 535 scon = kzalloc(size+1, GFP_KERNEL);
540 if (!scon) 536 if (!scon)
541 return length; 537 return length;
542 memset(scon, 0, size+1);
543 538
544 tcon = kmalloc(size+1, GFP_KERNEL); 539 tcon = kzalloc(size+1, GFP_KERNEL);
545 if (!tcon) 540 if (!tcon)
546 goto out; 541 goto out;
547 memset(tcon, 0, size+1);
548 542
549 length = -EINVAL; 543 length = -EINVAL;
550 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) 544 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
@@ -595,15 +589,13 @@ static ssize_t sel_write_user(struct file * file, char *buf, size_t size)
595 return length; 589 return length;
596 590
597 length = -ENOMEM; 591 length = -ENOMEM;
598 con = kmalloc(size+1, GFP_KERNEL); 592 con = kzalloc(size+1, GFP_KERNEL);
599 if (!con) 593 if (!con)
600 return length; 594 return length;
601 memset(con, 0, size+1);
602 595
603 user = kmalloc(size+1, GFP_KERNEL); 596 user = kzalloc(size+1, GFP_KERNEL);
604 if (!user) 597 if (!user)
605 goto out; 598 goto out;
606 memset(user, 0, size+1);
607 599
608 length = -EINVAL; 600 length = -EINVAL;
609 if (sscanf(buf, "%s %s", con, user) != 2) 601 if (sscanf(buf, "%s %s", con, user) != 2)
@@ -658,15 +650,13 @@ static ssize_t sel_write_member(struct file * file, char *buf, size_t size)
658 return length; 650 return length;
659 651
660 length = -ENOMEM; 652 length = -ENOMEM;
661 scon = kmalloc(size+1, GFP_KERNEL); 653 scon = kzalloc(size+1, GFP_KERNEL);
662 if (!scon) 654 if (!scon)
663 return length; 655 return length;
664 memset(scon, 0, size+1);
665 656
666 tcon = kmalloc(size+1, GFP_KERNEL); 657 tcon = kzalloc(size+1, GFP_KERNEL);
667 if (!tcon) 658 if (!tcon)
668 goto out; 659 goto out;
669 memset(tcon, 0, size+1);
670 660
671 length = -EINVAL; 661 length = -EINVAL;
672 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) 662 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
@@ -739,7 +729,7 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
739 if (!filep->f_op) 729 if (!filep->f_op)
740 goto out; 730 goto out;
741 731
742 if (count < 0 || count > PAGE_SIZE) { 732 if (count > PAGE_SIZE) {
743 ret = -EINVAL; 733 ret = -EINVAL;
744 goto out; 734 goto out;
745 } 735 }
@@ -800,7 +790,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
800 if (!filep->f_op) 790 if (!filep->f_op)
801 goto out; 791 goto out;
802 792
803 if (count < 0 || count >= PAGE_SIZE) { 793 if (count >= PAGE_SIZE) {
804 length = -ENOMEM; 794 length = -ENOMEM;
805 goto out; 795 goto out;
806 } 796 }
@@ -858,7 +848,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
858 if (!filep->f_op) 848 if (!filep->f_op)
859 goto out; 849 goto out;
860 850
861 if (count < 0 || count >= PAGE_SIZE) { 851 if (count >= PAGE_SIZE) {
862 length = -ENOMEM; 852 length = -ENOMEM;
863 goto out; 853 goto out;
864 } 854 }
@@ -924,7 +914,7 @@ static void sel_remove_bools(struct dentry *de)
924 914
925 file_list_lock(); 915 file_list_lock();
926 list_for_each(p, &sb->s_files) { 916 list_for_each(p, &sb->s_files) {
927 struct file * filp = list_entry(p, struct file, f_list); 917 struct file * filp = list_entry(p, struct file, f_u.fu_list);
928 struct dentry * dentry = filp->f_dentry; 918 struct dentry * dentry = filp->f_dentry;
929 919
930 if (dentry->d_parent != de) { 920 if (dentry->d_parent != de) {
@@ -1032,7 +1022,7 @@ static ssize_t sel_write_avc_cache_threshold(struct file * file,
1032 ssize_t ret; 1022 ssize_t ret;
1033 int new_value; 1023 int new_value;
1034 1024
1035 if (count < 0 || count >= PAGE_SIZE) { 1025 if (count >= PAGE_SIZE) {
1036 ret = -ENOMEM; 1026 ret = -ENOMEM;
1037 goto out; 1027 goto out;
1038 } 1028 }
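
The dropped "count < 0" tests in selinuxfs are possible because count is a size_t, which is unsigned and can never be negative; only the upper bound still needs checking. A hedged sketch of a write handler in the same style (hypothetical function, assuming the usual <linux/fs.h> context):

	static ssize_t example_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
	{
		/* size_t is unsigned: "count < 0" could never be true */
		if (count >= PAGE_SIZE)
			return -ENOMEM;

		/* ... copy in and parse the buffer here ... */
		return count;
	}
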
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index daf288007460..d2737edba541 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -220,10 +220,9 @@ int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp)
220 u32 len; 220 u32 len;
221 int rc; 221 int rc;
222 222
223 booldatum = kmalloc(sizeof(struct cond_bool_datum), GFP_KERNEL); 223 booldatum = kzalloc(sizeof(struct cond_bool_datum), GFP_KERNEL);
224 if (!booldatum) 224 if (!booldatum)
225 return -1; 225 return -1;
226 memset(booldatum, 0, sizeof(struct cond_bool_datum));
227 226
228 rc = next_entry(buf, fp, sizeof buf); 227 rc = next_entry(buf, fp, sizeof buf);
229 if (rc < 0) 228 if (rc < 0)
@@ -321,10 +320,9 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
321 goto err; 320 goto err;
322 } 321 }
323 322
324 list = kmalloc(sizeof(struct cond_av_list), GFP_KERNEL); 323 list = kzalloc(sizeof(struct cond_av_list), GFP_KERNEL);
325 if (!list) 324 if (!list)
326 goto err; 325 goto err;
327 memset(list, 0, sizeof(*list));
328 326
329 list->node = node_ptr; 327 list->node = node_ptr;
330 if (!data->head) 328 if (!data->head)
@@ -414,11 +412,10 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
414 if (rc < 0) 412 if (rc < 0)
415 goto err; 413 goto err;
416 414
417 expr = kmalloc(sizeof(struct cond_expr), GFP_KERNEL); 415 expr = kzalloc(sizeof(struct cond_expr), GFP_KERNEL);
418 if (!expr) { 416 if (!expr) {
419 goto err; 417 goto err;
420 } 418 }
421 memset(expr, 0, sizeof(struct cond_expr));
422 419
423 expr->expr_type = le32_to_cpu(buf[0]); 420 expr->expr_type = le32_to_cpu(buf[0]);
424 expr->bool = le32_to_cpu(buf[1]); 421 expr->bool = le32_to_cpu(buf[1]);
@@ -460,10 +457,9 @@ int cond_read_list(struct policydb *p, void *fp)
460 len = le32_to_cpu(buf[0]); 457 len = le32_to_cpu(buf[0]);
461 458
462 for (i = 0; i < len; i++) { 459 for (i = 0; i < len; i++) {
463 node = kmalloc(sizeof(struct cond_node), GFP_KERNEL); 460 node = kzalloc(sizeof(struct cond_node), GFP_KERNEL);
464 if (!node) 461 if (!node)
465 goto err; 462 goto err;
466 memset(node, 0, sizeof(struct cond_node));
467 463
468 if (cond_read_node(p, node, fp) != 0) 464 if (cond_read_node(p, node, fp) != 0)
469 goto err; 465 goto err;
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index d515154128cc..47024a6e1844 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -39,12 +39,11 @@ int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
39 n = src->node; 39 n = src->node;
40 prev = NULL; 40 prev = NULL;
41 while (n) { 41 while (n) {
42 new = kmalloc(sizeof(*new), GFP_ATOMIC); 42 new = kzalloc(sizeof(*new), GFP_ATOMIC);
43 if (!new) { 43 if (!new) {
44 ebitmap_destroy(dst); 44 ebitmap_destroy(dst);
45 return -ENOMEM; 45 return -ENOMEM;
46 } 46 }
47 memset(new, 0, sizeof(*new));
48 new->startbit = n->startbit; 47 new->startbit = n->startbit;
49 new->map = n->map; 48 new->map = n->map;
50 new->next = NULL; 49 new->next = NULL;
@@ -150,10 +149,9 @@ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
150 if (!value) 149 if (!value)
151 return 0; 150 return 0;
152 151
153 new = kmalloc(sizeof(*new), GFP_ATOMIC); 152 new = kzalloc(sizeof(*new), GFP_ATOMIC);
154 if (!new) 153 if (!new)
155 return -ENOMEM; 154 return -ENOMEM;
156 memset(new, 0, sizeof(*new));
157 155
158 new->startbit = bit & ~(MAPSIZE - 1); 156 new->startbit = bit & ~(MAPSIZE - 1);
159 new->map = (MAPBIT << (bit - new->startbit)); 157 new->map = (MAPBIT << (bit - new->startbit));
@@ -232,13 +230,12 @@ int ebitmap_read(struct ebitmap *e, void *fp)
232 printk(KERN_ERR "security: ebitmap: truncated map\n"); 230 printk(KERN_ERR "security: ebitmap: truncated map\n");
233 goto bad; 231 goto bad;
234 } 232 }
235 n = kmalloc(sizeof(*n), GFP_KERNEL); 233 n = kzalloc(sizeof(*n), GFP_KERNEL);
236 if (!n) { 234 if (!n) {
237 printk(KERN_ERR "security: ebitmap: out of memory\n"); 235 printk(KERN_ERR "security: ebitmap: out of memory\n");
238 rc = -ENOMEM; 236 rc = -ENOMEM;
239 goto bad; 237 goto bad;
240 } 238 }
241 memset(n, 0, sizeof(*n));
242 239
243 n->startbit = le32_to_cpu(buf[0]); 240 n->startbit = le32_to_cpu(buf[0]);
244 241
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 26661fcc00ce..24e5ec957630 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -15,11 +15,10 @@ struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, void *key),
15 struct hashtab *p; 15 struct hashtab *p;
16 u32 i; 16 u32 i;
17 17
18 p = kmalloc(sizeof(*p), GFP_KERNEL); 18 p = kzalloc(sizeof(*p), GFP_KERNEL);
19 if (p == NULL) 19 if (p == NULL)
20 return p; 20 return p;
21 21
22 memset(p, 0, sizeof(*p));
23 p->size = size; 22 p->size = size;
24 p->nel = 0; 23 p->nel = 0;
25 p->hash_value = hash_value; 24 p->hash_value = hash_value;
@@ -55,10 +54,9 @@ int hashtab_insert(struct hashtab *h, void *key, void *datum)
55 if (cur && (h->keycmp(h, key, cur->key) == 0)) 54 if (cur && (h->keycmp(h, key, cur->key) == 0))
56 return -EEXIST; 55 return -EEXIST;
57 56
58 newnode = kmalloc(sizeof(*newnode), GFP_KERNEL); 57 newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
59 if (newnode == NULL) 58 if (newnode == NULL)
60 return -ENOMEM; 59 return -ENOMEM;
61 memset(newnode, 0, sizeof(*newnode));
62 newnode->key = key; 60 newnode->key = key;
63 newnode->datum = datum; 61 newnode->datum = datum;
64 if (prev) { 62 if (prev) {
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 8e6262d12aa9..2f5f539875f2 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -121,12 +121,11 @@ static int roles_init(struct policydb *p)
121 int rc; 121 int rc;
122 struct role_datum *role; 122 struct role_datum *role;
123 123
124 role = kmalloc(sizeof(*role), GFP_KERNEL); 124 role = kzalloc(sizeof(*role), GFP_KERNEL);
125 if (!role) { 125 if (!role) {
126 rc = -ENOMEM; 126 rc = -ENOMEM;
127 goto out; 127 goto out;
128 } 128 }
129 memset(role, 0, sizeof(*role));
130 role->value = ++p->p_roles.nprim; 129 role->value = ++p->p_roles.nprim;
131 if (role->value != OBJECT_R_VAL) { 130 if (role->value != OBJECT_R_VAL) {
132 rc = -EINVAL; 131 rc = -EINVAL;
@@ -851,12 +850,11 @@ static int perm_read(struct policydb *p, struct hashtab *h, void *fp)
851 __le32 buf[2]; 850 __le32 buf[2];
852 u32 len; 851 u32 len;
853 852
854 perdatum = kmalloc(sizeof(*perdatum), GFP_KERNEL); 853 perdatum = kzalloc(sizeof(*perdatum), GFP_KERNEL);
855 if (!perdatum) { 854 if (!perdatum) {
856 rc = -ENOMEM; 855 rc = -ENOMEM;
857 goto out; 856 goto out;
858 } 857 }
859 memset(perdatum, 0, sizeof(*perdatum));
860 858
861 rc = next_entry(buf, fp, sizeof buf); 859 rc = next_entry(buf, fp, sizeof buf);
862 if (rc < 0) 860 if (rc < 0)
@@ -893,12 +891,11 @@ static int common_read(struct policydb *p, struct hashtab *h, void *fp)
893 u32 len, nel; 891 u32 len, nel;
894 int i, rc; 892 int i, rc;
895 893
896 comdatum = kmalloc(sizeof(*comdatum), GFP_KERNEL); 894 comdatum = kzalloc(sizeof(*comdatum), GFP_KERNEL);
897 if (!comdatum) { 895 if (!comdatum) {
898 rc = -ENOMEM; 896 rc = -ENOMEM;
899 goto out; 897 goto out;
900 } 898 }
901 memset(comdatum, 0, sizeof(*comdatum));
902 899
903 rc = next_entry(buf, fp, sizeof buf); 900 rc = next_entry(buf, fp, sizeof buf);
904 if (rc < 0) 901 if (rc < 0)
@@ -950,10 +947,9 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons,
950 947
951 lc = NULL; 948 lc = NULL;
952 for (i = 0; i < ncons; i++) { 949 for (i = 0; i < ncons; i++) {
953 c = kmalloc(sizeof(*c), GFP_KERNEL); 950 c = kzalloc(sizeof(*c), GFP_KERNEL);
954 if (!c) 951 if (!c)
955 return -ENOMEM; 952 return -ENOMEM;
956 memset(c, 0, sizeof(*c));
957 953
958 if (lc) { 954 if (lc) {
959 lc->next = c; 955 lc->next = c;
@@ -969,10 +965,9 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons,
969 le = NULL; 965 le = NULL;
970 depth = -1; 966 depth = -1;
971 for (j = 0; j < nexpr; j++) { 967 for (j = 0; j < nexpr; j++) {
972 e = kmalloc(sizeof(*e), GFP_KERNEL); 968 e = kzalloc(sizeof(*e), GFP_KERNEL);
973 if (!e) 969 if (!e)
974 return -ENOMEM; 970 return -ENOMEM;
975 memset(e, 0, sizeof(*e));
976 971
977 if (le) { 972 if (le) {
978 le->next = e; 973 le->next = e;
@@ -1033,12 +1028,11 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
1033 u32 len, len2, ncons, nel; 1028 u32 len, len2, ncons, nel;
1034 int i, rc; 1029 int i, rc;
1035 1030
1036 cladatum = kmalloc(sizeof(*cladatum), GFP_KERNEL); 1031 cladatum = kzalloc(sizeof(*cladatum), GFP_KERNEL);
1037 if (!cladatum) { 1032 if (!cladatum) {
1038 rc = -ENOMEM; 1033 rc = -ENOMEM;
1039 goto out; 1034 goto out;
1040 } 1035 }
1041 memset(cladatum, 0, sizeof(*cladatum));
1042 1036
1043 rc = next_entry(buf, fp, sizeof(u32)*6); 1037 rc = next_entry(buf, fp, sizeof(u32)*6);
1044 if (rc < 0) 1038 if (rc < 0)
@@ -1127,12 +1121,11 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
1127 __le32 buf[2]; 1121 __le32 buf[2];
1128 u32 len; 1122 u32 len;
1129 1123
1130 role = kmalloc(sizeof(*role), GFP_KERNEL); 1124 role = kzalloc(sizeof(*role), GFP_KERNEL);
1131 if (!role) { 1125 if (!role) {
1132 rc = -ENOMEM; 1126 rc = -ENOMEM;
1133 goto out; 1127 goto out;
1134 } 1128 }
1135 memset(role, 0, sizeof(*role));
1136 1129
1137 rc = next_entry(buf, fp, sizeof buf); 1130 rc = next_entry(buf, fp, sizeof buf);
1138 if (rc < 0) 1131 if (rc < 0)
@@ -1188,12 +1181,11 @@ static int type_read(struct policydb *p, struct hashtab *h, void *fp)
1188 __le32 buf[3]; 1181 __le32 buf[3];
1189 u32 len; 1182 u32 len;
1190 1183
1191 typdatum = kmalloc(sizeof(*typdatum),GFP_KERNEL); 1184 typdatum = kzalloc(sizeof(*typdatum),GFP_KERNEL);
1192 if (!typdatum) { 1185 if (!typdatum) {
1193 rc = -ENOMEM; 1186 rc = -ENOMEM;
1194 return rc; 1187 return rc;
1195 } 1188 }
1196 memset(typdatum, 0, sizeof(*typdatum));
1197 1189
1198 rc = next_entry(buf, fp, sizeof buf); 1190 rc = next_entry(buf, fp, sizeof buf);
1199 if (rc < 0) 1191 if (rc < 0)
@@ -1261,12 +1253,11 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp)
1261 __le32 buf[2]; 1253 __le32 buf[2];
1262 u32 len; 1254 u32 len;
1263 1255
1264 usrdatum = kmalloc(sizeof(*usrdatum), GFP_KERNEL); 1256 usrdatum = kzalloc(sizeof(*usrdatum), GFP_KERNEL);
1265 if (!usrdatum) { 1257 if (!usrdatum) {
1266 rc = -ENOMEM; 1258 rc = -ENOMEM;
1267 goto out; 1259 goto out;
1268 } 1260 }
1269 memset(usrdatum, 0, sizeof(*usrdatum));
1270 1261
1271 rc = next_entry(buf, fp, sizeof buf); 1262 rc = next_entry(buf, fp, sizeof buf);
1272 if (rc < 0) 1263 if (rc < 0)
@@ -1316,12 +1307,11 @@ static int sens_read(struct policydb *p, struct hashtab *h, void *fp)
1316 __le32 buf[2]; 1307 __le32 buf[2];
1317 u32 len; 1308 u32 len;
1318 1309
1319 levdatum = kmalloc(sizeof(*levdatum), GFP_ATOMIC); 1310 levdatum = kzalloc(sizeof(*levdatum), GFP_ATOMIC);
1320 if (!levdatum) { 1311 if (!levdatum) {
1321 rc = -ENOMEM; 1312 rc = -ENOMEM;
1322 goto out; 1313 goto out;
1323 } 1314 }
1324 memset(levdatum, 0, sizeof(*levdatum));
1325 1315
1326 rc = next_entry(buf, fp, sizeof buf); 1316 rc = next_entry(buf, fp, sizeof buf);
1327 if (rc < 0) 1317 if (rc < 0)
@@ -1368,12 +1358,11 @@ static int cat_read(struct policydb *p, struct hashtab *h, void *fp)
1368 __le32 buf[3]; 1358 __le32 buf[3];
1369 u32 len; 1359 u32 len;
1370 1360
1371 catdatum = kmalloc(sizeof(*catdatum), GFP_ATOMIC); 1361 catdatum = kzalloc(sizeof(*catdatum), GFP_ATOMIC);
1372 if (!catdatum) { 1362 if (!catdatum) {
1373 rc = -ENOMEM; 1363 rc = -ENOMEM;
1374 goto out; 1364 goto out;
1375 } 1365 }
1376 memset(catdatum, 0, sizeof(*catdatum));
1377 1366
1378 rc = next_entry(buf, fp, sizeof buf); 1367 rc = next_entry(buf, fp, sizeof buf);
1379 if (rc < 0) 1368 if (rc < 0)
@@ -1567,12 +1556,11 @@ int policydb_read(struct policydb *p, void *fp)
1567 nel = le32_to_cpu(buf[0]); 1556 nel = le32_to_cpu(buf[0]);
1568 ltr = NULL; 1557 ltr = NULL;
1569 for (i = 0; i < nel; i++) { 1558 for (i = 0; i < nel; i++) {
1570 tr = kmalloc(sizeof(*tr), GFP_KERNEL); 1559 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
1571 if (!tr) { 1560 if (!tr) {
1572 rc = -ENOMEM; 1561 rc = -ENOMEM;
1573 goto bad; 1562 goto bad;
1574 } 1563 }
1575 memset(tr, 0, sizeof(*tr));
1576 if (ltr) { 1564 if (ltr) {
1577 ltr->next = tr; 1565 ltr->next = tr;
1578 } else { 1566 } else {
@@ -1593,12 +1581,11 @@ int policydb_read(struct policydb *p, void *fp)
1593 nel = le32_to_cpu(buf[0]); 1581 nel = le32_to_cpu(buf[0]);
1594 lra = NULL; 1582 lra = NULL;
1595 for (i = 0; i < nel; i++) { 1583 for (i = 0; i < nel; i++) {
1596 ra = kmalloc(sizeof(*ra), GFP_KERNEL); 1584 ra = kzalloc(sizeof(*ra), GFP_KERNEL);
1597 if (!ra) { 1585 if (!ra) {
1598 rc = -ENOMEM; 1586 rc = -ENOMEM;
1599 goto bad; 1587 goto bad;
1600 } 1588 }
1601 memset(ra, 0, sizeof(*ra));
1602 if (lra) { 1589 if (lra) {
1603 lra->next = ra; 1590 lra->next = ra;
1604 } else { 1591 } else {
@@ -1627,12 +1614,11 @@ int policydb_read(struct policydb *p, void *fp)
1627 nel = le32_to_cpu(buf[0]); 1614 nel = le32_to_cpu(buf[0]);
1628 l = NULL; 1615 l = NULL;
1629 for (j = 0; j < nel; j++) { 1616 for (j = 0; j < nel; j++) {
1630 c = kmalloc(sizeof(*c), GFP_KERNEL); 1617 c = kzalloc(sizeof(*c), GFP_KERNEL);
1631 if (!c) { 1618 if (!c) {
1632 rc = -ENOMEM; 1619 rc = -ENOMEM;
1633 goto bad; 1620 goto bad;
1634 } 1621 }
1635 memset(c, 0, sizeof(*c));
1636 if (l) { 1622 if (l) {
1637 l->next = c; 1623 l->next = c;
1638 } else { 1624 } else {
@@ -1743,12 +1729,11 @@ int policydb_read(struct policydb *p, void *fp)
1743 if (rc < 0) 1729 if (rc < 0)
1744 goto bad; 1730 goto bad;
1745 len = le32_to_cpu(buf[0]); 1731 len = le32_to_cpu(buf[0]);
1746 newgenfs = kmalloc(sizeof(*newgenfs), GFP_KERNEL); 1732 newgenfs = kzalloc(sizeof(*newgenfs), GFP_KERNEL);
1747 if (!newgenfs) { 1733 if (!newgenfs) {
1748 rc = -ENOMEM; 1734 rc = -ENOMEM;
1749 goto bad; 1735 goto bad;
1750 } 1736 }
1751 memset(newgenfs, 0, sizeof(*newgenfs));
1752 1737
1753 newgenfs->fstype = kmalloc(len + 1,GFP_KERNEL); 1738 newgenfs->fstype = kmalloc(len + 1,GFP_KERNEL);
1754 if (!newgenfs->fstype) { 1739 if (!newgenfs->fstype) {
@@ -1790,12 +1775,11 @@ int policydb_read(struct policydb *p, void *fp)
1790 goto bad; 1775 goto bad;
1791 len = le32_to_cpu(buf[0]); 1776 len = le32_to_cpu(buf[0]);
1792 1777
1793 newc = kmalloc(sizeof(*newc), GFP_KERNEL); 1778 newc = kzalloc(sizeof(*newc), GFP_KERNEL);
1794 if (!newc) { 1779 if (!newc) {
1795 rc = -ENOMEM; 1780 rc = -ENOMEM;
1796 goto bad; 1781 goto bad;
1797 } 1782 }
1798 memset(newc, 0, sizeof(*newc));
1799 1783
1800 newc->u.name = kmalloc(len + 1,GFP_KERNEL); 1784 newc->u.name = kmalloc(len + 1,GFP_KERNEL);
1801 if (!newc->u.name) { 1785 if (!newc->u.name) {
@@ -1843,12 +1827,11 @@ int policydb_read(struct policydb *p, void *fp)
1843 nel = le32_to_cpu(buf[0]); 1827 nel = le32_to_cpu(buf[0]);
1844 lrt = NULL; 1828 lrt = NULL;
1845 for (i = 0; i < nel; i++) { 1829 for (i = 0; i < nel; i++) {
1846 rt = kmalloc(sizeof(*rt), GFP_KERNEL); 1830 rt = kzalloc(sizeof(*rt), GFP_KERNEL);
1847 if (!rt) { 1831 if (!rt) {
1848 rc = -ENOMEM; 1832 rc = -ENOMEM;
1849 goto bad; 1833 goto bad;
1850 } 1834 }
1851 memset(rt, 0, sizeof(*rt));
1852 if (lrt) 1835 if (lrt)
1853 lrt->next = rt; 1836 lrt->next = rt;
1854 else 1837 else
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index aecdded55e74..44eb4d74908d 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1531,12 +1531,11 @@ int security_get_user_sids(u32 fromsid,
1531 } 1531 }
1532 usercon.user = user->value; 1532 usercon.user = user->value;
1533 1533
1534 mysids = kmalloc(maxnel*sizeof(*mysids), GFP_ATOMIC); 1534 mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
1535 if (!mysids) { 1535 if (!mysids) {
1536 rc = -ENOMEM; 1536 rc = -ENOMEM;
1537 goto out_unlock; 1537 goto out_unlock;
1538 } 1538 }
1539 memset(mysids, 0, maxnel*sizeof(*mysids));
1540 1539
1541 ebitmap_for_each_bit(&user->roles, rnode, i) { 1540 ebitmap_for_each_bit(&user->roles, rnode, i) {
1542 if (!ebitmap_node_get_bit(rnode, i)) 1541 if (!ebitmap_node_get_bit(rnode, i))
@@ -1566,13 +1565,12 @@ int security_get_user_sids(u32 fromsid,
1566 mysids[mynel++] = sid; 1565 mysids[mynel++] = sid;
1567 } else { 1566 } else {
1568 maxnel += SIDS_NEL; 1567 maxnel += SIDS_NEL;
1569 mysids2 = kmalloc(maxnel*sizeof(*mysids2), GFP_ATOMIC); 1568 mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
1570 if (!mysids2) { 1569 if (!mysids2) {
1571 rc = -ENOMEM; 1570 rc = -ENOMEM;
1572 kfree(mysids); 1571 kfree(mysids);
1573 goto out_unlock; 1572 goto out_unlock;
1574 } 1573 }
1575 memset(mysids2, 0, maxnel*sizeof(*mysids2));
1576 memcpy(mysids2, mysids, mynel * sizeof(*mysids2)); 1574 memcpy(mysids2, mysids, mynel * sizeof(*mysids2));
1577 kfree(mysids); 1575 kfree(mysids);
1578 mysids = mysids2; 1576 mysids = mysids2;
@@ -1714,12 +1712,11 @@ int security_get_bools(int *len, char ***names, int **values)
1714 goto out; 1712 goto out;
1715 } 1713 }
1716 1714
1717 *names = (char**)kmalloc(sizeof(char*) * *len, GFP_ATOMIC); 1715 *names = (char**)kcalloc(*len, sizeof(char*), GFP_ATOMIC);
1718 if (!*names) 1716 if (!*names)
1719 goto err; 1717 goto err;
1720 memset(*names, 0, sizeof(char*) * *len);
1721 1718
1722 *values = (int*)kmalloc(sizeof(int) * *len, GFP_ATOMIC); 1719 *values = (int*)kcalloc(*len, sizeof(int), GFP_ATOMIC);
1723 if (!*values) 1720 if (!*values)
1724 goto err; 1721 goto err;
1725 1722
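
The services.c conversions use kcalloc() rather than kzalloc() because these allocations are arrays: kcalloc(n, size, flags) zeroes the memory and also refuses allocations where n * size would overflow. An illustrative sketch with a hypothetical helper name:

	#include <linux/types.h>
	#include <linux/slab.h>

	static u32 *example_alloc_sids(unsigned int maxnel)
	{
		/* zeroed array allocation with multiplication-overflow checking */
		return kcalloc(maxnel, sizeof(u32), GFP_ATOMIC);
	}
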
diff --git a/sound/oss/ac97_codec.c b/sound/oss/ac97_codec.c
index 3ecef4689f1b..fd25aca25120 100644
--- a/sound/oss/ac97_codec.c
+++ b/sound/oss/ac97_codec.c
@@ -55,6 +55,7 @@
55#include <linux/pci.h> 55#include <linux/pci.h>
56#include <linux/ac97_codec.h> 56#include <linux/ac97_codec.h>
57#include <asm/uaccess.h> 57#include <asm/uaccess.h>
58#include <asm/semaphore.h>
58 59
59#define CODEC_ID_BUFSZ 14 60#define CODEC_ID_BUFSZ 14
60 61
diff --git a/sound/oss/awe_wave.c b/sound/oss/awe_wave.c
index d2b9beda8ace..b3ea719d33db 100644
--- a/sound/oss/awe_wave.c
+++ b/sound/oss/awe_wave.c
@@ -6062,7 +6062,7 @@ static int awe_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id
6062 io1 = pnp_port_start(dev,0); 6062 io1 = pnp_port_start(dev,0);
6063 io2 = pnp_port_start(dev,1); 6063 io2 = pnp_port_start(dev,1);
6064 io3 = pnp_port_start(dev,2); 6064 io3 = pnp_port_start(dev,2);
6065 printk(KERN_INFO "AWE32: A PnP Wave Table was detected at IO's %#x,%#x,%#x\n.", 6065 printk(KERN_INFO "AWE32: A PnP Wave Table was detected at IO's %#x,%#x,%#x.\n",
6066 io1, io2, io3); 6066 io1, io2, io3);
6067 setup_ports(io1, io2, io3); 6067 setup_ports(io1, io2, io3);
6068 6068
diff --git a/sound/oss/cs4232.c b/sound/oss/cs4232.c
index 6ec308f5d935..7c59e2d4003a 100644
--- a/sound/oss/cs4232.c
+++ b/sound/oss/cs4232.c
@@ -195,10 +195,12 @@ static int __init probe_cs4232(struct address_info *hw_config, int isapnp_config
195 CS_OUT2(0x15, 0x00); /* Select logical device 0 (WSS/SB/FM) */ 195 CS_OUT2(0x15, 0x00); /* Select logical device 0 (WSS/SB/FM) */
196 CS_OUT3(0x47, (base >> 8) & 0xff, base & 0xff); /* WSS base */ 196 CS_OUT3(0x47, (base >> 8) & 0xff, base & 0xff); /* WSS base */
197 197
198 if (check_region(0x388, 4)) /* Not free */ 198 if (!request_region(0x388, 4, "FM")) /* Not free */
199 CS_OUT3(0x48, 0x00, 0x00) /* FM base off */ 199 CS_OUT3(0x48, 0x00, 0x00) /* FM base off */
200 else 200 else {
201 release_region(0x388, 4);
201 CS_OUT3(0x48, 0x03, 0x88); /* FM base 0x388 */ 202 CS_OUT3(0x48, 0x03, 0x88); /* FM base 0x388 */
203 }
202 204
203 CS_OUT3(0x42, 0x00, 0x00); /* SB base off */ 205 CS_OUT3(0x42, 0x00, 0x00); /* SB base off */
204 CS_OUT2(0x22, irq); /* SB+WSS IRQ */ 206 CS_OUT2(0x22, irq); /* SB+WSS IRQ */
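
Both sound-driver changes replace check_region() with request_region(): check_region() only inspects the range at the moment of the call and is inherently racy, while request_region() atomically checks and reserves it (returning NULL on conflict), so every early-exit path afterwards must pair it with release_region(). A sketch of the resulting probe shape, assuming <linux/ioport.h>; example_hw_reset() is a hypothetical helper:

	static int __init example_probe(unsigned long io_base)
	{
		if (!request_region(io_base, 16, "example"))
			return -EBUSY;		/* busy: nothing to release */

		if (example_hw_reset(io_base)) {
			release_region(io_base, 16);	/* undo on the error path */
			return -EIO;
		}

		release_region(io_base, 16);	/* re-requested later at install time */
		return 0;
	}
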
diff --git a/sound/oss/wavfront.c b/sound/oss/wavfront.c
index b92ba8921638..b1a4eeb9dc08 100644
--- a/sound/oss/wavfront.c
+++ b/sound/oss/wavfront.c
@@ -2434,7 +2434,7 @@ static int __init detect_wavefront (int irq, int io_base)
2434 consumes 16. 2434 consumes 16.
2435 */ 2435 */
2436 2436
2437 if (check_region (io_base, 16)) { 2437 if (!request_region (io_base, 16, "wavfront")) {
2438 printk (KERN_ERR LOGNAME "IO address range 0x%x - 0x%x " 2438 printk (KERN_ERR LOGNAME "IO address range 0x%x - 0x%x "
2439 "already in use - ignored\n", dev.base, 2439 "already in use - ignored\n", dev.base,
2440 dev.base+15); 2440 dev.base+15);
@@ -2466,10 +2466,13 @@ static int __init detect_wavefront (int irq, int io_base)
2466 } else { 2466 } else {
2467 printk (KERN_WARNING LOGNAME "not raw, but no " 2467 printk (KERN_WARNING LOGNAME "not raw, but no "
2468 "hardware version!\n"); 2468 "hardware version!\n");
2469 release_region (io_base, 16);
2469 return 0; 2470 return 0;
2470 } 2471 }
2471 2472
2472 if (!wf_raw) { 2473 if (!wf_raw) {
2474 /* will re-acquire region in install_wavefront() */
2475 release_region (io_base, 16);
2473 return 1; 2476 return 1;
2474 } else { 2477 } else {
2475 printk (KERN_INFO LOGNAME 2478 printk (KERN_INFO LOGNAME
@@ -2489,6 +2492,7 @@ static int __init detect_wavefront (int irq, int io_base)
2489 2492
2490 if (wavefront_hw_reset ()) { 2493 if (wavefront_hw_reset ()) {
2491 printk (KERN_WARNING LOGNAME "hardware reset failed\n"); 2494 printk (KERN_WARNING LOGNAME "hardware reset failed\n");
2495 release_region (io_base, 16);
2492 return 0; 2496 return 0;
2493 } 2497 }
2494 2498
@@ -2496,6 +2500,8 @@ static int __init detect_wavefront (int irq, int io_base)
2496 2500
2497 dev.has_fx = (detect_wffx () == 0); 2501 dev.has_fx = (detect_wffx () == 0);
2498 2502
2503 /* will re-acquire region in install_wavefront() */
2504 release_region (io_base, 16);
2499 return 1; 2505 return 1;
2500} 2506}
2501 2507
@@ -2804,17 +2810,27 @@ static int __init wavefront_init (int atboot)
2804} 2810}
2805 2811
2806static int __init install_wavefront (void) 2812static int __init install_wavefront (void)
2807
2808{ 2813{
2814 if (!request_region (dev.base+2, 6, "wavefront synth"))
2815 return -1;
2816
2817 if (dev.has_fx) {
2818 if (!request_region (dev.base+8, 8, "wavefront fx")) {
2819 release_region (dev.base+2, 6);
2820 return -1;
2821 }
2822 }
2823
2809 if ((dev.synth_dev = register_sound_synth (&wavefront_fops, -1)) < 0) { 2824 if ((dev.synth_dev = register_sound_synth (&wavefront_fops, -1)) < 0) {
2810 printk (KERN_ERR LOGNAME "cannot register raw synth\n"); 2825 printk (KERN_ERR LOGNAME "cannot register raw synth\n");
2811 return -1; 2826 goto err_out;
2812 } 2827 }
2813 2828
2814#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ 2829#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ
2815 if ((dev.oss_dev = sound_alloc_synthdev()) == -1) { 2830 if ((dev.oss_dev = sound_alloc_synthdev()) == -1) {
2816 printk (KERN_ERR LOGNAME "Too many sequencers\n"); 2831 printk (KERN_ERR LOGNAME "Too many sequencers\n");
2817 return -1; 2832 /* FIXME: leak: should unregister sound synth */
2833 goto err_out;
2818 } else { 2834 } else {
2819 synth_devs[dev.oss_dev] = &wavefront_operations; 2835 synth_devs[dev.oss_dev] = &wavefront_operations;
2820 } 2836 }
@@ -2827,20 +2843,20 @@ static int __init install_wavefront (void)
2827 sound_unload_synthdev (dev.oss_dev); 2843 sound_unload_synthdev (dev.oss_dev);
2828#endif /* OSS_SUPPORT_SEQ */ 2844#endif /* OSS_SUPPORT_SEQ */
2829 2845
2830 return -1; 2846 goto err_out;
2831 } 2847 }
2832 2848
2833 request_region (dev.base+2, 6, "wavefront synth");
2834
2835 if (dev.has_fx) {
2836 request_region (dev.base+8, 8, "wavefront fx");
2837 }
2838
2839 if (wavefront_config_midi ()) { 2849 if (wavefront_config_midi ()) {
2840 printk (KERN_WARNING LOGNAME "could not initialize MIDI.\n"); 2850 printk (KERN_WARNING LOGNAME "could not initialize MIDI.\n");
2841 } 2851 }
2842 2852
2843 return dev.oss_dev; 2853 return dev.oss_dev;
2854
2855err_out:
2856 release_region (dev.base+2, 6);
2857 if (dev.has_fx)
2858 release_region (dev.base+8, 8);
2859 return -1;
2844} 2860}
2845 2861
2846static void __exit uninstall_wavefront (void) 2862static void __exit uninstall_wavefront (void)