aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/powerpc/Kconfig864
-rw-r--r--arch/powerpc/Kconfig.debug128
-rw-r--r--arch/powerpc/Makefile219
-rw-r--r--arch/powerpc/kernel/Makefile52
-rw-r--r--arch/powerpc/kernel/asm-offsets.c274
-rw-r--r--arch/powerpc/kernel/binfmt_elf32.c (renamed from arch/ppc64/kernel/binfmt_elf32.c)3
-rw-r--r--arch/powerpc/kernel/btext.c853
-rw-r--r--arch/powerpc/kernel/cputable.c (renamed from arch/ppc/kernel/cputable.c)799
-rw-r--r--arch/powerpc/kernel/entry_32.S1002
-rw-r--r--arch/powerpc/kernel/entry_64.S842
-rw-r--r--arch/powerpc/kernel/fpu.S (renamed from arch/ppc/kernel/fpu.S)74
-rw-r--r--arch/powerpc/kernel/head_32.S1371
-rw-r--r--arch/powerpc/kernel/head_44x.S782
-rw-r--r--arch/powerpc/kernel/head_4xx.S1022
-rw-r--r--arch/powerpc/kernel/head_64.S1957
-rw-r--r--arch/powerpc/kernel/head_8xx.S860
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S1063
-rw-r--r--arch/powerpc/kernel/idle_6xx.S233
-rw-r--r--arch/powerpc/kernel/idle_power4.S (renamed from arch/ppc64/kernel/idle_power4.S)9
-rw-r--r--arch/powerpc/kernel/init_task.c (renamed from arch/ppc64/kernel/init_task.c)0
-rw-r--r--arch/powerpc/kernel/lparmap.c (renamed from arch/ppc64/kernel/lparmap.c)0
-rw-r--r--arch/powerpc/kernel/misc_32.S1064
-rw-r--r--arch/powerpc/kernel/misc_64.S899
-rw-r--r--arch/powerpc/kernel/of_device.c (renamed from arch/ppc64/kernel/of_device.c)4
-rw-r--r--arch/powerpc/kernel/pmc.c (renamed from arch/ppc64/kernel/pmc.c)30
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c280
-rw-r--r--arch/powerpc/kernel/process.c (renamed from arch/ppc64/kernel/process.c)484
-rw-r--r--arch/powerpc/kernel/prom.c2125
-rw-r--r--arch/powerpc/kernel/prom_init.c2065
-rw-r--r--arch/powerpc/kernel/ptrace.c (renamed from arch/ppc/kernel/ptrace.c)170
-rw-r--r--arch/powerpc/kernel/ptrace32.c (renamed from arch/ppc64/kernel/ptrace32.c)9
-rw-r--r--arch/powerpc/kernel/semaphore.c135
-rw-r--r--arch/powerpc/kernel/setup_32.c652
-rw-r--r--arch/powerpc/kernel/setup_64.c1307
-rw-r--r--arch/powerpc/kernel/signal_32.c (renamed from arch/ppc64/kernel/signal32.c)991
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c (renamed from arch/ppc64/kernel/sys_ppc32.c)320
-rw-r--r--arch/powerpc/kernel/syscalls.c (renamed from arch/ppc64/kernel/syscalls.c)187
-rw-r--r--arch/powerpc/kernel/systbl.S321
-rw-r--r--arch/powerpc/kernel/time.c (renamed from arch/ppc64/kernel/time.c)512
-rw-r--r--arch/powerpc/kernel/traps.c1101
-rw-r--r--arch/powerpc/kernel/vecemu.c (renamed from arch/ppc/kernel/vecemu.c)0
-rw-r--r--arch/powerpc/kernel/vector.S (renamed from arch/ppc64/kernel/vector.S)71
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S279
-rw-r--r--arch/powerpc/lib/Makefile13
-rw-r--r--arch/powerpc/lib/checksum_32.S225
-rw-r--r--arch/powerpc/lib/checksum_64.S229
-rw-r--r--arch/powerpc/lib/copy_32.S543
-rw-r--r--arch/powerpc/lib/copypage_64.S121
-rw-r--r--arch/powerpc/lib/copyuser_64.S576
-rw-r--r--arch/powerpc/lib/div64.S59
-rw-r--r--arch/powerpc/lib/e2a.c108
-rw-r--r--arch/powerpc/lib/locks.c95
-rw-r--r--arch/powerpc/lib/mem_64.S119
-rw-r--r--arch/powerpc/lib/memcpy_64.S172
-rw-r--r--arch/powerpc/lib/rheap.c693
-rw-r--r--arch/powerpc/lib/sstep.c141
-rw-r--r--arch/powerpc/lib/strcase.c23
-rw-r--r--arch/powerpc/lib/string.S198
-rw-r--r--arch/powerpc/lib/usercopy_64.c41
-rw-r--r--arch/powerpc/mm/44x_mmu.c120
-rw-r--r--arch/powerpc/mm/4xx_mmu.c141
-rw-r--r--arch/powerpc/mm/Makefile21
-rw-r--r--arch/powerpc/mm/fault.c (renamed from arch/ppc64/mm/fault.c)102
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c237
-rw-r--r--arch/powerpc/mm/hash_low_32.S618
-rw-r--r--arch/powerpc/mm/hash_low_64.S (renamed from arch/ppc64/mm/hash_low.S)2
-rw-r--r--arch/powerpc/mm/hash_native_64.c (renamed from arch/ppc64/mm/hash_native.c)13
-rw-r--r--arch/powerpc/mm/hash_utils_64.c (renamed from arch/ppc64/mm/hash_utils.c)61
-rw-r--r--arch/powerpc/mm/hugetlbpage.c (renamed from arch/ppc64/mm/hugetlbpage.c)0
-rw-r--r--arch/powerpc/mm/imalloc.c (renamed from arch/ppc64/mm/imalloc.c)0
-rw-r--r--arch/powerpc/mm/init_32.c252
-rw-r--r--arch/powerpc/mm/init_64.c223
-rw-r--r--arch/powerpc/mm/lmb.c (renamed from arch/ppc64/kernel/lmb.c)105
-rw-r--r--arch/powerpc/mm/mem.c484
-rw-r--r--arch/powerpc/mm/mmap.c (renamed from arch/ppc64/mm/mmap.c)0
-rw-r--r--arch/powerpc/mm/mmu_context_32.c86
-rw-r--r--arch/powerpc/mm/mmu_context_64.c63
-rw-r--r--arch/powerpc/mm/mmu_decl.h87
-rw-r--r--arch/powerpc/mm/numa.c (renamed from arch/ppc64/mm/numa.c)0
-rw-r--r--arch/powerpc/mm/pgtable_32.c469
-rw-r--r--arch/powerpc/mm/pgtable_64.c349
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c285
-rw-r--r--arch/powerpc/mm/slb.c (renamed from arch/ppc64/mm/slb.c)0
-rw-r--r--arch/powerpc/mm/slb_low.S (renamed from arch/ppc64/mm/slb_low.S)0
-rw-r--r--arch/powerpc/mm/stab.c (renamed from arch/ppc64/mm/stab.c)0
-rw-r--r--arch/powerpc/mm/tlb_32.c183
-rw-r--r--arch/powerpc/mm/tlb_64.c (renamed from arch/ppc64/mm/tlb.c)23
-rw-r--r--arch/powerpc/oprofile/Kconfig (renamed from arch/ppc/oprofile/Kconfig)0
-rw-r--r--arch/powerpc/oprofile/Makefile (renamed from arch/ppc/oprofile/Makefile)7
-rw-r--r--arch/powerpc/oprofile/common.c (renamed from arch/ppc64/oprofile/common.c)84
-rw-r--r--arch/powerpc/oprofile/op_model_fsl_booke.c (renamed from arch/ppc/oprofile/op_model_fsl_booke.c)7
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c (renamed from arch/ppc64/oprofile/op_model_power4.c)2
-rw-r--r--arch/powerpc/oprofile/op_model_rs64.c (renamed from arch/ppc64/oprofile/op_model_rs64.c)2
-rw-r--r--arch/powerpc/platforms/4xx/Kconfig280
-rw-r--r--arch/powerpc/platforms/4xx/Makefile1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig86
-rw-r--r--arch/powerpc/platforms/85xx/Makefile1
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig352
-rw-r--r--arch/powerpc/platforms/Makefile11
-rw-r--r--arch/powerpc/platforms/apus/Kconfig130
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig305
-rw-r--r--arch/powerpc/platforms/iseries/Kconfig31
-rw-r--r--arch/powerpc/platforms/iseries/Makefile9
-rw-r--r--arch/powerpc/platforms/iseries/call_hpt.h (renamed from include/asm-ppc64/iSeries/HvCallHpt.h)7
-rw-r--r--arch/powerpc/platforms/iseries/call_pci.h (renamed from include/asm-ppc64/iSeries/HvCallPci.h)249
-rw-r--r--arch/powerpc/platforms/iseries/call_sm.h (renamed from include/asm-ppc64/iSeries/HvCallSm.h)7
-rw-r--r--arch/powerpc/platforms/iseries/htab.c (renamed from arch/ppc64/kernel/iSeries_htab.c)47
-rw-r--r--arch/powerpc/platforms/iseries/hvcall.S (renamed from arch/ppc64/kernel/hvCall.S)22
-rw-r--r--arch/powerpc/platforms/iseries/hvlog.c (renamed from arch/ppc64/kernel/HvCall.c)1
-rw-r--r--arch/powerpc/platforms/iseries/hvlpconfig.c (renamed from arch/ppc64/kernel/HvLpConfig.c)1
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c (renamed from arch/ppc64/kernel/iSeries_iommu.c)44
-rw-r--r--arch/powerpc/platforms/iseries/ipl_parms.h (renamed from include/asm-ppc64/iSeries/ItIplParmsReal.h)7
-rw-r--r--arch/powerpc/platforms/iseries/irq.c (renamed from arch/ppc64/kernel/iSeries_irq.c)17
-rw-r--r--arch/powerpc/platforms/iseries/irq.h (renamed from include/asm-ppc64/iSeries/iSeries_irq.h)6
-rw-r--r--arch/powerpc/platforms/iseries/ksyms.c27
-rw-r--r--arch/powerpc/platforms/iseries/lpardata.c (renamed from arch/ppc64/kernel/LparData.c)28
-rw-r--r--arch/powerpc/platforms/iseries/lpevents.c (renamed from arch/ppc64/kernel/ItLpQueue.c)71
-rw-r--r--arch/powerpc/platforms/iseries/main_store.h (renamed from include/asm-ppc64/iSeries/IoHriMainStore.h)7
-rw-r--r--arch/powerpc/platforms/iseries/mf.c (renamed from arch/ppc64/kernel/mf.c)98
-rw-r--r--arch/powerpc/platforms/iseries/misc.S55
-rw-r--r--arch/powerpc/platforms/iseries/pci.c (renamed from arch/ppc64/kernel/iSeries_pci.c)173
-rw-r--r--arch/powerpc/platforms/iseries/pci.h (renamed from include/asm-ppc64/iSeries/iSeries_pci.h)49
-rw-r--r--arch/powerpc/platforms/iseries/proc.c (renamed from arch/ppc64/kernel/iSeries_proc.c)15
-rw-r--r--arch/powerpc/platforms/iseries/processor_vpd.h (renamed from include/asm-ppc64/iSeries/IoHriProcessorVpd.h)7
-rw-r--r--arch/powerpc/platforms/iseries/release_data.h (renamed from include/asm-ppc64/iSeries/HvReleaseData.h)7
-rw-r--r--arch/powerpc/platforms/iseries/setup.c (renamed from arch/ppc64/kernel/iSeries_setup.c)493
-rw-r--r--arch/powerpc/platforms/iseries/setup.h (renamed from arch/ppc64/kernel/iSeries_setup.h)4
-rw-r--r--arch/powerpc/platforms/iseries/smp.c (renamed from arch/ppc64/kernel/iSeries_smp.c)46
-rw-r--r--arch/powerpc/platforms/iseries/spcomm_area.h (renamed from include/asm-ppc64/iSeries/ItSpCommArea.h)7
-rw-r--r--arch/powerpc/platforms/iseries/vio.c (renamed from arch/ppc64/kernel/iSeries_vio.c)1
-rw-r--r--arch/powerpc/platforms/iseries/viopath.c (renamed from arch/ppc64/kernel/viopath.c)3
-rw-r--r--arch/powerpc/platforms/iseries/vpd_areas.h (renamed from include/asm-ppc64/iSeries/ItVpdAreas.h)7
-rw-r--r--arch/powerpc/platforms/iseries/vpdinfo.c (renamed from arch/ppc64/kernel/iSeries_VpdInfo.c)21
-rw-r--r--arch/powerpc/platforms/powermac/Makefile8
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c202
-rw-r--r--arch/powerpc/platforms/powermac/cache.S359
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq.c726
-rw-r--r--arch/powerpc/platforms/powermac/feature.c3063
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c (renamed from arch/ppc64/kernel/pmac_low_i2c.c)0
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c (renamed from arch/ppc64/kernel/pmac_nvram.c)282
-rw-r--r--arch/powerpc/platforms/powermac/pci.c1213
-rw-r--r--arch/powerpc/platforms/powermac/pic.c682
-rw-r--r--arch/powerpc/platforms/powermac/pic.h11
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h51
-rw-r--r--arch/powerpc/platforms/powermac/setup.c789
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S396
-rw-r--r--arch/powerpc/platforms/powermac/smp.c865
-rw-r--r--arch/powerpc/platforms/powermac/time.c364
-rw-r--r--arch/powerpc/platforms/prep/Kconfig22
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig47
-rw-r--r--arch/powerpc/platforms/pseries/Makefile4
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S (renamed from arch/ppc64/kernel/pSeries_hvCall.S)0
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c (renamed from arch/ppc64/kernel/pSeries_iommu.c)28
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c (renamed from arch/ppc64/kernel/pSeries_lpar.c)5
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c (renamed from arch/ppc64/kernel/pSeries_nvram.c)0
-rw-r--r--arch/powerpc/platforms/pseries/pci.c (renamed from arch/ppc64/kernel/pSeries_pci.c)3
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c (renamed from arch/ppc64/kernel/pSeries_reconfig.c)0
-rw-r--r--arch/powerpc/platforms/pseries/setup.c (renamed from arch/ppc64/kernel/pSeries_setup.c)30
-rw-r--r--arch/powerpc/platforms/pseries/smp.c (renamed from arch/ppc64/kernel/pSeries_smp.c)50
-rw-r--r--arch/powerpc/platforms/pseries/vio.c (renamed from arch/ppc64/kernel/pSeries_vio.c)1
-rw-r--r--arch/powerpc/sysdev/Makefile3
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c134
-rw-r--r--arch/powerpc/sysdev/mpic.c (renamed from arch/ppc64/kernel/mpic.c)48
-rw-r--r--arch/ppc/Kconfig8
-rw-r--r--arch/ppc/Makefile7
-rw-r--r--arch/ppc/kernel/Makefile27
-rw-r--r--arch/ppc/kernel/asm-offsets.c3
-rw-r--r--arch/ppc/kernel/cpu_setup_6xx.S6
-rw-r--r--arch/ppc/kernel/cpu_setup_power4.S6
-rw-r--r--arch/ppc/kernel/entry.S12
-rw-r--r--arch/ppc/kernel/head.S100
-rw-r--r--arch/ppc/kernel/head_44x.S32
-rw-r--r--arch/ppc/kernel/head_4xx.S68
-rw-r--r--arch/ppc/kernel/head_8xx.S42
-rw-r--r--arch/ppc/kernel/head_booke.h4
-rw-r--r--arch/ppc/kernel/head_fsl_booke.S47
-rw-r--r--arch/ppc/kernel/idle.c3
-rw-r--r--arch/ppc/kernel/irq.c1
-rw-r--r--arch/ppc/kernel/l2cr.S2
-rw-r--r--arch/ppc/kernel/misc.S208
-rw-r--r--arch/ppc/kernel/pci.c28
-rw-r--r--arch/ppc/kernel/perfmon.c96
-rw-r--r--arch/ppc/kernel/perfmon_fsl_booke.c2
-rw-r--r--arch/ppc/kernel/ppc_ksyms.c33
-rw-r--r--arch/ppc/kernel/process.c138
-rw-r--r--arch/ppc/kernel/setup.c39
-rw-r--r--arch/ppc/kernel/signal.c771
-rw-r--r--arch/ppc/kernel/smp.c22
-rw-r--r--arch/ppc/kernel/syscalls.c268
-rw-r--r--arch/ppc/kernel/time.c9
-rw-r--r--arch/ppc/kernel/traps.c40
-rw-r--r--arch/ppc/kernel/vector.S217
-rw-r--r--arch/ppc/kernel/vmlinux.lds.S26
-rw-r--r--arch/ppc/lib/string.S24
-rw-r--r--arch/ppc/mm/init.c13
-rw-r--r--arch/ppc/oprofile/common.c161
-rw-r--r--arch/ppc/oprofile/op_impl.h45
-rw-r--r--arch/ppc/platforms/4xx/ebony.c2
-rw-r--r--arch/ppc/platforms/83xx/mpc834x_sys.h1
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_ads_common.h1
-rw-r--r--arch/ppc/platforms/85xx/stx_gp3.h1
-rw-r--r--arch/ppc/platforms/chestnut.c1
-rw-r--r--arch/ppc/platforms/chrp_pci.c10
-rw-r--r--arch/ppc/platforms/chrp_setup.c26
-rw-r--r--arch/ppc/platforms/chrp_smp.c3
-rw-r--r--arch/ppc/platforms/chrp_time.c8
-rw-r--r--arch/ppc/platforms/ev64360.c1
-rw-r--r--arch/ppc/platforms/gemini_setup.c4
-rw-r--r--arch/ppc/platforms/hdpu.c4
-rw-r--r--arch/ppc/platforms/katana.c3
-rw-r--r--arch/ppc/platforms/lite5200.c1
-rw-r--r--arch/ppc/platforms/lopec.c11
-rw-r--r--arch/ppc/platforms/pal4_setup.c1
-rw-r--r--arch/ppc/platforms/pmac_backlight.c16
-rw-r--r--arch/ppc/platforms/pmac_cpufreq.c36
-rw-r--r--arch/ppc/platforms/pmac_feature.c176
-rw-r--r--arch/ppc/platforms/pmac_nvram.c42
-rw-r--r--arch/ppc/platforms/pmac_pci.c28
-rw-r--r--arch/ppc/platforms/pmac_pic.c27
-rw-r--r--arch/ppc/platforms/pmac_setup.c19
-rw-r--r--arch/ppc/platforms/pmac_sleep.S4
-rw-r--r--arch/ppc/platforms/pmac_smp.c11
-rw-r--r--arch/ppc/platforms/pmac_time.c8
-rw-r--r--arch/ppc/platforms/pplus.c12
-rw-r--r--arch/ppc/platforms/prep_pci.c64
-rw-r--r--arch/ppc/platforms/prep_setup.c62
-rw-r--r--arch/ppc/platforms/radstone_ppc7d.c9
-rw-r--r--arch/ppc/platforms/residual.c2
-rw-r--r--arch/ppc/platforms/sandpoint.c11
-rw-r--r--arch/ppc/syslib/Makefile2
-rw-r--r--arch/ppc/syslib/btext.c6
-rw-r--r--arch/ppc/syslib/gt64260_pic.c1
-rw-r--r--arch/ppc/syslib/ibm440gx_common.c6
-rw-r--r--arch/ppc/syslib/ibm44x_common.c2
-rw-r--r--arch/ppc/syslib/m82xx_pci.c2
-rw-r--r--arch/ppc/syslib/m8xx_setup.c1
-rw-r--r--arch/ppc/syslib/mpc52xx_pci.c3
-rw-r--r--arch/ppc/syslib/mpc83xx_devices.c1
-rw-r--r--arch/ppc/syslib/mv64360_pic.c1
-rw-r--r--arch/ppc/syslib/mv64x60.c2
-rw-r--r--arch/ppc/syslib/mv64x60_dbg.c1
-rw-r--r--arch/ppc/syslib/of_device.c276
-rw-r--r--arch/ppc/syslib/open_pic.c3
-rw-r--r--arch/ppc/syslib/open_pic2.c1
-rw-r--r--arch/ppc/syslib/ppc403_pic.c1
-rw-r--r--arch/ppc/syslib/ppc4xx_pic.c1
-rw-r--r--arch/ppc/syslib/ppc4xx_setup.c2
-rw-r--r--arch/ppc/syslib/ppc83xx_setup.c1
-rw-r--r--arch/ppc/syslib/ppc85xx_setup.c1
-rw-r--r--arch/ppc/syslib/pq2_devices.c1
-rw-r--r--arch/ppc/syslib/prep_nvram.c13
-rw-r--r--arch/ppc/syslib/prom.c18
-rw-r--r--arch/ppc/syslib/xilinx_pic.c1
-rw-r--r--arch/ppc/xmon/start.c3
-rw-r--r--arch/ppc64/Kconfig9
-rw-r--r--arch/ppc64/Makefile8
-rw-r--r--arch/ppc64/kernel/HvLpEvent.c88
-rw-r--r--arch/ppc64/kernel/Makefile67
-rw-r--r--arch/ppc64/kernel/asm-offsets.c3
-rw-r--r--arch/ppc64/kernel/bpa_iommu.c2
-rw-r--r--arch/ppc64/kernel/bpa_setup.c7
-rw-r--r--arch/ppc64/kernel/btext.c42
-rw-r--r--arch/ppc64/kernel/cputable.c308
-rw-r--r--arch/ppc64/kernel/eeh.c2
-rw-r--r--arch/ppc64/kernel/entry.S40
-rw-r--r--arch/ppc64/kernel/head.S231
-rw-r--r--arch/ppc64/kernel/idle.c8
-rw-r--r--arch/ppc64/kernel/kprobes.c1
-rw-r--r--arch/ppc64/kernel/maple_pci.c7
-rw-r--r--arch/ppc64/kernel/maple_setup.c6
-rw-r--r--arch/ppc64/kernel/maple_time.c7
-rw-r--r--arch/ppc64/kernel/misc.S188
-rw-r--r--arch/ppc64/kernel/pci.c41
-rw-r--r--arch/ppc64/kernel/pci_direct_iommu.c3
-rw-r--r--arch/ppc64/kernel/pci_dn.c3
-rw-r--r--arch/ppc64/kernel/pci_iommu.c21
-rw-r--r--arch/ppc64/kernel/pmac.h31
-rw-r--r--arch/ppc64/kernel/pmac_feature.c767
-rw-r--r--arch/ppc64/kernel/pmac_pci.c793
-rw-r--r--arch/ppc64/kernel/pmac_setup.c525
-rw-r--r--arch/ppc64/kernel/pmac_smp.c330
-rw-r--r--arch/ppc64/kernel/pmac_time.c195
-rw-r--r--arch/ppc64/kernel/ppc_ksyms.c20
-rw-r--r--arch/ppc64/kernel/prom.c5
-rw-r--r--arch/ppc64/kernel/ptrace.c363
-rw-r--r--arch/ppc64/kernel/rtas-proc.c1
-rw-r--r--arch/ppc64/kernel/rtas.c1
-rw-r--r--arch/ppc64/kernel/rtas_pci.c9
-rw-r--r--arch/ppc64/kernel/rtc.c48
-rw-r--r--arch/ppc64/kernel/setup.c51
-rw-r--r--arch/ppc64/kernel/smp.c40
-rw-r--r--arch/ppc64/kernel/traps.c568
-rw-r--r--arch/ppc64/kernel/u3_iommu.c50
-rw-r--r--arch/ppc64/kernel/vdso64/sigtramp.S1
-rw-r--r--arch/ppc64/kernel/vecemu.c346
-rw-r--r--arch/ppc64/kernel/vmlinux.lds.S17
-rw-r--r--arch/ppc64/mm/Makefile11
-rw-r--r--arch/ppc64/mm/init.c869
-rw-r--r--arch/ppc64/oprofile/Kconfig23
-rw-r--r--arch/ppc64/oprofile/Makefile9
-rw-r--r--drivers/ide/ppc/pmac.c80
-rw-r--r--drivers/macintosh/ans-lcd.c10
-rw-r--r--drivers/macintosh/mediabay.c56
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/via-cuda.c1
-rw-r--r--drivers/macintosh/via-pmu.c142
-rw-r--r--drivers/macintosh/via-pmu68k.c15
-rw-r--r--drivers/net/iseries_veth.c11
-rw-r--r--fs/proc/proc_misc.c8
-rw-r--r--include/asm-powerpc/a.out.h (renamed from include/asm-ppc64/a.out.h)21
-rw-r--r--include/asm-powerpc/atomic.h (renamed from include/asm-ppc/atomic.h)45
-rw-r--r--include/asm-powerpc/auxvec.h (renamed from include/asm-ppc64/auxvec.h)8
-rw-r--r--include/asm-powerpc/backlight.h (renamed from include/asm-ppc/backlight.h)9
-rw-r--r--include/asm-powerpc/bug.h (renamed from include/asm-ppc64/bug.h)38
-rw-r--r--include/asm-powerpc/byteorder.h (renamed from include/asm-ppc64/byteorder.h)11
-rw-r--r--include/asm-powerpc/cputable.h427
-rw-r--r--include/asm-powerpc/dbdma.h (renamed from include/asm-ppc/dbdma.h)0
-rw-r--r--include/asm-powerpc/dma.h (renamed from include/asm-ppc/dma.h)91
-rw-r--r--include/asm-powerpc/elf.h (renamed from include/asm-ppc64/elf.h)96
-rw-r--r--include/asm-powerpc/hardirq.h (renamed from include/asm-ppc/hardirq.h)14
-rw-r--r--include/asm-powerpc/heathrow.h (renamed from include/asm-ppc/heathrow.h)0
-rw-r--r--include/asm-powerpc/hw_irq.h (renamed from include/asm-ppc64/hw_irq.h)66
-rw-r--r--include/asm-powerpc/i8259.h (renamed from include/asm-ppc/i8259.h)6
-rw-r--r--include/asm-powerpc/iommu.h (renamed from include/asm-ppc64/iommu.h)41
-rw-r--r--include/asm-powerpc/irq.h (renamed from include/asm-ppc/irq.h)166
-rw-r--r--include/asm-powerpc/kdebug.h (renamed from include/asm-ppc64/kdebug.h)11
-rw-r--r--include/asm-powerpc/keylargo.h (renamed from include/asm-ppc/keylargo.h)0
-rw-r--r--include/asm-powerpc/kmap_types.h33
-rw-r--r--include/asm-powerpc/kprobes.h (renamed from include/asm-ppc64/kprobes.h)7
-rw-r--r--include/asm-powerpc/lmb.h (renamed from include/asm-ppc64/lmb.h)2
-rw-r--r--include/asm-powerpc/machdep.h (renamed from include/asm-ppc64/machdep.h)123
-rw-r--r--include/asm-powerpc/macio.h (renamed from include/asm-ppc/macio.h)0
-rw-r--r--include/asm-powerpc/mediabay.h (renamed from include/asm-ppc/mediabay.h)0
-rw-r--r--include/asm-powerpc/mpic.h (renamed from arch/ppc64/kernel/mpic.h)14
-rw-r--r--include/asm-powerpc/of_device.h (renamed from include/asm-ppc/of_device.h)7
-rw-r--r--include/asm-powerpc/ohare.h (renamed from include/asm-ppc/ohare.h)0
-rw-r--r--include/asm-powerpc/oprofile_impl.h (renamed from include/asm-ppc64/oprofile_impl.h)24
-rw-r--r--include/asm-powerpc/pSeries_reconfig.h (renamed from include/asm-ppc64/pSeries_reconfig.h)0
-rw-r--r--include/asm-powerpc/pmac_feature.h (renamed from include/asm-ppc/pmac_feature.h)0
-rw-r--r--include/asm-powerpc/pmac_low_i2c.h (renamed from include/asm-ppc/pmac_low_i2c.h)0
-rw-r--r--include/asm-powerpc/pmc.h (renamed from include/asm-ppc64/pmc.h)21
-rw-r--r--include/asm-powerpc/posix_types.h (renamed from include/asm-ppc64/posix_types.h)40
-rw-r--r--include/asm-powerpc/ppc-pci.h (renamed from arch/ppc64/kernel/pci.h)6
-rw-r--r--include/asm-powerpc/ppc_asm.h (renamed from include/asm-ppc/ppc_asm.h)294
-rw-r--r--include/asm-powerpc/processor.h (renamed from include/asm-ppc/processor.h)185
-rw-r--r--include/asm-powerpc/prom.h219
-rw-r--r--include/asm-powerpc/reg.h (renamed from include/asm-ppc/reg.h)295
-rw-r--r--include/asm-powerpc/rtas.h (renamed from include/asm-ppc64/rtas.h)8
-rw-r--r--include/asm-powerpc/rtc.h78
-rw-r--r--include/asm-powerpc/rwsem.h (renamed from include/asm-ppc64/rwsem.h)18
-rw-r--r--include/asm-powerpc/seccomp.h (renamed from include/asm-ppc64/seccomp.h)11
-rw-r--r--include/asm-powerpc/sections.h (renamed from include/asm-ppc64/sections.h)21
-rw-r--r--include/asm-powerpc/semaphore.h (renamed from include/asm-ppc64/semaphore.h)6
-rw-r--r--include/asm-powerpc/smu.h (renamed from include/asm-ppc64/smu.h)0
-rw-r--r--include/asm-powerpc/spinlock_types.h (renamed from include/asm-ppc64/spinlock_types.h)4
-rw-r--r--include/asm-powerpc/statfs.h (renamed from include/asm-ppc64/statfs.h)19
-rw-r--r--include/asm-powerpc/synch.h51
-rw-r--r--include/asm-powerpc/system.h362
-rw-r--r--include/asm-powerpc/thread_info.h (renamed from include/asm-ppc64/thread_info.h)34
-rw-r--r--include/asm-powerpc/time.h214
-rw-r--r--include/asm-powerpc/types.h (renamed from include/asm-ppc64/types.h)31
-rw-r--r--include/asm-powerpc/uninorth.h (renamed from include/asm-ppc/uninorth.h)0
-rw-r--r--include/asm-powerpc/unistd.h (renamed from include/asm-ppc/unistd.h)91
-rw-r--r--include/asm-powerpc/vga.h (renamed from include/asm-ppc64/vga.h)20
-rw-r--r--include/asm-powerpc/xmon.h12
-rw-r--r--include/asm-ppc/a.out.h26
-rw-r--r--include/asm-ppc/auxvec.h14
-rw-r--r--include/asm-ppc/bug.h58
-rw-r--r--include/asm-ppc/byteorder.h76
-rw-r--r--include/asm-ppc/cache.h13
-rw-r--r--include/asm-ppc/cputable.h129
-rw-r--r--include/asm-ppc/elf.h151
-rw-r--r--include/asm-ppc/hw_irq.h74
-rw-r--r--include/asm-ppc/io.h11
-rw-r--r--include/asm-ppc/kmap_types.h25
-rw-r--r--include/asm-ppc/machdep.h2
-rw-r--r--include/asm-ppc/mmu_context.h6
-rw-r--r--include/asm-ppc/open_pic.h3
-rw-r--r--include/asm-ppc/page.h18
-rw-r--r--include/asm-ppc/pci-bridge.h5
-rw-r--r--include/asm-ppc/pci.h4
-rw-r--r--include/asm-ppc/perfmon.h22
-rw-r--r--include/asm-ppc/posix_types.h111
-rw-r--r--include/asm-ppc/ptrace.h2
-rw-r--r--include/asm-ppc/rwsem.h172
-rw-r--r--include/asm-ppc/seccomp.h10
-rw-r--r--include/asm-ppc/sections.h33
-rw-r--r--include/asm-ppc/semaphore.h111
-rw-r--r--include/asm-ppc/smp.h18
-rw-r--r--include/asm-ppc/spinlock.h8
-rw-r--r--include/asm-ppc/spinlock_types.h20
-rw-r--r--include/asm-ppc/statfs.h8
-rw-r--r--include/asm-ppc/system.h23
-rw-r--r--include/asm-ppc/thread_info.h107
-rw-r--r--include/asm-ppc/types.h69
-rw-r--r--include/asm-ppc/vga.h46
-rw-r--r--include/asm-ppc/xmon.h17
-rw-r--r--include/asm-ppc64/abs_addr.h7
-rw-r--r--include/asm-ppc64/atomic.h197
-rw-r--r--include/asm-ppc64/bitops.h2
-rw-r--r--include/asm-ppc64/btext.h1
-rw-r--r--include/asm-ppc64/cputable.h167
-rw-r--r--include/asm-ppc64/dart.h59
-rw-r--r--include/asm-ppc64/dbdma.h2
-rw-r--r--include/asm-ppc64/dma.h329
-rw-r--r--include/asm-ppc64/futex.h2
-rw-r--r--include/asm-ppc64/hardirq.h27
-rw-r--r--include/asm-ppc64/io.h2
-rw-r--r--include/asm-ppc64/irq.h120
-rw-r--r--include/asm-ppc64/keylargo.h2
-rw-r--r--include/asm-ppc64/kmap_types.h23
-rw-r--r--include/asm-ppc64/macio.h2
-rw-r--r--include/asm-ppc64/memory.h61
-rw-r--r--include/asm-ppc64/mmu.h7
-rw-r--r--include/asm-ppc64/of_device.h2
-rw-r--r--include/asm-ppc64/page.h8
-rw-r--r--include/asm-ppc64/pci-bridge.h22
-rw-r--r--include/asm-ppc64/pmac_feature.h2
-rw-r--r--include/asm-ppc64/pmac_low_i2c.h2
-rw-r--r--include/asm-ppc64/ppc32.h14
-rw-r--r--include/asm-ppc64/ppc_asm.h242
-rw-r--r--include/asm-ppc64/processor.h558
-rw-r--r--include/asm-ppc64/prom.h4
-rw-r--r--include/asm-ppc64/smp.h1
-rw-r--r--include/asm-ppc64/system.h15
-rw-r--r--include/asm-ppc64/tce.h64
-rw-r--r--include/asm-ppc64/time.h124
-rw-r--r--include/asm-ppc64/tlbflush.h7
-rw-r--r--include/asm-ppc64/udbg.h3
-rw-r--r--include/asm-ppc64/uninorth.h2
-rw-r--r--include/asm-ppc64/unistd.h487
-rw-r--r--kernel/irq/handle.c6
432 files changed, 46549 insertions, 15585 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
new file mode 100644
index 000000000000..27f122e1f849
--- /dev/null
+++ b/arch/powerpc/Kconfig
@@ -0,0 +1,864 @@
1# For a description of the syntax of this configuration file,
2# see Documentation/kbuild/kconfig-language.txt.
3#
4
5mainmenu "Linux/PowerPC Kernel Configuration"
6
7config PPC64
8 bool "64-bit kernel"
9 default n
10 help
11 This option selects whether a 32-bit or a 64-bit kernel
12 will be built.
13
14config PPC32
15 bool
16 default y if !PPC64
17
18config 64BIT
19 bool
20 default y if PPC64
21
22config PPC_MERGE
23 def_bool y
24
25config MMU
26 bool
27 default y
28
29config UID16
30 bool
31
32config GENERIC_HARDIRQS
33 bool
34 default y
35
36config RWSEM_GENERIC_SPINLOCK
37 bool
38
39config RWSEM_XCHGADD_ALGORITHM
40 bool
41 default y
42
43config GENERIC_CALIBRATE_DELAY
44 bool
45 default y
46
47config PPC
48 bool
49 default y
50
51config EARLY_PRINTK
52 bool
53 default y if PPC64
54
55config COMPAT
56 bool
57 default y if PPC64
58
59config SYSVIPC_COMPAT
60 bool
61 depends on COMPAT && SYSVIPC
62 default y
63
64# All PPC32s use generic nvram driver through ppc_md
65config GENERIC_NVRAM
66 bool
67 default y if PPC32
68
69config SCHED_NO_NO_OMIT_FRAME_POINTER
70 bool
71 default y
72
73config ARCH_MAY_HAVE_PC_FDC
74 bool
75 default y
76
77menu "Processor support"
78choice
79 prompt "Processor Type"
80 depends on PPC32
81 default 6xx
82
83config 6xx
84 bool "6xx/7xx/74xx"
85 select PPC_FPU
86 help
87 There are four families of PowerPC chips supported. The more common
88 types (601, 603, 604, 740, 750, 7400), the Motorola embedded
89 versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
90 embedded versions (403 and 405) and the high end 64 bit Power
91 processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
92
93 Unless you are building a kernel for one of the embedded processor
94 systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
95 Note that the kernel runs in 32-bit mode even on 64-bit chips.
96
97config PPC_52xx
98 bool "Freescale 52xx"
99
100config PPC_82xx
101 bool "Freescale 82xx"
102
103config PPC_83xx
104 bool "Freescale 83xx"
105
106config 40x
107 bool "AMCC 40x"
108
109config 44x
110 bool "AMCC 44x"
111
112config 8xx
113 bool "Freescale 8xx"
114
115config E200
116 bool "Freescale e200"
117
118config E500
119 bool "Freescale e500"
120endchoice
121
122config POWER4_ONLY
123 bool "Optimize for POWER4"
124 depends on PPC64
125 default n
126 ---help---
127 Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
128 The resulting binary will not work on POWER3 or RS64 processors
129 when compiled with binutils 2.15 or later.
130
131config POWER3
132 bool
133 depends on PPC64
134 default y if !POWER4_ONLY
135
136config POWER4
137 depends on PPC64
138 def_bool y
139
140config PPC_FPU
141 bool
142 default y if PPC64
143
144config BOOKE
145 bool
146 depends on E200 || E500
147 default y
148
149config FSL_BOOKE
150 bool
151 depends on E200 || E500
152 default y
153
154config PTE_64BIT
155 bool
156 depends on 44x || E500
157 default y if 44x
158 default y if E500 && PHYS_64BIT
159
160config PHYS_64BIT
161 bool 'Large physical address support' if E500
162 depends on 44x || E500
163 default y if 44x
164 ---help---
165 This option enables kernel support for larger than 32-bit physical
166 addresses. This features is not be available on all e500 cores.
167
168 If in doubt, say N here.
169
170config ALTIVEC
171 bool "AltiVec Support"
172 depends on 6xx || POWER4
173 ---help---
174 This option enables kernel support for the Altivec extensions to the
175 PowerPC processor. The kernel currently supports saving and restoring
176 altivec registers, and turning on the 'altivec enable' bit so user
177 processes can execute altivec instructions.
178
179 This option is only usefully if you have a processor that supports
180 altivec (G4, otherwise known as 74xx series), but does not have
181 any affect on a non-altivec cpu (it does, however add code to the
182 kernel).
183
184 If in doubt, say Y here.
185
186config SPE
187 bool "SPE Support"
188 depends on E200 || E500
189 ---help---
190 This option enables kernel support for the Signal Processing
191 Extensions (SPE) to the PowerPC processor. The kernel currently
192 supports saving and restoring SPE registers, and turning on the
193 'spe enable' bit so user processes can execute SPE instructions.
194
195 This option is only useful if you have a processor that supports
196 SPE (e500, otherwise known as 85xx series), but does not have any
197 effect on a non-spe cpu (it does, however add code to the kernel).
198
199 If in doubt, say Y here.
200
201config PPC_STD_MMU
202 bool
203 depends on 6xx || POWER3 || POWER4 || PPC64
204 default y
205
206config PPC_STD_MMU_32
207 def_bool y
208 depends on PPC_STD_MMU && PPC32
209
210config SMP
211 depends on PPC_STD_MMU
212 bool "Symmetric multi-processing support"
213 ---help---
214 This enables support for systems with more than one CPU. If you have
215 a system with only one CPU, say N. If you have a system with more
216 than one CPU, say Y. Note that the kernel does not currently
217 support SMP machines with 603/603e/603ev or PPC750 ("G3") processors
218 since they have inadequate hardware support for multiprocessor
219 operation.
220
221 If you say N here, the kernel will run on single and multiprocessor
222 machines, but will use only one CPU of a multiprocessor machine. If
223 you say Y here, the kernel will run on single-processor machines.
224 On a single-processor machine, the kernel will run faster if you say
225 N here.
226
227 If you don't know what to do here, say N.
228
229config NR_CPUS
230 int "Maximum number of CPUs (2-32)"
231 range 2 128
232 depends on SMP
233 default "32" if PPC64
234 default "4"
235
236config NOT_COHERENT_CACHE
237 bool
238 depends on 4xx || 8xx || E200
239 default y
240endmenu
241
242source "init/Kconfig"
243
244menu "Platform support"
245 depends on PPC64 || 6xx
246
247choice
248 prompt "Machine type"
249 default PPC_MULTIPLATFORM
250
251config PPC_MULTIPLATFORM
252 bool "Generic desktop/server/laptop"
253 help
254 Select this option if configuring for an IBM pSeries or
255 RS/6000 machine, an Apple machine, or a PReP, CHRP,
256 Maple or Cell-based machine.
257
258config PPC_ISERIES
259 bool "IBM Legacy iSeries"
260 depends on PPC64
261
262config EMBEDDED6xx
263 bool "Embedded 6xx/7xx/7xxx-based board"
264 depends on PPC32
265
266config APUS
267 bool "Amiga-APUS"
268 depends on PPC32 && BROKEN
269 help
270 Select APUS if configuring for a PowerUP Amiga.
271 More information is available at:
272 <http://linux-apus.sourceforge.net/>.
273endchoice
274
275config PPC_PSERIES
276 depends on PPC_MULTIPLATFORM && PPC64
277 bool " IBM pSeries & new (POWER5-based) iSeries"
278 default y
279
280config PPC_CHRP
281 bool " Common Hardware Reference Platform (CHRP) based machines"
282 depends on PPC_MULTIPLATFORM && PPC32
283 default y
284
285config PPC_PMAC
286 bool " Apple PowerMac based machines"
287 depends on PPC_MULTIPLATFORM
288 default y
289
290config PPC_PMAC64
291 bool
292 depends on PPC_PMAC && POWER4
293 select U3_DART
294 default y
295
296config PPC_PREP
297 bool " PowerPC Reference Platform (PReP) based machines"
298 depends on PPC_MULTIPLATFORM && PPC32
299 default y
300
301config PPC_MAPLE
302 depends on PPC_MULTIPLATFORM && PPC64
303 bool " Maple 970FX Evaluation Board"
304 select U3_DART
305 select MPIC_BROKEN_U3
306 default n
307 help
308 This option enables support for the Maple 970FX Evaluation Board.
309 For more informations, refer to <http://www.970eval.com>
310
311config PPC_BPA
312 bool " Broadband Processor Architecture"
313 depends on PPC_MULTIPLATFORM && PPC64
314
315config PPC_OF
316 bool
317 depends on PPC_MULTIPLATFORM # for now
318 default y
319
320config XICS
321 depends on PPC_PSERIES
322 bool
323 default y
324
325config U3_DART
326 bool
327 depends on PPC_MULTIPLATFORM && PPC64
328 default n
329
330config MPIC
331 depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
332 bool
333 default y
334
335config MPIC_BROKEN_U3
336 bool
337 depends on PPC_MAPLE
338 default y
339
340config BPA_IIC
341 depends on PPC_BPA
342 bool
343 default y
344
345config IBMVIO
346 depends on PPC_PSERIES || PPC_ISERIES
347 bool
348 default y
349
350source "drivers/cpufreq/Kconfig"
351
352config CPU_FREQ_PMAC
353 bool "Support for Apple PowerBooks"
354 depends on CPU_FREQ && ADB_PMU && PPC32
355 select CPU_FREQ_TABLE
356 help
357 This adds support for frequency switching on Apple PowerBooks,
358 this currently includes some models of iBook & Titanium
359 PowerBook.
360
361config PPC601_SYNC_FIX
362 bool "Workarounds for PPC601 bugs"
363 depends on 6xx && (PPC_PREP || PPC_PMAC)
364 help
365 Some versions of the PPC601 (the first PowerPC chip) have bugs which
366 mean that extra synchronization instructions are required near
367 certain instructions, typically those that make major changes to the
368 CPU state. These extra instructions reduce performance slightly.
369 If you say N here, these extra instructions will not be included,
370 resulting in a kernel which will run faster but may not run at all
371 on some systems with the PPC601 chip.
372
373 If in doubt, say Y here.
374
375config TAU
376 bool "Thermal Management Support"
377 depends on 6xx
378 help
379 G3 and G4 processors have an on-chip temperature sensor called the
380 'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die
381 temperature within 2-4 degrees Celsius. This option shows the current
382 on-die temperature in /proc/cpuinfo if the cpu supports it.
383
384 Unfortunately, on some chip revisions, this sensor is very inaccurate
385 and in some cases, does not work at all, so don't assume the cpu
386 temp is actually what /proc/cpuinfo says it is.
387
388config TAU_INT
389 bool "Interrupt driven TAU driver (DANGEROUS)"
390 depends on TAU
391 ---help---
392 The TAU supports an interrupt driven mode which causes an interrupt
393 whenever the temperature goes out of range. This is the fastest way
394 to get notified the temp has exceeded a range. With this option off,
395 a timer is used to re-check the temperature periodically.
396
397 However, on some cpus it appears that the TAU interrupt hardware
398 is buggy and can cause a situation which would lead to unexplained hard
399 lockups.
400
401 Unless you are extending the TAU driver, or enjoy kernel/hardware
402 debugging, leave this option off.
403
404config TAU_AVERAGE
405 bool "Average high and low temp"
406 depends on TAU
407 ---help---
408 The TAU hardware can compare the temperature to an upper and lower
409 bound. The default behavior is to show both the upper and lower
410 bound in /proc/cpuinfo. If the range is large, the temperature is
411 either changing a lot, or the TAU hardware is broken (likely on some
412 G4's). If the range is small (around 4 degrees), the temperature is
413 relatively stable. If you say Y here, a single temperature value,
414 halfway between the upper and lower bounds, will be reported in
415 /proc/cpuinfo.
416
417 If in doubt, say N here.
418endmenu
419
420source arch/powerpc/platforms/embedded6xx/Kconfig
421source arch/powerpc/platforms/4xx/Kconfig
422source arch/powerpc/platforms/85xx/Kconfig
423source arch/powerpc/platforms/8xx/Kconfig
424
425menu "Kernel options"
426
427config HIGHMEM
428 bool "High memory support"
429 depends on PPC32
430
431source kernel/Kconfig.hz
432source kernel/Kconfig.preempt
433source "fs/Kconfig.binfmt"
434
435# We optimistically allocate largepages from the VM, so make the limit
436# large enough (16MB). This badly named config option is actually
437# max order + 1
438config FORCE_MAX_ZONEORDER
439 int
440 depends on PPC64
441 default "13"
442
443config MATH_EMULATION
444 bool "Math emulation"
445 depends on 4xx || 8xx || E200 || E500
446 ---help---
447 Some PowerPC chips designed for embedded applications do not have
448 a floating-point unit and therefore do not implement the
449 floating-point instructions in the PowerPC instruction set. If you
450 say Y here, the kernel will include code to emulate a floating-point
451 unit, which will allow programs that use floating-point
452 instructions to run.
453
454config IOMMU_VMERGE
455 bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
456 depends on EXPERIMENTAL && PPC64
457 default n
458 help
459 Cause IO segments sent to a device for DMA to be merged virtually
460 by the IOMMU when they happen to have been allocated contiguously.
461 This doesn't add pressure to the IOMMU allocator. However, some
462 drivers don't support getting large merged segments coming back
463 from *_map_sg(). Say Y if you know the drivers you are using are
464 properly handling this case.
465
466config HOTPLUG_CPU
467 bool "Support for enabling/disabling CPUs"
468 depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
469 ---help---
470 Say Y here to be able to disable and re-enable individual
471 CPUs at runtime on SMP machines.
472
473 Say N if you are unsure.
474
475config KEXEC
476 bool "kexec system call (EXPERIMENTAL)"
477 depends on PPC_MULTIPLATFORM && EXPERIMENTAL
478 help
479 kexec is a system call that implements the ability to shutdown your
480 current kernel, and to start another kernel. It is like a reboot
481 but it is independent of the system firmware. And like a reboot
482 you can start any kernel with it, not just Linux.
483
484 The name comes from the similarity to the exec system call.
485
486 It is an ongoing process to be certain the hardware in a machine
487 is properly shutdown, so do not be surprised if this code does not
488 initially work for you. It may help to enable device hotplugging
489 support. As of this writing the exact hardware interface is
490 strongly in flux, so no good recommendation can be made.
491
492config EMBEDDEDBOOT
493 bool
494 depends on 8xx || 8260
495 default y
496
497config PC_KEYBOARD
498 bool "PC PS/2 style Keyboard"
499 depends on 4xx || CPM2
500
501config PPCBUG_NVRAM
502 bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC
503 default y if PPC_PREP
504
505config IRQ_ALL_CPUS
506 bool "Distribute interrupts on all CPUs by default"
507 depends on SMP && !MV64360
508 help
509 This option gives the kernel permission to distribute IRQs across
510 multiple CPUs. Saying N here will route all IRQs to the first
511 CPU. Generally saying Y is safe, although some problems have been
512 reported with SMP Power Macintoshes with this option enabled.
513
514source "arch/powerpc/platforms/pseries/Kconfig"
515
516config ARCH_SELECT_MEMORY_MODEL
517 def_bool y
518 depends on PPC64
519
520config ARCH_FLATMEM_ENABLE
521 def_bool y
522 depends on PPC64 && !NUMA
523
524config ARCH_DISCONTIGMEM_ENABLE
525 def_bool y
526 depends on SMP && PPC_PSERIES
527
528config ARCH_DISCONTIGMEM_DEFAULT
529 def_bool y
530 depends on ARCH_DISCONTIGMEM_ENABLE
531
532config ARCH_FLATMEM_ENABLE
533 def_bool y
534 depends on PPC64
535
536config ARCH_SPARSEMEM_ENABLE
537 def_bool y
538 depends on ARCH_DISCONTIGMEM_ENABLE
539
540source "mm/Kconfig"
541
542config HAVE_ARCH_EARLY_PFN_TO_NID
543 def_bool y
544 depends on NEED_MULTIPLE_NODES
545
546# Some NUMA nodes have memory ranges that span
547# other nodes. Even though a pfn is valid and
548# between a node's start and end pfns, it may not
549# reside on that node.
550#
551# This is a relatively temporary hack that should
552# be able to go away when sparsemem is fully in
553# place
554
555config NODES_SPAN_OTHER_NODES
556 def_bool y
557 depends on NEED_MULTIPLE_NODES
558
559config NUMA
560 bool "NUMA support"
561 default y if DISCONTIGMEM || SPARSEMEM
562
563config SCHED_SMT
564 bool "SMT (Hyperthreading) scheduler support"
565 depends on PPC64 && SMP
566 default n
567 help
568 SMT scheduler support improves the CPU scheduler's decision making
569 when dealing with POWER5 cpus at a cost of slightly increased
570 overhead in some places. If unsure say N here.
571
572config PROC_DEVICETREE
573 bool "Support for device tree in /proc"
574 depends on PROC_FS
575 help
576 This option adds a device-tree directory under /proc which contains
577 an image of the device tree that the kernel copies from Open
578 Firmware or other boot firmware. If unsure, say Y here.
579
580source "arch/powerpc/platforms/prep/Kconfig"
581
582config CMDLINE_BOOL
583 bool "Default bootloader kernel arguments"
584 depends on !PPC_ISERIES
585
586config CMDLINE
587 string "Initial kernel command string"
588 depends on CMDLINE_BOOL
589 default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
590 help
591 On some platforms, there is currently no way for the boot loader to
592 pass arguments to the kernel. For these platforms, you can supply
593 some command-line options at build time by entering them here. In
594 most cases you will need to specify the root device here.
595
596if !44x || BROKEN
597source kernel/power/Kconfig
598endif
599
600config SECCOMP
601 bool "Enable seccomp to safely compute untrusted bytecode"
602 depends on PROC_FS
603 default y
604 help
605 This kernel feature is useful for number crunching applications
606 that may need to compute untrusted bytecode during their
607 execution. By using pipes or other transports made available to
608 the process as file descriptors supporting the read/write
609 syscalls, it's possible to isolate those applications in
610 their own address space using seccomp. Once seccomp is
611 enabled via /proc/<pid>/seccomp, it cannot be disabled
612 and the task is only allowed to execute a few safe syscalls
613 defined by each seccomp mode.
614
615 If unsure, say Y. Only embedded should say N here.
616
617endmenu
618
619config ISA_DMA_API
620 bool
621 default y
622
623menu "Bus options"
624
625config ISA
626 bool "Support for ISA-bus hardware"
627 depends on PPC_PREP || PPC_CHRP
628 help
629 Find out whether you have ISA slots on your motherboard. ISA is the
630 name of a bus system, i.e. the way the CPU talks to the other stuff
631 inside your box. If you have an Apple machine, say N here; if you
632 have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
633 you have an embedded board, consult your board documentation.
634
635config GENERIC_ISA_DMA
636 bool
637 depends on PPC64 || POWER4 || 6xx && !CPM2
638 default y
639
640config EISA
641 bool
642
643config SBUS
644 bool
645
646# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
647config MCA
648 bool
649
650config PCI
651 bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
652 default y if !40x && !CPM2 && !8xx && !APUS && !83xx && !85xx
653 default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
654 default PCI_QSPAN if !4xx && !CPM2 && 8xx
655 help
656 Find out whether your system includes a PCI bus. PCI is the name of
657 a bus system, i.e. the way the CPU talks to the other stuff inside
658 your box. If you say Y here, the kernel will include drivers and
659 infrastructure code to support PCI bus devices.
660
661config PCI_DOMAINS
662 bool
663 default PCI
664
665config MPC83xx_PCI2
666 bool " Support for 2nd PCI host controller"
667 depends on PCI && MPC834x
668 default y if MPC834x_SYS
669
670config PCI_QSPAN
671 bool "QSpan PCI"
672 depends on !4xx && !CPM2 && 8xx
673 help
674 Say Y here if you have a system based on a Motorola 8xx-series
675 embedded processor with a QSPAN PCI interface, otherwise say N.
676
677config PCI_8260
678 bool
679 depends on PCI && 8260
680 default y
681
682config 8260_PCI9
683 bool " Enable workaround for MPC826x erratum PCI 9"
684 depends on PCI_8260 && !ADS8272
685 default y
686
687choice
688 prompt " IDMA channel for PCI 9 workaround"
689 depends on 8260_PCI9
690
691config 8260_PCI9_IDMA1
692 bool "IDMA1"
693
694config 8260_PCI9_IDMA2
695 bool "IDMA2"
696
697config 8260_PCI9_IDMA3
698 bool "IDMA3"
699
700config 8260_PCI9_IDMA4
701 bool "IDMA4"
702
703endchoice
704
705source "drivers/pci/Kconfig"
706
707source "drivers/pcmcia/Kconfig"
708
709source "drivers/pci/hotplug/Kconfig"
710
711endmenu
712
713menu "Advanced setup"
714 depends on PPC32
715
716config ADVANCED_OPTIONS
717 bool "Prompt for advanced kernel configuration options"
718 help
719 This option will enable prompting for a variety of advanced kernel
720 configuration options. These options can cause the kernel to not
721 work if they are set incorrectly, but can be used to optimize certain
722 aspects of kernel memory management.
723
724 Unless you know what you are doing, say N here.
725
726comment "Default settings for advanced configuration options are used"
727 depends on !ADVANCED_OPTIONS
728
729config HIGHMEM_START_BOOL
730 bool "Set high memory pool address"
731 depends on ADVANCED_OPTIONS && HIGHMEM
732 help
733 This option allows you to set the base address of the kernel virtual
734 area used to map high memory pages. This can be useful in
735 optimizing the layout of kernel virtual memory.
736
737 Say N here unless you know what you are doing.
738
739config HIGHMEM_START
740 hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
741 default "0xfe000000"
742
743config LOWMEM_SIZE_BOOL
744 bool "Set maximum low memory"
745 depends on ADVANCED_OPTIONS
746 help
747 This option allows you to set the maximum amount of memory which
748 will be used as "low memory", that is, memory which the kernel can
749 access directly, without having to set up a kernel virtual mapping.
750 This can be useful in optimizing the layout of kernel virtual
751 memory.
752
753 Say N here unless you know what you are doing.
754
755config LOWMEM_SIZE
756 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
757 default "0x30000000"
758
759config KERNEL_START_BOOL
760 bool "Set custom kernel base address"
761 depends on ADVANCED_OPTIONS
762 help
763 This option allows you to set the kernel virtual address at which
764 the kernel will map low memory (the kernel image will be linked at
765 this address). This can be useful in optimizing the virtual memory
766 layout of the system.
767
768 Say N here unless you know what you are doing.
769
770config KERNEL_START
771 hex "Virtual address of kernel base" if KERNEL_START_BOOL
772 default "0xc0000000"
773
774config TASK_SIZE_BOOL
775 bool "Set custom user task size"
776 depends on ADVANCED_OPTIONS
777 help
778 This option allows you to set the amount of virtual address space
779 allocated to user tasks. This can be useful in optimizing the
780 virtual memory layout of the system.
781
782 Say N here unless you know what you are doing.
783
784config TASK_SIZE
785 hex "Size of user task space" if TASK_SIZE_BOOL
786 default "0x80000000"
787
788config CONSISTENT_START_BOOL
789 bool "Set custom consistent memory pool address"
790 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
791 help
792 This option allows you to set the base virtual address
793 of the consistent memory pool. This pool of virtual
794 memory is used to make consistent memory allocations.
795
796config CONSISTENT_START
797 hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
798 default "0xff100000" if NOT_COHERENT_CACHE
799
800config CONSISTENT_SIZE_BOOL
801 bool "Set custom consistent memory pool size"
802 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
803 help
804 This option allows you to set the size of the
805 consistent memory pool. This pool of virtual memory
806 is used to make consistent memory allocations.
807
808config CONSISTENT_SIZE
809 hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
810 default "0x00200000" if NOT_COHERENT_CACHE
811
812config BOOT_LOAD_BOOL
813 bool "Set the boot link/load address"
814 depends on ADVANCED_OPTIONS && !PPC_MULTIPLATFORM
815 help
816 This option allows you to set the initial load address of the zImage
817 or zImage.initrd file. This can be useful if you are on a board
818 which has a small amount of memory.
819
820 Say N here unless you know what you are doing.
821
822config BOOT_LOAD
823 hex "Link/load address for booting" if BOOT_LOAD_BOOL
824 default "0x00400000" if 40x || 8xx || 8260
825 default "0x01000000" if 44x
826 default "0x00800000"
827
828config PIN_TLB
829 bool "Pinned Kernel TLBs (860 ONLY)"
830 depends on ADVANCED_OPTIONS && 8xx
831endmenu
832
833if PPC64
834config KERNEL_START
835 hex
836 default "0xc000000000000000"
837endif
838
839source "net/Kconfig"
840
841source "drivers/Kconfig"
842
843source "fs/Kconfig"
844
845# XXX source "arch/ppc/8xx_io/Kconfig"
846
847# XXX source "arch/ppc/8260_io/Kconfig"
848
849source "arch/powerpc/platforms/iseries/Kconfig"
850
851source "lib/Kconfig"
852
853source "arch/powerpc/oprofile/Kconfig"
854
855source "arch/powerpc/Kconfig.debug"
856
857source "security/Kconfig"
858
859config KEYS_COMPAT
860 bool
861 depends on COMPAT && KEYS
862 default y
863
864source "crypto/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
new file mode 100644
index 000000000000..0baf64ec80d0
--- /dev/null
+++ b/arch/powerpc/Kconfig.debug
@@ -0,0 +1,128 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5config DEBUG_STACKOVERFLOW
6 bool "Check for stack overflows"
7 depends on DEBUG_KERNEL && PPC64
8 help
9 This option will cause messages to be printed if free stack space
10 drops below a certain limit.
11
12config KPROBES
13 bool "Kprobes"
14 depends on DEBUG_KERNEL && PPC64
15 help
16 Kprobes allows you to trap at almost any kernel address and
17 execute a callback function. register_kprobe() establishes
18 a probepoint and specifies the callback. Kprobes is useful
19 for kernel debugging, non-intrusive instrumentation and testing.
20 If in doubt, say "N".
21
22config DEBUG_STACK_USAGE
23 bool "Stack utilization instrumentation"
24 depends on DEBUG_KERNEL && PPC64
25 help
26 Enables the display of the minimum amount of free stack which each
27 task has ever had available in the sysrq-T and sysrq-P debug output.
28
29 This option will slow down process creation somewhat.
30
31config DEBUGGER
32 bool "Enable debugger hooks"
33 depends on DEBUG_KERNEL
34 help
35 Include in-kernel hooks for kernel debuggers. Unless you are
36 intending to debug the kernel, say N here.
37
38config KGDB
39 bool "Include kgdb kernel debugger"
40 depends on DEBUGGER && (BROKEN || PPC_GEN550 || 4xx)
41 select DEBUG_INFO
42 help
43 Include in-kernel hooks for kgdb, the Linux kernel source level
44 debugger. See <http://kgdb.sourceforge.net/> for more information.
45 Unless you are intending to debug the kernel, say N here.
46
47choice
48 prompt "Serial Port"
49 depends on KGDB
50 default KGDB_TTYS1
51
52config KGDB_TTYS0
53 bool "ttyS0"
54
55config KGDB_TTYS1
56 bool "ttyS1"
57
58config KGDB_TTYS2
59 bool "ttyS2"
60
61config KGDB_TTYS3
62 bool "ttyS3"
63
64endchoice
65
66config KGDB_CONSOLE
67 bool "Enable serial console through kgdb port"
68 depends on KGDB && (8xx || CPM2)
69 help
70 If you enable this, all serial console messages will be sent
71 over the gdb stub.
72 If unsure, say N.
73
74config XMON
75 bool "Include xmon kernel debugger"
76 depends on DEBUGGER && !PPC_ISERIES
77 help
78 Include in-kernel hooks for the xmon kernel monitor/debugger.
79 Unless you are intending to debug the kernel, say N here.
80 Make sure to enable also CONFIG_BOOTX_TEXT on Macs. Otherwise
81 nothing will appear on the screen (xmon writes directly to the
82 framebuffer memory).
83 The cmdline option 'xmon' or 'xmon=early' will drop into xmon
84 very early during boot. 'xmon=on' will just enable the xmon
85 debugger hooks. 'xmon=off' will disable the debugger hooks
86 if CONFIG_XMON_DEFAULT is set.
87
88config XMON_DEFAULT
89 bool "Enable xmon by default"
90 depends on XMON
91 help
92 xmon is normally disabled unless booted with 'xmon=on'.
93 Use 'xmon=off' to disable xmon init during runtime.
94
95config IRQSTACKS
96 bool "Use separate kernel stacks when processing interrupts"
97 depends on PPC64
98 help
99 If you say Y here the kernel will use separate kernel stacks
100 for handling hard and soft interrupts. This can help avoid
101 overflowing the process kernel stacks.
102
103config BDI_SWITCH
104 bool "Include BDI-2000 user context switcher"
105 depends on DEBUG_KERNEL && PPC32
106 help
107 Include in-kernel support for the Abatron BDI2000 debugger.
108 Unless you are intending to debug the kernel with one of these
109 machines, say N here.
110
111config BOOTX_TEXT
112 bool "Support for early boot text console (BootX or OpenFirmware only)"
113 depends on PPC_OF && !PPC_ISERIES
114 help
115 Say Y here to see progress messages from the boot firmware in text
116 mode. Requires either BootX or Open Firmware.
117
118config SERIAL_TEXT_DEBUG
119 bool "Support for early boot texts over serial port"
120 depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
121 PPC_GEN550 || PPC_MPC52xx
122
123config PPC_OCP
124 bool
125 depends on IBM_OCP || XILINX_OCP
126 default y
127
128endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
new file mode 100644
index 000000000000..dedf1219761a
--- /dev/null
+++ b/arch/powerpc/Makefile
@@ -0,0 +1,219 @@
1# This file is included by the global makefile so that you can add your own
2 # architecture-specific flags and dependencies. Remember to have actions
3# for "archclean" and "archdep" for cleaning up and making dependencies for
4# this architecture.
5#
6# This file is subject to the terms and conditions of the GNU General Public
7# License. See the file "COPYING" in the main directory of this archive
8# for more details.
9#
10# Copyright (C) 1994 by Linus Torvalds
11# Changes for PPC by Gary Thomas
12# Rewritten by Cort Dougan and Paul Mackerras
13#
14
15# This must match PAGE_OFFSET in include/asm-powerpc/page.h.
16KERNELLOAD := $(CONFIG_KERNEL_START)
17
18HAS_BIARCH := $(call cc-option-yn, -m32)
19
20ifeq ($(CONFIG_PPC64),y)
21OLDARCH := ppc64
22SZ := 64
23
24# Set default 32 bits cross compilers for vdso and boot wrapper
25CROSS32_COMPILE ?=
26
27CROSS32CC := $(CROSS32_COMPILE)gcc
28CROSS32AS := $(CROSS32_COMPILE)as
29CROSS32LD := $(CROSS32_COMPILE)ld
30CROSS32OBJCOPY := $(CROSS32_COMPILE)objcopy
31
32ifeq ($(HAS_BIARCH),y)
33ifeq ($(CROSS32_COMPILE),)
34CROSS32CC := $(CC) -m32
35CROSS32AS := $(AS) -a32
36CROSS32LD := $(LD) -m elf32ppc
37CROSS32OBJCOPY := $(OBJCOPY)
38endif
39endif
40
41export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
42
43new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
44
45ifeq ($(new_nm),y)
46NM := $(NM) --synthetic
47endif
48
49else
50OLDARCH := ppc
51SZ := 32
52endif
53
54UTS_MACHINE := $(OLDARCH)
55
56ifeq ($(HAS_BIARCH),y)
57override AS += -a$(SZ)
58override LD += -m elf$(SZ)ppc
59override CC += -m$(SZ)
60endif
61
62LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD)
63
64# The -Iarch/$(ARCH)/include is temporary while we are merging
65CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
66AFLAGS += -Iarch/$(ARCH)
67CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe
68CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc
69CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
70CFLAGS += $(CFLAGS-y)
71CPP = $(CC) -E $(CFLAGS)
72# Temporary hack until we have migrated to asm-powerpc
73LINUXINCLUDE += -Iarch/$(ARCH)/include
74
75CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
76
77ifeq ($(CONFIG_PPC64),y)
78GCC_VERSION := $(call cc-version)
79GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi)
80
81ifeq ($(CONFIG_POWER4_ONLY),y)
82ifeq ($(CONFIG_ALTIVEC),y)
83ifeq ($(GCC_BROKEN_VEC),y)
84 CFLAGS += $(call cc-option,-mcpu=970)
85else
86 CFLAGS += $(call cc-option,-mcpu=power4)
87endif
88else
89 CFLAGS += $(call cc-option,-mcpu=power4)
90endif
91else
92 CFLAGS += $(call cc-option,-mtune=power4)
93endif
94endif
95
96# Enable unit-at-a-time mode when possible. It shrinks the
97# kernel considerably.
98CFLAGS += $(call cc-option,-funit-at-a-time)
99
100ifndef CONFIG_FSL_BOOKE
101CFLAGS += -mstring
102endif
103
104cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
105cpu-as-$(CONFIG_4xx) += -Wa,-m405
106cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
107cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
108cpu-as-$(CONFIG_E500) += -Wa,-me500
109cpu-as-$(CONFIG_E200) += -Wa,-me200
110
111AFLAGS += $(cpu-as-y)
112CFLAGS += $(cpu-as-y)
113
114# Default to the common case.
115KBUILD_DEFCONFIG := common_defconfig
116
117head-y := arch/powerpc/kernel/head_32.o
118head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o
119head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
120head-$(CONFIG_4xx) := arch/powerpc/kernel/head_4xx.o
121head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
122head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
123
124head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
125head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
126
127core-y += arch/powerpc/kernel/ \
128 arch/$(OLDARCH)/kernel/ \
129 arch/powerpc/mm/ \
130 arch/powerpc/lib/ \
131 arch/powerpc/sysdev/ \
132 arch/powerpc/platforms/
133core-$(CONFIG_MATH_EMULATION) += arch/ppc/math-emu/
134#core-$(CONFIG_XMON) += arch/powerpc/xmon/
135core-$(CONFIG_APUS) += arch/ppc/amiga/
136drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/
137drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/
138drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
139
140drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
141
142defaultimage-$(CONFIG_PPC32) := uImage zImage
143defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
144defaultimage-$(CONFIG_PPC_PSERIES) := zImage
145KBUILD_IMAGE := $(defaultimage-y)
146all: $(KBUILD_IMAGE)
147
148CPPFLAGS_vmlinux.lds := -Upowerpc
149
150# All the instructions talk about "make bzImage".
151bzImage: zImage
152
153BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
154
155.PHONY: $(BOOT_TARGETS)
156
157boot := arch/$(OLDARCH)/boot
158
159# urk
160ifeq ($(CONFIG_PPC64),y)
161$(BOOT_TARGETS): vmlinux
162 $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
163else
164$(BOOT_TARGETS): vmlinux
165 $(Q)$(MAKE) ARCH=ppc $(build)=$(boot) $@
166endif
167
168uImage: vmlinux
169 $(Q)$(MAKE) ARCH=$(OLDARCH) $(build)=$(boot)/images $(boot)/images/$@
170
171define archhelp
172 @echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/images/zImage.*)'
173 @echo ' uImage - Create a bootable image for U-Boot / PPCBoot'
174 @echo ' install - Install kernel using'
175 @echo ' (your) ~/bin/installkernel or'
176 @echo ' (distribution) /sbin/installkernel or'
177 @echo ' install to $$(INSTALL_PATH) and run lilo'
178 @echo ' *_defconfig - Select default config from arch/$(ARCH)/ppc/configs'
179endef
180
181archclean:
182 $(Q)$(MAKE) $(clean)=$(boot)
183 # Temporary hack until we have migrated to asm-powerpc
184 $(Q)rm -rf arch/$(ARCH)/include
185
186archprepare: checkbin
187
188# Temporary hack until we have migrated to asm-powerpc
189include/asm: arch/$(ARCH)/include/asm
190arch/$(ARCH)/include/asm:
191 $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
192 $(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
193
194# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
195# to stdout and these checks are run even on install targets.
196TOUT := .tmp_gas_check
197# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
198# instructions.
199# gcc-3.4 and binutils-2.14 are a fatal combination.
200GCC_VERSION := $(call cc-version)
201
202checkbin:
203 @if test "$(GCC_VERSION)" = "0304" ; then \
204 if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
205 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
206 echo 'correctly with gcc-3.4 and your version of binutils.'; \
207 echo '*** Please upgrade your binutils or downgrade your gcc'; \
208 false; \
209 fi ; \
210 fi
211 @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
212 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
213 echo 'correctly with old versions of binutils.' ; \
214 echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
215 false ; \
216 fi
217
218CLEAN_FILES += $(TOUT)
219
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
new file mode 100644
index 000000000000..6b0f176265e3
--- /dev/null
+++ b/arch/powerpc/kernel/Makefile
@@ -0,0 +1,52 @@
1#
2# Makefile for the linux kernel.
3#
4
5ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc
7endif
8ifeq ($(CONFIG_PPC32),y)
9CFLAGS_prom_init.o += -fPIC
10CFLAGS_btext.o += -fPIC
11endif
12
13obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
14 signal_32.o pmc.o
15obj-$(CONFIG_PPC64) += binfmt_elf32.o sys_ppc32.o ptrace32.o
16obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
17obj-$(CONFIG_POWER4) += idle_power4.o
18obj-$(CONFIG_PPC_OF) += of_device.o
19
20ifeq ($(CONFIG_PPC_MERGE),y)
21
22extra-$(CONFIG_PPC_STD_MMU) := head_32.o
23extra-$(CONFIG_PPC64) := head_64.o
24extra-$(CONFIG_40x) := head_4xx.o
25extra-$(CONFIG_44x) := head_44x.o
26extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
27extra-$(CONFIG_8xx) := head_8xx.o
28extra-$(CONFIG_PPC64) += entry_64.o
29extra-$(CONFIG_PPC_FPU) += fpu.o
30extra-y += vmlinux.lds
31
32obj-y += process.o init_task.o time.o \
33 prom.o systbl.o traps.o
34obj-$(CONFIG_PPC32) += entry_32.o idle_6xx.o setup_32.o misc_32.o
35obj-$(CONFIG_PPC64) += setup_64.o misc_64.o
36obj-$(CONFIG_PPC_OF) += prom_init.o
37obj-$(CONFIG_MODULES) += ppc_ksyms.o
38obj-$(CONFIG_BOOTX_TEXT) += btext.o
39
40ifeq ($(CONFIG_PPC_ISERIES),y)
41$(obj)/head_64.o: $(obj)/lparmap.s
42AFLAGS_head_64.o += -I$(obj)
43endif
44
45else
46# stuff used from here for ARCH=ppc or ARCH=ppc64
47obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o
48
49fpux-$(CONFIG_PPC32) += fpu.o
50extra-$(CONFIG_PPC_FPU) += $(fpux-y)
51
52endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..1c83abd9f37c
--- /dev/null
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -0,0 +1,274 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/mman.h>
24#include <linux/mm.h>
25#ifdef CONFIG_PPC64
26#include <linux/time.h>
27#include <linux/hardirq.h>
28#else
29#include <linux/ptrace.h>
30#include <linux/suspend.h>
31#endif
32
33#include <asm/io.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/processor.h>
37#include <asm/cputable.h>
38#include <asm/thread_info.h>
39#ifdef CONFIG_PPC64
40#include <asm/paca.h>
41#include <asm/lppaca.h>
42#include <asm/iSeries/HvLpEvent.h>
43#include <asm/rtas.h>
44#include <asm/cache.h>
45#include <asm/systemcfg.h>
46#include <asm/compat.h>
47#endif
48
/*
 * Emit a "->SYM <value>" marker line into the generated assembly; the
 * kbuild asm-offsets rule extracts these markers from the .s output and
 * turns each into a #define (see the file header comment).  The "i"
 * constraint forces val to be an assembly-time constant.
 */
49#define DEFINE(sym, val) \
50 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
51
/* Emit an empty marker line, used to group the generated defines. */
52#define BLANK() asm volatile("\n->" : : )
53
/*
 * Not a real program entry point: compiled to assembly only, so that the
 * DEFINE() markers below can be harvested into asm-offsets.h for use by
 * the assembly sources.  Offsets are grouped by the structure they
 * describe; CONFIG_PPC64/PPC32 guards select the per-arch layout.
 */
54int main(void)
55{
	/* task_struct / thread_struct offsets */
56 DEFINE(THREAD, offsetof(struct task_struct, thread));
57 DEFINE(MM, offsetof(struct task_struct, mm));
58#ifdef CONFIG_PPC64
59 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
60#else
61 DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
62 DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
63#endif /* CONFIG_PPC64 */
64
65 DEFINE(KSP, offsetof(struct thread_struct, ksp));
66 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
67 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
68 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
69 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
70#ifdef CONFIG_ALTIVEC
71 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
72 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
73 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
74 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
75#endif /* CONFIG_ALTIVEC */
76#ifdef CONFIG_PPC64
77 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
78#else /* CONFIG_PPC64 */
79 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
80 DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
81#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
82 DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
83 DEFINE(PT_PTRACED, PT_PTRACED);
84#endif
85#ifdef CONFIG_SPE
86 DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
87 DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
88 DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
89 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
90#endif /* CONFIG_SPE */
91#endif /* CONFIG_PPC64 */
92
	/* thread_info offsets */
93 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
94 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
95 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
96#ifdef CONFIG_PPC32
97 DEFINE(TI_TASK, offsetof(struct thread_info, task));
98 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
99 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
100#endif /* CONFIG_PPC32 */
101
102#ifdef CONFIG_PPC64
103 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
104 DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
105 DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
106 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
107 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
108 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
109 DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
110 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
111
112 /* paca */
113 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
114 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
115 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
116 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
117 DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
118 DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
119 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
120 DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
121 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
122 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
123 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
124 DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
125 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
126 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
127 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
128#ifdef CONFIG_HUGETLB_PAGE
129 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
130 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
131#endif /* CONFIG_HUGETLB_PAGE */
132 DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
133 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
134 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
135 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
136 DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
137 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
138 DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
139 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
140
141 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
142 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
143 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
144 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
145
146 /* RTAS */
147 DEFINE(RTASBASE, offsetof(struct rtas_t, base));
148 DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
149#endif /* CONFIG_PPC64 */
150
151 /* Interrupt register frame */
152 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
153#ifndef CONFIG_PPC64
154 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
155#else /* CONFIG_PPC64 */
156 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
157 /* 288 = # of volatile regs, int & fp, for leaf routines */
158 /* which do not stack a frame. See the PPC64 ABI. */
159 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
160 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
161 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
162 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
163#endif /* CONFIG_PPC64 */
164 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
165 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
166 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
167 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
168 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
169 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
170 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
171 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
172 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
173 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
174 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
175 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
176 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
177 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
	/* r14-r31 are only saved in the 32-bit frame layout */
178#ifndef CONFIG_PPC64
179 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
180 DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
181 DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
182 DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
183 DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
184 DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
185 DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
186 DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
187 DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
188 DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
189 DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
190 DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
191 DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
192 DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
193 DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
194 DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
195 DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
196 DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
197#endif /* CONFIG_PPC64 */
198 /*
199 * Note: these symbols include _ because they overlap with special
200 * register names
201 */
202 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
203 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
204 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
205 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
206 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
207 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
208 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
209 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
210 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
211 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
212#ifndef CONFIG_PPC64
213 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
214 /*
215 * The PowerPC 400-class & Book-E processors have neither the DAR
216 * nor the DSISR SPRs. Hence, we overload them to hold the similar
217 * DEAR and ESR SPRs for such processors. For critical interrupts
218 * we use them to hold SRR0 and SRR1.
219 */
220 DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
221 DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
222 DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
223#else /* CONFIG_PPC64 */
224 DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
225 DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
226
227 /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
228 DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
229 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
230#endif /* CONFIG_PPC64 */
231
232 DEFINE(CLONE_VM, CLONE_VM);
233 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
234
235#ifndef CONFIG_PPC64
236 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
237#endif /* ! CONFIG_PPC64 */
238
239 /* About the CPU features table */
240 DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
241 DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
242 DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
243 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
244 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
245
246#ifndef CONFIG_PPC64
	/* software-suspend page-backup entries (swsusp, 32-bit only) */
247 DEFINE(pbe_address, offsetof(struct pbe, address));
248 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
249 DEFINE(pbe_next, offsetof(struct pbe, next));
250
251 DEFINE(TASK_SIZE, TASK_SIZE);
252 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
253#else /* CONFIG_PPC64 */
254 /* systemcfg offsets for use by vdso */
255 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
256 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
257 DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
258 DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
259 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
260 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
261 DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
262 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
263 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
264
265 /* timeval/timezone offsets for use by vdso */
266 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
267 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
268 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
269 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
270 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
271 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
272#endif /* CONFIG_PPC64 */
273 return 0;
274}
diff --git a/arch/ppc64/kernel/binfmt_elf32.c b/arch/powerpc/kernel/binfmt_elf32.c
index fadc699a0497..8ad6b0f33651 100644
--- a/arch/ppc64/kernel/binfmt_elf32.c
+++ b/arch/powerpc/kernel/binfmt_elf32.c
@@ -70,9 +70,6 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
70 value->tv_sec = jiffies / HZ; 70 value->tv_sec = jiffies / HZ;
71} 71}
72 72
73extern void start_thread32(struct pt_regs *, unsigned long, unsigned long);
74#undef start_thread
75#define start_thread start_thread32
76#define init_elf_binfmt init_elf32_binfmt 73#define init_elf_binfmt init_elf32_binfmt
77 74
78#include "../../../fs/binfmt_elf.c" 75#include "../../../fs/binfmt_elf.c"
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
new file mode 100644
index 000000000000..bdfba92b2b38
--- /dev/null
+++ b/arch/powerpc/kernel/btext.c
@@ -0,0 +1,853 @@
1/*
2 * Procedures for drawing on the screen early on in the boot process.
3 *
4 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 */
6#include <linux/config.h>
7#include <linux/kernel.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/module.h>
11
12#include <asm/sections.h>
13#include <asm/prom.h>
14#include <asm/btext.h>
15#include <asm/prom.h>
16#include <asm/page.h>
17#include <asm/mmu.h>
18#include <asm/pgtable.h>
19#include <asm/io.h>
20#include <asm/lmb.h>
21#include <asm/processor.h>
22
/* Scrolling is compiled out; btext_drawchar() wraps to the top instead. */
23#define NO_SCROLL
24
25#ifndef NO_SCROLL
26static void scrollscreen(void);
27#endif
28
29static void draw_byte(unsigned char c, long locX, long locY);
30static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
31static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
32static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
33
/* Cursor position and screen size, in 8x16-pixel character cells. */
34static int g_loc_X;
35static int g_loc_Y;
36static int g_max_loc_X;
37static int g_max_loc_Y;
38
/* Frame-buffer geometry: stride in bytes, bits per pixel, and the
 * active rectangle {x, y, width, height}. */
39static int dispDeviceRowBytes;
40static int dispDeviceDepth;
41static int dispDeviceRect[4];
/* Physical base and (once ioremapped) virtual base of the display. */
42static unsigned char *dispDeviceBase, *logicalDisplayBase;
43
/* BAT register pair set up by btext_prepare_BAT() for use from head.S. */
44unsigned long disp_BAT[2] __initdata = {0, 0};
45
/* 256 glyphs, 16 bytes (scanlines) each. */
46#define cmapsz (16*256)
47
48static unsigned char vga_font[cmapsz];
49
/* Non-zero once the frame buffer is addressable (drawing is safe). */
50int boot_text_mapped;
51int force_printk_to_btext = 0;
52
53#ifdef CONFIG_PPC32
54/* Calc BAT values for mapping the display and store them
55 * in disp_BAT. Those values are then used from head.S to map
56 * the display during identify_machine() and MMU_Init()
57 *
58 * The display is mapped to virtual address 0xD0000000, rather
59 * than 1:1, because some some CHRP machines put the frame buffer
60 * in the region starting at 0xC0000000 (KERNELBASE).
61 * This mapping is temporary and will disappear as soon as the
62 * setup done by MMU_Init() is applied.
63 *
64 * For now, we align the BAT and then map 8Mb on 601 and 16Mb
65 * on other PPCs. This may cause trouble if the framebuffer
66 * is really badly aligned, but I didn't encounter this case
67 * yet.
68 */
69void __init
70btext_prepare_BAT(void)
71{
	/* Map the display at KERNELBASE + 256MB (0xD0000000), see the
	 * block comment above for why not 1:1. */
72 unsigned long vaddr = KERNELBASE + 0x10000000;
73 unsigned long addr;
74 unsigned long lowbits;
75
76 addr = (unsigned long)dispDeviceBase;
77 if (!addr) {
78 boot_text_mapped = 0;
79 return;
80 }
	/* PVR version 1 is the 601, which has a different BAT format. */
81 if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
82 /* 603, 604, G3, G4, ... */
	/* 16MB block: keep the offset below 16MB, align the base. */
83 lowbits = addr & ~0xFF000000UL;
84 addr &= 0xFF000000UL;
85 disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
86 disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
87 } else {
88 /* 601 */
	/* 8MB block on the 601. */
89 lowbits = addr & ~0xFF800000UL;
90 addr &= 0xFF800000UL;
91 disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4;
92 disp_BAT[1] = addr | BL_8M | 0x40;
93 }
	/* Re-add the sub-block offset to get the virtual pixel base. */
94 logicalDisplayBase = (void *) (vaddr + lowbits);
95}
96#endif
97
98/* This function will enable the early boot text when doing OF booting. This
99 * way, xmon output should work too
100 */
101void __init
102btext_setup_display(int width, int height, int depth, int pitch,
103 unsigned long address)
104{
105 g_loc_X = 0;
106 g_loc_Y = 0;
107 g_max_loc_X = width / 8;
108 g_max_loc_Y = height / 16;
109 logicalDisplayBase = (unsigned char *)address;
110 dispDeviceBase = (unsigned char *)address;
111 dispDeviceRowBytes = pitch;
112 dispDeviceDepth = depth;
113 dispDeviceRect[0] = dispDeviceRect[1] = 0;
114 dispDeviceRect[2] = width;
115 dispDeviceRect[3] = height;
116 boot_text_mapped = 1;
117}
118
119/* Here's a small text engine to use during early boot
120 * or for debugging purposes
121 *
122 * todo:
123 *
124 * - build some kind of vgacon with it to enable early printk
125 * - move to a separate file
126 * - add a few video driver hooks to keep in sync with display
127 * changes.
128 */
129
/*
 * (Re)map the frame buffer into the kernel virtual address space once
 * ioremap works.  On success logicalDisplayBase points at the pixels
 * and boot_text_mapped is set; on any failure the text engine simply
 * stays disabled.
 */
130void map_boot_text(void)
131{
132 unsigned long base, offset, size;
133 unsigned char *vbase;
134
135 /* By default, we are no longer mapped */
136 boot_text_mapped = 0;
137 if (dispDeviceBase == 0)
138 return;
	/* Map from a page-aligned base; keep the sub-page offset. */
139 base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
140 offset = ((unsigned long) dispDeviceBase) - base;
141 size = dispDeviceRowBytes * dispDeviceRect[3] + offset
142 + dispDeviceRect[0];
143 vbase = __ioremap(base, size, _PAGE_NO_CACHE);
144 if (vbase == 0)
145 return;
146 logicalDisplayBase = vbase + offset;
147 boot_text_mapped = 1;
148}
149
/*
 * Initialize the text engine from an Open Firmware display node.
 * Reads width/height/depth/linebytes/address properties; returns 0 on
 * success or -EINVAL when a required property is missing.
 */
150int btext_initialize(struct device_node *np)
151{
152 unsigned int width, height, depth, pitch;
153 unsigned long address = 0;
154 u32 *prop;
155
156 prop = (u32 *)get_property(np, "width", NULL);
157 if (prop == NULL)
158 return -EINVAL;
159 width = *prop;
160 prop = (u32 *)get_property(np, "height", NULL);
161 if (prop == NULL)
162 return -EINVAL;
163 height = *prop;
164 prop = (u32 *)get_property(np, "depth", NULL);
165 if (prop == NULL)
166 return -EINVAL;
167 depth = *prop;
	/* Fallback pitch if the node has no "linebytes" property. */
168 pitch = width * ((depth + 7) / 8);
169 prop = (u32 *)get_property(np, "linebytes", NULL);
170 if (prop)
171 pitch = *prop;
	/* NOTE(review): looks like a firmware quirk where linebytes==1
	 * means "one 4K page per line" — confirm which machines need it. */
172 if (pitch == 1)
173 pitch = 0x1000;
174 prop = (u32 *)get_property(np, "address", NULL);
175 if (prop)
176 address = *prop;
177
178 /* FIXME: Add support for PCI reg properties */
179
180 if (address == 0)
181 return -EINVAL;
182
	/* Same state setup as btext_setup_display(). */
183 g_loc_X = 0;
184 g_loc_Y = 0;
185 g_max_loc_X = width / 8;
186 g_max_loc_Y = height / 16;
187 logicalDisplayBase = (unsigned char *)address;
188 dispDeviceBase = (unsigned char *)address;
189 dispDeviceRowBytes = pitch;
190 dispDeviceDepth = depth;
191 dispDeviceRect[0] = dispDeviceRect[1] = 0;
192 dispDeviceRect[2] = width;
193 dispDeviceRect[3] = height;
194
195 map_boot_text();
196
197 return 0;
198}
199
/*
 * Pick a display for early boot text: first try the OF stdout node
 * (if it is a display), then fall back to any display node that the
 * firmware has marked "linux,opened".
 */
200void __init init_boot_display(void)
201{
202 char *name;
203 struct device_node *np = NULL;
204 int rc = -ENODEV;
205
206 printk("trying to initialize btext ...\n");
207
208 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
209 if (name != NULL) {
210 np = of_find_node_by_path(name);
211 if (np != NULL) {
212 if (strcmp(np->type, "display") != 0) {
213 printk("boot stdout isn't a display !\n");
214 of_node_put(np);
215 np = NULL;
216 }
217 }
218 }
219 if (np)
220 rc = btext_initialize(np);
221 if (rc == 0)
222 return;
223
	/* stdout didn't work: try every opened display node in turn. */
224 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
225 if (get_property(np, "linux,opened", NULL)) {
226 printk("trying %s ...\n", np->full_name);
227 rc = btext_initialize(np);
228 printk("result: %d\n", rc);
229 }
230 if (rc == 0)
231 return;
232 }
233}
234
235/* Calc the base address of a given point (x,y) */
236static unsigned char * calc_base(int x, int y)
237{
238 unsigned char *base;
239
240 base = logicalDisplayBase;
241 if (base == 0)
242 base = dispDeviceBase;
243 base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
244 base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
245 return base;
246}
247
248/* Adjust the display to a new resolution */
/*
 * Adjust the text engine after a video driver changes the mode.
 * Ignored unless the new physical base is the same frame buffer
 * (within 256MB) as the one we were using.  Remaps the buffer and
 * resets the cursor.
 */
249void btext_update_display(unsigned long phys, int width, int height,
250 int depth, int pitch)
251{
252 if (dispDeviceBase == 0)
253 return;
254
255 /* check it's the same frame buffer (within 256MB) */
256 if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
257 return;
258
259 dispDeviceBase = (__u8 *) phys;
260 dispDeviceRect[0] = 0;
261 dispDeviceRect[1] = 0;
262 dispDeviceRect[2] = width;
263 dispDeviceRect[3] = height;
264 dispDeviceDepth = depth;
265 dispDeviceRowBytes = pitch;
	/* Drop the old mapping before creating one for the new geometry. */
266 if (boot_text_mapped) {
267 iounmap(logicalDisplayBase);
268 boot_text_mapped = 0;
269 }
270 map_boot_text();
271 g_loc_X = 0;
272 g_loc_Y = 0;
273 g_max_loc_X = width / 8;
274 g_max_loc_Y = height / 16;
275}
276EXPORT_SYMBOL(btext_update_display);
277
278void btext_clearscreen(void)
279{
280 unsigned long *base = (unsigned long *)calc_base(0, 0);
281 unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
282 (dispDeviceDepth >> 3)) >> 3;
283 int i,j;
284
285 for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
286 {
287 unsigned long *ptr = base;
288 for(j=width; j; --j)
289 *(ptr++) = 0;
290 base += (dispDeviceRowBytes >> 3);
291 }
292}
293
294#ifndef NO_SCROLL
/*
 * Scroll the screen up by one character row (16 pixel lines), then
 * clear the newly exposed bottom row.  The copy regions overlap but
 * dst < src, so the forward long-by-long copy is safe; do not replace
 * it with memcpy.
 */
295static void scrollscreen(void)
296{
297 unsigned long *src = (unsigned long *)calc_base(0,16);
298 unsigned long *dst = (unsigned long *)calc_base(0,0);
299 unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
300 (dispDeviceDepth >> 3)) >> 3;
301 int i,j;
302
	/* Copy every line except the last 16 up by 16 lines. */
303 for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
304 {
305 unsigned long *src_ptr = src;
306 unsigned long *dst_ptr = dst;
307 for(j=width; j; --j)
308 *(dst_ptr++) = *(src_ptr++);
309 src += (dispDeviceRowBytes >> 3);
310 dst += (dispDeviceRowBytes >> 3);
311 }
	/* Blank the last character row (dst now points at it). */
312 for (i=0; i<16; i++)
313 {
314 unsigned long *dst_ptr = dst;
315 for(j=width; j; --j)
316 *(dst_ptr++) = 0;
317 dst += (dispDeviceRowBytes >> 3);
318 }
319}
320#endif /* ndef NO_SCROLL */
321
/*
 * Draw one character at the cursor, handling \b, \t, \r and \n, and
 * advance the cursor.  With NO_SCROLL defined the cursor wraps from
 * the bottom back to the top and the new line is blanked; otherwise
 * the screen scrolls.
 */
322void btext_drawchar(char c)
323{
324 int cline = 0;
325#ifdef NO_SCROLL
326 int x;
327#endif
328 if (!boot_text_mapped)
329 return;
330
331 switch (c) {
332 case '\b':
333 if (g_loc_X > 0)
334 --g_loc_X;
335 break;
336 case '\t':
	/* Advance to the next multiple-of-8 column. */
337 g_loc_X = (g_loc_X & -8) + 8;
338 break;
339 case '\r':
340 g_loc_X = 0;
341 break;
342 case '\n':
343 g_loc_X = 0;
344 g_loc_Y++;
345 cline = 1;
346 break;
347 default:
348 draw_byte(c, g_loc_X++, g_loc_Y);
349 }
	/* Wrap to the next line when past the right edge. */
350 if (g_loc_X >= g_max_loc_X) {
351 g_loc_X = 0;
352 g_loc_Y++;
353 cline = 1;
354 }
355#ifndef NO_SCROLL
356 while (g_loc_Y >= g_max_loc_Y) {
357 scrollscreen();
358 g_loc_Y--;
359 }
360#else
361 /* wrap around from bottom to top of screen so we don't
362 waste time scrolling each line. -- paulus. */
363 if (g_loc_Y >= g_max_loc_Y)
364 g_loc_Y = 0;
365 if (cline) {
366 for (x = 0; x < g_max_loc_X; ++x)
367 draw_byte(' ', x, g_loc_Y);
368 }
369#endif
370}
371
372void btext_drawstring(const char *c)
373{
374 if (!boot_text_mapped)
375 return;
376 while (*c)
377 btext_drawchar(*c++);
378}
379
380void btext_drawhex(unsigned long v)
381{
382 char *hex_table = "0123456789abcdef";
383
384 if (!boot_text_mapped)
385 return;
386#ifdef CONFIG_PPC64
387 btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
388 btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
389 btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
390 btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
391 btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
392 btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
393 btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
394 btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
395#endif
396 btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
397 btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
398 btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
399 btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
400 btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
401 btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]);
402 btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]);
403 btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]);
404 btext_drawchar(' ');
405}
406
/*
 * Render one glyph (8x16 pixels from vga_font) at character cell
 * (locX, locY), dispatching on the frame-buffer depth.  Depths other
 * than 8/15/16/24/32 are silently ignored (no default case).
 */
407static void draw_byte(unsigned char c, long locX, long locY)
408{
409 unsigned char *base = calc_base(locX << 3, locY << 4);
410 unsigned char *font = &vga_font[((unsigned int)c) * 16];
411 int rb = dispDeviceRowBytes;
412
413 switch(dispDeviceDepth) {
414 case 24:
415 case 32:
416 draw_byte_32(font, (unsigned int *)base, rb);
417 break;
418 case 15:
419 case 16:
420 draw_byte_16(font, (unsigned int *)base, rb);
421 break;
422 case 8:
423 draw_byte_8(font, (unsigned int *)base, rb);
424 break;
425 }
426}
427
/* Expand 4 font bits into 4 mask bytes: bit i of the index (counting
 * from the MSB) becomes 0xff in byte i from the most-significant end,
 * e.g. index 0b1000 -> 0xff000000.  Used by draw_byte_8(). */
428static unsigned int expand_bits_8[16] = {
429 0x00000000,
430 0x000000ff,
431 0x0000ff00,
432 0x0000ffff,
433 0x00ff0000,
434 0x00ff00ff,
435 0x00ffff00,
436 0x00ffffff,
437 0xff000000,
438 0xff0000ff,
439 0xff00ff00,
440 0xff00ffff,
441 0xffff0000,
442 0xffff00ff,
443 0xffffff00,
444 0xffffffff
445};
446
/* Same idea for 2 font bits -> 2 halfword masks, used by draw_byte_16(). */
447static unsigned int expand_bits_16[4] = {
448 0x00000000,
449 0x0000ffff,
450 0xffff0000,
451 0xffffffff
452};
453
454
/*
 * Render one glyph at 32bpp (also used for 24bpp modes).  Each of the
 * 16 font bytes supplies one scanline of 8 pixels, MSB leftmost: a set
 * bit paints the foreground colour, a clear bit the background.
 */
static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
	int row, col;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;

	for (row = 0; row < 16; ++row) {
		int bits = *font++;

		/* -(bit) yields an all-ones mask when the bit is set. */
		for (col = 0; col < 8; ++col)
			base[col] = (-((bits >> (7 - col)) & 1) & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}
475
476static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
477{
478 int l, bits;
479 int fg = 0xFFFFFFFFUL;
480 int bg = 0x00000000UL;
481 unsigned int *eb = (int *)expand_bits_16;
482
483 for (l = 0; l < 16; ++l)
484 {
485 bits = *font++;
486 base[0] = (eb[bits >> 6] & fg) ^ bg;
487 base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
488 base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
489 base[3] = (eb[bits & 3] & fg) ^ bg;
490 base = (unsigned int *) ((char *)base + rb);
491 }
492}
493
494static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
495{
496 int l, bits;
497 int fg = 0x0F0F0F0FUL;
498 int bg = 0x00000000UL;
499 unsigned int *eb = (int *)expand_bits_8;
500
501 for (l = 0; l < 16; ++l)
502 {
503 bits = *font++;
504 base[0] = (eb[bits >> 4] & fg) ^ bg;
505 base[1] = (eb[bits & 0xf] & fg) ^ bg;
506 base = (unsigned int *) ((char *)base + rb);
507 }
508}
509
510static unsigned char vga_font[cmapsz] = {
5110x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
5130x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
5140xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
5150x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
5160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
5170x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
5180x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
5190x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
5200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
5210x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
5220xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
5230x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
5240x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
5250xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
5260x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
5270x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
5280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
5290x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
5300x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
5310x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
5320x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
5330xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
5340x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
5350x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
5360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
5370x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
5380xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
5390x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
5400x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5410xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
5420x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
5430x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
5440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
5450x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5460x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5470x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
5480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
5490xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5500x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5510x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
5520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
5530x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5540x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5550x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5560x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
5570x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
5580x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
5590x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
5600x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
5610x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
5620x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
5630x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
5650x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
5660x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5670x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
5680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
5690x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5700x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
5710x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
5720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5730x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5740x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
5750x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
5760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
5770x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5780x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
5790x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
5800x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
5810x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
5820xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5830x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
5840x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
5850x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5860xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5870x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
5880x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
5890x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5900x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5910x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
5920x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
5930x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
5940x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
5950x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
5970xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
5980x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
5990x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
6000x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
6010xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
6020x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
6030x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
6040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
6050x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6060xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
6070x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
6090x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
6100x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
6110x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
6120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
6130x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
6140xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
6150x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
6160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6170xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
6180x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
6190x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
6200x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
6210x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
6220xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6230x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6240x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6250xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
6260xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
6270x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
6280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
6290x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
6300xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6310x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
6320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
6330x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
6340xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
6350x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
6360x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
6370x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6380x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
6390x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
6410xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
6420x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
6430x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
6440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
6450xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6460x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6470x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
6480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
6490xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
6500x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
6510x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
6530x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
6540x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
6550x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6560x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
6570xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6580x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
6590x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
6600x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
6610x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
6620x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
6630x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
6640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
6650x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
6660x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
6670x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
6690xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6700x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
6710x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
6720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
6730xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
6740x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
6750x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
6760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
6770x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
6780x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
6790x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6800x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
6810xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6820xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
6830x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6840x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
6850xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
6860x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6870x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
6880x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
6890xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
6900x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6910x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
6920x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
6930xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
6940x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6950x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
6960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
6970x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
6980x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6990x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
7000x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
7010xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
7020x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
7030x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
7040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
7050x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
7060xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
7070x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
7090xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
7100x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
7110x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
7120x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
7130xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
7140x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
7150x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7160x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
7170xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
7180xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
7190x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
7200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
7210xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
7220x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
7230x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
7240xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
7250xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
7260x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
7270x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7280x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
7290xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
7300x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
7310x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
7320x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
7330x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
7340x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7350x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
7360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
7370xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7380x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
7390x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
7400x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
7410x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
7420x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
7430x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
7440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
7450x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
7460x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
7470x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
7480x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
7490xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
7500x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7510x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
7520x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
7530x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
7540x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7550x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
7560x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
7570x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
7580x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7590x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7600x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
7610x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7620x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7630x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
7640x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
7650x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7660x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7670x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
7680x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
7690x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7700x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7710x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7720x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
7730x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
7740x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7750x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7760x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
7770x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7780x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7790x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
7800x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
7810x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7820x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7830x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
7840x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
7850x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
7860x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7870x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
7880x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
7890x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7900x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7910x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
7920x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
7930x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
7940x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7950x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7960x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
7970x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7980x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7990x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
8000x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
8010x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8020x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8030xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
8040xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
8050xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
8060xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
8070x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
8080x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
8090x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8100x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
8110x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
8120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
8130xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8140xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
8150x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
8160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
8170xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8180x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
8190x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
8210x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
8220x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
8230x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
8240x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
8250x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8260x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8270x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
8280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
8290x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
8300xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
8310x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
8320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
8330x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
8340x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
8350x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
8360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
8370x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8380x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
8390x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
8400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
8410x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
8420x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8430x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
8440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8450x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
8460x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
8470x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
8480x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
8490x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8500x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
8510x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8520x00, 0x00, 0x00, 0x00,
853};
diff --git a/arch/ppc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6b76cf58d9e0..1fb80baebc87 100644
--- a/arch/ppc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * arch/ppc/kernel/cputable.c
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) 2 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 * 3 *
4 * Modifications for ppc64:
5 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
@@ -14,96 +15,304 @@
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/threads.h> 16#include <linux/threads.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <asm/cputable.h> 18#include <linux/module.h>
18 19
19struct cpu_spec* cur_cpu_spec[NR_CPUS]; 20#include <asm/oprofile_impl.h>
21#include <asm/cputable.h>
20 22
21extern void __setup_cpu_601(unsigned long offset, int cpu_nr, struct cpu_spec* spec); 23struct cpu_spec* cur_cpu_spec = NULL;
22extern void __setup_cpu_603(unsigned long offset, int cpu_nr, struct cpu_spec* spec); 24#ifdef CONFIG_PPC64
23extern void __setup_cpu_604(unsigned long offset, int cpu_nr, struct cpu_spec* spec); 25EXPORT_SYMBOL(cur_cpu_spec);
24extern void __setup_cpu_750(unsigned long offset, int cpu_nr, struct cpu_spec* spec); 26#endif
25extern void __setup_cpu_750cx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
26extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
27extern void __setup_cpu_7400(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
28extern void __setup_cpu_7410(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
29extern void __setup_cpu_745x(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
30extern void __setup_cpu_power3(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
31extern void __setup_cpu_power4(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
32extern void __setup_cpu_ppc970(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
33extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
34 27
35#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \ 28/* NOTE:
36 !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \ 29 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
37 !defined(CONFIG_BOOKE)) 30 * the responsibility of the appropriate CPU save/restore functions to
31 * eventually copy these settings over. Those save/restore aren't yet
32 * part of the cputable though. That has to be fixed for both ppc32
33 * and ppc64
34 */
35#ifdef CONFIG_PPC64
36extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
37extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
38extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
39#else
40extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
41extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
42extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
43extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
44extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
45extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
46extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
47extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
48#endif /* CONFIG_PPC32 */
49extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
38 50
39/* This table only contains "desktop" CPUs, it need to be filled with embedded 51/* This table only contains "desktop" CPUs, it need to be filled with embedded
40 * ones as well... 52 * ones as well...
41 */ 53 */
42#define COMMON_PPC (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ 54#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
43 PPC_FEATURE_HAS_MMU) 55 PPC_FEATURE_HAS_MMU)
56#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
44 57
45/* We only set the altivec features if the kernel was compiled with altivec
46 * support
47 */
48#ifdef CONFIG_ALTIVEC
49#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
50#define PPC_FEATURE_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
51#else
52#define CPU_FTR_ALTIVEC_COMP 0
53#define PPC_FEATURE_ALTIVEC_COMP 0
54#endif
55 58
56/* We only set the spe features if the kernel was compiled with 59/* We only set the spe features if the kernel was compiled with
57 * spe support 60 * spe support
58 */ 61 */
59#ifdef CONFIG_SPE 62#ifdef CONFIG_SPE
60#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE 63#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE
61#else 64#else
62#define PPC_FEATURE_SPE_COMP 0 65#define PPC_FEATURE_SPE_COMP 0
63#endif 66#endif
64 67
65/* We need to mark all pages as being coherent if we're SMP or we 68struct cpu_spec cpu_specs[] = {
66 * have a 74[45]x and an MPC107 host bridge. 69#ifdef CONFIG_PPC64
67 */ 70 { /* Power3 */
68#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) 71 .pvr_mask = 0xffff0000,
69#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT 72 .pvr_value = 0x00400000,
70#else 73 .cpu_name = "POWER3 (630)",
71#define CPU_FTR_COMMON 0 74 .cpu_features = CPU_FTRS_POWER3,
75 .cpu_user_features = COMMON_USER_PPC64,
76 .icache_bsize = 128,
77 .dcache_bsize = 128,
78 .num_pmcs = 8,
79 .cpu_setup = __setup_cpu_power3,
80#ifdef CONFIG_OPROFILE
81 .oprofile_cpu_type = "ppc64/power3",
82 .oprofile_model = &op_model_rs64,
72#endif 83#endif
73 84 },
74/* The powersave features NAP & DOZE seems to confuse BDI when 85 { /* Power3+ */
75 debugging. So if a BDI is used, disable theses 86 .pvr_mask = 0xffff0000,
76 */ 87 .pvr_value = 0x00410000,
77#ifndef CONFIG_BDI_SWITCH 88 .cpu_name = "POWER3 (630+)",
78#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE 89 .cpu_features = CPU_FTRS_POWER3,
79#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP 90 .cpu_user_features = COMMON_USER_PPC64,
91 .icache_bsize = 128,
92 .dcache_bsize = 128,
93 .num_pmcs = 8,
94 .cpu_setup = __setup_cpu_power3,
95#ifdef CONFIG_OPROFILE
96 .oprofile_cpu_type = "ppc64/power3",
97 .oprofile_model = &op_model_rs64,
98#endif
99 },
100 { /* Northstar */
101 .pvr_mask = 0xffff0000,
102 .pvr_value = 0x00330000,
103 .cpu_name = "RS64-II (northstar)",
104 .cpu_features = CPU_FTRS_RS64,
105 .cpu_user_features = COMMON_USER_PPC64,
106 .icache_bsize = 128,
107 .dcache_bsize = 128,
108 .num_pmcs = 8,
109 .cpu_setup = __setup_cpu_power3,
110#ifdef CONFIG_OPROFILE
111 .oprofile_cpu_type = "ppc64/rs64",
112 .oprofile_model = &op_model_rs64,
113#endif
114 },
115 { /* Pulsar */
116 .pvr_mask = 0xffff0000,
117 .pvr_value = 0x00340000,
118 .cpu_name = "RS64-III (pulsar)",
119 .cpu_features = CPU_FTRS_RS64,
120 .cpu_user_features = COMMON_USER_PPC64,
121 .icache_bsize = 128,
122 .dcache_bsize = 128,
123 .num_pmcs = 8,
124 .cpu_setup = __setup_cpu_power3,
125#ifdef CONFIG_OPROFILE
126 .oprofile_cpu_type = "ppc64/rs64",
127 .oprofile_model = &op_model_rs64,
128#endif
129 },
130 { /* I-star */
131 .pvr_mask = 0xffff0000,
132 .pvr_value = 0x00360000,
133 .cpu_name = "RS64-III (icestar)",
134 .cpu_features = CPU_FTRS_RS64,
135 .cpu_user_features = COMMON_USER_PPC64,
136 .icache_bsize = 128,
137 .dcache_bsize = 128,
138 .num_pmcs = 8,
139 .cpu_setup = __setup_cpu_power3,
140#ifdef CONFIG_OPROFILE
141 .oprofile_cpu_type = "ppc64/rs64",
142 .oprofile_model = &op_model_rs64,
143#endif
144 },
145 { /* S-star */
146 .pvr_mask = 0xffff0000,
147 .pvr_value = 0x00370000,
148 .cpu_name = "RS64-IV (sstar)",
149 .cpu_features = CPU_FTRS_RS64,
150 .cpu_user_features = COMMON_USER_PPC64,
151 .icache_bsize = 128,
152 .dcache_bsize = 128,
153 .num_pmcs = 8,
154 .cpu_setup = __setup_cpu_power3,
155#ifdef CONFIG_OPROFILE
156 .oprofile_cpu_type = "ppc64/rs64",
157 .oprofile_model = &op_model_rs64,
158#endif
159 },
160 { /* Power4 */
161 .pvr_mask = 0xffff0000,
162 .pvr_value = 0x00350000,
163 .cpu_name = "POWER4 (gp)",
164 .cpu_features = CPU_FTRS_POWER4,
165 .cpu_user_features = COMMON_USER_PPC64,
166 .icache_bsize = 128,
167 .dcache_bsize = 128,
168 .num_pmcs = 8,
169 .cpu_setup = __setup_cpu_power4,
170#ifdef CONFIG_OPROFILE
171 .oprofile_cpu_type = "ppc64/power4",
172 .oprofile_model = &op_model_rs64,
173#endif
174 },
175 { /* Power4+ */
176 .pvr_mask = 0xffff0000,
177 .pvr_value = 0x00380000,
178 .cpu_name = "POWER4+ (gq)",
179 .cpu_features = CPU_FTRS_POWER4,
180 .cpu_user_features = COMMON_USER_PPC64,
181 .icache_bsize = 128,
182 .dcache_bsize = 128,
183 .num_pmcs = 8,
184 .cpu_setup = __setup_cpu_power4,
185#ifdef CONFIG_OPROFILE
186 .oprofile_cpu_type = "ppc64/power4",
187 .oprofile_model = &op_model_power4,
188#endif
189 },
190 { /* PPC970 */
191 .pvr_mask = 0xffff0000,
192 .pvr_value = 0x00390000,
193 .cpu_name = "PPC970",
194 .cpu_features = CPU_FTRS_PPC970,
195 .cpu_user_features = COMMON_USER_PPC64 |
196 PPC_FEATURE_HAS_ALTIVEC_COMP,
197 .icache_bsize = 128,
198 .dcache_bsize = 128,
199 .num_pmcs = 8,
200 .cpu_setup = __setup_cpu_ppc970,
201#ifdef CONFIG_OPROFILE
202 .oprofile_cpu_type = "ppc64/970",
203 .oprofile_model = &op_model_power4,
204#endif
205 },
206#endif /* CONFIG_PPC64 */
207#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
208 { /* PPC970FX */
209 .pvr_mask = 0xffff0000,
210 .pvr_value = 0x003c0000,
211 .cpu_name = "PPC970FX",
212#ifdef CONFIG_PPC32
213 .cpu_features = CPU_FTRS_970_32,
80#else 214#else
81#define CPU_FTR_MAYBE_CAN_DOZE 0 215 .cpu_features = CPU_FTRS_PPC970,
82#define CPU_FTR_MAYBE_CAN_NAP 0
83#endif 216#endif
84 217 .cpu_user_features = COMMON_USER_PPC64 |
85struct cpu_spec cpu_specs[] = { 218 PPC_FEATURE_HAS_ALTIVEC_COMP,
219 .icache_bsize = 128,
220 .dcache_bsize = 128,
221 .num_pmcs = 8,
222 .cpu_setup = __setup_cpu_ppc970,
223#ifdef CONFIG_OPROFILE
224 .oprofile_cpu_type = "ppc64/970",
225 .oprofile_model = &op_model_power4,
226#endif
227 },
228#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
229#ifdef CONFIG_PPC64
230 { /* PPC970MP */
231 .pvr_mask = 0xffff0000,
232 .pvr_value = 0x00440000,
233 .cpu_name = "PPC970MP",
234 .cpu_features = CPU_FTRS_PPC970,
235 .cpu_user_features = COMMON_USER_PPC64 |
236 PPC_FEATURE_HAS_ALTIVEC_COMP,
237 .icache_bsize = 128,
238 .dcache_bsize = 128,
239 .cpu_setup = __setup_cpu_ppc970,
240#ifdef CONFIG_OPROFILE
241 .oprofile_cpu_type = "ppc64/970",
242 .oprofile_model = &op_model_power4,
243#endif
244 },
245 { /* Power5 */
246 .pvr_mask = 0xffff0000,
247 .pvr_value = 0x003a0000,
248 .cpu_name = "POWER5 (gr)",
249 .cpu_features = CPU_FTRS_POWER5,
250 .cpu_user_features = COMMON_USER_PPC64,
251 .icache_bsize = 128,
252 .dcache_bsize = 128,
253 .num_pmcs = 6,
254 .cpu_setup = __setup_cpu_power4,
255#ifdef CONFIG_OPROFILE
256 .oprofile_cpu_type = "ppc64/power5",
257 .oprofile_model = &op_model_power4,
258#endif
259 },
260 { /* Power5 */
261 .pvr_mask = 0xffff0000,
262 .pvr_value = 0x003b0000,
263 .cpu_name = "POWER5 (gs)",
264 .cpu_features = CPU_FTRS_POWER5,
265 .cpu_user_features = COMMON_USER_PPC64,
266 .icache_bsize = 128,
267 .dcache_bsize = 128,
268 .num_pmcs = 6,
269 .cpu_setup = __setup_cpu_power4,
270#ifdef CONFIG_OPROFILE
271 .oprofile_cpu_type = "ppc64/power5",
272 .oprofile_model = &op_model_power4,
273#endif
274 },
275 { /* BE DD1.x */
276 .pvr_mask = 0xffff0000,
277 .pvr_value = 0x00700000,
278 .cpu_name = "Cell Broadband Engine",
279 .cpu_features = CPU_FTRS_CELL,
280 .cpu_user_features = COMMON_USER_PPC64 |
281 PPC_FEATURE_HAS_ALTIVEC_COMP,
282 .icache_bsize = 128,
283 .dcache_bsize = 128,
284 .cpu_setup = __setup_cpu_be,
285 },
286 { /* default match */
287 .pvr_mask = 0x00000000,
288 .pvr_value = 0x00000000,
289 .cpu_name = "POWER4 (compatible)",
290 .cpu_features = CPU_FTRS_COMPATIBLE,
291 .cpu_user_features = COMMON_USER_PPC64,
292 .icache_bsize = 128,
293 .dcache_bsize = 128,
294 .num_pmcs = 6,
295 .cpu_setup = __setup_cpu_power4,
296 }
297#endif /* CONFIG_PPC64 */
298#ifdef CONFIG_PPC32
86#if CLASSIC_PPC 299#if CLASSIC_PPC
87 { /* 601 */ 300 { /* 601 */
88 .pvr_mask = 0xffff0000, 301 .pvr_mask = 0xffff0000,
89 .pvr_value = 0x00010000, 302 .pvr_value = 0x00010000,
90 .cpu_name = "601", 303 .cpu_name = "601",
91 .cpu_features = CPU_FTR_COMMON | CPU_FTR_601 | 304 .cpu_features = CPU_FTRS_PPC601,
92 CPU_FTR_HPTE_TABLE, 305 .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
93 .cpu_user_features = COMMON_PPC | PPC_FEATURE_601_INSTR |
94 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, 306 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
95 .icache_bsize = 32, 307 .icache_bsize = 32,
96 .dcache_bsize = 32, 308 .dcache_bsize = 32,
97 .cpu_setup = __setup_cpu_601
98 }, 309 },
99 { /* 603 */ 310 { /* 603 */
100 .pvr_mask = 0xffff0000, 311 .pvr_mask = 0xffff0000,
101 .pvr_value = 0x00030000, 312 .pvr_value = 0x00030000,
102 .cpu_name = "603", 313 .cpu_name = "603",
103 .cpu_features = CPU_FTR_COMMON | 314 .cpu_features = CPU_FTRS_603,
104 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 315 .cpu_user_features = COMMON_USER,
105 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
106 .cpu_user_features = COMMON_PPC,
107 .icache_bsize = 32, 316 .icache_bsize = 32,
108 .dcache_bsize = 32, 317 .dcache_bsize = 32,
109 .cpu_setup = __setup_cpu_603 318 .cpu_setup = __setup_cpu_603
@@ -112,10 +321,8 @@ struct cpu_spec cpu_specs[] = {
112 .pvr_mask = 0xffff0000, 321 .pvr_mask = 0xffff0000,
113 .pvr_value = 0x00060000, 322 .pvr_value = 0x00060000,
114 .cpu_name = "603e", 323 .cpu_name = "603e",
115 .cpu_features = CPU_FTR_COMMON | 324 .cpu_features = CPU_FTRS_603,
116 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 325 .cpu_user_features = COMMON_USER,
117 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
118 .cpu_user_features = COMMON_PPC,
119 .icache_bsize = 32, 326 .icache_bsize = 32,
120 .dcache_bsize = 32, 327 .dcache_bsize = 32,
121 .cpu_setup = __setup_cpu_603 328 .cpu_setup = __setup_cpu_603
@@ -124,10 +331,8 @@ struct cpu_spec cpu_specs[] = {
124 .pvr_mask = 0xffff0000, 331 .pvr_mask = 0xffff0000,
125 .pvr_value = 0x00070000, 332 .pvr_value = 0x00070000,
126 .cpu_name = "603ev", 333 .cpu_name = "603ev",
127 .cpu_features = CPU_FTR_COMMON | 334 .cpu_features = CPU_FTRS_603,
128 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 335 .cpu_user_features = COMMON_USER,
129 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
130 .cpu_user_features = COMMON_PPC,
131 .icache_bsize = 32, 336 .icache_bsize = 32,
132 .dcache_bsize = 32, 337 .dcache_bsize = 32,
133 .cpu_setup = __setup_cpu_603 338 .cpu_setup = __setup_cpu_603
@@ -136,10 +341,8 @@ struct cpu_spec cpu_specs[] = {
136 .pvr_mask = 0xffff0000, 341 .pvr_mask = 0xffff0000,
137 .pvr_value = 0x00040000, 342 .pvr_value = 0x00040000,
138 .cpu_name = "604", 343 .cpu_name = "604",
139 .cpu_features = CPU_FTR_COMMON | 344 .cpu_features = CPU_FTRS_604,
140 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 345 .cpu_user_features = COMMON_USER,
141 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
142 .cpu_user_features = COMMON_PPC,
143 .icache_bsize = 32, 346 .icache_bsize = 32,
144 .dcache_bsize = 32, 347 .dcache_bsize = 32,
145 .num_pmcs = 2, 348 .num_pmcs = 2,
@@ -149,10 +352,8 @@ struct cpu_spec cpu_specs[] = {
149 .pvr_mask = 0xfffff000, 352 .pvr_mask = 0xfffff000,
150 .pvr_value = 0x00090000, 353 .pvr_value = 0x00090000,
151 .cpu_name = "604e", 354 .cpu_name = "604e",
152 .cpu_features = CPU_FTR_COMMON | 355 .cpu_features = CPU_FTRS_604,
153 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 356 .cpu_user_features = COMMON_USER,
154 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
155 .cpu_user_features = COMMON_PPC,
156 .icache_bsize = 32, 357 .icache_bsize = 32,
157 .dcache_bsize = 32, 358 .dcache_bsize = 32,
158 .num_pmcs = 4, 359 .num_pmcs = 4,
@@ -162,10 +363,8 @@ struct cpu_spec cpu_specs[] = {
162 .pvr_mask = 0xffff0000, 363 .pvr_mask = 0xffff0000,
163 .pvr_value = 0x00090000, 364 .pvr_value = 0x00090000,
164 .cpu_name = "604r", 365 .cpu_name = "604r",
165 .cpu_features = CPU_FTR_COMMON | 366 .cpu_features = CPU_FTRS_604,
166 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 367 .cpu_user_features = COMMON_USER,
167 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
168 .cpu_user_features = COMMON_PPC,
169 .icache_bsize = 32, 368 .icache_bsize = 32,
170 .dcache_bsize = 32, 369 .dcache_bsize = 32,
171 .num_pmcs = 4, 370 .num_pmcs = 4,
@@ -175,10 +374,8 @@ struct cpu_spec cpu_specs[] = {
175 .pvr_mask = 0xffff0000, 374 .pvr_mask = 0xffff0000,
176 .pvr_value = 0x000a0000, 375 .pvr_value = 0x000a0000,
177 .cpu_name = "604ev", 376 .cpu_name = "604ev",
178 .cpu_features = CPU_FTR_COMMON | 377 .cpu_features = CPU_FTRS_604,
179 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 378 .cpu_user_features = COMMON_USER,
180 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
181 .cpu_user_features = COMMON_PPC,
182 .icache_bsize = 32, 379 .icache_bsize = 32,
183 .dcache_bsize = 32, 380 .dcache_bsize = 32,
184 .num_pmcs = 4, 381 .num_pmcs = 4,
@@ -188,11 +385,8 @@ struct cpu_spec cpu_specs[] = {
188 .pvr_mask = 0xffffffff, 385 .pvr_mask = 0xffffffff,
189 .pvr_value = 0x00084202, 386 .pvr_value = 0x00084202,
190 .cpu_name = "740/750", 387 .cpu_name = "740/750",
191 .cpu_features = CPU_FTR_COMMON | 388 .cpu_features = CPU_FTRS_740_NOTAU,
192 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 389 .cpu_user_features = COMMON_USER,
193 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_HPTE_TABLE |
194 CPU_FTR_MAYBE_CAN_NAP,
195 .cpu_user_features = COMMON_PPC,
196 .icache_bsize = 32, 390 .icache_bsize = 32,
197 .dcache_bsize = 32, 391 .dcache_bsize = 32,
198 .num_pmcs = 4, 392 .num_pmcs = 4,
@@ -202,11 +396,8 @@ struct cpu_spec cpu_specs[] = {
202 .pvr_mask = 0xfffffff0, 396 .pvr_mask = 0xfffffff0,
203 .pvr_value = 0x00080100, 397 .pvr_value = 0x00080100,
204 .cpu_name = "750CX", 398 .cpu_name = "750CX",
205 .cpu_features = CPU_FTR_COMMON | 399 .cpu_features = CPU_FTRS_750,
206 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 400 .cpu_user_features = COMMON_USER,
207 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
208 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
209 .cpu_user_features = COMMON_PPC,
210 .icache_bsize = 32, 401 .icache_bsize = 32,
211 .dcache_bsize = 32, 402 .dcache_bsize = 32,
212 .num_pmcs = 4, 403 .num_pmcs = 4,
@@ -216,11 +407,8 @@ struct cpu_spec cpu_specs[] = {
216 .pvr_mask = 0xfffffff0, 407 .pvr_mask = 0xfffffff0,
217 .pvr_value = 0x00082200, 408 .pvr_value = 0x00082200,
218 .cpu_name = "750CX", 409 .cpu_name = "750CX",
219 .cpu_features = CPU_FTR_COMMON | 410 .cpu_features = CPU_FTRS_750,
220 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 411 .cpu_user_features = COMMON_USER,
221 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
222 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
223 .cpu_user_features = COMMON_PPC,
224 .icache_bsize = 32, 412 .icache_bsize = 32,
225 .dcache_bsize = 32, 413 .dcache_bsize = 32,
226 .num_pmcs = 4, 414 .num_pmcs = 4,
@@ -230,11 +418,8 @@ struct cpu_spec cpu_specs[] = {
230 .pvr_mask = 0xfffffff0, 418 .pvr_mask = 0xfffffff0,
231 .pvr_value = 0x00082210, 419 .pvr_value = 0x00082210,
232 .cpu_name = "750CXe", 420 .cpu_name = "750CXe",
233 .cpu_features = CPU_FTR_COMMON | 421 .cpu_features = CPU_FTRS_750,
234 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 422 .cpu_user_features = COMMON_USER,
235 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
236 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
237 .cpu_user_features = COMMON_PPC,
238 .icache_bsize = 32, 423 .icache_bsize = 32,
239 .dcache_bsize = 32, 424 .dcache_bsize = 32,
240 .num_pmcs = 4, 425 .num_pmcs = 4,
@@ -244,11 +429,8 @@ struct cpu_spec cpu_specs[] = {
244 .pvr_mask = 0xffffffff, 429 .pvr_mask = 0xffffffff,
245 .pvr_value = 0x00083214, 430 .pvr_value = 0x00083214,
246 .cpu_name = "750CXe", 431 .cpu_name = "750CXe",
247 .cpu_features = CPU_FTR_COMMON | 432 .cpu_features = CPU_FTRS_750,
248 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 433 .cpu_user_features = COMMON_USER,
249 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
250 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
251 .cpu_user_features = COMMON_PPC,
252 .icache_bsize = 32, 434 .icache_bsize = 32,
253 .dcache_bsize = 32, 435 .dcache_bsize = 32,
254 .num_pmcs = 4, 436 .num_pmcs = 4,
@@ -258,11 +440,8 @@ struct cpu_spec cpu_specs[] = {
258 .pvr_mask = 0xfffff000, 440 .pvr_mask = 0xfffff000,
259 .pvr_value = 0x00083000, 441 .pvr_value = 0x00083000,
260 .cpu_name = "745/755", 442 .cpu_name = "745/755",
261 .cpu_features = CPU_FTR_COMMON | 443 .cpu_features = CPU_FTRS_750,
262 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 444 .cpu_user_features = COMMON_USER,
263 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
264 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
265 .cpu_user_features = COMMON_PPC,
266 .icache_bsize = 32, 445 .icache_bsize = 32,
267 .dcache_bsize = 32, 446 .dcache_bsize = 32,
268 .num_pmcs = 4, 447 .num_pmcs = 4,
@@ -272,12 +451,8 @@ struct cpu_spec cpu_specs[] = {
272 .pvr_mask = 0xffffff00, 451 .pvr_mask = 0xffffff00,
273 .pvr_value = 0x70000100, 452 .pvr_value = 0x70000100,
274 .cpu_name = "750FX", 453 .cpu_name = "750FX",
275 .cpu_features = CPU_FTR_COMMON | 454 .cpu_features = CPU_FTRS_750FX1,
276 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 455 .cpu_user_features = COMMON_USER,
277 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
278 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
279 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
280 .cpu_user_features = COMMON_PPC,
281 .icache_bsize = 32, 456 .icache_bsize = 32,
282 .dcache_bsize = 32, 457 .dcache_bsize = 32,
283 .num_pmcs = 4, 458 .num_pmcs = 4,
@@ -287,12 +462,8 @@ struct cpu_spec cpu_specs[] = {
287 .pvr_mask = 0xffffffff, 462 .pvr_mask = 0xffffffff,
288 .pvr_value = 0x70000200, 463 .pvr_value = 0x70000200,
289 .cpu_name = "750FX", 464 .cpu_name = "750FX",
290 .cpu_features = CPU_FTR_COMMON | 465 .cpu_features = CPU_FTRS_750FX2,
291 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 466 .cpu_user_features = COMMON_USER,
292 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
293 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
294 CPU_FTR_NO_DPM,
295 .cpu_user_features = COMMON_PPC,
296 .icache_bsize = 32, 467 .icache_bsize = 32,
297 .dcache_bsize = 32, 468 .dcache_bsize = 32,
298 .num_pmcs = 4, 469 .num_pmcs = 4,
@@ -302,12 +473,8 @@ struct cpu_spec cpu_specs[] = {
302 .pvr_mask = 0xffff0000, 473 .pvr_mask = 0xffff0000,
303 .pvr_value = 0x70000000, 474 .pvr_value = 0x70000000,
304 .cpu_name = "750FX", 475 .cpu_name = "750FX",
305 .cpu_features = CPU_FTR_COMMON | 476 .cpu_features = CPU_FTRS_750FX,
306 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 477 .cpu_user_features = COMMON_USER,
307 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
308 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
309 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
310 .cpu_user_features = COMMON_PPC,
311 .icache_bsize = 32, 478 .icache_bsize = 32,
312 .dcache_bsize = 32, 479 .dcache_bsize = 32,
313 .num_pmcs = 4, 480 .num_pmcs = 4,
@@ -317,12 +484,8 @@ struct cpu_spec cpu_specs[] = {
317 .pvr_mask = 0xffff0000, 484 .pvr_mask = 0xffff0000,
318 .pvr_value = 0x70020000, 485 .pvr_value = 0x70020000,
319 .cpu_name = "750GX", 486 .cpu_name = "750GX",
320 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 487 .cpu_features = CPU_FTRS_750GX,
321 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 488 .cpu_user_features = COMMON_USER,
322 CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE |
323 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_DUAL_PLL_750FX |
324 CPU_FTR_HAS_HIGH_BATS,
325 .cpu_user_features = COMMON_PPC,
326 .icache_bsize = 32, 489 .icache_bsize = 32,
327 .dcache_bsize = 32, 490 .dcache_bsize = 32,
328 .num_pmcs = 4, 491 .num_pmcs = 4,
@@ -332,11 +495,8 @@ struct cpu_spec cpu_specs[] = {
332 .pvr_mask = 0xffff0000, 495 .pvr_mask = 0xffff0000,
333 .pvr_value = 0x00080000, 496 .pvr_value = 0x00080000,
334 .cpu_name = "740/750", 497 .cpu_name = "740/750",
335 .cpu_features = CPU_FTR_COMMON | 498 .cpu_features = CPU_FTRS_740,
336 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 499 .cpu_user_features = COMMON_USER,
337 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
338 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
339 .cpu_user_features = COMMON_PPC,
340 .icache_bsize = 32, 500 .icache_bsize = 32,
341 .dcache_bsize = 32, 501 .dcache_bsize = 32,
342 .num_pmcs = 4, 502 .num_pmcs = 4,
@@ -346,11 +506,8 @@ struct cpu_spec cpu_specs[] = {
346 .pvr_mask = 0xffffffff, 506 .pvr_mask = 0xffffffff,
347 .pvr_value = 0x000c1101, 507 .pvr_value = 0x000c1101,
348 .cpu_name = "7400 (1.1)", 508 .cpu_name = "7400 (1.1)",
349 .cpu_features = CPU_FTR_COMMON | 509 .cpu_features = CPU_FTRS_7400_NOTAU,
350 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 510 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
351 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
352 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
353 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
354 .icache_bsize = 32, 511 .icache_bsize = 32,
355 .dcache_bsize = 32, 512 .dcache_bsize = 32,
356 .num_pmcs = 4, 513 .num_pmcs = 4,
@@ -360,12 +517,8 @@ struct cpu_spec cpu_specs[] = {
360 .pvr_mask = 0xffff0000, 517 .pvr_mask = 0xffff0000,
361 .pvr_value = 0x000c0000, 518 .pvr_value = 0x000c0000,
362 .cpu_name = "7400", 519 .cpu_name = "7400",
363 .cpu_features = CPU_FTR_COMMON | 520 .cpu_features = CPU_FTRS_7400,
364 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 521 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
365 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
366 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
367 CPU_FTR_MAYBE_CAN_NAP,
368 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
369 .icache_bsize = 32, 522 .icache_bsize = 32,
370 .dcache_bsize = 32, 523 .dcache_bsize = 32,
371 .num_pmcs = 4, 524 .num_pmcs = 4,
@@ -375,12 +528,8 @@ struct cpu_spec cpu_specs[] = {
375 .pvr_mask = 0xffff0000, 528 .pvr_mask = 0xffff0000,
376 .pvr_value = 0x800c0000, 529 .pvr_value = 0x800c0000,
377 .cpu_name = "7410", 530 .cpu_name = "7410",
378 .cpu_features = CPU_FTR_COMMON | 531 .cpu_features = CPU_FTRS_7400,
379 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 532 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
380 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
381 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
382 CPU_FTR_MAYBE_CAN_NAP,
383 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
384 .icache_bsize = 32, 533 .icache_bsize = 32,
385 .dcache_bsize = 32, 534 .dcache_bsize = 32,
386 .num_pmcs = 4, 535 .num_pmcs = 4,
@@ -390,12 +539,8 @@ struct cpu_spec cpu_specs[] = {
390 .pvr_mask = 0xffffffff, 539 .pvr_mask = 0xffffffff,
391 .pvr_value = 0x80000200, 540 .pvr_value = 0x80000200,
392 .cpu_name = "7450", 541 .cpu_name = "7450",
393 .cpu_features = CPU_FTR_COMMON | 542 .cpu_features = CPU_FTRS_7450_20,
394 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 543 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
395 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
396 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
397 CPU_FTR_NEED_COHERENT,
398 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
399 .icache_bsize = 32, 544 .icache_bsize = 32,
400 .dcache_bsize = 32, 545 .dcache_bsize = 32,
401 .num_pmcs = 6, 546 .num_pmcs = 6,
@@ -405,14 +550,8 @@ struct cpu_spec cpu_specs[] = {
405 .pvr_mask = 0xffffffff, 550 .pvr_mask = 0xffffffff,
406 .pvr_value = 0x80000201, 551 .pvr_value = 0x80000201,
407 .cpu_name = "7450", 552 .cpu_name = "7450",
408 .cpu_features = CPU_FTR_COMMON | 553 .cpu_features = CPU_FTRS_7450_21,
409 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 554 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
410 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
411 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
412 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
413 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
414 CPU_FTR_NEED_COHERENT,
415 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
416 .icache_bsize = 32, 555 .icache_bsize = 32,
417 .dcache_bsize = 32, 556 .dcache_bsize = 32,
418 .num_pmcs = 6, 557 .num_pmcs = 6,
@@ -422,13 +561,8 @@ struct cpu_spec cpu_specs[] = {
422 .pvr_mask = 0xffff0000, 561 .pvr_mask = 0xffff0000,
423 .pvr_value = 0x80000000, 562 .pvr_value = 0x80000000,
424 .cpu_name = "7450", 563 .cpu_name = "7450",
425 .cpu_features = CPU_FTR_COMMON | 564 .cpu_features = CPU_FTRS_7450_23,
426 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 565 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
427 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
428 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
429 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
430 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
431 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
432 .icache_bsize = 32, 566 .icache_bsize = 32,
433 .dcache_bsize = 32, 567 .dcache_bsize = 32,
434 .num_pmcs = 6, 568 .num_pmcs = 6,
@@ -438,12 +572,8 @@ struct cpu_spec cpu_specs[] = {
438 .pvr_mask = 0xffffff00, 572 .pvr_mask = 0xffffff00,
439 .pvr_value = 0x80010100, 573 .pvr_value = 0x80010100,
440 .cpu_name = "7455", 574 .cpu_name = "7455",
441 .cpu_features = CPU_FTR_COMMON | 575 .cpu_features = CPU_FTRS_7455_1,
442 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 576 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
443 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
444 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
445 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
446 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
447 .icache_bsize = 32, 577 .icache_bsize = 32,
448 .dcache_bsize = 32, 578 .dcache_bsize = 32,
449 .num_pmcs = 6, 579 .num_pmcs = 6,
@@ -453,14 +583,8 @@ struct cpu_spec cpu_specs[] = {
453 .pvr_mask = 0xffffffff, 583 .pvr_mask = 0xffffffff,
454 .pvr_value = 0x80010200, 584 .pvr_value = 0x80010200,
455 .cpu_name = "7455", 585 .cpu_name = "7455",
456 .cpu_features = CPU_FTR_COMMON | 586 .cpu_features = CPU_FTRS_7455_20,
457 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 587 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
458 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
459 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
460 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
461 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
462 CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
463 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
464 .icache_bsize = 32, 588 .icache_bsize = 32,
465 .dcache_bsize = 32, 589 .dcache_bsize = 32,
466 .num_pmcs = 6, 590 .num_pmcs = 6,
@@ -470,14 +594,8 @@ struct cpu_spec cpu_specs[] = {
470 .pvr_mask = 0xffff0000, 594 .pvr_mask = 0xffff0000,
471 .pvr_value = 0x80010000, 595 .pvr_value = 0x80010000,
472 .cpu_name = "7455", 596 .cpu_name = "7455",
473 .cpu_features = CPU_FTR_COMMON | 597 .cpu_features = CPU_FTRS_7455,
474 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 598 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
475 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
476 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
477 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
478 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
479 CPU_FTR_NEED_COHERENT,
480 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
481 .icache_bsize = 32, 599 .icache_bsize = 32,
482 .dcache_bsize = 32, 600 .dcache_bsize = 32,
483 .num_pmcs = 6, 601 .num_pmcs = 6,
@@ -487,14 +605,8 @@ struct cpu_spec cpu_specs[] = {
487 .pvr_mask = 0xffffffff, 605 .pvr_mask = 0xffffffff,
488 .pvr_value = 0x80020100, 606 .pvr_value = 0x80020100,
489 .cpu_name = "7447/7457", 607 .cpu_name = "7447/7457",
490 .cpu_features = CPU_FTR_COMMON | 608 .cpu_features = CPU_FTRS_7447_10,
491 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 609 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
492 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
493 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
494 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
495 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
496 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
497 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
498 .icache_bsize = 32, 610 .icache_bsize = 32,
499 .dcache_bsize = 32, 611 .dcache_bsize = 32,
500 .num_pmcs = 6, 612 .num_pmcs = 6,
@@ -504,14 +616,8 @@ struct cpu_spec cpu_specs[] = {
504 .pvr_mask = 0xffffffff, 616 .pvr_mask = 0xffffffff,
505 .pvr_value = 0x80020101, 617 .pvr_value = 0x80020101,
506 .cpu_name = "7447/7457", 618 .cpu_name = "7447/7457",
507 .cpu_features = CPU_FTR_COMMON | 619 .cpu_features = CPU_FTRS_7447_10,
508 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 620 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
509 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
510 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
511 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
512 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
513 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
514 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
515 .icache_bsize = 32, 621 .icache_bsize = 32,
516 .dcache_bsize = 32, 622 .dcache_bsize = 32,
517 .num_pmcs = 6, 623 .num_pmcs = 6,
@@ -521,14 +627,8 @@ struct cpu_spec cpu_specs[] = {
521 .pvr_mask = 0xffff0000, 627 .pvr_mask = 0xffff0000,
522 .pvr_value = 0x80020000, 628 .pvr_value = 0x80020000,
523 .cpu_name = "7447/7457", 629 .cpu_name = "7447/7457",
524 .cpu_features = CPU_FTR_COMMON | 630 .cpu_features = CPU_FTRS_7447,
525 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 631 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
526 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
527 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
528 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
529 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
530 CPU_FTR_NEED_COHERENT,
531 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
532 .icache_bsize = 32, 632 .icache_bsize = 32,
533 .dcache_bsize = 32, 633 .dcache_bsize = 32,
534 .num_pmcs = 6, 634 .num_pmcs = 6,
@@ -538,13 +638,8 @@ struct cpu_spec cpu_specs[] = {
538 .pvr_mask = 0xffff0000, 638 .pvr_mask = 0xffff0000,
539 .pvr_value = 0x80030000, 639 .pvr_value = 0x80030000,
540 .cpu_name = "7447A", 640 .cpu_name = "7447A",
541 .cpu_features = CPU_FTR_COMMON | 641 .cpu_features = CPU_FTRS_7447A,
542 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 642 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
543 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
544 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
545 CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
546 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
547 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
548 .icache_bsize = 32, 643 .icache_bsize = 32,
549 .dcache_bsize = 32, 644 .dcache_bsize = 32,
550 .num_pmcs = 6, 645 .num_pmcs = 6,
@@ -554,13 +649,8 @@ struct cpu_spec cpu_specs[] = {
554 .pvr_mask = 0xffff0000, 649 .pvr_mask = 0xffff0000,
555 .pvr_value = 0x80040000, 650 .pvr_value = 0x80040000,
556 .cpu_name = "7448", 651 .cpu_name = "7448",
557 .cpu_features = CPU_FTR_COMMON | 652 .cpu_features = CPU_FTRS_7447A,
558 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 653 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
559 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
560 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
561 CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
562 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
563 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
564 .icache_bsize = 32, 654 .icache_bsize = 32,
565 .dcache_bsize = 32, 655 .dcache_bsize = 32,
566 .num_pmcs = 6, 656 .num_pmcs = 6,
@@ -570,10 +660,8 @@ struct cpu_spec cpu_specs[] = {
570 .pvr_mask = 0x7fff0000, 660 .pvr_mask = 0x7fff0000,
571 .pvr_value = 0x00810000, 661 .pvr_value = 0x00810000,
572 .cpu_name = "82xx", 662 .cpu_name = "82xx",
573 .cpu_features = CPU_FTR_COMMON | 663 .cpu_features = CPU_FTRS_82XX,
574 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 664 .cpu_user_features = COMMON_USER,
575 CPU_FTR_USE_TB,
576 .cpu_user_features = COMMON_PPC,
577 .icache_bsize = 32, 665 .icache_bsize = 32,
578 .dcache_bsize = 32, 666 .dcache_bsize = 32,
579 .cpu_setup = __setup_cpu_603 667 .cpu_setup = __setup_cpu_603
@@ -582,10 +670,8 @@ struct cpu_spec cpu_specs[] = {
582 .pvr_mask = 0x7fff0000, 670 .pvr_mask = 0x7fff0000,
583 .pvr_value = 0x00820000, 671 .pvr_value = 0x00820000,
584 .cpu_name = "G2_LE", 672 .cpu_name = "G2_LE",
585 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 673 .cpu_features = CPU_FTRS_G2_LE,
586 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 674 .cpu_user_features = COMMON_USER,
587 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
588 .cpu_user_features = COMMON_PPC,
589 .icache_bsize = 32, 675 .icache_bsize = 32,
590 .dcache_bsize = 32, 676 .dcache_bsize = 32,
591 .cpu_setup = __setup_cpu_603 677 .cpu_setup = __setup_cpu_603
@@ -594,10 +680,8 @@ struct cpu_spec cpu_specs[] = {
594 .pvr_mask = 0x7fff0000, 680 .pvr_mask = 0x7fff0000,
595 .pvr_value = 0x00830000, 681 .pvr_value = 0x00830000,
596 .cpu_name = "e300", 682 .cpu_name = "e300",
597 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 683 .cpu_features = CPU_FTRS_E300,
598 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 684 .cpu_user_features = COMMON_USER,
599 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
600 .cpu_user_features = COMMON_PPC,
601 .icache_bsize = 32, 685 .icache_bsize = 32,
602 .dcache_bsize = 32, 686 .dcache_bsize = 32,
603 .cpu_setup = __setup_cpu_603 687 .cpu_setup = __setup_cpu_603
@@ -606,114 +690,12 @@ struct cpu_spec cpu_specs[] = {
606 .pvr_mask = 0x00000000, 690 .pvr_mask = 0x00000000,
607 .pvr_value = 0x00000000, 691 .pvr_value = 0x00000000,
608 .cpu_name = "(generic PPC)", 692 .cpu_name = "(generic PPC)",
609 .cpu_features = CPU_FTR_COMMON | 693 .cpu_features = CPU_FTRS_CLASSIC32,
610 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 694 .cpu_user_features = COMMON_USER,
611 CPU_FTR_HPTE_TABLE,
612 .cpu_user_features = COMMON_PPC,
613 .icache_bsize = 32, 695 .icache_bsize = 32,
614 .dcache_bsize = 32, 696 .dcache_bsize = 32,
615 .cpu_setup = __setup_cpu_generic
616 }, 697 },
617#endif /* CLASSIC_PPC */ 698#endif /* CLASSIC_PPC */
618#ifdef CONFIG_PPC64BRIDGE
619 { /* Power3 */
620 .pvr_mask = 0xffff0000,
621 .pvr_value = 0x00400000,
622 .cpu_name = "Power3 (630)",
623 .cpu_features = CPU_FTR_COMMON |
624 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
625 CPU_FTR_HPTE_TABLE,
626 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
627 .icache_bsize = 128,
628 .dcache_bsize = 128,
629 .num_pmcs = 8,
630 .cpu_setup = __setup_cpu_power3
631 },
632 { /* Power3+ */
633 .pvr_mask = 0xffff0000,
634 .pvr_value = 0x00410000,
635 .cpu_name = "Power3 (630+)",
636 .cpu_features = CPU_FTR_COMMON |
637 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
638 CPU_FTR_HPTE_TABLE,
639 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
640 .icache_bsize = 128,
641 .dcache_bsize = 128,
642 .num_pmcs = 8,
643 .cpu_setup = __setup_cpu_power3
644 },
645 { /* I-star */
646 .pvr_mask = 0xffff0000,
647 .pvr_value = 0x00360000,
648 .cpu_name = "I-star",
649 .cpu_features = CPU_FTR_COMMON |
650 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
651 CPU_FTR_HPTE_TABLE,
652 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
653 .icache_bsize = 128,
654 .dcache_bsize = 128,
655 .num_pmcs = 8,
656 .cpu_setup = __setup_cpu_power3
657 },
658 { /* S-star */
659 .pvr_mask = 0xffff0000,
660 .pvr_value = 0x00370000,
661 .cpu_name = "S-star",
662 .cpu_features = CPU_FTR_COMMON |
663 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
664 CPU_FTR_HPTE_TABLE,
665 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
666 .icache_bsize = 128,
667 .dcache_bsize = 128,
668 .num_pmcs = 8,
669 .cpu_setup = __setup_cpu_power3
670 },
671#endif /* CONFIG_PPC64BRIDGE */
672#ifdef CONFIG_POWER4
673 { /* Power4 */
674 .pvr_mask = 0xffff0000,
675 .pvr_value = 0x00350000,
676 .cpu_name = "Power4",
677 .cpu_features = CPU_FTR_COMMON |
678 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
679 CPU_FTR_HPTE_TABLE,
680 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
681 .icache_bsize = 128,
682 .dcache_bsize = 128,
683 .num_pmcs = 8,
684 .cpu_setup = __setup_cpu_power4
685 },
686 { /* PPC970 */
687 .pvr_mask = 0xffff0000,
688 .pvr_value = 0x00390000,
689 .cpu_name = "PPC970",
690 .cpu_features = CPU_FTR_COMMON |
691 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
692 CPU_FTR_HPTE_TABLE |
693 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
694 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 |
695 PPC_FEATURE_ALTIVEC_COMP,
696 .icache_bsize = 128,
697 .dcache_bsize = 128,
698 .num_pmcs = 8,
699 .cpu_setup = __setup_cpu_ppc970
700 },
701 { /* PPC970FX */
702 .pvr_mask = 0xffff0000,
703 .pvr_value = 0x003c0000,
704 .cpu_name = "PPC970FX",
705 .cpu_features = CPU_FTR_COMMON |
706 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
707 CPU_FTR_HPTE_TABLE |
708 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
709 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 |
710 PPC_FEATURE_ALTIVEC_COMP,
711 .icache_bsize = 128,
712 .dcache_bsize = 128,
713 .num_pmcs = 8,
714 .cpu_setup = __setup_cpu_ppc970
715 },
716#endif /* CONFIG_POWER4 */
717#ifdef CONFIG_8xx 699#ifdef CONFIG_8xx
718 { /* 8xx */ 700 { /* 8xx */
719 .pvr_mask = 0xffff0000, 701 .pvr_mask = 0xffff0000,
@@ -721,8 +703,7 @@ struct cpu_spec cpu_specs[] = {
721 .cpu_name = "8xx", 703 .cpu_name = "8xx",
722 /* CPU_FTR_MAYBE_CAN_DOZE is possible, 704 /* CPU_FTR_MAYBE_CAN_DOZE is possible,
723 * if the 8xx code is there.... */ 705 * if the 8xx code is there.... */
724 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 706 .cpu_features = CPU_FTRS_8XX,
725 CPU_FTR_USE_TB,
726 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 707 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
727 .icache_bsize = 16, 708 .icache_bsize = 16,
728 .dcache_bsize = 16, 709 .dcache_bsize = 16,
@@ -733,8 +714,7 @@ struct cpu_spec cpu_specs[] = {
733 .pvr_mask = 0xffffff00, 714 .pvr_mask = 0xffffff00,
734 .pvr_value = 0x00200200, 715 .pvr_value = 0x00200200,
735 .cpu_name = "403GC", 716 .cpu_name = "403GC",
736 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 717 .cpu_features = CPU_FTRS_40X,
737 CPU_FTR_USE_TB,
738 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 718 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
739 .icache_bsize = 16, 719 .icache_bsize = 16,
740 .dcache_bsize = 16, 720 .dcache_bsize = 16,
@@ -743,8 +723,7 @@ struct cpu_spec cpu_specs[] = {
743 .pvr_mask = 0xffffff00, 723 .pvr_mask = 0xffffff00,
744 .pvr_value = 0x00201400, 724 .pvr_value = 0x00201400,
745 .cpu_name = "403GCX", 725 .cpu_name = "403GCX",
746 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 726 .cpu_features = CPU_FTRS_40X,
747 CPU_FTR_USE_TB,
748 .cpu_user_features = PPC_FEATURE_32 | 727 .cpu_user_features = PPC_FEATURE_32 |
749 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, 728 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
750 .icache_bsize = 16, 729 .icache_bsize = 16,
@@ -754,8 +733,7 @@ struct cpu_spec cpu_specs[] = {
754 .pvr_mask = 0xffff0000, 733 .pvr_mask = 0xffff0000,
755 .pvr_value = 0x00200000, 734 .pvr_value = 0x00200000,
756 .cpu_name = "403G ??", 735 .cpu_name = "403G ??",
757 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 736 .cpu_features = CPU_FTRS_40X,
758 CPU_FTR_USE_TB,
759 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 737 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
760 .icache_bsize = 16, 738 .icache_bsize = 16,
761 .dcache_bsize = 16, 739 .dcache_bsize = 16,
@@ -764,8 +742,7 @@ struct cpu_spec cpu_specs[] = {
764 .pvr_mask = 0xffff0000, 742 .pvr_mask = 0xffff0000,
765 .pvr_value = 0x40110000, 743 .pvr_value = 0x40110000,
766 .cpu_name = "405GP", 744 .cpu_name = "405GP",
767 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 745 .cpu_features = CPU_FTRS_40X,
768 CPU_FTR_USE_TB,
769 .cpu_user_features = PPC_FEATURE_32 | 746 .cpu_user_features = PPC_FEATURE_32 |
770 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 747 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
771 .icache_bsize = 32, 748 .icache_bsize = 32,
@@ -775,8 +752,7 @@ struct cpu_spec cpu_specs[] = {
775 .pvr_mask = 0xffff0000, 752 .pvr_mask = 0xffff0000,
776 .pvr_value = 0x40130000, 753 .pvr_value = 0x40130000,
777 .cpu_name = "STB03xxx", 754 .cpu_name = "STB03xxx",
778 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 755 .cpu_features = CPU_FTRS_40X,
779 CPU_FTR_USE_TB,
780 .cpu_user_features = PPC_FEATURE_32 | 756 .cpu_user_features = PPC_FEATURE_32 |
781 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 757 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
782 .icache_bsize = 32, 758 .icache_bsize = 32,
@@ -786,8 +762,7 @@ struct cpu_spec cpu_specs[] = {
786 .pvr_mask = 0xffff0000, 762 .pvr_mask = 0xffff0000,
787 .pvr_value = 0x41810000, 763 .pvr_value = 0x41810000,
788 .cpu_name = "STB04xxx", 764 .cpu_name = "STB04xxx",
789 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 765 .cpu_features = CPU_FTRS_40X,
790 CPU_FTR_USE_TB,
791 .cpu_user_features = PPC_FEATURE_32 | 766 .cpu_user_features = PPC_FEATURE_32 |
792 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 767 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
793 .icache_bsize = 32, 768 .icache_bsize = 32,
@@ -797,8 +772,7 @@ struct cpu_spec cpu_specs[] = {
797 .pvr_mask = 0xffff0000, 772 .pvr_mask = 0xffff0000,
798 .pvr_value = 0x41610000, 773 .pvr_value = 0x41610000,
799 .cpu_name = "NP405L", 774 .cpu_name = "NP405L",
800 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 775 .cpu_features = CPU_FTRS_40X,
801 CPU_FTR_USE_TB,
802 .cpu_user_features = PPC_FEATURE_32 | 776 .cpu_user_features = PPC_FEATURE_32 |
803 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 777 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
804 .icache_bsize = 32, 778 .icache_bsize = 32,
@@ -808,8 +782,7 @@ struct cpu_spec cpu_specs[] = {
808 .pvr_mask = 0xffff0000, 782 .pvr_mask = 0xffff0000,
809 .pvr_value = 0x40B10000, 783 .pvr_value = 0x40B10000,
810 .cpu_name = "NP4GS3", 784 .cpu_name = "NP4GS3",
811 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 785 .cpu_features = CPU_FTRS_40X,
812 CPU_FTR_USE_TB,
813 .cpu_user_features = PPC_FEATURE_32 | 786 .cpu_user_features = PPC_FEATURE_32 |
814 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 787 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
815 .icache_bsize = 32, 788 .icache_bsize = 32,
@@ -819,8 +792,7 @@ struct cpu_spec cpu_specs[] = {
819 .pvr_mask = 0xffff0000, 792 .pvr_mask = 0xffff0000,
820 .pvr_value = 0x41410000, 793 .pvr_value = 0x41410000,
821 .cpu_name = "NP405H", 794 .cpu_name = "NP405H",
822 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 795 .cpu_features = CPU_FTRS_40X,
823 CPU_FTR_USE_TB,
824 .cpu_user_features = PPC_FEATURE_32 | 796 .cpu_user_features = PPC_FEATURE_32 |
825 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 797 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
826 .icache_bsize = 32, 798 .icache_bsize = 32,
@@ -830,8 +802,7 @@ struct cpu_spec cpu_specs[] = {
830 .pvr_mask = 0xffff0000, 802 .pvr_mask = 0xffff0000,
831 .pvr_value = 0x50910000, 803 .pvr_value = 0x50910000,
832 .cpu_name = "405GPr", 804 .cpu_name = "405GPr",
833 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 805 .cpu_features = CPU_FTRS_40X,
834 CPU_FTR_USE_TB,
835 .cpu_user_features = PPC_FEATURE_32 | 806 .cpu_user_features = PPC_FEATURE_32 |
836 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 807 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
837 .icache_bsize = 32, 808 .icache_bsize = 32,
@@ -841,8 +812,7 @@ struct cpu_spec cpu_specs[] = {
841 .pvr_mask = 0xffff0000, 812 .pvr_mask = 0xffff0000,
842 .pvr_value = 0x51510000, 813 .pvr_value = 0x51510000,
843 .cpu_name = "STBx25xx", 814 .cpu_name = "STBx25xx",
844 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 815 .cpu_features = CPU_FTRS_40X,
845 CPU_FTR_USE_TB,
846 .cpu_user_features = PPC_FEATURE_32 | 816 .cpu_user_features = PPC_FEATURE_32 |
847 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 817 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
848 .icache_bsize = 32, 818 .icache_bsize = 32,
@@ -852,8 +822,7 @@ struct cpu_spec cpu_specs[] = {
852 .pvr_mask = 0xffff0000, 822 .pvr_mask = 0xffff0000,
853 .pvr_value = 0x41F10000, 823 .pvr_value = 0x41F10000,
854 .cpu_name = "405LP", 824 .cpu_name = "405LP",
855 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 825 .cpu_features = CPU_FTRS_40X,
856 CPU_FTR_USE_TB,
857 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 826 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
858 .icache_bsize = 32, 827 .icache_bsize = 32,
859 .dcache_bsize = 32, 828 .dcache_bsize = 32,
@@ -862,8 +831,7 @@ struct cpu_spec cpu_specs[] = {
862 .pvr_mask = 0xffff0000, 831 .pvr_mask = 0xffff0000,
863 .pvr_value = 0x20010000, 832 .pvr_value = 0x20010000,
864 .cpu_name = "Virtex-II Pro", 833 .cpu_name = "Virtex-II Pro",
865 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 834 .cpu_features = CPU_FTRS_40X,
866 CPU_FTR_USE_TB,
867 .cpu_user_features = PPC_FEATURE_32 | 835 .cpu_user_features = PPC_FEATURE_32 |
868 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 836 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
869 .icache_bsize = 32, 837 .icache_bsize = 32,
@@ -873,8 +841,7 @@ struct cpu_spec cpu_specs[] = {
873 .pvr_mask = 0xffff0000, 841 .pvr_mask = 0xffff0000,
874 .pvr_value = 0x51210000, 842 .pvr_value = 0x51210000,
875 .cpu_name = "405EP", 843 .cpu_name = "405EP",
876 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 844 .cpu_features = CPU_FTRS_40X,
877 CPU_FTR_USE_TB,
878 .cpu_user_features = PPC_FEATURE_32 | 845 .cpu_user_features = PPC_FEATURE_32 |
879 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 846 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
880 .icache_bsize = 32, 847 .icache_bsize = 32,
@@ -887,9 +854,8 @@ struct cpu_spec cpu_specs[] = {
887 .pvr_mask = 0xf0000fff, 854 .pvr_mask = 0xf0000fff,
888 .pvr_value = 0x40000850, 855 .pvr_value = 0x40000850,
889 .cpu_name = "440EP Rev. A", 856 .cpu_name = "440EP Rev. A",
890 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 857 .cpu_features = CPU_FTRS_44X,
891 CPU_FTR_USE_TB, 858 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
892 .cpu_user_features = COMMON_PPC, /* 440EP has an FPU */
893 .icache_bsize = 32, 859 .icache_bsize = 32,
894 .dcache_bsize = 32, 860 .dcache_bsize = 32,
895 }, 861 },
@@ -897,28 +863,25 @@ struct cpu_spec cpu_specs[] = {
897 .pvr_mask = 0xf0000fff, 863 .pvr_mask = 0xf0000fff,
898 .pvr_value = 0x400008d3, 864 .pvr_value = 0x400008d3,
899 .cpu_name = "440EP Rev. B", 865 .cpu_name = "440EP Rev. B",
900 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 866 .cpu_features = CPU_FTRS_44X,
901 CPU_FTR_USE_TB, 867 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
902 .cpu_user_features = COMMON_PPC, /* 440EP has an FPU */
903 .icache_bsize = 32, 868 .icache_bsize = 32,
904 .dcache_bsize = 32, 869 .dcache_bsize = 32,
905 }, 870 },
906 { /* 440GP Rev. B */ 871 { /* 440GP Rev. B */
907 .pvr_mask = 0xf0000fff, 872 .pvr_mask = 0xf0000fff,
908 .pvr_value = 0x40000440, 873 .pvr_value = 0x40000440,
909 .cpu_name = "440GP Rev. B", 874 .cpu_name = "440GP Rev. B",
910 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 875 .cpu_features = CPU_FTRS_44X,
911 CPU_FTR_USE_TB,
912 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 876 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
913 .icache_bsize = 32, 877 .icache_bsize = 32,
914 .dcache_bsize = 32, 878 .dcache_bsize = 32,
915 }, 879 },
916 { /* 440GP Rev. C */ 880 { /* 440GP Rev. C */
917 .pvr_mask = 0xf0000fff, 881 .pvr_mask = 0xf0000fff,
918 .pvr_value = 0x40000481, 882 .pvr_value = 0x40000481,
919 .cpu_name = "440GP Rev. C", 883 .cpu_name = "440GP Rev. C",
920 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 884 .cpu_features = CPU_FTRS_44X,
921 CPU_FTR_USE_TB,
922 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 885 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
923 .icache_bsize = 32, 886 .icache_bsize = 32,
924 .dcache_bsize = 32, 887 .dcache_bsize = 32,
@@ -927,8 +890,7 @@ struct cpu_spec cpu_specs[] = {
927 .pvr_mask = 0xf0000fff, 890 .pvr_mask = 0xf0000fff,
928 .pvr_value = 0x50000850, 891 .pvr_value = 0x50000850,
929 .cpu_name = "440GX Rev. A", 892 .cpu_name = "440GX Rev. A",
930 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 893 .cpu_features = CPU_FTRS_44X,
931 CPU_FTR_USE_TB,
932 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 894 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
933 .icache_bsize = 32, 895 .icache_bsize = 32,
934 .dcache_bsize = 32, 896 .dcache_bsize = 32,
@@ -937,8 +899,7 @@ struct cpu_spec cpu_specs[] = {
937 .pvr_mask = 0xf0000fff, 899 .pvr_mask = 0xf0000fff,
938 .pvr_value = 0x50000851, 900 .pvr_value = 0x50000851,
939 .cpu_name = "440GX Rev. B", 901 .cpu_name = "440GX Rev. B",
940 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 902 .cpu_features = CPU_FTRS_44X,
941 CPU_FTR_USE_TB,
942 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 903 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
943 .icache_bsize = 32, 904 .icache_bsize = 32,
944 .dcache_bsize = 32, 905 .dcache_bsize = 32,
@@ -947,8 +908,7 @@ struct cpu_spec cpu_specs[] = {
947 .pvr_mask = 0xf0000fff, 908 .pvr_mask = 0xf0000fff,
948 .pvr_value = 0x50000892, 909 .pvr_value = 0x50000892,
949 .cpu_name = "440GX Rev. C", 910 .cpu_name = "440GX Rev. C",
950 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 911 .cpu_features = CPU_FTRS_44X,
951 CPU_FTR_USE_TB,
952 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 912 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
953 .icache_bsize = 32, 913 .icache_bsize = 32,
954 .dcache_bsize = 32, 914 .dcache_bsize = 32,
@@ -957,8 +917,7 @@ struct cpu_spec cpu_specs[] = {
957 .pvr_mask = 0xf0000fff, 917 .pvr_mask = 0xf0000fff,
958 .pvr_value = 0x50000894, 918 .pvr_value = 0x50000894,
959 .cpu_name = "440GX Rev. F", 919 .cpu_name = "440GX Rev. F",
960 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 920 .cpu_features = CPU_FTRS_44X,
961 CPU_FTR_USE_TB,
962 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 921 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
963 .icache_bsize = 32, 922 .icache_bsize = 32,
964 .dcache_bsize = 32, 923 .dcache_bsize = 32,
@@ -967,44 +926,42 @@ struct cpu_spec cpu_specs[] = {
967 .pvr_mask = 0xff000fff, 926 .pvr_mask = 0xff000fff,
968 .pvr_value = 0x53000891, 927 .pvr_value = 0x53000891,
969 .cpu_name = "440SP Rev. A", 928 .cpu_name = "440SP Rev. A",
970 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 929 .cpu_features = CPU_FTRS_44X,
971 CPU_FTR_USE_TB,
972 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 930 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
973 .icache_bsize = 32, 931 .icache_bsize = 32,
974 .dcache_bsize = 32, 932 .dcache_bsize = 32,
975 }, 933 },
976#endif /* CONFIG_44x */ 934#endif /* CONFIG_44x */
977#ifdef CONFIG_FSL_BOOKE 935#ifdef CONFIG_FSL_BOOKE
978 { /* e200z5 */ 936 { /* e200z5 */
979 .pvr_mask = 0xfff00000, 937 .pvr_mask = 0xfff00000,
980 .pvr_value = 0x81000000, 938 .pvr_value = 0x81000000,
981 .cpu_name = "e200z5", 939 .cpu_name = "e200z5",
982 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 940 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
983 .cpu_features = CPU_FTR_USE_TB, 941 .cpu_features = CPU_FTRS_E200,
984 .cpu_user_features = PPC_FEATURE_32 | 942 .cpu_user_features = PPC_FEATURE_32 |
985 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE | 943 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
986 PPC_FEATURE_UNIFIED_CACHE, 944 PPC_FEATURE_UNIFIED_CACHE,
987 .dcache_bsize = 32, 945 .dcache_bsize = 32,
988 }, 946 },
989 { /* e200z6 */ 947 { /* e200z6 */
990 .pvr_mask = 0xfff00000, 948 .pvr_mask = 0xfff00000,
991 .pvr_value = 0x81100000, 949 .pvr_value = 0x81100000,
992 .cpu_name = "e200z6", 950 .cpu_name = "e200z6",
993 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 951 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
994 .cpu_features = CPU_FTR_USE_TB, 952 .cpu_features = CPU_FTRS_E200,
995 .cpu_user_features = PPC_FEATURE_32 | 953 .cpu_user_features = PPC_FEATURE_32 |
996 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 954 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
997 PPC_FEATURE_HAS_EFP_SINGLE | 955 PPC_FEATURE_HAS_EFP_SINGLE |
998 PPC_FEATURE_UNIFIED_CACHE, 956 PPC_FEATURE_UNIFIED_CACHE,
999 .dcache_bsize = 32, 957 .dcache_bsize = 32,
1000 }, 958 },
1001 { /* e500 */ 959 { /* e500 */
1002 .pvr_mask = 0xffff0000, 960 .pvr_mask = 0xffff0000,
1003 .pvr_value = 0x80200000, 961 .pvr_value = 0x80200000,
1004 .cpu_name = "e500", 962 .cpu_name = "e500",
1005 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 963 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1006 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 964 .cpu_features = CPU_FTRS_E500,
1007 CPU_FTR_USE_TB,
1008 .cpu_user_features = PPC_FEATURE_32 | 965 .cpu_user_features = PPC_FEATURE_32 |
1009 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 966 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
1010 PPC_FEATURE_HAS_EFP_SINGLE, 967 PPC_FEATURE_HAS_EFP_SINGLE,
@@ -1012,13 +969,12 @@ struct cpu_spec cpu_specs[] = {
1012 .dcache_bsize = 32, 969 .dcache_bsize = 32,
1013 .num_pmcs = 4, 970 .num_pmcs = 4,
1014 }, 971 },
1015 { /* e500v2 */ 972 { /* e500v2 */
1016 .pvr_mask = 0xffff0000, 973 .pvr_mask = 0xffff0000,
1017 .pvr_value = 0x80210000, 974 .pvr_value = 0x80210000,
1018 .cpu_name = "e500v2", 975 .cpu_name = "e500v2",
1019 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 976 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1020 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 977 .cpu_features = CPU_FTRS_E500_2,
1021 CPU_FTR_USE_TB | CPU_FTR_BIG_PHYS,
1022 .cpu_user_features = PPC_FEATURE_32 | 978 .cpu_user_features = PPC_FEATURE_32 |
1023 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 979 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
1024 PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE, 980 PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
@@ -1032,10 +988,11 @@ struct cpu_spec cpu_specs[] = {
1032 .pvr_mask = 0x00000000, 988 .pvr_mask = 0x00000000,
1033 .pvr_value = 0x00000000, 989 .pvr_value = 0x00000000,
1034 .cpu_name = "(generic PPC)", 990 .cpu_name = "(generic PPC)",
1035 .cpu_features = CPU_FTR_COMMON, 991 .cpu_features = CPU_FTRS_GENERIC_32,
1036 .cpu_user_features = PPC_FEATURE_32, 992 .cpu_user_features = PPC_FEATURE_32,
1037 .icache_bsize = 32, 993 .icache_bsize = 32,
1038 .dcache_bsize = 32, 994 .dcache_bsize = 32,
1039 } 995 }
1040#endif /* !CLASSIC_PPC */ 996#endif /* !CLASSIC_PPC */
997#endif /* CONFIG_PPC32 */
1041}; 998};
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
new file mode 100644
index 000000000000..37b4396ca978
--- /dev/null
+++ b/arch/powerpc/kernel/entry_32.S
@@ -0,0 +1,1002 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/errno.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x) li r,(x)
45#endif
46
47#ifdef CONFIG_BOOKE
48#include "head_booke.h"
49#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
50 mtspr exc_level##_SPRG,r8; \
51 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
52 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
53 stw r0,GPR10(r11); \
54 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
55 stw r0,GPR11(r11); \
56 mfspr r8,exc_level##_SPRG
57
58 .globl mcheck_transfer_to_handler
59mcheck_transfer_to_handler:
60 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61 b transfer_to_handler_full
62
63 .globl debug_transfer_to_handler
64debug_transfer_to_handler:
65 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66 b transfer_to_handler_full
67
68 .globl crit_transfer_to_handler
69crit_transfer_to_handler:
70 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71 /* fall through */
72#endif
73
74#ifdef CONFIG_40x
75 .globl crit_transfer_to_handler
76crit_transfer_to_handler:
77 lwz r0,crit_r10@l(0)
78 stw r0,GPR10(r11)
79 lwz r0,crit_r11@l(0)
80 stw r0,GPR11(r11)
81 /* fall through */
82#endif
83
84/*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
91 .globl transfer_to_handler_full
92transfer_to_handler_full:
93 SAVE_NVGPRS(r11)
94 /* fall through */
95
96 .globl transfer_to_handler
97transfer_to_handler:
98 stw r2,GPR2(r11)
99 stw r12,_NIP(r11)
100 stw r9,_MSR(r11)
101 andi. r2,r9,MSR_PR
102 mfctr r12
103 mfspr r2,SPRN_XER
104 stw r12,_CTR(r11)
105 stw r2,_XER(r11)
106 mfspr r12,SPRN_SPRG3
107 addi r2,r12,-THREAD
108 tovirt(r2,r2) /* set r2 to current */
109 beq 2f /* if from user, fix up THREAD.regs */
110 addi r11,r1,STACK_FRAME_OVERHEAD
111 stw r11,PT_REGS(r12)
112#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113 /* Check to see if the dbcr0 register is set up to debug. Use the
114 single-step bit to do this. */
115 lwz r12,THREAD_DBCR0(r12)
116 andis. r12,r12,DBCR0_IC@h
117 beq+ 3f
118 /* From user and task is ptraced - load up global dbcr0 */
119 li r12,-1 /* clear all pending debug events */
120 mtspr SPRN_DBSR,r12
121 lis r11,global_dbcr0@ha
122 tophys(r11,r11)
123 addi r11,r11,global_dbcr0@l
124 lwz r12,0(r11)
125 mtspr SPRN_DBCR0,r12
126 lwz r12,4(r11)
127 addi r12,r12,-1
128 stw r12,4(r11)
129#endif
130 b 3f
1312: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow
133 */
134#ifdef CONFIG_6xx
135 mfspr r11,SPRN_HID0
136 mtcr r11
137BEGIN_FTR_SECTION
138 bt- 8,power_save_6xx_restore /* Check DOZE */
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141 bt- 9,power_save_6xx_restore /* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont
145transfer_to_handler_cont:
146 lwz r11,THREAD_INFO-THREAD(r12)
147 cmplw r1,r11 /* if r1 <= current->thread_info */
148 ble- stack_ovf /* then the kernel stack overflowed */
1493:
150 mflr r9
151 lwz r11,0(r9) /* virtual address of handler */
152 lwz r9,4(r9) /* where to go when done */
153 FIX_SRR1(r10,r12)
154 mtspr SPRN_SRR0,r11
155 mtspr SPRN_SRR1,r10
156 mtlr r9
157 SYNC
158 RFI /* jump to handler, enable MMU */
159
160/*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164stack_ovf:
165 /* sometimes we use a statically-allocated stack, which is OK. */
166 lis r11,_end@h
167 ori r11,r11,_end@l
168 cmplw r1,r11
169 ble 3b /* r1 <= &_end is OK */
170 SAVE_NVGPRS(r11)
171 addi r3,r1,STACK_FRAME_OVERHEAD
172 lis r1,init_thread_union@ha
173 addi r1,r1,init_thread_union@l
174 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175 lis r9,StackOverflow@ha
176 addi r9,r9,StackOverflow@l
177 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178 FIX_SRR1(r10,r12)
179 mtspr SPRN_SRR0,r9
180 mtspr SPRN_SRR1,r10
181 SYNC
182 RFI
183
184/*
185 * Handle a system call.
186 */
187 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
188 .stabs "entry_32.S",N_SO,0,0,0f
1890:
190
191_GLOBAL(DoSyscall)
192 stw r0,THREAD+LAST_SYSCALL(r2)
193 stw r3,ORIG_GPR3(r1)
194 li r12,0
195 stw r12,RESULT(r1)
196 lwz r11,_CCR(r1) /* Clear SO bit in CR */
197 rlwinm r11,r11,0,4,2
198 stw r11,_CCR(r1)
199#ifdef SHOW_SYSCALLS
200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
203 li r11,0
204 stb r11,TI_SC_NOERR(r10)
205 lwz r11,TI_FLAGS(r10)
206 andi. r11,r11,_TIF_SYSCALL_T_OR_A
207 bne- syscall_dotrace
208syscall_dotrace_cont:
209 cmplwi 0,r0,NR_syscalls
210 lis r10,sys_call_table@h
211 ori r10,r10,sys_call_table@l
212 slwi r0,r0,2
213 bge- 66f
214 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
215 mtlr r10
216 addi r9,r1,STACK_FRAME_OVERHEAD
217 PPC440EP_ERR42
218 blrl /* Call handler */
219 .globl ret_from_syscall
220ret_from_syscall:
221#ifdef SHOW_SYSCALLS
222 bl do_show_syscall_exit
223#endif
224 mr r6,r3
225 li r11,-_LAST_ERRNO
226 cmplw 0,r3,r11
227 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
228 blt+ 30f
229 lbz r11,TI_SC_NOERR(r12)
230 cmpwi r11,0
231 bne 30f
232 neg r3,r3
233 lwz r10,_CCR(r1) /* Set SO bit in CR */
234 oris r10,r10,0x1000
235 stw r10,_CCR(r1)
236
237 /* disable interrupts so current_thread_info()->flags can't change */
23830: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
239 SYNC
240 MTMSRD(r10)
241 lwz r9,TI_FLAGS(r12)
242 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
243 bne- syscall_exit_work
244syscall_exit_cont:
245#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
246 /* If the process has its own DBCR0 value, load it up. The single
247 step bit tells us that dbcr0 should be loaded. */
248 lwz r0,THREAD+THREAD_DBCR0(r2)
249 andis. r10,r0,DBCR0_IC@h
250 bnel- load_dbcr0
251#endif
252 stwcx. r0,0,r1 /* to clear the reservation */
253 lwz r4,_LINK(r1)
254 lwz r5,_CCR(r1)
255 mtlr r4
256 mtcr r5
257 lwz r7,_NIP(r1)
258 lwz r8,_MSR(r1)
259 FIX_SRR1(r8, r0)
260 lwz r2,GPR2(r1)
261 lwz r1,GPR1(r1)
262 mtspr SPRN_SRR0,r7
263 mtspr SPRN_SRR1,r8
264 SYNC
265 RFI
266
26766: li r3,-ENOSYS
268 b ret_from_syscall
269
270 .globl ret_from_fork
271ret_from_fork:
272 REST_NVGPRS(r1)
273 bl schedule_tail
274 li r3,0
275 b ret_from_syscall
276
277/* Traced system call support */
278syscall_dotrace:
279 SAVE_NVGPRS(r1)
280 li r0,0xc00
281 stw r0,TRAP(r1)
282 addi r3,r1,STACK_FRAME_OVERHEAD
283 bl do_syscall_trace_enter
284 lwz r0,GPR0(r1) /* Restore original registers */
285 lwz r3,GPR3(r1)
286 lwz r4,GPR4(r1)
287 lwz r5,GPR5(r1)
288 lwz r6,GPR6(r1)
289 lwz r7,GPR7(r1)
290 lwz r8,GPR8(r1)
291 REST_NVGPRS(r1)
292 b syscall_dotrace_cont
293
294syscall_exit_work:
295 stw r6,RESULT(r1) /* Save result */
296 stw r3,GPR3(r1) /* Update return value */
297 andi. r0,r9,_TIF_SYSCALL_T_OR_A
298 beq 5f
299 ori r10,r10,MSR_EE
300 SYNC
301 MTMSRD(r10) /* re-enable interrupts */
302 lwz r4,TRAP(r1)
303 andi. r4,r4,1
304 beq 4f
305 SAVE_NVGPRS(r1)
306 li r4,0xc00
307 stw r4,TRAP(r1)
3084:
309 addi r3,r1,STACK_FRAME_OVERHEAD
310 bl do_syscall_trace_leave
311 REST_NVGPRS(r1)
3122:
313 lwz r3,GPR3(r1)
314 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
315 SYNC
316 MTMSRD(r10) /* disable interrupts again */
317 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
318 lwz r9,TI_FLAGS(r12)
3195:
320 andi. r0,r9,_TIF_NEED_RESCHED
321 bne 1f
322 lwz r5,_MSR(r1)
323 andi. r5,r5,MSR_PR
324 beq syscall_exit_cont
325 andi. r0,r9,_TIF_SIGPENDING
326 beq syscall_exit_cont
327 b do_user_signal
3281:
329 ori r10,r10,MSR_EE
330 SYNC
331 MTMSRD(r10) /* re-enable interrupts */
332 bl schedule
333 b 2b
334
335#ifdef SHOW_SYSCALLS
336do_show_syscall:
337#ifdef SHOW_SYSCALLS_TASK
338 lis r11,show_syscalls_task@ha
339 lwz r11,show_syscalls_task@l(r11)
340 cmp 0,r2,r11
341 bnelr
342#endif
343 stw r31,GPR31(r1)
344 mflr r31
345 lis r3,7f@ha
346 addi r3,r3,7f@l
347 lwz r4,GPR0(r1)
348 lwz r5,GPR3(r1)
349 lwz r6,GPR4(r1)
350 lwz r7,GPR5(r1)
351 lwz r8,GPR6(r1)
352 lwz r9,GPR7(r1)
353 bl printk
354 lis r3,77f@ha
355 addi r3,r3,77f@l
356 lwz r4,GPR8(r1)
357 mr r5,r2
358 bl printk
359 lwz r0,GPR0(r1)
360 lwz r3,GPR3(r1)
361 lwz r4,GPR4(r1)
362 lwz r5,GPR5(r1)
363 lwz r6,GPR6(r1)
364 lwz r7,GPR7(r1)
365 lwz r8,GPR8(r1)
366 mtlr r31
367 lwz r31,GPR31(r1)
368 blr
369
370do_show_syscall_exit:
371#ifdef SHOW_SYSCALLS_TASK
372 lis r11,show_syscalls_task@ha
373 lwz r11,show_syscalls_task@l(r11)
374 cmp 0,r2,r11
375 bnelr
376#endif
377 stw r31,GPR31(r1)
378 mflr r31
379 stw r3,RESULT(r1) /* Save result */
380 mr r4,r3
381 lis r3,79f@ha
382 addi r3,r3,79f@l
383 bl printk
384 lwz r3,RESULT(r1)
385 mtlr r31
386 lwz r31,GPR31(r1)
387 blr
388
3897: .string "syscall %d(%x, %x, %x, %x, %x, "
39077: .string "%x), current=%p\n"
39179: .string " -> %x\n"
392 .align 2,0
393
394#ifdef SHOW_SYSCALLS_TASK
395 .data
396 .globl show_syscalls_task
397show_syscalls_task:
398 .long -1
399 .text
400#endif
401#endif /* SHOW_SYSCALLS */
402
403/*
404 * The sigsuspend and rt_sigsuspend system calls can call do_signal
405 * and thus put the process into the stopped state where we might
406 * want to examine its user state with ptrace. Therefore we need
407 * to save all the nonvolatile registers (r13 - r31) before calling
408 * the C code.
409 */
410 .globl ppc_sigsuspend
411ppc_sigsuspend:
412 SAVE_NVGPRS(r1)
413 lwz r0,TRAP(r1)
414 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
415 stw r0,TRAP(r1) /* register set saved */
416 b sys_sigsuspend
417
418 .globl ppc_rt_sigsuspend
419ppc_rt_sigsuspend:
420 SAVE_NVGPRS(r1)
421 lwz r0,TRAP(r1)
422 rlwinm r0,r0,0,0,30
423 stw r0,TRAP(r1)
424 b sys_rt_sigsuspend
425
426 .globl ppc_fork
427ppc_fork:
428 SAVE_NVGPRS(r1)
429 lwz r0,TRAP(r1)
430 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
431 stw r0,TRAP(r1) /* register set saved */
432 b sys_fork
433
434 .globl ppc_vfork
435ppc_vfork:
436 SAVE_NVGPRS(r1)
437 lwz r0,TRAP(r1)
438 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
439 stw r0,TRAP(r1) /* register set saved */
440 b sys_vfork
441
442 .globl ppc_clone
443ppc_clone:
444 SAVE_NVGPRS(r1)
445 lwz r0,TRAP(r1)
446 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
447 stw r0,TRAP(r1) /* register set saved */
448 b sys_clone
449
450 .globl ppc_swapcontext
451ppc_swapcontext:
452 SAVE_NVGPRS(r1)
453 lwz r0,TRAP(r1)
454 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
455 stw r0,TRAP(r1) /* register set saved */
456 b sys_swapcontext
457
458/*
459 * Top-level page fault handling.
460 * This is in assembler because if do_page_fault tells us that
461 * it is a bad kernel page fault, we want to save the non-volatile
462 * registers before calling bad_page_fault.
463 */
464 .globl handle_page_fault
465handle_page_fault:
466 stw r4,_DAR(r1)
467 addi r3,r1,STACK_FRAME_OVERHEAD
468 bl do_page_fault
469 cmpwi r3,0
470 beq+ ret_from_except
471 SAVE_NVGPRS(r1)
472 lwz r0,TRAP(r1)
473 clrrwi r0,r0,1
474 stw r0,TRAP(r1)
475 mr r5,r3
476 addi r3,r1,STACK_FRAME_OVERHEAD
477 lwz r4,_DAR(r1)
478 bl bad_page_fault
479 b ret_from_except_full
480
481/*
482 * This routine switches between two different tasks. The process
483 * state of one is saved on its kernel stack. Then the state
484 * of the other is restored from its kernel stack. The memory
485 * management hardware is updated to the second process's state.
486 * Finally, we can return to the second process.
487 * On entry, r3 points to the THREAD for the current task, r4
488 * points to the THREAD for the new task.
489 *
490 * This routine is always called with interrupts disabled.
491 *
492 * Note: there are two ways to get to the "going out" portion
493 * of this code; either by coming in via the entry (_switch)
494 * or via "fork" which must set up an environment equivalent
495 * to the "_switch" path. If you change this , you'll have to
496 * change the fork code also.
497 *
498 * The code which creates the new task context is in 'copy_thread'
499 * in arch/ppc/kernel/process.c
500 */
501_GLOBAL(_switch)
502 stwu r1,-INT_FRAME_SIZE(r1)
503 mflr r0
504 stw r0,INT_FRAME_SIZE+4(r1)
505 /* r3-r12 are caller saved -- Cort */
506 SAVE_NVGPRS(r1)
507 stw r0,_NIP(r1) /* Return to switch caller */
508 mfmsr r11
509 li r0,MSR_FP /* Disable floating-point */
510#ifdef CONFIG_ALTIVEC
511BEGIN_FTR_SECTION
512 oris r0,r0,MSR_VEC@h /* Disable altivec */
513 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
514 stw r12,THREAD+THREAD_VRSAVE(r2)
515END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
516#endif /* CONFIG_ALTIVEC */
517#ifdef CONFIG_SPE
518 oris r0,r0,MSR_SPE@h /* Disable SPE */
519 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
520 stw r12,THREAD+THREAD_SPEFSCR(r2)
521#endif /* CONFIG_SPE */
522 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
523 beq+ 1f
524 andc r11,r11,r0
525 MTMSRD(r11)
526 isync
5271: stw r11,_MSR(r1)
528 mfcr r10
529 stw r10,_CCR(r1)
530 stw r1,KSP(r3) /* Set old stack pointer */
531
532#ifdef CONFIG_SMP
533 /* We need a sync somewhere here to make sure that if the
534 * previous task gets rescheduled on another CPU, it sees all
535 * stores it has performed on this one.
536 */
537 sync
538#endif /* CONFIG_SMP */
539
540 tophys(r0,r4)
541 CLR_TOP32(r0)
542 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
543 lwz r1,KSP(r4) /* Load new stack pointer */
544
545 /* save the old current 'last' for return value */
546 mr r3,r2
547 addi r2,r4,-THREAD /* Update current */
548
549#ifdef CONFIG_ALTIVEC
550BEGIN_FTR_SECTION
551 lwz r0,THREAD+THREAD_VRSAVE(r2)
552 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
553END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
554#endif /* CONFIG_ALTIVEC */
555#ifdef CONFIG_SPE
556 lwz r0,THREAD+THREAD_SPEFSCR(r2)
557 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
558#endif /* CONFIG_SPE */
559
560 lwz r0,_CCR(r1)
561 mtcrf 0xFF,r0
562 /* r3-r12 are destroyed -- Cort */
563 REST_NVGPRS(r1)
564
565 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
566 mtlr r4
567 addi r1,r1,INT_FRAME_SIZE
568 blr
569
570 .globl fast_exception_return
571fast_exception_return:
572#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
573 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
574 beq 1f /* if not, we've got problems */
575#endif
576
5772: REST_4GPRS(3, r11)
578 lwz r10,_CCR(r11)
579 REST_GPR(1, r11)
580 mtcr r10
581 lwz r10,_LINK(r11)
582 mtlr r10
583 REST_GPR(10, r11)
584 mtspr SPRN_SRR1,r9
585 mtspr SPRN_SRR0,r12
586 REST_GPR(9, r11)
587 REST_GPR(12, r11)
588 lwz r11,GPR11(r11)
589 SYNC
590 RFI
591
592#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
593/* check if the exception happened in a restartable section */
5941: lis r3,exc_exit_restart_end@ha
595 addi r3,r3,exc_exit_restart_end@l
596 cmplw r12,r3
597 bge 3f
598 lis r4,exc_exit_restart@ha
599 addi r4,r4,exc_exit_restart@l
600 cmplw r12,r4
601 blt 3f
602 lis r3,fee_restarts@ha
603 tophys(r3,r3)
604 lwz r5,fee_restarts@l(r3)
605 addi r5,r5,1
606 stw r5,fee_restarts@l(r3)
607 mr r12,r4 /* restart at exc_exit_restart */
608 b 2b
609
610 .comm fee_restarts,4
611
612/* aargh, a nonrecoverable interrupt, panic */
613/* aargh, we don't know which trap this is */
614/* but the 601 doesn't implement the RI bit, so assume it's OK */
6153:
616BEGIN_FTR_SECTION
617 b 2b
618END_FTR_SECTION_IFSET(CPU_FTR_601)
619 li r10,-1
620 stw r10,TRAP(r11)
621 addi r3,r1,STACK_FRAME_OVERHEAD
622 lis r10,MSR_KERNEL@h
623 ori r10,r10,MSR_KERNEL@l
624 bl transfer_to_handler_full
625 .long nonrecoverable_exception
626 .long ret_from_except
627#endif
628
629 .globl sigreturn_exit
630sigreturn_exit:
631 subi r1,r3,STACK_FRAME_OVERHEAD
632 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
633 lwz r9,TI_FLAGS(r12)
634 andi. r0,r9,_TIF_SYSCALL_T_OR_A
635 beq+ ret_from_except_full
636 bl do_syscall_trace_leave
637 /* fall through */
638
639 .globl ret_from_except_full
640ret_from_except_full:
641 REST_NVGPRS(r1)
642 /* fall through */
643
644 .globl ret_from_except
645ret_from_except:
646 /* Hard-disable interrupts so that current_thread_info()->flags
647 * can't change between when we test it and when we return
648 * from the interrupt. */
649 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
650 SYNC /* Some chip revs have problems here... */
651 MTMSRD(r10) /* disable interrupts */
652
653 lwz r3,_MSR(r1) /* Returning to user mode? */
654 andi. r0,r3,MSR_PR
655 beq resume_kernel
656
657user_exc_return: /* r10 contains MSR_KERNEL here */
658 /* Check current_thread_info()->flags */
659 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
660 lwz r9,TI_FLAGS(r9)
661 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
662 bne do_work
663
664restore_user:
665#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
666 /* Check whether this process has its own DBCR0 value. The single
667 step bit tells us that dbcr0 should be loaded. */
668 lwz r0,THREAD+THREAD_DBCR0(r2)
669 andis. r10,r0,DBCR0_IC@h
670 bnel- load_dbcr0
671#endif
672
673#ifdef CONFIG_PREEMPT
674 b restore
675
676/* N.B. the only way to get here is from the beq following ret_from_except. */
677resume_kernel:
678 /* check current_thread_info->preempt_count */
679 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
680 lwz r0,TI_PREEMPT(r9)
681 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
682 bne restore
683 lwz r0,TI_FLAGS(r9)
684 andi. r0,r0,_TIF_NEED_RESCHED
685 beq+ restore
686 andi. r0,r3,MSR_EE /* interrupts off? */
687 beq restore /* don't schedule if so */
6881: bl preempt_schedule_irq
689 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
690 lwz r3,TI_FLAGS(r9)
691 andi. r0,r3,_TIF_NEED_RESCHED
692 bne- 1b
693#else
694resume_kernel:
695#endif /* CONFIG_PREEMPT */
696
/*
 * Final register restore and return-from-interrupt for 32-bit.  Reloads
 * volatile GPRs, XER, CTR, then clears the lock reservation with stwcx.
 * Classic (non-4xx/Book-E) parts also clear MSR:RI so a fault between
 * loading SRR0/SRR1 and RFI can be restarted at exc_exit_restart; the
 * 4xx/Book-E arm has no RI bit and relies on its TLB-miss handler instead.
 */
697 /* interrupts are hard-disabled at this point */
698restore:
699 lwz r0,GPR0(r1)
700 lwz r2,GPR2(r1)
701 REST_4GPRS(3, r1)
702 REST_2GPRS(7, r1)
703
704 lwz r10,_XER(r1)
705 lwz r11,_CTR(r1)
706 mtspr SPRN_XER,r10
707 mtctr r11
708
709 PPC405_ERR77(0,r1)
710 stwcx. r0,0,r1 /* to clear the reservation */
711
712#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
713 lwz r9,_MSR(r1)
714 andi. r10,r9,MSR_RI /* check if this exception occurred */
715 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
716
717 lwz r10,_CCR(r1)
718 lwz r11,_LINK(r1)
719 mtcrf 0xFF,r10
720 mtlr r11
721
722 /*
723 * Once we put values in SRR0 and SRR1, we are in a state
724 * where exceptions are not recoverable, since taking an
725 * exception will trash SRR0 and SRR1. Therefore we clear the
726 * MSR:RI bit to indicate this. If we do take an exception,
727 * we can't return to the point of the exception but we
728 * can restart the exception exit path at the label
729 * exc_exit_restart below. -- paulus
730 */
731 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
732 SYNC
733 MTMSRD(r10) /* clear the RI bit */
734 .globl exc_exit_restart
735exc_exit_restart:
736 lwz r9,_MSR(r1)
737 lwz r12,_NIP(r1)
738 FIX_SRR1(r9,r10)
739 mtspr SPRN_SRR0,r12
740 mtspr SPRN_SRR1,r9
741 REST_4GPRS(9, r1)
742 lwz r1,GPR1(r1)
743 .globl exc_exit_restart_end
744exc_exit_restart_end:
745 SYNC
746 RFI
747
748#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
749 /*
750 * This is a bit different on 4xx/Book-E because it doesn't have
751 * the RI bit in the MSR.
752 * The TLB miss handler checks if we have interrupted
753 * the exception exit path and restarts it if so
754 * (well maybe one day it will... :).
755 */
756 lwz r11,_LINK(r1)
757 mtlr r11
758 lwz r10,_CCR(r1)
759 mtcrf 0xff,r10
760 REST_2GPRS(9, r1)
761 .globl exc_exit_restart
762exc_exit_restart:
763 lwz r11,_NIP(r1)
764 lwz r12,_MSR(r1)
765exc_exit_start:
766 mtspr SPRN_SRR0,r11
767 mtspr SPRN_SRR1,r12
768 REST_2GPRS(11, r1)
769 lwz r1,GPR1(r1)
770 .globl exc_exit_restart_end
771exc_exit_restart_end:
772 PPC405_ERR77_SYNC
773 rfi
774 b . /* prevent prefetch past rfi */
775
776/*
777 * Returning from a critical interrupt in user mode doesn't need
778 * to be any different from a normal exception. For a critical
779 * interrupt in the kernel, we just return (without checking for
780 * preemption) since the interrupt may have happened at some crucial
781 * place (e.g. inside the TLB miss handler), and because we will be
782 * running with r1 pointing into critical_stack, not the current
783 * process's kernel stack (and therefore current_thread_info() will
784 * give the wrong answer).
785 * We have to restore various SPRs that may have been in use at the
786 * time of the critical interrupt.
787 *
788 */
789#ifdef CONFIG_40x
790#define PPC_40x_TURN_OFF_MSR_DR \
791 /* avoid any possible TLB misses here by turning off MSR.DR, we \
792 * assume the instructions here are mapped by a pinned TLB entry */ \
793 li r10,MSR_IR; \
794 mtmsr r10; \
795 isync; \
796 tophys(r1, r1);
797#else
798#define PPC_40x_TURN_OFF_MSR_DR
799#endif
800
801#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
802 REST_NVGPRS(r1); \
803 lwz r3,_MSR(r1); \
804 andi. r3,r3,MSR_PR; \
805 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
806 bne user_exc_return; \
807 lwz r0,GPR0(r1); \
808 lwz r2,GPR2(r1); \
809 REST_4GPRS(3, r1); \
810 REST_2GPRS(7, r1); \
811 lwz r10,_XER(r1); \
812 lwz r11,_CTR(r1); \
813 mtspr SPRN_XER,r10; \
814 mtctr r11; \
815 PPC405_ERR77(0,r1); \
816 stwcx. r0,0,r1; /* to clear the reservation */ \
817 lwz r11,_LINK(r1); \
818 mtlr r11; \
819 lwz r10,_CCR(r1); \
820 mtcrf 0xff,r10; \
821 PPC_40x_TURN_OFF_MSR_DR; \
822 lwz r9,_DEAR(r1); \
823 lwz r10,_ESR(r1); \
824 mtspr SPRN_DEAR,r9; \
825 mtspr SPRN_ESR,r10; \
826 lwz r11,_NIP(r1); \
827 lwz r12,_MSR(r1); \
828 mtspr exc_lvl_srr0,r11; \
829 mtspr exc_lvl_srr1,r12; \
830 lwz r9,GPR9(r1); \
831 lwz r12,GPR12(r1); \
832 lwz r10,GPR10(r1); \
833 lwz r11,GPR11(r1); \
834 lwz r1,GPR1(r1); \
835 PPC405_ERR77_SYNC; \
836 exc_lvl_rfi; \
837 b .; /* prevent prefetch past exc_lvl_rfi */
838
839 .globl ret_from_crit_exc
840ret_from_crit_exc:
841 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
842
843#ifdef CONFIG_BOOKE
844 .globl ret_from_debug_exc
845ret_from_debug_exc:
846 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
847
848 .globl ret_from_mcheck_exc
849ret_from_mcheck_exc:
850 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
851#endif /* CONFIG_BOOKE */
852
853/*
854 * Load the DBCR0 value for a task that is being ptraced,
855 * having first saved away the global DBCR0. Note that r0
856 * has the dbcr0 value to set upon entry to this.
857 */
858load_dbcr0:
859 mfmsr r10 /* first disable debug exceptions */
860 rlwinm r10,r10,0,~MSR_DE
861 mtmsr r10
862 isync
863 mfspr r10,SPRN_DBCR0
864 lis r11,global_dbcr0@ha
865 addi r11,r11,global_dbcr0@l
866 stw r10,0(r11)
867 mtspr SPRN_DBCR0,r0
868 lwz r10,4(r11)
869 addi r10,r10,1
870 stw r10,4(r11)
871 li r11,-1
872 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
873 blr
874
875 .comm global_dbcr0,8
876#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
877
/*
 * Pending-work loop for return-to-user: reschedule and/or deliver
 * signals, re-checking TI_FLAGS with interrupts hard-disabled until
 * nothing is pending, then branch back to restore_user.
 */
878do_work: /* r10 contains MSR_KERNEL here */
879 andi. r0,r9,_TIF_NEED_RESCHED
880 beq do_user_signal
881
882do_resched: /* r10 contains MSR_KERNEL here */
883 ori r10,r10,MSR_EE
884 SYNC
885 MTMSRD(r10) /* hard-enable interrupts */
886 bl schedule
887recheck:
888 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
889 SYNC
890 MTMSRD(r10) /* disable interrupts */
891 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
892 lwz r9,TI_FLAGS(r9)
893 andi. r0,r9,_TIF_NEED_RESCHED
894 bne- do_resched
895 andi. r0,r9,_TIF_SIGPENDING
896 beq restore_user
897do_user_signal: /* r10 contains MSR_KERNEL here */
898 ori r10,r10,MSR_EE
899 SYNC
900 MTMSRD(r10) /* hard-enable interrupts */
901 /* save r13-r31 in the exception frame, if not already done */
902 lwz r3,TRAP(r1)
903 andi. r0,r3,1
904 beq 2f
905 SAVE_NVGPRS(r1)
/* clear the low TRAP bit to record that NVGPRs are now saved */
906 rlwinm r3,r3,0,0,30
907 stw r3,TRAP(r1)
9082: li r3,0
909 addi r4,r1,STACK_FRAME_OVERHEAD
910 bl do_signal
911 REST_NVGPRS(r1)
912 b recheck
913
914/*
915 * We come here when we are at the end of handling an exception
916 * that occurred at a place where taking an exception will lose
917 * state information, such as the contents of SRR0 and SRR1.
918 */
919nonrecoverable:
920 lis r10,exc_exit_restart_end@ha
921 addi r10,r10,exc_exit_restart_end@l
922 cmplw r12,r10
923 bge 3f
924 lis r11,exc_exit_restart@ha
925 addi r11,r11,exc_exit_restart@l
926 cmplw r12,r11
927 blt 3f
928 lis r10,ee_restarts@ha
929 lwz r12,ee_restarts@l(r10)
930 addi r12,r12,1
931 stw r12,ee_restarts@l(r10)
932 mr r12,r11 /* restart at exc_exit_restart */
933 blr
9343: /* OK, we can't recover, kill this process */
935 /* but the 601 doesn't implement the RI bit, so assume it's OK */
936BEGIN_FTR_SECTION
937 blr
938END_FTR_SECTION_IFSET(CPU_FTR_601)
939 lwz r3,TRAP(r1)
940 andi. r0,r3,1
941 beq 4f
942 SAVE_NVGPRS(r1)
943 rlwinm r3,r3,0,0,30
944 stw r3,TRAP(r1)
9454: addi r3,r1,STACK_FRAME_OVERHEAD
946 bl nonrecoverable_exception
947 /* shouldn't return */
948 b 4b
949
950 .comm ee_restarts,4
951
952/*
953 * PROM code for specific machines follows. Put it
954 * here so it's easy to add arch-specific sections later.
955 * -- Cort
956 */
957#ifdef CONFIG_PPC_OF
958/*
959 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
960 * called with the MMU off.
961 */
962_GLOBAL(enter_rtas)
963 stwu r1,-INT_FRAME_SIZE(r1)
964 mflr r0
965 stw r0,INT_FRAME_SIZE+4(r1)
966 lis r4,rtas_data@ha
967 lwz r4,rtas_data@l(r4)
968 lis r6,1f@ha /* physical return address for rtas */
969 addi r6,r6,1f@l
970 tophys(r6,r6)
971 tophys(r7,r1)
972 lis r8,rtas_entry@ha
973 lwz r8,rtas_entry@l(r8)
974 mfmsr r9
975 stw r9,8(r1)
976 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
977 SYNC /* disable interrupts so SRR0/1 */
978 MTMSRD(r0) /* don't get trashed */
979 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
980 mtlr r6
981 CLR_TOP32(r7)
982 mtspr SPRN_SPRG2,r7
983 mtspr SPRN_SRR0,r8
984 mtspr SPRN_SRR1,r9
985 RFI
9861: tophys(r9,r1)
987 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
988 lwz r9,8(r9) /* original msr value */
989 FIX_SRR1(r9,r0)
990 addi r1,r1,INT_FRAME_SIZE
991 li r0,0
992 mtspr SPRN_SPRG2,r0
993 mtspr SPRN_SRR0,r8
994 mtspr SPRN_SRR1,r9
995 RFI /* return to caller */
996
997 .globl machine_check_in_rtas
998machine_check_in_rtas:
999 twi 31,0,0
1000 /* XXX load up BATs and panic */
1001
1002#endif /* CONFIG_PPC_OF */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
new file mode 100644
index 000000000000..984a10630714
--- /dev/null
+++ b/arch/powerpc/kernel/entry_64.S
@@ -0,0 +1,842 @@
1/*
2 * arch/ppc64/kernel/entry.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains the system call entry code, context switch
15 * code, and exception/interrupt return code for PowerPC.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 */
22
23#include <linux/config.h>
24#include <linux/errno.h>
25#include <asm/unistd.h>
26#include <asm/processor.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/cputable.h>
33
34#ifdef CONFIG_PPC_ISERIES
35#define DO_SOFT_DISABLE
36#endif
37
38/*
39 * System calls.
40 */
41 .section ".toc","aw"
42.SYS_CALL_TABLE:
43 .tc .sys_call_table[TC],.sys_call_table
44
45/* This value is used to mark exception frames on the stack. */
46exception_marker:
47 .tc ID_72656773_68657265[TC],0x7265677368657265
48
49 .section ".text"
50 .align 7
51
52#undef SHOW_SYSCALLS
53
/*
 * 64-bit system call entry.  On entry r0 = syscall number, r3-r8 =
 * arguments, r9-r12 hold values saved by the low-level exception
 * prologue (r11 = NIP, r12 = MSR, r9 = user r13).  Builds a pt_regs
 * frame, re-enables interrupts, dispatches through the (64- or 32-bit)
 * syscall table, then returns via the fast path at syscall_exit.
 */
 54 .globl system_call_common
 55system_call_common:
 56 andi. r10,r12,MSR_PR
 57 mr r10,r1
 58 addi r1,r1,-INT_FRAME_SIZE
 59 beq- 1f
 60 ld r1,PACAKSAVE(r13)
 611: std r10,0(r1)
 62 std r11,_NIP(r1)
 63 std r12,_MSR(r1)
 64 std r0,GPR0(r1)
 65 std r10,GPR1(r1)
 66 std r2,GPR2(r1)
 67 std r3,GPR3(r1)
 68 std r4,GPR4(r1)
 69 std r5,GPR5(r1)
 70 std r6,GPR6(r1)
 71 std r7,GPR7(r1)
 72 std r8,GPR8(r1)
 73 li r11,0
 74 std r11,GPR9(r1)
 75 std r11,GPR10(r1)
 76 std r11,GPR11(r1)
 77 std r11,GPR12(r1)
 78 std r9,GPR13(r1)
 79 crclr so
 80 mfcr r9
 81 mflr r10
 82 li r11,0xc01
 83 std r9,_CCR(r1)
 84 std r10,_LINK(r1)
 85 std r11,_TRAP(r1)
 86 mfxer r9
 87 mfctr r10
 88 std r9,_XER(r1)
 89 std r10,_CTR(r1)
 90 std r3,ORIG_GPR3(r1)
 91 ld r2,PACATOC(r13)
 92 addi r9,r1,STACK_FRAME_OVERHEAD
 93 ld r11,exception_marker@toc(r2)
 94 std r11,-16(r9) /* "regshere" marker */
 95#ifdef CONFIG_PPC_ISERIES
 96 /* Hack for handling interrupts when soft-enabling on iSeries */
 97 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
 98 andi. r10,r12,MSR_PR /* from kernel */
 99 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
100 beq hardware_interrupt_entry
101 lbz r10,PACAPROCENABLED(r13)
102 std r10,SOFTE(r1)
103#endif
104 mfmsr r11
105 ori r11,r11,MSR_EE
106 mtmsrd r11,1
107
108#ifdef SHOW_SYSCALLS
109 bl .do_show_syscall
110 REST_GPR(0,r1)
111 REST_4GPRS(3,r1)
112 REST_2GPRS(7,r1)
113 addi r9,r1,STACK_FRAME_OVERHEAD
114#endif
115 clrrdi r11,r1,THREAD_SHIFT
116 li r12,0
117 ld r10,TI_FLAGS(r11)
118 stb r12,TI_SC_NOERR(r11)
119 andi. r11,r10,_TIF_SYSCALL_T_OR_A
120 bne- syscall_dotrace
121syscall_dotrace_cont:
122 cmpldi 0,r0,NR_syscalls
123 bge- syscall_enosys
124
125system_call: /* label this so stack traces look sane */
126/*
127 * Need to vector to 32 Bit or default sys_call_table here,
128 * based on caller's run-mode / personality.
129 */
130 ld r11,.SYS_CALL_TABLE@toc(2)
131 andi. r10,r10,_TIF_32BIT
132 beq 15f
133 addi r11,r11,8 /* use 32-bit syscall entries */
134 clrldi r3,r3,32
135 clrldi r4,r4,32
136 clrldi r5,r5,32
137 clrldi r6,r6,32
138 clrldi r7,r7,32
139 clrldi r8,r8,32
14015:
141 slwi r0,r0,4
142 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
143 mtctr r10
144 bctrl /* Call handler */
145
/* Fast syscall return: translate error returns, check for trace/signal
 * work, then restore user state and rfid with MSR:RI cleared across
 * the SRR0/SRR1 load. */
146syscall_exit:
147#ifdef SHOW_SYSCALLS
148 std r3,GPR3(r1)
149 bl .do_show_syscall_exit
150 ld r3,GPR3(r1)
151#endif
152 std r3,RESULT(r1)
153 ld r5,_CCR(r1)
154 li r10,-_LAST_ERRNO
155 cmpld r3,r10
156 clrrdi r12,r1,THREAD_SHIFT
157 bge- syscall_error
158syscall_error_cont:
159
160 /* check for syscall tracing or audit */
161 ld r9,TI_FLAGS(r12)
162 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
163 bne- syscall_exit_trace
164syscall_exit_trace_cont:
165
166 /* disable interrupts so current_thread_info()->flags can't change,
167 and so that we don't get interrupted after loading SRR0/1. */
168 ld r8,_MSR(r1)
169 andi. r10,r8,MSR_RI
170 beq- unrecov_restore
171 mfmsr r10
172 rldicl r10,r10,48,1
173 rotldi r10,r10,16
174 mtmsrd r10,1
175 ld r9,TI_FLAGS(r12)
176 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
177 bne- syscall_exit_work
178 ld r7,_NIP(r1)
179 stdcx. r0,0,r1 /* to clear the reservation */
180 andi. r6,r8,MSR_PR
181 ld r4,_LINK(r1)
182 beq- 1f /* only restore r13 if */
183 ld r13,GPR13(r1) /* returning to usermode */
1841: ld r2,GPR2(r1)
185 li r12,MSR_RI
186 andc r10,r10,r12
187 mtmsrd r10,1 /* clear MSR.RI */
188 ld r1,GPR1(r1)
189 mtlr r4
190 mtcr r5
191 mtspr SPRN_SRR0,r7
192 mtspr SPRN_SRR1,r8
193 rfid
194 b . /* prevent speculative execution */
195
/*
 * Slow-path helpers for the 64-bit syscall exit: ENOSYS fallthrough,
 * error-to-errno conversion (unless TI_SC_NOERR is set), and the
 * ptrace/audit trace hooks on entry and exit.
 */
196syscall_enosys:
197 li r3,-ENOSYS
198 std r3,RESULT(r1)
199 clrrdi r12,r1,THREAD_SHIFT
200 ld r5,_CCR(r1)
201
202syscall_error:
/* TI_SC_NOERR set means the syscall handles its own error reporting. */
203 lbz r11,TI_SC_NOERR(r12)
204 cmpwi 0,r11,0
205 bne- syscall_error_cont
206 neg r3,r3
207 oris r5,r5,0x1000 /* Set SO bit in CR */
208 std r5,_CCR(r1)
209 b syscall_error_cont
210
211/* Traced system call support */
212syscall_dotrace:
213 bl .save_nvgprs
214 addi r3,r1,STACK_FRAME_OVERHEAD
215 bl .do_syscall_trace_enter
216 ld r0,GPR0(r1) /* Restore original registers */
217 ld r3,GPR3(r1)
218 ld r4,GPR4(r1)
219 ld r5,GPR5(r1)
220 ld r6,GPR6(r1)
221 ld r7,GPR7(r1)
222 ld r8,GPR8(r1)
223 addi r9,r1,STACK_FRAME_OVERHEAD
224 clrrdi r10,r1,THREAD_SHIFT
225 ld r10,TI_FLAGS(r10)
226 b syscall_dotrace_cont
227
228syscall_exit_trace:
229 std r3,GPR3(r1)
230 bl .save_nvgprs
231 addi r3,r1,STACK_FRAME_OVERHEAD
232 bl .do_syscall_trace_leave
233 REST_NVGPRS(r1)
234 ld r3,GPR3(r1)
235 ld r5,_CCR(r1)
236 clrrdi r12,r1,THREAD_SHIFT
237 b syscall_exit_trace_cont
238
239/* Stuff to do on exit from a system call. */
240syscall_exit_work:
241 std r3,GPR3(r1)
242 std r5,_CCR(r1)
243 b .ret_from_except_lite
244
/* Save non-volatile GPRs, if not already saved. */
/* The low bit of _TRAP is set while the NVGPRs are NOT saved; clear it
 * after SAVE_NVGPRS so a second call returns early via beqlr-. */
246_GLOBAL(save_nvgprs)
247 ld r11,_TRAP(r1)
248 andi. r0,r11,1
249 beqlr-
250 SAVE_NVGPRS(r1)
251 clrrdi r0,r11,1
252 std r0,_TRAP(r1)
253 blr
254
255/*
256 * The sigsuspend and rt_sigsuspend system calls can call do_signal
257 * and thus put the process into the stopped state where we might
258 * want to examine its user state with ptrace. Therefore we need
259 * to save all the nonvolatile registers (r14 - r31) before calling
260 * the C code. Similarly, fork, vfork and clone need the full
261 * register state on the stack so that it can be copied to the child.
262 */
263_GLOBAL(ppc32_sigsuspend)
264 bl .save_nvgprs
265 bl .compat_sys_sigsuspend
266 b 70f
267
268_GLOBAL(ppc64_rt_sigsuspend)
269 bl .save_nvgprs
270 bl .sys_rt_sigsuspend
271 b 70f
272
273_GLOBAL(ppc32_rt_sigsuspend)
274 bl .save_nvgprs
275 bl .compat_sys_rt_sigsuspend
27670: cmpdi 0,r3,0
277 /* If it returned an error, we need to return via syscall_exit to set
278 the SO bit in cr0 and potentially stop for ptrace. */
279 bne syscall_exit
280 /* If sigsuspend() returns zero, we are going into a signal handler. We
281 may need to call audit_syscall_exit() to mark the exit from sigsuspend() */
282#ifdef CONFIG_AUDIT
283 ld r3,PACACURRENT(r13)
284 ld r4,AUDITCONTEXT(r3)
285 cmpdi 0,r4,0
286 beq .ret_from_except /* No audit_context: Leave immediately. */
287 li r4, 2 /* AUDITSC_FAILURE */
288 li r5,-4 /* It's always -EINTR */
289 bl .audit_syscall_exit
290#endif
291 b .ret_from_except
292
293_GLOBAL(ppc_fork)
294 bl .save_nvgprs
295 bl .sys_fork
296 b syscall_exit
297
298_GLOBAL(ppc_vfork)
299 bl .save_nvgprs
300 bl .sys_vfork
301 b syscall_exit
302
303_GLOBAL(ppc_clone)
304 bl .save_nvgprs
305 bl .sys_clone
306 b syscall_exit
307
308_GLOBAL(ppc32_swapcontext)
309 bl .save_nvgprs
310 bl .compat_sys_swapcontext
311 b 80f
312
313_GLOBAL(ppc64_swapcontext)
314 bl .save_nvgprs
315 bl .sys_swapcontext
316 b 80f
317
318_GLOBAL(ppc32_sigreturn)
319 bl .compat_sys_sigreturn
320 b 80f
321
322_GLOBAL(ppc32_rt_sigreturn)
323 bl .compat_sys_rt_sigreturn
324 b 80f
325
326_GLOBAL(ppc64_rt_sigreturn)
327 bl .sys_rt_sigreturn
328
32980: cmpdi 0,r3,0
330 blt syscall_exit
331 clrrdi r4,r1,THREAD_SHIFT
332 ld r4,TI_FLAGS(r4)
333 andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
334 beq+ 81f
335 addi r3,r1,STACK_FRAME_OVERHEAD
336 bl .do_syscall_trace_leave
33781: b .ret_from_except
338
339_GLOBAL(ret_from_fork)
340 bl .schedule_tail
341 REST_NVGPRS(r1)
342 li r3,0
343 b syscall_exit
344
345/*
346 * This routine switches between two different tasks. The process
347 * state of one is saved on its kernel stack. Then the state
348 * of the other is restored from its kernel stack. The memory
349 * management hardware is updated to the second process's state.
350 * Finally, we can return to the second process, via ret_from_except.
351 * On entry, r3 points to the THREAD for the current task, r4
352 * points to the THREAD for the new task.
353 *
354 * Note: there are two ways to get to the "going out" portion
355 * of this code; either by coming in via the entry (_switch)
356 * or via "fork" which must set up an environment equivalent
357 * to the "_switch" path. If you change this you'll have to change
358 * the fork code also.
359 *
360 * The code which creates the new task context is in 'copy_thread'
361 * in arch/ppc64/kernel/process.c
362 */
363 .align 7
364_GLOBAL(_switch)
365 mflr r0
366 std r0,16(r1)
367 stdu r1,-SWITCH_FRAME_SIZE(r1)
368 /* r3-r13 are caller saved -- Cort */
369 SAVE_8GPRS(14, r1)
370 SAVE_10GPRS(22, r1)
371 mflr r20 /* Return to switch caller */
372 mfmsr r22
373 li r0, MSR_FP
374#ifdef CONFIG_ALTIVEC
375BEGIN_FTR_SECTION
376 oris r0,r0,MSR_VEC@h /* Disable altivec */
377 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
378 std r24,THREAD_VRSAVE(r3)
379END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
380#endif /* CONFIG_ALTIVEC */
381 and. r0,r0,r22
382 beq+ 1f
383 andc r22,r22,r0
384 mtmsrd r22
385 isync
3861: std r20,_NIP(r1)
387 mfcr r23
388 std r23,_CCR(r1)
389 std r1,KSP(r3) /* Set old stack pointer */
390
391#ifdef CONFIG_SMP
392 /* We need a sync somewhere here to make sure that if the
393 * previous task gets rescheduled on another CPU, it sees all
394 * stores it has performed on this one.
395 */
396 sync
397#endif /* CONFIG_SMP */
398
399 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
400 std r6,PACACURRENT(r13) /* Set new 'current' */
401
402 ld r8,KSP(r4) /* new stack pointer */
403BEGIN_FTR_SECTION
404 clrrdi r6,r8,28 /* get its ESID */
405 clrrdi r9,r1,28 /* get current sp ESID */
406 clrldi. r0,r6,2 /* is new ESID c00000000? */
407 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
408 cror eq,4*cr1+eq,eq
409 beq 2f /* if yes, don't slbie it */
410
411 /* Bolt in the new stack SLB entry */
412 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
413 oris r0,r6,(SLB_ESID_V)@h
414 ori r0,r0,(SLB_NUM_BOLTED-1)@l
415 slbie r6
416 slbie r6 /* Workaround POWER5 < DD2.1 issue */
417 slbmte r7,r0
418 isync
419
4202:
421END_FTR_SECTION_IFSET(CPU_FTR_SLB)
422 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
423 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
424 because we don't need to leave the 288-byte ABI gap at the
425 top of the kernel stack. */
426 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
427
428 mr r1,r8 /* start using new stack pointer */
429 std r7,PACAKSAVE(r13)
430
431 ld r6,_CCR(r1)
432 mtcrf 0xFF,r6
433
434#ifdef CONFIG_ALTIVEC
435BEGIN_FTR_SECTION
436 ld r0,THREAD_VRSAVE(r4)
437 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
438END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
439#endif /* CONFIG_ALTIVEC */
440
441 /* r3-r13 are destroyed -- Cort */
442 REST_8GPRS(14, r1)
443 REST_10GPRS(22, r1)
444
445 /* convert old thread to its task_struct for return value */
446 addi r3,r3,-THREAD
447 ld r7,_NIP(r1) /* Return to _switch caller in new task */
448 mtlr r7
449 addi r1,r1,SWITCH_FRAME_SIZE
450 blr
451
/*
 * 64-bit exception return.  ret_from_except restores NVGPRs when they
 * were saved (low bit of _TRAP clear), then falls into the lite path,
 * which disables interrupts, checks for pending user/preemption work,
 * and finally restores the full register state and rfids.
 */
452 .align 7
453_GLOBAL(ret_from_except)
454 ld r11,_TRAP(r1)
455 andi. r0,r11,1
456 bne .ret_from_except_lite
457 REST_NVGPRS(r1)
458
459_GLOBAL(ret_from_except_lite)
460 /*
461 * Disable interrupts so that current_thread_info()->flags
462 * can't change between when we test it and when we return
463 * from the interrupt.
464 */
465 mfmsr r10 /* Get current interrupt state */
466 rldicl r9,r10,48,1 /* clear MSR_EE */
467 rotldi r9,r9,16
468 mtmsrd r9,1 /* Update machine state */
469
470#ifdef CONFIG_PREEMPT
471 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
472 li r0,_TIF_NEED_RESCHED /* bits to check */
473 ld r3,_MSR(r1)
474 ld r4,TI_FLAGS(r9)
475 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
476 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
477 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
478 bne do_work
479
480#else /* !CONFIG_PREEMPT */
481 ld r3,_MSR(r1) /* Returning to user mode? */
482 andi. r3,r3,MSR_PR
483 beq restore /* if not, just restore regs and return */
484
485 /* Check current_thread_info()->flags */
486 clrrdi r9,r1,THREAD_SHIFT
487 ld r4,TI_FLAGS(r9)
488 andi. r0,r4,_TIF_USER_WORK_MASK
489 bne do_work
490#endif
491
492restore:
493#ifdef CONFIG_PPC_ISERIES
/* iSeries soft-disable: if re-enabling and an interrupt is pending in
 * the lppaca, go service it through do_IRQ and retry the exit. */
494 ld r5,SOFTE(r1)
495 cmpdi 0,r5,0
496 beq 4f
497 /* Check for pending interrupts (iSeries) */
498 ld r3,PACALPPACA+LPPACAANYINT(r13)
499 cmpdi r3,0
500 beq+ 4f /* skip do_IRQ if no interrupts */
501
502 li r3,0
503 stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
504 ori r10,r10,MSR_EE
505 mtmsrd r10 /* hard-enable again */
506 addi r3,r1,STACK_FRAME_OVERHEAD
507 bl .do_IRQ
508 b .ret_from_except_lite /* loop back and handle more */
509
5104: stb r5,PACAPROCENABLED(r13)
511#endif
512
513 ld r3,_MSR(r1)
514 andi. r0,r3,MSR_RI
515 beq- unrecov_restore
516
517 andi. r0,r3,MSR_PR
518
519 /*
520 * r13 is our per cpu area, only restore it if we are returning to
521 * userspace
522 */
523 beq 1f
524 REST_GPR(13, r1)
5251:
526 ld r3,_CTR(r1)
527 ld r0,_LINK(r1)
528 mtctr r3
529 mtlr r0
530 ld r3,_XER(r1)
531 mtspr SPRN_XER,r3
532
533 REST_8GPRS(5, r1)
534
535 stdcx. r0,0,r1 /* to clear the reservation */
536
/* clear MSR:RI so a fault between the SRR loads and rfid is detectable */
537 mfmsr r0
538 li r2, MSR_RI
539 andc r0,r0,r2
540 mtmsrd r0,1
541
542 ld r0,_MSR(r1)
543 mtspr SPRN_SRR1,r0
544
545 ld r2,_CCR(r1)
546 mtcrf 0xFF,r2
547 ld r2,_NIP(r1)
548 mtspr SPRN_SRR0,r2
549
550 ld r0,GPR0(r1)
551 ld r2,GPR2(r1)
552 ld r3,GPR3(r1)
553 ld r4,GPR4(r1)
554 ld r1,GPR1(r1)
555
556 rfid
557 b . /* prevent speculative execution */
558
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
/*
 * Pending-work handling for the 64-bit exit path.  Kernel preemption
 * (CONFIG_PREEMPT only) loops on preempt_schedule while NEED_RESCHED
 * is set; user work re-enables interrupts, then reschedules and/or
 * delivers signals before retrying the exception exit.
 */
560do_work:
561#ifdef CONFIG_PREEMPT
562 andi. r0,r3,MSR_PR /* Returning to user mode? */
563 bne user_work
564 /* Check that preempt_count() == 0 and interrupts are enabled */
565 lwz r8,TI_PREEMPT(r9)
566 cmpwi cr1,r8,0
567#ifdef CONFIG_PPC_ISERIES
568 ld r0,SOFTE(r1)
569 cmpdi r0,0
570#else
571 andi. r0,r3,MSR_EE
572#endif
573 crandc eq,cr1*4+eq,eq
574 bne restore
575 /* here we are preempting the current task */
5761:
577#ifdef CONFIG_PPC_ISERIES
578 li r0,1
579 stb r0,PACAPROCENABLED(r13)
580#endif
581 ori r10,r10,MSR_EE
582 mtmsrd r10,1 /* reenable interrupts */
583 bl .preempt_schedule
584 mfmsr r10
585 clrrdi r9,r1,THREAD_SHIFT
586 rldicl r10,r10,48,1 /* disable interrupts again */
587 rotldi r10,r10,16
588 mtmsrd r10,1
589 ld r4,TI_FLAGS(r9)
590 andi. r0,r4,_TIF_NEED_RESCHED
591 bne 1b
592 b restore
593
594user_work:
595#endif
596 /* Enable interrupts */
597 ori r10,r10,MSR_EE
598 mtmsrd r10,1
599
600 andi. r0,r4,_TIF_NEED_RESCHED
601 beq 1f
602 bl .schedule
603 b .ret_from_except_lite
604
6051: bl .save_nvgprs
606 li r3,0
607 addi r4,r1,STACK_FRAME_OVERHEAD
608 bl .do_signal
609 b .ret_from_except
610
/* Saved MSR had RI clear: state is unrecoverable, report and spin. */
611unrecov_restore:
612 addi r3,r1,STACK_FRAME_OVERHEAD
613 bl .unrecoverable_exception
614 b unrecov_restore
615
616#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
625_GLOBAL(enter_rtas)
626 mflr r0
627 std r0,16(r1)
628 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
629
630 /* Because RTAS is running in 32b mode, it clobbers the high order half
631 * of all registers that it saves. We therefore save those registers
632 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
633 */
634 SAVE_GPR(2, r1) /* Save the TOC */
635 SAVE_GPR(13, r1) /* Save paca */
636 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
637 SAVE_10GPRS(22, r1) /* ditto */
638
639 mfcr r4
640 std r4,_CCR(r1)
641 mfctr r5
642 std r5,_CTR(r1)
643 mfspr r6,SPRN_XER
644 std r6,_XER(r1)
645 mfdar r7
646 std r7,_DAR(r1)
647 mfdsisr r8
648 std r8,_DSISR(r1)
649 mfsrr0 r9
650 std r9,_SRR0(r1)
651 mfsrr1 r10
652 std r10,_SRR1(r1)
653
654 /* There is no way it is acceptable to get here with interrupts enabled,
655 * check it with the asm equivalent of WARN_ON
656 */
657 mfmsr r6
658 andi. r0,r6,MSR_EE
6591: tdnei r0,0
660.section __bug_table,"a"
661 .llong 1b,__LINE__ + 0x1000000, 1f, 2f
662.previous
663.section .rodata,"a"
6641: .asciz __FILE__
6652: .asciz "enter_rtas"
666.previous
667
668 /* Unfortunately, the stack pointer and the MSR are also clobbered,
669 * so they are saved in the PACA which allows us to restore
670 * our original state after RTAS returns.
671 */
672 std r1,PACAR1(r13)
673 std r6,PACASAVEDMSR(r13)
674
675 /* Setup our real return addr */
/* rtas_return_loc runs relocated-off, so convert its address to physical */
676 SET_REG_TO_LABEL(r4,.rtas_return_loc)
677 SET_REG_TO_CONST(r9,KERNELBASE)
678 sub r4,r4,r9
679 mtlr r4
680
681 li r0,0
682 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
683 andc r0,r6,r0
684
/* build the MSR RTAS runs with: 32-bit, MMU off, FP off, RI set */
685 li r9,1
686 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
687 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
688 andc r6,r0,r9
689 ori r6,r6,MSR_RI
690 sync /* disable interrupts so SRR0/1 */
691 mtmsrd r0 /* don't get trashed */
692
693 SET_REG_TO_LABEL(r4,rtas)
694 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
695 ld r4,RTASBASE(r4) /* get the rtas->base value */
696
697 mtspr SPRN_SRR0,r5
698 mtspr SPRN_SRR1,r6
699 rfid
700 b . /* prevent speculative execution */
701
702_STATIC(rtas_return_loc)
703 /* relocation is off at this point */
704 mfspr r4,SPRN_SPRG3 /* Get PACA */
705 SET_REG_TO_CONST(r5, KERNELBASE)
706 sub r4,r4,r5 /* RELOC the PACA base pointer */
707
708 mfmsr r6
709 li r0,MSR_RI
710 andc r6,r6,r0
711 sync
712 mtmsrd r6
713
714 ld r1,PACAR1(r4) /* Restore our SP */
715 LOADADDR(r3,.rtas_restore_regs)
716 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
717
718 mtspr SPRN_SRR0,r3
719 mtspr SPRN_SRR1,r4
720 rfid
721 b . /* prevent speculative execution */
722
723_STATIC(rtas_restore_regs)
724 /* relocation is on at this point */
725 REST_GPR(2, r1) /* Restore the TOC */
726 REST_GPR(13, r1) /* Restore paca */
727 REST_8GPRS(14, r1) /* Restore the non-volatiles */
728 REST_10GPRS(22, r1) /* ditto */
729
730 mfspr r13,SPRN_SPRG3
731
732 ld r4,_CCR(r1)
733 mtcr r4
734 ld r5,_CTR(r1)
735 mtctr r5
736 ld r6,_XER(r1)
737 mtspr SPRN_XER,r6
738 ld r7,_DAR(r1)
739 mtdar r7
740 ld r8,_DSISR(r1)
741 mtdsisr r8
742 ld r9,_SRR0(r1)
743 mtsrr0 r9
744 ld r10,_SRR1(r1)
745 mtsrr1 r10
746
747 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
748 ld r0,16(r1) /* get return address */
749
750 mtlr r0
751 blr /* return to caller */
752
753#endif /* CONFIG_PPC_RTAS */
754
755#ifdef CONFIG_PPC_MULTIPLATFORM
756
/*
 * Call into Open Firmware (client interface entry point is in GPR4 of
 * the saved frame).  OF runs in 32-bit mode, so all registers it might
 * touch are saved to the stack first, MSR_SF/MSR_ISF are cleared for
 * the call, and the full 64-bit MSR and registers are restored after.
 */
757_GLOBAL(enter_prom)
758 mflr r0
759 std r0,16(r1)
760 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
761
762 /* Because PROM is running in 32b mode, it clobbers the high order half
763 * of all registers that it saves. We therefore save those registers
764 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
765 */
766 SAVE_8GPRS(2, r1)
767 SAVE_GPR(13, r1)
768 SAVE_8GPRS(14, r1)
769 SAVE_10GPRS(22, r1)
770 mfcr r4
771 std r4,_CCR(r1)
772 mfctr r5
773 std r5,_CTR(r1)
774 mfspr r6,SPRN_XER
775 std r6,_XER(r1)
776 mfdar r7
777 std r7,_DAR(r1)
778 mfdsisr r8
779 std r8,_DSISR(r1)
780 mfsrr0 r9
781 std r9,_SRR0(r1)
782 mfsrr1 r10
783 std r10,_SRR1(r1)
784 mfmsr r11
785 std r11,_MSR(r1)
786
787 /* Get the PROM entrypoint */
788 ld r0,GPR4(r1)
789 mtlr r0
790
791 /* Switch MSR to 32 bits mode
792 */
793 mfmsr r11
794 li r12,1
795 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
796 andc r11,r11,r12
797 li r12,1
798 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
799 andc r11,r11,r12
800 mtmsrd r11
801 isync
802
803 /* Restore arguments & enter PROM here... */
804 ld r3,GPR3(r1)
805 blrl
806
807 /* Just make sure that r1 top 32 bits didn't get
808 * corrupt by OF
809 */
810 rldicl r1,r1,0,32
811
812 /* Restore the MSR (back to 64 bits) */
813 ld r0,_MSR(r1)
814 mtmsrd r0
815 isync
816
817 /* Restore other registers */
818 REST_GPR(2, r1)
819 REST_GPR(13, r1)
820 REST_8GPRS(14, r1)
821 REST_10GPRS(22, r1)
822 ld r4,_CCR(r1)
823 mtcr r4
824 ld r5,_CTR(r1)
825 mtctr r5
826 ld r6,_XER(r1)
827 mtspr SPRN_XER,r6
828 ld r7,_DAR(r1)
829 mtdar r7
830 ld r8,_DSISR(r1)
831 mtdsisr r8
832 ld r9,_SRR0(r1)
833 mtsrr0 r9
834 ld r10,_SRR1(r1)
835 mtsrr1 r10
836
837 addi r1,r1,PROM_FRAME_SIZE
838 ld r0,16(r1)
839 mtlr r0
840 blr
841
842#endif /* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/ppc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 665d7d34304c..563d445ff584 100644
--- a/arch/ppc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/config.h> 12#include <linux/config.h>
13#include <asm/processor.h> 13#include <asm/reg.h>
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/pgtable.h> 16#include <asm/pgtable.h>
@@ -27,13 +27,9 @@
27 * Load up this task's FP registers from its thread_struct, 27 * Load up this task's FP registers from its thread_struct,
28 * enable the FPU for the current task and return to the task. 28 * enable the FPU for the current task and return to the task.
29 */ 29 */
30 .globl load_up_fpu 30_GLOBAL(load_up_fpu)
31load_up_fpu:
32 mfmsr r5 31 mfmsr r5
33 ori r5,r5,MSR_FP 32 ori r5,r5,MSR_FP
34#ifdef CONFIG_PPC64BRIDGE
35 clrldi r5,r5,1 /* turn off 64-bit mode */
36#endif /* CONFIG_PPC64BRIDGE */
37 SYNC 33 SYNC
38 MTMSRD(r5) /* enable use of fpu now */ 34 MTMSRD(r5) /* enable use of fpu now */
39 isync 35 isync
@@ -43,67 +39,57 @@ load_up_fpu:
43 * to another. Instead we call giveup_fpu in switch_to. 39 * to another. Instead we call giveup_fpu in switch_to.
44 */ 40 */
45#ifndef CONFIG_SMP 41#ifndef CONFIG_SMP
46 tophys(r6,0) /* get __pa constant */ 42 LOADBASE(r3, last_task_used_math)
47 addis r3,r6,last_task_used_math@ha 43 tophys(r3,r3)
48 lwz r4,last_task_used_math@l(r3) 44 LDL r4,OFF(last_task_used_math)(r3)
49 cmpwi 0,r4,0 45 CMPI 0,r4,0
50 beq 1f 46 beq 1f
51 add r4,r4,r6 47 tophys(r4,r4)
52 addi r4,r4,THREAD /* want last_task_used_math->thread */ 48 addi r4,r4,THREAD /* want last_task_used_math->thread */
53 SAVE_32FPRS(0, r4) 49 SAVE_32FPRS(0, r4)
54 mffs fr0 50 mffs fr0
55 stfd fr0,THREAD_FPSCR-4(r4) 51 stfd fr0,THREAD_FPSCR-4(r4)
56 lwz r5,PT_REGS(r4) 52 LDL r5,PT_REGS(r4)
57 add r5,r5,r6 53 tophys(r5,r5)
58 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 54 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
59 li r10,MSR_FP|MSR_FE0|MSR_FE1 55 li r10,MSR_FP|MSR_FE0|MSR_FE1
60 andc r4,r4,r10 /* disable FP for previous task */ 56 andc r4,r4,r10 /* disable FP for previous task */
61 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 57 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
621: 581:
63#endif /* CONFIG_SMP */ 59#endif /* CONFIG_SMP */
64 /* enable use of FP after return */ 60 /* enable use of FP after return */
61#ifdef CONFIG_PPC32
65 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ 62 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
66 lwz r4,THREAD_FPEXC_MODE(r5) 63 lwz r4,THREAD_FPEXC_MODE(r5)
67 ori r9,r9,MSR_FP /* enable FP for current */ 64 ori r9,r9,MSR_FP /* enable FP for current */
68 or r9,r9,r4 65 or r9,r9,r4
66#else
67 ld r4,PACACURRENT(r13)
68 addi r5,r4,THREAD /* Get THREAD */
69 ld r4,THREAD_FPEXC_MODE(r5)
70 ori r12,r12,MSR_FP
71 or r12,r12,r4
72 std r12,_MSR(r1)
73#endif
69 lfd fr0,THREAD_FPSCR-4(r5) 74 lfd fr0,THREAD_FPSCR-4(r5)
70 mtfsf 0xff,fr0 75 mtfsf 0xff,fr0
71 REST_32FPRS(0, r5) 76 REST_32FPRS(0, r5)
72#ifndef CONFIG_SMP 77#ifndef CONFIG_SMP
73 subi r4,r5,THREAD 78 subi r4,r5,THREAD
74 sub r4,r4,r6 79 tovirt(r4,r4)
75 stw r4,last_task_used_math@l(r3) 80 STL r4,OFF(last_task_used_math)(r3)
76#endif /* CONFIG_SMP */ 81#endif /* CONFIG_SMP */
77 /* restore registers and return */ 82 /* restore registers and return */
78 /* we haven't used ctr or xer or lr */ 83 /* we haven't used ctr or xer or lr */
79 b fast_exception_return 84 b fast_exception_return
80 85
81/* 86/*
82 * FP unavailable trap from kernel - print a message, but let
83 * the task use FP in the kernel until it returns to user mode.
84 */
85 .globl KernelFP
86KernelFP:
87 lwz r3,_MSR(r1)
88 ori r3,r3,MSR_FP
89 stw r3,_MSR(r1) /* enable use of FP after return */
90 lis r3,86f@h
91 ori r3,r3,86f@l
92 mr r4,r2 /* current */
93 lwz r5,_NIP(r1)
94 bl printk
95 b ret_from_except
9686: .string "floating point used in kernel (task=%p, pc=%x)\n"
97 .align 4,0
98
99/*
100 * giveup_fpu(tsk) 87 * giveup_fpu(tsk)
101 * Disable FP for the task given as the argument, 88 * Disable FP for the task given as the argument,
102 * and save the floating-point registers in its thread_struct. 89 * and save the floating-point registers in its thread_struct.
103 * Enables the FPU for use in the kernel on return. 90 * Enables the FPU for use in the kernel on return.
104 */ 91 */
105 .globl giveup_fpu 92_GLOBAL(giveup_fpu)
106giveup_fpu:
107 mfmsr r5 93 mfmsr r5
108 ori r5,r5,MSR_FP 94 ori r5,r5,MSR_FP
109 SYNC_601 95 SYNC_601
@@ -111,23 +97,23 @@ giveup_fpu:
111 MTMSRD(r5) /* enable use of fpu now */ 97 MTMSRD(r5) /* enable use of fpu now */
112 SYNC_601 98 SYNC_601
113 isync 99 isync
114 cmpwi 0,r3,0 100 CMPI 0,r3,0
115 beqlr- /* if no previous owner, done */ 101 beqlr- /* if no previous owner, done */
116 addi r3,r3,THREAD /* want THREAD of task */ 102 addi r3,r3,THREAD /* want THREAD of task */
117 lwz r5,PT_REGS(r3) 103 LDL r5,PT_REGS(r3)
118 cmpwi 0,r5,0 104 CMPI 0,r5,0
119 SAVE_32FPRS(0, r3) 105 SAVE_32FPRS(0, r3)
120 mffs fr0 106 mffs fr0
121 stfd fr0,THREAD_FPSCR-4(r3) 107 stfd fr0,THREAD_FPSCR-4(r3)
122 beq 1f 108 beq 1f
123 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 109 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
124 li r3,MSR_FP|MSR_FE0|MSR_FE1 110 li r3,MSR_FP|MSR_FE0|MSR_FE1
125 andc r4,r4,r3 /* disable FP for previous task */ 111 andc r4,r4,r3 /* disable FP for previous task */
126 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 112 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1271: 1131:
128#ifndef CONFIG_SMP 114#ifndef CONFIG_SMP
129 li r5,0 115 li r5,0
130 lis r4,last_task_used_math@ha 116 LOADBASE(r4,last_task_used_math)
131 stw r5,last_task_used_math@l(r4) 117 STL r5,OFF(last_task_used_math)(r4)
132#endif /* CONFIG_SMP */ 118#endif /* CONFIG_SMP */
133 blr 119 blr
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
new file mode 100644
index 000000000000..cd51fe585fcd
--- /dev/null
+++ b/arch/powerpc/kernel/head_32.S
@@ -0,0 +1,1371 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/config.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/cache.h>
32#include <asm/thread_info.h>
33#include <asm/ppc_asm.h>
34#include <asm/asm-offsets.h>
35
36#ifdef CONFIG_APUS
37#include <asm/amigappc.h>
38#endif
39
40/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
41#define LOAD_BAT(n, reg, RA, RB) \
42 /* see the comment for clear_bats() -- Cort */ \
43 li RA,0; \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_DBAT##n##U,RA; \
46 lwz RA,(n*16)+0(reg); \
47 lwz RB,(n*16)+4(reg); \
48 mtspr SPRN_IBAT##n##U,RA; \
49 mtspr SPRN_IBAT##n##L,RB; \
50 beq 1f; \
51 lwz RA,(n*16)+8(reg); \
52 lwz RB,(n*16)+12(reg); \
53 mtspr SPRN_DBAT##n##U,RA; \
54 mtspr SPRN_DBAT##n##L,RB; \
551:
56
57 .text
58 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
59 .stabs "head_32.S",N_SO,0,0,0f
600:
61 .globl _stext
62_stext:
63
64/*
65 * _start is defined this way because the XCOFF loader in the OpenFirmware
66 * on the powermac expects the entry point to be a procedure descriptor.
67 */
68 .text
69 .globl _start
70_start:
71 /*
72 * These are here for legacy reasons, the kernel used to
73 * need to look like a coff function entry for the pmac
74 * but we're always started by some kind of bootloader now.
75 * -- Cort
76 */
77 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
78 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
79 nop
80
81/* PMAC
82 * Enter here with the kernel text, data and bss loaded starting at
83 * 0, running with virtual == physical mapping.
84 * r5 points to the prom entry point (the client interface handler
85 * address). Address translation is turned on, with the prom
86 * managing the hash table. Interrupts are disabled. The stack
87 * pointer (r1) points to just below the end of the half-meg region
88 * from 0x380000 - 0x400000, which is mapped in already.
89 *
90 * If we are booted from MacOS via BootX, we enter with the kernel
91 * image loaded somewhere, and the following values in registers:
92 * r3: 'BooX' (0x426f6f58)
93 * r4: virtual address of boot_infos_t
94 * r5: 0
95 *
96 * APUS
97 * r3: 'APUS'
98 * r4: physical address of memory base
99 * Linux/m68k style BootInfo structure at &_end.
100 *
101 * PREP
102 * This is jumped to on prep systems right after the kernel is relocated
103 * to its proper place in memory by the boot loader. The expected layout
104 * of the regs is:
105 * r3: ptr to residual data
106 * r4: initrd_start or if no initrd then 0
107 * r5: initrd_end - unused if r4 is 0
108 * r6: Start of command line string
109 * r7: End of command line string
110 *
111 * This just gets a minimal mmu environment setup so we can call
112 * start_here() to do the real work.
113 * -- Cort
114 */
115
116 .globl __start
117__start:
118/*
119 * We have to do any OF calls before we map ourselves to KERNELBASE,
120 * because OF may have I/O devices mapped into that area
121 * (particularly on CHRP).
122 */
123 cmpwi 0,r5,0
124 beq 1f
125 bl prom_init
126 trap
127
1281: mr r31,r3 /* save parameters */
129 mr r30,r4
130 li r24,0 /* cpu # */
131
132/*
133 * early_init() does the early machine identification and does
134 * the necessary low-level setup and clears the BSS
135 * -- Cort <cort@fsmlabs.com>
136 */
137 bl early_init
138
139#ifdef CONFIG_APUS
140/* On APUS the __va/__pa constants need to be set to the correct
141 * values before continuing.
142 */
143 mr r4,r30
144 bl fix_mem_constants
145#endif /* CONFIG_APUS */
146
147/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
148 * the physical address we are running at, returned by early_init()
149 */
150 bl mmu_off
151__after_mmu_off:
152 bl clear_bats
153 bl flush_tlbs
154
155 bl initial_bats
156
157/*
158 * Call setup_cpu for CPU 0 and initialize 6xx Idle
159 */
160 bl reloc_offset
161 li r24,0 /* cpu# */
162 bl call_setup_cpu /* Call setup_cpu for this CPU */
163#ifdef CONFIG_6xx
164 bl reloc_offset
165 bl init_idle_6xx
166#endif /* CONFIG_6xx */
167
168
169#ifndef CONFIG_APUS
170/*
171 * We need to run with _start at physical address 0.
172 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
173 * the exception vectors at 0 (and therefore this copy
174 * overwrites OF's exception vectors with our own).
175 * The MMU is off at this point.
176 */
177 bl reloc_offset
178 mr r26,r3
179 addis r4,r3,KERNELBASE@h /* current address of _start */
180 cmpwi 0,r4,0 /* are we already running at 0? */
181 bne relocate_kernel
182#endif /* CONFIG_APUS */
183/*
184 * we now have the 1st 16M of ram mapped with the bats.
185 * prep needs the mmu to be turned on here, but pmac already has it on.
186 * this shouldn't bother the pmac since it just gets turned on again
187 * as we jump to our code at KERNELBASE. -- Cort
188 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
189 * off, and in other cases, we now turn it off before changing BATs above.
190 */
191turn_on_mmu:
192 mfmsr r0
193 ori r0,r0,MSR_DR|MSR_IR
194 mtspr SPRN_SRR1,r0
195 lis r0,start_here@h
196 ori r0,r0,start_here@l
197 mtspr SPRN_SRR0,r0
198 SYNC
199 RFI /* enables MMU */
200
201/*
202 * We need __secondary_hold as a place to hold the other cpus on
203 * an SMP machine, even when we are running a UP kernel.
204 */
205 . = 0xc0 /* for prep bootloader */
206 li r3,1 /* MTX only has 1 cpu */
207 .globl __secondary_hold
208__secondary_hold:
209 /* tell the master we're here */
210 stw r3,4(0)
211#ifdef CONFIG_SMP
212100: lwz r4,0(0)
213 /* wait until we're told to start */
214 cmpw 0,r4,r3
215 bne 100b
216 /* our cpu # was at addr 0 - go */
217 mr r24,r3 /* cpu # */
218 b __secondary_start
219#else
220 b .
221#endif /* CONFIG_SMP */
222
223/*
224 * Exception entry code. This code runs with address translation
225 * turned off, i.e. using physical addresses.
226 * We assume sprg3 has the physical address of the current
227 * task's thread_struct.
228 */
229#define EXCEPTION_PROLOG \
230 mtspr SPRN_SPRG0,r10; \
231 mtspr SPRN_SPRG1,r11; \
232 mfcr r10; \
233 EXCEPTION_PROLOG_1; \
234 EXCEPTION_PROLOG_2
235
236#define EXCEPTION_PROLOG_1 \
237 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
238 andi. r11,r11,MSR_PR; \
239 tophys(r11,r1); /* use tophys(r1) if kernel */ \
240 beq 1f; \
241 mfspr r11,SPRN_SPRG3; \
242 lwz r11,THREAD_INFO-THREAD(r11); \
243 addi r11,r11,THREAD_SIZE; \
244 tophys(r11,r11); \
2451: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
246
247
248#define EXCEPTION_PROLOG_2 \
249 CLR_TOP32(r11); \
250 stw r10,_CCR(r11); /* save registers */ \
251 stw r12,GPR12(r11); \
252 stw r9,GPR9(r11); \
253 mfspr r10,SPRN_SPRG0; \
254 stw r10,GPR10(r11); \
255 mfspr r12,SPRN_SPRG1; \
256 stw r12,GPR11(r11); \
257 mflr r10; \
258 stw r10,_LINK(r11); \
259 mfspr r12,SPRN_SRR0; \
260 mfspr r9,SPRN_SRR1; \
261 stw r1,GPR1(r11); \
262 stw r1,0(r11); \
263 tovirt(r1,r11); /* set new kernel sp */ \
264 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
265 MTMSRD(r10); /* (except for mach check in rtas) */ \
266 stw r0,GPR0(r11); \
267 SAVE_4GPRS(3, r11); \
268 SAVE_2GPRS(7, r11)
269
270/*
271 * Note: code which follows this uses cr0.eq (set if from kernel),
272 * r11, r12 (SRR0), and r9 (SRR1).
273 *
274 * Note2: once we have set r1 we are in a position to take exceptions
275 * again, and we could thus set MSR:RI at that point.
276 */
277
278/*
279 * Exception vectors.
280 */
281#define EXCEPTION(n, label, hdlr, xfer) \
282 . = n; \
283label: \
284 EXCEPTION_PROLOG; \
285 addi r3,r1,STACK_FRAME_OVERHEAD; \
286 xfer(n, hdlr)
287
288#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
289 li r10,trap; \
290 stw r10,TRAP(r11); \
291 li r10,MSR_KERNEL; \
292 copyee(r10, r9); \
293 bl tfer; \
294i##n: \
295 .long hdlr; \
296 .long ret
297
298#define COPY_EE(d, s) rlwimi d,s,0,16,16
299#define NOCOPY(d, s)
300
301#define EXC_XFER_STD(n, hdlr) \
302 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
303 ret_from_except_full)
304
305#define EXC_XFER_LITE(n, hdlr) \
306 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
307 ret_from_except)
308
309#define EXC_XFER_EE(n, hdlr) \
310 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
311 ret_from_except_full)
312
313#define EXC_XFER_EE_LITE(n, hdlr) \
314 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
315 ret_from_except)
316
317/* System reset */
318/* core99 pmac starts the seconary here by changing the vector, and
319 putting it back to what it was (unknown_exception) when done. */
320#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
321 . = 0x100
322 b __secondary_start_gemini
323#else
324 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
325#endif
326
327/* Machine check */
328/*
329 * On CHRP, this is complicated by the fact that we could get a
330 * machine check inside RTAS, and we have no guarantee that certain
331 * critical registers will have the values we expect. The set of
332 * registers that might have bad values includes all the GPRs
333 * and all the BATs. We indicate that we are in RTAS by putting
334 * a non-zero value, the address of the exception frame to use,
335 * in SPRG2. The machine check handler checks SPRG2 and uses its
336 * value if it is non-zero. If we ever needed to free up SPRG2,
337 * we could use a field in the thread_info or thread_struct instead.
338 * (Other exception handlers assume that r1 is a valid kernel stack
339 * pointer when we take an exception from supervisor mode.)
340 * -- paulus.
341 */
342 . = 0x200
343 mtspr SPRN_SPRG0,r10
344 mtspr SPRN_SPRG1,r11
345 mfcr r10
346#ifdef CONFIG_PPC_CHRP
347 mfspr r11,SPRN_SPRG2
348 cmpwi 0,r11,0
349 bne 7f
350#endif /* CONFIG_PPC_CHRP */
351 EXCEPTION_PROLOG_1
3527: EXCEPTION_PROLOG_2
353 addi r3,r1,STACK_FRAME_OVERHEAD
354#ifdef CONFIG_PPC_CHRP
355 mfspr r4,SPRN_SPRG2
356 cmpwi cr1,r4,0
357 bne cr1,1f
358#endif
359 EXC_XFER_STD(0x200, machine_check_exception)
360#ifdef CONFIG_PPC_CHRP
3611: b machine_check_in_rtas
362#endif
363
364/* Data access exception. */
365 . = 0x300
366DataAccess:
367 EXCEPTION_PROLOG
368 mfspr r10,SPRN_DSISR
369 andis. r0,r10,0xa470 /* weird error? */
370 bne 1f /* if not, try to put a PTE */
371 mfspr r4,SPRN_DAR /* into the hash table */
372 rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
373 bl hash_page
3741: stw r10,_DSISR(r11)
375 mr r5,r10
376 mfspr r4,SPRN_DAR
377 EXC_XFER_EE_LITE(0x300, handle_page_fault)
378
379
380/* Instruction access exception. */
381 . = 0x400
382InstructionAccess:
383 EXCEPTION_PROLOG
384 andis. r0,r9,0x4000 /* no pte found? */
385 beq 1f /* if so, try to put a PTE */
386 li r3,0 /* into the hash table */
387 mr r4,r12 /* SRR0 is fault address */
388 bl hash_page
3891: mr r4,r12
390 mr r5,r9
391 EXC_XFER_EE_LITE(0x400, handle_page_fault)
392
393/* External interrupt */
394 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
395
396/* Alignment exception */
397 . = 0x600
398Alignment:
399 EXCEPTION_PROLOG
400 mfspr r4,SPRN_DAR
401 stw r4,_DAR(r11)
402 mfspr r5,SPRN_DSISR
403 stw r5,_DSISR(r11)
404 addi r3,r1,STACK_FRAME_OVERHEAD
405 EXC_XFER_EE(0x600, alignment_exception)
406
407/* Program check exception */
408 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
409
410/* Floating-point unavailable */
411 . = 0x800
412FPUnavailable:
413 EXCEPTION_PROLOG
414 bne load_up_fpu /* if from user, just load it up */
415 addi r3,r1,STACK_FRAME_OVERHEAD
416 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
417
418/* Decrementer */
419 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
420
421 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
422 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
423
424/* System call */
425 . = 0xc00
426SystemCall:
427 EXCEPTION_PROLOG
428 EXC_XFER_EE_LITE(0xc00, DoSyscall)
429
430/* Single step - not used on 601 */
431 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
432 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
433
434/*
435 * The Altivec unavailable trap is at 0x0f20. Foo.
436 * We effectively remap it to 0x3000.
437 * We include an altivec unavailable exception vector even if
438 * not configured for Altivec, so that you can't panic a
439 * non-altivec kernel running on a machine with altivec just
440 * by executing an altivec instruction.
441 */
442 . = 0xf00
443 b Trap_0f
444
445 . = 0xf20
446 b AltiVecUnavailable
447
448Trap_0f:
449 EXCEPTION_PROLOG
450 addi r3,r1,STACK_FRAME_OVERHEAD
451 EXC_XFER_EE(0xf00, unknown_exception)
452
453/*
454 * Handle TLB miss for instruction on 603/603e.
455 * Note: we get an alternate set of r0 - r3 to use automatically.
456 */
457 . = 0x1000
458InstructionTLBMiss:
459/*
460 * r0: stored ctr
461 * r1: linux style pte ( later becomes ppc hardware pte )
462 * r2: ptr to linux-style pte
463 * r3: scratch
464 */
465 mfctr r0
466 /* Get PTE (linux-style) and check access */
467 mfspr r3,SPRN_IMISS
468 lis r1,KERNELBASE@h /* check if kernel address */
469 cmplw 0,r3,r1
470 mfspr r2,SPRN_SPRG3
471 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
472 lwz r2,PGDIR(r2)
473 blt+ 112f
474 lis r2,swapper_pg_dir@ha /* if kernel address, use */
475 addi r2,r2,swapper_pg_dir@l /* kernel page table */
476 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
477 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
478112: tophys(r2,r2)
479 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
480 lwz r2,0(r2) /* get pmd entry */
481 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
482 beq- InstructionAddressInvalid /* return if no mapping */
483 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
484 lwz r3,0(r2) /* get linux-style pte */
485 andc. r1,r1,r3 /* check access & ~permission */
486 bne- InstructionAddressInvalid /* return if access not permitted */
487 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
488 /*
489 * NOTE! We are assuming this is not an SMP system, otherwise
490 * we would need to update the pte atomically with lwarx/stwcx.
491 */
492 stw r3,0(r2) /* update PTE (accessed bit) */
493 /* Convert linux-style PTE to low word of PPC-style PTE */
494 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
495 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
496 and r1,r1,r2 /* writable if _RW and _DIRTY */
497 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
498 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
499 ori r1,r1,0xe14 /* clear out reserved bits and M */
500 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
501 mtspr SPRN_RPA,r1
502 mfspr r3,SPRN_IMISS
503 tlbli r3
504 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
505 mtcrf 0x80,r3
506 rfi
507InstructionAddressInvalid:
508 mfspr r3,SPRN_SRR1
509 rlwinm r1,r3,9,6,6 /* Get load/store bit */
510
511 addis r1,r1,0x2000
512 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
513 mtctr r0 /* Restore CTR */
514 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
515 or r2,r2,r1
516 mtspr SPRN_SRR1,r2
517 mfspr r1,SPRN_IMISS /* Get failing address */
518 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
519 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
520 xor r1,r1,r2
521 mtspr SPRN_DAR,r1 /* Set fault address */
522 mfmsr r0 /* Restore "normal" registers */
523 xoris r0,r0,MSR_TGPR>>16
524 mtcrf 0x80,r3 /* Restore CR0 */
525 mtmsr r0
526 b InstructionAccess
527
528/*
529 * Handle TLB miss for DATA Load operation on 603/603e
530 */
531 . = 0x1100
532DataLoadTLBMiss:
533/*
534 * r0: stored ctr
535 * r1: linux style pte ( later becomes ppc hardware pte )
536 * r2: ptr to linux-style pte
537 * r3: scratch
538 */
539 mfctr r0
540 /* Get PTE (linux-style) and check access */
541 mfspr r3,SPRN_DMISS
542 lis r1,KERNELBASE@h /* check if kernel address */
543 cmplw 0,r3,r1
544 mfspr r2,SPRN_SPRG3
545 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
546 lwz r2,PGDIR(r2)
547 blt+ 112f
548 lis r2,swapper_pg_dir@ha /* if kernel address, use */
549 addi r2,r2,swapper_pg_dir@l /* kernel page table */
550 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
551 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
552112: tophys(r2,r2)
553 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
554 lwz r2,0(r2) /* get pmd entry */
555 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
556 beq- DataAddressInvalid /* return if no mapping */
557 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
558 lwz r3,0(r2) /* get linux-style pte */
559 andc. r1,r1,r3 /* check access & ~permission */
560 bne- DataAddressInvalid /* return if access not permitted */
561 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
562 /*
563 * NOTE! We are assuming this is not an SMP system, otherwise
564 * we would need to update the pte atomically with lwarx/stwcx.
565 */
566 stw r3,0(r2) /* update PTE (accessed bit) */
567 /* Convert linux-style PTE to low word of PPC-style PTE */
568 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
569 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
570 and r1,r1,r2 /* writable if _RW and _DIRTY */
571 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
572 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
573 ori r1,r1,0xe14 /* clear out reserved bits and M */
574 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
575 mtspr SPRN_RPA,r1
576 mfspr r3,SPRN_DMISS
577 tlbld r3
578 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
579 mtcrf 0x80,r3
580 rfi
581DataAddressInvalid:
582 mfspr r3,SPRN_SRR1
583 rlwinm r1,r3,9,6,6 /* Get load/store bit */
584 addis r1,r1,0x2000
585 mtspr SPRN_DSISR,r1
586 mtctr r0 /* Restore CTR */
587 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
588 mtspr SPRN_SRR1,r2
589 mfspr r1,SPRN_DMISS /* Get failing address */
590 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
591 beq 20f /* Jump if big endian */
592 xori r1,r1,3
59320: mtspr SPRN_DAR,r1 /* Set fault address */
594 mfmsr r0 /* Restore "normal" registers */
595 xoris r0,r0,MSR_TGPR>>16
596 mtcrf 0x80,r3 /* Restore CR0 */
597 mtmsr r0
598 b DataAccess
599
600/*
601 * Handle TLB miss for DATA Store on 603/603e
602 */
603 . = 0x1200
604DataStoreTLBMiss:
605/*
606 * r0: stored ctr
607 * r1: linux style pte ( later becomes ppc hardware pte )
608 * r2: ptr to linux-style pte
609 * r3: scratch
610 */
611 mfctr r0
612 /* Get PTE (linux-style) and check access */
613 mfspr r3,SPRN_DMISS
614 lis r1,KERNELBASE@h /* check if kernel address */
615 cmplw 0,r3,r1
616 mfspr r2,SPRN_SPRG3
617 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
618 lwz r2,PGDIR(r2)
619 blt+ 112f
620 lis r2,swapper_pg_dir@ha /* if kernel address, use */
621 addi r2,r2,swapper_pg_dir@l /* kernel page table */
622 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
623 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
624112: tophys(r2,r2)
625 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
626 lwz r2,0(r2) /* get pmd entry */
627 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
628 beq- DataAddressInvalid /* return if no mapping */
629 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
630 lwz r3,0(r2) /* get linux-style pte */
631 andc. r1,r1,r3 /* check access & ~permission */
632 bne- DataAddressInvalid /* return if access not permitted */
633 ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
634 /*
635 * NOTE! We are assuming this is not an SMP system, otherwise
636 * we would need to update the pte atomically with lwarx/stwcx.
637 */
638 stw r3,0(r2) /* update PTE (accessed/dirty bits) */
639 /* Convert linux-style PTE to low word of PPC-style PTE */
640 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
641 li r1,0xe15 /* clear out reserved bits and M */
642 andc r1,r3,r1 /* PP = user? 2: 0 */
643 mtspr SPRN_RPA,r1
644 mfspr r3,SPRN_DMISS
645 tlbld r3
646 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
647 mtcrf 0x80,r3
648 rfi
649
650#ifndef CONFIG_ALTIVEC
651#define altivec_assist_exception unknown_exception
652#endif
653
654 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
655 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
656 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
657 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
658 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
659 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
662 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
663 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
664 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
665 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
666 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
667 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
668 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
669 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
670 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
678 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
679 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
680 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
681 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
682 EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
683
684 .globl mol_trampoline
685 .set mol_trampoline, i0x2f00
686
687 . = 0x3000
688
689AltiVecUnavailable:
690 EXCEPTION_PROLOG
691#ifdef CONFIG_ALTIVEC
692 bne load_up_altivec /* if from user, just load it up */
693#endif /* CONFIG_ALTIVEC */
694 EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
695
696#ifdef CONFIG_ALTIVEC
697/* Note that the AltiVec support is closely modeled after the FP
698 * support. Changes to one are likely to be applicable to the
699 * other! */
700load_up_altivec:
701/*
702 * Disable AltiVec for the task which had AltiVec previously,
703 * and save its AltiVec registers in its thread_struct.
704 * Enables AltiVec for use in the kernel on return.
705 * On SMP we know the AltiVec units are free, since we give it up every
706 * switch. -- Kumar
707 */
708 mfmsr r5
709 oris r5,r5,MSR_VEC@h
710 MTMSRD(r5) /* enable use of AltiVec now */
711 isync
712/*
713 * For SMP, we don't do lazy AltiVec switching because it just gets too
714 * horrendously complex, especially when a task switches from one CPU
715 * to another. Instead we call giveup_altivec in switch_to.
716 */
717#ifndef CONFIG_SMP
718 tophys(r6,0)
719 addis r3,r6,last_task_used_altivec@ha
720 lwz r4,last_task_used_altivec@l(r3)
721 cmpwi 0,r4,0
722 beq 1f
723 add r4,r4,r6
724 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
725 SAVE_32VRS(0,r10,r4)
726 mfvscr vr0
727 li r10,THREAD_VSCR
728 stvx vr0,r10,r4
729 lwz r5,PT_REGS(r4)
730 add r5,r5,r6
731 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
732 lis r10,MSR_VEC@h
733 andc r4,r4,r10 /* disable altivec for previous task */
734 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7351:
736#endif /* CONFIG_SMP */
737 /* enable use of AltiVec after return */
738 oris r9,r9,MSR_VEC@h
739 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
740 li r4,1
741 li r10,THREAD_VSCR
742 stw r4,THREAD_USED_VR(r5)
743 lvx vr0,r10,r5
744 mtvscr vr0
745 REST_32VRS(0,r10,r5)
746#ifndef CONFIG_SMP
747 subi r4,r5,THREAD
748 sub r4,r4,r6
749 stw r4,last_task_used_altivec@l(r3)
750#endif /* CONFIG_SMP */
751 /* restore registers and return */
752 /* we haven't used ctr or xer or lr */
753 b fast_exception_return
754
755/*
756 * AltiVec unavailable trap from kernel - print a message, but let
757 * the task use AltiVec in the kernel until it returns to user mode.
758 */
759KernelAltiVec:
760 lwz r3,_MSR(r1)
761 oris r3,r3,MSR_VEC@h
762 stw r3,_MSR(r1) /* enable use of AltiVec after return */
763 lis r3,87f@h
764 ori r3,r3,87f@l
765 mr r4,r2 /* current */
766 lwz r5,_NIP(r1)
767 bl printk
768 b ret_from_except
76987: .string "AltiVec used in kernel (task=%p, pc=%x) \n"
770 .align 4,0
771
772/*
773 * giveup_altivec(tsk)
774 * Disable AltiVec for the task given as the argument,
775 * and save the AltiVec registers in its thread_struct.
776 * Enables AltiVec for use in the kernel on return.
777 */
778
779 .globl giveup_altivec
780giveup_altivec:
781 mfmsr r5
782 oris r5,r5,MSR_VEC@h
783 SYNC
784 MTMSRD(r5) /* enable use of AltiVec now */
785 isync
786 cmpwi 0,r3,0
787 beqlr- /* if no previous owner, done */
788 addi r3,r3,THREAD /* want THREAD of task */
789 lwz r5,PT_REGS(r3)
790 cmpwi 0,r5,0
791 SAVE_32VRS(0, r4, r3)
792 mfvscr vr0
793 li r4,THREAD_VSCR
794 stvx vr0,r4,r3
795 beq 1f
796 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
797 lis r3,MSR_VEC@h
798 andc r4,r4,r3 /* disable AltiVec for previous task */
799 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8001:
801#ifndef CONFIG_SMP
802 li r5,0
803 lis r4,last_task_used_altivec@ha
804 stw r5,last_task_used_altivec@l(r4)
805#endif /* CONFIG_SMP */
806 blr
807#endif /* CONFIG_ALTIVEC */
808
809/*
810 * This code is jumped to from the startup code to copy
811 * the kernel image to physical address 0.
812 */
813relocate_kernel:
814 addis r9,r26,klimit@ha /* fetch klimit */
815 lwz r25,klimit@l(r9)
816 addis r25,r25,-KERNELBASE@h
817 li r3,0 /* Destination base address */
818 li r6,0 /* Destination offset */
819 li r5,0x4000 /* # bytes of memory to copy */
820 bl copy_and_flush /* copy the first 0x4000 bytes */
821 addi r0,r3,4f@l /* jump to the address of 4f */
822 mtctr r0 /* in copy and do the rest. */
823 bctr /* jump to the copy */
8244: mr r5,r25
825 bl copy_and_flush /* copy the rest */
826 b turn_on_mmu
827
 828/*
 829 * Copy routine used to copy the kernel to start at physical address 0
 830 * and flush and invalidate the caches as needed.
 831 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 832 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 833 */
 834_GLOBAL(copy_and_flush)
 835	addi	r5,r5,-4		/* pre-bias: loop below pre-increments r6 */
 836	addi	r6,r6,-4
 8374:	li	r0,L1_CACHE_BYTES/4	/* # of words in one cache line */
 838	mtctr	r0
 8393:	addi	r6,r6,4			/* copy a cache line */
 840	lwzx	r0,r6,r4
 841	stwx	r0,r6,r3
 842	bdnz	3b
 843	dcbst	r6,r3			/* write it to memory */
 844	sync
 845	icbi	r6,r3			/* flush the icache line */
 846	cmplw	0,r6,r5
 847	blt	4b
 848	sync				/* additional sync needed on g4 */
 849	isync
 850	addi	r5,r5,4			/* undo the entry bias */
 851	addi	r6,r6,4
 852	blr
853
 854#ifdef CONFIG_APUS
 855/*
 856 * On APUS the physical base address of the kernel is not known at compile
 857 * time, which means the __pa/__va constants used are incorrect. In the
 858 * __init section is recorded the virtual addresses of instructions using
 859 * these constants, so all that has to be done is fix these before
 860 * continuing the kernel boot.
 861 *
 862 * r4 = The physical address of the kernel base.
 863 */
 864fix_mem_constants:
 865	mr	r10,r4
 866	addis	r10,r10,-KERNELBASE@h    /* virt_to_phys constant */
 867	neg	r11,r10	                 /* phys_to_virt constant */
 868
 869	lis	r12,__vtop_table_begin@h
 870	ori	r12,r12,__vtop_table_begin@l
 871	add	r12,r12,r10	         /* table begin phys address */
 872	lis	r13,__vtop_table_end@h
 873	ori	r13,r13,__vtop_table_end@l
 874	add	r13,r13,r10	         /* table end phys address */
 875	subi	r12,r12,4		 /* pre-decrement for the lwzu walk */
 876	subi	r13,r13,4
 8771:	lwzu	r14,4(r12)               /* virt address of instruction */
 878	add     r14,r14,r10              /* phys address of instruction */
 879	lwz     r15,0(r14)               /* instruction, now insert top */
 880	rlwimi  r15,r10,16,16,31         /* half of vp const in low half */
 881	stw	r15,0(r14)               /* of instruction and restore. */
 882	dcbst	r0,r14			 /* write it to memory */
 883	sync
 884	icbi	r0,r14			 /* flush the icache line */
 885	cmpw	r12,r13
 886	bne     1b
 887	sync				/* additional sync needed on g4 */
 888	isync
 889
 890/*
 891 * Map the memory where the exception handlers will
 892 * be copied to when hash constants have been patched.
 893 */
 894#ifdef CONFIG_APUS_FAST_EXCEPT
 895	lis	r8,0xfff0
 896#else
 897	lis	r8,0
 898#endif
 899	ori	r8,r8,0x2	/* 128KB, supervisor */
 900	mtspr	SPRN_DBAT3U,r8
 901	mtspr	SPRN_DBAT3L,r8
 902
 903	lis	r12,__ptov_table_begin@h
 904	ori	r12,r12,__ptov_table_begin@l
 905	add	r12,r12,r10	         /* table begin phys address */
 906	lis	r13,__ptov_table_end@h
 907	ori	r13,r13,__ptov_table_end@l
 908	add	r13,r13,r10	         /* table end phys address */
 909	subi	r12,r12,4		 /* pre-decrement for the lwzu walk */
 910	subi	r13,r13,4
 9111:	lwzu	r14,4(r12)               /* virt address of instruction */
 912	add     r14,r14,r10              /* phys address of instruction */
 913	lwz     r15,0(r14)               /* instruction, now insert top */
 914	rlwimi  r15,r11,16,16,31         /* half of pv const in low half*/
 915	stw	r15,0(r14)               /* of instruction and restore. */
 916	dcbst	r0,r14			 /* write it to memory */
 917	sync
 918	icbi	r0,r14			 /* flush the icache line */
 919	cmpw	r12,r13
 920	bne     1b
 921
 922	sync			/* additional sync needed on g4 */
 923	isync			/* No speculative loading until now */
 924	blr
 925
 926/***********************************************************************
 927 *  Please note that on APUS the exception handlers are located at the
 928 *  physical address 0xfff0000. For this reason, the exception handlers
 929 *  cannot use relative branches to access the code below.
 930 ***********************************************************************/
 931#endif /* CONFIG_APUS */
932
 933#ifdef CONFIG_SMP
 934#ifdef CONFIG_GEMINI
 935	.globl	__secondary_start_gemini
 936__secondary_start_gemini:
 937	mfspr	r4,SPRN_HID0
 938	ori	r4,r4,HID0_ICFI		/* flash-invalidate the icache... */
 939	li	r3,0
 940	ori	r3,r3,HID0_ICE
 941	andc	r4,r4,r3		/* ...and leave it disabled (clear ICE) */
 942	mtspr	SPRN_HID0,r4
 943	sync
 944	b	__secondary_start
 945#endif /* CONFIG_GEMINI */
 946
 947	.globl	__secondary_start_pmac_0
 948__secondary_start_pmac_0:
 949	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
 950	li	r24,0			/* r24 = our CPU number */
 951	b	1f
 952	li	r24,1
 953	b	1f
 954	li	r24,2
 955	b	1f
 956	li	r24,3
 9571:
 958	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
 959	   set to map the 0xf0000000 - 0xffffffff region */
 960	mfmsr	r0
 961	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
 962	SYNC
 963	mtmsr	r0
 964	isync
 965
 966	.globl	__secondary_start
 967__secondary_start:
 968	/* Copy some CPU settings from CPU 0 */
 969	bl	__restore_cpu_setup
 970
 971	lis	r3,-KERNELBASE@h	/* phys offset argument */
 972	mr	r4,r24
 973	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
 974#ifdef CONFIG_6xx
 975	lis	r3,-KERNELBASE@h
 976	bl	init_idle_6xx
 977#endif /* CONFIG_6xx */
 978
 979	/* get current_thread_info and current */
 980	lis	r1,secondary_ti@ha
 981	tophys(r1,r1)			/* MMU is still off: use phys addresses */
 982	lwz	r1,secondary_ti@l(r1)
 983	tophys(r2,r1)
 984	lwz	r2,TI_TASK(r2)		/* r2 = current task_struct */
 985
 986	/* stack */
 987	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 988	li	r0,0
 989	tophys(r3,r1)
 990	stw	r0,0(r3)		/* zero backchain: terminate stack traces */
 991
 992	/* load up the MMU */
 993	bl	load_up_mmu
 994
 995	/* ptr to phys current thread */
 996	tophys(r4,r2)
 997	addi	r4,r4,THREAD	/* phys address of our thread_struct */
 998	CLR_TOP32(r4)
 999	mtspr	SPRN_SPRG3,r4
 1000	li	r3,0
 1001	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */
 1002
 1003	/* enable MMU and jump to start_secondary */
 1004	li	r4,MSR_KERNEL
 1005	FIX_SRR1(r4,r5)
 1006	lis	r3,start_secondary@h
 1007	ori	r3,r3,start_secondary@l
 1008	mtspr	SPRN_SRR0,r3
 1009	mtspr	SPRN_SRR1,r4
 1010	SYNC
 1011	RFI
 1012#endif /* CONFIG_SMP */
1013
 1014/*
 1015 * Those generic dummy functions are kept for CPUs not
 1016 * included in CONFIG_6xx
 1017 */
 1018#if !defined(CONFIG_6xx)
 1019_GLOBAL(__save_cpu_setup)
 1020	blr			/* nothing to save on non-6xx CPUs */
 1021_GLOBAL(__restore_cpu_setup)
 1022	blr			/* nothing to restore on non-6xx CPUs */
 1023#endif /* !defined(CONFIG_6xx) */
1024
1025
 1026/*
 1027 * Load stuff into the MMU.  Intended to be called with
 1028 * IR=0 and DR=0.
 1029 */
 1030load_up_mmu:
 1031	sync			/* Force all PTE updates to finish */
 1032	isync
 1033	tlbia			/* Clear all TLB entries */
 1034	sync			/* wait for tlbia/tlbie to finish */
 1035	TLBSYNC			/* ... on all CPUs */
 1036	/* Load the SDR1 register (hash table base & size) */
 1037	lis	r6,_SDR1@ha
 1038	tophys(r6,r6)
 1039	lwz	r6,_SDR1@l(r6)
 1040	mtspr	SPRN_SDR1,r6
 1041	li	r0,16		/* load up segment register values */
 1042	mtctr	r0		/* for context 0 */
 1043	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
 1044	li	r4,0		/* start at EA 0 */
 10453:	mtsrin	r3,r4
 1046	addi	r3,r3,0x111	/* increment VSID */
 1047	addis	r4,r4,0x1000	/* address of next segment */
 1048	bdnz	3b
 1049
 1050/* Load the BAT registers with the values set up by MMU_init.
 1051   MMU_init takes care of whether we're on a 601 or not. */
 1052	mfpvr	r3
 1053	srwi	r3,r3,16
 1054	cmpwi	r3,1		/* cr0 = "is 601"; NOTE(review): presumably consumed inside LOAD_BAT -- confirm */
 1055	lis	r3,BATS@ha
 1056	addi	r3,r3,BATS@l
 1057	tophys(r3,r3)
 1058	LOAD_BAT(0,r3,r4,r5)
 1059	LOAD_BAT(1,r3,r4,r5)
 1060	LOAD_BAT(2,r3,r4,r5)
 1061	LOAD_BAT(3,r3,r4,r5)
 1062
 1063	blr
1064
 1065/*
 1066 * This is where the main kernel code starts.
 1067 */
 1068start_here:
 1069	/* ptr to current */
 1070	lis	r2,init_task@h
 1071	ori	r2,r2,init_task@l
 1072	/* Set up for using our exception vectors */
 1073	/* ptr to phys current thread */
 1074	tophys(r4,r2)
 1075	addi	r4,r4,THREAD	/* init task's THREAD */
 1076	CLR_TOP32(r4)
 1077	mtspr	SPRN_SPRG3,r4
 1078	li	r3,0
 1079	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */
 1080
 1081	/* stack */
 1082	lis	r1,init_thread_union@ha
 1083	addi	r1,r1,init_thread_union@l
 1084	li	r0,0
 1085	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)	/* zero backchain of first frame */
 1086/*
 1087 * Do early platform-specific initialization,
 1088 * and set up the MMU.
 1089 */
 1090	mr	r3,r31		/* args saved by the entry code -- NOTE(review): confirm r31/r30 contract */
 1091	mr	r4,r30
 1092	bl	machine_init
 1093	bl	MMU_init
 1094
 1095#ifdef CONFIG_APUS
 1096	/* Copy exception code to exception vector base on APUS. */
 1097	lis	r4,KERNELBASE@h
 1098#ifdef CONFIG_APUS_FAST_EXCEPT
 1099	lis	r3,0xfff0		/* Copy to 0xfff00000 */
 1100#else
 1101	lis	r3,0			/* Copy to 0x00000000 */
 1102#endif
 1103	li	r5,0x4000		/* # bytes of memory to copy */
 1104	li	r6,0
 1105	bl	copy_and_flush		/* copy the first 0x4000 bytes */
 1106#endif /* CONFIG_APUS */
 1107
 1108/*
 1109 * Go back to running unmapped so we can load up new values
 1110 * for SDR1 (hash table pointer) and the segment registers
 1111 * and change to using our exception vectors.
 1112 */
 1113	lis	r4,2f@h
 1114	ori	r4,r4,2f@l
 1115	tophys(r4,r4)		/* phys target: we RFI with IR/DR cleared */
 1116	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
 1117	FIX_SRR1(r3,r5)
 1118	mtspr	SPRN_SRR0,r4
 1119	mtspr	SPRN_SRR1,r3
 1120	SYNC
 1121	RFI
 1122/* Load up the kernel context */
 11232:	bl	load_up_mmu
 1124
 1125#ifdef CONFIG_BDI_SWITCH
 1126	/* Add helper information for the Abatron bdiGDB debugger.
 1127	 * We do this here because we know the mmu is disabled, and
 1128	 * will be enabled for real in just a few instructions.
 1129	 */
 1130	lis	r5, abatron_pteptrs@h
 1131	ori	r5, r5, abatron_pteptrs@l
 1132	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
 1133	lis	r6, swapper_pg_dir@h
 1134	ori	r6, r6, swapper_pg_dir@l
 1135	tophys(r5, r5)
 1136	stw	r6, 0(r5)
 1137#endif /* CONFIG_BDI_SWITCH */
 1138
 1139/* Now turn on the MMU for real! */
 1140	li	r4,MSR_KERNEL
 1141	FIX_SRR1(r4,r5)
 1142	lis	r3,start_kernel@h
 1143	ori	r3,r3,start_kernel@l
 1144	mtspr	SPRN_SRR0,r3
 1145	mtspr	SPRN_SRR1,r4
 1146	SYNC
 1147	RFI
1148
 1149/*
 1150 * Set up the segment registers for a new context.
 1151 */
 1152_GLOBAL(set_context)
 1153	mulli	r3,r3,897	/* multiply context by skew factor */
 1154	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
 1155	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
 1156	li	r0,NUM_USER_SEGMENTS
 1157	mtctr	r0
 1158
 1159#ifdef CONFIG_BDI_SWITCH
 1160	/* Context switch the PTE pointer for the Abatron BDI2000.
 1161	 * The PGDIR is passed as second argument.
 1162	 */
 1163	lis	r5, KERNELBASE@h
 1164	lwz	r5, 0xf0(r5)	/* abatron_pteptrs pointer stashed at KERNELBASE+0xf0 */
 1165	stw	r4, 0x4(r5)
 1166#endif
 1167	li	r4,0		/* start at segment/EA 0 */
 1168	isync
 11693:
 1170	mtsrin	r3,r4
 1171	addi	r3,r3,0x111	/* next VSID */
 1172	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
 1173	addis	r4,r4,0x1000	/* address of next segment */
 1174	bdnz	3b
 1175	sync
 1176	isync
 1177	blr
1178
 1179/*
 1180 * An undocumented "feature" of 604e requires that the v bit
 1181 * be cleared before changing BAT values.
 1182 *
 1183 * Also, newer IBM firmware does not clear bat3 and 4 so
 1184 * this makes sure it's done.
 1185 *  -- Cort
 1186 */
 1187clear_bats:
 1188	li	r10,0
 1189	mfspr	r9,SPRN_PVR
 1190	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
 1191	cmpwi	r9, 1
 1192	beq	1f			/* 601: skip the DBAT clears */
 1193
 1194	mtspr	SPRN_DBAT0U,r10
 1195	mtspr	SPRN_DBAT0L,r10
 1196	mtspr	SPRN_DBAT1U,r10
 1197	mtspr	SPRN_DBAT1L,r10
 1198	mtspr	SPRN_DBAT2U,r10
 1199	mtspr	SPRN_DBAT2L,r10
 1200	mtspr	SPRN_DBAT3U,r10
 1201	mtspr	SPRN_DBAT3L,r10
 12021:
 1203	mtspr	SPRN_IBAT0U,r10
 1204	mtspr	SPRN_IBAT0L,r10
 1205	mtspr	SPRN_IBAT1U,r10
 1206	mtspr	SPRN_IBAT1L,r10
 1207	mtspr	SPRN_IBAT2U,r10
 1208	mtspr	SPRN_IBAT2L,r10
 1209	mtspr	SPRN_IBAT3U,r10
 1210	mtspr	SPRN_IBAT3L,r10
 1211BEGIN_FTR_SECTION
 1212	/* Here's a tweak: at this point, CPU setup have
 1213	 * not been called yet, so HIGH_BAT_EN may not be
 1214	 * set in HID0 for the 745x processors. However, it
 1215	 * seems that doesn't affect our ability to actually
 1216	 * write to these SPRs.
 1217	 */
 1218	mtspr	SPRN_DBAT4U,r10
 1219	mtspr	SPRN_DBAT4L,r10
 1220	mtspr	SPRN_DBAT5U,r10
 1221	mtspr	SPRN_DBAT5L,r10
 1222	mtspr	SPRN_DBAT6U,r10
 1223	mtspr	SPRN_DBAT6L,r10
 1224	mtspr	SPRN_DBAT7U,r10
 1225	mtspr	SPRN_DBAT7L,r10
 1226	mtspr	SPRN_IBAT4U,r10
 1227	mtspr	SPRN_IBAT4L,r10
 1228	mtspr	SPRN_IBAT5U,r10
 1229	mtspr	SPRN_IBAT5L,r10
 1230	mtspr	SPRN_IBAT6U,r10
 1231	mtspr	SPRN_IBAT6L,r10
 1232	mtspr	SPRN_IBAT7U,r10
 1233	mtspr	SPRN_IBAT7L,r10
 1234END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
 1235	blr
1236
/*
 * flush_tlbs -- invalidate the TLB entries for EAs 0x3ff000 down to 0,
 * one 4KB page at a time, starting from 0x400000 (r10 = 0x40 << 16).
 * Clobbers r10; returns via blr after the final sync.
 */
 1237flush_tlbs:
 1238	lis	r10, 0x40
 12391:	addic.	r10, r10, -0x1000	/* step down one 4KB page, set cr0 */
 1240	tlbie	r10			/* invalidate that page's TLB entry */
 1241	bgt	1b			/* loop while r10 > 0 (was "blt": exited after one page) */
 1242	sync
 1243	blr
1244
/*
 * mmu_off -- turn address translation off (clear MSR[IR,DR]) and
 * resume at __after_mmu_off, addressed relative to r3.
 * r3 = runtime address of _start (so r4 becomes the physical
 * address of __after_mmu_off).  Returns immediately if the MMU
 * is already off.  Clobbers r0, r3, r4.
 */
 1245mmu_off:
 	1246 addi	r4, r3, __after_mmu_off - _start
 1247	mfmsr	r3
 1248	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
 1249	beqlr					/* already off: just return */
 1250	andc	r3,r3,r0			/* clear IR and DR */
 1251	mtspr	SPRN_SRR0,r4
 1252	mtspr	SPRN_SRR1,r3
 1253	sync
 1254	RFI
1255
 1256/*
 1257 * Use the first pair of BAT registers to map the 1st 16MB
 1258 * of RAM to KERNELBASE.  From this point on we can't safely
 1259 * call OF any more.
 1260 */
 1261initial_bats:
 1262	lis	r11,KERNELBASE@h	/* effective address to map */
 1263	mfspr	r9,SPRN_PVR
 1264	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
 1265	cmpwi	0,r9,1
 1266	bne	4f			/* not a 601 */
 1267	ori	r11,r11,4		/* set up BAT registers for 601 */
 1268	li	r8,0x7f			/* valid, block length = 8MB */
 1269	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
 1270	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
 1271	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
 1272	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
 1273	mtspr	SPRN_IBAT1U,r9
 1274	mtspr	SPRN_IBAT1L,r10
 1275	isync
 1276	blr
 1277
 12784:	tophys(r8,r11)			/* physical address to map */
 1279#ifdef CONFIG_SMP
 1280	ori	r8,r8,0x12		/* R/W access, M=1 */
 1281#else
 1282	ori	r8,r8,2			/* R/W access */
 1283#endif /* CONFIG_SMP */
 1284#ifdef CONFIG_APUS
 1285	ori	r11,r11,BL_8M<<2|0x2	/* set up 8MB BAT registers for 604 */
 1286#else
 1287	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
 1288#endif /* CONFIG_APUS */
 1289
 1290	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
 1291	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
 1292	mtspr	SPRN_IBAT0L,r8
 1293	mtspr	SPRN_IBAT0U,r11
 1294	isync
 1295	blr
1296
1297
1298#ifdef CONFIG_8260
1299/* Jump into the system reset for the rom.
1300 * We first disable the MMU, and then jump to the ROM reset address.
1301 *
1302 * r3 is the board info structure, r4 is the location for starting.
1303 * I use this for building a small kernel that can load other kernels,
1304 * rather than trying to write or rely on a rom monitor that can tftp load.
1305 */
1306 .globl m8260_gorom
1307m8260_gorom:
1308 mfmsr r0
1309 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1310 sync
1311 mtmsr r0
1312 sync
1313 mfspr r11, SPRN_HID0
1314 lis r10, 0
1315 ori r10,r10,HID0_ICE|HID0_DCE
1316 andc r11, r11, r10
1317 mtspr SPRN_HID0, r11
1318 isync
1319 li r5, MSR_ME|MSR_RI
1320 lis r6,2f@h
1321 addis r6,r6,-KERNELBASE@h
1322 ori r6,r6,2f@l
1323 mtspr SPRN_SRR0,r6
1324 mtspr SPRN_SRR1,r5
1325 isync
1326 sync
1327 rfi
13282:
1329 mtlr r4
1330 blr
1331#endif
1332
1333
 1334/*
 1335 * We put a few things here that have to be page-aligned.
 1336 * This stuff goes at the beginning of the data segment,
 1337 * which is page-aligned.
 1338 */
 1339	.data
 1340	.globl	sdata
 1341sdata:
 1342	.globl	empty_zero_page
 1343empty_zero_page:
 1344	.space	4096		/* one page of zeroes */
 1345
 1346	.globl	swapper_pg_dir
 1347swapper_pg_dir:
 1348	.space	4096		/* kernel page directory (see TLB-miss fallback paths) */
 1349
 1350/*
 1351 * This space gets a copy of optional info passed to us by the bootstrap
 1352 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 1353 */
 1354	.globl	cmd_line
 1355cmd_line:
 1356	.space	512
 1357
/* NOTE(review): appears to be per-vector hook addresses, one slot per
 * 0x100 of exception vector (i0x200 = machine check, etc.) -- confirm
 * against the users of intercept_table. */
 1358	.globl	intercept_table
 1359intercept_table:
 1360	.long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
 1361	.long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
 1362	.long 0, 0, 0, i0x1300, 0, 0, 0, 0
 1363	.long 0, 0, 0, 0, 0, 0, 0, 0
 1364	.long 0, 0, 0, 0, 0, 0, 0, 0
 1365	.long 0, 0, 0, 0, 0, 0, 0, 0
 1366
 1367/* Room for two PTE pointers, usually the kernel and current user pointers
 1368 * to their respective root page table.
 1369 */
 1370abatron_pteptrs:
 1371	.space	8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
new file mode 100644
index 000000000000..8b49679fad54
--- /dev/null
+++ b/arch/powerpc/kernel/head_44x.S
@@ -0,0 +1,782 @@
1/*
2 * arch/ppc/kernel/head_44x.S
3 *
4 * Kernel execution entry point code.
5 *
6 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
7 * Initial PowerPC version.
8 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Rewritten for PReP
10 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
11 * Low-level exception handers, MMU support, and rewrite.
12 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
13 * PowerPC 8xx modifications.
14 * Copyright (c) 1998-1999 TiVo, Inc.
15 * PowerPC 403GCX modifications.
16 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
17 * PowerPC 403GCX/405GP modifications.
18 * Copyright 2000 MontaVista Software Inc.
19 * PPC405 modifications
20 * PowerPC 403GCX/405GP modifications.
21 * Author: MontaVista Software, Inc.
22 * frank_rowand@mvista.com or source@mvista.com
23 * debbie_chu@mvista.com
24 * Copyright 2002-2005 MontaVista Software, Inc.
25 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
26 *
27 * This program is free software; you can redistribute it and/or modify it
28 * under the terms of the GNU General Public License as published by the
29 * Free Software Foundation; either version 2 of the License, or (at your
30 * option) any later version.
31 */
32
33#include <linux/config.h>
34#include <asm/processor.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/ibm4xx.h>
39#include <asm/ibm44x.h>
40#include <asm/cputable.h>
41#include <asm/thread_info.h>
42#include <asm/ppc_asm.h>
43#include <asm/asm-offsets.h>
44#include "head_booke.h"
45
46
 47/* As with the other PowerPC ports, it is expected that when code
 48 * execution begins here, the following registers contain valid, yet
 49 * optional, information:
 50 *
 51 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 52 *   r4 - Starting address of the init RAM disk
 53 *   r5 - Ending address of the init RAM disk
 54 *   r6 - Start of kernel command line string (e.g. "mem=128")
 55 *   r7 - End of kernel command line string
 56 *
 57 */
 58	.text
 59_GLOBAL(_stext)
 60_GLOBAL(_start)
 61	/*
 62	 * Reserve a word at a fixed location to store the address
 63	 * of abatron_pteptrs
 64	 */
 65	nop
 66/*
 67 * Save parameters we are passed
 68 */
 69	mr	r31,r3		/* board info */
 70	mr	r30,r4		/* initrd start */
 71	mr	r29,r5		/* initrd end */
 72	mr	r28,r6		/* cmdline start */
 73	mr	r27,r7		/* cmdline end */
 74	li	r24,0		/* CPU number */
 75
 76/*
 77 * Set up the initial MMU state
 78 *
 79 * We are still executing code at the virtual address
 80 * mappings set by the firmware for the base of RAM.
 81 *
 82 * We first invalidate all TLB entries but the one
 83 * we are running from.  We then load the KERNELBASE
 84 * mappings so we can begin to use kernel addresses
 85 * natively and so the interrupt vector locations are
 86 * permanently pinned (necessary since Book E
 87 * implementations always have translation enabled).
 88 *
 89 * TODO: Use the known TLB entry we are running from to
 90 *	 determine which physical region we are located
 91 *	 in.  This can be used to determine where in RAM
 92 *	 (on a shared CPU system) or PCI memory space
 93 *	 (on a DRAMless system) we are located.
 94 *	 For now, we assume a perfect world which means
 95 *	 we are located at the base of DRAM (physical 0).
 96 */
 97
 98/*
 99 * Search TLB for entry that we are currently using.
 100 * Invalidate all entries but the one we are using.
 101 */
 102	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
 103	mfspr	r3,SPRN_PID			/* Get PID */
 104	mfmsr	r4				/* Get MSR */
 105	andi.	r4,r4,MSR_IS@l			/* TS=1? */
 106	beq	wmmucr				/* If not, leave STS=0 */
 107	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
 108wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
 109	sync
 110
 111	bl	invstr				/* Find our address */
 112invstr:	mflr	r5				/* Make it accessible */
 113	tlbsx	r23,0,r5			/* Find entry we are in */
 114	li	r4,0				/* Start at TLB entry 0 */
 115	li	r3,0				/* Set PAGEID inval value */
 1161:	cmpw	r23,r4				/* Is this our entry? */
 117	beq	skpinv				/* If so, skip the inval */
 118	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
 119skpinv:	addi	r4,r4,1				/* Increment */
 120	cmpwi	r4,64				/* Are we done? */
 121	bne	1b				/* If not, repeat */
 122	isync					/* If so, context change */
 123
 124/*
 125 * Configure and load pinned entry into TLB slot 63.
 126 */
 127
 128	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
 129	ori	r3,r3,KERNELBASE@l
 130
 131	/* Kernel is at the base of RAM */
 132	li r4, 0			/* Load the kernel physical address */
 133
 134	/* Load the kernel PID = 0 */
 135	li	r0,0
 136	mtspr	SPRN_PID,r0
 137	sync
 138
 139	/* Initialize MMUCR */
 140	li	r5,0
 141	mtspr	SPRN_MMUCR,r5
 142	sync
 143
 144 	/* pageid fields */
 145	clrrwi	r3,r3,10		/* Mask off the effective page number */
 146	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
 147
 148	/* xlat fields */
 149	clrrwi	r4,r4,10		/* Mask off the real page number */
 150					/* ERPN is 0 for first 4GB page */
 151
 152	/* attrib fields */
 153	/* Added guarded bit to protect against speculative loads/stores */
 154	li	r5,0
 155	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
 156
 157        li      r0,63                    /* TLB slot 63 */
 158
 159	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
 160	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
 161	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
 162
 163	/* Force context change */
 164	mfmsr	r0
 165	mtspr	SPRN_SRR1, r0		/* keep the current MSR across the rfi */
 166	lis	r0,3f@h
 167	ori	r0,r0,3f@l
 168	mtspr	SPRN_SRR0,r0
 169	sync
 170	rfi
 171
 172	/* If necessary, invalidate original entry we used */
 1733:	cmpwi	r23,63			/* were we already running from slot 63? */
 174	beq	4f
 175	li	r6,0
 176	tlbwe   r6,r23,PPC44x_TLB_PAGEID
 177	isync
 178
 1794:
 180#ifdef CONFIG_SERIAL_TEXT_DEBUG
 181	/*
 182	 * Add temporary UART mapping for early debug.
 183	 * We can map UART registers wherever we want as long as they don't
 184	 * interfere with other system mappings (e.g. with pinned entries).
 185	 * For an example of how we handle this - see ocotea.h. --ebs
 186	 */
 187	/* pageid fields */
 188	lis	r3,UART0_IO_BASE@h
 189	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
 190
 191	/* xlat fields */
 192	lis	r4,UART0_PHYS_IO_BASE@h		/* RPN depends on SoC */
 193#ifndef CONFIG_440EP
 194	ori	r4,r4,0x0001		/* ERPN is 1 for second 4GB page */
 195#endif
 196
 197	/* attrib fields */
 198	li	r5,0
 199	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
 200
 201        li      r0,0                    /* TLB slot 0 */
 202
 203	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
 204	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
 205	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
 206
 207	/* Force context change */
 208	isync
 209#endif /* CONFIG_SERIAL_TEXT_DEBUG */
 210
 211	/* Establish the interrupt vector offsets */
 212	SET_IVOR(0,  CriticalInput);
 213	SET_IVOR(1,  MachineCheck);
 214	SET_IVOR(2,  DataStorage);
 215	SET_IVOR(3,  InstructionStorage);
 216	SET_IVOR(4,  ExternalInput);
 217	SET_IVOR(5,  Alignment);
 218	SET_IVOR(6,  Program);
 219	SET_IVOR(7,  FloatingPointUnavailable);
 220	SET_IVOR(8,  SystemCall);
 221	SET_IVOR(9,  AuxillaryProcessorUnavailable);
 222	SET_IVOR(10, Decrementer);
 223	SET_IVOR(11, FixedIntervalTimer);
 224	SET_IVOR(12, WatchdogTimer);
 225	SET_IVOR(13, DataTLBError);
 226	SET_IVOR(14, InstructionTLBError);
 227	SET_IVOR(15, Debug);
 228
 229	/* Establish the interrupt vector base */
 230	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
 231	mtspr	SPRN_IVPR,r4
 232
 233#ifdef CONFIG_440EP
 234	/* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */
 235	mfspr	r2,SPRN_CCR0
 236	lis	r3,0xffef
 237	ori	r3,r3,0xffff
 238	and	r2,r2,r3		/* mask = 0xffefffff: clear the DAPUIB bit */
 239	mtspr	SPRN_CCR0,r2
 240	isync
 241#endif
 242
 243	/*
 244	 * This is where the main kernel code starts.
 245	 */
 246
 247	/* ptr to current */
 248	lis	r2,init_task@h
 249	ori	r2,r2,init_task@l
 250
 251	/* ptr to current thread */
 252	addi	r4,r2,THREAD	/* init task's THREAD */
 253	mtspr	SPRN_SPRG3,r4
 254
 255	/* stack */
 256	lis	r1,init_thread_union@h
 257	ori	r1,r1,init_thread_union@l
 258	li	r0,0
 259	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)	/* zero backchain of first frame */
 260
 261	bl	early_init
 262
 263/*
 264 * Decide what sort of machine this is and initialize the MMU.
 265 */
 266	mr	r3,r31		/* pass the saved firmware/boot args */
 267	mr	r4,r30
 268	mr	r5,r29
 269	mr	r6,r28
 270	mr	r7,r27
 271	bl	machine_init
 272	bl	MMU_init
 273
 274	/* Setup PTE pointers for the Abatron bdiGDB */
 275	lis	r6, swapper_pg_dir@h
 276	ori	r6, r6, swapper_pg_dir@l
 277	lis	r5, abatron_pteptrs@h
 278	ori	r5, r5, abatron_pteptrs@l
 279	lis	r4, KERNELBASE@h
 280	ori	r4, r4, KERNELBASE@l
 281	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
 282	stw	r6, 0(r5)
 283
 284	/* Let's move on */
 285	lis	r4,start_kernel@h
 286	ori	r4,r4,start_kernel@l
 287	lis	r3,MSR_KERNEL@h
 288	ori	r3,r3,MSR_KERNEL@l
 289	mtspr	SPRN_SRR0,r4
 290	mtspr	SPRN_SRR1,r3
 291	rfi			/* change context and jump to start_kernel */
292
 293/*
 294 * Interrupt vector entry code
 295 *
 296 * The Book E MMUs are always on so we don't need to handle
 297 * interrupts in real mode as with previous PPC processors. In
 298 * this case we handle interrupts in the kernel virtual address
 299 * space.
 300 *
 301 * Interrupt vectors are dynamically placed relative to the
 302 * interrupt prefix as determined by the address of interrupt_base.
 303 * The interrupt vectors offsets are programmed using the labels
 304 * for each interrupt vector entry.
 305 *
 306 * Interrupt vectors must be aligned on a 16 byte boundary.
 307 * We align on a 32 byte cache line boundary for good measure.
 308 */
 309
 310interrupt_base:
 311	/* Critical Input Interrupt */
 312	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
 313
 314	/* Machine Check Interrupt */
 315#ifdef CONFIG_440A
 316	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
 317#else
 318	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
 319#endif
 320
 321	/* Data Storage Interrupt */
 322	START_EXCEPTION(DataStorage)
 323	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
 324	mtspr	SPRN_SPRG1, r11
 325	mtspr	SPRN_SPRG4W, r12
 326	mtspr	SPRN_SPRG5W, r13
 327	mfcr	r11
 328	mtspr	SPRN_SPRG7W, r11	/* CR saved in SPRG7 too */
 329
 330	/*
 331	 * Check if it was a store fault, if not then bail
 332	 * because a user tried to access a kernel or
 333	 * read-protected page.  Otherwise, get the
 334	 * offending address and handle it.
 335	 */
 336	mfspr	r10, SPRN_ESR
 337	andis.	r10, r10, ESR_ST@h
 338	beq	2f
 339
 340	mfspr	r10, SPRN_DEAR		/* Get faulting address */
 341
 342	/* If we are faulting a kernel address, we have to use the
 343	 * kernel page tables.
 344	 */
 345	lis	r11, TASK_SIZE@h
 346	cmplw	r10, r11
 347	blt+	3f
 348	lis	r11, swapper_pg_dir@h
 349	ori	r11, r11, swapper_pg_dir@l
 350
 351	mfspr	r12,SPRN_MMUCR
 352	rlwinm	r12,r12,0,0,23		/* Clear TID */
 353
 354	b	4f
 355
 356	/* Get the PGD for the current thread */
 3573:
 358	mfspr	r11,SPRN_SPRG3
 359	lwz	r11,PGDIR(r11)
 360
 361	/* Load PID into MMUCR TID */
 362	mfspr	r12,SPRN_MMUCR		/* Get MMUCR */
 363	mfspr	r13,SPRN_PID		/* Get PID */
 364	rlwimi	r12,r13,0,24,31		/* Set TID */
 365
 3664:
 367	mtspr	SPRN_MMUCR,r12
 368
 369	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
 370	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 371	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 372	beq	2f			/* Bail if no table */
 373
 374	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
 375	lwz	r11, 4(r12)		/* Get pte entry */
 376
 377	andi.	r13, r11, _PAGE_RW	/* Is it writeable? */
 378	beq	2f			/* Bail if not */
 379
 380	/* Update 'changed'.
 381	 */
 382	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
 383	stw	r11, 4(r12)		/* Update Linux page table */
 384
 385	li	r13, PPC44x_TLB_SR@l	/* Set SR */
 386	rlwimi	r13, r11, 29, 29, 29	/* SX = _PAGE_HWEXEC */
 387	rlwimi	r13, r11, 0, 30, 30	/* SW = _PAGE_RW */
 388	rlwimi	r13, r11, 29, 28, 28	/* UR = _PAGE_USER */
 389	rlwimi	r12, r11, 31, 26, 26	/* (_PAGE_USER>>1)->r12 */
 390	rlwimi	r12, r11, 29, 30, 30	/* (_PAGE_USER>>3)->r12 */
 391	and	r12, r12, r11		/* HWEXEC/RW & USER */
 392	rlwimi	r13, r12, 0, 26, 26	/* UX = HWEXEC & USER */
 393	rlwimi	r13, r12, 3, 27, 27	/* UW = RW & USER */
 394
 395	rlwimi	r11,r13,0,26,31		/* Insert static perms */
 396
 397	rlwinm	r11,r11,0,20,15		/* Clear U0-U3 */
 398
 399	/* find the TLB index that caused the fault.  It has to be here. */
 400	tlbsx	r10, 0, r10
 401
 402	tlbwe	r11, r10, PPC44x_TLB_ATTRIB	/* Write ATTRIB */
 403
 404	/* Done...restore registers and get out of here.
 405	 */
 406	mfspr	r11, SPRN_SPRG7R	/* restore CR and the four scratch regs */
 407	mtcr	r11
 408	mfspr	r13, SPRN_SPRG5R
 409	mfspr	r12, SPRN_SPRG4R
 410
 411	mfspr	r11, SPRN_SPRG1
 412	mfspr	r10, SPRN_SPRG0
 413	rfi			/* Force context change */
 414
 4152:
 416	/*
 417	 * The bailout.  Restore registers to pre-exception conditions
 418	 * and call the heavyweights to help us out.
 419	 */
 420	mfspr	r11, SPRN_SPRG7R
 421	mtcr	r11
 422	mfspr	r13, SPRN_SPRG5R
 423	mfspr	r12, SPRN_SPRG4R
 424
 425	mfspr	r11, SPRN_SPRG1
 426	mfspr	r10, SPRN_SPRG0
 427	b	data_access
 428
 429	/* Instruction Storage Interrupt */
 430	INSTRUCTION_STORAGE_EXCEPTION
 431
 432	/* External Input Interrupt */
 433	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
 434
 435	/* Alignment Interrupt */
 436	ALIGNMENT_EXCEPTION
 437
 438	/* Program Interrupt */
 439	PROGRAM_EXCEPTION
 440
 441	/* Floating Point Unavailable Interrupt */
 442#ifdef CONFIG_PPC_FPU
 443	FP_UNAVAILABLE_EXCEPTION
 444#else
 445	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
 446#endif
 447
 448	/* System Call Interrupt */
 449	START_EXCEPTION(SystemCall)
 450	NORMAL_EXCEPTION_PROLOG
 451	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 452
 453	/* Auxillary Processor Unavailable Interrupt */
 454	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 455
 456	/* Decrementer Interrupt */
 457	DECREMENTER_EXCEPTION
 458
 459	/* Fixed Internal Timer Interrupt */
 460	/* TODO: Add FIT support */
 461	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
 462
 463	/* Watchdog Timer Interrupt */
 464	/* TODO: Add watchdog support */
 465#ifdef CONFIG_BOOKE_WDT
 466	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
 467#else
 468	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
 469#endif
 470
 471	/* Data TLB Error Interrupt */
 472	START_EXCEPTION(DataTLBError)
 473	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
 474	mtspr	SPRN_SPRG1, r11
 475	mtspr	SPRN_SPRG4W, r12
 476	mtspr	SPRN_SPRG5W, r13
 477	mfcr	r11
 478	mtspr	SPRN_SPRG7W, r11
 479	mfspr	r10, SPRN_DEAR		/* Get faulting address */
 480
 481	/* If we are faulting a kernel address, we have to use the
 482	 * kernel page tables.
 483	 */
 484	lis	r11, TASK_SIZE@h
 485	cmplw	r10, r11
 486	blt+	3f
 487	lis	r11, swapper_pg_dir@h
 488	ori	r11, r11, swapper_pg_dir@l
 489
 490	mfspr	r12,SPRN_MMUCR
 491	rlwinm	r12,r12,0,0,23		/* Clear TID */
 492
 493	b	4f
 494
 495	/* Get the PGD for the current thread */
 4963:
 497	mfspr	r11,SPRN_SPRG3
 498	lwz	r11,PGDIR(r11)
 499
 500	/* Load PID into MMUCR TID */
 501	mfspr	r12,SPRN_MMUCR
 502	mfspr	r13,SPRN_PID		/* Get PID */
 503	rlwimi	r12,r13,0,24,31		/* Set TID */
 504
 5054:
 506	mtspr	SPRN_MMUCR,r12
 507
 508	rlwinm 	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
 509	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 510	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 511	beq	2f			/* Bail if no table */
 512
 513	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
 514	lwz	r11, 4(r12)		/* Get pte entry */
 515	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
 516	beq	2f			/* Bail if not present */
 517
 518	ori	r11, r11, _PAGE_ACCESSED	/* mark referenced before loading TLB */
 519	stw	r11, 4(r12)
 520
 521	 /* Jump to common tlb load */
 522	b	finish_tlb_load
 523
 5242:
 525	/* The bailout.  Restore registers to pre-exception conditions
 526	 * and call the heavyweights to help us out.
 527	 */
 528	mfspr	r11, SPRN_SPRG7R
 529	mtcr	r11
 530	mfspr	r13, SPRN_SPRG5R
 531	mfspr	r12, SPRN_SPRG4R
 532	mfspr	r11, SPRN_SPRG1
 533	mfspr	r10, SPRN_SPRG0
 534	b	data_access
 535
 536	/* Instruction TLB Error Interrupt */
 537	/*
 538	 * Nearly the same as above, except we get our
 539	 * information from different registers and bailout
 540	 * to a different point.
 541	 */
 542	START_EXCEPTION(InstructionTLBError)
 543	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
 544	mtspr	SPRN_SPRG1, r11
 545	mtspr	SPRN_SPRG4W, r12
 546	mtspr	SPRN_SPRG5W, r13
 547	mfcr	r11
 548	mtspr	SPRN_SPRG7W, r11
 549	mfspr	r10, SPRN_SRR0		/* Get faulting address */
 550
 551	/* If we are faulting a kernel address, we have to use the
 552	 * kernel page tables.
 553	 */
 554	lis	r11, TASK_SIZE@h
 555	cmplw	r10, r11
 556	blt+	3f
 557	lis	r11, swapper_pg_dir@h
 558	ori	r11, r11, swapper_pg_dir@l
 559
 560	mfspr	r12,SPRN_MMUCR
 561	rlwinm	r12,r12,0,0,23		/* Clear TID */
 562
 563	b	4f
 564
 565	/* Get the PGD for the current thread */
 5663:
 567	mfspr	r11,SPRN_SPRG3
 568	lwz	r11,PGDIR(r11)
 569
 570	/* Load PID into MMUCR TID */
 571	mfspr	r12,SPRN_MMUCR
 572	mfspr	r13,SPRN_PID		/* Get PID */
 573	rlwimi	r12,r13,0,24,31		/* Set TID */
 574
 5754:
 576	mtspr	SPRN_MMUCR,r12
 577
 578	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
 579	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 580	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 581	beq	2f			/* Bail if no table */
 582
 583	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
 584	lwz	r11, 4(r12)		/* Get pte entry */
 585	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
 586	beq	2f			/* Bail if not present */
 587
 588	ori	r11, r11, _PAGE_ACCESSED
 589	stw	r11, 4(r12)
 590
 591	/* Jump to common TLB load point */
 592	b	finish_tlb_load
 593
 5942:
 595	/* The bailout.  Restore registers to pre-exception conditions
 596	 * and call the heavyweights to help us out.
 597	 */
 598	mfspr	r11, SPRN_SPRG7R
 599	mtcr	r11
 600	mfspr	r13, SPRN_SPRG5R
 601	mfspr	r12, SPRN_SPRG4R
 602	mfspr	r11, SPRN_SPRG1
 603	mfspr	r10, SPRN_SPRG0
 604	b	InstructionStorage	/* let the ISI path raise the fault */
 605
 606	/* Debug Interrupt */
 607	DEBUG_EXCEPTION
608
609/*
610 * Local functions
611 */
612 /*
613 * Data TLB exceptions will bail out to this point
614 * if they can't resolve the lightweight TLB fault.
615 */
616data_access:
617 NORMAL_EXCEPTION_PROLOG
618 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
619 stw r5,_ESR(r11)
620 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
621 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
622
623/*
624
625 * Both the instruction and data TLB miss get to this
626 * point to load the TLB.
627 * r10 - EA of fault
628 * r11 - available to use
629 * r12 - Pointer to the 64-bit PTE
630 * r13 - available to use
631 * MMUCR - loaded with proper value when we get here
632 * Upon exit, we reload everything and RFI.
633 */
634finish_tlb_load:
635 /*
636 * We set execute, because we don't have the granularity to
637 * properly set this at the page level (Linux problem).
638 * If shared is set, we cause a zero PID->TID load.
639 * Many of these bits are software only. Bits we don't set
640 * here we (properly should) assume have the appropriate value.
641 */
642
643 /* Load the next available TLB index */
644 lis r13, tlb_44x_index@ha
645 lwz r13, tlb_44x_index@l(r13)
646 /* Load the TLB high watermark */
647 lis r11, tlb_44x_hwater@ha
648 lwz r11, tlb_44x_hwater@l(r11)
649
650 /* Increment, rollover, and store TLB index */
651 addi r13, r13, 1
652 cmpw 0, r13, r11 /* reserve entries */
653 ble 7f
654 li r13, 0
6557:
656 /* Store the next available TLB index */
657 lis r11, tlb_44x_index@ha
658 stw r13, tlb_44x_index@l(r11)
659
660 lwz r11, 0(r12) /* Get MS word of PTE */
661 lwz r12, 4(r12) /* Get LS word of PTE */
662 rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */
663 tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */
664
665 /*
666 * Create PAGEID. This is the faulting address,
667 * page size, and valid flag.
668 */
669 li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
670 rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */
671 tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */
672
673 li r10, PPC44x_TLB_SR@l /* Set SR */
674 rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */
675 rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */
676 rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */
677 rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
678 and r11, r12, r11 /* HWEXEC & USER */
679 rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */
680
681 rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
682 rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */
683 tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
684
685 /* Done...restore registers and get out of here.
686 */
687 mfspr r11, SPRN_SPRG7R
688 mtcr r11
689 mfspr r13, SPRN_SPRG5R
690 mfspr r12, SPRN_SPRG4R
691 mfspr r11, SPRN_SPRG1
692 mfspr r10, SPRN_SPRG0
693 rfi /* Force context change */
694
695/*
696 * Global functions
697 */
698
699/*
700 * extern void giveup_altivec(struct task_struct *prev)
701 *
702 * The 44x core does not have an AltiVec unit.
703 */
704_GLOBAL(giveup_altivec)
705 blr
706
707/*
708 * extern void giveup_fpu(struct task_struct *prev)
709 *
710 * The 44x core does not have an FPU.
711 */
712#ifndef CONFIG_PPC_FPU
713_GLOBAL(giveup_fpu)
714 blr
715#endif
716
717/*
718 * extern void abort(void)
719 *
720 * At present, this routine just applies a system reset.
721 */
722_GLOBAL(abort)
723 mfspr r13,SPRN_DBCR0
724 oris r13,r13,DBCR0_RST_SYSTEM@h
725 mtspr SPRN_DBCR0,r13
726
727_GLOBAL(set_context)
728
729#ifdef CONFIG_BDI_SWITCH
730 /* Context switch the PTE pointer for the Abatron BDI2000.
731 * The PGDIR is the second parameter.
732 */
733 lis r5, abatron_pteptrs@h
734 ori r5, r5, abatron_pteptrs@l
735 stw r4, 0x4(r5)
736#endif
737 mtspr SPRN_PID,r3
738 isync /* Force context change */
739 blr
740
741/*
742 * We put a few things here that have to be page-aligned. This stuff
743 * goes at the beginning of the data segment, which is page-aligned.
744 */
745 .data
746 .align 12
747 .globl sdata
748sdata:
749 .globl empty_zero_page
750empty_zero_page:
751 .space 4096
752
753/*
754 * To support >32-bit physical addresses, we use an 8KB pgdir.
755 */
756 .globl swapper_pg_dir
757swapper_pg_dir:
758 .space 8192
759
760/* Reserved 4k for the critical exception stack & 4k for the machine
761 * check stack per CPU for kernel mode exceptions */
762 .section .bss
763 .align 12
764exception_stack_bottom:
765 .space BOOKE_EXCEPTION_STACK_SIZE
766 .globl exception_stack_top
767exception_stack_top:
768
769/*
770 * This space gets a copy of optional info passed to us by the bootstrap
771 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
772 */
773 .globl cmd_line
774cmd_line:
775 .space 512
776
777/*
778 * Room for two PTE pointers, usually the kernel and current user pointers
779 * to their respective root page table.
780 */
781abatron_pteptrs:
782 .space 8
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S
new file mode 100644
index 000000000000..10c261c67021
--- /dev/null
+++ b/arch/powerpc/kernel/head_4xx.S
@@ -0,0 +1,1022 @@
1/*
2 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
3 * Initial PowerPC version.
4 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
5 * Rewritten for PReP
6 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
7 * Low-level exception handlers, MMU support, and rewrite.
8 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
9 * PowerPC 8xx modifications.
10 * Copyright (c) 1998-1999 TiVo, Inc.
11 * PowerPC 403GCX modifications.
12 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
13 * PowerPC 403GCX/405GP modifications.
14 * Copyright 2000 MontaVista Software Inc.
15 * PPC405 modifications
16 * PowerPC 403GCX/405GP modifications.
17 * Author: MontaVista Software, Inc.
18 * frank_rowand@mvista.com or source@mvista.com
19 * debbie_chu@mvista.com
20 *
21 *
22 * Module name: head_4xx.S
23 *
24 * Description:
25 * Kernel execution entry point code.
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
31 *
32 */
33
34#include <linux/config.h>
35#include <asm/processor.h>
36#include <asm/page.h>
37#include <asm/mmu.h>
38#include <asm/pgtable.h>
39#include <asm/ibm4xx.h>
40#include <asm/cputable.h>
41#include <asm/thread_info.h>
42#include <asm/ppc_asm.h>
43#include <asm/asm-offsets.h>
44
45/* As with the other PowerPC ports, it is expected that when code
46 * execution begins here, the following registers contain valid, yet
47 * optional, information:
48 *
49 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
50 * r4 - Starting address of the init RAM disk
51 * r5 - Ending address of the init RAM disk
52 * r6 - Start of kernel command line string (e.g. "mem=96m")
53 * r7 - End of kernel command line string
54 *
55 * This is all going to change RSN when we add bi_recs....... -- Dan
56 */
57 .text
58_GLOBAL(_stext)
59_GLOBAL(_start)
60
61 /* Save parameters we are passed.
62 */
63 mr r31,r3
64 mr r30,r4
65 mr r29,r5
66 mr r28,r6
67 mr r27,r7
68
69 /* We have to turn on the MMU right away so we get cache modes
70 * set correctly.
71 */
72 bl initial_mmu
73
74/* We now have the lower 16 Meg mapped into TLB entries, and the caches
75 * ready to work.
76 */
77turn_on_mmu:
/* Enable translation by loading SRR0/SRR1 and doing an rfi: SRR1 becomes
 * the MSR (with IR/DR set via MSR_KERNEL), SRR0 the next PC. */
78 lis r0,MSR_KERNEL@h
79 ori r0,r0,MSR_KERNEL@l
80 mtspr SPRN_SRR1,r0
81 lis r0,start_here@h
82 ori r0,r0,start_here@l
83 mtspr SPRN_SRR0,r0
84 SYNC
85 rfi /* enables MMU */
86 b . /* prevent prefetch past rfi */
87
88/*
89 * This area is used for temporarily saving registers during the
90 * critical exception prolog.
91 */
92 . = 0xc0
93crit_save:
94_GLOBAL(crit_r10)
95 .space 4
96_GLOBAL(crit_r11)
97 .space 4
98
99/*
100 * Exception vector entry code. This code runs with address translation
101 * turned off (i.e. using physical addresses). We assume SPRG3 has the
102 * physical address of the current task thread_struct.
103 * Note that we have to have decremented r1 before we write to any fields
104 * of the exception frame, since a critical interrupt could occur at any
105 * time, and it will write to the area immediately below the current r1.
106 */
107#define NORMAL_EXCEPTION_PROLOG \
108 mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
109 mtspr SPRN_SPRG1,r11; \
110 mtspr SPRN_SPRG2,r1; \
111 mfcr r10; /* save CR in r10 for now */\
112 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
113 andi. r11,r11,MSR_PR; \
114 beq 1f; \
115 mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
116 lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
117 addi r1,r1,THREAD_SIZE; \
1181: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
119 tophys(r11,r1); \
120 stw r10,_CCR(r11); /* save various registers */\
121 stw r12,GPR12(r11); \
122 stw r9,GPR9(r11); \
123 mfspr r10,SPRN_SPRG0; \
124 stw r10,GPR10(r11); \
125 mfspr r12,SPRN_SPRG1; \
126 stw r12,GPR11(r11); \
127 mflr r10; \
128 stw r10,_LINK(r11); \
129 mfspr r10,SPRN_SPRG2; \
130 mfspr r12,SPRN_SRR0; \
131 stw r10,GPR1(r11); \
132 mfspr r9,SPRN_SRR1; \
133 stw r10,0(r11); \
134 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
135 stw r0,GPR0(r11); \
136 SAVE_4GPRS(3, r11); \
137 SAVE_2GPRS(7, r11)
138
139/*
140 * Exception prolog for critical exceptions. This is a little different
141 * from the normal exception prolog above since a critical exception
142 * can potentially occur at any point during normal exception processing.
143 * Thus we cannot use the same SPRG registers as the normal prolog above.
144 * Instead we use a couple of words of memory at low physical addresses.
145 * This is OK since we don't support SMP on these processors.
146 */
147#define CRITICAL_EXCEPTION_PROLOG \
148 stw r10,crit_r10@l(0); /* save two registers to work with */\
149 stw r11,crit_r11@l(0); \
150 mfcr r10; /* save CR in r10 for now */\
151 mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
152 andi. r11,r11,MSR_PR; \
153 lis r11,critical_stack_top@h; \
154 ori r11,r11,critical_stack_top@l; \
155 beq 1f; \
156 /* COMING FROM USER MODE */ \
157 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
158 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
159 addi r11,r11,THREAD_SIZE; \
1601: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
161 tophys(r11,r11); \
162 stw r10,_CCR(r11); /* save various registers */\
163 stw r12,GPR12(r11); \
164 stw r9,GPR9(r11); \
165 mflr r10; \
166 stw r10,_LINK(r11); \
167 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
168 stw r12,_DEAR(r11); /* since they may have had stuff */\
169 mfspr r9,SPRN_ESR; /* in them at the point where the */\
170 stw r9,_ESR(r11); /* exception was taken */\
171 mfspr r12,SPRN_SRR2; \
172 stw r1,GPR1(r11); \
173 mfspr r9,SPRN_SRR3; \
174 stw r1,0(r11); \
175 tovirt(r1,r11); \
176 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
177 stw r0,GPR0(r11); \
178 SAVE_4GPRS(3, r11); \
179 SAVE_2GPRS(7, r11)
180
181 /*
182 * State at this point:
183 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
184 * r10 saved in crit_r10 and in stack frame, trashed
185 * r11 saved in crit_r11 and in stack frame,
186 * now phys stack/exception frame pointer
187 * r12 saved in stack frame, now saved SRR2
188 * CR saved in stack frame, CR0.EQ = !SRR3.PR
189 * LR, DEAR, ESR in stack frame
190 * r1 saved in stack frame, now virt stack/excframe pointer
191 * r0, r3-r8 saved in stack frame
192 */
193
194/*
195 * Exception vectors.
196 */
197#define START_EXCEPTION(n, label) \
198 . = n; \
199label:
200
201#define EXCEPTION(n, label, hdlr, xfer) \
202 START_EXCEPTION(n, label); \
203 NORMAL_EXCEPTION_PROLOG; \
204 addi r3,r1,STACK_FRAME_OVERHEAD; \
205 xfer(n, hdlr)
206
207#define CRITICAL_EXCEPTION(n, label, hdlr) \
208 START_EXCEPTION(n, label); \
209 CRITICAL_EXCEPTION_PROLOG; \
210 addi r3,r1,STACK_FRAME_OVERHEAD; \
211 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
212 NOCOPY, crit_transfer_to_handler, \
213 ret_from_crit_exc)
214
/* The two .long words following the bl below are data, not code: the
 * transfer routine fetches the handler and return addresses through the
 * link register. NOTE(review): confirm against transfer_to_handler /
 * crit_transfer_to_handler in entry_32.S. */
215#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
216 li r10,trap; \
217 stw r10,TRAP(r11); \
218 lis r10,msr@h; \
219 ori r10,r10,msr@l; \
220 copyee(r10, r9); \
221 bl tfer; \
222 .long hdlr; \
223 .long ret
224
225#define COPY_EE(d, s) rlwimi d,s,0,16,16
226#define NOCOPY(d, s)
227
228#define EXC_XFER_STD(n, hdlr) \
229 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
230 ret_from_except_full)
231
232#define EXC_XFER_LITE(n, hdlr) \
233 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
234 ret_from_except)
235
236#define EXC_XFER_EE(n, hdlr) \
237 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
238 ret_from_except_full)
239
240#define EXC_XFER_EE_LITE(n, hdlr) \
241 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
242 ret_from_except)
243
244
245/*
246 * 0x0100 - Critical Interrupt Exception
247 */
248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
249
250/*
251 * 0x0200 - Machine Check Exception
252 */
253 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
254
255/*
256 * 0x0300 - Data Storage Exception
257 * This happens for just a few reasons. U0 set (but we don't do that),
258 * or zone protection fault (user violation, write to protected page).
259 * If this is just an update of modified status, we do that quickly
260 * and exit. Otherwise, we call heavyweight functions to do the work.
261 */
262 START_EXCEPTION(0x0300, DataStorage)
263 mtspr SPRN_SPRG0, r10 /* Save some working registers */
264 mtspr SPRN_SPRG1, r11
/* NOTE(review): on 403GCX the high SPRGs are not used; r12/r9/CR/PID are
 * spilled to low memory at r0+0..12 instead -- confirm against 403GCX
 * register set. */
265#ifdef CONFIG_403GCX
266 stw r12, 0(r0)
267 stw r9, 4(r0)
268 mfcr r11
269 mfspr r12, SPRN_PID
270 stw r11, 8(r0)
271 stw r12, 12(r0)
272#else
273 mtspr SPRN_SPRG4, r12
274 mtspr SPRN_SPRG5, r9
275 mfcr r11
276 mfspr r12, SPRN_PID
277 mtspr SPRN_SPRG7, r11
278 mtspr SPRN_SPRG6, r12
279#endif
280
281 /* First, check if it was a zone fault (which means a user
282 * tried to access a kernel or read-protected page - always
283 * a SEGV). All other faults here must be stores, so no
284 * need to check ESR_DST as well. */
285 mfspr r10, SPRN_ESR
286 andis. r10, r10, ESR_DIZ@h
287 bne 2f
288
289 mfspr r10, SPRN_DEAR /* Get faulting address */
290
291 /* If we are faulting a kernel address, we have to use the
292 * kernel page tables.
293 */
294 lis r11, TASK_SIZE@h
295 cmplw r10, r11
296 blt+ 3f
297 lis r11, swapper_pg_dir@h
298 ori r11, r11, swapper_pg_dir@l
299 li r9, 0
300 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
301 b 4f
302
303 /* Get the PGD for the current thread.
304 */
3053:
306 mfspr r11,SPRN_SPRG3
307 lwz r11,PGDIR(r11)
3084:
309 tophys(r11, r11)
310 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
311 lwz r11, 0(r11) /* Get L1 entry */
312 rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
313 beq 2f /* Bail if no table */
314
315 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
316 lwz r11, 0(r12) /* Get Linux PTE */
317
318 andi. r9, r11, _PAGE_RW /* Is it writeable? */
319 beq 2f /* Bail if not */
320
321 /* Update 'changed'.
322 */
323 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
324 stw r11, 0(r12) /* Update Linux page table */
325
326 /* Most of the Linux PTE is ready to load into the TLB LO.
327 * We set ZSEL, where only the LS-bit determines user access.
328 * We set execute, because we don't have the granularity to
329 * properly set this at the page level (Linux problem).
330 * If shared is set, we cause a zero PID->TID load.
331 * Many of these bits are software only. Bits we don't set
332 * here we (properly should) assume have the appropriate value.
333 */
334 li r12, 0x0ce2
335 andc r11, r11, r12 /* Make sure 20, 21 are zero */
336
337 /* find the TLB index that caused the fault. It has to be here.
338 */
339 tlbsx r9, 0, r10
340
341 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
342
343 /* Done...restore registers and get out of here.
344 */
345#ifdef CONFIG_403GCX
346 lwz r12, 12(r0)
347 lwz r11, 8(r0)
348 mtspr SPRN_PID, r12
349 mtcr r11
350 lwz r9, 4(r0)
351 lwz r12, 0(r0)
352#else
353 mfspr r12, SPRN_SPRG6
354 mfspr r11, SPRN_SPRG7
355 mtspr SPRN_PID, r12
356 mtcr r11
357 mfspr r9, SPRN_SPRG5
358 mfspr r12, SPRN_SPRG4
359#endif
360 mfspr r11, SPRN_SPRG1
361 mfspr r10, SPRN_SPRG0
362 PPC405_ERR77_SYNC
363 rfi /* Should sync shadow TLBs */
364 b . /* prevent prefetch past rfi */
365
3662:
367 /* The bailout. Restore registers to pre-exception conditions
368 * and call the heavyweights to help us out.
369 */
370#ifdef CONFIG_403GCX
371 lwz r12, 12(r0)
372 lwz r11, 8(r0)
373 mtspr SPRN_PID, r12
374 mtcr r11
375 lwz r9, 4(r0)
376 lwz r12, 0(r0)
377#else
378 mfspr r12, SPRN_SPRG6
379 mfspr r11, SPRN_SPRG7
380 mtspr SPRN_PID, r12
381 mtcr r11
382 mfspr r9, SPRN_SPRG5
383 mfspr r12, SPRN_SPRG4
384#endif
385 mfspr r11, SPRN_SPRG1
386 mfspr r10, SPRN_SPRG0
387 b DataAccess
388
389/*
390 * 0x0400 - Instruction Storage Exception
391 * This is caused by a fetch from non-execute or guarded pages.
392 */
393 START_EXCEPTION(0x0400, InstructionAccess)
394 NORMAL_EXCEPTION_PROLOG
395 mr r4,r12 /* Pass SRR0 as arg2 */
396 li r5,0 /* Pass zero as arg3 */
397 EXC_XFER_EE_LITE(0x400, handle_page_fault)
398
399/* 0x0500 - External Interrupt Exception */
400 EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
401
402/* 0x0600 - Alignment Exception */
403 START_EXCEPTION(0x0600, Alignment)
404 NORMAL_EXCEPTION_PROLOG
405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
406 stw r4,_DEAR(r11)
407 addi r3,r1,STACK_FRAME_OVERHEAD
408 EXC_XFER_EE(0x600, alignment_exception)
409
410/* 0x0700 - Program Exception */
411 START_EXCEPTION(0x0700, ProgramCheck)
412 NORMAL_EXCEPTION_PROLOG
413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
414 stw r4,_ESR(r11)
415 addi r3,r1,STACK_FRAME_OVERHEAD
416 EXC_XFER_STD(0x700, program_check_exception)
417
418 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
421 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
422
423/* 0x0C00 - System Call Exception */
424 START_EXCEPTION(0x0C00, SystemCall)
425 NORMAL_EXCEPTION_PROLOG
426 EXC_XFER_EE_LITE(0xc00, DoSyscall)
427
428 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
430 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
431
432/* 0x1000 - Programmable Interval Timer (PIT) Exception */
433 START_EXCEPTION(0x1000, Decrementer)
434 NORMAL_EXCEPTION_PROLOG
435 lis r0,TSR_PIS@h
436 mtspr SPRN_TSR,r0 /* Clear the PIT exception */
437 addi r3,r1,STACK_FRAME_OVERHEAD
438 EXC_XFER_LITE(0x1000, timer_interrupt)
439
440#if 0
441/* NOTE:
442 * FIT and WDT handlers are not implemented yet.
443 */
444
445/* 0x1010 - Fixed Interval Timer (FIT) Exception
446*/
447 STND_EXCEPTION(0x1010, FITException, unknown_exception)
448
449/* 0x1020 - Watchdog Timer (WDT) Exception
450*/
451#ifdef CONFIG_BOOKE_WDT
452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
453#else
454 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
455#endif
456#endif
457
458/* 0x1100 - Data TLB Miss Exception
459 * As the name implies, translation is not in the MMU, so search the
460 * page tables and fix it. The only purpose of this function is to
461 * load TLB entries from the page table if they exist.
462 */
463 START_EXCEPTION(0x1100, DTLBMiss)
464 mtspr SPRN_SPRG0, r10 /* Save some working registers */
465 mtspr SPRN_SPRG1, r11
466#ifdef CONFIG_403GCX
467 stw r12, 0(r0)
468 stw r9, 4(r0)
469 mfcr r11
470 mfspr r12, SPRN_PID
471 stw r11, 8(r0)
472 stw r12, 12(r0)
473#else
474 mtspr SPRN_SPRG4, r12
475 mtspr SPRN_SPRG5, r9
476 mfcr r11
477 mfspr r12, SPRN_PID
478 mtspr SPRN_SPRG7, r11
479 mtspr SPRN_SPRG6, r12
480#endif
481 mfspr r10, SPRN_DEAR /* Get faulting address */
482
483 /* If we are faulting a kernel address, we have to use the
484 * kernel page tables.
485 */
486 lis r11, TASK_SIZE@h
487 cmplw r10, r11
488 blt+ 3f
489 lis r11, swapper_pg_dir@h
490 ori r11, r11, swapper_pg_dir@l
491 li r9, 0
492 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
493 b 4f
494
495 /* Get the PGD for the current thread.
496 */
4973:
498 mfspr r11,SPRN_SPRG3
499 lwz r11,PGDIR(r11)
5004:
501 tophys(r11, r11)
502 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
503 lwz r12, 0(r11) /* Get L1 entry */
504 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
505 beq 2f /* Bail if no table */
506
507 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
508 lwz r11, 0(r12) /* Get Linux PTE */
509 andi. r9, r11, _PAGE_PRESENT
510 beq 5f
511
512 ori r11, r11, _PAGE_ACCESSED
513 stw r11, 0(r12)
514
515 /* Create TLB tag. This is the faulting address plus a static
516 * set of bits. These are size, valid, E, U0.
517 */
518 li r12, 0x00c0
519 rlwimi r10, r12, 0, 20, 31
520
521 b finish_tlb_load
522
5232: /* Check for possible large-page pmd entry */
/* NOTE(review): the rlwinm. below extracts the PMD's page-size field; a
 * nonzero result selects a large-page mapping -- confirm the bit layout
 * against the 4xx pgtable definitions. */
524 rlwinm. r9, r12, 2, 22, 24
525 beq 5f
526
527 /* Create TLB tag. This is the faulting address, plus a static
528 * set of bits (valid, E, U0) plus the size from the PMD.
529 */
530 ori r9, r9, 0x40
531 rlwimi r10, r9, 0, 20, 31
532 mr r11, r12
533
534 b finish_tlb_load
535
5365:
537 /* The bailout. Restore registers to pre-exception conditions
538 * and call the heavyweights to help us out.
539 */
540#ifdef CONFIG_403GCX
541 lwz r12, 12(r0)
542 lwz r11, 8(r0)
543 mtspr SPRN_PID, r12
544 mtcr r11
545 lwz r9, 4(r0)
546 lwz r12, 0(r0)
547#else
548 mfspr r12, SPRN_SPRG6
549 mfspr r11, SPRN_SPRG7
550 mtspr SPRN_PID, r12
551 mtcr r11
552 mfspr r9, SPRN_SPRG5
553 mfspr r12, SPRN_SPRG4
554#endif
555 mfspr r11, SPRN_SPRG1
556 mfspr r10, SPRN_SPRG0
557 b DataAccess
558
559/* 0x1200 - Instruction TLB Miss Exception
560 * Nearly the same as above, except we get our information from different
561 * registers and bailout to a different point.
562 */
563 START_EXCEPTION(0x1200, ITLBMiss)
564 mtspr SPRN_SPRG0, r10 /* Save some working registers */
565 mtspr SPRN_SPRG1, r11
566#ifdef CONFIG_403GCX
567 stw r12, 0(r0)
568 stw r9, 4(r0)
569 mfcr r11
570 mfspr r12, SPRN_PID
571 stw r11, 8(r0)
572 stw r12, 12(r0)
573#else
574 mtspr SPRN_SPRG4, r12
575 mtspr SPRN_SPRG5, r9
576 mfcr r11
577 mfspr r12, SPRN_PID
578 mtspr SPRN_SPRG7, r11
579 mtspr SPRN_SPRG6, r12
580#endif
581 mfspr r10, SPRN_SRR0 /* Get faulting address */
582
583 /* If we are faulting a kernel address, we have to use the
584 * kernel page tables.
585 */
586 lis r11, TASK_SIZE@h
587 cmplw r10, r11
588 blt+ 3f
589 lis r11, swapper_pg_dir@h
590 ori r11, r11, swapper_pg_dir@l
591 li r9, 0
592 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
593 b 4f
594
595 /* Get the PGD for the current thread.
596 */
5973:
598 mfspr r11,SPRN_SPRG3
599 lwz r11,PGDIR(r11)
6004:
601 tophys(r11, r11)
602 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
603 lwz r12, 0(r11) /* Get L1 entry */
604 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
605 beq 2f /* Bail if no table */
606
607 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
608 lwz r11, 0(r12) /* Get Linux PTE */
609 andi. r9, r11, _PAGE_PRESENT
610 beq 5f
611
612 ori r11, r11, _PAGE_ACCESSED
613 stw r11, 0(r12)
614
615 /* Create TLB tag. This is the faulting address plus a static
616 * set of bits. These are size, valid, E, U0.
617 */
618 li r12, 0x00c0
619 rlwimi r10, r12, 0, 20, 31
620
621 b finish_tlb_load
622
6232: /* Check for possible large-page pmd entry */
624 rlwinm. r9, r12, 2, 22, 24
625 beq 5f
626
627 /* Create TLB tag. This is the faulting address, plus a static
628 * set of bits (valid, E, U0) plus the size from the PMD.
629 */
630 ori r9, r9, 0x40
631 rlwimi r10, r9, 0, 20, 31
632 mr r11, r12
633
634 b finish_tlb_load
635
6365:
637 /* The bailout. Restore registers to pre-exception conditions
638 * and call the heavyweights to help us out.
639 */
640#ifdef CONFIG_403GCX
641 lwz r12, 12(r0)
642 lwz r11, 8(r0)
643 mtspr SPRN_PID, r12
644 mtcr r11
645 lwz r9, 4(r0)
646 lwz r12, 0(r0)
647#else
648 mfspr r12, SPRN_SPRG6
649 mfspr r11, SPRN_SPRG7
650 mtspr SPRN_PID, r12
651 mtcr r11
652 mfspr r9, SPRN_SPRG5
653 mfspr r12, SPRN_SPRG4
654#endif
655 mfspr r11, SPRN_SPRG1
656 mfspr r10, SPRN_SPRG0
657 b InstructionAccess
658
659 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
662 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
663#ifdef CONFIG_IBM405_ERR51
664 /* 405GP errata 51 */
665 START_EXCEPTION(0x1700, Trap_17)
666 b DTLBMiss
667#else
668 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
669#endif
670 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
678
679/* Check for a single step debug exception while in an exception
680 * handler before state has been saved. This is to catch the case
681 * where an instruction that we are trying to single step causes
682 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
683 * the exception handler generates a single step debug exception.
684 *
685 * If we get a debug trap on the first instruction of an exception handler,
686 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
687 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
688 * The exception handler was handling a non-critical interrupt, so it will
689 * save (and later restore) the MSR via SPRN_SRR1, which will still have
690 * the MSR_DE bit set.
691 */
692 /* 0x2000 - Debug Exception */
693 START_EXCEPTION(0x2000, DebugTrap)
694 CRITICAL_EXCEPTION_PROLOG
695
696 /*
697 * If this is a single step or branch-taken exception in an
698 * exception entry sequence, it was probably meant to apply to
699 * the code where the exception occurred (since exception entry
700 * doesn't turn off DE automatically). We simulate the effect
701 * of turning off DE on entry to an exception handler by turning
702 * off DE in the SRR3 value and clearing the debug status.
703 */
704 mfspr r10,SPRN_DBSR /* check single-step/branch taken */
705 andis. r10,r10,DBSR_IC@h
706 beq+ 2f
707
708 andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */
709 beq 1f /* branch and fix it up */
710
711 mfspr r10,SPRN_SRR2 /* Faulting instruction address */
712 cmplwi r10,0x2100
713 bgt+ 2f /* address above exception vectors */
714
715 /* here it looks like we got an inappropriate debug exception. */
7161: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */
717 lis r10,DBSR_IC@h /* clear the IC event */
718 mtspr SPRN_DBSR,r10
719 /* restore state and get out */
720 lwz r10,_CCR(r11)
721 lwz r0,GPR0(r11)
722 lwz r1,GPR1(r11)
723 mtcrf 0x80,r10
724 mtspr SPRN_SRR2,r12
725 mtspr SPRN_SRR3,r9
726 lwz r9,GPR9(r11)
727 lwz r12,GPR12(r11)
728 lwz r10,crit_r10@l(0)
729 lwz r11,crit_r11@l(0)
730 PPC405_ERR77_SYNC
731 rfci
732 b .
733
734 /* continue normal handling for a critical exception... */
7352: mfspr r4,SPRN_DBSR
736 addi r3,r1,STACK_FRAME_OVERHEAD
737 EXC_XFER_TEMPLATE(DebugException, 0x2002, \
738 (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
739 NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
740
741/*
742 * The other Data TLB exceptions bail out to this point
743 * if they can't resolve the lightweight TLB fault.
744 */
745DataAccess:
746 NORMAL_EXCEPTION_PROLOG
747 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
748 stw r5,_ESR(r11)
749 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
750 EXC_XFER_EE_LITE(0x300, handle_page_fault)
751
752/* Other PowerPC processors, namely those derived from the 6xx-series
753 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
754 * However, for the 4xx-series processors these are neither defined nor
755 * reserved.
756 */
757
758 /* Damn, I came up one instruction too many to fit into the
759 * exception space :-). Both the instruction and data TLB
760 * miss get to this point to load the TLB.
761 * r10 - TLB_TAG value
762 * r11 - Linux PTE
763 * r12, r9 - available to use
764 * PID - loaded with proper value when we get here
765 * Upon exit, we reload everything and RFI.
766 * Actually, it will fit now, but oh well.....a common place
767 * to load the TLB.
768 */
/* Software round-robin index of the next 4xx TLB entry to overwrite;
 * wraps modulo PPC4XX_TLB_SIZE (see the andi. below). */
769tlb_4xx_index:
770 .long 0
771finish_tlb_load:
772 /* load the next available TLB index.
773 */
774 lwz r9, tlb_4xx_index@l(0)
775 addi r9, r9, 1
776 andi. r9, r9, (PPC4XX_TLB_SIZE-1)
777 stw r9, tlb_4xx_index@l(0)
778
7796:
/* NOTE(review): label 6 above is not referenced anywhere in the visible
 * code -- possibly a leftover. */
780 /*
781 * Clear out the software-only bits in the PTE to generate the
782 * TLB_DATA value. These are the bottom 2 bits of the RPM, the
783 * top 3 bits of the zone field, and M.
784 */
785 li r12, 0x0ce2
786 andc r11, r11, r12
787
788 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
789 tlbwe r10, r9, TLB_TAG /* Load TLB HI */
790
791 /* Done...restore registers and get out of here.
792 */
793#ifdef CONFIG_403GCX
794 lwz r12, 12(r0)
795 lwz r11, 8(r0)
796 mtspr SPRN_PID, r12
797 mtcr r11
798 lwz r9, 4(r0)
799 lwz r12, 0(r0)
800#else
801 mfspr r12, SPRN_SPRG6
802 mfspr r11, SPRN_SPRG7
803 mtspr SPRN_PID, r12
804 mtcr r11
805 mfspr r9, SPRN_SPRG5
806 mfspr r12, SPRN_SPRG4
807#endif
808 mfspr r11, SPRN_SPRG1
809 mfspr r10, SPRN_SPRG0
810 PPC405_ERR77_SYNC
811 rfi /* Should sync shadow TLBs */
812 b . /* prevent prefetch past rfi */
813
814/* extern void giveup_fpu(struct task_struct *prev)
815 *
816 * The PowerPC 4xx family of processors do not have an FPU, so this just
817 * returns.
818 */
819_GLOBAL(giveup_fpu)
820 blr
821
822/* This is where the main kernel code starts.
823 */
824start_here:
825
826 /* ptr to current */
827 lis r2,init_task@h
828 ori r2,r2,init_task@l
829
830 /* ptr to phys current thread */
831 tophys(r4,r2)
832 addi r4,r4,THREAD /* init task's THREAD */
833 mtspr SPRN_SPRG3,r4
834
835 /* stack */
836 lis r1,init_thread_union@ha
837 addi r1,r1,init_thread_union@l
838 li r0,0
839 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
840
841 bl early_init /* We have to do this with MMU on */
842
843/*
844 * Decide what sort of machine this is and initialize the MMU.
845 */
846 mr r3,r31
847 mr r4,r30
848 mr r5,r29
849 mr r6,r28
850 mr r7,r27
851 bl machine_init
852 bl MMU_init
853
854/* Go back to running unmapped so we can load up new values
855 * and change to using our exception vectors.
856 * On the 4xx, all we have to do is invalidate the TLB to clear
857 * the old 16M byte TLB mappings.
858 */
859 lis r4,2f@h
860 ori r4,r4,2f@l
861 tophys(r4,r4)
862 lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
863 ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
864 mtspr SPRN_SRR0,r4
865 mtspr SPRN_SRR1,r3
866 rfi
867 b . /* prevent prefetch past rfi */
868
869/* Load up the kernel context */
8702:
871 sync /* Flush to memory before changing TLB */
872 tlbia
873 isync /* Flush shadow TLBs */
874
875 /* set up the PTE pointers for the Abatron bdiGDB.
876 */
877 lis r6, swapper_pg_dir@h
878 ori r6, r6, swapper_pg_dir@l
879 lis r5, abatron_pteptrs@h
880 ori r5, r5, abatron_pteptrs@l
881 stw r5, 0xf0(r0) /* Must match your Abatron config file */
882 tophys(r5,r5)
883 stw r6, 0(r5)
884
885/* Now turn on the MMU for real! */
886 lis r4,MSR_KERNEL@h
887 ori r4,r4,MSR_KERNEL@l
888 lis r3,start_kernel@h
889 ori r3,r3,start_kernel@l
890 mtspr SPRN_SRR0,r3
891 mtspr SPRN_SRR1,r4
892 rfi /* enable MMU and jump to start_kernel */
893 b . /* prevent prefetch past rfi */
894
/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 16 MBytes of memory 1:1
 * virtual to physical and more importantly sets the cache mode.
 *
 * Called in real mode; returns via blr with translation still off.
 * The caller is expected to rfi through this pinned mapping.
 */
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
	isync

	/* We should still be executing code at physical address 0x0000xxxx
	 * at this point.  However, start_here is at virtual address
	 * 0xC000xxxx.  So, set up a TLB mapping to cover this once
	 * translation is enabled.
	 */

	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
	ori	r3,r3,KERNELBASE@l
	tophys(r4,r3)			/* Load the kernel physical address */

	iccci	r0,r3			/* Invalidate the i-cache before use */

	/* Load the kernel PID.
	 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Configure and load two entries into TLB slots 62 and 63.
	 * In case we are pinning TLBs, these are reserved in by the
	 * other TLB functions.  If not reserving, then it doesn't
	 * matter where they are loaded.
	 *
	 * NOTE(review): the comment above mentions two entries/slot 62,
	 * but only slot 63 is written below -- confirm whether slot 62
	 * is set up elsewhere.
	 */
	clrrwi	r4,r4,10		/* Mask off the real page number */
	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */

	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))

	li	r0,63			/* TLB slot 63 */

	tlbwe	r4,r0,TLB_DATA		/* Load the data portion of the entry */
	tlbwe	r3,r0,TLB_TAG		/* Load the tag portion of the entry */

#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)

	/* Load a TLB entry for the UART, so that ppc4xx_progress() can use
	 * the UARTs nice and early.  We use a 4k real==virtual mapping. */

	lis	r3,SERIAL_DEBUG_IO_BASE@h
	ori	r3,r3,SERIAL_DEBUG_IO_BASE@l
	mr	r4,r3
	clrrwi	r4,r4,12
	/* I/O mapping: write-through-inhibited, coherent, guarded */
	ori	r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)

	clrrwi	r3,r3,12
	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))

	li	r0,0			/* TLB slot 0 */
	tlbwe	r4,r0,TLB_DATA
	tlbwe	r3,r0,TLB_TAG
#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */

	isync

	/* Establish the exception vector base
	 */
	lis	r4,KERNELBASE@h		/* EVPR only uses the high 16-bits */
	tophys(r0,r4)			/* Use the physical address */
	mtspr	SPRN_EVPR,r0

	blr
965
/* Force a board reset by setting the system-reset bit in DBCR0.
 * NOTE(review): there is no blr/loop after the mtspr -- the reset is
 * presumably immediate; if it were not, execution would fall through
 * into set_context below.  Confirm against the 4xx DBCR0 semantics.
 */
_GLOBAL(abort)
        mfspr   r13,SPRN_DBCR0
        oris    r13,r13,DBCR0_RST_SYSTEM@h
        mtspr   SPRN_DBCR0,r13
/* void set_context(unsigned long contextid, pgd_t *pgd)
 *
 * Switch the MMU context by loading the new context id (r3) into the
 * PID register.  With CONFIG_BDI_SWITCH, also publish the new PGDIR
 * (r4) for the Abatron BDI2000 hardware debugger.
 */
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)		/* abatron_pteptrs, stored at 0xf0 above */
	stw	r4, 0x4(r5)
#endif
	sync
	mtspr	SPRN_PID,r3
	isync				/* Need an isync to flush shadow */
					/* TLBs after changing PID */
	blr
986
/* We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12			/* 2^12 = page alignment */
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir		/* kernel's initial page directory */
swapper_pg_dir:
	.space	4096


/* Stack for handling critical exceptions from kernel mode */
	.section .bss
	.align	12
exception_stack_bottom:
	.space	4096
/* NOTE(review): label says "critical" while the bottom/top pair says
 * "exception" -- presumably the same stack; confirm intended naming. */
critical_stack_top:
	.globl	exception_stack_top
exception_stack_top:

/* This space gets a copy of optional info passed to us by the bootstrap
 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
new file mode 100644
index 000000000000..147215a0d6c0
--- /dev/null
+++ b/arch/powerpc/kernel/head_64.S
@@ -0,0 +1,1957 @@
1/*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/config.h>
27#include <linux/threads.h>
28#include <asm/reg.h>
29#include <asm/page.h>
30#include <asm/mmu.h>
31#include <asm/systemcfg.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34#include <asm/bug.h>
35#include <asm/cputable.h>
36#include <asm/setup.h>
37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h>
39#include <asm/thread_info.h>
40
41#ifdef CONFIG_PPC_ISERIES
42#define DO_SOFT_DISABLE
43#endif
44
45/*
46 * We layout physical memory as follows:
47 * 0x0000 - 0x00ff : Secondary processor spin code
48 * 0x0100 - 0x2fff : pSeries Interrupt prologs
49 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
50 * 0x6000 - 0x6fff : Initial (CPU0) segment table
51 * 0x7000 - 0x7fff : FWNMI data area
52 * 0x8000 - : Early init and support code
53 */
54
55/*
56 * SPRG Usage
57 *
58 * Register Definition
59 *
60 * SPRG0 reserved for hypervisor
61 * SPRG1 temp - used to save gpr
62 * SPRG2 temp - used to save gpr
63 * SPRG3 virt addr of paca
64 */
65
66/*
67 * Entering into this code we make the following assumptions:
68 * For pSeries:
69 * 1. The MMU is off & open firmware is running in real mode.
70 * 2. The kernel is entered at __start
71 *
72 * For iSeries:
73 * 1. The MMU is on (as it always is for iSeries)
74 * 2. The kernel is entered at system_reset_iSeries
75 */
76
.text
	.globl	_stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	/* pSeries entry point: branch straight to early multiplatform init */
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap

#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong	mschunks_map-KERNELBASE
	.llong	0	/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#endif /* CONFIG_PPC_ISERIES */

	/* Secondary processors spin on this value until it goes to 1. */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	*/
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0
129
. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated. This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 *
 * Entered with r3 = this cpu's linux cpu number.  Runs in real mode,
 * so absolute loads/stores of the hold variables use their low-order
 * (below 0x100) addresses only.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE			/* secondaries impossible in non-SMP */
#endif
#endif
167
/* This value is used to mark exception frames on the stack.
 * 0x7265677368657265 is ASCII for "regshere".
 */
	.section ".toc","aw"
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
	.text
173
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */

/*
 * Byte offsets into each per-exception PACA save area (PACA_EXGEN,
 * PACA_EXMC, PACA_EXSLB).  Note EX_SRR0/EX_R3 and EX_DAR/EX_LR
 * deliberately alias: the SLB-miss path uses the slot differently
 * from the generic path.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
#define EX_DAR		48
#define EX_LR		48	/* SLB miss saves LR, but not DAR */
#define EX_DSISR	56
#define EX_CCR		60

/*
 * pSeries prolog: entered with relocation off and r13 saved in SPRG1.
 * Saves r9-r13 in the given PACA area, loads CR into r9, and rfid's to
 * `label` with MSR_IR/DR/RI set (relocation on).  On arrival at label:
 * r13 = paca, r9 = saved CR, r11/r12 = saved SRR0/SRR1.
 */
#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SPRN_SRR0,r12;						\
	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
	mtspr	SPRN_SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */

/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */

/* Part 1: save r9-r13 into the PACA area and pick up CR, same register
 * contract as the pSeries prolog but without the rfid (already virtual). */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRN_SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

/* Part 2: fetch SRR0/SRR1 from the hypervisor's lppaca copy into
 * r11/r12 and turn MSR_RI back on. */
#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * Builds a full pt_regs frame on the kernel stack (switching to
 * PACAKSAVE if we came from userspace) and aborts to bad_stack if the
 * resulting r1 is not a kernel address.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				   \
	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
	mr	r10,r1;			/* Save r1			*/ \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							   \
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
	bge-	cr1,bad_stack;		/* abort if it is		*/ \
	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
	std	r10,0(r1);		/* make stack chain pointer	*/ \
	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
	ld	r10,area+EX_R10(r13);					   \
	std	r9,GPR9(r1);						   \
	std	r10,GPR10(r1);						   \
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
	ld	r10,area+EX_R12(r13);					   \
	ld	r11,area+EX_R13(r13);					   \
	std	r9,GPR11(r1);						   \
	std	r10,GPR12(r1);						   \
	std	r11,GPR13(r1);						   \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
	mflr	r9;			/* save LR in stackframe	*/ \
	std	r9,_LINK(r1);						   \
	mfctr	r10;			/* save CTR in stackframe	*/ \
	std	r10,_CTR(r1);						   \
	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
	std	r11,_XER(r1);						   \
	li	r9,(n)+1;						   \
	std	r9,_TRAP(r1);		/* set trap number		*/ \
	li	r10,0;							   \
	ld	r11,exception_marker@toc(r2);				   \
	std	r10,RESULT(r1);		/* clear regs->result		*/ \
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
295
/*
 * Exception vectors.
 */

/* Place a pSeries exception vector at absolute address n; it saves r13
 * in SPRG1 and jumps (via the relocating prolog) to label##_common. */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

/* iSeries equivalent: relocation is already on, so just run both prolog
 * halves and branch to the common handler. */
#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */	\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

/* iSeries maskable exception: if interrupts are soft-disabled
 * (PACAPROCENABLED == 0), divert to the label##_iSeries_masked stub
 * instead of the real handler. */
#define MASKABLE_EXCEPTION_ISERIES(n, label)				\
	.globl label##_iSeries;						\
label##_iSeries:							\
	HMT_MEDIUM;							\
	mtspr	SPRN_SPRG1,r13;		/* save r13 */			\
	RUNLATCH_ON(r13);						\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
	lbz	r10,PACAPROCENABLED(r13);				\
	cmpwi	0,r10,0;						\
	beq-	label##_iSeries_masked;					\
	EXCEPTION_PROLOG_ISERIES_2;					\
	b	label##_common;						\

#ifdef DO_SOFT_DISABLE
/* Soft-disable (iSeries): record the old soft-enable state in the
 * frame, clear PACAPROCENABLED, then hard-ENABLE EE again (ori MSR_EE)
 * so the shared processor can still take hypervisor interrupts. */
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

/* Restore EE from the interrupted context's saved MSR. */
#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif

/* Standard common handler body: full prolog, save non-volatile GPRs,
 * call hdlr(regs), return via the full exception-return path. */
#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

/* "Lite" variant: skips save_nvgprs and uses the lighter return path;
 * only usable when hdlr never needs the non-volatile GPRs. */
#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite
379
/*
 * Start of pSeries system interrupt routines
 *
 * These vectors live at their architected fixed offsets (0x100, 0x200,
 * ...) and run with relocation off.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	/* machine check uses its own save area so it can nest over others */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	/* No SLB (STAB cpu): detect a kernel-segment (0xC) STAB miss and
	 * divert to the bolted-STAB fast path before the generic prolog. */
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	RUNLATCH_ON(r13)
	/* open-coded prolog: SLB miss saves r3 instead of SRR0 and stays
	 * in real mode (relative branch to .do_slb_miss) */
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRN_SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	mfspr	r3,SPRN_DAR
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRN_SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	/* syscall keeps r13 in r9 rather than spilling to the PACA */
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRN_SPRG3
	mfspr	r11,SPRN_SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SPRN_SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

	. = 0x3000
/*** pSeries interrupt support ***/

	/* moved from 0xf00 -- "." places it at the current location,
	 * out of the way of the 0xf20 altivec vector */
	STD_EXCEPTION_PSERIES(., performance_monitor)

	.align	7
/* Target of the STAB-miss fast path from 0x300: undo the CR/r12
 * scratch usage, then run the prolog into .do_stab_bolted using the
 * SLB save area. */
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * Vectors for the FWNMI option.  Share common code.
 * (Registered with firmware; not at fixed architected offsets.)
 */
	.globl system_reset_fwnmi
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
527
#ifdef CONFIG_PPC_ISERIES
/*** ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRN_SPRG1,r13
BEGIN_FTR_SECTION
	/* No SLB: detect kernel-segment STAB miss, as in the pSeries path */
	mtspr	SPRN_SPRG2,r12
	mfspr	r13,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRN_SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,SPRN_DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRN_SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)	/* faulting address */
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl	system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRN_SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRN_SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
	bne	.__secondary_start		/* We've been told to go */
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRN_SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	/* Soft-masked decrementer: note it in the lppaca and re-arm DEC
	 * so we don't immediately re-take the interrupt. */
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	/* Restore everything saved by EXCEPTION_PROLOG_ISERIES_1 and
	 * return to the interrupted context without handling. */
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */
670
/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 * Otherwise identical to the STD_EXCEPTION_COMMON expansion.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
701
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	/* terminate the back-chain with a zeroed frame pointer */
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	/* kernel_bad_stack panics; loop in case it somehow returns */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b
741
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer		/* can't recover: exception state lost */
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	/* drop RI before touching SRR0/SRR1: from here an exception
	 * would clobber them */
	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
783
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)		/* It wont fit in the 0x300 handler */
	/* stash DAR/DSISR in the PACA before the prolog clobbers r10 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page	 	/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)		/* faulting address is saved SRR0 */
	andis.	r4,r12,0x5820		/* extract fault bits from SRR1 */
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite
822
.align	7
	.globl alignment_common
alignment_common:
	/* stash DAR/DSISR in the PACA before the prolog clobbers r10 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	/* CR still holds the MSR_PR test from the prolog */
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE			/* kernel FP use is a bug; don't return */

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except
876
#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h	/* set MSR_VEC in the saved SRR1 */
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
950
 951/*
 952 * Hash table stuff
 953 *
 * do_hash_page: called on a DSI/ISI with r3 = faulting address and
 * r4 = DSISR (or DSISR-like bits for an ISI).  Tries to fill in a
 * hash PTE via hash_page(); falls through to the full page-fault
 * path when the access bits indicate a real fault.
 953 */
 954	.align	7
 955_GLOBAL(do_hash_page)
 956	std	r3,_DAR(r1)
 957	std	r4,_DSISR(r1)
 958
 959	andis.	r0,r4,0xa450		/* weird error? */
 960	bne-	.handle_page_fault	/* if so, take the full fault path;
					 * otherwise try to insert a HPTE */
 961BEGIN_FTR_SECTION
 962	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
 963	bne-	.do_ste_alloc		/* If so handle it */
 964END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 965
 966	/*
 967	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
 968	 * accessing a userspace segment (even from the kernel). We assume
 969	 * kernel addresses always have the high bit set.
 970	 */
 971	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
 972	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
 973	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
 974	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
 975	ori	r4,r4,1			/* add _PAGE_PRESENT */
 976	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
 977
 978	/*
 979	 * On iSeries, we soft-disable interrupts here, then
 980	 * hard-enable interrupts so that the hash_page code can spin on
 981	 * the hash_table_lock without problems on a shared processor.
 982	 */
 983	DISABLE_INTS
 984
 985	/*
 986	 * r3 contains the faulting address
 987	 * r4 contains the required access permissions
 988	 * r5 contains the trap number
 989	 *
 990	 * at return r3 = 0 for success
 991	 */
 992	bl	.hash_page		/* build HPTE if possible */
 993	cmpdi	r3,0			/* see if hash_page succeeded */
 994
 995#ifdef DO_SOFT_DISABLE
 996	/*
 997	 * If we had interrupts soft-enabled at the point where the
 998	 * DSI/ISI occurred, and an interrupt came in during hash_page,
 999	 * handle it now.
1000	 * We jump to ret_from_except_lite rather than fast_exception_return
1001	 * because ret_from_except_lite will check for and handle pending
1002	 * interrupts if necessary.
1003	 */
1004	beq	.ret_from_except_lite
1005	/* For a hash failure, we don't bother re-enabling interrupts */
1006	ble-	12f
1007
1008	/*
1009	 * hash_page couldn't handle it, set soft interrupt enable back
1010	 * to what it was before the trap.  Note that .local_irq_restore
1011	 * handles any interrupts pending at this point.
1012	 */
1013	ld	r3,SOFTE(r1)
1014	bl	.local_irq_restore
1015	b	11f
1016#else
1017	beq	fast_exception_return   /* Return from exception on success */
1018	ble-	12f			/* Failure return from hash_page */
1019
1020	/* fall through */
1021#endif
1022
1023/* Here we have a page fault that hash_page can't handle. */
1024_GLOBAL(handle_page_fault)
1025	ENABLE_INTS
	/* Full fault path: do_page_fault(regs, DAR, DSISR); non-zero
	 * return means the fault could not be resolved and we report it
	 * via bad_page_fault(). */
102611:	ld	r4,_DAR(r1)
1027	ld	r5,_DSISR(r1)
1028	addi	r3,r1,STACK_FRAME_OVERHEAD
1029	bl	.do_page_fault
1030	cmpdi	r3,0
1031	beq+	.ret_from_except_lite
1032	bl	.save_nvgprs
1033	mr	r5,r3
1034	addi	r3,r1,STACK_FRAME_OVERHEAD
1035	lwz	r4,_DAR(r1)
1036	bl	.bad_page_fault
1037	b	.ret_from_except
1038
1039/* We have a page fault that hash_page could handle but HV refused
1040 * the PTE insertion
1041 */
104212:	bl	.save_nvgprs
1043	addi	r3,r1,STACK_FRAME_OVERHEAD
1044	lwz	r4,_DAR(r1)
1045	bl	.low_hash_fault
1046	b	.ret_from_except
1047
1048	/* here we have a segment miss */
1049_GLOBAL(do_ste_alloc)
1050	bl	.ste_allocate		/* try to insert stab entry */
1051	cmpdi	r3,0
1052	beq+	fast_exception_return
1053	b	.handle_page_fault
1054
1055/*
1056 * r13 points to the PACA, r9 contains the saved CR,
1057 * r11 and r12 contain the saved SRR0 and SRR1.
1058 * r9 - r13 are saved in paca->exslb.
1059 * We assume we aren't going to take any exceptions during this procedure.
1060 * We assume (DAR >> 60) == 0xc.
 *
 * Inserts a bolted segment-table entry for a kernel address, evicting a
 * random entry from the primary STE group if it is full, then returns
 * from the exception with rfid.
1061 */
1062	.align	7
1063_GLOBAL(do_stab_bolted)
1064	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1065	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
1066
1067	/* Hash to the primary group */
1068	ld	r10,PACASTABVIRT(r13)
1069	mfspr	r11,SPRN_DAR
1070	srdi	r11,r11,28
1071	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
1072
1073	/* Calculate VSID */
1074	/* This is a kernel address, so protovsid = ESID */
1075	ASM_VSID_SCRAMBLE(r11, r9)
1076	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
1077
1078	/* Search the primary group for a free entry */
10791:	ld	r11,0(r10)	/* Test valid bit of the current ste */
1080	andi.	r11,r11,0x80
1081	beq	2f
1082	addi	r10,r10,16
1083	andi.	r11,r10,0x70
1084	bne	1b
1085
1086	/* Stick for only searching the primary group for now. */
1087	/* At least for now, we use a very simple random castout scheme */
1088	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1089	mftb	r11
1090	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
1091	ori	r11,r11,0x10
1092
1093	/* r10 currently points to an ste one past the group of interest */
1094	/* make it point to the randomly selected entry */
1095	subi	r10,r10,128
1096	or	r10,r10,r11	/* r10 is the entry to invalidate */
1097
1098	isync			/* mark the entry invalid */
1099	ld	r11,0(r10)
1100	rldicl	r11,r11,56,1	/* clear the valid bit */
1101	rotldi	r11,r11,8
1102	std	r11,0(r10)
1103	sync
1104
1105	clrrdi	r11,r11,28	/* Get the esid part of the ste */
1106	slbie	r11
1107
11082:	std	r9,8(r10)	/* Store the vsid part of the ste */
1109	eieio
1110
1111	mfspr	r11,SPRN_DAR		/* Get the new esid */
1112	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
1113	ori	r11,r11,0x90	/* Turn on valid and kp */
1114	std	r11,0(r10)	/* Put new entry back into the stab */
1115
1116	sync
1117
1118	/* All done -- return from exception. */
1119	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1120	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
1121
1122	andi.	r10,r12,MSR_RI
1123	beq-	unrecov_slb
1124
1125	mtcrf	0x80,r9			/* restore CR */
1126
	/* Clear MSR_RI before loading SRR0/SRR1 for the rfid */
1127	mfmsr	r10
1128	clrrdi	r10,r10,2
1129	mtmsrd	r10,1
1130
1131	mtspr	SPRN_SRR0,r11
1132	mtspr	SPRN_SRR1,r12
1133	ld	r9,PACA_EXSLB+EX_R9(r13)
1134	ld	r10,PACA_EXSLB+EX_R10(r13)
1135	ld	r11,PACA_EXSLB+EX_R11(r13)
1136	ld	r12,PACA_EXSLB+EX_R12(r13)
1137	ld	r13,PACA_EXSLB+EX_R13(r13)
1138	rfid
1139	b	.	/* prevent speculative execution */
1140
1141/*
1142 * r13 points to the PACA, r9 contains the saved CR,
1143 * r11 and r12 contain the saved SRR0 and SRR1.
1144 * r3 has the faulting address
1145 * r9 - r13 are saved in paca->exslb.
1146 * r3 is saved in paca->slb_r3
1147 * We assume we aren't going to take any exceptions during this procedure.
 *
 * Calls slb_allocate() to create the missing SLB entry, then returns
 * straight from the exception.  On iSeries SRR0/SRR1 must be reloaded
 * from the lppaca because the hypervisor delivered the exception.
1148 */
1149_GLOBAL(do_slb_miss)
1150	mflr	r10
1151
1152	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1153	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
1154
1155	bl	.slb_allocate		/* handle it */
1156
1157	/* All done -- return from exception. */
1158
1159	ld	r10,PACA_EXSLB+EX_LR(r13)
1160	ld	r3,PACA_EXSLB+EX_R3(r13)
1161	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1162#ifdef CONFIG_PPC_ISERIES
1163	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
1164#endif /* CONFIG_PPC_ISERIES */
1165
1166	mtlr	r10
1167
1168	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1169	beq-	unrecov_slb
1170
1171.machine	push
1172.machine	"power4"
1173	mtcrf	0x80,r9
1174	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1175.machine	pop
1176
1177#ifdef CONFIG_PPC_ISERIES
1178	mtspr	SPRN_SRR0,r11
1179	mtspr	SPRN_SRR1,r12
1180#endif /* CONFIG_PPC_ISERIES */
1181	ld	r9,PACA_EXSLB+EX_R9(r13)
1182	ld	r10,PACA_EXSLB+EX_R10(r13)
1183	ld	r11,PACA_EXSLB+EX_R11(r13)
1184	ld	r12,PACA_EXSLB+EX_R12(r13)
1185	ld	r13,PACA_EXSLB+EX_R13(r13)
1186	rfid
1187	b	.	/* prevent speculative execution */
1188
/*
 * Fatal path: taken when MSR_RI was clear at exception time, so state
 * cannot be recovered.  Loops forever in unrecoverable_exception().
 */
1189unrecov_slb:
1190	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1191	DISABLE_INTS
1192	bl	.save_nvgprs
11931:	addi	r3,r1,STACK_FRAME_OVERHEAD
1194	bl	.unrecoverable_exception
1195	b	1b
1196
1197/*
1198 * Space for CPU0's segment table.
1199 *
1200 * On iSeries, the hypervisor must fill in at least one entry before
1201 * we get control (with relocate on).  The address is give to the hv
1202 * as a page number (see xLparMap in lpardata.c), so this must be at a
1203 * fixed address (the linker can't compute (u64)&initial_stab >>
1204 * PAGE_SHIFT).
1205 */
	/* One page of zeroed space, placed at a fixed physical address */
1206	. = STAB0_PHYS_ADDR	/* 0x6000 */
1207	.globl initial_stab
1208initial_stab:
1209	.space	4096
1210
1211/*
1212 * Data area reserved for FWNMI option.
1213 * This address (0x7000) is fixed by the RPA.
1214 */
	/* Use the same ". = addr" location-counter style as the rest of
	 * this file (see STAB0_PHYS_ADDR / LPARMAP_PHYS / 0x8000 above
	 * and below). */
1215	. = 0x7000
1216	.globl fwnmi_data_area
1217fwnmi_data_area:
1218
1219	/* iSeries does not use the FWNMI stuff, so it is safe to put
1220	 * this here, even if we later allow kernels that will boot on
1221	 * both pSeries and iSeries */
1222#ifdef CONFIG_PPC_ISERIES
1223	. = LPARMAP_PHYS
1224#include "lparmap.s"
1225/*
1226 * This ".text" is here for old compilers that generate a trailing
1227 * .note section when compiling .c files to .s
1228 */
1229	.text
1230#endif /* CONFIG_PPC_ISERIES */
1231
1232	. = 0x8000
1233
1234/*
1235 * On pSeries, secondary processors spin in the following code.
1236 * At entry, r3 = this processor's number (physical cpu id)
 *
 * Finds the paca whose hw_cpu_id matches r24, then spins until the
 * master sets paca->cpu_start, at which point we fall into
 * __secondary_start (SMP) or keep spinning (non-SMP).
1237 */
1238_GLOBAL(pSeries_secondary_smp_init)
1239	mr	r24,r3
1240
1241	/* turn on 64-bit mode */
1242	bl	.enable_64b_mode
1243	isync
1244
1245	/* Copy some CPU settings from CPU 0 */
1246	bl	.__restore_cpu_setup
1247
1248	/* Set up a paca value for this processor. Since we have the
1249	 * physical cpu id in r24, we need to search the pacas to find
1250	 * which logical id maps to our physical one.
1251	 */
1252	LOADADDR(r13, paca) 		/* Get base vaddr of paca array	 */
1253	li	r5,0			/* logical cpu id                */
12541:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
1255	cmpw	r6,r24			/* Compare to our id             */
1256	beq	2f
1257	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
1258	addi	r5,r5,1
1259	cmpwi	r5,NR_CPUS
1260	blt	1b
1261
	/* No matching paca: hand the cpu to kexec's holding pen */
1262	mr	r3,r24			/* not found, copy phys to r3	 */
1263	b	.kexec_wait		/* next kernel might do better	 */
1264
12652:	mtspr	SPRN_SPRG3,r13	/* Save vaddr of paca in SPRG3	 */
1266	/* From now on, r24 is expected to be logical cpuid */
1267	mr	r24,r5
12683:	HMT_LOW
1269	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
1270					/* start.			 */
1271	sync
1272
1273	/* Create a temp kernel stack for use before relocation is on.	*/
1274	ld	r1,PACAEMERGSP(r13)
1275	subi	r1,r1,STACK_FRAME_OVERHEAD
1276
1277	cmpwi	0,r23,0
1278#ifdef CONFIG_SMP
1279	bne	.__secondary_start
1280#endif
1281	b	3b			/* Loop until told to go	 */
1282
1283#ifdef CONFIG_PPC_ISERIES
/*
 * iSeries entry point: relocation is already on (the hypervisor set up
 * the initial segment table).  Clear BSS, set up the boot stack and TOC,
 * identify the CPU, do iSeries early setup, then join the common path.
 */
1284_STATIC(__start_initialization_iSeries)
1285	/* Clear out the BSS */
1286	LOADADDR(r11,__bss_stop)
1287	LOADADDR(r8,__bss_start)
1288	sub	r11,r11,r8		/* bss size			*/
1289	addi	r11,r11,7		/* round up to an even double word */
1290	rldicl. r11,r11,61,3		/* shift right by 3		*/
1291	beq	4f
1292	addi	r8,r8,-8
1293	li	r0,0
1294	mtctr	r11			/* zero this many doublewords	*/
12953:	stdu	r0,8(r8)
1296	bdnz	3b
12974:
1298	LOADADDR(r1,init_thread_union)
1299	addi	r1,r1,THREAD_SIZE
1300	li	r0,0
1301	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1302
1303	LOADADDR(r3,cpu_specs)
1304	LOADADDR(r4,cur_cpu_spec)
1305	li	r5,0
1306	bl	.identify_cpu
1307
	/* TOC pointer = __toc_start + 0x8000 (split in two addi's
	 * because the offset won't fit in one signed 16-bit immediate) */
1308	LOADADDR(r2,__toc_start)
1309	addi	r2,r2,0x4000
1310	addi	r2,r2,0x4000
1311
1312	bl	.iSeries_early_setup
1313	bl	.early_setup
1314
1315	/* relocation is on at this point */
1316
1317	b	.start_here_common
1318#endif /* CONFIG_PPC_ISERIES */
1319
1320#ifdef CONFIG_PPC_MULTIPLATFORM
1321
/*
 * Turn the MMU off (clear MSR_IR|MSR_DR) and continue at the real
 * address in r4.  Returns immediately if translation is already off.
 * Clobbers r0 and r3; does not return through LR when it uses rfid.
 */
1322_STATIC(__mmu_off)
1323	mfmsr	r3
1324	andi.	r0,r3,MSR_IR|MSR_DR
1325	beqlr
1326	andc	r3,r3,r0
1327	mtspr	SPRN_SRR0,r4
1328	mtspr	SPRN_SRR1,r3
1329	sync
1330	rfid
1331	b	.	/* prevent speculative execution */
1332
1333
1334/*
1335 * Here is our main kernel entry point. We support currently 2 kind of entries
1336 * depending on the value of r5.
1337 *
1338 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1339 *                 in r3...r7
1340 *
1341 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1342 *                 DT block, r4 is a physical pointer to the kernel itself
1343 *
1344 */
1345_GLOBAL(__start_initialization_multiplatform)
1346	/*
1347	 * Are we booted from a PROM Of-type client-interface ?
1348	 */
1349	cmpldi	cr0,r5,0
1350	bne	.__boot_from_prom		/* yes -> prom */
1351
	/* kexec-style entry: r3 = DT block (phys), r4 = kernel base (phys) */
1352	/* Save parameters */
1353	mr	r31,r3
1354	mr	r30,r4
1355
1356	/* Make sure we are running in 64 bits mode */
1357	bl	.enable_64b_mode
1358
1359	/* Setup some critical 970 SPRs before switching MMU off */
1360	bl	.__970_cpu_preinit
1361
1362	/* cpu # */
1363	li	r24,0
1364
1365	/* Switch off MMU if not already */
1366	LOADADDR(r4, .__after_prom_start - KERNELBASE)
1367	add	r4,r4,r30
1368	bl	.__mmu_off
1369	b	.__after_prom_start
1370
/*
 * Open Firmware entry: preserve the five OF parameters across setup,
 * relocate the TOC to a real address, then call prom_init(), which
 * never returns here (it restarts the kernel at __after_prom_start).
 */
1371_STATIC(__boot_from_prom)
1372	/* Save parameters */
1373	mr	r31,r3
1374	mr	r30,r4
1375	mr	r29,r5
1376	mr	r28,r6
1377	mr	r27,r7
1378
1379	/* Make sure we are running in 64 bits mode */
1380	bl	.enable_64b_mode
1381
1382	/* put a relocation offset into r3 */
1383	bl	.reloc_offset
1384
	/* TOC = __toc_start + 0x8000 (two addi's: offset exceeds 16 bits) */
1385	LOADADDR(r2,__toc_start)
1386	addi	r2,r2,0x4000
1387	addi	r2,r2,0x4000
1388
1389	/* Relocate the TOC from a virt addr to a real addr */
1390	add	r2,r2,r3
1391
1392	/* Restore parameters */
1393	mr	r3,r31
1394	mr	r4,r30
1395	mr	r5,r29
1396	mr	r6,r28
1397	mr	r7,r27
1398
1399	/* Do all of the interaction with OF client interface */
1400	bl	.prom_init
1401	/* We never return */
1402	trap
1403
1404/*
1405 * At this point, r3 contains the physical address we are running at,
1406 * returned by prom_init()
1407 */
1408_STATIC(__after_prom_start)
1409
1410/*
1411 * We need to run with __start at physical address 0.
1412 * This will leave some code in the first 256B of
1413 * real memory, which are reserved for software use.
1414 * The remainder of the first page is loaded with the fixed
1415 * interrupt vectors.  The next two pages are filled with
1416 * unknown exception placeholders.
1417 *
1418 * Note: This process overwrites the OF exception vectors.
1419 *	r26 == relocation offset
1420 *	r27 == KERNELBASE
1421 */
1422	bl	.reloc_offset
1423	mr	r26,r3
1424	SET_REG_TO_CONST(r27,KERNELBASE)
1425
1426	li	r3,0			/* target addr */
1427
1428	// XXX FIXME: Use phys returned by OF (r30)
1429	add	r4,r27,r26 		/* source addr			 */
1430					/* current address of _start	 */
1431					/*   i.e. where we are running	 */
1432					/* the source addr		 */
1433
1434	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
1435	sub	r5,r5,r27
1436
1437	li	r6,0x100		/* Start offset, the first 0x100 */
1438					/* bytes were copied earlier.	 */
1439
1440	bl	.copy_and_flush		/* copy the first n bytes	 */
1441					/* this includes the code being	 */
1442					/* executed here.		 */
1443
	/* Jump into the freshly-made copy at address 0 before copying
	 * the remainder, so we don't copy over our own feet. */
1444	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
1445	mtctr	r0			/* that we just made/relocated	 */
1446	bctr
1447
14484:	LOADADDR(r5,klimit)
1449	add	r5,r5,r26
1450	ld	r5,0(r5)		/* get the value of klimit */
1451	sub	r5,r5,r27
1452	bl	.copy_and_flush		/* copy the rest */
1453	b	.start_here_multiplatform
1454
1455#endif /* CONFIG_PPC_MULTIPLATFORM */
1456
1457/*
1458 * Copy routine used to copy the kernel to start at physical address 0
1459 * and flush and invalidate the caches as needed.
1460 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1461 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1462 *
1463 * Note: this routine *only* clobbers r0, r6 and lr
 *
 * Copies 16 doublewords (128 bytes) at a time, then dcbst+icbi each
 * chunk so the copied instructions are coherent with the icache.
1464 */
1465_GLOBAL(copy_and_flush)
1466	addi	r5,r5,-8
1467	addi	r6,r6,-8
14684:	li	r0,16			/* Use the least common		*/
1469					/* denominator cache line	*/
1470					/* size.  This results in	*/
1471					/* extra cache line flushes	*/
1472					/* but operation is correct.	*/
1473					/* Can't get cache line size	*/
1474					/* from NACA as it is being	*/
1475					/* moved too.			*/
1476
1477	mtctr	r0			/* put # words/line in ctr	*/
14783:	addi	r6,r6,8			/* copy a cache line		*/
1479	ldx	r0,r6,r4
1480	stdx	r0,r6,r3
1481	bdnz	3b
1482	dcbst	r6,r3			/* write it to memory		*/
1483	sync
1484	icbi	r6,r3			/* flush the icache line	*/
1485	cmpld	0,r6,r5
1486	blt	4b
1487	sync
	/* undo the initial -8 bias on r5/r6 before returning */
1488	addi	r5,r5,8
1489	addi	r6,r6,8
1490	blr
1491
/* End marker: everything up to here is copied to phys 0 in the first pass */
1492.align 8
1493copy_to_here:
1494
1495#ifdef CONFIG_SMP
1496#ifdef CONFIG_PPC_PMAC
1497/*
1498 * On PowerMac, secondary processors starts from the reset vector, which
1499 * is temporarily turned into a call to one of the functions below.
 *
 * Each cpu's reset vector is pointed at a different 8-byte slot below,
 * so the li/b pair encodes which cpu number (r24) is starting.
1500 */
1501	.section ".text";
1502	.align 2 ;
1503
1504	.globl	__secondary_start_pmac_0
1505__secondary_start_pmac_0:
1506	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1507	li	r24,0
1508	b	1f
1509	li	r24,1
1510	b	1f
1511	li	r24,2
1512	b	1f
1513	li	r24,3
15141:
1515
/*
 * PowerMac secondary-cpu startup: r24 = this cpu's number (set above).
 * Switches to 64-bit mode, restores CPU0's setup, points SPRG3 at this
 * cpu's paca, builds a temporary real-mode stack and joins the common
 * __secondary_start path.
 */
1516_GLOBAL(pmac_secondary_start)
1517	/* turn on 64-bit mode */
1518	bl	.enable_64b_mode
1519	isync
1520
1521	/* Copy some CPU settings from CPU 0 */
1522	bl	.__restore_cpu_setup
1523
1524	/* pSeries do that early though I don't think we really need it */
1525	mfmsr	r3
1526	ori	r3,r3,MSR_RI
1527	mtmsrd	r3			/* RI on */
1528
1529	/* Set up a paca value for this processor. */
1530	LOADADDR(r4, paca) 		 /* Get base vaddr of paca array	*/
1531	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
1532	add	r13,r13,r4		/* for this processor.		*/
1533	mtspr	SPRN_SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
1534
1535	/* Create a temp kernel stack for use before relocation is on.	*/
1536	ld	r1,PACAEMERGSP(r13)
1537	subi	r1,r1,STACK_FRAME_OVERHEAD
1538
1539	b	.__secondary_start
1540
1541#endif /* CONFIG_PPC_PMAC */
1542
1543/*
1544 * This function is called after the master CPU has released the
1545 * secondary processors.  The execution environment is relocation off.
1546 * The paca for this processor has the following fields initialized at
1547 * this point:
1548 *   1. Processor number
1549 *   2. Segment table pointer (virtual address)
1550 * On entry the following are set:
1551 *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
1552 *   r24   = cpu# (in Linux terms)
1553 *   r13   = paca virtual address
1554 *   SPRG3 = paca virtual address
1555 */
1556_GLOBAL(__secondary_start)
1557
1558	HMT_MEDIUM			/* Set thread priority to MEDIUM */
1559
1560	ld	r2,PACATOC(r13)
1561	li	r6,0
1562	stb	r6,PACAPROCENABLED(r13)
1563
1564#ifndef CONFIG_PPC_ISERIES
1565	/* Initialize the page table pointer register. */
1566	LOADADDR(r6,_SDR1)
1567	ld	r6,0(r6)		/* get the value of _SDR1	 */
1568	mtspr	SPRN_SDR1,r6		/* set the htab location	 */
1569#endif
1570	/* Initialize the first segment table (or SLB) entry		 */
1571	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
1572	bl	.stab_initialize
1573
1574	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
1575	LOADADDR(r3,current_set)
1576	sldi	r28,r24,3		/* get current_set[cpu#]	 */
1577	ldx	r1,r3,r28
1578	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1579	std	r1,PACAKSAVE(r13)
1580
1581	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
1582	ori	r4,r3,1			/* turn on valid bit		 */
1583
1584#ifdef CONFIG_PPC_ISERIES
1585	li	r0,-1			/* hypervisor call */
1586	li	r3,1
1587	sldi	r3,r3,63		/* 0x8000000000000000 */
1588	ori	r3,r3,4			/* 0x8000000000000004 */
1589	sc				/* HvCall_setASR */
1590#else
1591	/* set the ASR */
	/* On SStar/IStar/Pulsar LPARs the ASR must be set via the
	 * H_SET_ASR hcall; everywhere else mtasr works directly. */
1592	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
1593	ld	r3,0(r3)
1594	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
1595	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
1596	beq	98f			/* branch if result is 0  */
1597	mfspr	r3,SPRN_PVR
1598	srwi	r3,r3,16
1599	cmpwi	r3,0x37			/* SStar  */
1600	beq	97f
1601	cmpwi	r3,0x36			/* IStar  */
1602	beq	97f
1603	cmpwi	r3,0x34			/* Pulsar */
1604	bne	98f
160597:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
1606	HVSC				/* Invoking hcall */
1607	b	99f
160898:	/* !(rpa hypervisor) || !(star)  */
1609	mtasr	r4			/* set the stab location	 */
161099:
1611#endif
1612	li	r7,0
1613	mtlr	r7
1614
1615	/* enable MMU and jump to start_secondary */
1616	LOADADDR(r3,.start_secondary_prolog)
1617	SET_REG_TO_CONST(r4, MSR_KERNEL)
1618#ifdef DO_SOFT_DISABLE
1619	ori	r4,r4,MSR_EE
1620#endif
1621	mtspr	SPRN_SRR0,r3
1622	mtspr	SPRN_SRR1,r4
1623	rfid
1624	b	.	/* prevent speculative execution */
1625
1626/*
1627 * Running with relocation on at this point.  All we want to do is
1628 * zero the stack back-chain pointer before going into C code.
 * start_secondary() never returns.
1629 */
1630_GLOBAL(start_secondary_prolog)
1631	li	r3,0
1632	std	r3,0(r1)		/* Zero the stack frame pointer	*/
1633	bl	.start_secondary
1634#endif
1635
1636/*
1637 * This subroutine clobbers r11 and r12
 *
 * Sets MSR_SF (64-bit mode) and MSR_ISF (64-bit interrupt mode) in the
 * MSR.  The bits are built with rldicr because they sit above the range
 * an immediate ori/oris can reach.
1638 */
1639_GLOBAL(enable_64b_mode)
1640	mfmsr	r11			/* grab the current MSR */
1641	li	r12,1
1642	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1643	or	r11,r11,r12
1644	li	r12,1
1645	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1646	or	r11,r11,r12
1647	mtmsrd	r11
1648	isync
1649	blr
1650
1651#ifdef CONFIG_PPC_MULTIPLATFORM
1652/*
1653 * This is where the main kernel code starts.
 *
 * Still running in real mode here.  Clears BSS, builds a physical
 * stack/TOC, identifies the CPU, locates the boot cpu's paca, runs
 * early_setup(), programs ASR/SDR1 as the platform requires, and
 * finally rfid's to start_here_common with translation enabled.
1654 */
1655_STATIC(start_here_multiplatform)
1656	/* get a new offset, now that the kernel has moved. */
1657	bl	.reloc_offset
1658	mr	r26,r3
1659
1660	/* Clear out the BSS. It may have been done in prom_init,
1661	 * already but that's irrelevant since prom_init will soon
1662	 * be detached from the kernel completely. Besides, we need
1663	 * to clear it now for kexec-style entry.
1664	 */
1665	LOADADDR(r11,__bss_stop)
1666	LOADADDR(r8,__bss_start)
1667	sub	r11,r11,r8		/* bss size			*/
1668	addi	r11,r11,7		/* round up to an even double word */
1669	rldicl. r11,r11,61,3		/* shift right by 3		*/
1670	beq	4f
1671	addi	r8,r8,-8
1672	li	r0,0
1673	mtctr	r11			/* zero this many doublewords	*/
16743:	stdu	r0,8(r8)
1675	bdnz	3b
16764:
1677
1678	mfmsr	r6
1679	ori	r6,r6,MSR_RI
1680	mtmsrd	r6			/* RI on */
1681
1682#ifdef CONFIG_HMT
1683	/* Start up the second thread on cpu 0 */
1684	mfspr	r3,SPRN_PVR
1685	srwi	r3,r3,16
1686	cmpwi	r3,0x34			/* Pulsar  */
1687	beq	90f
1688	cmpwi	r3,0x36			/* Icestar */
1689	beq	90f
1690	cmpwi	r3,0x37			/* SStar   */
1691	beq	90f
1692	b	91f			/* HMT not supported */
169390:	li	r3,0
1694	bl	.hmt_start_secondary
169591:
1696#endif
1697
1698	/* The following gets the stack and TOC set up with the regs */
1699	/* pointing to the real addr of the kernel stack.  This is   */
1700	/* all done to support the C function call below which sets  */
1701	/* up the htab.  This is done because we have relocated the  */
1702	/* kernel but are still running in real mode. */
1703
1704	LOADADDR(r3,init_thread_union)
1705	add	r3,r3,r26
1706
1707	/* set up a stack pointer (physical address) */
1708	addi	r1,r3,THREAD_SIZE
1709	li	r0,0
1710	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1711
1712	/* set up the TOC (physical address) */
1713	LOADADDR(r2,__toc_start)
1714	addi	r2,r2,0x4000
1715	addi	r2,r2,0x4000
1716	add	r2,r2,r26
1717
1718	LOADADDR(r3,cpu_specs)
1719	add	r3,r3,r26
1720	LOADADDR(r4,cur_cpu_spec)
1721	add	r4,r4,r26
1722	mr	r5,r26
1723	bl	.identify_cpu
1724
1725	/* Save some low level config HIDs of CPU0 to be copied to
1726	 * other CPUs later on, or used for suspend/resume
1727	 */
1728	bl	.__save_cpu_setup
1729	sync
1730
1731	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
1732	 * note that boot_cpuid can always be 0 nowadays since there is
1733	 * nowhere it can be initialized differently before we reach this
1734	 * code
1735	 */
1736	LOADADDR(r27, boot_cpuid)
1737	add	r27,r27,r26
1738	lwz	r27,0(r27)
1739
1740	LOADADDR(r24, paca) 		/* Get base vaddr of paca array	 */
1741	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
1742	add	r13,r13,r24		/* for this processor.		 */
1743	add	r13,r13,r26		/* convert to physical addr	 */
1744	mtspr	SPRN_SPRG3,r13		/* PPPBBB: Temp... -Peter */
1745
1746	/* Do very early kernel initializations, including initial hash table,
1747	 * stab and slb setup before we turn on relocation.	*/
1748
1749	/* Restore parameters passed from prom_init/kexec */
1750	mr	r3,r31
1751 	bl	.early_setup
1752
1753	/* set the ASR */
	/* Same star/LPAR dance as in __secondary_start: use the
	 * H_SET_ASR hcall on SStar/IStar/Pulsar LPARs, mtasr otherwise */
1754	ld	r3,PACASTABREAL(r13)
1755	ori	r4,r3,1			/* turn on valid bit		 */
1756	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
1757	ld	r3,0(r3)
1758	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
1759	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
1760	beq	98f			/* branch if result is 0  */
1761	mfspr	r3,SPRN_PVR
1762	srwi	r3,r3,16
1763	cmpwi	r3,0x37			/* SStar */
1764	beq	97f
1765	cmpwi	r3,0x36			/* IStar  */
1766	beq	97f
1767	cmpwi	r3,0x34			/* Pulsar */
1768	bne	98f
176997:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
1770	HVSC				/* Invoking hcall */
1771	b	99f
177298:				/* !(rpa hypervisor) || !(star) */
1773	mtasr	r4			/* set the stab location	*/
177499:
1775	/* Set SDR1 (hash table pointer) */
	/* Only bare-metal (non-LPAR) sets SDR1 itself */
1776	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
1777	ld	r3,0(r3)
1778	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
1779	/* Test if bit 0 is set (LPAR bit) */
1780	andi.	r3,r3,PLATFORM_LPAR
1781	bne	98f			/* branch if result is !0  */
1782	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
1783	add	r6,r6,r26
1784	ld	r6,0(r6)		/* get the value of _SDR1 */
1785	mtspr	SPRN_SDR1,r6		/* set the htab location  */
178698:
	/* rfid into start_here_common with translation on (MSR_KERNEL) */
1787	LOADADDR(r3,.start_here_common)
1788	SET_REG_TO_CONST(r4, MSR_KERNEL)
1789	mtspr	SPRN_SRR0,r3
1790	mtspr	SPRN_SRR1,r4
1791	rfid
1792	b	.	/* prevent speculative execution */
1793#endif /* CONFIG_PPC_MULTIPLATFORM */
1794
	/* This is where all platforms converge execution */
/*
 * Translation is on from here.  Set up the virtual stack and TOC,
 * apply CPU-feature fixups, point SPRG3/r13 at the boot cpu's paca,
 * install init_task as current, call setup_system() and finally
 * start_kernel(), which never returns.
 */
1796_STATIC(start_here_common)
1797	/* relocation is on at this point */
1798
1799	/* The following code sets up the SP and TOC now that we are */
1800	/* running with translation enabled. */
1801
1802	LOADADDR(r3,init_thread_union)
1803
1804	/* set up the stack */
1805	addi	r1,r3,THREAD_SIZE
1806	li	r0,0
1807	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
1808
1809	/* Apply the CPUs-specific fixups (nop out sections not relevant
1810	 * to this CPU
1811	 */
1812	li	r3,0
1813	bl	.do_cpu_ftr_fixups
1814
1815	LOADADDR(r26, boot_cpuid)
1816	lwz	r26,0(r26)
1817
1818	LOADADDR(r24, paca) 		/* Get base vaddr of paca array  */
1819	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
1820	add	r13,r13,r24		/* for this processor.		 */
1821	mtspr	SPRN_SPRG3,r13
1822
1823	/* ptr to current */
1824	LOADADDR(r4,init_task)
1825	std	r4,PACACURRENT(r13)
1826
1827	/* Load the TOC */
1828	ld	r2,PACATOC(r13)
1829	std	r1,PACAKSAVE(r13)
1830
1831	bl	.setup_system
1832
1833	/* Load up the kernel context */
18345:
1835#ifdef DO_SOFT_DISABLE
1836	li	r5,0
1837	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
1838	mfmsr	r5
1839	ori	r5,r5,MSR_EE		/* Hard Enabled */
1840	mtmsrd	r5
1841#endif
1842
1843	bl .start_kernel
/*
 * Hardware-multithreading init: records this thread's physical id
 * (from PIR, masked per CPU model) in hmt_thread_data[], kicks off the
 * secondary hardware thread, then enters pSeries_secondary_smp_init.
 * Without CONFIG_HMT this reduces to the tail branch.
 */
1845_GLOBAL(hmt_init)
1846#ifdef CONFIG_HMT
1847	LOADADDR(r5, hmt_thread_data)
1848	mfspr	r7,SPRN_PVR
1849	srwi	r7,r7,16
1850	cmpwi	r7,0x34			/* Pulsar  */
1851	beq	90f
1852	cmpwi	r7,0x36			/* Icestar */
1853	beq	91f
1854	cmpwi	r7,0x37			/* SStar   */
1855	beq	91f
1856	b	101f
185790:	mfspr	r6,SPRN_PIR
1858	andi.	r6,r6,0x1f
1859	b	92f
186091:	mfspr	r6,SPRN_PIR
1861	andi.	r6,r6,0x3ff
186292:	sldi	r4,r24,3
1863	stwx	r6,r5,r4
1864	bl	.hmt_start_secondary
1865	b	101f
1866
/* Secondary hardware thread parks here (via NIADORM, see
 * hmt_start_secondary below): search hmt_thread_data[] for our PIR
 * and pick up the matching logical cpu number into r24. */
1867__hmt_secondary_hold:
1868	LOADADDR(r5, hmt_thread_data)
1869	clrldi	r5,r5,4
1870	li	r7,0
1871	mfspr	r6,SPRN_PIR
1872	mfspr	r8,SPRN_PVR
1873	srwi	r8,r8,16
1874	cmpwi	r8,0x34
1875	bne	93f
1876	andi.	r6,r6,0x1f
1877	b	103f
	/* NOTE(review): mask is 0x3f here but the hmt_init path above
	 * uses 0x3ff for the same non-Pulsar case — confirm intended. */
187893:	andi.	r6,r6,0x3f
1879
1880103:	lwzx	r8,r5,r7
1881	cmpw	r8,r6
1882	beq	104f
1883	addi	r7,r7,8
1884	b	103b
1885
1886104:	addi	r7,r7,4
1887	lwzx	r9,r5,r7
1888	mr	r24,r9
1889101:
1890#endif
1891	mr	r3,r24
1892	b	.pSeries_secondary_smp_init
1893
1894#ifdef CONFIG_HMT
/*
 * Wake the second hardware thread: point its dormant-resume address
 * (NIADORM) at __hmt_secondary_hold, unmask it in MSRDORM, program the
 * thread switch control/timeout registers (TSC/TST), enable HMT in
 * HID0 and finally nudge CTRL.  Magic constants are model-specific
 * SPR encodings for the star CPUs handled by hmt_init.
 */
1895_GLOBAL(hmt_start_secondary)
1896	LOADADDR(r4,__hmt_secondary_hold)
1897	clrldi	r4,r4,4
1898	mtspr	SPRN_NIADORM, r4
1899	mfspr	r4, SPRN_MSRDORM
1900	li	r5, -65
1901	and	r4, r4, r5
1902	mtspr	SPRN_MSRDORM, r4
1903	lis	r4,0xffef
1904	ori	r4,r4,0x7403
1905	mtspr	SPRN_TSC, r4
1906	li	r4,0x1f4
1907	mtspr	SPRN_TST, r4
1908	mfspr	r4, SPRN_HID0
1909	ori	r4, r4, 0x1
1910	mtspr	SPRN_HID0, r4
1911	mfspr	r4, SPRN_CTRLF
1912	oris	r4, r4, 0x40
1913	mtspr	SPRN_CTRLT, r4
1914	blr
1915#endif
1916
1917#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
1918_GLOBAL(smp_release_cpus)
1919	/* All secondary cpus are spinning on a common
1920	 * spinloop, release them all now so they can start
1921	 * to spin on their individual paca spinloops.
1922	 * For non SMP kernels, the secondary cpus never
1923	 * get out of the common spinloop.
1924	 * XXX This does nothing useful on iSeries, secondaries are
1925	 * already waiting on their paca.
1926	 */
1927	li	r3,1
1928	LOADADDR(r5,__secondary_hold_spinloop)
1929	std	r3,0(r5)
1930	sync
1931	blr
1932#endif /* CONFIG_KEXEC || CONFIG_SMP */
1933
1934
1935/*
1936 * We put a few things here that have to be page-aligned.
1937 * This stuff goes at the beginning of the bss, which is page-aligned.
1938 */
1939 .section ".bss"
1940
1941 .align PAGE_SHIFT
1942
1943 .globl empty_zero_page
1944empty_zero_page:
1945 .space PAGE_SIZE
1946
1947 .globl swapper_pg_dir
1948swapper_pg_dir:
1949 .space PAGE_SIZE
1950
1951/*
1952 * This space gets a copy of optional info passed to us by the bootstrap
1953 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1954 */
1955 .globl cmd_line
1956cmd_line:
1957 .space COMMAND_LINE_SIZE
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
new file mode 100644
index 000000000000..de0978742221
--- /dev/null
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -0,0 +1,860 @@
1/*
2 * arch/powerpc/kernel/head_8xx.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications by Dan Malek
12 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains low-level support and setup for PowerPC 8xx
15 * embedded processors, including trap and interrupt dispatch.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24#include <linux/config.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cache.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/thread_info.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34
35/* Macro to make the code more readable. */
36#ifdef CONFIG_8xx_CPU6
/*
 * DO_8xx_CPU6(val, reg): CPU6-errata work-around.  Store a magic code
 * for the upcoming mtspr to a scratch word at 12(r0) and read it back;
 * 'reg' is clobbered.  Expands to nothing when CONFIG_8xx_CPU6 is off.
 * NOTE(review): per-SPR magic values -- confirm against the CPU6 errata.
 */
37#define DO_8xx_CPU6(val, reg) \
38 li reg, val; \
39 stw reg, 12(r0); \
40 lwz reg, 12(r0);
41#else
42#define DO_8xx_CPU6(val, reg)
43#endif
44 .text
45 .globl _stext
46_stext:
47 .text
48 .globl _start
49_start:
50
51/* MPC8xx
52 * This port was done on an MBX board with an 860. Right now I only
53 * support an ELF compressed (zImage) boot from EPPC-Bug because the
54 * code there loads up some registers before calling us:
55 * r3: ptr to board info data
56 * r4: initrd_start or if no initrd then 0
57 * r5: initrd_end - unused if r4 is 0
58 * r6: Start of command line string
59 * r7: End of command line string
60 *
61 * I decided to use conditional compilation instead of checking PVR and
62 * adding more processor specific branches around code I don't need.
63 * Since this is an embedded processor, I also appreciate any memory
64 * savings I can get.
65 *
66 * The MPC8xx does not have any BATs, but it supports large page sizes.
67 * We first initialize the MMU to support 8M byte pages, then load one
68 * entry into each of the instruction and data TLBs to map the first
69 * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
70 * the "internal" processor registers before MMU_init is called.
71 *
72 * The TLB code currently contains a major hack. Since I use the condition
73 * code register, I have to save and restore it. I am out of registers, so
74 * I just store it in memory location 0 (the TLB handlers are not reentrant).
75 * To avoid making any decisions, I need to use the "segment" valid bit
76 * in the first level table, but that would require many changes to the
77 * Linux page directory/table functions that I don't want to do right now.
78 *
79 * I used to use SPRG2 for a temporary register in the TLB handler, but it
80 * has since been put to other uses. I now use a hack to save a register
81 * and the CCR at memory location 0.....Someday I'll fix this.....
82 * -- Dan
83 */
/*
 * Kernel entry.  r3..r7 carry the boot parameters described in the
 * comment block above (board info, initrd range, command line bounds);
 * preserve them in high registers across MMU setup.
 */
84 .globl __start
85__start:
86 mr r31,r3 /* save parameters */
87 mr r30,r4
88 mr r29,r5
89 mr r28,r6
90 mr r27,r7
91
92 /* We have to turn on the MMU right away so we get cache modes
93 * set correctly.
94 */
95 bl initial_mmu
96
97/* We now have the lower 8 Meg mapped into TLB entries, and the caches
98 * ready to work.
99 */
100
101turn_on_mmu:
102 mfmsr r0
103 ori r0,r0,MSR_DR|MSR_IR /* enable data and instruction translation */
104 mtspr SPRN_SRR1,r0 /* becomes the MSR after rfi */
105 lis r0,start_here@h
106 ori r0,r0,start_here@l
107 mtspr SPRN_SRR0,r0 /* rfi jumps here */
108 SYNC
109 rfi /* enables MMU */
110
111/*
112 * Exception entry code. This code runs with address translation
113 * turned off, i.e. using physical addresses.
114 * We assume sprg3 has the physical address of the current
115 * task's thread_struct.
116 */
/*
 * EXCEPTION_PROLOG: save r10/r11 in SPRG0/1 and CR in r10, then build
 * the exception frame.  Runs with translation off (physical addresses).
 */
117#define EXCEPTION_PROLOG \
118 mtspr SPRN_SPRG0,r10; \
119 mtspr SPRN_SPRG1,r11; \
120 mfcr r10; \
121 EXCEPTION_PROLOG_1; \
122 EXCEPTION_PROLOG_2
123
/*
 * EXCEPTION_PROLOG_1: choose the stack -- tophys(r1) when coming from
 * the kernel, else the faulting task's kernel stack found via SPRG3
 * (phys thread_struct) -- and carve an INT_FRAME_SIZE frame.  Leaves
 * cr0.eq set iff we came from the kernel, and r11 = phys frame address.
 */
124#define EXCEPTION_PROLOG_1 \
125 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
126 andi. r11,r11,MSR_PR; \
127 tophys(r11,r1); /* use tophys(r1) if kernel */ \
128 beq 1f; \
129 mfspr r11,SPRN_SPRG3; \
130 lwz r11,THREAD_INFO-THREAD(r11); \
131 addi r11,r11,THREAD_SIZE; \
132 tophys(r11,r11); \
1331: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
134
135
/*
 * EXCEPTION_PROLOG_2: spill registers into the frame, load SRR0/SRR1
 * into r12/r9, point r1 at the new (virtual) kernel stack, and drop to
 * MSR_KERNEL without IR/DR so further exceptions can be taken.
 */
136#define EXCEPTION_PROLOG_2 \
137 CLR_TOP32(r11); \
138 stw r10,_CCR(r11); /* save registers */ \
139 stw r12,GPR12(r11); \
140 stw r9,GPR9(r11); \
141 mfspr r10,SPRN_SPRG0; \
142 stw r10,GPR10(r11); \
143 mfspr r12,SPRN_SPRG1; \
144 stw r12,GPR11(r11); \
145 mflr r10; \
146 stw r10,_LINK(r11); \
147 mfspr r12,SPRN_SRR0; \
148 mfspr r9,SPRN_SRR1; \
149 stw r1,GPR1(r11); \
150 stw r1,0(r11); \
151 tovirt(r1,r11); /* set new kernel sp */ \
152 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
153 MTMSRD(r10); /* (except for mach check in rtas) */ \
154 stw r0,GPR0(r11); \
155 SAVE_4GPRS(3, r11); \
156 SAVE_2GPRS(7, r11)
157
158/*
159 * Note: code which follows this uses cr0.eq (set if from kernel),
160 * r11, r12 (SRR0), and r9 (SRR1).
161 *
162 * Note2: once we have set r1 we are in a position to take exceptions
163 * again, and we could thus set MSR:RI at that point.
164 */
165
166/*
167 * Exception vectors.
168 */
/*
 * EXCEPTION(n, label, hdlr, xfer): emit a vector at offset n that saves
 * state with EXCEPTION_PROLOG, points r3 at the pt_regs, and transfers
 * to hdlr through one of the EXC_XFER_* helpers below.
 */
169#define EXCEPTION(n, label, hdlr, xfer) \
170 . = n; \
171label: \
172 EXCEPTION_PROLOG; \
173 addi r3,r1,STACK_FRAME_OVERHEAD; \
174 xfer(n, hdlr)
175
/*
 * EXC_XFER_TEMPLATE: record 'trap' in the frame, build the MSR to run
 * the handler with (copyee optionally copies EE from the saved SRR1 in
 * r9), and branch to the transfer routine; the handler and return-path
 * addresses follow inline as data words.
 */
176#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
177 li r10,trap; \
178 stw r10,TRAP(r11); \
179 li r10,MSR_KERNEL; \
180 copyee(r10, r9); \
181 bl tfer; \
182i##n: \
183 .long hdlr; \
184 .long ret
185
/* COPY_EE inserts bit 16 (MSR_EE) of s into d; NOCOPY leaves d alone. */
186#define COPY_EE(d, s) rlwimi d,s,0,16,16
187#define NOCOPY(d, s)
188
/* Full-save transfer; trap value is n. */
189#define EXC_XFER_STD(n, hdlr) \
190 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
191 ret_from_except_full)
192
/* Light transfer; trap value n+1 marks the LITE variant. */
193#define EXC_XFER_LITE(n, hdlr) \
194 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
195 ret_from_except)
196
/* As _STD but the handler runs with the interrupted EE state. */
197#define EXC_XFER_EE(n, hdlr) \
198 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
199 ret_from_except_full)
200
/* As _LITE but copies EE from the saved SRR1. */
201#define EXC_XFER_EE_LITE(n, hdlr) \
202 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
203 ret_from_except)
205/* System reset */
206 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
207
208/* Machine check */
209 . = 0x200
210MachineCheck:
211 EXCEPTION_PROLOG
212 mfspr r4,SPRN_DAR /* faulting data address */
213 stw r4,_DAR(r11)
214 mfspr r5,SPRN_DSISR /* fault status */
215 stw r5,_DSISR(r11)
216 addi r3,r1,STACK_FRAME_OVERHEAD
217 EXC_XFER_STD(0x200, machine_check_exception)
218
219/* Data access exception.
220 * This is "never generated" by the MPC8xx. We jump to it for other
221 * translation errors.
222 */
223 . = 0x300
224DataAccess:
225 EXCEPTION_PROLOG
226 mfspr r10,SPRN_DSISR
227 stw r10,_DSISR(r11)
228 mr r5,r10 /* DSISR is handle_page_fault's 3rd arg */
229 mfspr r4,SPRN_DAR /* faulting address is the 2nd arg */
230 EXC_XFER_EE_LITE(0x300, handle_page_fault)
231
232/* Instruction access exception.
233 * This is "never generated" by the MPC8xx. We jump to it for other
234 * translation errors.
235 */
236 . = 0x400
237InstructionAccess:
238 EXCEPTION_PROLOG
239 mr r4,r12 /* faulting PC (saved SRR0) */
240 mr r5,r9 /* saved SRR1 */
241 EXC_XFER_EE_LITE(0x400, handle_page_fault)
242
243/* External interrupt */
244 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
245
246/* Alignment exception */
247 . = 0x600
248Alignment:
249 EXCEPTION_PROLOG
250 mfspr r4,SPRN_DAR /* misaligned access address */
251 stw r4,_DAR(r11)
252 mfspr r5,SPRN_DSISR
253 stw r5,_DSISR(r11)
254 addi r3,r1,STACK_FRAME_OVERHEAD
255 EXC_XFER_EE(0x600, alignment_exception)
256
257/* Program check exception */
258 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
259
260/* No FPU on MPC8xx. This exception is not supposed to happen.
261*/
262 EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
263
264/* Decrementer */
265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
266
267 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
268 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
269
270/* System call */
271 . = 0xc00
272SystemCall:
273 EXCEPTION_PROLOG
274 EXC_XFER_EE_LITE(0xc00, DoSyscall)
275
276/* Single step - not used on 601 */
277 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
278 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
279 EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
280
281/* On the MPC8xx, this is a software emulation interrupt. It occurs
282 * for all unimplemented and illegal instructions.
283 */
284 EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
285
286 . = 0x1100
287/*
288 * For the MPC8xx, this is a software tablewalk to load the instruction
289 * TLB. It is modelled after the example in the Motorola manual. The task
290 * switch loads the M_TWB register with the pointer to the first level table.
291 * If we discover there is no second level table (value is zero) or if there
292 * is an invalid pte, we load that into the TLB, which causes another fault
293 * into the TLB Error interrupt where we can handle such problems.
294 * We have to use the MD_xxx registers for the tablewalk because the
295 * equivalent MI_xxx registers only perform the attribute functions.
296 */
/*
 * Instruction TLB miss: software tablewalk, done through the MD_* walk
 * registers since the MI_* registers cannot walk (see comment above).
 * Scratch state lives at physical address 0: CR at 0(r0), r11 at 4(r0),
 * r10 in M_TW, and r3 at 8(r0) on CPU6-errata parts.  Not reentrant.
 */
297InstructionTLBMiss:
298#ifdef CONFIG_8xx_CPU6
299 stw r3, 8(r0)
300#endif
301 DO_8xx_CPU6(0x3f80, r3)
302 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
303 mfcr r10 /* save CR at phys 0 */
304 stw r10, 0(r0)
305 stw r11, 4(r0)
306 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
307 DO_8xx_CPU6(0x3780, r3)
308 mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
309 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
310
311 /* If we are faulting a kernel address, we have to use the
312 * kernel page tables.
313 */
314 andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
315 beq 3f
316 lis r11, swapper_pg_dir@h
317 ori r11, r11, swapper_pg_dir@l
318 rlwimi r10, r11, 0, 2, 19 /* substitute the kernel pgd base */
3193:
320 lwz r11, 0(r10) /* Get the level 1 entry */
321 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
322 beq 2f /* If zero, don't try to find a pte */
323
324 /* We have a pte table, so load the MI_TWC with the attributes
325 * for this "segment."
326 */
327 ori r11,r11,1 /* Set valid bit */
328 DO_8xx_CPU6(0x2b80, r3)
329 mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
330 DO_8xx_CPU6(0x3b80, r3)
331 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
332 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
333 lwz r10, 0(r11) /* Get the pte */
334
335 ori r10, r10, _PAGE_ACCESSED /* mark the pte referenced... */
336 stw r10, 0(r11) /* ...and write it back */
337
338 /* The Linux PTE won't go exactly into the MMU TLB.
339 * Software indicator bits 21, 22 and 28 must be clear.
340 * Software indicator bits 24, 25, 26, and 27 must be
341 * set. All other Linux PTE bits control the behavior
342 * of the MMU.
343 */
3442: li r11, 0x00f0
345 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
346 DO_8xx_CPU6(0x2d80, r3)
347 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
348
349 mfspr r10, SPRN_M_TW /* Restore registers */
350 lwz r11, 0(r0)
351 mtcr r11
352 lwz r11, 4(r0)
353#ifdef CONFIG_8xx_CPU6
354 lwz r3, 8(r0)
355#endif
356 rfi
357
358 . = 0x1200
/*
 * Data TLB miss: same software tablewalk as InstructionTLBMiss, with
 * the same scratch-at-phys-0 convention; additionally propagates the
 * Guarded bit from the PTE into the TWC.  Not reentrant.
 */
359DataStoreTLBMiss:
360#ifdef CONFIG_8xx_CPU6
361 stw r3, 8(r0)
362#endif
363 DO_8xx_CPU6(0x3f80, r3)
364 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
365 mfcr r10 /* save CR at phys 0 */
366 stw r10, 0(r0)
367 stw r11, 4(r0)
368 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
369
370 /* If we are faulting a kernel address, we have to use the
371 * kernel page tables.
372 */
373 andi. r11, r10, 0x0800
374 beq 3f
375 lis r11, swapper_pg_dir@h
376 ori r11, r11, swapper_pg_dir@l
377 rlwimi r10, r11, 0, 2, 19 /* substitute the kernel pgd base */
3783:
379 lwz r11, 0(r10) /* Get the level 1 entry */
380 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
381 beq 2f /* If zero, don't try to find a pte */
382
383 /* We have a pte table, so fetch the pte from the table.
384 */
385 ori r11, r11, 1 /* Set valid bit in physical L2 page */
386 DO_8xx_CPU6(0x3b80, r3)
387 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
388 mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
389 lwz r10, 0(r10) /* Get the pte */
390
391 /* Insert the Guarded flag into the TWC from the Linux PTE.
392 * It is bit 27 of both the Linux PTE and the TWC (at least
393 * I got that right :-). It will be better when we can put
394 * this into the Linux pgd/pmd and load it in the operation
395 * above.
396 */
397 rlwimi r11, r10, 0, 27, 27
398 DO_8xx_CPU6(0x3b80, r3)
399 mtspr SPRN_MD_TWC, r11
400
401 mfspr r11, SPRN_MD_TWC /* get the pte address again */
402 ori r10, r10, _PAGE_ACCESSED /* mark the pte referenced */
403 stw r10, 0(r11)
404
405 /* The Linux PTE won't go exactly into the MMU TLB.
406 * Software indicator bits 21, 22 and 28 must be clear.
407 * Software indicator bits 24, 25, 26, and 27 must be
408 * set. All other Linux PTE bits control the behavior
409 * of the MMU.
410 */
4112: li r11, 0x00f0
412 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
413 DO_8xx_CPU6(0x3d80, r3)
414 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
415
416 mfspr r10, SPRN_M_TW /* Restore registers */
417 lwz r11, 0(r0)
418 mtcr r11
419 lwz r11, 4(r0)
420#ifdef CONFIG_8xx_CPU6
421 lwz r3, 8(r0)
422#endif
423 rfi
424
425/* This is an instruction TLB error on the MPC8xx. This could be due
426 * to many reasons, such as executing guarded memory or illegal instruction
427 * addresses. There is nothing to do but handle a big time error fault.
428 */
429 . = 0x1300
430InstructionTLBError:
431 b InstructionAccess /* report via the 0x400 page-fault path */
432
433/* This is the data TLB error on the MPC8xx. This could be due to
434 * many reasons, including a dirty update to a pte. We can catch that
435 * one here, but anything else is an error. First, we track down the
436 * Linux pte. If it is valid, write access is allowed, but the
437 * page dirty bit is not set, we will set it and reload the TLB. For
438 * any other case, we bail out to a higher level function that can
439 * handle it.
440 */
441 . = 0x1400
/*
 * Data TLB error: handles the dirty-bit update for a write to a valid,
 * writable page inline (reloading the TLB); anything else falls through
 * to DataAccess.  Same scratch-at-phys-0 convention as the miss handlers.
 */
442DataTLBError:
443#ifdef CONFIG_8xx_CPU6
444 stw r3, 8(r0)
445#endif
446 DO_8xx_CPU6(0x3f80, r3)
447 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
448 mfcr r10 /* save CR at phys 0 */
449 stw r10, 0(r0)
450 stw r11, 4(r0)
451
452 /* First, make sure this was a store operation.
453 */
454 mfspr r10, SPRN_DSISR
455 andis. r11, r10, 0x0200 /* If set, indicates store op */
456 beq 2f
457
458 /* The EA of a data TLB miss is automatically stored in the MD_EPN
459 * register. The EA of a data TLB error is automatically stored in
460 * the DAR, but not the MD_EPN register. We must copy the 20 most
461 * significant bits of the EA from the DAR to MD_EPN before we
462 * start walking the page tables. We also need to copy the CASID
463 * value from the M_CASID register.
464 * Addendum: The EA of a data TLB error is _supposed_ to be stored
465 * in DAR, but it seems that this doesn't happen in some cases, such
466 * as when the error is due to a dcbi instruction to a page with a
467 * TLB that doesn't have the changed bit set. In such cases, there
468 * does not appear to be any way to recover the EA of the error
469 * since it is neither in DAR nor MD_EPN. As a workaround, the
470 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
471 * are initialized in mapin_ram(). This will avoid the problem,
472 * assuming we only use the dcbi instruction on kernel addresses.
473 */
474 mfspr r10, SPRN_DAR
475 rlwinm r11, r10, 0, 0, 19 /* keep the 20 high (page) bits of the EA */
476 ori r11, r11, MD_EVALID
477 mfspr r10, SPRN_M_CASID
478 rlwimi r11, r10, 0, 28, 31 /* merge the CASID into the low nibble */
479 DO_8xx_CPU6(0x3780, r3)
480 mtspr SPRN_MD_EPN, r11
481
482 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
483
484 /* If we are faulting a kernel address, we have to use the
485 * kernel page tables.
486 */
487 andi. r11, r10, 0x0800
488 beq 3f
489 lis r11, swapper_pg_dir@h
490 ori r11, r11, swapper_pg_dir@l
491 rlwimi r10, r11, 0, 2, 19 /* substitute the kernel pgd base */
4923:
493 lwz r11, 0(r10) /* Get the level 1 entry */
494 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
495 beq 2f /* If zero, bail */
496
497 /* We have a pte table, so fetch the pte from the table.
498 */
499 ori r11, r11, 1 /* Set valid bit in physical L2 page */
500 DO_8xx_CPU6(0x3b80, r3)
501 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
502 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
503 lwz r10, 0(r11) /* Get the pte */
504
505 andi. r11, r10, _PAGE_RW /* Is it writeable? */
506 beq 2f /* Bail out if not */
507
508 /* Update 'changed', among others.
509 */
510 ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
511 mfspr r11, SPRN_MD_TWC /* Get pte address again */
512 stw r10, 0(r11) /* and update pte in table */
513
514 /* The Linux PTE won't go exactly into the MMU TLB.
515 * Software indicator bits 21, 22 and 28 must be clear.
516 * Software indicator bits 24, 25, 26, and 27 must be
517 * set. All other Linux PTE bits control the behavior
518 * of the MMU.
519 */
520 li r11, 0x00f0
521 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
522 DO_8xx_CPU6(0x3d80, r3)
523 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
524
525 mfspr r10, SPRN_M_TW /* Restore registers */
526 lwz r11, 0(r0)
527 mtcr r11
528 lwz r11, 4(r0)
529#ifdef CONFIG_8xx_CPU6
530 lwz r3, 8(r0)
531#endif
532 rfi
/* Not a handleable dirty-bit fault: restore state, go to DataAccess. */
5332:
534 mfspr r10, SPRN_M_TW /* Restore registers */
535 lwz r11, 0(r0)
536 mtcr r11
537 lwz r11, 4(r0)
538#ifdef CONFIG_8xx_CPU6
539 lwz r3, 8(r0)
540#endif
541 b DataAccess
542
/* Remaining vectors are unused; route them all to unknown_exception. */
543 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
544 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
545 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
546 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
547 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
548 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
549 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
550
551/* On the MPC8xx, these next four traps are used for development
552 * support of breakpoints and such. Someday I will get around to
553 * using them.
554 */
555 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
556 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
557 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
558 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
559
560 . = 0x2000
561
562 .globl giveup_fpu
/*
 * No FPU on the MPC8xx (see the 0x800 vector above); empty stub so
 * common code can call giveup_fpu unconditionally.
 */
563giveup_fpu:
564 blr
565
566/*
567 * This is where the main kernel code starts.
568 */
569start_here:
570 /* ptr to current */
571 lis r2,init_task@h
572 ori r2,r2,init_task@l
573
574 /* ptr to phys current thread */
575 tophys(r4,r2)
576 addi r4,r4,THREAD /* init task's THREAD */
577 mtspr SPRN_SPRG3,r4
578 li r3,0
579 mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
580
581 /* stack */
582 lis r1,init_thread_union@ha
583 addi r1,r1,init_thread_union@l
584 li r0,0
585 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
586
587 bl early_init /* We have to do this with MMU on */
588
589/*
590 * Decide what sort of machine this is and initialize the MMU.
591 */
592 mr r3,r31 /* boot parameters saved in __start */
593 mr r4,r30
594 mr r5,r29
595 mr r6,r28
596 mr r7,r27
597 bl machine_init
598 bl MMU_init
599
600/*
601 * Go back to running unmapped so we can load up new values
602 * and change to using our exception vectors.
603 * On the 8xx, all we have to do is invalidate the TLB to clear
604 * the old 8M byte TLB mappings and load the page table base register.
605 */
606 /* The right way to do this would be to track it down through
607 * init's THREAD like the context switch code does, but this is
608 * easier......until someone changes init's static structures.
609 */
610 lis r6, swapper_pg_dir@h
611 ori r6, r6, swapper_pg_dir@l
612 tophys(r6,r6)
613#ifdef CONFIG_8xx_CPU6
614 /* CPU6 errata store/readback before the mtspr (cf. DO_8xx_CPU6) */
615 lis r4, cpu6_errata_word@h
616 ori r4, r4, cpu6_errata_word@l
617 li r3, 0x3980
618 stw r3, 12(r4)
619 lwz r3, 12(r4)
620#endif
621 mtspr SPRN_M_TWB, r6 /* pgd base for the hardware tablewalk */
622 lis r4,2f@h
623 ori r4,r4,2f@l
624 tophys(r4,r4)
625 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
626 mtspr SPRN_SRR0,r4
627 mtspr SPRN_SRR1,r3
628 rfi /* jump to 2f, translation off */
/* Load up the kernel context */
6292:
630 SYNC /* Force all PTE updates to finish */
631 tlbia /* Clear all TLB entries */
632 sync /* wait for tlbia/tlbie to finish */
633 TLBSYNC /* ... on all CPUs */
634
635 /* set up the PTE pointers for the Abatron bdiGDB.
636 */
637 tovirt(r6,r6)
638 lis r5, abatron_pteptrs@h
639 ori r5, r5, abatron_pteptrs@l
640 stw r5, 0xf0(r0) /* Must match your Abatron config file */
641 tophys(r5,r5)
642 stw r6, 0(r5)
643
644/* Now turn on the MMU for real! */
645 li r4,MSR_KERNEL
646 lis r3,start_kernel@h
647 ori r3,r3,start_kernel@l
648 mtspr SPRN_SRR0,r3
649 mtspr SPRN_SRR1,r4
650 rfi /* enable MMU and jump to start_kernel */
651
652/* Set up the initial MMU state so we can do the first level of
653 * kernel initialization. This maps the first 8 MBytes of memory 1:1
654 * virtual to physical. Also, set the cache mode since that is defined
655 * by TLB entries and perform any additional mapping (like of the IMMR).
656 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
657 * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by
658 * these mappings is mapped by page tables.
659 */
/*
 * initial_mmu: see the block comment above.  Fix relative to the
 * original: in the CONFIG_PIN_TLB extra-data-pages section, the RPN is
 * computed in r11 but the original stored r8 (the EPN/vaddr) into
 * SPRN_MD_RPN, pinning entries that mapped to the wrong physical
 * address while r11 went unused.  Both stores now write r11.
 */
660initial_mmu:
661 tlbia /* Invalidate all TLB entries */
662#ifdef CONFIG_PIN_TLB
663 lis r8, MI_RSV4I@h
664 ori r8, r8, 0x1c00
665#else
666 li r8, 0
667#endif
668 mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
669
670#ifdef CONFIG_PIN_TLB
671 lis r10, (MD_RSV4I | MD_RESETVAL)@h
672 ori r10, r10, 0x1c00
673 mr r8, r10
674#else
675 lis r10, MD_RESETVAL@h
676#endif
677#ifndef CONFIG_8xx_COPYBACK
678 oris r10, r10, MD_WTDEF@h
679#endif
680 mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
681
682 /* Now map the lower 8 Meg into the TLBs. For this quick hack,
683 * we can load the instruction and data TLB registers with the
684 * same values.
685 */
686 lis r8, KERNELBASE@h /* Create vaddr for TLB */
687 ori r8, r8, MI_EVALID /* Mark it valid */
688 mtspr SPRN_MI_EPN, r8
689 mtspr SPRN_MD_EPN, r8
690 li r8, MI_PS8MEG /* Set 8M byte page */
691 ori r8, r8, MI_SVALID /* Make it valid */
692 mtspr SPRN_MI_TWC, r8
693 mtspr SPRN_MD_TWC, r8
694 li r8, MI_BOOTINIT /* Create RPN for address 0 */
695 mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
696 mtspr SPRN_MD_RPN, r8
697 lis r8, MI_Kp@h /* Set the protection mode */
698 mtspr SPRN_MI_AP, r8
699 mtspr SPRN_MD_AP, r8
700
701 /* Map another 8 MByte at the IMMR to get the processor
702 * internal registers (among other things).
703 */
704#ifdef CONFIG_PIN_TLB
705 addi r10, r10, 0x0100 /* advance MD_CTR index to next pinned entry */
706 mtspr SPRN_MD_CTR, r10
707#endif
708 mfspr r9, 638 /* Get current IMMR */
709 andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
710
711 mr r8, r9 /* Create vaddr for TLB */
712 ori r8, r8, MD_EVALID /* Mark it valid */
713 mtspr SPRN_MD_EPN, r8
714 li r8, MD_PS8MEG /* Set 8M byte page */
715 ori r8, r8, MD_SVALID /* Make it valid */
716 mtspr SPRN_MD_TWC, r8
717 mr r8, r9 /* Create paddr for TLB */
718 ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
719 mtspr SPRN_MD_RPN, r8
720
721#ifdef CONFIG_PIN_TLB
722 /* Map two more 8M kernel data pages.
723 */
724 addi r10, r10, 0x0100 /* next pinned entry */
725 mtspr SPRN_MD_CTR, r10
726
727 lis r8, KERNELBASE@h /* Create vaddr for TLB */
728 addis r8, r8, 0x0080 /* Add 8M */
729 ori r8, r8, MI_EVALID /* Mark it valid */
730 mtspr SPRN_MD_EPN, r8
731 li r9, MI_PS8MEG /* Set 8M byte page */
732 ori r9, r9, MI_SVALID /* Make it valid */
733 mtspr SPRN_MD_TWC, r9
734 li r11, MI_BOOTINIT /* Create RPN for address 0 */
735 addis r11, r11, 0x0080 /* Add 8M */
736 mtspr SPRN_MD_RPN, r11 /* BUG FIX: store the RPN (r11), not the EPN (r8) */
737
738 addis r8, r8, 0x0080 /* Add 8M */
739 mtspr SPRN_MD_EPN, r8
740 mtspr SPRN_MD_TWC, r9
741 addis r11, r11, 0x0080 /* Add 8M */
742 mtspr SPRN_MD_RPN, r11 /* BUG FIX: likewise use the computed RPN */
743#endif
744
745 /* Since the cache is enabled according to the information we
746 * just loaded into the TLB, invalidate and enable the caches here.
747 * We should probably check/set other modes....later.
748 */
749 lis r8, IDC_INVALL@h
750 mtspr SPRN_IC_CST, r8
751 mtspr SPRN_DC_CST, r8
752 lis r8, IDC_ENABLE@h
753 mtspr SPRN_IC_CST, r8
754#ifdef CONFIG_8xx_COPYBACK
755 mtspr SPRN_DC_CST, r8
756#else
757 /* For a debug option, I left this here to easily enable
758 * the write through cache mode
759 */
760 lis r8, DC_SFWT@h
761 mtspr SPRN_DC_CST, r8
762 lis r8, IDC_ENABLE@h
763 mtspr SPRN_DC_CST, r8
764#endif
765 blr
766
767
768/*
769 * Set up to use a given MMU context.
770 * r3 is context number, r4 is PGD pointer.
771 *
772 * We place the physical address of the new task page directory loaded
773 * into the MMU base register, and set the ASID compare register with
774 * the new "context."
775 */
776_GLOBAL(set_context)
777
778#ifdef CONFIG_BDI_SWITCH
779 /* Context switch the PTE pointer for the Abatron BDI2000.
780 * The PGDIR is passed as second argument.
781 */
782 lis r5, KERNELBASE@h
783 lwz r5, 0xf0(r5) /* abatron_pteptrs pointer stored at 0xf0 (see start_here) */
784 stw r4, 0x4(r5) /* slot 1 = current user pgdir */
785#endif
786
787#ifdef CONFIG_8xx_CPU6
788 /* CPU6 errata: store/readback the magic code before each mtspr. */
789 lis r6, cpu6_errata_word@h
790 ori r6, r6, cpu6_errata_word@l
791 tophys (r4, r4)
792 li r7, 0x3980
793 stw r7, 12(r6)
794 lwz r7, 12(r6)
795 mtspr SPRN_M_TWB, r4 /* Update MMU base address */
796 li r7, 0x3380
797 stw r7, 12(r6)
798 lwz r7, 12(r6)
799 mtspr SPRN_M_CASID, r3 /* Update context */
800#else
801 mtspr SPRN_M_CASID,r3 /* Update context */
802 tophys (r4, r4)
803 mtspr SPRN_M_TWB, r4 /* and pgd */
804#endif
805 SYNC
806 blr
806
807#ifdef CONFIG_8xx_CPU6
808/* It's here because it is unique to the 8xx.
809 * It is important we get called with interrupts disabled. I used to
810 * do that, but it appears that all code that calls this already had
811 * interrupt disabled.
812 */
813 .globl set_dec_cpu6
814set_dec_cpu6:
/* CPU6 errata store/readback before the mtspr (cf. DO_8xx_CPU6). */
815 lis r7, cpu6_errata_word@h
816 ori r7, r7, cpu6_errata_word@l
817 li r4, 0x2c00
818 stw r4, 8(r7)
819 lwz r4, 8(r7)
820 mtspr 22, r3 /* Update Decrementer */
821 SYNC
822 blr
823#endif
824
825/*
826 * We put a few things here that have to be page-aligned.
827 * This stuff goes at the beginning of the data segment,
828 * which is page-aligned.
829 */
830 .data
831 .globl sdata
832sdata:
833 .globl empty_zero_page
834empty_zero_page:
835 .space 4096
836
/* Kernel page directory, consulted by the TLB miss handlers above. */
837 .globl swapper_pg_dir
838swapper_pg_dir:
839 .space 4096
840
841/*
842 * This space gets a copy of optional info passed to us by the bootstrap
843 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
844 */
845 .globl cmd_line
846cmd_line:
847 .space 512
848
849/* Room for two PTE table pointers, usually the kernel and current user
850 * pointer to their respective root page table (pgdir).
851 */
852abatron_pteptrs:
853 .space 8
854
855#ifdef CONFIG_8xx_CPU6
/* Scratch words for the CPU6 errata store/readback (see DO_8xx_CPU6). */
856 .globl cpu6_errata_word
857cpu6_errata_word:
858 .space 16
859#endif
860
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
new file mode 100644
index 000000000000..5063c603fad4
--- /dev/null
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -0,0 +1,1063 @@
1/*
2 * arch/ppc/kernel/head_fsl_booke.S
3 *
4 * Kernel execution entry point code.
5 *
6 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
7 * Initial PowerPC version.
8 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Rewritten for PReP
10 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
11 *  Low-level exception handlers, MMU support, and rewrite.
12 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
13 * PowerPC 8xx modifications.
14 * Copyright (c) 1998-1999 TiVo, Inc.
15 * PowerPC 403GCX modifications.
16 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
17 * PowerPC 403GCX/405GP modifications.
18 * Copyright 2000 MontaVista Software Inc.
19 * PPC405 modifications
20 * PowerPC 403GCX/405GP modifications.
21 * Author: MontaVista Software, Inc.
22 * frank_rowand@mvista.com or source@mvista.com
23 * debbie_chu@mvista.com
24 * Copyright 2002-2004 MontaVista Software, Inc.
25 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
26 * Copyright 2004 Freescale Semiconductor, Inc
27 * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com>
28 *
29 * This program is free software; you can redistribute it and/or modify it
30 * under the terms of the GNU General Public License as published by the
31 * Free Software Foundation; either version 2 of the License, or (at your
32 * option) any later version.
33 */
34
35#include <linux/config.h>
36#include <linux/threads.h>
37#include <asm/processor.h>
38#include <asm/page.h>
39#include <asm/mmu.h>
40#include <asm/pgtable.h>
41#include <asm/cputable.h>
42#include <asm/thread_info.h>
43#include <asm/ppc_asm.h>
44#include <asm/asm-offsets.h>
45#include "head_booke.h"
46
47/* As with the other PowerPC ports, it is expected that when code
48 * execution begins here, the following registers contain valid, yet
49 * optional, information:
50 *
51 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
52 * r4 - Starting address of the init RAM disk
53 * r5 - Ending address of the init RAM disk
54 * r6 - Start of kernel command line string (e.g. "mem=128")
55 * r7 - End of kernel command line string
56 *
57 */
58 .text
59_GLOBAL(_stext)
60_GLOBAL(_start)
61 /*
62 * Reserve a word at a fixed location to store the address
63 * of abatron_pteptrs
64 */
65 nop
66/*
67 * Save parameters we are passed
68 */
69 mr r31,r3
70 mr r30,r4
71 mr r29,r5
72 mr r28,r6
73 mr r27,r7
74 li r24,0 /* CPU number */
75
76/* We try to not make any assumptions about how the boot loader
77 * setup or used the TLBs. We invalidate all mappings from the
78 * boot loader and load a single entry in TLB1[0] to map the
79 * first 16M of kernel memory. Any boot info passed from the
80 * bootloader needs to live in this first 16M.
81 *
82 * Requirement on bootloader:
83 * - The page we're executing in needs to reside in TLB1 and
84 * have IPROT=1. If not an invalidate broadcast could
85 * evict the entry we're currently executing in.
86 *
87 * r3 = Index of TLB1 were executing in
88 * r4 = Current MSR[IS]
89 * r5 = Index of TLB1 temp mapping
90 *
91 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
92 * if needed
93 */
94
95/* 1. Find the index of the entry we're executing in */
96 bl invstr /* Find our address */
97invstr: mflr r6 /* Make it accessible */
98 mfmsr r7
99 rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
100 mfspr r7, SPRN_PID0
101 slwi r7,r7,16
102 or r7,r7,r4
103 mtspr SPRN_MAS6,r7
104 tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
105#ifndef CONFIG_E200
106 mfspr r7,SPRN_MAS1
107 andis. r7,r7,MAS1_VALID@h
108 bne match_TLB
109 mfspr r7,SPRN_PID1
110 slwi r7,r7,16
111 or r7,r7,r4
112 mtspr SPRN_MAS6,r7
113 tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
114 mfspr r7,SPRN_MAS1
115 andis. r7,r7,MAS1_VALID@h
116 bne match_TLB
117 mfspr r7, SPRN_PID2
118 slwi r7,r7,16
119 or r7,r7,r4
120 mtspr SPRN_MAS6,r7
121 tlbsx 0,r6 /* Fall through, we had to match */
122#endif
123match_TLB:
124 mfspr r7,SPRN_MAS0
125 rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */
126
127 mfspr r7,SPRN_MAS1 /* Insure IPROT set */
128 oris r7,r7,MAS1_IPROT@h
129 mtspr SPRN_MAS1,r7
130 tlbwe
131
132/* 2. Invalidate all entries except the entry we're executing in */
133 mfspr r9,SPRN_TLB1CFG
134 andi. r9,r9,0xfff
135 li r6,0 /* Set Entry counter to 0 */
1361: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
137 rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
138 mtspr SPRN_MAS0,r7
139 tlbre
140 mfspr r7,SPRN_MAS1
141 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
142 cmpw r3,r6
143 beq skpinv /* Dont update the current execution TLB */
144 mtspr SPRN_MAS1,r7
145 tlbwe
146 isync
147skpinv: addi r6,r6,1 /* Increment */
148 cmpw r6,r9 /* Are we done? */
149 bne 1b /* If not, repeat */
150
151 /* Invalidate TLB0 */
152 li r6,0x04
153 tlbivax 0,r6
154#ifdef CONFIG_SMP
155 tlbsync
156#endif
157 /* Invalidate TLB1 */
158 li r6,0x0c
159 tlbivax 0,r6
160#ifdef CONFIG_SMP
161 tlbsync
162#endif
163 msync
164
165/* 3. Setup a temp mapping and jump to it */
166 andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
167 addi r5, r5, 0x1
168 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
169 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
170 mtspr SPRN_MAS0,r7
171 tlbre
172
173 /* Just modify the entry ID and EPN for the temp mapping */
174 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
175 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
176 mtspr SPRN_MAS0,r7
177 xori r6,r4,1 /* Setup TMP mapping in the other Address space */
178 slwi r6,r6,12
179 oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
180 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
181 mtspr SPRN_MAS1,r6
182 mfspr r6,SPRN_MAS2
183 li r7,0 /* temp EPN = 0 */
184 rlwimi r7,r6,0,20,31
185 mtspr SPRN_MAS2,r7
186 tlbwe
187
188 xori r6,r4,1
189 slwi r6,r6,5 /* setup new context with other address space */
190 bl 1f /* Find our address */
1911: mflr r9
192 rlwimi r7,r9,0,20,31
193 addi r7,r7,24
194 mtspr SPRN_SRR0,r7
195 mtspr SPRN_SRR1,r6
196 rfi
197
198/* 4. Clear out PIDs & Search info */
199 li r6,0
200 mtspr SPRN_PID0,r6
201#ifndef CONFIG_E200
202 mtspr SPRN_PID1,r6
203 mtspr SPRN_PID2,r6
204#endif
205 mtspr SPRN_MAS6,r6
206
207/* 5. Invalidate mapping we started in */
208 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
209 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
210 mtspr SPRN_MAS0,r7
211 tlbre
212 li r6,0
213 mtspr SPRN_MAS1,r6
214 tlbwe
215 /* Invalidate TLB1 */
216 li r9,0x0c
217 tlbivax 0,r9
218#ifdef CONFIG_SMP
219 tlbsync
220#endif
221 msync
222
223/* 6. Setup KERNELBASE mapping in TLB1[0] */
224 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
225 mtspr SPRN_MAS0,r6
226 lis r6,(MAS1_VALID|MAS1_IPROT)@h
227 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
228 mtspr SPRN_MAS1,r6
229 li r7,0
230 lis r6,KERNELBASE@h
231 ori r6,r6,KERNELBASE@l
232 rlwimi r6,r7,0,20,31
233 mtspr SPRN_MAS2,r6
234 li r7,(MAS3_SX|MAS3_SW|MAS3_SR)
235 mtspr SPRN_MAS3,r7
236 tlbwe
237
238/* 7. Jump to KERNELBASE mapping */
239 lis r7,MSR_KERNEL@h
240 ori r7,r7,MSR_KERNEL@l
241 bl 1f /* Find our address */
2421: mflr r9
243 rlwimi r6,r9,0,20,31
244 addi r6,r6,24
245 mtspr SPRN_SRR0,r6
246 mtspr SPRN_SRR1,r7
247 rfi /* start execution out of TLB1[0] entry */
248
249/* 8. Clear out the temp mapping */
250 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
251 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
252 mtspr SPRN_MAS0,r7
253 tlbre
254 mtspr SPRN_MAS1,r8
255 tlbwe
256 /* Invalidate TLB1 */
257 li r9,0x0c
258 tlbivax 0,r9
259#ifdef CONFIG_SMP
260 tlbsync
261#endif
262 msync
263
264 /* Establish the interrupt vector offsets */
265 SET_IVOR(0, CriticalInput);
266 SET_IVOR(1, MachineCheck);
267 SET_IVOR(2, DataStorage);
268 SET_IVOR(3, InstructionStorage);
269 SET_IVOR(4, ExternalInput);
270 SET_IVOR(5, Alignment);
271 SET_IVOR(6, Program);
272 SET_IVOR(7, FloatingPointUnavailable);
273 SET_IVOR(8, SystemCall);
274 SET_IVOR(9, AuxillaryProcessorUnavailable);
275 SET_IVOR(10, Decrementer);
276 SET_IVOR(11, FixedIntervalTimer);
277 SET_IVOR(12, WatchdogTimer);
278 SET_IVOR(13, DataTLBError);
279 SET_IVOR(14, InstructionTLBError);
280 SET_IVOR(15, Debug);
281 SET_IVOR(32, SPEUnavailable);
282 SET_IVOR(33, SPEFloatingPointData);
283 SET_IVOR(34, SPEFloatingPointRound);
284#ifndef CONFIG_E200
285 SET_IVOR(35, PerformanceMonitor);
286#endif
287
288 /* Establish the interrupt vector base */
289 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
290 mtspr SPRN_IVPR,r4
291
292 /* Setup the defaults for TLB entries */
293 li r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
294#ifdef CONFIG_E200
295 oris r2,r2,MAS4_TLBSELD(1)@h
296#endif
297 mtspr SPRN_MAS4, r2
298
299#if 0
300 /* Enable DOZE */
301 mfspr r2,SPRN_HID0
302 oris r2,r2,HID0_DOZE@h
303 mtspr SPRN_HID0, r2
304#endif
305#ifdef CONFIG_E200
306 /* enable dedicated debug exception handling resources (Debug APU) */
307 mfspr r2,SPRN_HID0
308 ori r2,r2,HID0_DAPUEN@l
309 mtspr SPRN_HID0,r2
310#endif
311
312#if !defined(CONFIG_BDI_SWITCH)
313 /*
314 * The Abatron BDI JTAG debugger does not tolerate others
315 * mucking with the debug registers.
316 */
317 lis r2,DBCR0_IDM@h
318 mtspr SPRN_DBCR0,r2
319 /* clear any residual debug events */
320 li r2,-1
321 mtspr SPRN_DBSR,r2
322#endif
323
324 /*
325 * This is where the main kernel code starts.
326 */
327
328 /* ptr to current */
329 lis r2,init_task@h
330 ori r2,r2,init_task@l
331
332 /* ptr to current thread */
333 addi r4,r2,THREAD /* init task's THREAD */
334 mtspr SPRN_SPRG3,r4
335
336 /* stack */
337 lis r1,init_thread_union@h
338 ori r1,r1,init_thread_union@l
339 li r0,0
340 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
341
342 bl early_init
343
344 mfspr r3,SPRN_TLB1CFG
345 andi. r3,r3,0xfff
346 lis r4,num_tlbcam_entries@ha
347 stw r3,num_tlbcam_entries@l(r4)
348/*
349 * Decide what sort of machine this is and initialize the MMU.
350 */
351 mr r3,r31
352 mr r4,r30
353 mr r5,r29
354 mr r6,r28
355 mr r7,r27
356 bl machine_init
357 bl MMU_init
358
359 /* Setup PTE pointers for the Abatron bdiGDB */
360 lis r6, swapper_pg_dir@h
361 ori r6, r6, swapper_pg_dir@l
362 lis r5, abatron_pteptrs@h
363 ori r5, r5, abatron_pteptrs@l
364 lis r4, KERNELBASE@h
365 ori r4, r4, KERNELBASE@l
366 stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
367 stw r6, 0(r5)
368
369 /* Let's move on */
370 lis r4,start_kernel@h
371 ori r4,r4,start_kernel@l
372 lis r3,MSR_KERNEL@h
373 ori r3,r3,MSR_KERNEL@l
374 mtspr SPRN_SRR0,r4
375 mtspr SPRN_SRR1,r3
376 rfi /* change context and jump to start_kernel */
377
378/* Macros to hide the PTE size differences
379 *
380 * FIND_PTE -- walks the page tables given EA & pgdir pointer
381 * r10 -- EA of fault
382 * r11 -- PGDIR pointer
383 * r12 -- free
384 * label 2: is the bailout case
385 *
386 * if we find the pte (fall through):
387 * r11 is low pte word
388 * r12 is pointer to the pte
389 */
390#ifdef CONFIG_PTE_64BIT
391#define PTE_FLAGS_OFFSET 4
392#define FIND_PTE \
393 rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \
394 lwzx r11, r12, r11; /* Get pgd/pmd entry */ \
395 rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \
396 beq 2f; /* Bail if no table */ \
397 rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \
398 lwz r11, 4(r12); /* Get pte entry */
399#else
400#define PTE_FLAGS_OFFSET 0
401#define FIND_PTE \
402 rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \
403 lwz r11, 0(r11); /* Get L1 entry */ \
404 rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \
405 beq 2f; /* Bail if no table */ \
406 rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \
407 lwz r11, 0(r12); /* Get Linux PTE */
408#endif
409
410/*
411 * Interrupt vector entry code
412 *
413 * The Book E MMUs are always on so we don't need to handle
414 * interrupts in real mode as with previous PPC processors. In
415 * this case we handle interrupts in the kernel virtual address
416 * space.
417 *
418 * Interrupt vectors are dynamically placed relative to the
419 * interrupt prefix as determined by the address of interrupt_base.
420 * The interrupt vectors offsets are programmed using the labels
421 * for each interrupt vector entry.
422 *
423 * Interrupt vectors must be aligned on a 16 byte boundary.
424 * We align on a 32 byte cache line boundary for good measure.
425 */
426
427interrupt_base:
428 /* Critical Input Interrupt */
429 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
430
431 /* Machine Check Interrupt */
432#ifdef CONFIG_E200
433 /* no RFMCI, MCSRRs on E200 */
434 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
435#else
436 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
437#endif
438
439 /* Data Storage Interrupt */
440 START_EXCEPTION(DataStorage)
441 mtspr SPRN_SPRG0, r10 /* Save some working registers */
442 mtspr SPRN_SPRG1, r11
443 mtspr SPRN_SPRG4W, r12
444 mtspr SPRN_SPRG5W, r13
445 mfcr r11
446 mtspr SPRN_SPRG7W, r11
447
448 /*
449 * Check if it was a store fault, if not then bail
450 * because a user tried to access a kernel or
451 * read-protected page. Otherwise, get the
452 * offending address and handle it.
453 */
454 mfspr r10, SPRN_ESR
455 andis. r10, r10, ESR_ST@h
456 beq 2f
457
458 mfspr r10, SPRN_DEAR /* Get faulting address */
459
460 /* If we are faulting a kernel address, we have to use the
461 * kernel page tables.
462 */
463 lis r11, TASK_SIZE@h
464 ori r11, r11, TASK_SIZE@l
465 cmplw 0, r10, r11
466 bge 2f
467
468 /* Get the PGD for the current thread */
4693:
470 mfspr r11,SPRN_SPRG3
471 lwz r11,PGDIR(r11)
4724:
473 FIND_PTE
474
475 /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
476 andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
477 cmpwi 0, r13, _PAGE_RW|_PAGE_USER
478 bne 2f /* Bail if not */
479
480 /* Update 'changed'. */
481 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
482 stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
483
484 /* MAS2 not updated as the entry does exist in the tlb, this
485 fault taken to detect state transition (eg: COW -> DIRTY)
486 */
487 andi. r11, r11, _PAGE_HWEXEC
488 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
489 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
490
491 /* update search PID in MAS6, AS = 0 */
492 mfspr r12, SPRN_PID0
493 slwi r12, r12, 16
494 mtspr SPRN_MAS6, r12
495
496 /* find the TLB index that caused the fault. It has to be here. */
497 tlbsx 0, r10
498
499 /* only update the perm bits, assume the RPN is fine */
500 mfspr r12, SPRN_MAS3
501 rlwimi r12, r11, 0, 20, 31
502 mtspr SPRN_MAS3,r12
503 tlbwe
504
505 /* Done...restore registers and get out of here. */
506 mfspr r11, SPRN_SPRG7R
507 mtcr r11
508 mfspr r13, SPRN_SPRG5R
509 mfspr r12, SPRN_SPRG4R
510 mfspr r11, SPRN_SPRG1
511 mfspr r10, SPRN_SPRG0
512 rfi /* Force context change */
513
5142:
515 /*
516 * The bailout. Restore registers to pre-exception conditions
517 * and call the heavyweights to help us out.
518 */
519 mfspr r11, SPRN_SPRG7R
520 mtcr r11
521 mfspr r13, SPRN_SPRG5R
522 mfspr r12, SPRN_SPRG4R
523 mfspr r11, SPRN_SPRG1
524 mfspr r10, SPRN_SPRG0
525 b data_access
526
527 /* Instruction Storage Interrupt */
528 INSTRUCTION_STORAGE_EXCEPTION
529
530 /* External Input Interrupt */
531 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
532
533 /* Alignment Interrupt */
534 ALIGNMENT_EXCEPTION
535
536 /* Program Interrupt */
537 PROGRAM_EXCEPTION
538
539 /* Floating Point Unavailable Interrupt */
540#ifdef CONFIG_PPC_FPU
541 FP_UNAVAILABLE_EXCEPTION
542#else
543#ifdef CONFIG_E200
544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */
545 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
546#else
547 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
548#endif
549#endif
550
551 /* System Call Interrupt */
552 START_EXCEPTION(SystemCall)
553 NORMAL_EXCEPTION_PROLOG
554 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
555
556 /* Auxillary Processor Unavailable Interrupt */
557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
558
559 /* Decrementer Interrupt */
560 DECREMENTER_EXCEPTION
561
562 /* Fixed Interval Timer Interrupt */
563 /* TODO: Add FIT support */
564 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
565
566 /* Watchdog Timer Interrupt */
567#ifdef CONFIG_BOOKE_WDT
568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
569#else
570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
571#endif
572
573 /* Data TLB Error Interrupt */
574 START_EXCEPTION(DataTLBError)
575 mtspr SPRN_SPRG0, r10 /* Save some working registers */
576 mtspr SPRN_SPRG1, r11
577 mtspr SPRN_SPRG4W, r12
578 mtspr SPRN_SPRG5W, r13
579 mfcr r11
580 mtspr SPRN_SPRG7W, r11
581 mfspr r10, SPRN_DEAR /* Get faulting address */
582
583 /* If we are faulting a kernel address, we have to use the
584 * kernel page tables.
585 */
586 lis r11, TASK_SIZE@h
587 ori r11, r11, TASK_SIZE@l
588 cmplw 5, r10, r11
589 blt 5, 3f
590 lis r11, swapper_pg_dir@h
591 ori r11, r11, swapper_pg_dir@l
592
593 mfspr r12,SPRN_MAS1 /* Set TID to 0 */
594 rlwinm r12,r12,0,16,1
595 mtspr SPRN_MAS1,r12
596
597 b 4f
598
599 /* Get the PGD for the current thread */
6003:
601 mfspr r11,SPRN_SPRG3
602 lwz r11,PGDIR(r11)
603
6044:
605 FIND_PTE
606 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
607 beq 2f /* Bail if not present */
608
609#ifdef CONFIG_PTE_64BIT
610 lwz r13, 0(r12)
611#endif
612 ori r11, r11, _PAGE_ACCESSED
613 stw r11, PTE_FLAGS_OFFSET(r12)
614
615 /* Jump to common tlb load */
616 b finish_tlb_load
6172:
618 /* The bailout. Restore registers to pre-exception conditions
619 * and call the heavyweights to help us out.
620 */
621 mfspr r11, SPRN_SPRG7R
622 mtcr r11
623 mfspr r13, SPRN_SPRG5R
624 mfspr r12, SPRN_SPRG4R
625 mfspr r11, SPRN_SPRG1
626 mfspr r10, SPRN_SPRG0
627 b data_access
628
629 /* Instruction TLB Error Interrupt */
630 /*
631 * Nearly the same as above, except we get our
632 * information from different registers and bailout
633 * to a different point.
634 */
635 START_EXCEPTION(InstructionTLBError)
636 mtspr SPRN_SPRG0, r10 /* Save some working registers */
637 mtspr SPRN_SPRG1, r11
638 mtspr SPRN_SPRG4W, r12
639 mtspr SPRN_SPRG5W, r13
640 mfcr r11
641 mtspr SPRN_SPRG7W, r11
642 mfspr r10, SPRN_SRR0 /* Get faulting address */
643
644 /* If we are faulting a kernel address, we have to use the
645 * kernel page tables.
646 */
647 lis r11, TASK_SIZE@h
648 ori r11, r11, TASK_SIZE@l
649 cmplw 5, r10, r11
650 blt 5, 3f
651 lis r11, swapper_pg_dir@h
652 ori r11, r11, swapper_pg_dir@l
653
654 mfspr r12,SPRN_MAS1 /* Set TID to 0 */
655 rlwinm r12,r12,0,16,1
656 mtspr SPRN_MAS1,r12
657
658 b 4f
659
660 /* Get the PGD for the current thread */
6613:
662 mfspr r11,SPRN_SPRG3
663 lwz r11,PGDIR(r11)
664
6654:
666 FIND_PTE
667 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
668 beq 2f /* Bail if not present */
669
670#ifdef CONFIG_PTE_64BIT
671 lwz r13, 0(r12)
672#endif
673 ori r11, r11, _PAGE_ACCESSED
674 stw r11, PTE_FLAGS_OFFSET(r12)
675
676 /* Jump to common TLB load point */
677 b finish_tlb_load
678
6792:
680 /* The bailout. Restore registers to pre-exception conditions
681 * and call the heavyweights to help us out.
682 */
683 mfspr r11, SPRN_SPRG7R
684 mtcr r11
685 mfspr r13, SPRN_SPRG5R
686 mfspr r12, SPRN_SPRG4R
687 mfspr r11, SPRN_SPRG1
688 mfspr r10, SPRN_SPRG0
689 b InstructionStorage
690
691#ifdef CONFIG_SPE
692 /* SPE Unavailable */
693 START_EXCEPTION(SPEUnavailable)
694 NORMAL_EXCEPTION_PROLOG
695 bne load_up_spe
696 addi r3,r1,STACK_FRAME_OVERHEAD
697 EXC_XFER_EE_LITE(0x2010, KernelSPE)
698#else
699 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
700#endif /* CONFIG_SPE */
701
702 /* SPE Floating Point Data */
703#ifdef CONFIG_SPE
704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
705#else
706 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
707#endif /* CONFIG_SPE */
708
709 /* SPE Floating Point Round */
710 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
711
712 /* Performance Monitor */
713 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
714
715
716 /* Debug Interrupt */
717 DEBUG_EXCEPTION
718
719/*
720 * Local functions
721 */
722
723 /*
724 * Data TLB exceptions will bail out to this point
725 * if they can't resolve the lightweight TLB fault.
726 */
/*
 * Heavyweight data-fault dispatch: builds a full exception frame,
 * saves ESR into the frame, then routes the fault.  If the ESR lock
 * bits (ESR_ILK|ESR_DLK) are set the fault came from a cache-locking
 * operation and goes to CacheLockingException; otherwise it is a
 * normal page fault (DEAR in r4 as arg2, ESR in r5 as arg3).
 */
727data_access:
728 NORMAL_EXCEPTION_PROLOG
729 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
730 stw r5,_ESR(r11)
731 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
732 andis. r10,r5,(ESR_ILK|ESR_DLK)@h
733 bne 1f /* lock bits set -> cache locking fault */
734 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
7351:
736 addi r3,r1,STACK_FRAME_OVERHEAD
737 EXC_XFER_EE_LITE(0x0300, CacheLockingException)
738
739/*
740 *
741 * Both the instruction and data TLB miss get to this
742 * point to load the TLB.
743 * r10 - EA of fault
744 * r11 - TLB (info from Linux PTE)
745 * r12, r13 - available to use
746 * CR5 - results of addr < TASK_SIZE
747 * MAS0, MAS1 - loaded with proper value when we get here
748 * MAS2, MAS3 - will need additional info from Linux PTE
749 * Upon exit, we reload everything and RFI.
750 */
751finish_tlb_load:
752 /*
753 * We set execute, because we don't have the granularity to
754 * properly set this at the page level (Linux problem).
755 * Many of these bits are software only. Bits we don't set
756 * here we (properly should) assume have the appropriate value.
757 */
758
759 mfspr r12, SPRN_MAS2
760#ifdef CONFIG_PTE_64BIT
761 rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */
762#else
763 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
764#endif
765 mtspr SPRN_MAS2, r12
766
 /* CR5 was set by the miss handlers' cmplw 5,addr,TASK_SIZE;
  * taken branch means a kernel address. */
767 bge 5, 1f
768
769 /* is user addr */
770 andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
771 andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
772 srwi r10, r12, 1
773 or r12, r12, r10 /* Copy user perms into supervisor */
774 iseleq r12, 0, r12 /* if _PAGE_USER clear, grant no perms at all */
775 b 2f
776
777 /* is kernel addr */
7781: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */
779 ori r12, r12, (MAS3_SX | MAS3_SR)
780
781#ifdef CONFIG_PTE_64BIT
7822: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
783 rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
784 mtspr SPRN_MAS3, r12
785BEGIN_FTR_SECTION
 /* CPUs with >32-bit physical addressing also take the high RPN bits
  * in MAS7. */
786 srwi r10, r13, 8 /* grab RPN[8:31] */
787 mtspr SPRN_MAS7, r10
788END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
789#else
7902: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
791 mtspr SPRN_MAS3, r11
792#endif
793#ifdef CONFIG_E200
794 /* Round robin TLB1 entries assignment */
795 mfspr r12, SPRN_MAS0
796
797 /* Extract TLB1CFG(NENTRY) */
798 mfspr r11, SPRN_TLB1CFG
799 andi. r11, r11, 0xfff
800
801 /* Extract MAS0(NV) */
802 andi. r13, r12, 0xfff
803 addi r13, r13, 1
804 cmpw 0, r13, r11
805 addi r12, r12, 1
806
807 /* check if we need to wrap */
808 blt 7f
809
810 /* wrap back to first free tlbcam entry */
811 lis r13, tlbcam_index@ha
812 lwz r13, tlbcam_index@l(r13)
813 rlwimi r12, r13, 0, 20, 31
8147:
815 mtspr SPRN_MAS0,r12
816#endif /* CONFIG_E200 */
817
818 tlbwe
819
820 /* Done...restore registers and get out of here. */
821 mfspr r11, SPRN_SPRG7R
822 mtcr r11
823 mfspr r13, SPRN_SPRG5R
824 mfspr r12, SPRN_SPRG4R
825 mfspr r11, SPRN_SPRG1
826 mfspr r10, SPRN_SPRG0
827 rfi /* Force context change */
828
829#ifdef CONFIG_SPE
830/* Note that the SPE support is closely modeled after the AltiVec
831 * support. Changes to one are likely to be applicable to the
832 * other! */
833load_up_spe:
834/*
835 * Disable SPE for the task which had SPE previously,
836 * and save its SPE registers in its thread_struct.
837 * Enables SPE for use in the kernel on return.
838 * On SMP we know the SPE units are free, since we give it up every
839 * switch. -- Kumar
840 */
841 mfmsr r5
842 oris r5,r5,MSR_SPE@h
843 mtmsr r5 /* enable use of SPE now */
844 isync
845/*
846 * For SMP, we don't do lazy SPE switching because it just gets too
847 * horrendously complex, especially when a task switches from one CPU
848 * to another. Instead we call giveup_spe in switch_to.
849 */
850#ifndef CONFIG_SMP
851 lis r3,last_task_used_spe@ha
852 lwz r4,last_task_used_spe@l(r3)
853 cmpi 0,r4,0
854 beq 1f /* no previous SPE owner -> nothing to save */
855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
856 SAVE_32EVRS(0,r10,r4)
 /* Save the accumulator: evxor/evmwumiaa read ACC out into evr10. */
857 evxor evr10, evr10, evr10 /* clear out evr10 */
858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
859 li r5,THREAD_ACC
860 evstddx evr10, r4, r5 /* save off accumulator */
861 lwz r5,PT_REGS(r4)
862 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
863 lis r10,MSR_SPE@h
864 andc r4,r4,r10 /* disable SPE for previous task */
865 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8661:
867#endif /* CONFIG_SMP */
 /* r9 carries the interrupted context's MSR (set up by the exception
  * prolog -- NOTE(review): confirm against head_booke.h). */
868 /* enable use of SPE after return */
869 oris r9,r9,MSR_SPE@h
870 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
871 li r4,1
872 li r10,THREAD_ACC
873 stw r4,THREAD_USED_SPE(r5) /* mark thread as an SPE user */
874 evlddx evr4,r10,r5
875 evmra evr4,evr4 /* reload the saved accumulator */
876 REST_32EVRS(0,r10,r5)
877#ifndef CONFIG_SMP
878 subi r4,r5,THREAD
879 stw r4,last_task_used_spe@l(r3) /* current task now owns SPE */
880#endif /* CONFIG_SMP */
 /* Fast restore of the exception frame and return to the faulting
  * instruction, which will now succeed. */
881 /* restore registers and return */
8822: REST_4GPRS(3, r11)
883 lwz r10,_CCR(r11)
884 REST_GPR(1, r11)
885 mtcr r10
886 lwz r10,_LINK(r11)
887 mtlr r10
888 REST_GPR(10, r11)
889 mtspr SPRN_SRR1,r9
890 mtspr SPRN_SRR0,r12
891 REST_GPR(9, r11)
892 REST_GPR(12, r11)
893 lwz r11,GPR11(r11)
894 SYNC
895 rfi
896
897/*
898 * SPE unavailable trap from kernel - print a message, but let
899 * the task use SPE in the kernel until it returns to user mode.
900 */
KernelSPE:
 /* Sets MSR_SPE in the saved MSR so SPE stays usable after the
  * exception return, logs task pointer and faulting PC via printk,
  * then takes the normal exception-return path. */
901KernelSPE:
902 lwz r3,_MSR(r1)
903 oris r3,r3,MSR_SPE@h
904 stw r3,_MSR(r1) /* enable use of SPE after return */
905 lis r3,87f@h /* r3 = format string for printk */
906 ori r3,r3,87f@l
907 mr r4,r2 /* current */
908 lwz r5,_NIP(r1)
909 bl printk
910 b ret_from_except
91187: .string "SPE used in kernel  (task=%p, pc=%x)  \n"
912 .align 4,0
913
914#endif /* CONFIG_SPE */
915
916/*
917 * Global functions
918 */
919
920/*
921 * extern void loadcam_entry(unsigned int index)
922 *
923 * Load TLBCAM[index] entry in to the L2 CAM MMU
924 */
/*
 * Each TLBCAM[] element is 20 bytes; the first four words hold the
 * MAS0..MAS3 images that are written to the MMU and committed with
 * tlbwe.  Clobbers r3-r5.  Caller must ensure index is valid.
 */
925_GLOBAL(loadcam_entry)
926 lis r4,TLBCAM@ha
927 addi r4,r4,TLBCAM@l
928 mulli r5,r3,20 /* byte offset of entry: index * sizeof(TLBCAM[0]) */
929 add r3,r5,r4
930 lwz r4,0(r3)
931 mtspr SPRN_MAS0,r4
932 lwz r4,4(r3)
933 mtspr SPRN_MAS1,r4
934 lwz r4,8(r3)
935 mtspr SPRN_MAS2,r4
936 lwz r4,12(r3)
937 mtspr SPRN_MAS3,r4
938 tlbwe /* commit the entry */
939 isync
940 blr
941
942/*
943 * extern void giveup_altivec(struct task_struct *prev)
944 *
945 * The e500 core does not have an AltiVec unit.
946 */
947_GLOBAL(giveup_altivec)
948 blr /* no-op stub so generic code can call it unconditionally */
949
950#ifdef CONFIG_SPE
951/*
952 * extern void giveup_spe(struct task_struct *prev)
953 *
954 */
/*
 * Save the SPE state (EVRs, accumulator, SPEFSCR) of 'prev' into its
 * thread_struct and clear MSR_SPE in its saved user MSR, so the next
 * SPE use by that task re-faults and reloads state.  A NULL prev means
 * there is no owner and nothing to save.  Clobbers r3-r6.
 */
955_GLOBAL(giveup_spe)
956 mfmsr r5
957 oris r5,r5,MSR_SPE@h
958 SYNC
959 mtmsr r5 /* enable use of SPE now */
960 isync
961 cmpi 0,r3,0
962 beqlr- /* if no previous owner, done */
963 addi r3,r3,THREAD /* want THREAD of task */
964 lwz r5,PT_REGS(r3)
965 cmpi 0,r5,0 /* CR0 consumed by the beq below, after the saves */
966 SAVE_32EVRS(0, r4, r3)
 /* Read ACC out through evr6 so it can be stored in the thread. */
967 evxor evr6, evr6, evr6 /* clear out evr6 */
968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
969 li r4,THREAD_ACC
970 evstddx evr6, r4, r3 /* save off accumulator */
971 mfspr r6,SPRN_SPEFSCR
972 stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
973 beq 1f /* task has no pt_regs -> skip MSR update */
974 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
975 lis r3,MSR_SPE@h
976 andc r4,r4,r3 /* disable SPE for previous task */
977 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9781:
979#ifndef CONFIG_SMP
980 li r5,0
981 lis r4,last_task_used_spe@ha
982 stw r5,last_task_used_spe@l(r4) /* SPE now has no lazy owner */
983#endif /* CONFIG_SMP */
984 blr
985#endif /* CONFIG_SPE */
986
987/*
988 * extern void giveup_fpu(struct task_struct *prev)
989 *
990 * Not all FSL Book-E cores have an FPU
991 */
992#ifndef CONFIG_PPC_FPU
993_GLOBAL(giveup_fpu)
994 blr /* no-op stub on FPU-less cores */
995#endif
996
997/*
998 * extern void abort(void)
999 *
1000 * At present, this routine just applies a system reset.
1001 */
1002_GLOBAL(abort)
1003 li r13,0
1004 mtspr SPRN_DBCR0,r13 /* disable all debug events */
1005 mfmsr r13
1006 ori r13,r13,MSR_DE@l /* Enable Debug Events */
1007 mtmsr r13
1008 mfspr r13,SPRN_DBCR0
1009 lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
1010 mtspr SPRN_DBCR0,r13 /* request a chip reset via the debug unit */
 /* No blr: the reset is expected to take effect immediately.
  * NOTE(review): if it does not, execution falls through into
  * set_context below -- confirm this is intentional. */
1011
1012_GLOBAL(set_context)
 /* set_context(r3 = new context/PID, r4 = new pgdir):
  * switch the MMU search PID; the pgdir pointer is only recorded
  * for the Abatron BDI2000 debugger, the hardware walks no page
  * tables on Book-E. */
1013
1014#ifdef CONFIG_BDI_SWITCH
1015 /* Context switch the PTE pointer for the Abatron BDI2000.
1016 * The PGDIR is the second parameter.
1017 */
1018 lis r5, abatron_pteptrs@h
1019 ori r5, r5, abatron_pteptrs@l
1020 stw r4, 0x4(r5) /* slot 1 = current user pgdir */
1021#endif
1022 mtspr SPRN_PID,r3
1023 isync /* Force context change */
1024 blr
1025
1026/*
1027 * We put a few things here that have to be page-aligned. This stuff
1028 * goes at the beginning of the data segment, which is page-aligned.
1029 */
1030 .data
1031 .align 12
1032 .globl sdata
1033sdata:
1034 .globl empty_zero_page
1035empty_zero_page:
1036 .space 4096
1037 .globl swapper_pg_dir
1038swapper_pg_dir:
1039 .space 4096
1040
1041/* Reserved 4k for the critical exception stack & 4k for the machine
1042 * check stack per CPU for kernel mode exceptions */
1043 .section .bss
1044 .align 12
1045exception_stack_bottom:
1046 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1047 .globl exception_stack_top
1048exception_stack_top:
1049
1050/*
1051 * This space gets a copy of optional info passed to us by the bootstrap
1052 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1053 */
1054 .globl cmd_line
1055cmd_line:
1056 .space 512
1057
1058/*
1059 * Room for two PTE pointers, usually the kernel and current user pointers
1060 * to their respective root page table.
1061 */
1062abatron_pteptrs:
1063 .space 8
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
new file mode 100644
index 000000000000..444fdcc769f1
--- /dev/null
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -0,0 +1,233 @@
1/*
2 * This file contains the power_save function for 6xx & 7xxx CPUs
3 * rewritten in assembler
4 *
5 * Warning ! This code assumes that if your machine has a 750fx
6 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
7 * if this is not the case some additional changes will have to
8 * be done to check a runtime var (a bit like powersave-nap)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/threads.h>
18#include <asm/reg.h>
19#include <asm/page.h>
20#include <asm/cputable.h>
21#include <asm/thread_info.h>
22#include <asm/ppc_asm.h>
23#include <asm/asm-offsets.h>
24
25#undef DEBUG
26
27 .text
28
29/*
30 * Init idle, called at early CPU setup time from head.S for each CPU
31 * Make sure no rest of NAP mode remains in HID0, save default
32 * values for some CPU specific registers. Called with r24
33 * containing CPU number and r3 reloc offset
34 */
35_GLOBAL(init_idle_6xx)
36BEGIN_FTR_SECTION
37 mfspr r4,SPRN_HID0
38 rlwinm r4,r4,0,10,8 /* Clear NAP */
39 mtspr SPRN_HID0, r4
40 b 1f
41END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 /* CPU cannot NAP: nothing to save, done */
42 blr
431:
 /* r5 = per-CPU word offset (cpu# * 4) plus relocation offset,
  * used to index the nap_save_* arrays below */
44 slwi r5,r24,2
45 add r5,r5,r3
46BEGIN_FTR_SECTION
47 mfspr r4,SPRN_MSSCR0
48 addis r6,r5, nap_save_msscr0@ha
49 stw r4,nap_save_msscr0@l(r6)
50END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
51BEGIN_FTR_SECTION
52 mfspr r4,SPRN_HID1
53 addis r6,r5,nap_save_hid1@ha
54 stw r4,nap_save_hid1@l(r6)
55END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
56 blr
57
58/*
59 * Here is the power_save_6xx function. This could eventually be
60 * split into several functions & changing the function pointer
61 * depending on the various features.
62 */
63_GLOBAL(ppc6xx_idle)
64 /* Check if we can nap or doze, put HID0 mask in r3
65 */
66 lis r3, 0
67BEGIN_FTR_SECTION
68 lis r3,HID0_DOZE@h
69END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
70BEGIN_FTR_SECTION
71 /* We must dynamically check for the NAP feature as it
72 * can be cleared by CPU init after the fixups are done
73 */
74 lis r4,cur_cpu_spec@ha
75 lwz r4,cur_cpu_spec@l(r4)
76 lwz r4,CPU_SPEC_FEATURES(r4)
77 andi. r0,r4,CPU_FTR_CAN_NAP
78 beq 1f
79 /* Now check if user or arch enabled NAP mode */
80 lis r4,powersave_nap@ha
81 lwz r4,powersave_nap@l(r4)
82 cmpwi 0,r4,0
83 beq 1f
84 lis r3,HID0_NAP@h
851:
86END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 /* Neither NAP nor DOZE available/enabled: just return */
87 cmpwi 0,r3,0
88 beqlr
89
90 /* Clear MSR:EE */
91 mfmsr r7
92 rlwinm r0,r7,0,17,15
93 mtmsr r0
94
95 /* Check current_thread_info()->flags */
 /* Mask the stack pointer down to the 8K-aligned thread_info base */
96 rlwinm r4,r1,0,0,18
97 lwz r4,TI_FLAGS(r4)
98 andi. r0,r4,_TIF_NEED_RESCHED
99 beq 1f
 /* Work pending: restore interrupts and bail out without sleeping */
100 mtmsr r7 /* out of line this ? */
101 blr
1021:
103 /* Some pre-nap cleanups needed on some CPUs */
104 andis. r0,r3,HID0_NAP@h
105 beq 2f
106BEGIN_FTR_SECTION
107 /* Disable L2 prefetch on some 745x and try to ensure
108 * L2 prefetch engines are idle. As explained by errata
109 * text, we can't be sure they are, we just hope very hard
110 * that well be enough (sic !). At least I noticed Apple
111 * doesn't even bother doing the dcbf's here...
112 */
113 mfspr r4,SPRN_MSSCR0
114 rlwinm r4,r4,0,0,29
115 sync
116 mtspr SPRN_MSSCR0,r4
117 sync
118 isync
119 lis r4,KERNELBASE@h
120 dcbf 0,r4
121 dcbf 0,r4
122 dcbf 0,r4
123 dcbf 0,r4
124END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
125#ifdef DEBUG
126 lis r6,nap_enter_count@ha
127 lwz r4,nap_enter_count@l(r6)
128 addi r4,r4,1
129 stw r4,nap_enter_count@l(r6)
130#endif
1312:
132BEGIN_FTR_SECTION
133 /* Go to low speed mode on some 750FX */
134 lis r4,powersave_lowspeed@ha
135 lwz r4,powersave_lowspeed@l(r4)
136 cmpwi 0,r4,0
137 beq 1f
138 mfspr r4,SPRN_HID1
139 oris r4,r4,0x0001
140 mtspr SPRN_HID1,r4
1411:
142END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
143
144 /* Go to NAP or DOZE now */
145 mfspr r4,SPRN_HID0
146 lis r5,(HID0_NAP|HID0_SLEEP)@h
147BEGIN_FTR_SECTION
148 oris r5,r5,HID0_DOZE@h
149END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
150 andc r4,r4,r5
151 or r4,r4,r3
152BEGIN_FTR_SECTION
153 oris r4,r4,HID0_DPM@h /* that should be done once for all */
154END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
155 mtspr SPRN_HID0,r4
156BEGIN_FTR_SECTION
157 DSSALL
158 sync
159END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 /* Setting MSR_POW with EE on actually enters the sleep state */
160 ori r7,r7,MSR_EE /* Could be omitted (already set) */
161 oris r7,r7,MSR_POW@h
162 sync
163 isync
164 mtmsr r7
165 isync
166 sync
167 blr
168
169/*
170 * Return from NAP/DOZE mode, restore some CPU specific registers,
171 * we are called with DR/IR still off and r2 containing physical
172 * address of current.
173 */
174_GLOBAL(power_save_6xx_restore)
175 mfspr r11,SPRN_HID0
176 rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
177 cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
178BEGIN_FTR_SECTION
179 rlwinm r11,r11,0,9,7 /* Clear DOZE */
180END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
181 mtspr SPRN_HID0, r11
182
183#ifdef DEBUG
184 beq cr1,1f
185 lis r11,(nap_return_count-KERNELBASE)@ha
186 lwz r9,nap_return_count@l(r11)
187 addi r9,r9,1
188 stw r9,nap_return_count@l(r11)
1891:
190#endif
191
 /* r11 = this CPU's word offset into the nap_save_* arrays
  * (thread_info fetched via the physical stack address, MMU is off) */
192 rlwinm r9,r1,0,0,18
193 tophys(r9,r9)
194 lwz r11,TI_CPU(r9)
195 slwi r11,r11,2
196 /* Todo make sure all these are in the same page
197 * and load r22 (@ha part + CPU offset) only once
198 */
199BEGIN_FTR_SECTION
200 beq cr1,1f
201 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
202 lwz r9,nap_save_msscr0@l(r9)
203 mtspr SPRN_MSSCR0, r9
204 sync
205 isync
2061:
207END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
208BEGIN_FTR_SECTION
209 addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
210 lwz r9,nap_save_hid1@l(r9)
211 mtspr SPRN_HID1, r9
212END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
 /* Resume normal exception processing */
213 b transfer_to_handler_cont
214
215 .data
216
/* Per-CPU saved copies of MSSCR0/HID1, filled in by init_idle_6xx and
 * restored by power_save_6xx_restore after waking from NAP/DOZE. */
217_GLOBAL(nap_save_msscr0)
218 .space 4*NR_CPUS
219
220_GLOBAL(nap_save_hid1)
221 .space 4*NR_CPUS
222
/* User/arch tunables: non-zero enables NAP / 750FX low-speed PLL mode */
223_GLOBAL(powersave_nap)
224 .long 0
225_GLOBAL(powersave_lowspeed)
226 .long 0
227
228#ifdef DEBUG
229_GLOBAL(nap_enter_count)
230 .space 4
231_GLOBAL(nap_return_count)
232 .space 4
233#endif
diff --git a/arch/ppc64/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index ca02afe2a795..1494e2f177f7 100644
--- a/arch/ppc64/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -39,13 +39,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
39 * can be cleared by CPU init after the fixups are done 39 * can be cleared by CPU init after the fixups are done
40 */ 40 */
41 LOADBASE(r3,cur_cpu_spec) 41 LOADBASE(r3,cur_cpu_spec)
42 ld r4,cur_cpu_spec@l(r3) 42 ld r4,OFF(cur_cpu_spec)(r3)
43 ld r4,CPU_SPEC_FEATURES(r4) 43 ld r4,CPU_SPEC_FEATURES(r4)
44 andi. r0,r4,CPU_FTR_CAN_NAP 44 andi. r0,r4,CPU_FTR_CAN_NAP
45 beqlr 45 beqlr
46 /* Now check if user or arch enabled NAP mode */ 46 /* Now check if user or arch enabled NAP mode */
47 LOADBASE(r3,powersave_nap) 47 LOADBASE(r3,powersave_nap)
48 lwz r4,powersave_nap@l(r3) 48 lwz r4,OFF(powersave_nap)(r3)
49 cmpwi 0,r4,0 49 cmpwi 0,r4,0
50 beqlr 50 beqlr
51 51
@@ -63,8 +63,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
63 beq 1f 63 beq 1f
64 mtmsrd r7 /* out of line this ? */ 64 mtmsrd r7 /* out of line this ? */
65 blr 65 blr
661: 661:
67 /* Go to NAP now */ 67 /* Go to NAP now */
68BEGIN_FTR_SECTION 68BEGIN_FTR_SECTION
69 DSSALL 69 DSSALL
70 sync 70 sync
@@ -76,4 +76,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
76 isync 76 isync
77 sync 77 sync
78 blr 78 blr
79
diff --git a/arch/ppc64/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index 941043ae040f..941043ae040f 100644
--- a/arch/ppc64/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
diff --git a/arch/ppc64/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index b81de286df5e..b81de286df5e 100644
--- a/arch/ppc64/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
new file mode 100644
index 000000000000..303229b090b8
--- /dev/null
+++ b/arch/powerpc/kernel/misc_32.S
@@ -0,0 +1,1064 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 */
14
15#include <linux/config.h>
16#include <linux/sys.h>
17#include <asm/unistd.h>
18#include <asm/errno.h>
19#include <asm/reg.h>
20#include <asm/page.h>
21#include <asm/cache.h>
22#include <asm/cputable.h>
23#include <asm/mmu.h>
24#include <asm/ppc_asm.h>
25#include <asm/thread_info.h>
26#include <asm/asm-offsets.h>
27
28 .text
29
30 .align 5
/*
 * void __delay(unsigned long loops)
 *
 * Busy-wait for r3 iterations of the counted loop; returns
 * immediately when r3 == 0 (note CTR is loaded before the beqlr
 * so the bdnz below is never reached in that case).
 */
31_GLOBAL(__delay)
32 cmpwi 0,r3,0
33 mtctr r3
34 beqlr
351: bdnz 1b
36 blr
37
38/*
39 * This returns the high 64 bits of the product of two 64-bit numbers.
40 */
/* Operands: A in r3(high):r4(low), B in r5(high):r6(low);
 * result high 64 bits returned in r3:r4 (32-bit register pairs). */
41_GLOBAL(mulhdu)
42 cmpwi r6,0
43 cmpwi cr1,r3,0
44 mr r10,r4
45 mulhwu r4,r4,r5
46 beq 1f
47 mulhwu r0,r10,r6
48 mullw r7,r10,r5
49 addc r7,r0,r7
50 addze r4,r4
511: beqlr cr1 /* all done if high part of A is 0 */
52 mr r10,r3
53 mullw r9,r3,r5
54 mulhwu r3,r3,r5
55 beq 2f
56 mullw r0,r10,r6
57 mulhwu r8,r10,r6
58 addc r7,r0,r7
59 adde r4,r4,r8
60 addze r3,r3
612: addc r4,r4,r9
62 addze r3,r3
63 blr
64
65/*
66 * Returns (address we're running at) - (address we were linked at)
67 * for use before the text and data are mapped to KERNELBASE.
68 */
/* Technique: bl/mflr gives the true runtime address of label 1b,
 * LOADADDR gives its link-time address; the difference is the offset. */
69_GLOBAL(reloc_offset)
70 mflr r0
71 bl 1f
721: mflr r3
73 LOADADDR(r4,1b)
74 subf r3,r4,r3
75 mtlr r0
76 blr
77
78/*
79 * add_reloc_offset(x) returns x + reloc_offset().
80 */
81_GLOBAL(add_reloc_offset)
82 mflr r0
83 bl 1f
841: mflr r5
85 LOADADDR(r4,1b)
86 subf r5,r4,r5
87 add r3,r3,r5
88 mtlr r0
89 blr
90
91/*
92 * sub_reloc_offset(x) returns x - reloc_offset().
93 */
94_GLOBAL(sub_reloc_offset)
95 mflr r0
96 bl 1f
971: mflr r5
98 lis r4,1b@ha
99 addi r4,r4,1b@l
100 subf r5,r4,r5
101 subf r3,r5,r3
102 mtlr r0
103 blr
104
105/*
106 * reloc_got2 runs through the .got2 section adding an offset
107 * to each entry.
108 */
/* r3 = offset to add to every 4-byte GOT2 entry.  The section bounds
 * (__got2_start/__got2_end) are themselves relocated via bl/mflr
 * before walking, since we may be running before relocation. */
109_GLOBAL(reloc_got2)
110 mflr r11
111 lis r7,__got2_start@ha
112 addi r7,r7,__got2_start@l
113 lis r8,__got2_end@ha
114 addi r8,r8,__got2_end@l
115 subf r8,r7,r8
116 srwi. r8,r8,2
117 beqlr
118 mtctr r8
119 bl 1f
1201: mflr r0
121 lis r4,1b@ha
122 addi r4,r4,1b@l
123 subf r0,r4,r0
124 add r7,r0,r7
1252: lwz r0,0(r7)
126 add r0,r0,r3
127 stw r0,0(r7)
128 addi r7,r7,4
129 bdnz 2b
130 mtlr r11
131 blr
132
133/*
134 * identify_cpu,
135 * called with r3 = data offset and r4 = CPU number
136 * doesn't change r3
137 */
/* Scans the cpu_specs[] table for an entry whose (PVR & mask) matches
 * this CPU's PVR, then stores the (unrelocated) pointer to that entry
 * in cur_cpu_spec.  The table is assumed to end with a catch-all
 * entry, so the scan loop always terminates. */
138_GLOBAL(identify_cpu)
139 addis r8,r3,cpu_specs@ha
140 addi r8,r8,cpu_specs@l
141 mfpvr r7
1421:
143 lwz r5,CPU_SPEC_PVR_MASK(r8)
144 and r5,r5,r7
145 lwz r6,CPU_SPEC_PVR_VALUE(r8)
146 cmplw 0,r6,r5
147 beq 1f
148 addi r8,r8,CPU_SPEC_ENTRY_SIZE
149 b 1b
1501:
151 addis r6,r3,cur_cpu_spec@ha
152 addi r6,r6,cur_cpu_spec@l
 /* store the link-time address (strip the reloc offset) */
153 sub r8,r8,r3
154 stw r8,0(r6)
155 blr
156
157/*
158 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
159 * and writes nop's over sections of code that don't apply for this cpu.
160 * r3 = data offset (not changed)
161 */
/* Each 16-byte fixup record is {mask, value, begin, end}: if
 * (features & mask) != value, the code in [begin, end) is overwritten
 * with nops and (on split-I/D-cache CPUs) flushed to the icache. */
162_GLOBAL(do_cpu_ftr_fixups)
163 /* Get CPU 0 features */
164 addis r6,r3,cur_cpu_spec@ha
165 addi r6,r6,cur_cpu_spec@l
166 lwz r4,0(r6)
167 add r4,r4,r3
168 lwz r4,CPU_SPEC_FEATURES(r4)
169
170 /* Get the fixup table */
171 addis r6,r3,__start___ftr_fixup@ha
172 addi r6,r6,__start___ftr_fixup@l
173 addis r7,r3,__stop___ftr_fixup@ha
174 addi r7,r7,__stop___ftr_fixup@l
175
176 /* Do the fixup */
1771: cmplw 0,r6,r7
178 bgelr
179 addi r6,r6,16
180 lwz r8,-16(r6) /* mask */
181 and r8,r8,r4
182 lwz r9,-12(r6) /* value */
183 cmplw 0,r8,r9
184 beq 1b
185 lwz r8,-8(r6) /* section begin */
186 lwz r9,-4(r6) /* section end */
187 subf. r9,r8,r9
188 beq 1b
189 /* write nops over the section of code */
190 /* todo: if large section, add a branch at the start of it */
191 srwi r9,r9,2
192 mtctr r9
193 add r8,r8,r3
194 lis r0,0x60000000@h /* nop */
1953: stw r0,0(r8)
196 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
197 beq 2f
198 dcbst 0,r8 /* suboptimal, but simpler */
199 sync
200 icbi 0,r8
2012: addi r8,r8,4
202 bdnz 3b
203 sync /* additional sync needed on g4 */
204 isync
205 b 1b
206
207/*
208 * call_setup_cpu - call the setup_cpu function for this cpu
209 * r3 = data offset, r24 = cpu number
210 *
211 * Setup function is called with:
212 * r3 = data offset
213 * r4 = ptr to CPU spec (relocated)
214 */
/* Tail-calls cur_cpu_spec->cpu_setup via CTR; returns directly if the
 * spec has no setup function (NULL pointer, tested before relocation). */
215_GLOBAL(call_setup_cpu)
216 addis r4,r3,cur_cpu_spec@ha
217 addi r4,r4,cur_cpu_spec@l
218 lwz r4,0(r4)
219 add r4,r4,r3
220 lwz r5,CPU_SPEC_SETUP(r4)
221 cmpi 0,r5,0
222 add r5,r5,r3
223 beqlr
224 mtctr r5
225 bctr
226
227#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
228
229/* This gets called by via-pmu.c to switch the PLL selection
230 * on 750fx CPU. This function should really be moved to some
231 * other place (as should most of the cpufreq code in via-pmu).
232 */
/* r3 = PLL select: 0 = PLL0 (fast, re-enables HID0:BTIC),
 * non-zero = PLL1 (slow, BTIC disabled first per errata).
 * Runs with external interrupts masked for the duration. */
233_GLOBAL(low_choose_750fx_pll)
234 /* Clear MSR:EE */
235 mfmsr r7
236 rlwinm r0,r7,0,17,15
237 mtmsr r0
238
239 /* If switching to PLL1, disable HID0:BTIC */
240 cmplwi cr0,r3,0
241 beq 1f
242 mfspr r5,SPRN_HID0
243 rlwinm r5,r5,0,27,25
244 sync
245 mtspr SPRN_HID0,r5
246 isync
247 sync
248
2491:
250 /* Calc new HID1 value */
251 mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */
252 rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */
253 rlwinm r4,r4,0,16,14 /* Could have I used rlwimi here ? */
254 or r4,r4,r5
255 mtspr SPRN_HID1,r4
256
257 /* Store new HID1 image */
 /* Keep the per-CPU nap_save_hid1 slot in sync so the idle wakeup
  * path restores the PLL selection we just programmed */
258 rlwinm r6,r1,0,0,18
259 lwz r6,TI_CPU(r6)
260 slwi r6,r6,2
261 addis r6,r6,nap_save_hid1@ha
262 stw r4,nap_save_hid1@l(r6)
263
264 /* If switching to PLL0, enable HID0:BTIC */
265 cmplwi cr0,r3,0
266 bne 1f
267 mfspr r5,SPRN_HID0
268 ori r5,r5,HID0_BTIC
269 sync
270 mtspr SPRN_HID0,r5
271 isync
272 sync
273
2741:
275 /* Return */
276 mtmsr r7
277 blr
278
/* r3 = dynamic frequency switch select for 7447A, written into
 * HID1 bit 9; interrupts are masked across the HID1 update. */
279_GLOBAL(low_choose_7447a_dfs)
280 /* Clear MSR:EE */
281 mfmsr r7
282 rlwinm r0,r7,0,17,15
283 mtmsr r0
284
285 /* Calc new HID1 value */
286 mfspr r4,SPRN_HID1
287 insrwi r4,r3,1,9 /* insert parameter into bit 9 */
288 sync
289 mtspr SPRN_HID1,r4
290 sync
291 isync
292
293 /* Return */
294 mtmsr r7
295 blr
296
297#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
298
299/*
300 * complement mask on the msr then "or" some values on.
301 * _nmask_and_or_msr(nmask, value_to_or)
302 */
/* New MSR = (MSR & ~r3) | r4.  Used to atomically clear and set MSR bits. */
303_GLOBAL(_nmask_and_or_msr)
304 mfmsr r0 /* Get current msr */
305 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
306 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
307 SYNC /* Some chip revs have problems here... */
308 mtmsr r0 /* Update machine state */
309 isync
310 blr /* Done */
311
312
313/*
314 * Flush MMU TLB
315 */
/* Invalidates the entire TLB.  The mechanism is per-family:
 * 40x/44x/FSL BookE use their software-managed TLB instructions;
 * classic hash-MMU CPUs use tlbia, taking mmu_hash_lock on SMP
 * with translation and interrupts disabled while holding it. */
316_GLOBAL(_tlbia)
317#if defined(CONFIG_40x)
318 sync /* Flush to memory before changing mapping */
319 tlbia
320 isync /* Flush shadow TLB */
321#elif defined(CONFIG_44x)
322 li r3,0
323 sync
324
325 /* Load high watermark */
326 lis r4,tlb_44x_hwater@ha
327 lwz r5,tlb_44x_hwater@l(r4)
328
3291: tlbwe r3,r3,PPC44x_TLB_PAGEID
330 addi r3,r3,1
331 cmpw 0,r3,r5
332 ble 1b
333
334 isync
335#elif defined(CONFIG_FSL_BOOKE)
336 /* Invalidate all entries in TLB0 */
337 li r3, 0x04
338 tlbivax 0,3
339 /* Invalidate all entries in TLB1 */
340 li r3, 0x0c
341 tlbivax 0,3
342 /* Invalidate all entries in TLB2 */
343 li r3, 0x14
344 tlbivax 0,3
345 /* Invalidate all entries in TLB3 */
346 li r3, 0x1c
347 tlbivax 0,3
348 msync
349#ifdef CONFIG_SMP
350 tlbsync
351#endif /* CONFIG_SMP */
352#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
353#if defined(CONFIG_SMP)
 /* r8 = lock token: CPU number | 0xa0000, identifies the holder */
354 rlwinm r8,r1,0,0,18
355 lwz r8,TI_CPU(r8)
356 oris r8,r8,10
357 mfmsr r10
358 SYNC
359 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
360 rlwinm r0,r0,0,28,26 /* clear DR */
361 mtmsr r0
362 SYNC_601
363 isync
364 lis r9,mmu_hash_lock@h
365 ori r9,r9,mmu_hash_lock@l
366 tophys(r9,r9)
 /* spin on mmu_hash_lock with lwarx/stwcx. */
36710: lwarx r7,0,r9
368 cmpwi 0,r7,0
369 bne- 10b
370 stwcx. r8,0,r9
371 bne- 10b
372 sync
373 tlbia
374 sync
375 TLBSYNC
376 li r0,0
377 stw r0,0(r9) /* clear mmu_hash_lock */
378 mtmsr r10
379 SYNC_601
380 isync
381#else /* CONFIG_SMP */
382 sync
383 tlbia
384 sync
385#endif /* CONFIG_SMP */
386#endif /* ! defined(CONFIG_40x) */
387 blr
388
389/*
390 * Flush MMU TLB for a particular address
391 */
/* r3 = effective address whose TLB entry is to be invalidated.
 * Same per-family structure and SMP locking as _tlbia above. */
392_GLOBAL(_tlbie)
393#if defined(CONFIG_40x)
394 tlbsx. r3, 0, r3
395 bne 10f
396 sync
397 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
398 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
399 * the TLB entry. */
400 tlbwe r3, r3, TLB_TAG
401 isync
40210:
403#elif defined(CONFIG_44x)
404 mfspr r4,SPRN_MMUCR
405 mfspr r5,SPRN_PID /* Get PID */
406 rlwimi r4,r5,0,24,31 /* Set TID */
407 mtspr SPRN_MMUCR,r4
408
409 tlbsx. r3, 0, r3
410 bne 10f
411 sync
412 /* There are only 64 TLB entries, so r3 < 64,
413 * which means bit 22, is clear. Since 22 is
414 * the V bit in the TLB_PAGEID, loading this
415 * value will invalidate the TLB entry.
416 */
417 tlbwe r3, r3, PPC44x_TLB_PAGEID
418 isync
41910:
420#elif defined(CONFIG_FSL_BOOKE)
 /* Invalidate the page in every TLB array (TLBSEL 0..3) */
421 rlwinm r4, r3, 0, 0, 19
422 ori r5, r4, 0x08 /* TLBSEL = 1 */
423 ori r6, r4, 0x10 /* TLBSEL = 2 */
424 ori r7, r4, 0x18 /* TLBSEL = 3 */
425 tlbivax 0, r4
426 tlbivax 0, r5
427 tlbivax 0, r6
428 tlbivax 0, r7
429 msync
430#if defined(CONFIG_SMP)
431 tlbsync
432#endif /* CONFIG_SMP */
433#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
434#if defined(CONFIG_SMP)
 /* r8 = lock token: CPU number | 0xb0000, identifies the holder */
435 rlwinm r8,r1,0,0,18
436 lwz r8,TI_CPU(r8)
437 oris r8,r8,11
438 mfmsr r10
439 SYNC
440 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
441 rlwinm r0,r0,0,28,26 /* clear DR */
442 mtmsr r0
443 SYNC_601
444 isync
445 lis r9,mmu_hash_lock@h
446 ori r9,r9,mmu_hash_lock@l
447 tophys(r9,r9)
44810: lwarx r7,0,r9
449 cmpwi 0,r7,0
450 bne- 10b
451 stwcx. r8,0,r9
452 bne- 10b
453 eieio
454 tlbie r3
455 sync
456 TLBSYNC
457 li r0,0
458 stw r0,0(r9) /* clear mmu_hash_lock */
459 mtmsr r10
460 SYNC_601
461 isync
462#else /* CONFIG_SMP */
463 tlbie r3
464 sync
465#endif /* CONFIG_SMP */
466#endif /* ! CONFIG_40x */
467 blr
468
469/*
470 * Flush instruction cache.
471 * This is a no-op on the 601.
472 *
473 * Per-family mechanism: 8xx uses the IC_CST invalidate-all command,
474 * 4xx uses iccci, FSL BookE uses the L1CSR flash-invalidate bits
475 * (whole L1 via L1CSR0 on unified-cache parts, icache via L1CSR1
476 * on split-cache parts), classic 6xx uses HID0_ICFI.
477 */
473_GLOBAL(flush_instruction_cache)
474#if defined(CONFIG_8xx)
475 isync
476 lis r5, IDC_INVALL@h
477 mtspr SPRN_IC_CST, r5
478#elif defined(CONFIG_4xx)
479#ifdef CONFIG_403GCX
480 li r3, 512
481 mtctr r3
482 lis r4, KERNELBASE@h
4831: iccci 0, r4
484 addi r4, r4, 16
485 bdnz 1b
486#else
487 lis r3, KERNELBASE@h
488 iccci 0,r3
489#endif
490#elif defined(CONFIG_FSL_BOOKE)
491BEGIN_FTR_SECTION
492 mfspr r3,SPRN_L1CSR0
493 ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
494 /* msync; isync recommended here */
495 mtspr SPRN_L1CSR0,r3
496 isync
497 blr
498END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
499 mfspr r3,SPRN_L1CSR1
500 ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
501 mtspr SPRN_L1CSR1,r3
502#else
503 mfspr r3,SPRN_PVR
504 rlwinm r3,r3,16,16,31
505 cmpwi 0,r3,1
506 beqlr /* for 601, do nothing */
507 /* 603/604 processor - use invalidate-all bit in HID0 */
508 mfspr r3,SPRN_HID0
509 ori r3,r3,HID0_ICFI
510 mtspr SPRN_HID0,r3
511#endif /* CONFIG_8xx/4xx */
512 isync
513 blr
514
515/*
516 * Write any modified data cache blocks out to memory
517 * and invalidate the corresponding instruction cache blocks.
518 * This is a no-op on the 601.
519 *
520 * flush_icache_range(unsigned long start, unsigned long stop)
521 */
/* Standard dcbst+sync then icbi+sync/isync sequence over every
 * cache line in [start, stop), after aligning start down to a
 * line boundary.  r3 = start, r4 = stop. */
522_GLOBAL(flush_icache_range)
523BEGIN_FTR_SECTION
524 blr /* for 601, do nothing */
525END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
526 li r5,L1_CACHE_BYTES-1
527 andc r3,r3,r5
528 subf r4,r3,r4
529 add r4,r4,r5
530 srwi. r4,r4,L1_CACHE_SHIFT
531 beqlr
532 mtctr r4
533 mr r6,r3
5341: dcbst 0,r3
535 addi r3,r3,L1_CACHE_BYTES
536 bdnz 1b
537 sync /* wait for dcbst's to get to ram */
538 mtctr r4
5392: icbi 0,r6
540 addi r6,r6,L1_CACHE_BYTES
541 bdnz 2b
542 sync /* additional sync needed on g4 */
543 isync
544 blr
545/*
546 * Write any modified data cache blocks out to memory.
547 * Does not invalidate the corresponding cache lines (especially for
548 * any corresponding instruction cache).
549 *
550 * clean_dcache_range(unsigned long start, unsigned long stop)
551 */
552_GLOBAL(clean_dcache_range)
553 li r5,L1_CACHE_BYTES-1
554 andc r3,r3,r5
555 subf r4,r3,r4
556 add r4,r4,r5
557 srwi. r4,r4,L1_CACHE_SHIFT
558 beqlr
559 mtctr r4
560
5611: dcbst 0,r3
562 addi r3,r3,L1_CACHE_BYTES
563 bdnz 1b
564 sync /* wait for dcbst's to get to ram */
565 blr
566
567/*
568 * Write any modified data cache blocks out to memory and invalidate them.
569 * Does not invalidate the corresponding instruction cache blocks.
570 *
571 * flush_dcache_range(unsigned long start, unsigned long stop)
572 */
573_GLOBAL(flush_dcache_range)
574 li r5,L1_CACHE_BYTES-1
575 andc r3,r3,r5
576 subf r4,r3,r4
577 add r4,r4,r5
578 srwi. r4,r4,L1_CACHE_SHIFT
579 beqlr
580 mtctr r4
581
5821: dcbf 0,r3
583 addi r3,r3,L1_CACHE_BYTES
584 bdnz 1b
585 sync /* wait for dcbst's to get to ram */
586 blr
587
588/*
589 * Like above, but invalidate the D-cache. This is used by the 8xx
590 * to invalidate the cache so the PPC core doesn't get stale data
591 * from the CPM (no cache snooping here :-).
592 *
593 * invalidate_dcache_range(unsigned long start, unsigned long stop)
594 */
/* NOTE: dcbi discards dirty data without writing it back; callers
 * must guarantee no wanted modified data is in the range. */
595_GLOBAL(invalidate_dcache_range)
596 li r5,L1_CACHE_BYTES-1
597 andc r3,r3,r5
598 subf r4,r3,r4
599 add r4,r4,r5
600 srwi. r4,r4,L1_CACHE_SHIFT
601 beqlr
602 mtctr r4
603
6041: dcbi 0,r3
605 addi r3,r3,L1_CACHE_BYTES
606 bdnz 1b
607 sync /* wait for dcbi's to get to ram */
608 blr
609
610#ifdef CONFIG_NOT_COHERENT_CACHE
611/*
612 * 40x cores have 8K or 16K dcache and 32 byte line size.
613 * 44x has a 32K dcache and 32 byte line size.
614 * 8xx has 1, 2, 4, 8K variants.
615 * For now, cover the worst case of the 44x.
616 * Must be called with external interrupts disabled.
617 */
618#define CACHE_NWAYS 64
619#define CACHE_NLINES 16
620
/* Displace the whole dcache by reading 2x its size of kernel text;
 * dirty lines are written back as they are evicted. */
621_GLOBAL(flush_dcache_all)
622 li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
623 mtctr r4
624 lis r5, KERNELBASE@h
6251: lwz r3, 0(r5) /* Load one word from every line */
626 addi r5, r5, L1_CACHE_BYTES
627 bdnz 1b
628 blr
629#endif /* CONFIG_NOT_COHERENT_CACHE */
630
631/*
632 * Flush a particular page from the data cache to RAM.
633 * Note: this is necessary because the instruction cache does *not*
634 * snoop from the data cache.
635 * This is a no-op on the 601 which has a unified cache.
636 *
637 * void __flush_dcache_icache(void *page)
638 */
639_GLOBAL(__flush_dcache_icache)
640BEGIN_FTR_SECTION
641 blr /* for 601, do nothing */
642END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
 /* mask off the in-page offset to get the 4K page base */
643 rlwinm r3,r3,0,0,19 /* Get page base address */
644 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
645 mtctr r4
646 mr r6,r3
6470: dcbst 0,r3 /* Write line to ram */
648 addi r3,r3,L1_CACHE_BYTES
649 bdnz 0b
650 sync
651 mtctr r4
6521: icbi 0,r6
653 addi r6,r6,L1_CACHE_BYTES
654 bdnz 1b
655 sync
656 isync
657 blr
658
659/*
660 * Flush a particular page from the data cache to RAM, identified
661 * by its physical address. We turn off the MMU so we can just use
662 * the physical address (this may be a highmem page without a kernel
663 * mapping).
664 *
665 * void __flush_dcache_icache_phys(unsigned long physaddr)
666 */
667_GLOBAL(__flush_dcache_icache_phys)
668BEGIN_FTR_SECTION
669 blr /* for 601, do nothing */
670END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
 /* Clear MSR:DR so data accesses use real (physical) addressing */
671 mfmsr r10
672 rlwinm r0,r10,0,28,26 /* clear DR */
673 mtmsr r0
674 isync
675 rlwinm r3,r3,0,0,19 /* Get page base address */
676 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
677 mtctr r4
678 mr r6,r3
6790: dcbst 0,r3 /* Write line to ram */
680 addi r3,r3,L1_CACHE_BYTES
681 bdnz 0b
682 sync
683 mtctr r4
6841: icbi 0,r6
685 addi r6,r6,L1_CACHE_BYTES
686 bdnz 1b
687 sync
688 mtmsr r10 /* restore DR */
689 isync
690 blr
691
692/*
693 * Clear pages using the dcbz instruction, which doesn't cause any
694 * memory traffic (except to write out any cache lines which get
695 * displaced). This only works on cacheable memory.
696 *
697 * void clear_pages(void *page, int order) ;
698 */
/* r3 = page-aligned start, r4 = order (clears 2^order pages).
 * 8xx cannot use dcbz here, so it stores zero words explicitly. */
699_GLOBAL(clear_pages)
700 li r0,4096/L1_CACHE_BYTES
701 slw r0,r0,r4
702 mtctr r0
703#ifdef CONFIG_8xx
704 li r4, 0
7051: stw r4, 0(r3)
706 stw r4, 4(r3)
707 stw r4, 8(r3)
708 stw r4, 12(r3)
709#else
7101: dcbz 0,r3
711#endif
712 addi r3,r3,L1_CACHE_BYTES
713 bdnz 1b
714 blr
715
716/*
717 * Copy a whole page. We use the dcbz instruction on the destination
718 * to reduce memory traffic (it eliminates the unnecessary reads of
719 * the destination into cache). This requires that the destination
720 * is cacheable.
721 */
/* Copies one cache line per iteration with pre-update addressing,
 * hence the -4 bias on both pointers before the loop. */
722#define COPY_16_BYTES \
723 lwz r6,4(r4); \
724 lwz r7,8(r4); \
725 lwz r8,12(r4); \
726 lwzu r9,16(r4); \
727 stw r6,4(r3); \
728 stw r7,8(r3); \
729 stw r8,12(r3); \
730 stwu r9,16(r3)
731
/* copy_page(void *to, void *from): r3 = dest, r4 = src.
 * Non-8xx path prefetches MAX_COPY_PREFETCH lines ahead with dcbt
 * and dcbz's each destination line; cr0.eq tracks whether the final
 * (non-prefetching) pass has run yet. */
732_GLOBAL(copy_page)
733 addi r3,r3,-4
734 addi r4,r4,-4
735
736#ifdef CONFIG_8xx
737 /* don't use prefetch on 8xx */
738 li r0,4096/L1_CACHE_BYTES
739 mtctr r0
7401: COPY_16_BYTES
741 bdnz 1b
742 blr
743
744#else /* not 8xx, we can prefetch */
745 li r5,4
746
747#if MAX_COPY_PREFETCH > 1
748 li r0,MAX_COPY_PREFETCH
749 li r11,4
750 mtctr r0
75111: dcbt r11,r4
752 addi r11,r11,L1_CACHE_BYTES
753 bdnz 11b
754#else /* MAX_COPY_PREFETCH == 1 */
755 dcbt r5,r4
756 li r11,L1_CACHE_BYTES+4
757#endif /* MAX_COPY_PREFETCH */
758 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
759 crclr 4*cr0+eq
7602:
761 mtctr r0
7621:
763 dcbt r11,r4
764 dcbz r5,r3
765 COPY_16_BYTES
766#if L1_CACHE_BYTES >= 32
767 COPY_16_BYTES
768#if L1_CACHE_BYTES >= 64
769 COPY_16_BYTES
770 COPY_16_BYTES
771#if L1_CACHE_BYTES >= 128
772 COPY_16_BYTES
773 COPY_16_BYTES
774 COPY_16_BYTES
775 COPY_16_BYTES
776#endif
777#endif
778#endif
779 bdnz 1b
780 beqlr
781 crnot 4*cr0+eq,4*cr0+eq
782 li r0,MAX_COPY_PREFETCH
783 li r11,4
784 b 2b
785#endif /* CONFIG_8xx */
786
787/*
788 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
789 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
790 */
/* lwarx/stwcx. retry loops: atomically *addr &= ~mask / *addr |= mask */
791_GLOBAL(atomic_clear_mask)
79210: lwarx r5,0,r4
793 andc r5,r5,r3
794 PPC405_ERR77(0,r4)
795 stwcx. r5,0,r4
796 bne- 10b
797 blr
798_GLOBAL(atomic_set_mask)
79910: lwarx r5,0,r4
800 or r5,r5,r3
801 PPC405_ERR77(0,r4)
802 stwcx. r5,0,r4
803 bne- 10b
804 blr
805
806/*
807 * I/O string operations
808 *
809 * insb(port, buf, len)
810 * outsb(port, buf, len)
811 * insw(port, buf, len)
812 * outsw(port, buf, len)
813 * insl(port, buf, len)
814 * outsl(port, buf, len)
815 * insw_ns(port, buf, len)
816 * outsw_ns(port, buf, len)
817 * insl_ns(port, buf, len)
818 * outsl_ns(port, buf, len)
819 *
820 * The *_ns versions don't do byte-swapping.
821 */
/* Common shape: r3 = port address, r4 = buffer, r5 = count.
 * Each returns immediately for count <= 0 (blelr-); eieio orders
 * the repeated accesses to the (non-cacheable) I/O location.
 * The byte-swapping variants use lhbrx/sthbrx/lwbrx/stwbrx. */
822_GLOBAL(_insb)
823 cmpwi 0,r5,0
824 mtctr r5
825 subi r4,r4,1
826 blelr-
82700: lbz r5,0(r3)
828 eieio
829 stbu r5,1(r4)
830 bdnz 00b
831 blr
832
833_GLOBAL(_outsb)
834 cmpwi 0,r5,0
835 mtctr r5
836 subi r4,r4,1
837 blelr-
83800: lbzu r5,1(r4)
839 stb r5,0(r3)
840 eieio
841 bdnz 00b
842 blr
843
844_GLOBAL(_insw)
845 cmpwi 0,r5,0
846 mtctr r5
847 subi r4,r4,2
848 blelr-
84900: lhbrx r5,0,r3
850 eieio
851 sthu r5,2(r4)
852 bdnz 00b
853 blr
854
855_GLOBAL(_outsw)
856 cmpwi 0,r5,0
857 mtctr r5
858 subi r4,r4,2
859 blelr-
86000: lhzu r5,2(r4)
861 eieio
862 sthbrx r5,0,r3
863 bdnz 00b
864 blr
865
866_GLOBAL(_insl)
867 cmpwi 0,r5,0
868 mtctr r5
869 subi r4,r4,4
870 blelr-
87100: lwbrx r5,0,r3
872 eieio
873 stwu r5,4(r4)
874 bdnz 00b
875 blr
876
877_GLOBAL(_outsl)
878 cmpwi 0,r5,0
879 mtctr r5
880 subi r4,r4,4
881 blelr-
88200: lwzu r5,4(r4)
883 stwbrx r5,0,r3
884 eieio
885 bdnz 00b
886 blr
887
888_GLOBAL(__ide_mm_insw)
889_GLOBAL(_insw_ns)
890 cmpwi 0,r5,0
891 mtctr r5
892 subi r4,r4,2
893 blelr-
89400: lhz r5,0(r3)
895 eieio
896 sthu r5,2(r4)
897 bdnz 00b
898 blr
899
900_GLOBAL(__ide_mm_outsw)
901_GLOBAL(_outsw_ns)
902 cmpwi 0,r5,0
903 mtctr r5
904 subi r4,r4,2
905 blelr-
90600: lhzu r5,2(r4)
907 sth r5,0(r3)
908 eieio
909 bdnz 00b
910 blr
911
912_GLOBAL(__ide_mm_insl)
913_GLOBAL(_insl_ns)
914 cmpwi 0,r5,0
915 mtctr r5
916 subi r4,r4,4
917 blelr-
91800: lwz r5,0(r3)
919 eieio
920 stwu r5,4(r4)
921 bdnz 00b
922 blr
923
924_GLOBAL(__ide_mm_outsl)
925_GLOBAL(_outsl_ns)
926 cmpwi 0,r5,0
927 mtctr r5
928 subi r4,r4,4
929 blelr-
93000: lwzu r5,4(r4)
931 stw r5,0(r3)
932 eieio
933 bdnz 00b
934 blr
935
936/*
937 * Extended precision shifts.
938 *
939 * Updated to be valid for shift counts from 0 to 63 inclusive.
940 * -- Gabriel
941 *
942 * R3/R4 has 64 bit value
943 * R5 has shift count
944 * result in R3/R4
945 *
946 * ashrdi3: arithmetic right shift (sign propagation)
947 * lshrdi3: logical right shift
948 * ashldi3: left shift
949 */
/* gcc runtime helpers for 64-bit shifts on 32-bit PowerPC:
 * r3 = most significant word, r4 = least significant word. */
950_GLOBAL(__ashrdi3)
951 subfic r6,r5,32
952 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
953 addi r7,r5,32 # could be xori, or addi with -32
954 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
955 rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
956 sraw r7,r3,r7 # t2 = MSW >> (count-32)
957 or r4,r4,r6 # LSW |= t1
958 slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
959 sraw r3,r3,r5 # MSW = MSW >> count
960 or r4,r4,r7 # LSW |= t2
961 blr
962
963_GLOBAL(__ashldi3)
964 subfic r6,r5,32
965 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
966 addi r7,r5,32 # could be xori, or addi with -32
967 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
968 slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
969 or r3,r3,r6 # MSW |= t1
970 slw r4,r4,r5 # LSW = LSW << count
971 or r3,r3,r7 # MSW |= t2
972 blr
973
974_GLOBAL(__lshrdi3)
975 subfic r6,r5,32
976 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
977 addi r7,r5,32 # could be xori, or addi with -32
978 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
979 srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
980 or r4,r4,r6 # LSW |= t1
981 srw r3,r3,r5 # MSW = MSW >> count
982 or r4,r4,r7 # LSW |= t2
983 blr
984
/* Branch-free abs(int): r4 = sign mask (0 or -1), then xor/sub */
985_GLOBAL(abs)
986 srawi r4,r3,31
987 xor r3,r3,r4
988 sub r3,r3,r4
989 blr
990
991_GLOBAL(_get_SP)
992 mr r3,r1 /* Close enough */
993 blr
994
995/*
996 * These are used in the alignment trap handler when emulating
997 * single-precision loads and stores.
998 * We restore and save the fpscr so the task gets the same result
999 * and exceptions as if the cpu had performed the load or store.
1000 */
1001
1002#ifdef CONFIG_PPC_FPU
/* cvt_fd(float *from, double *to, fpscr image at r5-4):
 * widen single to double through fr0, round-tripping the FPSCR */
1003_GLOBAL(cvt_fd)
1004 lfd 0,-4(r5) /* load up fpscr value */
1005 mtfsf 0xff,0
1006 lfs 0,0(r3)
1007 stfd 0,0(r4)
1008 mffs 0 /* save new fpscr value */
1009 stfd 0,-4(r5)
1010 blr
1011
/* cvt_df(double *from, float *to, fpscr image at r5-4):
 * narrow double to single through fr0, round-tripping the FPSCR */
1012_GLOBAL(cvt_df)
1013 lfd 0,-4(r5) /* load up fpscr value */
1014 mtfsf 0xff,0
1015 lfd 0,0(r3)
1016 stfs 0,0(r4)
1017 mffs 0 /* save new fpscr value */
1018 stfd 0,-4(r5)
1019 blr
1020#endif
1021
1022/*
1023 * Create a kernel thread
1024 * kernel_thread(fn, arg, flags)
1025 */
/* Issues a clone() syscall with CLONE_VM|CLONE_UNTRACED ORed into the
 * caller's flags.  The parent returns the child pid from sc; the child
 * builds a fresh stack frame, calls fn(arg), then exit()s with status 0
 * if fn ever returns. */
1026_GLOBAL(kernel_thread)
1027 stwu r1,-16(r1)
1028 stw r30,8(r1)
1029 stw r31,12(r1)
1030 mr r30,r3 /* function */
1031 mr r31,r4 /* argument */
1032 ori r3,r5,CLONE_VM /* flags */
1033 oris r3,r3,CLONE_UNTRACED>>16
1034 li r4,0 /* new sp (unused) */
1035 li r0,__NR_clone
1036 sc
1037 cmpwi 0,r3,0 /* parent or child? */
1038 bne 1f /* return if parent */
1039 li r0,0 /* make top-level stack frame */
1040 stwu r0,-16(r1)
1041 mtlr r30 /* fn addr in lr */
1042 mr r3,r31 /* load arg and call fn */
1043 PPC440EP_ERR42
1044 blrl
1045 li r0,__NR_exit /* exit if function returns */
1046 li r3,0
1047 sc
10481: lwz r30,8(r1)
1049 lwz r31,12(r1)
1050 addi r1,r1,16
1051 blr
1052
/* In-kernel execve(): negate the syscall error code on failure so the
 * caller sees the usual kernel -errno convention. */
1053_GLOBAL(execve)
1054 li r0,__NR_execve
1055 sc
1056 bnslr
1057 neg r3,r3
1058 blr
1059
1060/*
1061 * This routine is just here to keep GCC happy - sigh...
1062 */
1063_GLOBAL(__main)
1064 blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
new file mode 100644
index 000000000000..4775bed42cac
--- /dev/null
+++ b/arch/powerpc/kernel/misc_64.S
@@ -0,0 +1,899 @@
1/*
2 * arch/powerpc/kernel/misc64.S
3 *
4 * This file contains miscellaneous low-level functions.
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
8 * and Paul Mackerras.
9 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
10 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/sys.h>
21#include <asm/unistd.h>
22#include <asm/errno.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/cache.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30
31 .text
32
33/*
34 * Returns (address we are running at) - (address we were linked at)
35 * for use before the text and data are mapped to KERNELBASE.
36 */
37
38_GLOBAL(reloc_offset)
39 mflr r0
40 bl 1f
411: mflr r3
42 LOADADDR(r4,1b)
43 subf r3,r4,r3
44 mtlr r0
45 blr
46
47/*
48 * add_reloc_offset(x) returns x + reloc_offset().
49 */
50_GLOBAL(add_reloc_offset)
51 mflr r0
52 bl 1f
531: mflr r5
54 LOADADDR(r4,1b)
55 subf r5,r4,r5
56 add r3,r3,r5
57 mtlr r0
58 blr
59
60_GLOBAL(get_msr)
61 mfmsr r3
62 blr
63
64_GLOBAL(get_dar)
65 mfdar r3
66 blr
67
68_GLOBAL(get_srr0)
69 mfsrr0 r3
70 blr
71
72_GLOBAL(get_srr1)
73 mfsrr1 r3
74 blr
75
76_GLOBAL(get_sp)
77 mr r3,r1
78 blr
79
80#ifdef CONFIG_IRQSTACKS
81_GLOBAL(call_do_softirq)
82 mflr r0
83 std r0,16(r1)
84 stdu r1,THREAD_SIZE-112(r3)
85 mr r1,r3
86 bl .__do_softirq
87 ld r1,0(r1)
88 ld r0,16(r1)
89 mtlr r0
90 blr
91
92_GLOBAL(call_handle_IRQ_event)
93 mflr r0
94 std r0,16(r1)
95 stdu r1,THREAD_SIZE-112(r6)
96 mr r1,r6
97 bl .handle_IRQ_event
98 ld r1,0(r1)
99 ld r0,16(r1)
100 mtlr r0
101 blr
102#endif /* CONFIG_IRQSTACKS */
103
104 /*
105 * To be called by C code which needs to do some operations with MMU
106 * disabled. Note that interrupts have to be disabled by the caller
107 * prior to calling us. The code called _MUST_ be in the RMO of course
108 * and part of the linear mapping as we don't attempt to translate the
109 * stack pointer at all. The function is called with the stack switched
110 * to this CPU emergency stack
111 *
112 * prototype is void *call_with_mmu_off(void *func, void *data);
113 *
114 * the called function is expected to be of the form
115 *
116 * void *called(void *data);
117 */
118_GLOBAL(call_with_mmu_off)
119 mflr r0 /* get link, save it on stackframe */
120 std r0,16(r1)
121 mr r1,r5 /* save old stack ptr */
122 ld r1,PACAEMERGSP(r13) /* get emerg. stack */
123 subi r1,r1,STACK_FRAME_OVERHEAD
124 std r0,16(r1) /* save link on emerg. stack */
125 std r5,0(r1) /* save old stack ptr in backchain */
126 ld r3,0(r3) /* get to real function ptr (assume same TOC) */
127 bl 2f /* we need LR to return, continue at label 2 */
128
129 ld r0,16(r1) /* we return here from the call, get LR and */
130 ld r1,0(r1) /* .. old stack ptr */
131 mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
132 mfmsr r4
133 ori r4,r4,MSR_IR|MSR_DR
134 mtspr SPRN_SRR1,r4
135 rfid
136
1372: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
138 mr r3,r4 /* get parameter */
139 mfmsr r0
140 ori r0,r0,MSR_IR|MSR_DR
141 xori r0,r0,MSR_IR|MSR_DR
142 mtspr SPRN_SRR1,r0
143 rfid
144
145
146 .section ".toc","aw"
147PPC64_CACHES:
148 .tc ppc64_caches[TC],ppc64_caches
149 .section ".text"
150
151/*
152 * Write any modified data cache blocks out to memory
153 * and invalidate the corresponding instruction cache blocks.
154 *
155 * flush_icache_range(unsigned long start, unsigned long stop)
156 *
157 * flush all bytes from start through stop-1 inclusive
158 */
159
160_KPROBE(__flush_icache_range)
161
162/*
163 * Flush the data cache to memory
164 *
165 * Different systems have different cache line sizes
166 * and in some cases i-cache and d-cache line sizes differ from
167 * each other.
168 */
169 ld r10,PPC64_CACHES@toc(r2)
170 lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
171 addi r5,r7,-1
172 andc r6,r3,r5 /* round low to line bdy */
173 subf r8,r6,r4 /* compute length */
174 add r8,r8,r5 /* ensure we get enough */
175 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
176 srw. r8,r8,r9 /* compute line count */
177 beqlr /* nothing to do? */
178 mtctr r8
1791: dcbst 0,r6
180 add r6,r6,r7
181 bdnz 1b
182 sync
183
184/* Now invalidate the instruction cache */
185
186 lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
187 addi r5,r7,-1
188 andc r6,r3,r5 /* round low to line bdy */
189 subf r8,r6,r4 /* compute length */
190 add r8,r8,r5
191 lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
192 srw. r8,r8,r9 /* compute line count */
193 beqlr /* nothing to do? */
194 mtctr r8
1952: icbi 0,r6
196 add r6,r6,r7
197 bdnz 2b
198 isync
199 blr
200 .previous .text
201/*
202 * Like above, but only do the D-cache.
203 *
204 * flush_dcache_range(unsigned long start, unsigned long stop)
205 *
206 * flush all bytes from start to stop-1 inclusive
207 */
208_GLOBAL(flush_dcache_range)
209
210/*
211 * Flush the data cache to memory
212 *
213 * Different systems have different cache line sizes
214 */
215 ld r10,PPC64_CACHES@toc(r2)
216 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
217 addi r5,r7,-1
218 andc r6,r3,r5 /* round low to line bdy */
219 subf r8,r6,r4 /* compute length */
220 add r8,r8,r5 /* ensure we get enough */
221 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
222 srw. r8,r8,r9 /* compute line count */
223 beqlr /* nothing to do? */
224 mtctr r8
2250: dcbst 0,r6
226 add r6,r6,r7
227 bdnz 0b
228 sync
229 blr
230
231/*
232 * Like above, but works on non-mapped physical addresses.
233 * Use only for non-LPAR setups ! It also assumes real mode
234 * is cacheable. Used for flushing out the DART before using
235 * it as uncacheable memory
236 *
237 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
238 *
239 * flush all bytes from start to stop-1 inclusive
240 */
241_GLOBAL(flush_dcache_phys_range)
242 ld r10,PPC64_CACHES@toc(r2)
243 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
244 addi r5,r7,-1
245 andc r6,r3,r5 /* round low to line bdy */
246 subf r8,r6,r4 /* compute length */
247 add r8,r8,r5 /* ensure we get enough */
248 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
249 srw. r8,r8,r9 /* compute line count */
250 beqlr /* nothing to do? */
251 mfmsr r5 /* Disable MMU Data Relocation */
252 ori r0,r5,MSR_DR
253 xori r0,r0,MSR_DR
254 sync
255 mtmsr r0
256 sync
257 isync
258 mtctr r8
2590: dcbst 0,r6
260 add r6,r6,r7
261 bdnz 0b
262 sync
263 isync
264 mtmsr r5 /* Re-enable MMU Data Relocation */
265 sync
266 isync
267 blr
268
269_GLOBAL(flush_inval_dcache_range)
270 ld r10,PPC64_CACHES@toc(r2)
271 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
272 addi r5,r7,-1
273 andc r6,r3,r5 /* round low to line bdy */
274 subf r8,r6,r4 /* compute length */
275 add r8,r8,r5 /* ensure we get enough */
276 lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
277 srw. r8,r8,r9 /* compute line count */
278 beqlr /* nothing to do? */
279 sync
280 isync
281 mtctr r8
2820: dcbf 0,r6
283 add r6,r6,r7
284 bdnz 0b
285 sync
286 isync
287 blr
288
289
290/*
291 * Flush a particular page from the data cache to RAM.
292 * Note: this is necessary because the instruction cache does *not*
293 * snoop from the data cache.
294 *
295 * void __flush_dcache_icache(void *page)
296 */
297_GLOBAL(__flush_dcache_icache)
298/*
299 * Flush the data cache to memory
300 *
301 * Different systems have different cache line sizes
302 */
303
304/* Flush the dcache */
305 ld r7,PPC64_CACHES@toc(r2)
306 clrrdi r3,r3,PAGE_SHIFT /* Page align */
307 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
308 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
309 mr r6,r3
310 mtctr r4
3110: dcbst 0,r6
312 add r6,r6,r5
313 bdnz 0b
314 sync
315
316/* Now invalidate the icache */
317
318 lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
319 lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
320 mtctr r4
3211: icbi 0,r3
322 add r3,r3,r5
323 bdnz 1b
324 isync
325 blr
326
327/*
328 * I/O string operations
329 *
330 * insb(port, buf, len)
331 * outsb(port, buf, len)
332 * insw(port, buf, len)
333 * outsw(port, buf, len)
334 * insl(port, buf, len)
335 * outsl(port, buf, len)
336 * insw_ns(port, buf, len)
337 * outsw_ns(port, buf, len)
338 * insl_ns(port, buf, len)
339 * outsl_ns(port, buf, len)
340 *
341 * The *_ns versions don't do byte-swapping.
342 */
343_GLOBAL(_insb)
344 cmpwi 0,r5,0
345 mtctr r5
346 subi r4,r4,1
347 blelr-
34800: lbz r5,0(r3)
349 eieio
350 stbu r5,1(r4)
351 bdnz 00b
352 twi 0,r5,0
353 isync
354 blr
355
356_GLOBAL(_outsb)
357 cmpwi 0,r5,0
358 mtctr r5
359 subi r4,r4,1
360 blelr-
36100: lbzu r5,1(r4)
362 stb r5,0(r3)
363 bdnz 00b
364 sync
365 blr
366
367_GLOBAL(_insw)
368 cmpwi 0,r5,0
369 mtctr r5
370 subi r4,r4,2
371 blelr-
37200: lhbrx r5,0,r3
373 eieio
374 sthu r5,2(r4)
375 bdnz 00b
376 twi 0,r5,0
377 isync
378 blr
379
380_GLOBAL(_outsw)
381 cmpwi 0,r5,0
382 mtctr r5
383 subi r4,r4,2
384 blelr-
38500: lhzu r5,2(r4)
386 sthbrx r5,0,r3
387 bdnz 00b
388 sync
389 blr
390
391_GLOBAL(_insl)
392 cmpwi 0,r5,0
393 mtctr r5
394 subi r4,r4,4
395 blelr-
39600: lwbrx r5,0,r3
397 eieio
398 stwu r5,4(r4)
399 bdnz 00b
400 twi 0,r5,0
401 isync
402 blr
403
404_GLOBAL(_outsl)
405 cmpwi 0,r5,0
406 mtctr r5
407 subi r4,r4,4
408 blelr-
40900: lwzu r5,4(r4)
410 stwbrx r5,0,r3
411 bdnz 00b
412 sync
413 blr
414
415/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
416_GLOBAL(_insw_ns)
417 cmpwi 0,r5,0
418 mtctr r5
419 subi r4,r4,2
420 blelr-
42100: lhz r5,0(r3)
422 eieio
423 sthu r5,2(r4)
424 bdnz 00b
425 twi 0,r5,0
426 isync
427 blr
428
429/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
430_GLOBAL(_outsw_ns)
431 cmpwi 0,r5,0
432 mtctr r5
433 subi r4,r4,2
434 blelr-
43500: lhzu r5,2(r4)
436 sth r5,0(r3)
437 bdnz 00b
438 sync
439 blr
440
441_GLOBAL(_insl_ns)
442 cmpwi 0,r5,0
443 mtctr r5
444 subi r4,r4,4
445 blelr-
44600: lwz r5,0(r3)
447 eieio
448 stwu r5,4(r4)
449 bdnz 00b
450 twi 0,r5,0
451 isync
452 blr
453
454_GLOBAL(_outsl_ns)
455 cmpwi 0,r5,0
456 mtctr r5
457 subi r4,r4,4
458 blelr-
45900: lwzu r5,4(r4)
460 stw r5,0(r3)
461 bdnz 00b
462 sync
463 blr
464
465
466_GLOBAL(cvt_fd)
467 lfd 0,0(r5) /* load up fpscr value */
468 mtfsf 0xff,0
469 lfs 0,0(r3)
470 stfd 0,0(r4)
471 mffs 0 /* save new fpscr value */
472 stfd 0,0(r5)
473 blr
474
475_GLOBAL(cvt_df)
476 lfd 0,0(r5) /* load up fpscr value */
477 mtfsf 0xff,0
478 lfd 0,0(r3)
479 stfs 0,0(r4)
480 mffs 0 /* save new fpscr value */
481 stfd 0,0(r5)
482 blr
483
484/*
485 * identify_cpu and calls setup_cpu
486 * In: r3 = base of the cpu_specs array
487 * r4 = address of cur_cpu_spec
488 * r5 = relocation offset
489 */
490_GLOBAL(identify_cpu)
491 mfpvr r7
4921:
493 lwz r8,CPU_SPEC_PVR_MASK(r3)
494 and r8,r8,r7
495 lwz r9,CPU_SPEC_PVR_VALUE(r3)
496 cmplw 0,r9,r8
497 beq 1f
498 addi r3,r3,CPU_SPEC_ENTRY_SIZE
499 b 1b
5001:
501 sub r0,r3,r5
502 std r0,0(r4)
503 ld r4,CPU_SPEC_SETUP(r3)
504 add r4,r4,r5
505 ld r4,0(r4)
506 add r4,r4,r5
507 mtctr r4
508 /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
509 mr r4,r3
510 mr r3,r5
511 bctr
512
513/*
514 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
515 * and writes nop's over sections of code that don't apply for this cpu.
516 * r3 = data offset (not changed)
517 */
518_GLOBAL(do_cpu_ftr_fixups)
519 /* Get CPU 0 features */
520 LOADADDR(r6,cur_cpu_spec)
521 sub r6,r6,r3
522 ld r4,0(r6)
523 sub r4,r4,r3
524 ld r4,CPU_SPEC_FEATURES(r4)
525 /* Get the fixup table */
526 LOADADDR(r6,__start___ftr_fixup)
527 sub r6,r6,r3
528 LOADADDR(r7,__stop___ftr_fixup)
529 sub r7,r7,r3
530 /* Do the fixup */
5311: cmpld r6,r7
532 bgelr
533 addi r6,r6,32
534 ld r8,-32(r6) /* mask */
535 and r8,r8,r4
536 ld r9,-24(r6) /* value */
537 cmpld r8,r9
538 beq 1b
539 ld r8,-16(r6) /* section begin */
540 ld r9,-8(r6) /* section end */
541 subf. r9,r8,r9
542 beq 1b
543 /* write nops over the section of code */
544 /* todo: if large section, add a branch at the start of it */
545 srwi r9,r9,2
546 mtctr r9
547 sub r8,r8,r3
548 lis r0,0x60000000@h /* nop */
5493: stw r0,0(r8)
550 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
551 beq 2f
552 dcbst 0,r8 /* suboptimal, but simpler */
553 sync
554 icbi 0,r8
5552: addi r8,r8,4
556 bdnz 3b
557 sync /* additional sync needed on g4 */
558 isync
559 b 1b
560
561#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
562/*
563 * Do an IO access in real mode
564 */
565_GLOBAL(real_readb)
566 mfmsr r7
567 ori r0,r7,MSR_DR
568 xori r0,r0,MSR_DR
569 sync
570 mtmsrd r0
571 sync
572 isync
573 mfspr r6,SPRN_HID4
574 rldicl r5,r6,32,0
575 ori r5,r5,0x100
576 rldicl r5,r5,32,0
577 sync
578 mtspr SPRN_HID4,r5
579 isync
580 slbia
581 isync
582 lbz r3,0(r3)
583 sync
584 mtspr SPRN_HID4,r6
585 isync
586 slbia
587 isync
588 mtmsrd r7
589 sync
590 isync
591 blr
592
593 /*
594 * Do an IO access in real mode
595 */
596_GLOBAL(real_writeb)
597 mfmsr r7
598 ori r0,r7,MSR_DR
599 xori r0,r0,MSR_DR
600 sync
601 mtmsrd r0
602 sync
603 isync
604 mfspr r6,SPRN_HID4
605 rldicl r5,r6,32,0
606 ori r5,r5,0x100
607 rldicl r5,r5,32,0
608 sync
609 mtspr SPRN_HID4,r5
610 isync
611 slbia
612 isync
613 stb r3,0(r4)
614 sync
615 mtspr SPRN_HID4,r6
616 isync
617 slbia
618 isync
619 mtmsrd r7
620 sync
621 isync
622 blr
623#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
624
625/*
626 * Create a kernel thread
627 * kernel_thread(fn, arg, flags)
628 */
629_GLOBAL(kernel_thread)
630 std r29,-24(r1)
631 std r30,-16(r1)
632 stdu r1,-STACK_FRAME_OVERHEAD(r1)
633 mr r29,r3
634 mr r30,r4
635 ori r3,r5,CLONE_VM /* flags */
636 oris r3,r3,(CLONE_UNTRACED>>16)
637 li r4,0 /* new sp (unused) */
638 li r0,__NR_clone
639 sc
640 cmpdi 0,r3,0 /* parent or child? */
641 bne 1f /* return if parent */
642 li r0,0
643 stdu r0,-STACK_FRAME_OVERHEAD(r1)
644 ld r2,8(r29)
645 ld r29,0(r29)
646 mtlr r29 /* fn addr in lr */
647 mr r3,r30 /* load arg and call fn */
648 blrl
649 li r0,__NR_exit /* exit after child exits */
650 li r3,0
651 sc
6521: addi r1,r1,STACK_FRAME_OVERHEAD
653 ld r29,-24(r1)
654 ld r30,-16(r1)
655 blr
656
657/*
658 * disable_kernel_fp()
659 * Disable the FPU.
660 */
661_GLOBAL(disable_kernel_fp)
662 mfmsr r3
663 rldicl r0,r3,(63-MSR_FP_LG),1
664 rldicl r3,r0,(MSR_FP_LG+1),0
665 mtmsrd r3 /* disable use of fpu now */
666 isync
667 blr
668
669#ifdef CONFIG_ALTIVEC
670
671#if 0 /* this has no callers for now */
672/*
673 * disable_kernel_altivec()
674 * Disable the VMX.
675 */
676_GLOBAL(disable_kernel_altivec)
677 mfmsr r3
678 rldicl r0,r3,(63-MSR_VEC_LG),1
679 rldicl r3,r0,(MSR_VEC_LG+1),0
680 mtmsrd r3 /* disable use of VMX now */
681 isync
682 blr
683#endif /* 0 */
684
685/*
686 * giveup_altivec(tsk)
687 * Disable VMX for the task given as the argument,
688 * and save the vector registers in its thread_struct.
689 * Enables the VMX for use in the kernel on return.
690 */
691_GLOBAL(giveup_altivec)
692 mfmsr r5
693 oris r5,r5,MSR_VEC@h
694 mtmsrd r5 /* enable use of VMX now */
695 isync
696 cmpdi 0,r3,0
697 beqlr- /* if no previous owner, done */
698 addi r3,r3,THREAD /* want THREAD of task */
699 ld r5,PT_REGS(r3)
700 cmpdi 0,r5,0
701 SAVE_32VRS(0,r4,r3)
702 mfvscr vr0
703 li r4,THREAD_VSCR
704 stvx vr0,r4,r3
705 beq 1f
706 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
707 lis r3,MSR_VEC@h
708 andc r4,r4,r3 /* disable FP for previous task */
709 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7101:
711#ifndef CONFIG_SMP
712 li r5,0
713 ld r4,last_task_used_altivec@got(r2)
714 std r5,0(r4)
715#endif /* CONFIG_SMP */
716 blr
717
718#endif /* CONFIG_ALTIVEC */
719
720_GLOBAL(__setup_cpu_power3)
721 blr
722
723_GLOBAL(execve)
724 li r0,__NR_execve
725 sc
726 bnslr
727 neg r3,r3
728 blr
729
730/* kexec_wait(phys_cpu)
731 *
732 * wait for the flag to change, indicating this kernel is going away but
733 * the slave code for the next one is at addresses 0 to 100.
734 *
735 * This is used by all slaves.
736 *
737 * Physical (hardware) cpu id should be in r3.
738 */
739_GLOBAL(kexec_wait)
740 bl 1f
7411: mflr r5
742 addi r5,r5,kexec_flag-1b
743
74499: HMT_LOW
745#ifdef CONFIG_KEXEC /* use no memory without kexec */
746 lwz r4,0(r5)
747 cmpwi 0,r4,0
748 bnea 0x60
749#endif
750 b 99b
751
752/* this can be in text because we won't change it until we are
753 * running in real anyways
754 */
755kexec_flag:
756 .long 0
757
758
759#ifdef CONFIG_KEXEC
760
761/* kexec_smp_wait(void)
762 *
763 * call with interrupts off
764 * note: this is a terminal routine, it does not save lr
765 *
766 * get phys id from paca
767 * set paca id to -1 to say we got here
768 * switch to real mode
769 * join other cpus in kexec_wait(phys_id)
770 */
771_GLOBAL(kexec_smp_wait)
772 lhz r3,PACAHWCPUID(r13)
773 li r4,-1
774 sth r4,PACAHWCPUID(r13) /* let others know we left */
775 bl real_mode
776 b .kexec_wait
777
778/*
779 * switch to real mode (turn mmu off)
780 * we use the early kernel trick that the hardware ignores bits
781 * 0 and 1 (big endian) of the effective address in real mode
782 *
783 * don't overwrite r3 here, it is live for kexec_wait above.
784 */
785real_mode: /* assume normal blr return */
7861: li r9,MSR_RI
787 li r10,MSR_DR|MSR_IR
788 mflr r11 /* return address to SRR0 */
789 mfmsr r12
790 andc r9,r12,r9
791 andc r10,r12,r10
792
793 mtmsrd r9,1
794 mtspr SPRN_SRR1,r10
795 mtspr SPRN_SRR0,r11
796 rfid
797
798
799/*
800 * kexec_sequence(newstack, start, image, control, clear_all())
801 *
802 * does the grungy work with stack switching and real mode switches
803 * also does simple calls to other code
804 */
805
806_GLOBAL(kexec_sequence)
807 mflr r0
808 std r0,16(r1)
809
810 /* switch stacks to newstack -- &kexec_stack.stack */
811 stdu r1,THREAD_SIZE-112(r3)
812 mr r1,r3
813
814 li r0,0
815 std r0,16(r1)
816
817 /* save regs for local vars on new stack.
818 * yes, we won't go back, but ...
819 */
820 std r31,-8(r1)
821 std r30,-16(r1)
822 std r29,-24(r1)
823 std r28,-32(r1)
824 std r27,-40(r1)
825 std r26,-48(r1)
826 std r25,-56(r1)
827
828 stdu r1,-112-64(r1)
829
830 /* save args into preserved regs */
831 mr r31,r3 /* newstack (both) */
832 mr r30,r4 /* start (real) */
833 mr r29,r5 /* image (virt) */
834 mr r28,r6 /* control, unused */
835 mr r27,r7 /* clear_all() fn desc */
836 mr r26,r8 /* spare */
837 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
838
839 /* disable interrupts, we are overwriting kernel data next */
840 mfmsr r3
841 rlwinm r3,r3,0,17,15
842 mtmsrd r3,1
843
844 /* copy dest pages, flush whole dest image */
845 mr r3,r29
846 bl .kexec_copy_flush /* (image) */
847
848 /* turn off mmu */
849 bl real_mode
850
851 /* clear out hardware hash page table and tlb */
852 ld r5,0(r27) /* deref function descriptor */
853 mtctr r5
854 bctrl /* ppc_md.hash_clear_all(void); */
855
856/*
857 * kexec image calling is:
858 * the first 0x100 bytes of the entry point are copied to 0
859 *
860 * all slaves branch to slave = 0x60 (absolute)
861 * slave(phys_cpu_id);
862 *
863 * master goes to start = entry point
864 * start(phys_cpu_id, start, 0);
865 *
866 *
867 * a wrapper is needed to call existing kernels, here is an approximate
868 * description of one method:
869 *
870 * v2: (2.6.10)
871 * start will be near the boot_block (maybe 0x100 bytes before it?)
872 * it will have a 0x60, which will b to boot_block, where it will wait
873 * and 0 will store phys into struct boot-block and load r3 from there,
874 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
875 *
876 * v1: (2.6.9)
877 * boot block will have all cpus scanning device tree to see if they
878 * are the boot cpu ?????
879 * other device tree differences (prop sizes, va vs pa, etc)...
880 */
881
882 /* copy 0x100 bytes starting at start to 0 */
883 li r3,0
884 mr r4,r30
885 li r5,0x100
886 li r6,0
887 bl .copy_and_flush /* (dest, src, copy limit, start offset) */
8881: /* assume normal blr return */
889
890 /* release other cpus to the new kernel secondary start at 0x60 */
891 mflr r5
892 li r6,1
893 stw r6,kexec_flag-1b(5)
894 mr r3,r25 # my phys cpu
895 mr r4,r30 # start, aka phys mem offset
896 mtlr 4
897 li r5,0
898 blr /* image->start(physid, image->start, 0); */
899#endif /* CONFIG_KEXEC */
diff --git a/arch/ppc64/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 9f200f0f2ad5..766718814515 100644
--- a/arch/ppc64/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -184,6 +184,7 @@ void of_release_dev(struct device *dev)
184 struct of_device *ofdev; 184 struct of_device *ofdev;
185 185
186 ofdev = to_of_device(dev); 186 ofdev = to_of_device(dev);
187 of_node_put(ofdev->node);
187 kfree(ofdev); 188 kfree(ofdev);
188} 189}
189 190
@@ -244,7 +245,7 @@ struct of_device* of_platform_device_create(struct device_node *np,
244 return NULL; 245 return NULL;
245 memset(dev, 0, sizeof(*dev)); 246 memset(dev, 0, sizeof(*dev));
246 247
247 dev->node = np; 248 dev->node = of_node_get(np);
248 dev->dma_mask = 0xffffffffUL; 249 dev->dma_mask = 0xffffffffUL;
249 dev->dev.dma_mask = &dev->dma_mask; 250 dev->dev.dma_mask = &dev->dma_mask;
250 dev->dev.parent = parent; 251 dev->dev.parent = parent;
@@ -261,7 +262,6 @@ struct of_device* of_platform_device_create(struct device_node *np,
261 return dev; 262 return dev;
262} 263}
263 264
264
265EXPORT_SYMBOL(of_match_device); 265EXPORT_SYMBOL(of_match_device);
266EXPORT_SYMBOL(of_platform_bus_type); 266EXPORT_SYMBOL(of_platform_bus_type);
267EXPORT_SYMBOL(of_register_driver); 267EXPORT_SYMBOL(of_register_driver);
diff --git a/arch/ppc64/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index 63d9481c3ec2..2d333cc84082 100644
--- a/arch/ppc64/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -1,7 +1,10 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/pmc.c 2 * arch/powerpc/kernel/pmc.c
3 * 3 *
4 * Copyright (C) 2004 David Gibson, IBM Corporation. 4 * Copyright (C) 2004 David Gibson, IBM Corporation.
5 * Includes code formerly from arch/ppc/kernel/perfmon.c:
6 * Author: Andy Fleming
7 * Copyright (c) 2004 Freescale Semiconductor, Inc
5 * 8 *
6 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -17,6 +20,20 @@
17#include <asm/processor.h> 20#include <asm/processor.h>
18#include <asm/pmc.h> 21#include <asm/pmc.h>
19 22
23#if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
24static void dummy_perf(struct pt_regs *regs)
25{
26 unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
27
28 pmgc0 &= ~PMGC0_PMIE;
29 mtpmr(PMRN_PMGC0, pmgc0);
30}
31#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
32
33#ifndef MMCR0_PMAO
34#define MMCR0_PMAO 0
35#endif
36
20/* Ensure exceptions are disabled */ 37/* Ensure exceptions are disabled */
21static void dummy_perf(struct pt_regs *regs) 38static void dummy_perf(struct pt_regs *regs)
22{ 39{
@@ -25,6 +42,11 @@ static void dummy_perf(struct pt_regs *regs)
25 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO); 42 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
26 mtspr(SPRN_MMCR0, mmcr0); 43 mtspr(SPRN_MMCR0, mmcr0);
27} 44}
45#else
46static void dummy_perf(struct pt_regs *regs)
47{
48}
49#endif
28 50
29static DEFINE_SPINLOCK(pmc_owner_lock); 51static DEFINE_SPINLOCK(pmc_owner_lock);
30static void *pmc_owner_caller; /* mostly for debugging */ 52static void *pmc_owner_caller; /* mostly for debugging */
@@ -66,11 +88,12 @@ void release_pmc_hardware(void)
66} 88}
67EXPORT_SYMBOL_GPL(release_pmc_hardware); 89EXPORT_SYMBOL_GPL(release_pmc_hardware);
68 90
91#ifdef CONFIG_PPC64
69void power4_enable_pmcs(void) 92void power4_enable_pmcs(void)
70{ 93{
71 unsigned long hid0; 94 unsigned long hid0;
72 95
73 hid0 = mfspr(HID0); 96 hid0 = mfspr(SPRN_HID0);
74 hid0 |= 1UL << (63 - 20); 97 hid0 |= 1UL << (63 - 20);
75 98
76 /* POWER4 requires the following sequence */ 99 /* POWER4 requires the following sequence */
@@ -83,6 +106,7 @@ void power4_enable_pmcs(void)
83 "mfspr %0, %1\n" 106 "mfspr %0, %1\n"
84 "mfspr %0, %1\n" 107 "mfspr %0, %1\n"
85 "mfspr %0, %1\n" 108 "mfspr %0, %1\n"
86 "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0): 109 "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
87 "memory"); 110 "memory");
88} 111}
112#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
new file mode 100644
index 000000000000..254bf9c0b5bb
--- /dev/null
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -0,0 +1,280 @@
1#include <linux/config.h>
2#include <linux/module.h>
3#include <linux/threads.h>
4#include <linux/smp.h>
5#include <linux/sched.h>
6#include <linux/elfcore.h>
7#include <linux/string.h>
8#include <linux/interrupt.h>
9#include <linux/tty.h>
10#include <linux/vt_kern.h>
11#include <linux/nvram.h>
12#include <linux/console.h>
13#include <linux/irq.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/ide.h>
17#include <linux/pm.h>
18#include <linux/bitops.h>
19
20#include <asm/page.h>
21#include <asm/semaphore.h>
22#include <asm/processor.h>
23#include <asm/uaccess.h>
24#include <asm/io.h>
25#include <asm/ide.h>
26#include <asm/atomic.h>
27#include <asm/checksum.h>
28#include <asm/pgtable.h>
29#include <asm/tlbflush.h>
30#include <linux/adb.h>
31#include <linux/cuda.h>
32#include <linux/pmu.h>
33#include <asm/prom.h>
34#include <asm/system.h>
35#include <asm/pci-bridge.h>
36#include <asm/irq.h>
37#include <asm/pmac_feature.h>
38#include <asm/dma.h>
39#include <asm/machdep.h>
40#include <asm/hw_irq.h>
41#include <asm/nvram.h>
42#include <asm/mmu_context.h>
43#include <asm/backlight.h>
44#include <asm/time.h>
45#include <asm/cputable.h>
46#include <asm/btext.h>
47#include <asm/div64.h>
48#include <asm/xmon.h>
49
50#ifdef CONFIG_8xx
51#include <asm/commproc.h>
52#endif
53
54#ifdef CONFIG_PPC32
55extern void transfer_to_handler(void);
56extern void do_IRQ(struct pt_regs *regs);
57extern void machine_check_exception(struct pt_regs *regs);
58extern void alignment_exception(struct pt_regs *regs);
59extern void program_check_exception(struct pt_regs *regs);
60extern void single_step_exception(struct pt_regs *regs);
61extern int do_signal(sigset_t *, struct pt_regs *);
62extern int pmac_newworld;
63extern int sys_sigreturn(struct pt_regs *regs);
64
65EXPORT_SYMBOL(clear_pages);
66EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
67EXPORT_SYMBOL(DMA_MODE_READ);
68EXPORT_SYMBOL(DMA_MODE_WRITE);
69EXPORT_SYMBOL(__div64_32);
70
71EXPORT_SYMBOL(do_signal);
72EXPORT_SYMBOL(transfer_to_handler);
73EXPORT_SYMBOL(do_IRQ);
74EXPORT_SYMBOL(machine_check_exception);
75EXPORT_SYMBOL(alignment_exception);
76EXPORT_SYMBOL(program_check_exception);
77EXPORT_SYMBOL(single_step_exception);
78EXPORT_SYMBOL(sys_sigreturn);
79#endif
80
81#if defined(CONFIG_PPC_PREP)
82EXPORT_SYMBOL(_prep_type);
83EXPORT_SYMBOL(ucSystemType);
84#endif
85
86#if !defined(__INLINE_BITOPS)
87EXPORT_SYMBOL(set_bit);
88EXPORT_SYMBOL(clear_bit);
89EXPORT_SYMBOL(change_bit);
90EXPORT_SYMBOL(test_and_set_bit);
91EXPORT_SYMBOL(test_and_clear_bit);
92EXPORT_SYMBOL(test_and_change_bit);
93#endif /* __INLINE_BITOPS */
94
95EXPORT_SYMBOL(strcpy);
96EXPORT_SYMBOL(strncpy);
97EXPORT_SYMBOL(strcat);
98EXPORT_SYMBOL(strncat);
99EXPORT_SYMBOL(strchr);
100EXPORT_SYMBOL(strrchr);
101EXPORT_SYMBOL(strpbrk);
102EXPORT_SYMBOL(strstr);
103EXPORT_SYMBOL(strlen);
104EXPORT_SYMBOL(strnlen);
105EXPORT_SYMBOL(strcmp);
106EXPORT_SYMBOL(strncmp);
107EXPORT_SYMBOL(strcasecmp);
108
109EXPORT_SYMBOL(csum_partial);
110EXPORT_SYMBOL(csum_partial_copy_generic);
111EXPORT_SYMBOL(ip_fast_csum);
112EXPORT_SYMBOL(csum_tcpudp_magic);
113
114EXPORT_SYMBOL(__copy_tofrom_user);
115EXPORT_SYMBOL(__clear_user);
116EXPORT_SYMBOL(__strncpy_from_user);
117EXPORT_SYMBOL(__strnlen_user);
118
119EXPORT_SYMBOL(_insb);
120EXPORT_SYMBOL(_outsb);
121EXPORT_SYMBOL(_insw);
122EXPORT_SYMBOL(_outsw);
123EXPORT_SYMBOL(_insl);
124EXPORT_SYMBOL(_outsl);
125EXPORT_SYMBOL(_insw_ns);
126EXPORT_SYMBOL(_outsw_ns);
127EXPORT_SYMBOL(_insl_ns);
128EXPORT_SYMBOL(_outsl_ns);
129EXPORT_SYMBOL(ioremap);
130#ifdef CONFIG_44x
131EXPORT_SYMBOL(ioremap64);
132#endif
133EXPORT_SYMBOL(__ioremap);
134EXPORT_SYMBOL(iounmap);
135#ifdef CONFIG_PPC32
136EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
137#endif
138
139#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE))
140EXPORT_SYMBOL(ppc_ide_md);
141#endif
142
143#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
144EXPORT_SYMBOL(isa_io_base);
145EXPORT_SYMBOL(isa_mem_base);
146EXPORT_SYMBOL(pci_dram_offset);
147EXPORT_SYMBOL(pci_alloc_consistent);
148EXPORT_SYMBOL(pci_free_consistent);
149EXPORT_SYMBOL(pci_bus_io_base);
150EXPORT_SYMBOL(pci_bus_io_base_phys);
151EXPORT_SYMBOL(pci_bus_mem_base_phys);
152EXPORT_SYMBOL(pci_bus_to_hose);
153EXPORT_SYMBOL(pci_resource_to_bus);
154EXPORT_SYMBOL(pci_phys_to_bus);
155EXPORT_SYMBOL(pci_bus_to_phys);
156#endif /* CONFIG_PCI */
157
158#ifdef CONFIG_NOT_COHERENT_CACHE
159EXPORT_SYMBOL(flush_dcache_all);
160#endif
161
162EXPORT_SYMBOL(start_thread);
163EXPORT_SYMBOL(kernel_thread);
164
165EXPORT_SYMBOL(giveup_fpu);
166#ifdef CONFIG_ALTIVEC
167EXPORT_SYMBOL(giveup_altivec);
168#endif /* CONFIG_ALTIVEC */
169#ifdef CONFIG_SPE
170EXPORT_SYMBOL(giveup_spe);
171#endif /* CONFIG_SPE */
172
173#ifdef CONFIG_PPC64
174EXPORT_SYMBOL(__flush_icache_range);
175#else
176EXPORT_SYMBOL(flush_instruction_cache);
177EXPORT_SYMBOL(flush_icache_range);
178EXPORT_SYMBOL(flush_tlb_kernel_range);
179EXPORT_SYMBOL(flush_tlb_page);
180EXPORT_SYMBOL(_tlbie);
181#endif
182EXPORT_SYMBOL(flush_dcache_range);
183
184#ifdef CONFIG_SMP
185EXPORT_SYMBOL(smp_call_function);
186#ifdef CONFIG_PPC32
187EXPORT_SYMBOL(smp_hw_index);
188#endif
189#endif
190
191#ifdef CONFIG_ADB
192EXPORT_SYMBOL(adb_request);
193EXPORT_SYMBOL(adb_register);
194EXPORT_SYMBOL(adb_unregister);
195EXPORT_SYMBOL(adb_poll);
196EXPORT_SYMBOL(adb_try_handler_change);
197#endif /* CONFIG_ADB */
198#ifdef CONFIG_ADB_CUDA
199EXPORT_SYMBOL(cuda_request);
200EXPORT_SYMBOL(cuda_poll);
201#endif /* CONFIG_ADB_CUDA */
202#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
203EXPORT_SYMBOL(_machine);
204#endif
205#ifdef CONFIG_PPC_PMAC
206EXPORT_SYMBOL(sys_ctrler);
207#endif
208#ifdef CONFIG_VT
209EXPORT_SYMBOL(kd_mksound);
210#endif
211EXPORT_SYMBOL(to_tm);
212
213#ifdef CONFIG_PPC32
214long long __ashrdi3(long long, int);
215long long __ashldi3(long long, int);
216long long __lshrdi3(long long, int);
217EXPORT_SYMBOL(__ashrdi3);
218EXPORT_SYMBOL(__ashldi3);
219EXPORT_SYMBOL(__lshrdi3);
220#endif
221
222EXPORT_SYMBOL(memcpy);
223EXPORT_SYMBOL(memset);
224EXPORT_SYMBOL(memmove);
225EXPORT_SYMBOL(memscan);
226EXPORT_SYMBOL(memcmp);
227EXPORT_SYMBOL(memchr);
228
229#if defined(CONFIG_FB_VGA16_MODULE)
230EXPORT_SYMBOL(screen_info);
231#endif
232
233#ifdef CONFIG_PPC32
234EXPORT_SYMBOL(pm_power_off);
235EXPORT_SYMBOL(__delay);
236EXPORT_SYMBOL(timer_interrupt);
237EXPORT_SYMBOL(irq_desc);
238EXPORT_SYMBOL(tb_ticks_per_jiffy);
239EXPORT_SYMBOL(console_drivers);
240EXPORT_SYMBOL(cacheable_memcpy);
241#endif
242
243#ifdef CONFIG_XMON
244EXPORT_SYMBOL(xmon);
245EXPORT_SYMBOL(xmon_printf);
246#endif
247EXPORT_SYMBOL(__up);
248EXPORT_SYMBOL(__down);
249EXPORT_SYMBOL(__down_interruptible);
250
251#ifdef CONFIG_8xx
252EXPORT_SYMBOL(cpm_install_handler);
253EXPORT_SYMBOL(cpm_free_handler);
254#endif /* CONFIG_8xx */
255#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
256 defined(CONFIG_83xx)
257EXPORT_SYMBOL(__res);
258#endif
259
260#ifdef CONFIG_PPC32
261EXPORT_SYMBOL(next_mmu_context);
262EXPORT_SYMBOL(set_context);
263#endif
264
265#ifdef CONFIG_PPC_STD_MMU_32
266extern long mol_trampoline;
267EXPORT_SYMBOL(mol_trampoline); /* For MOL */
268EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
269EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
270#ifdef CONFIG_SMP
271extern int mmu_hash_lock;
272EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
273#endif /* CONFIG_SMP */
274extern long *intercept_table;
275EXPORT_SYMBOL(intercept_table);
276#endif /* CONFIG_PPC_STD_MMU_32 */
277#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
278EXPORT_SYMBOL(__mtdcr);
279EXPORT_SYMBOL(__mfdcr);
280#endif
diff --git a/arch/ppc64/kernel/process.c b/arch/powerpc/kernel/process.c
index 887005358eb1..047da1ae21fe 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/process.c 2 * arch/ppc/kernel/process.c
3 * 3 *
4 * Derived from "arch/i386/kernel/process.c" 4 * Derived from "arch/i386/kernel/process.c"
5 * Copyright (C) 1995 Linus Torvalds 5 * Copyright (C) 1995 Linus Torvalds
@@ -7,7 +7,7 @@
7 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and 7 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
8 * Paul Mackerras (paulus@cs.anu.edu.au) 8 * Paul Mackerras (paulus@cs.anu.edu.au)
9 * 9 *
10 * PowerPC version 10 * PowerPC version
11 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 11 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
12 * 12 *
13 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
@@ -17,7 +17,6 @@
17 */ 17 */
18 18
19#include <linux/config.h> 19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/errno.h> 20#include <linux/errno.h>
22#include <linux/sched.h> 21#include <linux/sched.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -26,15 +25,17 @@
26#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
27#include <linux/stddef.h> 26#include <linux/stddef.h>
28#include <linux/unistd.h> 27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/user.h> 30#include <linux/user.h>
31#include <linux/elf.h> 31#include <linux/elf.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/init_task.h>
34#include <linux/prctl.h> 33#include <linux/prctl.h>
35#include <linux/ptrace.h> 34#include <linux/init_task.h>
35#include <linux/module.h>
36#include <linux/kallsyms.h> 36#include <linux/kallsyms.h>
37#include <linux/interrupt.h> 37#include <linux/mqueue.h>
38#include <linux/hardirq.h>
38#include <linux/utsname.h> 39#include <linux/utsname.h>
39#include <linux/kprobes.h> 40#include <linux/kprobes.h>
40 41
@@ -44,21 +45,19 @@
44#include <asm/io.h> 45#include <asm/io.h>
45#include <asm/processor.h> 46#include <asm/processor.h>
46#include <asm/mmu.h> 47#include <asm/mmu.h>
47#include <asm/mmu_context.h>
48#include <asm/prom.h> 48#include <asm/prom.h>
49#include <asm/ppcdebug.h> 49#ifdef CONFIG_PPC64
50#include <asm/machdep.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/cputable.h>
53#include <asm/firmware.h> 50#include <asm/firmware.h>
54#include <asm/sections.h>
55#include <asm/tlbflush.h>
56#include <asm/time.h>
57#include <asm/plpar_wrappers.h> 51#include <asm/plpar_wrappers.h>
52#include <asm/time.h>
53#endif
54
55extern unsigned long _get_SP(void);
58 56
59#ifndef CONFIG_SMP 57#ifndef CONFIG_SMP
60struct task_struct *last_task_used_math = NULL; 58struct task_struct *last_task_used_math = NULL;
61struct task_struct *last_task_used_altivec = NULL; 59struct task_struct *last_task_used_altivec = NULL;
60struct task_struct *last_task_used_spe = NULL;
62#endif 61#endif
63 62
64/* 63/*
@@ -121,7 +120,6 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
121} 120}
122 121
123#ifdef CONFIG_ALTIVEC 122#ifdef CONFIG_ALTIVEC
124
125void enable_kernel_altivec(void) 123void enable_kernel_altivec(void)
126{ 124{
127 WARN_ON(preemptible()); 125 WARN_ON(preemptible());
@@ -130,7 +128,7 @@ void enable_kernel_altivec(void)
130 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) 128 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
131 giveup_altivec(current); 129 giveup_altivec(current);
132 else 130 else
133 giveup_altivec(NULL); /* just enables FP for kernel */ 131 giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
134#else 132#else
135 giveup_altivec(last_task_used_altivec); 133 giveup_altivec(last_task_used_altivec);
136#endif /* CONFIG_SMP */ 134#endif /* CONFIG_SMP */
@@ -161,9 +159,48 @@ int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
161 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs)); 159 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
162 return 1; 160 return 1;
163} 161}
164
165#endif /* CONFIG_ALTIVEC */ 162#endif /* CONFIG_ALTIVEC */
166 163
164#ifdef CONFIG_SPE
165
166void enable_kernel_spe(void)
167{
168 WARN_ON(preemptible());
169
170#ifdef CONFIG_SMP
171 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
172 giveup_spe(current);
173 else
174 giveup_spe(NULL); /* just enable SPE for kernel - force */
175#else
176 giveup_spe(last_task_used_spe);
177#endif /* __SMP __ */
178}
179EXPORT_SYMBOL(enable_kernel_spe);
180
181void flush_spe_to_thread(struct task_struct *tsk)
182{
183 if (tsk->thread.regs) {
184 preempt_disable();
185 if (tsk->thread.regs->msr & MSR_SPE) {
186#ifdef CONFIG_SMP
187 BUG_ON(tsk != current);
188#endif
189 giveup_spe(current);
190 }
191 preempt_enable();
192 }
193}
194
195int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
196{
197 flush_spe_to_thread(current);
198 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
199 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
200 return 1;
201}
202#endif /* CONFIG_SPE */
203
167static void set_dabr_spr(unsigned long val) 204static void set_dabr_spr(unsigned long val)
168{ 205{
169 mtspr(SPRN_DABR, val); 206 mtspr(SPRN_DABR, val);
@@ -173,24 +210,27 @@ int set_dabr(unsigned long dabr)
173{ 210{
174 int ret = 0; 211 int ret = 0;
175 212
213#ifdef CONFIG_PPC64
176 if (firmware_has_feature(FW_FEATURE_XDABR)) { 214 if (firmware_has_feature(FW_FEATURE_XDABR)) {
177 /* We want to catch accesses from kernel and userspace */ 215 /* We want to catch accesses from kernel and userspace */
178 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER; 216 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
179 ret = plpar_set_xdabr(dabr, flags); 217 ret = plpar_set_xdabr(dabr, flags);
180 } else if (firmware_has_feature(FW_FEATURE_DABR)) { 218 } else if (firmware_has_feature(FW_FEATURE_DABR)) {
181 ret = plpar_set_dabr(dabr); 219 ret = plpar_set_dabr(dabr);
182 } else { 220 } else
221#endif
183 set_dabr_spr(dabr); 222 set_dabr_spr(dabr);
184 }
185 223
186 return ret; 224 return ret;
187} 225}
188 226
227#ifdef CONFIG_PPC64
189DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); 228DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
190static DEFINE_PER_CPU(unsigned long, current_dabr); 229static DEFINE_PER_CPU(unsigned long, current_dabr);
230#endif
191 231
192struct task_struct *__switch_to(struct task_struct *prev, 232struct task_struct *__switch_to(struct task_struct *prev,
193 struct task_struct *new) 233 struct task_struct *new)
194{ 234{
195 struct thread_struct *new_thread, *old_thread; 235 struct thread_struct *new_thread, *old_thread;
196 unsigned long flags; 236 unsigned long flags;
@@ -200,7 +240,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
200 /* avoid complexity of lazy save/restore of fpu 240 /* avoid complexity of lazy save/restore of fpu
201 * by just saving it every time we switch out if 241 * by just saving it every time we switch out if
202 * this task used the fpu during the last quantum. 242 * this task used the fpu during the last quantum.
203 * 243 *
204 * If it tries to use the fpu again, it'll trap and 244 * If it tries to use the fpu again, it'll trap and
205 * reload its fp regs. So we don't have to do a restore 245 * reload its fp regs. So we don't have to do a restore
206 * every switch, just a save. 246 * every switch, just a save.
@@ -209,31 +249,65 @@ struct task_struct *__switch_to(struct task_struct *prev,
209 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) 249 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
210 giveup_fpu(prev); 250 giveup_fpu(prev);
211#ifdef CONFIG_ALTIVEC 251#ifdef CONFIG_ALTIVEC
252 /*
253 * If the previous thread used altivec in the last quantum
254 * (thus changing altivec regs) then save them.
255 * We used to check the VRSAVE register but not all apps
256 * set it, so we don't rely on it now (and in fact we need
257 * to save & restore VSCR even if VRSAVE == 0). -- paulus
258 *
259 * On SMP we always save/restore altivec regs just to avoid the
260 * complexity of changing processors.
261 * -- Cort
262 */
212 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) 263 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
213 giveup_altivec(prev); 264 giveup_altivec(prev);
214#endif /* CONFIG_ALTIVEC */ 265#endif /* CONFIG_ALTIVEC */
215#endif /* CONFIG_SMP */ 266#ifdef CONFIG_SPE
267 /*
268 * If the previous thread used spe in the last quantum
269 * (thus changing spe regs) then save them.
270 *
271 * On SMP we always save/restore spe regs just to avoid the
272 * complexity of changing processors.
273 */
274 if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
275 giveup_spe(prev);
276#endif /* CONFIG_SPE */
216 277
217#if defined(CONFIG_ALTIVEC) && !defined(CONFIG_SMP) 278#else /* CONFIG_SMP */
279#ifdef CONFIG_ALTIVEC
218 /* Avoid the trap. On smp this this never happens since 280 /* Avoid the trap. On smp this this never happens since
219 * we don't set last_task_used_altivec -- Cort 281 * we don't set last_task_used_altivec -- Cort
220 */ 282 */
221 if (new->thread.regs && last_task_used_altivec == new) 283 if (new->thread.regs && last_task_used_altivec == new)
222 new->thread.regs->msr |= MSR_VEC; 284 new->thread.regs->msr |= MSR_VEC;
223#endif /* CONFIG_ALTIVEC */ 285#endif /* CONFIG_ALTIVEC */
286#ifdef CONFIG_SPE
287 /* Avoid the trap. On smp this this never happens since
288 * we don't set last_task_used_spe
289 */
290 if (new->thread.regs && last_task_used_spe == new)
291 new->thread.regs->msr |= MSR_SPE;
292#endif /* CONFIG_SPE */
224 293
294#endif /* CONFIG_SMP */
295
296#ifdef CONFIG_PPC64 /* for now */
225 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) { 297 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
226 set_dabr(new->thread.dabr); 298 set_dabr(new->thread.dabr);
227 __get_cpu_var(current_dabr) = new->thread.dabr; 299 __get_cpu_var(current_dabr) = new->thread.dabr;
228 } 300 }
229 301
230 flush_tlb_pending(); 302 flush_tlb_pending();
303#endif
231 304
232 new_thread = &new->thread; 305 new_thread = &new->thread;
233 old_thread = &current->thread; 306 old_thread = &current->thread;
234 307
235 /* Collect purr utilization data per process and per processor 308#ifdef CONFIG_PPC64
236 * wise purr is nothing but processor time base 309 /*
310 * Collect processor utilization data per process
237 */ 311 */
238 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 312 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
239 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 313 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
@@ -243,6 +317,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
243 old_thread->accum_tb += (current_tb - start_tb); 317 old_thread->accum_tb += (current_tb - start_tb);
244 new_thread->start_tb = current_tb; 318 new_thread->start_tb = current_tb;
245 } 319 }
320#endif
246 321
247 local_irq_save(flags); 322 local_irq_save(flags);
248 last = _switch(old_thread, new_thread); 323 last = _switch(old_thread, new_thread);
@@ -254,6 +329,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
254 329
255static int instructions_to_print = 16; 330static int instructions_to_print = 16;
256 331
332#ifdef CONFIG_PPC64
333#define BAD_PC(pc) ((REGION_ID(pc) != KERNEL_REGION_ID) && \
334 (REGION_ID(pc) != VMALLOC_REGION_ID))
335#else
336#define BAD_PC(pc) ((pc) < KERNELBASE)
337#endif
338
257static void show_instructions(struct pt_regs *regs) 339static void show_instructions(struct pt_regs *regs)
258{ 340{
259 int i; 341 int i;
@@ -268,9 +350,7 @@ static void show_instructions(struct pt_regs *regs)
268 if (!(i % 8)) 350 if (!(i % 8))
269 printk("\n"); 351 printk("\n");
270 352
271 if (((REGION_ID(pc) != KERNEL_REGION_ID) && 353 if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) {
272 (REGION_ID(pc) != VMALLOC_REGION_ID)) ||
273 __get_user(instr, (unsigned int *)pc)) {
274 printk("XXXXXXXX "); 354 printk("XXXXXXXX ");
275 } else { 355 } else {
276 if (regs->nip == pc) 356 if (regs->nip == pc)
@@ -285,50 +365,82 @@ static void show_instructions(struct pt_regs *regs)
285 printk("\n"); 365 printk("\n");
286} 366}
287 367
368static struct regbit {
369 unsigned long bit;
370 const char *name;
371} msr_bits[] = {
372 {MSR_EE, "EE"},
373 {MSR_PR, "PR"},
374 {MSR_FP, "FP"},
375 {MSR_ME, "ME"},
376 {MSR_IR, "IR"},
377 {MSR_DR, "DR"},
378 {0, NULL}
379};
380
381static void printbits(unsigned long val, struct regbit *bits)
382{
383 const char *sep = "";
384
385 printk("<");
386 for (; bits->bit; ++bits)
387 if (val & bits->bit) {
388 printk("%s%s", sep, bits->name);
389 sep = ",";
390 }
391 printk(">");
392}
393
394#ifdef CONFIG_PPC64
395#define REG "%016lX"
396#define REGS_PER_LINE 4
397#define LAST_VOLATILE 13
398#else
399#define REG "%08lX"
400#define REGS_PER_LINE 8
401#define LAST_VOLATILE 12
402#endif
403
288void show_regs(struct pt_regs * regs) 404void show_regs(struct pt_regs * regs)
289{ 405{
290 int i; 406 int i, trap;
291 unsigned long trap;
292 407
293 printk("NIP: %016lX XER: %08X LR: %016lX CTR: %016lX\n", 408 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
294 regs->nip, (unsigned int)regs->xer, regs->link, regs->ctr); 409 regs->nip, regs->link, regs->ctr);
295 printk("REGS: %p TRAP: %04lx %s (%s)\n", 410 printk("REGS: %p TRAP: %04lx %s (%s)\n",
296 regs, regs->trap, print_tainted(), system_utsname.release); 411 regs, regs->trap, print_tainted(), system_utsname.release);
297 printk("MSR: %016lx EE: %01x PR: %01x FP: %01x ME: %01x " 412 printk("MSR: "REG" ", regs->msr);
298 "IR/DR: %01x%01x CR: %08X\n", 413 printbits(regs->msr, msr_bits);
299 regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0, 414 printk(" CR: %08lX XER: %08lX\n", regs->ccr, regs->xer);
300 regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
301 regs->msr&MSR_IR ? 1 : 0,
302 regs->msr&MSR_DR ? 1 : 0,
303 (unsigned int)regs->ccr);
304 trap = TRAP(regs); 415 trap = TRAP(regs);
305 printk("DAR: %016lx DSISR: %016lx\n", regs->dar, regs->dsisr); 416 if (trap == 0x300 || trap == 0x600)
306 printk("TASK: %p[%d] '%s' THREAD: %p", 417 printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
418 printk("TASK = %p[%d] '%s' THREAD: %p",
307 current, current->pid, current->comm, current->thread_info); 419 current, current->pid, current->comm, current->thread_info);
308 420
309#ifdef CONFIG_SMP 421#ifdef CONFIG_SMP
310 printk(" CPU: %d", smp_processor_id()); 422 printk(" CPU: %d", smp_processor_id());
311#endif /* CONFIG_SMP */ 423#endif /* CONFIG_SMP */
312 424
313 for (i = 0; i < 32; i++) { 425 for (i = 0; i < 32; i++) {
314 if ((i % 4) == 0) { 426 if ((i % REGS_PER_LINE) == 0)
315 printk("\n" KERN_INFO "GPR%02d: ", i); 427 printk("\n" KERN_INFO "GPR%02d: ", i);
316 } 428 printk(REG " ", regs->gpr[i]);
317 429 if (i == LAST_VOLATILE && !FULL_REGS(regs))
318 printk("%016lX ", regs->gpr[i]);
319 if (i == 13 && !FULL_REGS(regs))
320 break; 430 break;
321 } 431 }
322 printk("\n"); 432 printk("\n");
433#ifdef CONFIG_KALLSYMS
323 /* 434 /*
324 * Lookup NIP late so we have the best change of getting the 435 * Lookup NIP late so we have the best change of getting the
325 * above info out without failing 436 * above info out without failing
326 */ 437 */
327 printk("NIP [%016lx] ", regs->nip); 438 printk("NIP ["REG"] ", regs->nip);
328 print_symbol("%s\n", regs->nip); 439 print_symbol("%s\n", regs->nip);
329 printk("LR [%016lx] ", regs->link); 440 printk("LR ["REG"] ", regs->link);
330 print_symbol("%s\n", regs->link); 441 print_symbol("%s\n", regs->link);
331 show_stack(current, (unsigned long *)regs->gpr[1]); 442#endif
443 show_stack(current, (unsigned long *) regs->gpr[1]);
332 if (!user_mode(regs)) 444 if (!user_mode(regs))
333 show_instructions(regs); 445 show_instructions(regs);
334} 446}
@@ -344,16 +456,22 @@ void exit_thread(void)
344 if (last_task_used_altivec == current) 456 if (last_task_used_altivec == current)
345 last_task_used_altivec = NULL; 457 last_task_used_altivec = NULL;
346#endif /* CONFIG_ALTIVEC */ 458#endif /* CONFIG_ALTIVEC */
459#ifdef CONFIG_SPE
460 if (last_task_used_spe == current)
461 last_task_used_spe = NULL;
462#endif
347#endif /* CONFIG_SMP */ 463#endif /* CONFIG_SMP */
348} 464}
349 465
350void flush_thread(void) 466void flush_thread(void)
351{ 467{
468#ifdef CONFIG_PPC64
352 struct thread_info *t = current_thread_info(); 469 struct thread_info *t = current_thread_info();
353 470
354 kprobe_flush_task(current);
355 if (t->flags & _TIF_ABI_PENDING) 471 if (t->flags & _TIF_ABI_PENDING)
356 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); 472 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
473#endif
474 kprobe_flush_task(current);
357 475
358#ifndef CONFIG_SMP 476#ifndef CONFIG_SMP
359 if (last_task_used_math == current) 477 if (last_task_used_math == current)
@@ -362,12 +480,18 @@ void flush_thread(void)
362 if (last_task_used_altivec == current) 480 if (last_task_used_altivec == current)
363 last_task_used_altivec = NULL; 481 last_task_used_altivec = NULL;
364#endif /* CONFIG_ALTIVEC */ 482#endif /* CONFIG_ALTIVEC */
483#ifdef CONFIG_SPE
484 if (last_task_used_spe == current)
485 last_task_used_spe = NULL;
486#endif
365#endif /* CONFIG_SMP */ 487#endif /* CONFIG_SMP */
366 488
489#ifdef CONFIG_PPC64 /* for now */
367 if (current->thread.dabr) { 490 if (current->thread.dabr) {
368 current->thread.dabr = 0; 491 current->thread.dabr = 0;
369 set_dabr(0); 492 set_dabr(0);
370 } 493 }
494#endif
371} 495}
372 496
373void 497void
@@ -375,7 +499,6 @@ release_thread(struct task_struct *t)
375{ 499{
376} 500}
377 501
378
379/* 502/*
380 * This gets called before we allocate a new thread and copy 503 * This gets called before we allocate a new thread and copy
381 * the current task into it. 504 * the current task into it.
@@ -384,36 +507,44 @@ void prepare_to_copy(struct task_struct *tsk)
384{ 507{
385 flush_fp_to_thread(current); 508 flush_fp_to_thread(current);
386 flush_altivec_to_thread(current); 509 flush_altivec_to_thread(current);
510 flush_spe_to_thread(current);
387} 511}
388 512
389/* 513/*
390 * Copy a thread.. 514 * Copy a thread..
391 */ 515 */
392int 516int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
393copy_thread(int nr, unsigned long clone_flags, unsigned long usp, 517 unsigned long unused, struct task_struct *p,
394 unsigned long unused, struct task_struct *p, struct pt_regs *regs) 518 struct pt_regs *regs)
395{ 519{
396 struct pt_regs *childregs, *kregs; 520 struct pt_regs *childregs, *kregs;
397 extern void ret_from_fork(void); 521 extern void ret_from_fork(void);
398 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE; 522 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
399 523
524 CHECK_FULL_REGS(regs);
400 /* Copy registers */ 525 /* Copy registers */
401 sp -= sizeof(struct pt_regs); 526 sp -= sizeof(struct pt_regs);
402 childregs = (struct pt_regs *) sp; 527 childregs = (struct pt_regs *) sp;
403 *childregs = *regs; 528 *childregs = *regs;
404 if ((childregs->msr & MSR_PR) == 0) { 529 if ((childregs->msr & MSR_PR) == 0) {
405 /* for kernel thread, set stackptr in new task */ 530 /* for kernel thread, set `current' and stackptr in new task */
406 childregs->gpr[1] = sp + sizeof(struct pt_regs); 531 childregs->gpr[1] = sp + sizeof(struct pt_regs);
407 p->thread.regs = NULL; /* no user register state */ 532#ifdef CONFIG_PPC32
533 childregs->gpr[2] = (unsigned long) p;
534#else
408 clear_ti_thread_flag(p->thread_info, TIF_32BIT); 535 clear_ti_thread_flag(p->thread_info, TIF_32BIT);
536#endif
537 p->thread.regs = NULL; /* no user register state */
409 } else { 538 } else {
410 childregs->gpr[1] = usp; 539 childregs->gpr[1] = usp;
411 p->thread.regs = childregs; 540 p->thread.regs = childregs;
412 if (clone_flags & CLONE_SETTLS) { 541 if (clone_flags & CLONE_SETTLS) {
413 if (test_thread_flag(TIF_32BIT)) 542#ifdef CONFIG_PPC64
414 childregs->gpr[2] = childregs->gpr[6]; 543 if (!test_thread_flag(TIF_32BIT))
415 else
416 childregs->gpr[13] = childregs->gpr[6]; 544 childregs->gpr[13] = childregs->gpr[6];
545 else
546#endif
547 childregs->gpr[2] = childregs->gpr[6];
417 } 548 }
418 } 549 }
419 childregs->gpr[3] = 0; /* Result from fork() */ 550 childregs->gpr[3] = 0; /* Result from fork() */
@@ -431,6 +562,8 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
431 kregs = (struct pt_regs *) sp; 562 kregs = (struct pt_regs *) sp;
432 sp -= STACK_FRAME_OVERHEAD; 563 sp -= STACK_FRAME_OVERHEAD;
433 p->thread.ksp = sp; 564 p->thread.ksp = sp;
565
566#ifdef CONFIG_PPC64
434 if (cpu_has_feature(CPU_FTR_SLB)) { 567 if (cpu_has_feature(CPU_FTR_SLB)) {
435 unsigned long sp_vsid = get_kernel_vsid(sp); 568 unsigned long sp_vsid = get_kernel_vsid(sp);
436 569
@@ -449,6 +582,10 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
449 * function. 582 * function.
450 */ 583 */
451 kregs->nip = *((unsigned long *)ret_from_fork); 584 kregs->nip = *((unsigned long *)ret_from_fork);
585#else
586 kregs->nip = (unsigned long)ret_from_fork;
587 p->thread.last_syscall = -1;
588#endif
452 589
453 return 0; 590 return 0;
454} 591}
@@ -456,30 +593,17 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
456/* 593/*
457 * Set up a thread for executing a new program 594 * Set up a thread for executing a new program
458 */ 595 */
459void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp) 596void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
460{ 597{
461 unsigned long entry, toc, load_addr = regs->gpr[2]; 598#ifdef CONFIG_PPC64
599 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
600#endif
462 601
463 /* fdptr is a relocated pointer to the function descriptor for
464 * the elf _start routine. The first entry in the function
465 * descriptor is the entry address of _start and the second
466 * entry is the TOC value we need to use.
467 */
468 set_fs(USER_DS); 602 set_fs(USER_DS);
469 __get_user(entry, (unsigned long __user *)fdptr);
470 __get_user(toc, (unsigned long __user *)fdptr+1);
471
472 /* Check whether the e_entry function descriptor entries
473 * need to be relocated before we can use them.
474 */
475 if (load_addr != 0) {
476 entry += load_addr;
477 toc += load_addr;
478 }
479 603
480 /* 604 /*
481 * If we exec out of a kernel thread then thread.regs will not be 605 * If we exec out of a kernel thread then thread.regs will not be
482 * set. Do it now. 606 * set. Do it now.
483 */ 607 */
484 if (!current->thread.regs) { 608 if (!current->thread.regs) {
485 unsigned long childregs = (unsigned long)current->thread_info + 609 unsigned long childregs = (unsigned long)current->thread_info +
@@ -488,36 +612,101 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
488 current->thread.regs = (struct pt_regs *)childregs; 612 current->thread.regs = (struct pt_regs *)childregs;
489 } 613 }
490 614
491 regs->nip = entry; 615 memset(regs->gpr, 0, sizeof(regs->gpr));
616 regs->ctr = 0;
617 regs->link = 0;
618 regs->xer = 0;
619 regs->ccr = 0;
492 regs->gpr[1] = sp; 620 regs->gpr[1] = sp;
493 regs->gpr[2] = toc; 621
494 regs->msr = MSR_USER64; 622#ifdef CONFIG_PPC32
623 regs->mq = 0;
624 regs->nip = start;
625 regs->msr = MSR_USER;
626#else
627 if (!test_thread_flag(TIF_32BIT)) {
628 unsigned long entry, toc;
629
630 /* start is a relocated pointer to the function descriptor for
631 * the elf _start routine. The first entry in the function
632 * descriptor is the entry address of _start and the second
633 * entry is the TOC value we need to use.
634 */
635 __get_user(entry, (unsigned long __user *)start);
636 __get_user(toc, (unsigned long __user *)start+1);
637
638 /* Check whether the e_entry function descriptor entries
639 * need to be relocated before we can use them.
640 */
641 if (load_addr != 0) {
642 entry += load_addr;
643 toc += load_addr;
644 }
645 regs->nip = entry;
646 regs->gpr[2] = toc;
647 regs->msr = MSR_USER64;
648 } else {
649 regs->nip = start;
650 regs->gpr[2] = 0;
651 regs->msr = MSR_USER32;
652 }
653#endif
654
495#ifndef CONFIG_SMP 655#ifndef CONFIG_SMP
496 if (last_task_used_math == current) 656 if (last_task_used_math == current)
497 last_task_used_math = 0; 657 last_task_used_math = NULL;
658#ifdef CONFIG_ALTIVEC
659 if (last_task_used_altivec == current)
660 last_task_used_altivec = NULL;
661#endif
662#ifdef CONFIG_SPE
663 if (last_task_used_spe == current)
664 last_task_used_spe = NULL;
665#endif
498#endif /* CONFIG_SMP */ 666#endif /* CONFIG_SMP */
499 memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); 667 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
500 current->thread.fpscr = 0; 668 current->thread.fpscr = 0;
501#ifdef CONFIG_ALTIVEC 669#ifdef CONFIG_ALTIVEC
502#ifndef CONFIG_SMP
503 if (last_task_used_altivec == current)
504 last_task_used_altivec = 0;
505#endif /* CONFIG_SMP */
506 memset(current->thread.vr, 0, sizeof(current->thread.vr)); 670 memset(current->thread.vr, 0, sizeof(current->thread.vr));
507 current->thread.vscr.u[0] = 0; 671 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
508 current->thread.vscr.u[1] = 0;
509 current->thread.vscr.u[2] = 0;
510 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */ 672 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
511 current->thread.vrsave = 0; 673 current->thread.vrsave = 0;
512 current->thread.used_vr = 0; 674 current->thread.used_vr = 0;
513#endif /* CONFIG_ALTIVEC */ 675#endif /* CONFIG_ALTIVEC */
676#ifdef CONFIG_SPE
677 memset(current->thread.evr, 0, sizeof(current->thread.evr));
678 current->thread.acc = 0;
679 current->thread.spefscr = 0;
680 current->thread.used_spe = 0;
681#endif /* CONFIG_SPE */
514} 682}
515EXPORT_SYMBOL(start_thread); 683
684#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
685 | PR_FP_EXC_RES | PR_FP_EXC_INV)
516 686
517int set_fpexc_mode(struct task_struct *tsk, unsigned int val) 687int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
518{ 688{
519 struct pt_regs *regs = tsk->thread.regs; 689 struct pt_regs *regs = tsk->thread.regs;
520 690
691 /* This is a bit hairy. If we are an SPE enabled processor
692 * (have embedded fp) we store the IEEE exception enable flags in
693 * fpexc_mode. fpexc_mode is also used for setting FP exception
694 * mode (asyn, precise, disabled) for 'Classic' FP. */
695 if (val & PR_FP_EXC_SW_ENABLE) {
696#ifdef CONFIG_SPE
697 tsk->thread.fpexc_mode = val &
698 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
699 return 0;
700#else
701 return -EINVAL;
702#endif
703 }
704
705 /* on a CONFIG_SPE this does not hurt us. The bits that
706 * __pack_fe01 use do not overlap with bits used for
707 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
708 * on CONFIG_SPE implementations are reserved so writing to
709 * them does not change anything */
521 if (val > PR_FP_EXC_PRECISE) 710 if (val > PR_FP_EXC_PRECISE)
522 return -EINVAL; 711 return -EINVAL;
523 tsk->thread.fpexc_mode = __pack_fe01(val); 712 tsk->thread.fpexc_mode = __pack_fe01(val);
@@ -531,38 +720,41 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
531{ 720{
532 unsigned int val; 721 unsigned int val;
533 722
534 val = __unpack_fe01(tsk->thread.fpexc_mode); 723 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
724#ifdef CONFIG_SPE
725 val = tsk->thread.fpexc_mode;
726#else
727 return -EINVAL;
728#endif
729 else
730 val = __unpack_fe01(tsk->thread.fpexc_mode);
535 return put_user(val, (unsigned int __user *) adr); 731 return put_user(val, (unsigned int __user *) adr);
536} 732}
537 733
538int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3, 734#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
539 unsigned long p4, unsigned long p5, unsigned long p6, 735
736int sys_clone(unsigned long clone_flags, unsigned long usp,
737 int __user *parent_tidp, void __user *child_threadptr,
738 int __user *child_tidp, int p6,
540 struct pt_regs *regs) 739 struct pt_regs *regs)
541{ 740{
542 unsigned long parent_tidptr = 0; 741 CHECK_FULL_REGS(regs);
543 unsigned long child_tidptr = 0; 742 if (usp == 0)
544 743 usp = regs->gpr[1]; /* stack pointer for child */
545 if (p2 == 0) 744#ifdef CONFIG_PPC64
546 p2 = regs->gpr[1]; /* stack pointer for child */ 745 if (test_thread_flag(TIF_32BIT)) {
547 746 parent_tidp = TRUNC_PTR(parent_tidp);
548 if (clone_flags & (CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | 747 child_tidp = TRUNC_PTR(child_tidp);
549 CLONE_CHILD_CLEARTID)) {
550 parent_tidptr = p3;
551 child_tidptr = p5;
552 if (test_thread_flag(TIF_32BIT)) {
553 parent_tidptr &= 0xffffffff;
554 child_tidptr &= 0xffffffff;
555 }
556 } 748 }
557 749#endif
558 return do_fork(clone_flags, p2, regs, 0, 750 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
559 (int __user *)parent_tidptr, (int __user *)child_tidptr);
560} 751}
561 752
562int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, 753int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
563 unsigned long p4, unsigned long p5, unsigned long p6, 754 unsigned long p4, unsigned long p5, unsigned long p6,
564 struct pt_regs *regs) 755 struct pt_regs *regs)
565{ 756{
757 CHECK_FULL_REGS(regs);
566 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); 758 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
567} 759}
568 760
@@ -570,8 +762,9 @@ int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
570 unsigned long p4, unsigned long p5, unsigned long p6, 762 unsigned long p4, unsigned long p5, unsigned long p6,
571 struct pt_regs *regs) 763 struct pt_regs *regs)
572{ 764{
573 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0, 765 CHECK_FULL_REGS(regs);
574 NULL, NULL); 766 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
767 regs, 0, NULL, NULL);
575} 768}
576 769
577int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, 770int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -579,30 +772,27 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
579 struct pt_regs *regs) 772 struct pt_regs *regs)
580{ 773{
581 int error; 774 int error;
582 char * filename; 775 char *filename;
583 776
584 filename = getname((char __user *) a0); 777 filename = getname((char __user *) a0);
585 error = PTR_ERR(filename); 778 error = PTR_ERR(filename);
586 if (IS_ERR(filename)) 779 if (IS_ERR(filename))
587 goto out; 780 goto out;
588 flush_fp_to_thread(current); 781 flush_fp_to_thread(current);
589 flush_altivec_to_thread(current); 782 flush_altivec_to_thread(current);
783 flush_spe_to_thread(current);
590 error = do_execve(filename, (char __user * __user *) a1, 784 error = do_execve(filename, (char __user * __user *) a1,
591 (char __user * __user *) a2, regs); 785 (char __user * __user *) a2, regs);
592
593 if (error == 0) { 786 if (error == 0) {
594 task_lock(current); 787 task_lock(current);
595 current->ptrace &= ~PT_DTRACE; 788 current->ptrace &= ~PT_DTRACE;
596 task_unlock(current); 789 task_unlock(current);
597 } 790 }
598 putname(filename); 791 putname(filename);
599
600out: 792out:
601 return error; 793 return error;
602} 794}
603 795
604static int kstack_depth_to_print = 64;
605
606static int validate_sp(unsigned long sp, struct task_struct *p, 796static int validate_sp(unsigned long sp, struct task_struct *p,
607 unsigned long nbytes) 797 unsigned long nbytes)
608{ 798{
@@ -627,6 +817,20 @@ static int validate_sp(unsigned long sp, struct task_struct *p,
627 return 0; 817 return 0;
628} 818}
629 819
820#ifdef CONFIG_PPC64
821#define MIN_STACK_FRAME 112 /* same as STACK_FRAME_OVERHEAD, in fact */
822#define FRAME_LR_SAVE 2
823#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD + 288)
824#define REGS_MARKER 0x7265677368657265ul
825#define FRAME_MARKER 12
826#else
827#define MIN_STACK_FRAME 16
828#define FRAME_LR_SAVE 1
829#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
830#define REGS_MARKER 0x72656773ul
831#define FRAME_MARKER 2
832#endif
833
630unsigned long get_wchan(struct task_struct *p) 834unsigned long get_wchan(struct task_struct *p)
631{ 835{
632 unsigned long ip, sp; 836 unsigned long ip, sp;
@@ -636,15 +840,15 @@ unsigned long get_wchan(struct task_struct *p)
636 return 0; 840 return 0;
637 841
638 sp = p->thread.ksp; 842 sp = p->thread.ksp;
639 if (!validate_sp(sp, p, 112)) 843 if (!validate_sp(sp, p, MIN_STACK_FRAME))
640 return 0; 844 return 0;
641 845
642 do { 846 do {
643 sp = *(unsigned long *)sp; 847 sp = *(unsigned long *)sp;
644 if (!validate_sp(sp, p, 112)) 848 if (!validate_sp(sp, p, MIN_STACK_FRAME))
645 return 0; 849 return 0;
646 if (count > 0) { 850 if (count > 0) {
647 ip = *(unsigned long *)(sp + 16); 851 ip = ((unsigned long *)sp)[FRAME_LR_SAVE];
648 if (!in_sched_functions(ip)) 852 if (!in_sched_functions(ip))
649 return ip; 853 return ip;
650 } 854 }
@@ -653,33 +857,35 @@ unsigned long get_wchan(struct task_struct *p)
653} 857}
654EXPORT_SYMBOL(get_wchan); 858EXPORT_SYMBOL(get_wchan);
655 859
656void show_stack(struct task_struct *p, unsigned long *_sp) 860static int kstack_depth_to_print = 64;
861
862void show_stack(struct task_struct *tsk, unsigned long *stack)
657{ 863{
658 unsigned long ip, newsp, lr; 864 unsigned long sp, ip, lr, newsp;
659 int count = 0; 865 int count = 0;
660 unsigned long sp = (unsigned long)_sp;
661 int firstframe = 1; 866 int firstframe = 1;
662 867
868 sp = (unsigned long) stack;
869 if (tsk == NULL)
870 tsk = current;
663 if (sp == 0) { 871 if (sp == 0) {
664 if (p) { 872 if (tsk == current)
665 sp = p->thread.ksp; 873 asm("mr %0,1" : "=r" (sp));
666 } else { 874 else
667 sp = __get_SP(); 875 sp = tsk->thread.ksp;
668 p = current;
669 }
670 } 876 }
671 877
672 lr = 0; 878 lr = 0;
673 printk("Call Trace:\n"); 879 printk("Call Trace:\n");
674 do { 880 do {
675 if (!validate_sp(sp, p, 112)) 881 if (!validate_sp(sp, tsk, MIN_STACK_FRAME))
676 return; 882 return;
677 883
678 _sp = (unsigned long *) sp; 884 stack = (unsigned long *) sp;
679 newsp = _sp[0]; 885 newsp = stack[0];
680 ip = _sp[2]; 886 ip = stack[FRAME_LR_SAVE];
681 if (!firstframe || ip != lr) { 887 if (!firstframe || ip != lr) {
682 printk("[%016lx] [%016lx] ", sp, ip); 888 printk("["REG"] ["REG"] ", sp, ip);
683 print_symbol("%s", ip); 889 print_symbol("%s", ip);
684 if (firstframe) 890 if (firstframe)
685 printk(" (unreliable)"); 891 printk(" (unreliable)");
@@ -691,8 +897,8 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
691 * See if this is an exception frame. 897 * See if this is an exception frame.
692 * We look for the "regshere" marker in the current frame. 898 * We look for the "regshere" marker in the current frame.
693 */ 899 */
694 if (validate_sp(sp, p, sizeof(struct pt_regs) + 400) 900 if (validate_sp(sp, tsk, INT_FRAME_SIZE)
695 && _sp[12] == 0x7265677368657265ul) { 901 && stack[FRAME_MARKER] == REGS_MARKER) {
696 struct pt_regs *regs = (struct pt_regs *) 902 struct pt_regs *regs = (struct pt_regs *)
697 (sp + STACK_FRAME_OVERHEAD); 903 (sp + STACK_FRAME_OVERHEAD);
698 printk("--- Exception: %lx", regs->trap); 904 printk("--- Exception: %lx", regs->trap);
@@ -708,6 +914,6 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
708 914
709void dump_stack(void) 915void dump_stack(void)
710{ 916{
711 show_stack(current, (unsigned long *)__get_SP()); 917 show_stack(current, NULL);
712} 918}
713EXPORT_SYMBOL(dump_stack); 919EXPORT_SYMBOL(dump_stack);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
new file mode 100644
index 000000000000..c8d288457b4c
--- /dev/null
+++ b/arch/powerpc/kernel/prom.c
@@ -0,0 +1,2125 @@
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
32
33#include <asm/prom.h>
34#include <asm/rtas.h>
35#include <asm/lmb.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/system.h>
42#include <asm/mmu.h>
43#include <asm/pgtable.h>
44#include <asm/pci.h>
45#include <asm/iommu.h>
46#include <asm/btext.h>
47#include <asm/sections.h>
48#include <asm/machdep.h>
49#include <asm/pSeries_reconfig.h>
50#include <asm/pci-bridge.h>
51#ifdef CONFIG_PPC64
52#include <asm/systemcfg.h>
53#endif
54
55#ifdef DEBUG
56#define DBG(fmt...) printk(KERN_ERR fmt)
57#else
58#define DBG(fmt...)
59#endif
60
61struct pci_reg_property {
62 struct pci_address addr;
63 u32 size_hi;
64 u32 size_lo;
65};
66
67struct isa_reg_property {
68 u32 space;
69 u32 address;
70 u32 size;
71};
72
73
74typedef int interpret_func(struct device_node *, unsigned long *,
75 int, int, int);
76
77extern struct rtas_t rtas;
78extern struct lmb lmb;
79extern unsigned long klimit;
80
81static unsigned long memory_limit;
82
83static int __initdata dt_root_addr_cells;
84static int __initdata dt_root_size_cells;
85
86#ifdef CONFIG_PPC64
87static int __initdata iommu_is_off;
88int __initdata iommu_force_on;
89extern unsigned long tce_alloc_start, tce_alloc_end;
90#endif
91
92typedef u32 cell_t;
93
94#if 0
95static struct boot_param_header *initial_boot_params __initdata;
96#else
97struct boot_param_header *initial_boot_params;
98#endif
99
100static struct device_node *allnodes = NULL;
101
102/* use when traversing tree through the allnext, child, sibling,
103 * or parent members of struct device_node.
104 */
105static DEFINE_RWLOCK(devtree_lock);
106
107/* export that to outside world */
108struct device_node *of_chosen;
109
110struct device_node *dflt_interrupt_controller;
111int num_interrupt_controllers;
112
113u32 rtas_data;
114u32 rtas_entry;
115
116/*
117 * Wrapper for allocating memory for various data that needs to be
118 * attached to device nodes as they are processed at boot or when
119 * added to the device tree later (e.g. DLPAR). At boot there is
120 * already a region reserved so we just increment *mem_start by size;
121 * otherwise we call kmalloc.
122 */
123static void * prom_alloc(unsigned long size, unsigned long *mem_start)
124{
125 unsigned long tmp;
126
127 if (!mem_start)
128 return kmalloc(size, GFP_KERNEL);
129
130 tmp = *mem_start;
131 *mem_start += size;
132 return (void *)tmp;
133}
134
135/*
136 * Find the device_node with a given phandle.
137 */
138static struct device_node * find_phandle(phandle ph)
139{
140 struct device_node *np;
141
142 for (np = allnodes; np != 0; np = np->allnext)
143 if (np->linux_phandle == ph)
144 return np;
145 return NULL;
146}
147
148/*
149 * Find the interrupt parent of a node.
150 */
151static struct device_node * __devinit intr_parent(struct device_node *p)
152{
153 phandle *parp;
154
155 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
156 if (parp == NULL)
157 return p->parent;
158 p = find_phandle(*parp);
159 if (p != NULL)
160 return p;
161 /*
162 * On a powermac booted with BootX, we don't get to know the
163 * phandles for any nodes, so find_phandle will return NULL.
164 * Fortunately these machines only have one interrupt controller
165 * so there isn't in fact any ambiguity. -- paulus
166 */
167 if (num_interrupt_controllers == 1)
168 p = dflt_interrupt_controller;
169 return p;
170}
171
172/*
173 * Find out the size of each entry of the interrupts property
174 * for a node.
175 */
176int __devinit prom_n_intr_cells(struct device_node *np)
177{
178 struct device_node *p;
179 unsigned int *icp;
180
181 for (p = np; (p = intr_parent(p)) != NULL; ) {
182 icp = (unsigned int *)
183 get_property(p, "#interrupt-cells", NULL);
184 if (icp != NULL)
185 return *icp;
186 if (get_property(p, "interrupt-controller", NULL) != NULL
187 || get_property(p, "interrupt-map", NULL) != NULL) {
188 printk("oops, node %s doesn't have #interrupt-cells\n",
189 p->full_name);
190 return 1;
191 }
192 }
193#ifdef DEBUG_IRQ
194 printk("prom_n_intr_cells failed for %s\n", np->full_name);
195#endif
196 return 1;
197}
198
199/*
200 * Map an interrupt from a device up to the platform interrupt
201 * descriptor.
202 */
203static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
204 struct device_node *np, unsigned int *ints,
205 int nintrc)
206{
207 struct device_node *p, *ipar;
208 unsigned int *imap, *imask, *ip;
209 int i, imaplen, match;
210 int newintrc = 0, newaddrc = 0;
211 unsigned int *reg;
212 int naddrc;
213
214 reg = (unsigned int *) get_property(np, "reg", NULL);
215 naddrc = prom_n_addr_cells(np);
216 p = intr_parent(np);
217 while (p != NULL) {
218 if (get_property(p, "interrupt-controller", NULL) != NULL)
219 /* this node is an interrupt controller, stop here */
220 break;
221 imap = (unsigned int *)
222 get_property(p, "interrupt-map", &imaplen);
223 if (imap == NULL) {
224 p = intr_parent(p);
225 continue;
226 }
227 imask = (unsigned int *)
228 get_property(p, "interrupt-map-mask", NULL);
229 if (imask == NULL) {
230 printk("oops, %s has interrupt-map but no mask\n",
231 p->full_name);
232 return 0;
233 }
234 imaplen /= sizeof(unsigned int);
235 match = 0;
236 ipar = NULL;
237 while (imaplen > 0 && !match) {
238 /* check the child-interrupt field */
239 match = 1;
240 for (i = 0; i < naddrc && match; ++i)
241 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
242 for (; i < naddrc + nintrc && match; ++i)
243 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
244 imap += naddrc + nintrc;
245 imaplen -= naddrc + nintrc;
246 /* grab the interrupt parent */
247 ipar = find_phandle((phandle) *imap++);
248 --imaplen;
249 if (ipar == NULL && num_interrupt_controllers == 1)
250 /* cope with BootX not giving us phandles */
251 ipar = dflt_interrupt_controller;
252 if (ipar == NULL) {
253 printk("oops, no int parent %x in map of %s\n",
254 imap[-1], p->full_name);
255 return 0;
256 }
257 /* find the parent's # addr and intr cells */
258 ip = (unsigned int *)
259 get_property(ipar, "#interrupt-cells", NULL);
260 if (ip == NULL) {
261 printk("oops, no #interrupt-cells on %s\n",
262 ipar->full_name);
263 return 0;
264 }
265 newintrc = *ip;
266 ip = (unsigned int *)
267 get_property(ipar, "#address-cells", NULL);
268 newaddrc = (ip == NULL)? 0: *ip;
269 imap += newaddrc + newintrc;
270 imaplen -= newaddrc + newintrc;
271 }
272 if (imaplen < 0) {
273 printk("oops, error decoding int-map on %s, len=%d\n",
274 p->full_name, imaplen);
275 return 0;
276 }
277 if (!match) {
278#ifdef DEBUG_IRQ
279 printk("oops, no match in %s int-map for %s\n",
280 p->full_name, np->full_name);
281#endif
282 return 0;
283 }
284 p = ipar;
285 naddrc = newaddrc;
286 nintrc = newintrc;
287 ints = imap - nintrc;
288 reg = ints - naddrc;
289 }
290 if (p == NULL) {
291#ifdef DEBUG_IRQ
292 printk("hmmm, int tree for %s doesn't have ctrler\n",
293 np->full_name);
294#endif
295 return 0;
296 }
297 *irq = ints;
298 *ictrler = p;
299 return nintrc;
300}
301
302static int __devinit finish_node_interrupts(struct device_node *np,
303 unsigned long *mem_start,
304 int measure_only)
305{
306 unsigned int *ints;
307 int intlen, intrcells, intrcount;
308 int i, j, n;
309 unsigned int *irq, virq;
310 struct device_node *ic;
311
312 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
313 if (ints == NULL)
314 return 0;
315 intrcells = prom_n_intr_cells(np);
316 intlen /= intrcells * sizeof(unsigned int);
317
318 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
319 if (!np->intrs)
320 return -ENOMEM;
321
322 if (measure_only)
323 return 0;
324
325 intrcount = 0;
326 for (i = 0; i < intlen; ++i, ints += intrcells) {
327 n = map_interrupt(&irq, &ic, np, ints, intrcells);
328 if (n <= 0)
329 continue;
330
331 /* don't map IRQ numbers under a cascaded 8259 controller */
332 if (ic && device_is_compatible(ic, "chrp,iic")) {
333 np->intrs[intrcount].line = irq[0];
334 } else {
335#ifdef CONFIG_PPC64
336 virq = virt_irq_create_mapping(irq[0]);
337 if (virq == NO_IRQ) {
338 printk(KERN_CRIT "Could not allocate interrupt"
339 " number for %s\n", np->full_name);
340 continue;
341 }
342 virq = irq_offset_up(virq);
343#else
344 virq = irq[0];
345#endif
346 np->intrs[intrcount].line = virq;
347 }
348
349#ifdef CONFIG_PPC64
350 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
351 if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
352 char *name = get_property(ic->parent, "name", NULL);
353 if (name && !strcmp(name, "u3"))
354 np->intrs[intrcount].line += 128;
355 else if (!(name && !strcmp(name, "mac-io")))
356 /* ignore other cascaded controllers, such as
357 the k2-sata-root */
358 break;
359 }
360#endif
361 np->intrs[intrcount].sense = 1;
362 if (n > 1)
363 np->intrs[intrcount].sense = irq[1];
364 if (n > 2) {
365 printk("hmmm, got %d intr cells for %s:", n,
366 np->full_name);
367 for (j = 0; j < n; ++j)
368 printk(" %d", irq[j]);
369 printk("\n");
370 }
371 ++intrcount;
372 }
373 np->n_intrs = intrcount;
374
375 return 0;
376}
377
378static int __devinit interpret_pci_props(struct device_node *np,
379 unsigned long *mem_start,
380 int naddrc, int nsizec,
381 int measure_only)
382{
383 struct address_range *adr;
384 struct pci_reg_property *pci_addrs;
385 int i, l, n_addrs;
386
387 pci_addrs = (struct pci_reg_property *)
388 get_property(np, "assigned-addresses", &l);
389 if (!pci_addrs)
390 return 0;
391
392 n_addrs = l / sizeof(*pci_addrs);
393
394 adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
395 if (!adr)
396 return -ENOMEM;
397
398 if (measure_only)
399 return 0;
400
401 np->addrs = adr;
402 np->n_addrs = n_addrs;
403
404 for (i = 0; i < n_addrs; i++) {
405 adr[i].space = pci_addrs[i].addr.a_hi;
406 adr[i].address = pci_addrs[i].addr.a_lo |
407 ((u64)pci_addrs[i].addr.a_mid << 32);
408 adr[i].size = pci_addrs[i].size_lo;
409 }
410
411 return 0;
412}
413
414static int __init interpret_dbdma_props(struct device_node *np,
415 unsigned long *mem_start,
416 int naddrc, int nsizec,
417 int measure_only)
418{
419 struct reg_property32 *rp;
420 struct address_range *adr;
421 unsigned long base_address;
422 int i, l;
423 struct device_node *db;
424
425 base_address = 0;
426 if (!measure_only) {
427 for (db = np->parent; db != NULL; db = db->parent) {
428 if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
429 base_address = db->addrs[0].address;
430 break;
431 }
432 }
433 }
434
435 rp = (struct reg_property32 *) get_property(np, "reg", &l);
436 if (rp != 0 && l >= sizeof(struct reg_property32)) {
437 i = 0;
438 adr = (struct address_range *) (*mem_start);
439 while ((l -= sizeof(struct reg_property32)) >= 0) {
440 if (!measure_only) {
441 adr[i].space = 2;
442 adr[i].address = rp[i].address + base_address;
443 adr[i].size = rp[i].size;
444 }
445 ++i;
446 }
447 np->addrs = adr;
448 np->n_addrs = i;
449 (*mem_start) += i * sizeof(struct address_range);
450 }
451
452 return 0;
453}
454
455static int __init interpret_macio_props(struct device_node *np,
456 unsigned long *mem_start,
457 int naddrc, int nsizec,
458 int measure_only)
459{
460 struct reg_property32 *rp;
461 struct address_range *adr;
462 unsigned long base_address;
463 int i, l;
464 struct device_node *db;
465
466 base_address = 0;
467 if (!measure_only) {
468 for (db = np->parent; db != NULL; db = db->parent) {
469 if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
470 base_address = db->addrs[0].address;
471 break;
472 }
473 }
474 }
475
476 rp = (struct reg_property32 *) get_property(np, "reg", &l);
477 if (rp != 0 && l >= sizeof(struct reg_property32)) {
478 i = 0;
479 adr = (struct address_range *) (*mem_start);
480 while ((l -= sizeof(struct reg_property32)) >= 0) {
481 if (!measure_only) {
482 adr[i].space = 2;
483 adr[i].address = rp[i].address + base_address;
484 adr[i].size = rp[i].size;
485 }
486 ++i;
487 }
488 np->addrs = adr;
489 np->n_addrs = i;
490 (*mem_start) += i * sizeof(struct address_range);
491 }
492
493 return 0;
494}
495
496static int __init interpret_isa_props(struct device_node *np,
497 unsigned long *mem_start,
498 int naddrc, int nsizec,
499 int measure_only)
500{
501 struct isa_reg_property *rp;
502 struct address_range *adr;
503 int i, l;
504
505 rp = (struct isa_reg_property *) get_property(np, "reg", &l);
506 if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
507 i = 0;
508 adr = (struct address_range *) (*mem_start);
509 while ((l -= sizeof(struct isa_reg_property)) >= 0) {
510 if (!measure_only) {
511 adr[i].space = rp[i].space;
512 adr[i].address = rp[i].address;
513 adr[i].size = rp[i].size;
514 }
515 ++i;
516 }
517 np->addrs = adr;
518 np->n_addrs = i;
519 (*mem_start) += i * sizeof(struct address_range);
520 }
521
522 return 0;
523}
524
525static int __init interpret_root_props(struct device_node *np,
526 unsigned long *mem_start,
527 int naddrc, int nsizec,
528 int measure_only)
529{
530 struct address_range *adr;
531 int i, l;
532 unsigned int *rp;
533 int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
534
535 rp = (unsigned int *) get_property(np, "reg", &l);
536 if (rp != 0 && l >= rpsize) {
537 i = 0;
538 adr = (struct address_range *) (*mem_start);
539 while ((l -= rpsize) >= 0) {
540 if (!measure_only) {
541 adr[i].space = 0;
542 adr[i].address = rp[naddrc - 1];
543 adr[i].size = rp[naddrc + nsizec - 1];
544 }
545 ++i;
546 rp += naddrc + nsizec;
547 }
548 np->addrs = adr;
549 np->n_addrs = i;
550 (*mem_start) += i * sizeof(struct address_range);
551 }
552
553 return 0;
554}
555
556static int __devinit finish_node(struct device_node *np,
557 unsigned long *mem_start,
558 interpret_func *ifunc,
559 int naddrc, int nsizec,
560 int measure_only)
561{
562 struct device_node *child;
563 int *ip, rc = 0;
564
565 /* get the device addresses and interrupts */
566 if (ifunc != NULL)
567 rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
568 if (rc)
569 goto out;
570
571 rc = finish_node_interrupts(np, mem_start, measure_only);
572 if (rc)
573 goto out;
574
575 /* Look for #address-cells and #size-cells properties. */
576 ip = (int *) get_property(np, "#address-cells", NULL);
577 if (ip != NULL)
578 naddrc = *ip;
579 ip = (int *) get_property(np, "#size-cells", NULL);
580 if (ip != NULL)
581 nsizec = *ip;
582
583 if (!strcmp(np->name, "device-tree") || np->parent == NULL)
584 ifunc = interpret_root_props;
585 else if (np->type == 0)
586 ifunc = NULL;
587 else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
588 ifunc = interpret_pci_props;
589 else if (!strcmp(np->type, "dbdma"))
590 ifunc = interpret_dbdma_props;
591 else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
592 ifunc = interpret_macio_props;
593 else if (!strcmp(np->type, "isa"))
594 ifunc = interpret_isa_props;
595 else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
596 ifunc = interpret_root_props;
597 else if (!((ifunc == interpret_dbdma_props
598 || ifunc == interpret_macio_props)
599 && (!strcmp(np->type, "escc")
600 || !strcmp(np->type, "media-bay"))))
601 ifunc = NULL;
602
603 for (child = np->child; child != NULL; child = child->sibling) {
604 rc = finish_node(child, mem_start, ifunc,
605 naddrc, nsizec, measure_only);
606 if (rc)
607 goto out;
608 }
609out:
610 return rc;
611}
612
613static void __init scan_interrupt_controllers(void)
614{
615 struct device_node *np;
616 int n = 0;
617 char *name, *ic;
618 int iclen;
619
620 for (np = allnodes; np != NULL; np = np->allnext) {
621 ic = get_property(np, "interrupt-controller", &iclen);
622 name = get_property(np, "name", NULL);
623 /* checking iclen makes sure we don't get a false
624 match on /chosen.interrupt_controller */
625 if ((name != NULL
626 && strcmp(name, "interrupt-controller") == 0)
627 || (ic != NULL && iclen == 0
628 && strcmp(name, "AppleKiwi"))) {
629 if (n == 0)
630 dflt_interrupt_controller = np;
631 ++n;
632 }
633 }
634 num_interrupt_controllers = n;
635}
636
637/**
638 * finish_device_tree is called once things are running normally
639 * (i.e. with text and data mapped to the address they were linked at).
640 * It traverses the device tree and fills in some of the additional,
641 * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt
642 * mapping is also initialized at this point.
643 */
644void __init finish_device_tree(void)
645{
646 unsigned long start, end, size = 0;
647
648 DBG(" -> finish_device_tree\n");
649
650#ifdef CONFIG_PPC64
651 /* Initialize virtual IRQ map */
652 virt_irq_init();
653#endif
654 scan_interrupt_controllers();
655
656 /*
657 * Finish device-tree (pre-parsing some properties etc...)
658 * We do this in 2 passes. One with "measure_only" set, which
659 * will only measure the amount of memory needed, then we can
660 * allocate that memory, and call finish_node again. However,
661 * we must be careful as most routines will fail nowadays when
662 * prom_alloc() returns 0, so we must make sure our first pass
663 * doesn't start at 0. We pre-initialize size to 16 for that
664 * reason and then remove those additional 16 bytes
665 */
666 size = 16;
667 finish_node(allnodes, &size, NULL, 0, 0, 1);
668 size -= 16;
669 end = start = (unsigned long) __va(lmb_alloc(size, 128));
670 finish_node(allnodes, &end, NULL, 0, 0, 0);
671 BUG_ON(end != start + size);
672
673 DBG(" <- finish_device_tree\n");
674}
675
676static inline char *find_flat_dt_string(u32 offset)
677{
678 return ((char *)initial_boot_params) +
679 initial_boot_params->off_dt_strings + offset;
680}
681
682/**
683 * This function is used to scan the flattened device-tree, it is
684 * used to extract the memory informations at boot before we can
685 * unflatten the tree
686 */
687static int __init scan_flat_dt(int (*it)(unsigned long node,
688 const char *uname, int depth,
689 void *data),
690 void *data)
691{
692 unsigned long p = ((unsigned long)initial_boot_params) +
693 initial_boot_params->off_dt_struct;
694 int rc = 0;
695 int depth = -1;
696
697 do {
698 u32 tag = *((u32 *)p);
699 char *pathp;
700
701 p += 4;
702 if (tag == OF_DT_END_NODE) {
703 depth --;
704 continue;
705 }
706 if (tag == OF_DT_NOP)
707 continue;
708 if (tag == OF_DT_END)
709 break;
710 if (tag == OF_DT_PROP) {
711 u32 sz = *((u32 *)p);
712 p += 8;
713 if (initial_boot_params->version < 0x10)
714 p = _ALIGN(p, sz >= 8 ? 8 : 4);
715 p += sz;
716 p = _ALIGN(p, 4);
717 continue;
718 }
719 if (tag != OF_DT_BEGIN_NODE) {
720 printk(KERN_WARNING "Invalid tag %x scanning flattened"
721 " device tree !\n", tag);
722 return -EINVAL;
723 }
724 depth++;
725 pathp = (char *)p;
726 p = _ALIGN(p + strlen(pathp) + 1, 4);
727 if ((*pathp) == '/') {
728 char *lp, *np;
729 for (lp = NULL, np = pathp; *np; np++)
730 if ((*np) == '/')
731 lp = np+1;
732 if (lp != NULL)
733 pathp = lp;
734 }
735 rc = it(p, pathp, depth, data);
736 if (rc != 0)
737 break;
738 } while(1);
739
740 return rc;
741}
742
743/**
744 * This function can be used within scan_flattened_dt callback to get
745 * access to properties
746 */
747static void* __init get_flat_dt_prop(unsigned long node, const char *name,
748 unsigned long *size)
749{
750 unsigned long p = node;
751
752 do {
753 u32 tag = *((u32 *)p);
754 u32 sz, noff;
755 const char *nstr;
756
757 p += 4;
758 if (tag == OF_DT_NOP)
759 continue;
760 if (tag != OF_DT_PROP)
761 return NULL;
762
763 sz = *((u32 *)p);
764 noff = *((u32 *)(p + 4));
765 p += 8;
766 if (initial_boot_params->version < 0x10)
767 p = _ALIGN(p, sz >= 8 ? 8 : 4);
768
769 nstr = find_flat_dt_string(noff);
770 if (nstr == NULL) {
771 printk(KERN_WARNING "Can't find property index"
772 " name !\n");
773 return NULL;
774 }
775 if (strcmp(name, nstr) == 0) {
776 if (size)
777 *size = sz;
778 return (void *)p;
779 }
780 p += sz;
781 p = _ALIGN(p, 4);
782 } while(1);
783}
784
785static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
786 unsigned long align)
787{
788 void *res;
789
790 *mem = _ALIGN(*mem, align);
791 res = (void *)*mem;
792 *mem += size;
793
794 return res;
795}
796
797static unsigned long __init unflatten_dt_node(unsigned long mem,
798 unsigned long *p,
799 struct device_node *dad,
800 struct device_node ***allnextpp,
801 unsigned long fpsize)
802{
803 struct device_node *np;
804 struct property *pp, **prev_pp = NULL;
805 char *pathp;
806 u32 tag;
807 unsigned int l, allocl;
808 int has_name = 0;
809 int new_format = 0;
810
811 tag = *((u32 *)(*p));
812 if (tag != OF_DT_BEGIN_NODE) {
813 printk("Weird tag at start of node: %x\n", tag);
814 return mem;
815 }
816 *p += 4;
817 pathp = (char *)*p;
818 l = allocl = strlen(pathp) + 1;
819 *p = _ALIGN(*p + l, 4);
820
821 /* version 0x10 has a more compact unit name here instead of the full
822 * path. we accumulate the full path size using "fpsize", we'll rebuild
823 * it later. We detect this because the first character of the name is
824 * not '/'.
825 */
826 if ((*pathp) != '/') {
827 new_format = 1;
828 if (fpsize == 0) {
829 /* root node: special case. fpsize accounts for path
830 * plus terminating zero. root node only has '/', so
831 * fpsize should be 2, but we want to avoid the first
832 * level nodes to have two '/' so we use fpsize 1 here
833 */
834 fpsize = 1;
835 allocl = 2;
836 } else {
837 /* account for '/' and path size minus terminal 0
838 * already in 'l'
839 */
840 fpsize += l;
841 allocl = fpsize;
842 }
843 }
844
845
846 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
847 __alignof__(struct device_node));
848 if (allnextpp) {
849 memset(np, 0, sizeof(*np));
850 np->full_name = ((char*)np) + sizeof(struct device_node);
851 if (new_format) {
852 char *p = np->full_name;
853 /* rebuild full path for new format */
854 if (dad && dad->parent) {
855 strcpy(p, dad->full_name);
856#ifdef DEBUG
857 if ((strlen(p) + l + 1) != allocl) {
858 DBG("%s: p: %d, l: %d, a: %d\n",
859 pathp, strlen(p), l, allocl);
860 }
861#endif
862 p += strlen(p);
863 }
864 *(p++) = '/';
865 memcpy(p, pathp, l);
866 } else
867 memcpy(np->full_name, pathp, l);
868 prev_pp = &np->properties;
869 **allnextpp = np;
870 *allnextpp = &np->allnext;
871 if (dad != NULL) {
872 np->parent = dad;
873 /* we temporarily use the next field as `last_child'*/
874 if (dad->next == 0)
875 dad->child = np;
876 else
877 dad->next->sibling = np;
878 dad->next = np;
879 }
880 kref_init(&np->kref);
881 }
882 while(1) {
883 u32 sz, noff;
884 char *pname;
885
886 tag = *((u32 *)(*p));
887 if (tag == OF_DT_NOP) {
888 *p += 4;
889 continue;
890 }
891 if (tag != OF_DT_PROP)
892 break;
893 *p += 4;
894 sz = *((u32 *)(*p));
895 noff = *((u32 *)((*p) + 4));
896 *p += 8;
897 if (initial_boot_params->version < 0x10)
898 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
899
900 pname = find_flat_dt_string(noff);
901 if (pname == NULL) {
902 printk("Can't find property name in list !\n");
903 break;
904 }
905 if (strcmp(pname, "name") == 0)
906 has_name = 1;
907 l = strlen(pname) + 1;
908 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
909 __alignof__(struct property));
910 if (allnextpp) {
911 if (strcmp(pname, "linux,phandle") == 0) {
912 np->node = *((u32 *)*p);
913 if (np->linux_phandle == 0)
914 np->linux_phandle = np->node;
915 }
916 if (strcmp(pname, "ibm,phandle") == 0)
917 np->linux_phandle = *((u32 *)*p);
918 pp->name = pname;
919 pp->length = sz;
920 pp->value = (void *)*p;
921 *prev_pp = pp;
922 prev_pp = &pp->next;
923 }
924 *p = _ALIGN((*p) + sz, 4);
925 }
926 /* with version 0x10 we may not have the name property, recreate
927 * it here from the unit name if absent
928 */
929 if (!has_name) {
930 char *p = pathp, *ps = pathp, *pa = NULL;
931 int sz;
932
933 while (*p) {
934 if ((*p) == '@')
935 pa = p;
936 if ((*p) == '/')
937 ps = p + 1;
938 p++;
939 }
940 if (pa < ps)
941 pa = p;
942 sz = (pa - ps) + 1;
943 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
944 __alignof__(struct property));
945 if (allnextpp) {
946 pp->name = "name";
947 pp->length = sz;
948 pp->value = (unsigned char *)(pp + 1);
949 *prev_pp = pp;
950 prev_pp = &pp->next;
951 memcpy(pp->value, ps, sz - 1);
952 ((char *)pp->value)[sz - 1] = 0;
953 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
954 }
955 }
956 if (allnextpp) {
957 *prev_pp = NULL;
958 np->name = get_property(np, "name", NULL);
959 np->type = get_property(np, "device_type", NULL);
960
961 if (!np->name)
962 np->name = "<NULL>";
963 if (!np->type)
964 np->type = "<NULL>";
965 }
966 while (tag == OF_DT_BEGIN_NODE) {
967 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
968 tag = *((u32 *)(*p));
969 }
970 if (tag != OF_DT_END_NODE) {
971 printk("Weird tag at end of node: %x\n", tag);
972 return mem;
973 }
974 *p += 4;
975 return mem;
976}
977
978
979/**
980 * unflattens the device-tree passed by the firmware, creating the
981 * tree of struct device_node. It also fills the "name" and "type"
982 * pointers of the nodes so the normal device-tree walking functions
983 * can be used (this used to be done by finish_device_tree)
984 */
985void __init unflatten_device_tree(void)
986{
987 unsigned long start, mem, size;
988 struct device_node **allnextp = &allnodes;
989 char *p = NULL;
990 int l = 0;
991
992 DBG(" -> unflatten_device_tree()\n");
993
994 /* First pass, scan for size */
995 start = ((unsigned long)initial_boot_params) +
996 initial_boot_params->off_dt_struct;
997 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
998 size = (size | 3) + 1;
999
1000 DBG(" size is %lx, allocating...\n", size);
1001
1002 /* Allocate memory for the expanded device tree */
1003 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1004 if (!mem) {
1005 DBG("Couldn't allocate memory with lmb_alloc()!\n");
1006 panic("Couldn't allocate memory with lmb_alloc()!\n");
1007 }
1008 mem = (unsigned long) __va(mem);
1009
1010 ((u32 *)mem)[size / 4] = 0xdeadbeef;
1011
1012 DBG(" unflattening %lx...\n", mem);
1013
1014 /* Second pass, do actual unflattening */
1015 start = ((unsigned long)initial_boot_params) +
1016 initial_boot_params->off_dt_struct;
1017 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1018 if (*((u32 *)start) != OF_DT_END)
1019 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1020 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1021 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1022 ((u32 *)mem)[size / 4] );
1023 *allnextp = NULL;
1024
1025 /* Get pointer to OF "/chosen" node for use everywhere */
1026 of_chosen = of_find_node_by_path("/chosen");
1027
1028 /* Retreive command line */
1029 if (of_chosen != NULL) {
1030 p = (char *)get_property(of_chosen, "bootargs", &l);
1031 if (p != NULL && l > 0)
1032 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1033 }
1034#ifdef CONFIG_CMDLINE
1035 if (l == 0 || (l == 1 && (*p) == 0))
1036 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1037#endif /* CONFIG_CMDLINE */
1038
1039 DBG("Command line is: %s\n", cmd_line);
1040
1041 DBG(" <- unflatten_device_tree()\n");
1042}
1043
1044
/*
 * Flat device-tree scan callback for "cpu" nodes.  Run before the tree
 * is unflattened; records the hash table size (pSeries LPAR), identifies
 * the boot CPU, and detects Altivec/SMT capability from CPU properties.
 * Always returns 0 so the scan continues over all cpu nodes.
 */
static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth, void *data)
{
	char *type = get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

#ifdef CONFIG_PPC_PSERIES
	/* On LPAR, look for the first ibm,pft-size property for the hash table size
	 */
	if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
		u32 *pft_size;
		pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
		if (pft_size != NULL) {
			/* pft_size[0] is the NUMA CEC cookie */
			ppc64_pft_size = pft_size[1];
		}
	}
#endif

#ifdef CONFIG_PPC64
	if (initial_boot_params && initial_boot_params->version >= 2) {
		/* version 2 of the kexec param format adds the phys cpuid
		 * of booted proc.
		 */
		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
		boot_cpuid = 0;
	} else {
		/* Check if it's the boot-cpu, set it's hw index in paca now */
		if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
			/* "reg" carries the physical CPU id; default to 0 if absent */
			prop = get_flat_dt_prop(node, "reg", NULL);
			set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
			boot_cpuid_phys = get_hard_smp_processor_id(0);
		}
	}
#endif

#ifdef CONFIG_ALTIVEC
	/* Check if we have a VMX and eventually update CPU features */
	prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
	if (prop && (*prop) > 0) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}

	/* Same goes for Apple's "altivec" property */
	prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
	if (prop) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_PSERIES
	/*
	 * Check for an SMT capable CPU and set the CPU feature. We do
	 * this by looking at the size of the ibm,ppc-interrupt-server#s
	 * property
	 */
	prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
				       &size);
	/* More than one interrupt server per core implies SMT */
	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
	if (prop && ((size / sizeof(u32)) > 1))
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif

	return 0;
}
1117
/*
 * Flat device-tree scan callback for the /chosen node.  Records the
 * platform type, iommu on/off overrides, memory limit, TCE allocation
 * window and the RTAS base/entry/size before the tree is unflattened.
 * Returns 1 once /chosen has been processed so the scan stops early.
 */
static int __init early_init_dt_scan_chosen(unsigned long node,
					    const char *uname, int depth, void *data)
{
	u32 *prop;
	unsigned long *lprop;

	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	/* /chosen lives directly under the root, i.e. at depth 1 */
	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	/* get platform type */
	prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
	if (prop == NULL)
		return 0;
#ifdef CONFIG_PPC64
	systemcfg->platform = *prop;
#else
	_machine = *prop;
#endif

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* Optional cap on usable RAM, enforced later via
	 * lmb_enforce_memory_limit() in early_init_devtree() */
	lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_PPC_RTAS
	/* To help early debugging via the front panel, we retrieve a minimal
	 * set of RTAS infos now if available
	 */
	{
		u64 *basep, *entryp;

		basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
		entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
		prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
		/* Only latch RTAS info when all three properties are present */
		if (basep && entryp && prop) {
			rtas.base = *basep;
			rtas.entry = *entryp;
			rtas.size = *prop;
		}
	}
#endif /* CONFIG_PPC_RTAS */

	/* break now */
	return 1;
}
1181
1182static int __init early_init_dt_scan_root(unsigned long node,
1183 const char *uname, int depth, void *data)
1184{
1185 u32 *prop;
1186
1187 if (depth != 0)
1188 return 0;
1189
1190 prop = get_flat_dt_prop(node, "#size-cells", NULL);
1191 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1192 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1193
1194 prop = get_flat_dt_prop(node, "#address-cells", NULL);
1195 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1196 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1197
1198 /* break now */
1199 return 1;
1200}
1201
/*
 * Consume @s 32-bit cells from *@cellp (big-endian cell order) and
 * return them combined into an unsigned long, advancing *@cellp past
 * the cells that were read.  Cells that cannot fit in an unsigned long
 * are skipped from the most-significant end.
 */
static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;
	unsigned long r;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
		p++;
		s--;
	}
	r = *p++;
#ifdef CONFIG_PPC64
	/* On 64-bit, fold a second 32-bit cell into the low half */
	if (s > 1) {
		r <<= 32;
		r |= *(p++);
		s--;
	}
#endif

	*cellp = p;
	return r;
}
1224
1225
/*
 * Flat device-tree scan callback for "memory" nodes: decode each
 * (base, size) pair from the "reg" property using the root node's
 * cell counts and register it with the LMB allocator.
 */
static int __init early_init_dt_scan_memory(unsigned long node,
					    const char *uname, int depth, void *data)
{
	char *type = get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
	    uname, l, reg[0], reg[1], reg[2], reg[3]);

	/* Each entry is dt_root_addr_cells address cells followed by
	 * dt_root_size_cells size cells */
	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		unsigned long base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		DBG(" - %lx , %lx\n", base, size);
#ifdef CONFIG_PPC64
		/* With the iommu forced off, only memory below 2GB is
		 * DMA-able; clamp regions to that boundary */
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
#endif
		lmb_add(base, size);
	}
	return 0;
}
1267
1268static void __init early_reserve_mem(void)
1269{
1270 unsigned long base, size;
1271 unsigned long *reserve_map;
1272
1273 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1274 initial_boot_params->off_mem_rsvmap);
1275 while (1) {
1276 base = *(reserve_map++);
1277 size = *(reserve_map++);
1278 if (size == 0)
1279 break;
1280 DBG("reserving: %lx -> %lx\n", base, size);
1281 lmb_reserve(base, size);
1282 }
1283
1284#if 0
1285 DBG("memory reserved, lmbs :\n");
1286 lmb_dump_all();
1287#endif
1288}
1289
/*
 * Main early entry point for flat device-tree processing: scans the
 * tree handed over by the bootloader/prom_init for /chosen, memory and
 * cpu information, and sets up the LMB allocator.  Must run before the
 * tree is unflattened and before any early allocation.
 *
 * @params: physical/virtual pointer to the flattened device-tree blob
 */
void __init early_init_devtree(void *params)
{
	DBG(" -> early_init_devtree()\n");

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/* Retrieve various informations from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	scan_flat_dt(early_init_dt_scan_root, NULL);
	scan_flat_dt(early_init_dt_scan_memory, NULL);
	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();
#ifdef CONFIG_PPC64
	systemcfg->physicalMemorySize = lmb_phys_mem_size();
#endif
	/* Protect the kernel text/data/bss from the allocator */
	lmb_reserve(0, __pa(klimit));

	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	early_reserve_mem();

	DBG("Scanning CPUs ...\n");

	/* Retrieve hash table size from flattened tree plus other
	 * CPU related informations (altivec support, boot CPU ID, ...)
	 */
	scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}
1328
1329#undef printk
1330
1331int
1332prom_n_addr_cells(struct device_node* np)
1333{
1334 int* ip;
1335 do {
1336 if (np->parent)
1337 np = np->parent;
1338 ip = (int *) get_property(np, "#address-cells", NULL);
1339 if (ip != NULL)
1340 return *ip;
1341 } while (np->parent);
1342 /* No #address-cells property for the root node, default to 1 */
1343 return 1;
1344}
1345
1346int
1347prom_n_size_cells(struct device_node* np)
1348{
1349 int* ip;
1350 do {
1351 if (np->parent)
1352 np = np->parent;
1353 ip = (int *) get_property(np, "#size-cells", NULL);
1354 if (ip != NULL)
1355 return *ip;
1356 } while (np->parent);
1357 /* No #size-cells property for the root node, default to 1 */
1358 return 1;
1359}
1360
1361/**
1362 * Work out the sense (active-low level / active-high edge)
1363 * of each interrupt from the device tree.
1364 */
1365void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1366{
1367 struct device_node *np;
1368 int i, j;
1369
1370 /* default to level-triggered */
1371 memset(senses, 1, max - off);
1372
1373 for (np = allnodes; np != 0; np = np->allnext) {
1374 for (j = 0; j < np->n_intrs; j++) {
1375 i = np->intrs[j].line;
1376 if (i >= off && i < max)
1377 senses[i-off] = np->intrs[j].sense ?
1378 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
1379 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
1380 }
1381 }
1382}
1383
1384/**
1385 * Construct and return a list of the device_nodes with a given name.
1386 */
1387struct device_node *find_devices(const char *name)
1388{
1389 struct device_node *head, **prevp, *np;
1390
1391 prevp = &head;
1392 for (np = allnodes; np != 0; np = np->allnext) {
1393 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1394 *prevp = np;
1395 prevp = &np->next;
1396 }
1397 }
1398 *prevp = NULL;
1399 return head;
1400}
1401EXPORT_SYMBOL(find_devices);
1402
1403/**
1404 * Construct and return a list of the device_nodes with a given type.
1405 */
1406struct device_node *find_type_devices(const char *type)
1407{
1408 struct device_node *head, **prevp, *np;
1409
1410 prevp = &head;
1411 for (np = allnodes; np != 0; np = np->allnext) {
1412 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1413 *prevp = np;
1414 prevp = &np->next;
1415 }
1416 }
1417 *prevp = NULL;
1418 return head;
1419}
1420EXPORT_SYMBOL(find_type_devices);
1421
1422/**
1423 * Returns all nodes linked together
1424 */
1425struct device_node *find_all_nodes(void)
1426{
1427 struct device_node *head, **prevp, *np;
1428
1429 prevp = &head;
1430 for (np = allnodes; np != 0; np = np->allnext) {
1431 *prevp = np;
1432 prevp = &np->next;
1433 }
1434 *prevp = NULL;
1435 return head;
1436}
1437EXPORT_SYMBOL(find_all_nodes);
1438
/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property.  The property is a list of
 * NUL-separated strings; returns 1 on match, 0 otherwise.
 */
int device_is_compatible(struct device_node *device, const char *compat)
{
	const char* cp;
	int cplen, l;

	cp = (char *) get_property(device, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		/* NOTE(review): this is a case-insensitive *prefix* match —
		 * a property entry "fooX" matches compat "foo".  Confirm
		 * that is intentional before tightening to full equality. */
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		/* Advance past this entry and its terminating NUL */
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}
EXPORT_SYMBOL(device_is_compatible);
1461
1462
1463/**
1464 * Indicates whether the root node has a given value in its
1465 * compatible property.
1466 */
1467int machine_is_compatible(const char *compat)
1468{
1469 struct device_node *root;
1470 int rc = 0;
1471
1472 root = of_find_node_by_path("/");
1473 if (root) {
1474 rc = device_is_compatible(root, compat);
1475 of_node_put(root);
1476 }
1477 return rc;
1478}
1479EXPORT_SYMBOL(machine_is_compatible);
1480
1481/**
1482 * Construct and return a list of the device_nodes with a given type
1483 * and compatible property.
1484 */
1485struct device_node *find_compatible_devices(const char *type,
1486 const char *compat)
1487{
1488 struct device_node *head, **prevp, *np;
1489
1490 prevp = &head;
1491 for (np = allnodes; np != 0; np = np->allnext) {
1492 if (type != NULL
1493 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1494 continue;
1495 if (device_is_compatible(np, compat)) {
1496 *prevp = np;
1497 prevp = &np->next;
1498 }
1499 }
1500 *prevp = NULL;
1501 return head;
1502}
1503EXPORT_SYMBOL(find_compatible_devices);
1504
1505/**
1506 * Find the device_node with a given full_name.
1507 */
1508struct device_node *find_path_device(const char *path)
1509{
1510 struct device_node *np;
1511
1512 for (np = allnodes; np != 0; np = np->allnext)
1513 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1514 return np;
1515 return NULL;
1516}
1517EXPORT_SYMBOL(find_path_device);
1518
1519/*******
1520 *
1521 * New implementation of the OF "find" APIs, return a refcounted
1522 * object, call of_node_put() when done. The device tree and list
1523 * are protected by a rw_lock.
1524 *
1525 * Note that property management will need some locking as well,
1526 * this isn't dealt with yet.
1527 *
1528 *******/
1529
1530/**
1531 * of_find_node_by_name - Find a node by its "name" property
1532 * @from: The node to start searching from or NULL, the node
1533 * you pass will not be searched, only the next one
1534 * will; typically, you pass what the previous call
1535 * returned. of_node_put() will be called on it
1536 * @name: The name string to match against
1537 *
1538 * Returns a node pointer with refcount incremented, use
1539 * of_node_put() on it when done.
1540 */
1541struct device_node *of_find_node_by_name(struct device_node *from,
1542 const char *name)
1543{
1544 struct device_node *np;
1545
1546 read_lock(&devtree_lock);
1547 np = from ? from->allnext : allnodes;
1548 for (; np != 0; np = np->allnext)
1549 if (np->name != 0 && strcasecmp(np->name, name) == 0
1550 && of_node_get(np))
1551 break;
1552 if (from)
1553 of_node_put(from);
1554 read_unlock(&devtree_lock);
1555 return np;
1556}
1557EXPORT_SYMBOL(of_find_node_by_name);
1558
1559/**
1560 * of_find_node_by_type - Find a node by its "device_type" property
1561 * @from: The node to start searching from or NULL, the node
1562 * you pass will not be searched, only the next one
1563 * will; typically, you pass what the previous call
1564 * returned. of_node_put() will be called on it
1565 * @name: The type string to match against
1566 *
1567 * Returns a node pointer with refcount incremented, use
1568 * of_node_put() on it when done.
1569 */
1570struct device_node *of_find_node_by_type(struct device_node *from,
1571 const char *type)
1572{
1573 struct device_node *np;
1574
1575 read_lock(&devtree_lock);
1576 np = from ? from->allnext : allnodes;
1577 for (; np != 0; np = np->allnext)
1578 if (np->type != 0 && strcasecmp(np->type, type) == 0
1579 && of_node_get(np))
1580 break;
1581 if (from)
1582 of_node_put(from);
1583 read_unlock(&devtree_lock);
1584 return np;
1585}
1586EXPORT_SYMBOL(of_find_node_by_type);
1587
1588/**
1589 * of_find_compatible_node - Find a node based on type and one of the
1590 * tokens in its "compatible" property
1591 * @from: The node to start searching from or NULL, the node
1592 * you pass will not be searched, only the next one
1593 * will; typically, you pass what the previous call
1594 * returned. of_node_put() will be called on it
1595 * @type: The type string to match "device_type" or NULL to ignore
1596 * @compatible: The string to match to one of the tokens in the device
1597 * "compatible" list.
1598 *
1599 * Returns a node pointer with refcount incremented, use
1600 * of_node_put() on it when done.
1601 */
1602struct device_node *of_find_compatible_node(struct device_node *from,
1603 const char *type, const char *compatible)
1604{
1605 struct device_node *np;
1606
1607 read_lock(&devtree_lock);
1608 np = from ? from->allnext : allnodes;
1609 for (; np != 0; np = np->allnext) {
1610 if (type != NULL
1611 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1612 continue;
1613 if (device_is_compatible(np, compatible) && of_node_get(np))
1614 break;
1615 }
1616 if (from)
1617 of_node_put(from);
1618 read_unlock(&devtree_lock);
1619 return np;
1620}
1621EXPORT_SYMBOL(of_find_compatible_node);
1622
1623/**
1624 * of_find_node_by_path - Find a node matching a full OF path
1625 * @path: The full path to match
1626 *
1627 * Returns a node pointer with refcount incremented, use
1628 * of_node_put() on it when done.
1629 */
1630struct device_node *of_find_node_by_path(const char *path)
1631{
1632 struct device_node *np = allnodes;
1633
1634 read_lock(&devtree_lock);
1635 for (; np != 0; np = np->allnext) {
1636 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1637 && of_node_get(np))
1638 break;
1639 }
1640 read_unlock(&devtree_lock);
1641 return np;
1642}
1643EXPORT_SYMBOL(of_find_node_by_path);
1644
1645/**
1646 * of_find_node_by_phandle - Find a node given a phandle
1647 * @handle: phandle of the node to find
1648 *
1649 * Returns a node pointer with refcount incremented, use
1650 * of_node_put() on it when done.
1651 */
1652struct device_node *of_find_node_by_phandle(phandle handle)
1653{
1654 struct device_node *np;
1655
1656 read_lock(&devtree_lock);
1657 for (np = allnodes; np != 0; np = np->allnext)
1658 if (np->linux_phandle == handle)
1659 break;
1660 if (np)
1661 of_node_get(np);
1662 read_unlock(&devtree_lock);
1663 return np;
1664}
1665EXPORT_SYMBOL(of_find_node_by_phandle);
1666
1667/**
1668 * of_find_all_nodes - Get next node in global list
1669 * @prev: Previous node or NULL to start iteration
1670 * of_node_put() will be called on it
1671 *
1672 * Returns a node pointer with refcount incremented, use
1673 * of_node_put() on it when done.
1674 */
1675struct device_node *of_find_all_nodes(struct device_node *prev)
1676{
1677 struct device_node *np;
1678
1679 read_lock(&devtree_lock);
1680 np = prev ? prev->allnext : allnodes;
1681 for (; np != 0; np = np->allnext)
1682 if (of_node_get(np))
1683 break;
1684 if (prev)
1685 of_node_put(prev);
1686 read_unlock(&devtree_lock);
1687 return np;
1688}
1689EXPORT_SYMBOL(of_find_all_nodes);
1690
1691/**
1692 * of_get_parent - Get a node's parent if any
1693 * @node: Node to get parent
1694 *
1695 * Returns a node pointer with refcount incremented, use
1696 * of_node_put() on it when done.
1697 */
1698struct device_node *of_get_parent(const struct device_node *node)
1699{
1700 struct device_node *np;
1701
1702 if (!node)
1703 return NULL;
1704
1705 read_lock(&devtree_lock);
1706 np = of_node_get(node->parent);
1707 read_unlock(&devtree_lock);
1708 return np;
1709}
1710EXPORT_SYMBOL(of_get_parent);
1711
1712/**
1713 * of_get_next_child - Iterate a node childs
1714 * @node: parent node
1715 * @prev: previous child of the parent node, or NULL to get first
1716 *
1717 * Returns a node pointer with refcount incremented, use
1718 * of_node_put() on it when done.
1719 */
1720struct device_node *of_get_next_child(const struct device_node *node,
1721 struct device_node *prev)
1722{
1723 struct device_node *next;
1724
1725 read_lock(&devtree_lock);
1726 next = prev ? prev->sibling : node->child;
1727 for (; next != 0; next = next->sibling)
1728 if (of_node_get(next))
1729 break;
1730 if (prev)
1731 of_node_put(prev);
1732 read_unlock(&devtree_lock);
1733 return next;
1734}
1735EXPORT_SYMBOL(of_get_next_child);
1736
1737/**
1738 * of_node_get - Increment refcount of a node
1739 * @node: Node to inc refcount, NULL is supported to
1740 * simplify writing of callers
1741 *
1742 * Returns node.
1743 */
1744struct device_node *of_node_get(struct device_node *node)
1745{
1746 if (node)
1747 kref_get(&node->kref);
1748 return node;
1749}
1750EXPORT_SYMBOL(of_node_get);
1751
/* Map a kref embedded in a device_node back to its containing node */
static inline struct device_node * kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}
1756
/**
 * of_node_release - release a dynamically allocated node
 * @kref: kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.  Frees the node's properties and the
 * node itself, but only for dynamically allocated nodes;
 * nodes built at boot time are never freed.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* Boot-time (static) nodes are never freed */
	if (!OF_IS_DYNAMIC(node))
		return;
	/* Free the property list, each entry owning its name and value */
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;
	}
	kfree(node->intrs);
	kfree(node->addrs);
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}
1784
1785/**
1786 * of_node_put - Decrement refcount of a node
1787 * @node: Node to dec refcount, NULL is supported to
1788 * simplify writing of callers
1789 *
1790 */
1791void of_node_put(struct device_node *node)
1792{
1793 if (node)
1794 kref_put(&node->kref, of_node_release);
1795}
1796EXPORT_SYMBOL(of_node_put);
1797
/*
 * Plug a device node into the tree and global list.
 * The caller must have set np->parent beforehand; the node becomes its
 * parent's first child and the new head of the global allnodes list.
 */
void of_attach_node(struct device_node *np)
{
	write_lock(&devtree_lock);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock(&devtree_lock);
}
1810
/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 *
 * Assumes @np is actually present in both the global allnodes list
 * and its parent's child list; the linear searches below do not
 * check for a missing node.
 */
void of_detach_node(const struct device_node *np)
{
	struct device_node *parent;

	write_lock(&devtree_lock);

	parent = np->parent;

	/* First unlink from the global allnodes list */
	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	/* Then unlink from the parent's sibling chain */
	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	write_unlock(&devtree_lock);
}
1848
1849#ifdef CONFIG_PPC_PSERIES
1850/*
1851 * Fix up the uninitialized fields in a new device node:
1852 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1853 *
1854 * A lot of boot-time code is duplicated here, because functions such
1855 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1856 * slab allocator.
1857 *
1858 * This should probably be split up into smaller chunks.
1859 */
1860
1861static int of_finish_dynamic_node(struct device_node *node,
1862 unsigned long *unused1, int unused2,
1863 int unused3, int unused4)
1864{
1865 struct device_node *parent = of_get_parent(node);
1866 int err = 0;
1867 phandle *ibm_phandle;
1868
1869 node->name = get_property(node, "name", NULL);
1870 node->type = get_property(node, "device_type", NULL);
1871
1872 if (!parent) {
1873 err = -ENODEV;
1874 goto out;
1875 }
1876
1877 /* We don't support that function on PowerMac, at least
1878 * not yet
1879 */
1880 if (systemcfg->platform == PLATFORM_POWERMAC)
1881 return -ENODEV;
1882
1883 /* fix up new node's linux_phandle field */
1884 if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1885 node->linux_phandle = *ibm_phandle;
1886
1887out:
1888 of_node_put(parent);
1889 return err;
1890}
1891
1892static int prom_reconfig_notifier(struct notifier_block *nb,
1893 unsigned long action, void *node)
1894{
1895 int err;
1896
1897 switch (action) {
1898 case PSERIES_RECONFIG_ADD:
1899 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1900 if (err < 0) {
1901 printk(KERN_ERR "finish_node returned %d\n", err);
1902 err = NOTIFY_BAD;
1903 }
1904 break;
1905 default:
1906 err = NOTIFY_DONE;
1907 break;
1908 }
1909 return err;
1910}
1911
/* Registered on the pSeries reconfig chain; high priority so node
 * fixup runs before other listeners see the new node. */
static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10, /* This one needs to run first */
};

/* Register for pSeries dynamic-reconfiguration (DLPAR) events at boot */
static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
1922#endif
1923
1924/*
1925 * Find a property with a given name for a given node
1926 * and return the value.
1927 */
1928unsigned char *get_property(struct device_node *np, const char *name,
1929 int *lenp)
1930{
1931 struct property *pp;
1932
1933 for (pp = np->properties; pp != 0; pp = pp->next)
1934 if (strcmp(pp->name, name) == 0) {
1935 if (lenp != 0)
1936 *lenp = pp->length;
1937 return pp->value;
1938 }
1939 return NULL;
1940}
1941EXPORT_SYMBOL(get_property);
1942
1943/*
1944 * Add a property to a node
1945 */
1946void prom_add_property(struct device_node* np, struct property* prop)
1947{
1948 struct property **next = &np->properties;
1949
1950 prop->next = NULL;
1951 while (*next)
1952 next = &(*next)->next;
1953 *next = prop;
1954}
1955
1956/* I quickly hacked that one, check against spec ! */
1957static inline unsigned long
1958bus_space_to_resource_flags(unsigned int bus_space)
1959{
1960 u8 space = (bus_space >> 24) & 0xf;
1961 if (space == 0)
1962 space = 0x02;
1963 if (space == 0x02)
1964 return IORESOURCE_MEM;
1965 else if (space == 0x01)
1966 return IORESOURCE_IO;
1967 else {
1968 printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
1969 bus_space);
1970 return 0;
1971 }
1972}
1973
/*
 * Locate the resource of @pdev that contains the OF address @range.
 * Returns NULL if no resource of the right kind contains the range's
 * start, or if the range runs past the end of the containing resource.
 */
static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
						 struct address_range *range)
{
	unsigned long mask;
	int i;

	/* Check this one */
	mask = bus_space_to_resource_flags(range->space);
	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
		if ((pdev->resource[i].flags & mask) == mask &&
			pdev->resource[i].start <= range->address &&
			pdev->resource[i].end > range->address) {
			/* Start lies inside this resource; reject if the
			 * range's end spills past the resource's end */
			if ((range->address + range->size - 1) > pdev->resource[i].end) {
				/* Add better message */
				printk(KERN_WARNING "PCI/OF resource overlap !\n");
				return NULL;
			}
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE)
		return NULL;
	return &pdev->resource[i];
}
1998
/*
 * Request an OF device resource. Currently handles child of PCI devices,
 * or other nodes attached to the root node. Ultimately, put some
 * link to resources in the OF node.
 *
 * @node: device node owning the address range
 * @index: which entry of node->addrs to request
 * @name_postfix: optional suffix appended to the node name for the
 *                resource name, or NULL
 * Returns the requested resource, or NULL on any failure.
 */
struct resource *request_OF_resource(struct device_node* node, int index,
				const char* name_postfix)
{
	struct pci_dev* pcidev;
	u8 pci_bus, pci_devfn;
	unsigned long iomask;
	struct device_node* nd;
	struct resource* parent;
	struct resource *res = NULL;
	int nlen, plen;

	if (index >= node->n_addrs)
		goto fail;

	/* Sanity check on bus space */
	iomask = bus_space_to_resource_flags(node->addrs[index].space);
	if (iomask & IORESOURCE_MEM)
		parent = &iomem_resource;
	else if (iomask & IORESOURCE_IO)
		parent = &ioport_resource;
	else
		goto fail;

	/* Find a PCI parent if any */
	nd = node;
	pcidev = NULL;
	while (nd) {
		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
			pcidev = pci_find_slot(pci_bus, pci_devfn);
		if (pcidev) break;
		nd = nd->parent;
	}
	/* When under a PCI device, request from its BAR resource
	 * instead of the global iomem/ioport root */
	if (pcidev)
		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
	if (!parent) {
		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
		       node->name);
		goto fail;
	}

	res = __request_region(parent, node->addrs[index].address,
			       node->addrs[index].size, NULL);
	if (!res)
		goto fail;
	/* Build the resource name: node name plus optional postfix.
	 * On kmalloc failure res->name simply stays NULL. */
	nlen = strlen(node->name);
	plen = name_postfix ? strlen(name_postfix) : 0;
	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
	if (res->name) {
		strcpy((char *)res->name, node->name);
		if (plen)
			strcpy((char *)res->name+nlen, name_postfix);
	}
	return res;
fail:
	return NULL;
}
EXPORT_SYMBOL(request_OF_resource);
2061
/*
 * Release a resource previously obtained with request_OF_resource().
 *
 * @node: device node owning the address range
 * @index: which entry of node->addrs to release
 * Returns 0 on success, -EINVAL for a bad index/space, -ENODEV when
 * the parent or the busy region cannot be found.
 */
int release_OF_resource(struct device_node *node, int index)
{
	struct pci_dev* pcidev;
	u8 pci_bus, pci_devfn;
	unsigned long iomask, start, end;
	struct device_node* nd;
	struct resource* parent;
	struct resource *res = NULL;

	if (index >= node->n_addrs)
		return -EINVAL;

	/* Sanity check on bus space */
	iomask = bus_space_to_resource_flags(node->addrs[index].space);
	if (iomask & IORESOURCE_MEM)
		parent = &iomem_resource;
	else if (iomask & IORESOURCE_IO)
		parent = &ioport_resource;
	else
		return -EINVAL;

	/* Find a PCI parent if any */
	nd = node;
	pcidev = NULL;
	while(nd) {
		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
			pcidev = pci_find_slot(pci_bus, pci_devfn);
		if (pcidev) break;
		nd = nd->parent;
	}
	if (pcidev)
		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
	if (!parent) {
		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
		       node->name);
		return -ENODEV;
	}

	/* Find us in the parent and its childs */
	res = parent->child;
	start = node->addrs[index].address;
	end = start + node->addrs[index].size - 1;
	/* Walk the resource tree: descend into a containing region,
	 * otherwise move to the next sibling, until the exact busy
	 * region is found */
	while (res) {
		if (res->start == start && res->end == end &&
		    (res->flags & IORESOURCE_BUSY))
			break;
		if (res->start <= start && res->end >= end)
			res = res->child;
		else
			res = res->sibling;
	}
	if (!res)
		return -ENODEV;

	/* Free the name allocated by request_OF_resource() */
	if (res->name) {
		kfree(res->name);
		res->name = NULL;
	}
	release_resource(res);
	kfree(res);

	return 0;
}
EXPORT_SYMBOL(release_OF_resource);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
new file mode 100644
index 000000000000..095659d51b4b
--- /dev/null
+++ b/arch/powerpc/kernel/prom_init.c
@@ -0,0 +1,2065 @@
1/*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG_PROM
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/proc_fs.h>
28#include <linux/stringify.h>
29#include <linux/delay.h>
30#include <linux/initrd.h>
31#include <linux/bitops.h>
32#include <asm/prom.h>
33#include <asm/rtas.h>
34#include <asm/page.h>
35#include <asm/processor.h>
36#include <asm/irq.h>
37#include <asm/io.h>
38#include <asm/smp.h>
39#include <asm/system.h>
40#include <asm/mmu.h>
41#include <asm/pgtable.h>
42#include <asm/pci.h>
43#include <asm/iommu.h>
44#include <asm/bootinfo.h>
45#include <asm/btext.h>
46#include <asm/sections.h>
47#include <asm/machdep.h>
48
49#ifdef CONFIG_LOGO_LINUX_CLUT224
50#include <linux/linux_logo.h>
51extern const struct linux_logo logo_linux_clut224;
52#endif
53
54/*
55 * Properties whose value is longer than this get excluded from our
56 * copy of the device tree. This value does need to be big enough to
57 * ensure that we don't lose things like the interrupt-map property
58 * on a PCI-PCI bridge.
59 */
60#define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
61
62/*
63 * Eventually bump that one up
64 */
65#define DEVTREE_CHUNK_SIZE 0x100000
66
67/*
68 * This is the size of the local memory reserve map that gets copied
69 * into the boot params passed to the kernel. That size is totally
70 * flexible as the kernel just reads the list until it encounters an
71 * entry with size 0, so it can be changed without breaking binary
72 * compatibility
73 */
74#define MEM_RESERVE_MAP_SIZE 8
75
76/*
77 * prom_init() is called very early on, before the kernel text
78 * and data have been mapped to KERNELBASE. At this point the code
79 * is running at whatever address it has been loaded at.
80 * On ppc32 we compile with -mrelocatable, which means that references
81 * to extern and static variables get relocated automatically.
82 * On ppc64 we have to relocate the references explicitly with
83 * RELOC. (Note that strings count as static variables.)
84 *
85 * Because OF may have mapped I/O devices into the area starting at
86 * KERNELBASE, particularly on CHRP machines, we can't safely call
87 * OF once the kernel has been mapped to KERNELBASE. Therefore all
88 * OF calls must be done within prom_init().
89 *
90 * ADDR is used in calls to call_prom. The 4th and following
91 * arguments to call_prom should be 32-bit values.
92 * On ppc64, 64 bit values are truncated to 32 bits (and
93 * fortunately don't get interpreted as two arguments).
94 */
95#ifdef CONFIG_PPC64
96#define RELOC(x) (*PTRRELOC(&(x)))
97#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
98#else
99#define RELOC(x) (x)
100#define ADDR(x) (u32) (x)
101#endif
102
103#define PROM_BUG() do { \
104 prom_printf("kernel BUG at %s line 0x%x!\n", \
105 RELOC(__FILE__), __LINE__); \
106 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
107} while (0)
108
109#ifdef DEBUG_PROM
110#define prom_debug(x...) prom_printf(x)
111#else
112#define prom_debug(x...)
113#endif
114
115#ifdef CONFIG_PPC32
116#define PLATFORM_POWERMAC _MACH_Pmac
117#define PLATFORM_CHRP _MACH_chrp
118#endif
119
120
121typedef u32 prom_arg_t;
122
123struct prom_args {
124 u32 service;
125 u32 nargs;
126 u32 nret;
127 prom_arg_t args[10];
128};
129
130struct prom_t {
131 ihandle root;
132 ihandle chosen;
133 int cpu;
134 ihandle stdout;
135};
136
137struct mem_map_entry {
138 unsigned long base;
139 unsigned long size;
140};
141
142typedef u32 cell_t;
143
144extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
145
146#ifdef CONFIG_PPC64
147extern void enter_prom(struct prom_args *args, unsigned long entry);
148#else
149static inline void enter_prom(struct prom_args *args, unsigned long entry)
150{
151 ((void (*)(struct prom_args *))entry)(args);
152}
153#endif
154
155extern void copy_and_flush(unsigned long dest, unsigned long src,
156 unsigned long size, unsigned long offset);
157
158/* prom structure */
159static struct prom_t __initdata prom;
160
161static unsigned long prom_entry __initdata;
162
163#define PROM_SCRATCH_SIZE 256
164
165static char __initdata of_stdout_device[256];
166static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
167
168static unsigned long __initdata dt_header_start;
169static unsigned long __initdata dt_struct_start, dt_struct_end;
170static unsigned long __initdata dt_string_start, dt_string_end;
171
172static unsigned long __initdata prom_initrd_start, prom_initrd_end;
173
174#ifdef CONFIG_PPC64
175static int __initdata iommu_force_on;
176static int __initdata ppc64_iommu_off;
177static unsigned long __initdata prom_tce_alloc_start;
178static unsigned long __initdata prom_tce_alloc_end;
179#endif
180
181static int __initdata of_platform;
182
183static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
184
185static unsigned long __initdata prom_memory_limit;
186
187static unsigned long __initdata alloc_top;
188static unsigned long __initdata alloc_top_high;
189static unsigned long __initdata alloc_bottom;
190static unsigned long __initdata rmo_top;
191static unsigned long __initdata ram_top;
192
193static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
194static int __initdata mem_reserve_cnt;
195
196static cell_t __initdata regbuf[1024];
197
198
199#define MAX_CPU_THREADS 2
200
201/* TO GO */
202#ifdef CONFIG_HMT
203struct {
204 unsigned int pir;
205 unsigned int threadid;
206} hmt_thread_data[NR_CPUS];
207#endif /* CONFIG_HMT */
208
209/*
210 * Error results ... some OF calls will return "-1" on error, some
211 * will return 0, some will return either. To simplify, here are
212 * macros to use with any ihandle or phandle return value to check if
213 * it is valid
214 */
215
216#define PROM_ERROR (-1u)
217#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
218#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
219
220
221/* This is the one and *ONLY* place where we actually call open
222 * firmware.
223 */
224
225static int __init call_prom(const char *service, int nargs, int nret, ...)
226{
227 int i;
228 struct prom_args args;
229 va_list list;
230
231 args.service = ADDR(service);
232 args.nargs = nargs;
233 args.nret = nret;
234
235 va_start(list, nret);
236 for (i = 0; i < nargs; i++)
237 args.args[i] = va_arg(list, prom_arg_t);
238 va_end(list);
239
240 for (i = 0; i < nret; i++)
241 args.args[nargs+i] = 0;
242
243 enter_prom(&args, RELOC(prom_entry));
244
245 return (nret > 0) ? args.args[nargs] : 0;
246}
247
248static int __init call_prom_ret(const char *service, int nargs, int nret,
249 prom_arg_t *rets, ...)
250{
251 int i;
252 struct prom_args args;
253 va_list list;
254
255 args.service = ADDR(service);
256 args.nargs = nargs;
257 args.nret = nret;
258
259 va_start(list, rets);
260 for (i = 0; i < nargs; i++)
261 args.args[i] = va_arg(list, prom_arg_t);
262 va_end(list);
263
264 for (i = 0; i < nret; i++)
265 rets[nargs+i] = 0;
266
267 enter_prom(&args, RELOC(prom_entry));
268
269 if (rets != NULL)
270 for (i = 1; i < nret; ++i)
271 rets[i-1] = args.args[nargs+i];
272
273 return (nret > 0) ? args.args[nargs] : 0;
274}
275
276
277static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
278 unsigned long align)
279{
280 return (unsigned int)call_prom("claim", 3, 1,
281 (prom_arg_t)virt, (prom_arg_t)size,
282 (prom_arg_t)align);
283}
284
285static void __init prom_print(const char *msg)
286{
287 const char *p, *q;
288 struct prom_t *_prom = &RELOC(prom);
289
290 if (_prom->stdout == 0)
291 return;
292
293 for (p = msg; *p != 0; p = q) {
294 for (q = p; *q != 0 && *q != '\n'; ++q)
295 ;
296 if (q > p)
297 call_prom("write", 3, 1, _prom->stdout, p, q - p);
298 if (*q == 0)
299 break;
300 ++q;
301 call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
302 }
303}
304
305
306static void __init prom_print_hex(unsigned long val)
307{
308 int i, nibbles = sizeof(val)*2;
309 char buf[sizeof(val)*2+1];
310 struct prom_t *_prom = &RELOC(prom);
311
312 for (i = nibbles-1; i >= 0; i--) {
313 buf[i] = (val & 0xf) + '0';
314 if (buf[i] > '9')
315 buf[i] += ('a'-'0'-10);
316 val >>= 4;
317 }
318 buf[nibbles] = '\0';
319 call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
320}
321
322
323static void __init prom_printf(const char *format, ...)
324{
325 const char *p, *q, *s;
326 va_list args;
327 unsigned long v;
328 struct prom_t *_prom = &RELOC(prom);
329
330 va_start(args, format);
331#ifdef CONFIG_PPC64
332 format = PTRRELOC(format);
333#endif
334 for (p = format; *p != 0; p = q) {
335 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
336 ;
337 if (q > p)
338 call_prom("write", 3, 1, _prom->stdout, p, q - p);
339 if (*q == 0)
340 break;
341 if (*q == '\n') {
342 ++q;
343 call_prom("write", 3, 1, _prom->stdout,
344 ADDR("\r\n"), 2);
345 continue;
346 }
347 ++q;
348 if (*q == 0)
349 break;
350 switch (*q) {
351 case 's':
352 ++q;
353 s = va_arg(args, const char *);
354 prom_print(s);
355 break;
356 case 'x':
357 ++q;
358 v = va_arg(args, unsigned long);
359 prom_print_hex(v);
360 break;
361 }
362 }
363}
364
365
366static void __init __attribute__((noreturn)) prom_panic(const char *reason)
367{
368#ifdef CONFIG_PPC64
369 reason = PTRRELOC(reason);
370#endif
371 prom_print(reason);
372 /* ToDo: should put up an SRC here on p/iSeries */
373 call_prom("exit", 0, 0);
374
375 for (;;) /* should never get here */
376 ;
377}
378
379
380static int __init prom_next_node(phandle *nodep)
381{
382 phandle node;
383
384 if ((node = *nodep) != 0
385 && (*nodep = call_prom("child", 1, 1, node)) != 0)
386 return 1;
387 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
388 return 1;
389 for (;;) {
390 if ((node = call_prom("parent", 1, 1, node)) == 0)
391 return 0;
392 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
393 return 1;
394 }
395}
396
397static int __init prom_getprop(phandle node, const char *pname,
398 void *value, size_t valuelen)
399{
400 return call_prom("getprop", 4, 1, node, ADDR(pname),
401 (u32)(unsigned long) value, (u32) valuelen);
402}
403
404static int __init prom_getproplen(phandle node, const char *pname)
405{
406 return call_prom("getproplen", 2, 1, node, ADDR(pname));
407}
408
409static int __init prom_setprop(phandle node, const char *pname,
410 void *value, size_t valuelen)
411{
412 return call_prom("setprop", 4, 1, node, ADDR(pname),
413 (u32)(unsigned long) value, (u32) valuelen);
414}
415
/* We can't use the standard versions because of RELOC headaches. */
#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
			 || ('a' <= (c) && (c) <= 'f') \
			 || ('A' <= (c) && (c) <= 'F'))

#define isdigit(c)	('0' <= (c) && (c) <= '9')
#define islower(c)	('a' <= (c) && (c) <= 'z')
#define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))

/*
 * Parse an unsigned number from @cp: a leading "0" selects octal and
 * "0x"/"0X" selects hex, otherwise decimal.  If @endp is non-NULL it
 * receives a pointer to the first unconsumed character.
 */
unsigned long prom_strtoul(const char *cp, const char **endp)
{
	unsigned long result = 0, base = 10;

	if (*cp == '0') {
		cp++;
		base = 8;
		if (toupper(*cp) == 'X') {
			cp++;
			base = 16;
		}
	}

	while (isxdigit(*cp)) {
		unsigned long digit;

		digit = isdigit(*cp) ? (unsigned long)(*cp - '0')
				     : (unsigned long)(toupper(*cp) - 'A' + 10);
		/* Stop at the first digit not valid for this base. */
		if (digit >= base)
			break;
		result = result * base + digit;
		cp++;
	}

	if (endp)
		*endp = cp;

	return result;
}
449
/*
 * Parse a size argument such as "256M" or "1g": a number followed by an
 * optional K/M/G suffix (either case).  *retptr is advanced past what
 * was consumed.
 */
unsigned long prom_memparse(const char *ptr, const char **retptr)
{
	unsigned long val = prom_strtoul(ptr, retptr);
	int shift = 0;
	char suffix;

	/*
	 * We can't use a switch here because GCC *may* generate a
	 * jump table which won't work, because we're not running at
	 * the address we're linked at.
	 */
	suffix = **retptr;
	if (suffix == 'G' || suffix == 'g')
		shift = 30;
	else if (suffix == 'M' || suffix == 'm')
		shift = 20;
	else if (suffix == 'K' || suffix == 'k')
		shift = 10;

	if (shift) {
		val <<= shift;
		(*retptr)++;
	}

	return val;
}
476
477/*
478 * Early parsing of the command line passed to the kernel, used for
479 * "mem=x" and the options that affect the iommu
480 */
481static void __init early_cmdline_parse(void)
482{
483 struct prom_t *_prom = &RELOC(prom);
484 char *opt, *p;
485 int l = 0;
486
487 RELOC(prom_cmd_line[0]) = 0;
488 p = RELOC(prom_cmd_line);
489 if ((long)_prom->chosen > 0)
490 l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
491#ifdef CONFIG_CMDLINE
492 if (l == 0) /* dbl check */
493 strlcpy(RELOC(prom_cmd_line),
494 RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
495#endif /* CONFIG_CMDLINE */
496 prom_printf("command line: %s\n", RELOC(prom_cmd_line));
497
498#ifdef CONFIG_PPC64
499 opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
500 if (opt) {
501 prom_printf("iommu opt is: %s\n", opt);
502 opt += 6;
503 while (*opt && *opt == ' ')
504 opt++;
505 if (!strncmp(opt, RELOC("off"), 3))
506 RELOC(ppc64_iommu_off) = 1;
507 else if (!strncmp(opt, RELOC("force"), 5))
508 RELOC(iommu_force_on) = 1;
509 }
510#endif
511
512 opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
513 if (opt) {
514 opt += 4;
515 RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
516#ifdef CONFIG_PPC64
517 /* Align to 16 MB == size of ppc64 large page */
518 RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
519#endif
520 }
521}
522
523#ifdef CONFIG_PPC_PSERIES
524/*
525 * To tell the firmware what our capabilities are, we have to pass
526 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
527 * that contain structures that contain the actual values.
528 */
529static struct fake_elf {
530 Elf32_Ehdr elfhdr;
531 Elf32_Phdr phdr[2];
532 struct chrpnote {
533 u32 namesz;
534 u32 descsz;
535 u32 type;
536 char name[8]; /* "PowerPC" */
537 struct chrpdesc {
538 u32 real_mode;
539 u32 real_base;
540 u32 real_size;
541 u32 virt_base;
542 u32 virt_size;
543 u32 load_base;
544 } chrpdesc;
545 } chrpnote;
546 struct rpanote {
547 u32 namesz;
548 u32 descsz;
549 u32 type;
550 char name[24]; /* "IBM,RPA-Client-Config" */
551 struct rpadesc {
552 u32 lpar_affinity;
553 u32 min_rmo_size;
554 u32 min_rmo_percent;
555 u32 max_pft_size;
556 u32 splpar;
557 u32 min_load;
558 u32 new_mem_def;
559 u32 ignore_me;
560 } rpadesc;
561 } rpanote;
562} fake_elf = {
563 .elfhdr = {
564 .e_ident = { 0x7f, 'E', 'L', 'F',
565 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
566 .e_type = ET_EXEC, /* yeah right */
567 .e_machine = EM_PPC,
568 .e_version = EV_CURRENT,
569 .e_phoff = offsetof(struct fake_elf, phdr),
570 .e_phentsize = sizeof(Elf32_Phdr),
571 .e_phnum = 2
572 },
573 .phdr = {
574 [0] = {
575 .p_type = PT_NOTE,
576 .p_offset = offsetof(struct fake_elf, chrpnote),
577 .p_filesz = sizeof(struct chrpnote)
578 }, [1] = {
579 .p_type = PT_NOTE,
580 .p_offset = offsetof(struct fake_elf, rpanote),
581 .p_filesz = sizeof(struct rpanote)
582 }
583 },
584 .chrpnote = {
585 .namesz = sizeof("PowerPC"),
586 .descsz = sizeof(struct chrpdesc),
587 .type = 0x1275,
588 .name = "PowerPC",
589 .chrpdesc = {
590 .real_mode = ~0U, /* ~0 means "don't care" */
591 .real_base = ~0U,
592 .real_size = ~0U,
593 .virt_base = ~0U,
594 .virt_size = ~0U,
595 .load_base = ~0U
596 },
597 },
598 .rpanote = {
599 .namesz = sizeof("IBM,RPA-Client-Config"),
600 .descsz = sizeof(struct rpadesc),
601 .type = 0x12759999,
602 .name = "IBM,RPA-Client-Config",
603 .rpadesc = {
604 .lpar_affinity = 0,
605 .min_rmo_size = 64, /* in megabytes */
606 .min_rmo_percent = 0,
607 .max_pft_size = 48, /* 2^48 bytes max PFT size */
608 .splpar = 1,
609 .min_load = ~0U,
610 .new_mem_def = 0
611 }
612 }
613};
614
615static void __init prom_send_capabilities(void)
616{
617 ihandle elfloader;
618
619 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
620 if (elfloader == 0) {
621 prom_printf("couldn't open /packages/elf-loader\n");
622 return;
623 }
624 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
625 elfloader, ADDR(&fake_elf));
626 call_prom("close", 1, 0, elfloader);
627}
628#endif
629
630/*
631 * Memory allocation strategy... our layout is normally:
632 *
633 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
634 * rare cases, initrd might end up being before the kernel though.
635 * We assume this won't override the final kernel at 0, we have no
636 * provision to handle that in this version, but it should hopefully
637 * never happen.
638 *
639 * alloc_top is set to the top of RMO, eventually shrink down if the
640 * TCEs overlap
641 *
642 * alloc_bottom is set to the top of kernel/initrd
643 *
644 * from there, allocations are done this way : rtas is allocated
645 * topmost, and the device-tree is allocated from the bottom. We try
646 * to grow the device-tree allocation as we progress. If we can't,
647 * then we fail, we don't currently have a facility to restart
648 * elsewhere, but that shouldn't be necessary.
649 *
650 * Note that calls to reserve_mem have to be done explicitly, memory
651 * allocated with either alloc_up or alloc_down isn't automatically
652 * reserved.
653 */
654
655
656/*
657 * Allocates memory in the RMO upward from the kernel/initrd
658 *
659 * When align is 0, this is a special case, it means to allocate in place
660 * at the current location of alloc_bottom or fail (that is basically
661 * extending the previous allocation). Used for the device-tree flattening
662 */
663static unsigned long __init alloc_up(unsigned long size, unsigned long align)
664{
665 unsigned long base = _ALIGN_UP(RELOC(alloc_bottom), align);
666 unsigned long addr = 0;
667
668 prom_debug("alloc_up(%x, %x)\n", size, align);
669 if (RELOC(ram_top) == 0)
670 prom_panic("alloc_up() called with mem not initialized\n");
671
672 if (align)
673 base = _ALIGN_UP(RELOC(alloc_bottom), align);
674 else
675 base = RELOC(alloc_bottom);
676
677 for(; (base + size) <= RELOC(alloc_top);
678 base = _ALIGN_UP(base + 0x100000, align)) {
679 prom_debug(" trying: 0x%x\n\r", base);
680 addr = (unsigned long)prom_claim(base, size, 0);
681 if (addr != PROM_ERROR)
682 break;
683 addr = 0;
684 if (align == 0)
685 break;
686 }
687 if (addr == 0)
688 return 0;
689 RELOC(alloc_bottom) = addr;
690
691 prom_debug(" -> %x\n", addr);
692 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
693 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
694 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
695 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
696 prom_debug(" ram_top : %x\n", RELOC(ram_top));
697
698 return addr;
699}
700
701/*
702 * Allocates memory downward, either from top of RMO, or if highmem
703 * is set, from the top of RAM. Note that this one doesn't handle
704 * failures. It does claim memory if highmem is not set.
705 */
706static unsigned long __init alloc_down(unsigned long size, unsigned long align,
707 int highmem)
708{
709 unsigned long base, addr = 0;
710
711 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
712 highmem ? RELOC("(high)") : RELOC("(low)"));
713 if (RELOC(ram_top) == 0)
714 prom_panic("alloc_down() called with mem not initialized\n");
715
716 if (highmem) {
717 /* Carve out storage for the TCE table. */
718 addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
719 if (addr <= RELOC(alloc_bottom))
720 return 0;
721 /* Will we bump into the RMO ? If yes, check out that we
722 * didn't overlap existing allocations there, if we did,
723 * we are dead, we must be the first in town !
724 */
725 if (addr < RELOC(rmo_top)) {
726 /* Good, we are first */
727 if (RELOC(alloc_top) == RELOC(rmo_top))
728 RELOC(alloc_top) = RELOC(rmo_top) = addr;
729 else
730 return 0;
731 }
732 RELOC(alloc_top_high) = addr;
733 goto bail;
734 }
735
736 base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
737 for (; base > RELOC(alloc_bottom);
738 base = _ALIGN_DOWN(base - 0x100000, align)) {
739 prom_debug(" trying: 0x%x\n\r", base);
740 addr = (unsigned long)prom_claim(base, size, 0);
741 if (addr != PROM_ERROR)
742 break;
743 addr = 0;
744 }
745 if (addr == 0)
746 return 0;
747 RELOC(alloc_top) = addr;
748
749 bail:
750 prom_debug(" -> %x\n", addr);
751 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
752 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
753 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
754 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
755 prom_debug(" ram_top : %x\n", RELOC(ram_top));
756
757 return addr;
758}
759
760/*
761 * Parse a "reg" cell
762 */
763static unsigned long __init prom_next_cell(int s, cell_t **cellp)
764{
765 cell_t *p = *cellp;
766 unsigned long r = 0;
767
768 /* Ignore more than 2 cells */
769 while (s > sizeof(unsigned long) / 4) {
770 p++;
771 s--;
772 }
773 r = *p++;
774#ifdef CONFIG_PPC64
775 if (s > 1) {
776 r <<= 32;
777 r |= *(p++);
778 }
779#endif
780 *cellp = p;
781 return r;
782}
783
784/*
785 * Very dumb function for adding to the memory reserve list, but
786 * we don't need anything smarter at this point
787 *
788 * XXX Eventually check for collisions. They should NEVER happen.
789 * If problems seem to show up, it would be a good start to track
790 * them down.
791 */
792static void reserve_mem(unsigned long base, unsigned long size)
793{
794 unsigned long top = base + size;
795 unsigned long cnt = RELOC(mem_reserve_cnt);
796
797 if (size == 0)
798 return;
799
800 /* We need to always keep one empty entry so that we
801 * have our terminator with "size" set to 0 since we are
802 * dumb and just copy this entire array to the boot params
803 */
804 base = _ALIGN_DOWN(base, PAGE_SIZE);
805 top = _ALIGN_UP(top, PAGE_SIZE);
806 size = top - base;
807
808 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
809 prom_panic("Memory reserve map exhausted !\n");
810 RELOC(mem_reserve_map)[cnt].base = base;
811 RELOC(mem_reserve_map)[cnt].size = size;
812 RELOC(mem_reserve_cnt) = cnt + 1;
813}
814
815/*
816 * Initialize memory allocation mecanism, parse "memory" nodes and
817 * obtain that way the top of memory and RMO to setup out local allocator
818 */
819static void __init prom_init_mem(void)
820{
821 phandle node;
822 char *path, type[64];
823 unsigned int plen;
824 cell_t *p, *endp;
825 struct prom_t *_prom = &RELOC(prom);
826 u32 rac, rsc;
827
828 /*
829 * We iterate the memory nodes to find
830 * 1) top of RMO (first node)
831 * 2) top of memory
832 */
833 rac = 2;
834 prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
835 rsc = 1;
836 prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
837 prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
838 prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
839
840 prom_debug("scanning memory:\n");
841 path = RELOC(prom_scratch);
842
843 for (node = 0; prom_next_node(&node); ) {
844 type[0] = 0;
845 prom_getprop(node, "device_type", type, sizeof(type));
846
847 if (strcmp(type, RELOC("memory")))
848 continue;
849
850 plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
851 if (plen > sizeof(regbuf)) {
852 prom_printf("memory node too large for buffer !\n");
853 plen = sizeof(regbuf);
854 }
855 p = RELOC(regbuf);
856 endp = p + (plen / sizeof(cell_t));
857
858#ifdef DEBUG_PROM
859 memset(path, 0, PROM_SCRATCH_SIZE);
860 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
861 prom_debug(" node %s :\n", path);
862#endif /* DEBUG_PROM */
863
864 while ((endp - p) >= (rac + rsc)) {
865 unsigned long base, size;
866
867 base = prom_next_cell(rac, &p);
868 size = prom_next_cell(rsc, &p);
869
870 if (size == 0)
871 continue;
872 prom_debug(" %x %x\n", base, size);
873 if (base == 0)
874 RELOC(rmo_top) = size;
875 if ((base + size) > RELOC(ram_top))
876 RELOC(ram_top) = base + size;
877 }
878 }
879
880 RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
881
882 /* Check if we have an initrd after the kernel, if we do move our bottom
883 * point to after it
884 */
885 if (RELOC(prom_initrd_start)) {
886 if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
887 RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
888 }
889
890 /*
891 * If prom_memory_limit is set we reduce the upper limits *except* for
892 * alloc_top_high. This must be the real top of RAM so we can put
893 * TCE's up there.
894 */
895
896 RELOC(alloc_top_high) = RELOC(ram_top);
897
898 if (RELOC(prom_memory_limit)) {
899 if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
900 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
901 RELOC(prom_memory_limit));
902 RELOC(prom_memory_limit) = 0;
903 } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
904 prom_printf("Ignoring mem=%x >= ram_top.\n",
905 RELOC(prom_memory_limit));
906 RELOC(prom_memory_limit) = 0;
907 } else {
908 RELOC(ram_top) = RELOC(prom_memory_limit);
909 RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
910 }
911 }
912
913 /*
914 * Setup our top alloc point, that is top of RMO or top of
915 * segment 0 when running non-LPAR.
916 * Some RS64 machines have buggy firmware where claims up at
917 * 1GB fail. Cap at 768MB as a workaround.
918 * Since 768MB is plenty of room, and we need to cap to something
919 * reasonable on 32-bit, cap at 768MB on all machines.
920 */
921 if (!RELOC(rmo_top))
922 RELOC(rmo_top) = RELOC(ram_top);
923 RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
924 RELOC(alloc_top) = RELOC(rmo_top);
925
926 prom_printf("memory layout at init:\n");
927 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
928 prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
929 prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
930 prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
931 prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
932 prom_printf(" ram_top : %x\n", RELOC(ram_top));
933}
934
935
936/*
937 * Allocate room for and instantiate RTAS
938 */
939static void __init prom_instantiate_rtas(void)
940{
941 phandle rtas_node;
942 ihandle rtas_inst;
943 u32 base, entry = 0;
944 u32 size = 0;
945
946 prom_debug("prom_instantiate_rtas: start...\n");
947
948 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
949 prom_debug("rtas_node: %x\n", rtas_node);
950 if (!PHANDLE_VALID(rtas_node))
951 return;
952
953 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
954 if (size == 0)
955 return;
956
957 base = alloc_down(size, PAGE_SIZE, 0);
958 if (base == 0) {
959 prom_printf("RTAS allocation failed !\n");
960 return;
961 }
962
963 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
964 if (!IHANDLE_VALID(rtas_inst)) {
965 prom_printf("opening rtas package failed");
966 return;
967 }
968
969 prom_printf("instantiating rtas at 0x%x ...", base);
970
971 if (call_prom_ret("call-method", 3, 2, &entry,
972 ADDR("instantiate-rtas"),
973 rtas_inst, base) == PROM_ERROR
974 || entry == 0) {
975 prom_printf(" failed\n");
976 return;
977 }
978 prom_printf(" done\n");
979
980 reserve_mem(base, size);
981
982 prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
983 prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
984
985 prom_debug("rtas base = 0x%x\n", base);
986 prom_debug("rtas entry = 0x%x\n", entry);
987 prom_debug("rtas size = 0x%x\n", (long)size);
988
989 prom_debug("prom_instantiate_rtas: end...\n");
990}
991
992#ifdef CONFIG_PPC64
993/*
994 * Allocate room for and initialize TCE tables
995 */
996static void __init prom_initialize_tce_table(void)
997{
998 phandle node;
999 ihandle phb_node;
1000 char compatible[64], type[64], model[64];
1001 char *path = RELOC(prom_scratch);
1002 u64 base, align;
1003 u32 minalign, minsize;
1004 u64 tce_entry, *tce_entryp;
1005 u64 local_alloc_top, local_alloc_bottom;
1006 u64 i;
1007
1008 if (RELOC(ppc64_iommu_off))
1009 return;
1010
1011 prom_debug("starting prom_initialize_tce_table\n");
1012
1013 /* Cache current top of allocs so we reserve a single block */
1014 local_alloc_top = RELOC(alloc_top_high);
1015 local_alloc_bottom = local_alloc_top;
1016
1017 /* Search all nodes looking for PHBs. */
1018 for (node = 0; prom_next_node(&node); ) {
1019 compatible[0] = 0;
1020 type[0] = 0;
1021 model[0] = 0;
1022 prom_getprop(node, "compatible",
1023 compatible, sizeof(compatible));
1024 prom_getprop(node, "device_type", type, sizeof(type));
1025 prom_getprop(node, "model", model, sizeof(model));
1026
1027 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1028 continue;
1029
1030 /* Keep the old logic in tack to avoid regression. */
1031 if (compatible[0] != 0) {
1032 if ((strstr(compatible, RELOC("python")) == NULL) &&
1033 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
1034 (strstr(compatible, RELOC("Winnipeg")) == NULL))
1035 continue;
1036 } else if (model[0] != 0) {
1037 if ((strstr(model, RELOC("ython")) == NULL) &&
1038 (strstr(model, RELOC("peedwagon")) == NULL) &&
1039 (strstr(model, RELOC("innipeg")) == NULL))
1040 continue;
1041 }
1042
1043 if (prom_getprop(node, "tce-table-minalign", &minalign,
1044 sizeof(minalign)) == PROM_ERROR)
1045 minalign = 0;
1046 if (prom_getprop(node, "tce-table-minsize", &minsize,
1047 sizeof(minsize)) == PROM_ERROR)
1048 minsize = 4UL << 20;
1049
1050 /*
1051 * Even though we read what OF wants, we just set the table
1052 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1053 * By doing this, we avoid the pitfalls of trying to DMA to
1054 * MMIO space and the DMA alias hole.
1055 *
1056 * On POWER4, firmware sets the TCE region by assuming
1057 * each TCE table is 8MB. Using this memory for anything
1058 * else will impact performance, so we always allocate 8MB.
1059 * Anton
1060 */
1061 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
1062 minsize = 8UL << 20;
1063 else
1064 minsize = 4UL << 20;
1065
1066 /* Align to the greater of the align or size */
1067 align = max(minalign, minsize);
1068 base = alloc_down(minsize, align, 1);
1069 if (base == 0)
1070 prom_panic("ERROR, cannot find space for TCE table.\n");
1071 if (base < local_alloc_bottom)
1072 local_alloc_bottom = base;
1073
1074 /* Save away the TCE table attributes for later use. */
1075 prom_setprop(node, "linux,tce-base", &base, sizeof(base));
1076 prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
1077
1078 /* It seems OF doesn't null-terminate the path :-( */
1079 memset(path, 0, sizeof(path));
1080 /* Call OF to setup the TCE hardware */
1081 if (call_prom("package-to-path", 3, 1, node,
1082 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1083 prom_printf("package-to-path failed\n");
1084 }
1085
1086 prom_debug("TCE table: %s\n", path);
1087 prom_debug("\tnode = 0x%x\n", node);
1088 prom_debug("\tbase = 0x%x\n", base);
1089 prom_debug("\tsize = 0x%x\n", minsize);
1090
1091 /* Initialize the table to have a one-to-one mapping
1092 * over the allocated size.
1093 */
1094 tce_entryp = (unsigned long *)base;
1095 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1096 tce_entry = (i << PAGE_SHIFT);
1097 tce_entry |= 0x3;
1098 *tce_entryp = tce_entry;
1099 }
1100
1101 prom_printf("opening PHB %s", path);
1102 phb_node = call_prom("open", 1, 1, path);
1103 if (phb_node == 0)
1104 prom_printf("... failed\n");
1105 else
1106 prom_printf("... done\n");
1107
1108 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1109 phb_node, -1, minsize,
1110 (u32) base, (u32) (base >> 32));
1111 call_prom("close", 1, 0, phb_node);
1112 }
1113
1114 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1115
1116 if (RELOC(prom_memory_limit)) {
1117 /*
1118 * We align the start to a 16MB boundary so we can map
1119 * the TCE area using large pages if possible.
1120 * The end should be the top of RAM so no need to align it.
1121 */
1122 RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
1123 0x1000000);
1124 RELOC(prom_tce_alloc_end) = local_alloc_top;
1125 }
1126
1127 /* Flag the first invalid entry */
1128 prom_debug("ending prom_initialize_tce_table\n");
1129}
1130#endif
1131
1132/*
1133 * With CHRP SMP we need to use the OF to start the other processors.
1134 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1135 * so we have to put the processors into a holding pattern controlled
1136 * by the kernel (not OF) before we destroy the OF.
1137 *
1138 * This uses a chunk of low memory, puts some holding pattern
1139 * code there and sends the other processors off to there until
1140 * smp_boot_cpus tells them to do something. The holding pattern
1141 * checks that address until its cpu # is there, when it is that
1142 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1143 * of setting those values.
1144 *
1145 * We also use physical address 0x4 here to tell when a cpu
1146 * is in its holding pattern code.
1147 *
1148 * -- Cort
1149 */
static void __init prom_hold_cpus(void)
{
#ifdef CONFIG_PPC64
	unsigned long i;
	unsigned int reg;
	phandle node;
	char type[64];
	int cpuid = 0;
	unsigned int interrupt_server[MAX_CPU_THREADS];
	unsigned int cpu_threads, hw_cpu_num;
	int propsize;
	extern void __secondary_hold(void);
	extern unsigned long __secondary_hold_spinloop;
	extern unsigned long __secondary_hold_acknowledge;
	/* Physical addresses of the spin/ack words that the secondary
	 * CPUs poll/write while parked in __secondary_hold.
	 */
	unsigned long *spinloop
		= (void *) __pa(&__secondary_hold_spinloop);
	unsigned long *acknowledge
		= (void *) __pa(&__secondary_hold_acknowledge);
	/* NOTE(review): this inner #ifdef is redundant — we are already
	 * inside #ifdef CONFIG_PPC64 above, so the #else arm is dead code.
	 */
#ifdef CONFIG_PPC64
	/* On 64-bit, __secondary_hold names a function descriptor;
	 * dereference it to obtain the real entry-point address.
	 */
	unsigned long secondary_hold
		= __pa(*PTRRELOC((unsigned long *)__secondary_hold));
#else
	unsigned long secondary_hold = __pa(&__secondary_hold);
#endif
	struct prom_t *_prom = &RELOC(prom);

	prom_debug("prom_hold_cpus: start...\n");
	prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
	prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
	prom_debug(" 1) acknowledge = 0x%x\n",
		   (unsigned long)acknowledge);
	prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
	prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);

	/* Set the common spinloop variable, so all of the secondary cpus
	 * will block when they are awakened from their OF spinloop.
	 * This must occur for both SMP and non SMP kernels, since OF will
	 * be trashed when we move the kernel.
	 */
	*spinloop = 0;

#ifdef CONFIG_HMT
	/* Poison the per-cpu pir slots so un-started entries are detectable */
	for (i = 0; i < NR_CPUS; i++) {
		RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
	}
#endif
	/* look for cpus */
	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));
		if (strcmp(type, RELOC("cpu")) != 0)
			continue;

		/* Skip non-configured cpus. */
		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
			if (strcmp(type, RELOC("okay")) != 0)
				continue;

		reg = -1;
		prom_getprop(node, "reg", &reg, sizeof(reg));

		prom_debug("\ncpuid = 0x%x\n", cpuid);
		prom_debug("cpu hw idx = 0x%x\n", reg);

		/* Init the acknowledge var which will be reset by
		 * the secondary cpu when it awakens from its OF
		 * spinloop.
		 */
		*acknowledge = (unsigned long)-1;

		propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
					&interrupt_server,
					sizeof(interrupt_server));
		if (propsize < 0) {
			/* no property. old hardware has no SMT */
			cpu_threads = 1;
			interrupt_server[0] = reg; /* fake it with phys id */
		} else {
			/* We have a threaded processor */
			cpu_threads = propsize / sizeof(u32);
			if (cpu_threads > MAX_CPU_THREADS) {
				prom_printf("SMT: too many threads!\n"
					    "SMT: found %x, max is %x\n",
					    cpu_threads, MAX_CPU_THREADS);
				cpu_threads = 1; /* ToDo: panic? */
			}
		}

		hw_cpu_num = interrupt_server[0];
		if (hw_cpu_num != _prom->cpu) {
			/* Primary Thread of non-boot cpu */
			prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
			call_prom("start-cpu", 3, 0, node,
				  secondary_hold, reg);

			/* Bounded busy-wait until the secondary writes its
			 * hw id into *acknowledge (or we give up).
			 */
			for ( i = 0 ; (i < 100000000) &&
			     (*acknowledge == ((unsigned long)-1)); i++ )
				mb();

			if (*acknowledge == reg) {
				prom_printf("done\n");
				/* We have to get every CPU out of OF,
				 * even if we never start it. */
				if (cpuid >= NR_CPUS)
					goto next;
			} else {
				prom_printf("failed: %x\n", *acknowledge);
			}
		}
#ifdef CONFIG_SMP
		else
			prom_printf("%x : boot cpu %x\n", cpuid, reg);
#endif
next:
#ifdef CONFIG_SMP
		/* Init paca for secondary threads. They start later. */
		/* NOTE(review): this loop only advances cpuid past the
		 * secondary-thread slots; the paca init mentioned above is
		 * presumably done elsewhere — confirm against setup code.
		 */
		for (i=1; i < cpu_threads; i++) {
			cpuid++;
			if (cpuid >= NR_CPUS)
				continue;
		}
#endif /* CONFIG_SMP */
		cpuid++;
	}
#ifdef CONFIG_HMT
	/* Only enable HMT on processors that provide support. */
	if (__is_processor(PV_PULSAR) ||
	    __is_processor(PV_ICESTAR) ||
	    __is_processor(PV_SSTAR)) {
		prom_printf(" starting secondary threads\n");

		for (i = 0; i < NR_CPUS; i += 2) {
			if (!cpu_online(i))
				continue;

			if (i == 0) {
				unsigned long pir = mfspr(SPRN_PIR);
				/* Pulsar uses a narrower PIR field */
				if (__is_processor(PV_PULSAR)) {
					RELOC(hmt_thread_data)[i].pir =
						pir & 0x1f;
				} else {
					RELOC(hmt_thread_data)[i].pir =
						pir & 0x3ff;
				}
			}
		}
	} else {
		prom_printf("Processor is not HMT capable\n");
	}
#endif

	if (cpuid > NR_CPUS)
		prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
			    ") exceeded: ignoring extras\n");

	prom_debug("prom_hold_cpus: end...\n");
#endif
}
1308
1309
1310static void __init prom_init_client_services(unsigned long pp)
1311{
1312 struct prom_t *_prom = &RELOC(prom);
1313
1314 /* Get a handle to the prom entry point before anything else */
1315 RELOC(prom_entry) = pp;
1316
1317 /* get a handle for the stdout device */
1318 _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1319 if (!PHANDLE_VALID(_prom->chosen))
1320 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1321
1322 /* get device tree root */
1323 _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
1324 if (!PHANDLE_VALID(_prom->root))
1325 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1326}
1327
/* Resolve the firmware stdout instance from /chosen, record its path and
 * package in the device tree for the kernel, and mark it as the boot
 * display if it is one.
 */
static void __init prom_init_stdout(void)
{
	struct prom_t *_prom = &RELOC(prom);
	char *path = RELOC(of_stdout_device);
	char type[16];
	u32 val;

	if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
		prom_panic("cannot find stdout");

	_prom->stdout = val;

	/* Get the full OF pathname of the stdout device */
	/* NOTE(review): assumes of_stdout_device is at least 256 bytes —
	 * the buffer is defined elsewhere in this file; confirm its size.
	 */
	memset(path, 0, 256);
	call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
	/* val is reused: it now holds the stdout *package* phandle */
	val = call_prom("instance-to-package", 1, 1, _prom->stdout);
	prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
	prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
	prom_setprop(_prom->chosen, "linux,stdout-path",
		     RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);

	/* If it's a display, note it */
	memset(type, 0, sizeof(type));
	prom_getprop(val, "device_type", type, sizeof(type));
	if (strcmp(type, RELOC("display")) == 0)
		prom_setprop(val, "linux,boot-display", NULL, 0);
}
1355
1356static void __init prom_close_stdin(void)
1357{
1358 struct prom_t *_prom = &RELOC(prom);
1359 ihandle val;
1360
1361 if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
1362 call_prom("close", 1, 0, val);
1363}
1364
1365static int __init prom_find_machine_type(void)
1366{
1367 struct prom_t *_prom = &RELOC(prom);
1368 char compat[256];
1369 int len, i = 0;
1370 phandle rtas;
1371
1372 len = prom_getprop(_prom->root, "compatible",
1373 compat, sizeof(compat)-1);
1374 if (len > 0) {
1375 compat[len] = 0;
1376 while (i < len) {
1377 char *p = &compat[i];
1378 int sl = strlen(p);
1379 if (sl == 0)
1380 break;
1381 if (strstr(p, RELOC("Power Macintosh")) ||
1382 strstr(p, RELOC("MacRISC4")))
1383 return PLATFORM_POWERMAC;
1384#ifdef CONFIG_PPC64
1385 if (strstr(p, RELOC("Momentum,Maple")))
1386 return PLATFORM_MAPLE;
1387#endif
1388 i += sl + 1;
1389 }
1390 }
1391#ifdef CONFIG_PPC64
1392 /* Default to pSeries. We need to know if we are running LPAR */
1393 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1394 if (PHANDLE_VALID(rtas)) {
1395 int x = prom_getproplen(rtas, "ibm,hypertas-functions");
1396 if (x != PROM_ERROR) {
1397 prom_printf("Hypertas detected, assuming LPAR !\n");
1398 return PLATFORM_PSERIES_LPAR;
1399 }
1400 }
1401 return PLATFORM_PSERIES;
1402#else
1403 return PLATFORM_CHRP;
1404#endif
1405}
1406
/* Set palette entry @i on display instance @ih via the OF "color!"
 * method. Note the firmware takes the components in b, g, r order.
 * Returns the firmware's result (0 on success).
 */
static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
{
	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
}
1411
1412/*
1413 * If we have a display that we don't know how to drive,
1414 * we will want to try to execute OF's open method for it
1415 * later. However, OF will probably fall over if we do that
1416 * we've taken over the MMU.
1417 * So we check whether we will need to open the display,
1418 * and if so, open it now.
1419 */
1420static void __init prom_check_displays(void)
1421{
1422 char type[16], *path;
1423 phandle node;
1424 ihandle ih;
1425 int i;
1426
1427 static unsigned char default_colors[] = {
1428 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0xaa,
1430 0x00, 0xaa, 0x00,
1431 0x00, 0xaa, 0xaa,
1432 0xaa, 0x00, 0x00,
1433 0xaa, 0x00, 0xaa,
1434 0xaa, 0xaa, 0x00,
1435 0xaa, 0xaa, 0xaa,
1436 0x55, 0x55, 0x55,
1437 0x55, 0x55, 0xff,
1438 0x55, 0xff, 0x55,
1439 0x55, 0xff, 0xff,
1440 0xff, 0x55, 0x55,
1441 0xff, 0x55, 0xff,
1442 0xff, 0xff, 0x55,
1443 0xff, 0xff, 0xff
1444 };
1445 const unsigned char *clut;
1446
1447 prom_printf("Looking for displays\n");
1448 for (node = 0; prom_next_node(&node); ) {
1449 memset(type, 0, sizeof(type));
1450 prom_getprop(node, "device_type", type, sizeof(type));
1451 if (strcmp(type, RELOC("display")) != 0)
1452 continue;
1453
1454 /* It seems OF doesn't null-terminate the path :-( */
1455 path = RELOC(prom_scratch);
1456 memset(path, 0, PROM_SCRATCH_SIZE);
1457
1458 /*
1459 * leave some room at the end of the path for appending extra
1460 * arguments
1461 */
1462 if (call_prom("package-to-path", 3, 1, node, path,
1463 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1464 continue;
1465 prom_printf("found display : %s, opening ... ", path);
1466
1467 ih = call_prom("open", 1, 1, path);
1468 if (ih == 0) {
1469 prom_printf("failed\n");
1470 continue;
1471 }
1472
1473 /* Success */
1474 prom_printf("done\n");
1475 prom_setprop(node, "linux,opened", NULL, 0);
1476
1477 /* Setup a usable color table when the appropriate
1478 * method is available. Should update this to set-colors */
1479 clut = RELOC(default_colors);
1480 for (i = 0; i < 32; i++, clut += 3)
1481 if (prom_set_color(ih, i, clut[0], clut[1],
1482 clut[2]) != 0)
1483 break;
1484
1485#ifdef CONFIG_LOGO_LINUX_CLUT224
1486 clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
1487 for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
1488 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1489 clut[2]) != 0)
1490 break;
1491#endif /* CONFIG_LOGO_LINUX_CLUT224 */
1492 }
1493}
1494
1495
1496/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
1497static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
1498 unsigned long needed, unsigned long align)
1499{
1500 void *ret;
1501
1502 *mem_start = _ALIGN(*mem_start, align);
1503 while ((*mem_start + needed) > *mem_end) {
1504 unsigned long room, chunk;
1505
1506 prom_debug("Chunk exhausted, claiming more at %x...\n",
1507 RELOC(alloc_bottom));
1508 room = RELOC(alloc_top) - RELOC(alloc_bottom);
1509 if (room > DEVTREE_CHUNK_SIZE)
1510 room = DEVTREE_CHUNK_SIZE;
1511 if (room < PAGE_SIZE)
1512 prom_panic("No memory for flatten_device_tree (no room)");
1513 chunk = alloc_up(room, 0);
1514 if (chunk == 0)
1515 prom_panic("No memory for flatten_device_tree (claim failed)");
1516 *mem_end = RELOC(alloc_top);
1517 }
1518
1519 ret = (void *)*mem_start;
1520 *mem_start += needed;
1521
1522 return ret;
1523}
1524
/* Append one 32-bit cell to the flattened tree, growing the chunk via
 * make_room() if necessary.
 */
#define dt_push_token(token, mem_start, mem_end) \
	do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
1527
1528static unsigned long __init dt_find_string(char *str)
1529{
1530 char *s, *os;
1531
1532 s = os = (char *)RELOC(dt_string_start);
1533 s += 4;
1534 while (s < (char *)RELOC(dt_string_end)) {
1535 if (strcmp(s, str) == 0)
1536 return s - os;
1537 s += strlen(s) + 1;
1538 }
1539 return 0;
1540}
1541
1542/*
1543 * The Open Firmware 1275 specification states properties must be 31 bytes or
1544 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
1545 */
1546#define MAX_PROPERTY_NAME 64
1547
/* First pass over the tree: collect every distinct property name of
 * @node and its children into the string table starting at
 * dt_string_start, each name stored once. Advances *mem_start past the
 * strings actually kept and updates dt_string_end.
 */
static void __init scan_dt_build_strings(phandle node,
					 unsigned long *mem_start,
					 unsigned long *mem_end)
{
	char *prev_name, *namep, *sstart;
	unsigned long soff;
	phandle child;

	sstart = (char *)RELOC(dt_string_start);

	/* get and store all property names */
	prev_name = RELOC("");
	for (;;) {
		/* 64 is max len of name including nul. */
		/* Tentatively reserve a full-size slot; it is unwound below
		 * if the name is a duplicate or there are no more props.
		 */
		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
			/* No more nodes: unwind alloc */
			*mem_start = (unsigned long)namep;
			break;
		}

		/* skip "name" */
		if (strcmp(namep, RELOC("name")) == 0) {
			*mem_start = (unsigned long)namep;
			prev_name = RELOC("name");
			continue;
		}
		/* get/create string entry */
		soff = dt_find_string(namep);
		if (soff != 0) {
			/* Already in the table: unwind and reuse the
			 * existing copy for the nextprop chain.
			 */
			*mem_start = (unsigned long)namep;
			namep = sstart + soff;
		} else {
			/* Trim off some if we can */
			*mem_start = (unsigned long)namep + strlen(namep) + 1;
			RELOC(dt_string_end) = *mem_start;
		}
		prev_name = namep;
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_strings(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}
}
1595
/* Second pass: emit the flattened-device-tree structure block for @node
 * and, recursively, its children — BEGIN_NODE token, unit name,
 * properties (header tokens + value), an added "linux,phandle", then
 * END_NODE. Property names are referenced by offset into the string
 * table built by scan_dt_build_strings().
 */
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
					unsigned long *mem_end)
{
	phandle child;
	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
	unsigned long soff;
	unsigned char *valp;
	static char pname[MAX_PROPERTY_NAME];
	int l;

	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);

	/* get the node's full name */
	namep = (char *)*mem_start;
	l = call_prom("package-to-path", 3, 1, node,
		      namep, *mem_end - *mem_start);
	if (l >= 0) {
		/* Didn't fit? Get more room. */
		if ((l+1) > (*mem_end - *mem_start)) {
			namep = make_room(mem_start, mem_end, l+1, 1);
			call_prom("package-to-path", 3, 1, node, namep, l);
		}
		namep[l] = '\0';

		/* Fixup an Apple bug where they have bogus \0 chars in the
		 * middle of the path in some properties
		 */
		for (p = namep, ep = namep + l; p < ep; p++)
			if (*p == '\0') {
				memmove(p, p+1, ep - p);
				ep--; l--; p--;
			}

		/* now try to extract the unit name in that mess */
		/* Keep only the last path component (after the final '/') */
		for (p = namep, lp = NULL; *p; p++)
			if (*p == '/')
				lp = p + 1;
		if (lp != NULL)
			memmove(namep, lp, strlen(lp) + 1);
		*mem_start = _ALIGN(((unsigned long) namep) +
				    strlen(namep) + 1, 4);
	}

	/* get it again for debugging */
	path = RELOC(prom_scratch);
	memset(path, 0, PROM_SCRATCH_SIZE);
	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);

	/* get and store all properties */
	prev_name = RELOC("");
	sstart = (char *)RELOC(dt_string_start);
	for (;;) {
		if (call_prom("nextprop", 3, 1, node, prev_name,
			      RELOC(pname)) != 1)
			break;

		/* skip "name" */
		if (strcmp(RELOC(pname), RELOC("name")) == 0) {
			prev_name = RELOC("name");
			continue;
		}

		/* find string offset */
		soff = dt_find_string(RELOC(pname));
		if (soff == 0) {
			prom_printf("WARNING: Can't find string index for"
				    " <%s>, node %s\n", RELOC(pname), path);
			break;
		}
		/* Chain nextprop off the stable string-table copy, not the
		 * scratch buffer that the next iteration overwrites.
		 */
		prev_name = sstart + soff;

		/* get length */
		l = call_prom("getproplen", 2, 1, node, RELOC(pname));

		/* sanity checks */
		if (l == PROM_ERROR)
			continue;
		if (l > MAX_PROPERTY_LENGTH) {
			prom_printf("WARNING: ignoring large property ");
			/* It seems OF doesn't null-terminate the path :-( */
			prom_printf("[%s] ", path);
			prom_printf("%s length 0x%x\n", RELOC(pname), l);
			continue;
		}

		/* push property head */
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(l, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);

		/* push property content */
		valp = make_room(mem_start, mem_end, l, 4);
		call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
		*mem_start = _ALIGN(*mem_start, 4);
	}

	/* Add a "linux,phandle" property. */
	soff = dt_find_string(RELOC("linux,phandle"));
	if (soff == 0)
		prom_printf("WARNING: Can't find string index for"
			    " <linux-phandle> node %s\n", path);
	else {
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(4, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);
		valp = make_room(mem_start, mem_end, 4, 4);
		*(u32 *)valp = node;
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_struct(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}

	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
}
1714
/* Build the flattened device tree the kernel will boot with: claim a
 * working chunk, lay down the boot_param_header and reserve-map slot,
 * run the two scan passes (strings, then structure), fill in the header
 * offsets, and reserve the result. After this, further reserve_mem()
 * calls are deliberately made to fail.
 */
static void __init flatten_device_tree(void)
{
	phandle root;
	unsigned long mem_start, mem_end, room;
	struct boot_param_header *hdr;
	struct prom_t *_prom = &RELOC(prom);
	char *namep;
	u64 *rsvmap;

	/*
	 * Check how much room we have between alloc top & bottom (+/- a
	 * few pages), crop to 4Mb, as this is our "chunk" size
	 */
	room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
	if (room > DEVTREE_CHUNK_SIZE)
		room = DEVTREE_CHUNK_SIZE;
	prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));

	/* Now try to claim that */
	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
	if (mem_start == 0)
		prom_panic("Can't allocate initial device-tree chunk\n");
	mem_end = RELOC(alloc_top);

	/* Get root of tree */
	root = call_prom("peer", 1, 1, (phandle)0);
	if (root == (phandle)0)
		prom_panic ("couldn't get device tree root\n");

	/* Build header and make room for mem rsv map */
	mem_start = _ALIGN(mem_start, 4);
	hdr = make_room(&mem_start, &mem_end,
			sizeof(struct boot_param_header), 4);
	RELOC(dt_header_start) = (unsigned long)hdr;
	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);

	/* Start of strings */
	mem_start = PAGE_ALIGN(mem_start);
	RELOC(dt_string_start) = mem_start;
	mem_start += 4; /* hole */

	/* Add "linux,phandle" in there, we'll need it */
	namep = make_room(&mem_start, &mem_end, 16, 1);
	strcpy(namep, RELOC("linux,phandle"));
	mem_start = (unsigned long)namep + strlen(namep) + 1;

	/* Build string array */
	prom_printf("Building dt strings...\n");
	scan_dt_build_strings(root, &mem_start, &mem_end);
	RELOC(dt_string_end) = mem_start;

	/* Build structure */
	mem_start = PAGE_ALIGN(mem_start);
	RELOC(dt_struct_start) = mem_start;
	prom_printf("Building dt structure...\n");
	scan_dt_build_struct(root, &mem_start, &mem_end);
	dt_push_token(OF_DT_END, &mem_start, &mem_end);
	RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);

	/* Finish header */
	hdr->boot_cpuid_phys = _prom->cpu;
	hdr->magic = OF_DT_HEADER;
	hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
	hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
	hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
	hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
	hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
	hdr->version = OF_DT_VERSION;
	/* Version 16 is not backward compatible */
	hdr->last_comp_version = 0x10;

	/* Reserve the whole thing and copy the reserve map in, we
	 * also bump mem_reserve_cnt to cause further reservations to
	 * fail since it's too late.
	 */
	reserve_mem(RELOC(dt_header_start), hdr->totalsize);
	memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));

#ifdef DEBUG_PROM
	{
		int i;
		prom_printf("reserved memory map:\n");
		for (i = 0; i < RELOC(mem_reserve_cnt); i++)
			prom_printf(" %x - %x\n",
				    RELOC(mem_reserve_map)[i].base,
				    RELOC(mem_reserve_map)[i].size);
	}
#endif
	RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;

	prom_printf("Device tree strings 0x%x -> 0x%x\n",
		    RELOC(dt_string_start), RELOC(dt_string_end));
	prom_printf("Device tree struct 0x%x -> 0x%x\n",
		    RELOC(dt_struct_start), RELOC(dt_struct_end));

}
1811
1812
/* Work around a firmware bug on certain PowerMac G5s: U3 northbridge
 * revisions 0x35/0x37 ship an i2c node with no "interrupts" property.
 * If all three expected nodes exist, the revision matches, and the
 * property is indeed missing, add interrupt 0 (level) with the MPIC as
 * interrupt parent. No-op on other machines/configs.
 */
static void __init fixup_device_tree(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
	phandle u3, i2c, mpic;
	u32 u3_rev;
	u32 interrupts[2];
	u32 parent;

	/* Some G5s have a missing interrupt definition, fix it up here */
	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
	if (!PHANDLE_VALID(u3))
		return;
	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
	if (!PHANDLE_VALID(i2c))
		return;
	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
	if (!PHANDLE_VALID(mpic))
		return;

	/* check if proper rev of u3 */
	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
	    == PROM_ERROR)
		return;
	if (u3_rev != 0x35 && u3_rev != 0x37)
		return;
	/* does it need fixup ? */
	if (prom_getproplen(i2c, "interrupts") > 0)
		return;

	prom_printf("fixing up bogus interrupts for u3 i2c...\n");

	/* interrupt on this revision of u3 is number 0 and level */
	interrupts[0] = 0;
	interrupts[1] = 1;
	prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
	parent = (u32)mpic;
	prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
#endif
}
1852
1853
1854static void __init prom_find_boot_cpu(void)
1855{
1856 struct prom_t *_prom = &RELOC(prom);
1857 u32 getprop_rval;
1858 ihandle prom_cpu;
1859 phandle cpu_pkg;
1860
1861 if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
1862 prom_panic("cannot find boot cpu");
1863
1864 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
1865
1866 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
1867 _prom->cpu = getprop_rval;
1868
1869 prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
1870}
1871
1872static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
1873{
1874#ifdef CONFIG_BLK_DEV_INITRD
1875 struct prom_t *_prom = &RELOC(prom);
1876
1877 if (r3 && r4 && r4 != 0xdeadbeef) {
1878 unsigned long val;
1879
1880 RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
1881 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
1882
1883 val = RELOC(prom_initrd_start);
1884 prom_setprop(_prom->chosen, "linux,initrd-start", &val,
1885 sizeof(val));
1886 val = RELOC(prom_initrd_end);
1887 prom_setprop(_prom->chosen, "linux,initrd-end", &val,
1888 sizeof(val));
1889
1890 reserve_mem(RELOC(prom_initrd_start),
1891 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
1892
1893 prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
1894 prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
1895 }
1896#endif /* CONFIG_BLK_DEV_INITRD */
1897}
1898
1899/*
1900 * We enter here early on, when the Open Firmware prom is still
1901 * handling exceptions and the MMU hash table for us.
1902 */
1903
/* Main OF client entry point, running with firmware still owning
 * exceptions and the hash table. r3/r4 carry the initrd start/size from
 * the bootloader, pp is the OF client-interface entry point; r6/r7 are
 * unused here. Ends by jumping to __start() with the flattened
 * device-tree — the return is never reached in practice.
 */
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7)
{
	struct prom_t *_prom;
	extern char _stext[];	/* NOTE(review): declared but unused here */
	unsigned long hdr;
	u32 getprop_rval;
	unsigned long offset = reloc_offset();

#ifdef CONFIG_PPC32
	/* Fix up the GOT so globals are usable before final relocation */
	reloc_got2(offset);
#endif

	_prom = &RELOC(prom);

	/*
	 * First zero the BSS
	 */
	memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);

	/*
	 * Init interface to Open Firmware, get some node references,
	 * like /chosen
	 */
	prom_init_client_services(pp);

	/*
	 * Init prom stdout device
	 */
	prom_init_stdout();

	/*
	 * Check for an initrd
	 */
	prom_check_initrd(r3, r4);

	/*
	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	 */
	RELOC(of_platform) = prom_find_machine_type();
	getprop_rval = RELOC(of_platform);
	prom_setprop(_prom->chosen, "linux,platform",
		     &getprop_rval, sizeof(getprop_rval));

#ifdef CONFIG_PPC_PSERIES
	/*
	 * On pSeries, inform the firmware about our capabilities
	 */
	if (RELOC(of_platform) & PLATFORM_PSERIES)
		prom_send_capabilities();
#endif

	/*
	 * On pSeries and BPA, copy the CPU hold code
	 */
	if (RELOC(of_platform) != PLATFORM_POWERMAC)
		copy_and_flush(0, KERNELBASE + offset, 0x100, 0);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

	/*
	 * Initialize memory management within prom_init
	 */
	prom_init_mem();

	/*
	 * Determine which cpu is actually running right _now_
	 */
	prom_find_boot_cpu();

	/*
	 * Initialize display devices
	 */
	prom_check_displays();

#ifdef CONFIG_PPC64
	/*
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
	 * that uses the allocator, we need to make sure we get the top of memory
	 * available for us here...
	 */
	if (RELOC(of_platform) == PLATFORM_PSERIES)
		prom_initialize_tce_table();
#endif

	/*
	 * On non-powermacs, try to instantiate RTAS and puts all CPUs
	 * in spin-loops. PowerMacs don't have a working RTAS and use
	 * a different way to spin CPUs
	 */
	if (RELOC(of_platform) != PLATFORM_POWERMAC) {
		prom_instantiate_rtas();
		prom_hold_cpus();
	}

	/*
	 * Fill in some infos for use by the kernel later on
	 */
	if (RELOC(prom_memory_limit))
		prom_setprop(_prom->chosen, "linux,memory-limit",
			     &RELOC(prom_memory_limit),
			     sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
	if (RELOC(ppc64_iommu_off))
		prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);

	if (RELOC(iommu_force_on))
		prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);

	if (RELOC(prom_tce_alloc_start)) {
		prom_setprop(_prom->chosen, "linux,tce-alloc-start",
			     &RELOC(prom_tce_alloc_start),
			     sizeof(prom_tce_alloc_start));
		prom_setprop(_prom->chosen, "linux,tce-alloc-end",
			     &RELOC(prom_tce_alloc_end),
			     sizeof(prom_tce_alloc_end));
	}
#endif

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree ...\n");
	flatten_device_tree();

	/* in case stdin is USB and still active on IBM machines... */
	prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMA's from
	 * devices etc...
	 */
	prom_printf("Calling quiesce ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	 */
	hdr = RELOC(dt_header_start);
	prom_printf("returning from prom_init\n");
	prom_debug("->dt_header_start=0x%x\n", hdr);

#ifdef CONFIG_PPC32
	/* Undo the GOT fixup before jumping to the kernel proper */
	reloc_got2(-offset);
#endif

	/* Presumably never returns — TODO confirm __start() semantics */
	__start(hdr, KERNELBASE + offset, 0);

	return 0;
}
diff --git a/arch/ppc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e7aee4108dea..943425a93354 100644
--- a/arch/ppc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * arch/ppc/kernel/ptrace.c
3 *
4 * PowerPC version 2 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 4 *
@@ -10,13 +8,14 @@
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 * 9 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com) 10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au). 11 * and Paul Mackerras (paulus@samba.org).
14 * 12 *
15 * This file is subject to the terms and conditions of the GNU General 13 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of 14 * Public License. See the file README.legal in the main directory of
17 * this archive for more details. 15 * this archive for more details.
18 */ 16 */
19 17
18#include <linux/config.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/sched.h> 20#include <linux/sched.h>
22#include <linux/mm.h> 21#include <linux/mm.h>
@@ -29,13 +28,19 @@
29#include <linux/signal.h> 28#include <linux/signal.h>
30#include <linux/seccomp.h> 29#include <linux/seccomp.h>
31#include <linux/audit.h> 30#include <linux/audit.h>
31#ifdef CONFIG_PPC32
32#include <linux/module.h> 32#include <linux/module.h>
33#endif
33 34
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/page.h> 36#include <asm/page.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
37#include <asm/system.h> 38#include <asm/system.h>
39#ifdef CONFIG_PPC64
40#include <asm/ptrace-common.h>
41#endif
38 42
43#ifdef CONFIG_PPC32
39/* 44/*
40 * Set of msr bits that gdb can change on behalf of a process. 45 * Set of msr bits that gdb can change on behalf of a process.
41 */ 46 */
@@ -44,12 +49,14 @@
44#else 49#else
45#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) 50#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
46#endif 51#endif
52#endif /* CONFIG_PPC32 */
47 53
48/* 54/*
49 * does not yet catch signals sent when the child dies. 55 * does not yet catch signals sent when the child dies.
50 * in exit.c or in signal.c. 56 * in exit.c or in signal.c.
51 */ 57 */
52 58
59#ifdef CONFIG_PPC32
53/* 60/*
54 * Get contents of register REGNO in task TASK. 61 * Get contents of register REGNO in task TASK.
55 */ 62 */
@@ -228,6 +235,7 @@ clear_single_step(struct task_struct *task)
228#endif 235#endif
229 } 236 }
230} 237}
238#endif /* CONFIG_PPC32 */
231 239
232/* 240/*
233 * Called by kernel/ptrace.c when detaching.. 241 * Called by kernel/ptrace.c when detaching..
@@ -296,25 +304,28 @@ int sys_ptrace(long request, long pid, long addr, long data)
296 } 304 }
297 305
298 /* read the word at location addr in the USER area. */ 306 /* read the word at location addr in the USER area. */
299 /* XXX this will need fixing for 64-bit */
300 case PTRACE_PEEKUSR: { 307 case PTRACE_PEEKUSR: {
301 unsigned long index, tmp; 308 unsigned long index, tmp;
302 309
303 ret = -EIO; 310 ret = -EIO;
304 /* convert to index and check */ 311 /* convert to index and check */
312#ifdef CONFIG_PPC32
305 index = (unsigned long) addr >> 2; 313 index = (unsigned long) addr >> 2;
306 if ((addr & 3) || index > PT_FPSCR 314 if ((addr & 3) || (index > PT_FPSCR)
307 || child->thread.regs == NULL) 315 || (child->thread.regs == NULL))
316#else
317 index = (unsigned long) addr >> 3;
318 if ((addr & 7) || (index > PT_FPSCR))
319#endif
308 break; 320 break;
309 321
322#ifdef CONFIG_PPC32
310 CHECK_FULL_REGS(child->thread.regs); 323 CHECK_FULL_REGS(child->thread.regs);
324#endif
311 if (index < PT_FPR0) { 325 if (index < PT_FPR0) {
312 tmp = get_reg(child, (int) index); 326 tmp = get_reg(child, (int) index);
313 } else { 327 } else {
314 preempt_disable(); 328 flush_fp_to_thread(child);
315 if (child->thread.regs->msr & MSR_FP)
316 giveup_fpu(child);
317 preempt_enable();
318 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0]; 329 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
319 } 330 }
320 ret = put_user(tmp,(unsigned long __user *) data); 331 ret = put_user(tmp,(unsigned long __user *) data);
@@ -325,7 +336,8 @@ int sys_ptrace(long request, long pid, long addr, long data)
325 case PTRACE_POKETEXT: /* write the word at location addr. */ 336 case PTRACE_POKETEXT: /* write the word at location addr. */
326 case PTRACE_POKEDATA: 337 case PTRACE_POKEDATA:
327 ret = 0; 338 ret = 0;
328 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) 339 if (access_process_vm(child, addr, &data, sizeof(data), 1)
340 == sizeof(data))
329 break; 341 break;
330 ret = -EIO; 342 ret = -EIO;
331 break; 343 break;
@@ -336,21 +348,25 @@ int sys_ptrace(long request, long pid, long addr, long data)
336 348
337 ret = -EIO; 349 ret = -EIO;
338 /* convert to index and check */ 350 /* convert to index and check */
351#ifdef CONFIG_PPC32
339 index = (unsigned long) addr >> 2; 352 index = (unsigned long) addr >> 2;
340 if ((addr & 3) || index > PT_FPSCR 353 if ((addr & 3) || (index > PT_FPSCR)
341 || child->thread.regs == NULL) 354 || (child->thread.regs == NULL))
355#else
356 index = (unsigned long) addr >> 3;
357 if ((addr & 7) || (index > PT_FPSCR))
358#endif
342 break; 359 break;
343 360
361#ifdef CONFIG_PPC32
344 CHECK_FULL_REGS(child->thread.regs); 362 CHECK_FULL_REGS(child->thread.regs);
363#endif
345 if (index == PT_ORIG_R3) 364 if (index == PT_ORIG_R3)
346 break; 365 break;
347 if (index < PT_FPR0) { 366 if (index < PT_FPR0) {
348 ret = put_reg(child, index, data); 367 ret = put_reg(child, index, data);
349 } else { 368 } else {
350 preempt_disable(); 369 flush_fp_to_thread(child);
351 if (child->thread.regs->msr & MSR_FP)
352 giveup_fpu(child);
353 preempt_enable();
354 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data; 370 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
355 ret = 0; 371 ret = 0;
356 } 372 }
@@ -362,11 +378,10 @@ int sys_ptrace(long request, long pid, long addr, long data)
362 ret = -EIO; 378 ret = -EIO;
363 if (!valid_signal(data)) 379 if (!valid_signal(data))
364 break; 380 break;
365 if (request == PTRACE_SYSCALL) { 381 if (request == PTRACE_SYSCALL)
366 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 382 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
367 } else { 383 else
368 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 384 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
369 }
370 child->exit_code = data; 385 child->exit_code = data;
371 /* make sure the single step bit is not set. */ 386 /* make sure the single step bit is not set. */
372 clear_single_step(child); 387 clear_single_step(child);
@@ -404,28 +419,102 @@ int sys_ptrace(long request, long pid, long addr, long data)
404 break; 419 break;
405 } 420 }
406 421
422#ifdef CONFIG_PPC64
423 case PTRACE_GET_DEBUGREG: {
424 ret = -EINVAL;
425 /* We only support one DABR and no IABRS at the moment */
426 if (addr > 0)
427 break;
428 ret = put_user(child->thread.dabr,
429 (unsigned long __user *)data);
430 break;
431 }
432
433 case PTRACE_SET_DEBUGREG:
434 ret = ptrace_set_debugreg(child, addr, data);
435 break;
436#endif
437
407 case PTRACE_DETACH: 438 case PTRACE_DETACH:
408 ret = ptrace_detach(child, data); 439 ret = ptrace_detach(child, data);
409 break; 440 break;
410 441
442#ifdef CONFIG_PPC64
443 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
444 int i;
445 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
446 unsigned long __user *tmp = (unsigned long __user *)addr;
447
448 for (i = 0; i < 32; i++) {
449 ret = put_user(*reg, tmp);
450 if (ret)
451 break;
452 reg++;
453 tmp++;
454 }
455 break;
456 }
457
458 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
459 int i;
460 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
461 unsigned long __user *tmp = (unsigned long __user *)addr;
462
463 for (i = 0; i < 32; i++) {
464 ret = get_user(*reg, tmp);
465 if (ret)
466 break;
467 reg++;
468 tmp++;
469 }
470 break;
471 }
472
473 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
474 int i;
475 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
476 unsigned long __user *tmp = (unsigned long __user *)addr;
477
478 flush_fp_to_thread(child);
479
480 for (i = 0; i < 32; i++) {
481 ret = put_user(*reg, tmp);
482 if (ret)
483 break;
484 reg++;
485 tmp++;
486 }
487 break;
488 }
489
490 case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
491 int i;
492 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
493 unsigned long __user *tmp = (unsigned long __user *)addr;
494
495 flush_fp_to_thread(child);
496
497 for (i = 0; i < 32; i++) {
498 ret = get_user(*reg, tmp);
499 if (ret)
500 break;
501 reg++;
502 tmp++;
503 }
504 break;
505 }
506#endif /* CONFIG_PPC64 */
507
411#ifdef CONFIG_ALTIVEC 508#ifdef CONFIG_ALTIVEC
412 case PTRACE_GETVRREGS: 509 case PTRACE_GETVRREGS:
413 /* Get the child altivec register state. */ 510 /* Get the child altivec register state. */
414 preempt_disable(); 511 flush_altivec_to_thread(child);
415 if (child->thread.regs->msr & MSR_VEC)
416 giveup_altivec(child);
417 preempt_enable();
418 ret = get_vrregs((unsigned long __user *)data, child); 512 ret = get_vrregs((unsigned long __user *)data, child);
419 break; 513 break;
420 514
421 case PTRACE_SETVRREGS: 515 case PTRACE_SETVRREGS:
422 /* Set the child altivec register state. */ 516 /* Set the child altivec register state. */
423 /* this is to clear the MSR_VEC bit to force a reload 517 flush_altivec_to_thread(child);
424 * of register state from memory */
425 preempt_disable();
426 if (child->thread.regs->msr & MSR_VEC)
427 giveup_altivec(child);
428 preempt_enable();
429 ret = set_vrregs(child, (unsigned long __user *)data); 518 ret = set_vrregs(child, (unsigned long __user *)data);
430 break; 519 break;
431#endif 520#endif
@@ -478,12 +567,21 @@ static void do_syscall_trace(void)
478 567
479void do_syscall_trace_enter(struct pt_regs *regs) 568void do_syscall_trace_enter(struct pt_regs *regs)
480{ 569{
570#ifdef CONFIG_PPC64
571 secure_computing(regs->gpr[0]);
572#endif
573
481 if (test_thread_flag(TIF_SYSCALL_TRACE) 574 if (test_thread_flag(TIF_SYSCALL_TRACE)
482 && (current->ptrace & PT_PTRACED)) 575 && (current->ptrace & PT_PTRACED))
483 do_syscall_trace(); 576 do_syscall_trace();
484 577
485 if (unlikely(current->audit_context)) 578 if (unlikely(current->audit_context))
486 audit_syscall_entry(current, AUDIT_ARCH_PPC, 579 audit_syscall_entry(current,
580#ifdef CONFIG_PPC32
581 AUDIT_ARCH_PPC,
582#else
583 test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
584#endif
487 regs->gpr[0], 585 regs->gpr[0],
488 regs->gpr[3], regs->gpr[4], 586 regs->gpr[3], regs->gpr[4],
489 regs->gpr[5], regs->gpr[6]); 587 regs->gpr[5], regs->gpr[6]);
@@ -491,17 +589,25 @@ void do_syscall_trace_enter(struct pt_regs *regs)
491 589
492void do_syscall_trace_leave(struct pt_regs *regs) 590void do_syscall_trace_leave(struct pt_regs *regs)
493{ 591{
592#ifdef CONFIG_PPC32
494 secure_computing(regs->gpr[0]); 593 secure_computing(regs->gpr[0]);
594#endif
495 595
496 if (unlikely(current->audit_context)) 596 if (unlikely(current->audit_context))
497 audit_syscall_exit(current, 597 audit_syscall_exit(current,
498 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 598 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
499 regs->result); 599 regs->result);
500 600
501 if ((test_thread_flag(TIF_SYSCALL_TRACE)) 601 if ((test_thread_flag(TIF_SYSCALL_TRACE)
602#ifdef CONFIG_PPC64
603 || test_thread_flag(TIF_SINGLESTEP)
604#endif
605 )
502 && (current->ptrace & PT_PTRACED)) 606 && (current->ptrace & PT_PTRACED))
503 do_syscall_trace(); 607 do_syscall_trace();
504} 608}
505 609
610#ifdef CONFIG_PPC32
506EXPORT_SYMBOL(do_syscall_trace_enter); 611EXPORT_SYMBOL(do_syscall_trace_enter);
507EXPORT_SYMBOL(do_syscall_trace_leave); 612EXPORT_SYMBOL(do_syscall_trace_leave);
613#endif
diff --git a/arch/ppc64/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index fb8c22d6084a..544368277d7e 100644
--- a/arch/ppc64/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/ptrace32.c 2 * ptrace for 32-bit processes running on a 64-bit kernel.
3 * 3 *
4 * PowerPC version 4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -10,10 +10,10 @@
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 * 11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com) 12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au). 13 * and Paul Mackerras (paulus@samba.org).
14 * 14 *
15 * This file is subject to the terms and conditions of the GNU General 15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of 16 * Public License. See the file COPYING in the main directory of
17 * this archive for more details. 17 * this archive for more details.
18 */ 18 */
19 19
@@ -40,7 +40,8 @@
40 * in exit.c or in signal.c. 40 * in exit.c or in signal.c.
41 */ 41 */
42 42
43int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data) 43int compat_sys_ptrace(int request, int pid, unsigned long addr,
44 unsigned long data)
44{ 45{
45 struct task_struct *child; 46 struct task_struct *child;
46 int ret = -EPERM; 47 int ret = -EPERM;
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
new file mode 100644
index 000000000000..2f8c3c951394
--- /dev/null
+++ b/arch/powerpc/kernel/semaphore.c
@@ -0,0 +1,135 @@
1/*
2 * PowerPC-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
12 * to eliminate the SMP races in the old version between the updates
13 * of `count' and `waking'. Now we use negative `count' values to
14 * indicate that some process(es) are waiting for the semaphore.
15 */
16
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <linux/module.h>
20
21#include <asm/atomic.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * Atomically update sem->count.
27 * This does the equivalent of the following:
28 *
29 * old_count = sem->count;
30 * tmp = MAX(old_count, 0) + incr;
31 * sem->count = tmp;
32 * return old_count;
33 */
34static inline int __sem_update_count(struct semaphore *sem, int incr)
35{
36 int old_count, tmp;
37
38 __asm__ __volatile__("\n"
39"1: lwarx %0,0,%3\n"
40" srawi %1,%0,31\n"
41" andc %1,%0,%1\n"
42" add %1,%1,%4\n"
43 PPC405_ERR77(0,%3)
44" stwcx. %1,0,%3\n"
45" bne 1b"
46 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
47 : "r" (&sem->count), "r" (incr), "m" (sem->count)
48 : "cc");
49
50 return old_count;
51}
52
53void __up(struct semaphore *sem)
54{
55 /*
56 * Note that we incremented count in up() before we came here,
57 * but that was ineffective since the result was <= 0, and
58 * any negative value of count is equivalent to 0.
59 * This ends up setting count to 1, unless count is now > 0
60 * (i.e. because some other cpu has called up() in the meantime),
61 * in which case we just increment count.
62 */
63 __sem_update_count(sem, 1);
64 wake_up(&sem->wait);
65}
66EXPORT_SYMBOL(__up);
67
68/*
69 * Note that when we come in to __down or __down_interruptible,
70 * we have already decremented count, but that decrement was
71 * ineffective since the result was < 0, and any negative value
72 * of count is equivalent to 0.
73 * Thus it is only when we decrement count from some value > 0
74 * that we have actually got the semaphore.
75 */
76void __sched __down(struct semaphore *sem)
77{
78 struct task_struct *tsk = current;
79 DECLARE_WAITQUEUE(wait, tsk);
80
81 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
82 add_wait_queue_exclusive(&sem->wait, &wait);
83
84 /*
85 * Try to get the semaphore. If the count is > 0, then we've
86 * got the semaphore; we decrement count and exit the loop.
87 * If the count is 0 or negative, we set it to -1, indicating
88 * that we are asleep, and then sleep.
89 */
90 while (__sem_update_count(sem, -1) <= 0) {
91 schedule();
92 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
93 }
94 remove_wait_queue(&sem->wait, &wait);
95 __set_task_state(tsk, TASK_RUNNING);
96
97 /*
98 * If there are any more sleepers, wake one of them up so
99 * that it can either get the semaphore, or set count to -1
100 * indicating that there are still processes sleeping.
101 */
102 wake_up(&sem->wait);
103}
104EXPORT_SYMBOL(__down);
105
106int __sched __down_interruptible(struct semaphore * sem)
107{
108 int retval = 0;
109 struct task_struct *tsk = current;
110 DECLARE_WAITQUEUE(wait, tsk);
111
112 __set_task_state(tsk, TASK_INTERRUPTIBLE);
113 add_wait_queue_exclusive(&sem->wait, &wait);
114
115 while (__sem_update_count(sem, -1) <= 0) {
116 if (signal_pending(current)) {
117 /*
118 * A signal is pending - give up trying.
119 * Set sem->count to 0 if it is negative,
120 * since we are no longer sleeping.
121 */
122 __sem_update_count(sem, 0);
123 retval = -EINTR;
124 break;
125 }
126 schedule();
127 set_task_state(tsk, TASK_INTERRUPTIBLE);
128 }
129 remove_wait_queue(&sem->wait, &wait);
130 __set_task_state(tsk, TASK_RUNNING);
131
132 wake_up(&sem->wait);
133 return retval;
134}
135EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
new file mode 100644
index 000000000000..2d7fdeb581d1
--- /dev/null
+++ b/arch/powerpc/kernel/setup_32.c
@@ -0,0 +1,652 @@
1/*
2 * Common prep/pmac/chrp boot and setup code.
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <linux/string.h>
8#include <linux/sched.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/reboot.h>
12#include <linux/delay.h>
13#include <linux/initrd.h>
14#include <linux/ide.h>
15#include <linux/tty.h>
16#include <linux/bootmem.h>
17#include <linux/seq_file.h>
18#include <linux/root_dev.h>
19#include <linux/cpu.h>
20#include <linux/console.h>
21
22#include <asm/residual.h>
23#include <asm/io.h>
24#include <asm/prom.h>
25#include <asm/processor.h>
26#include <asm/pgtable.h>
27#include <asm/bootinfo.h>
28#include <asm/setup.h>
29#include <asm/amigappc.h>
30#include <asm/smp.h>
31#include <asm/elf.h>
32#include <asm/cputable.h>
33#include <asm/bootx.h>
34#include <asm/btext.h>
35#include <asm/machdep.h>
36#include <asm/uaccess.h>
37#include <asm/system.h>
38#include <asm/pmac_feature.h>
39#include <asm/sections.h>
40#include <asm/nvram.h>
41#include <asm/xmon.h>
42#include <asm/ocp.h>
43
44#define USES_PPC_SYS (defined(CONFIG_85xx) || defined(CONFIG_83xx) || \
45 defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \
46 defined(CONFIG_PPC_MPC52xx))
47
48#if USES_PPC_SYS
49#include <asm/ppc_sys.h>
50#endif
51
52#if defined CONFIG_KGDB
53#include <asm/kgdb.h>
54#endif
55
56extern void platform_init(void);
57extern void bootx_init(unsigned long r4, unsigned long phys);
58
59extern void ppc6xx_idle(void);
60extern void power4_idle(void);
61
62boot_infos_t *boot_infos;
63struct ide_machdep_calls ppc_ide_md;
64
65/* Used with the BI_MEMSIZE bootinfo parameter to store the memory
66 size value reported by the boot loader. */
67unsigned long boot_mem_size;
68
69unsigned long ISA_DMA_THRESHOLD;
70unsigned int DMA_MODE_READ;
71unsigned int DMA_MODE_WRITE;
72
73int have_of = 1;
74
75#ifdef CONFIG_PPC_MULTIPLATFORM
76int _machine = 0;
77
78extern void prep_init(void);
79extern void pmac_init(void);
80extern void chrp_init(void);
81
82dev_t boot_dev;
83#endif /* CONFIG_PPC_MULTIPLATFORM */
84
85#ifdef CONFIG_MAGIC_SYSRQ
86unsigned long SYSRQ_KEY = 0x54;
87#endif /* CONFIG_MAGIC_SYSRQ */
88
89#ifdef CONFIG_VGA_CONSOLE
90unsigned long vgacon_remap_base;
91#endif
92
93struct machdep_calls ppc_md;
94EXPORT_SYMBOL(ppc_md);
95
96/*
97 * These are used in binfmt_elf.c to put aux entries on the stack
98 * for each elf executable being started.
99 */
100int dcache_bsize;
101int icache_bsize;
102int ucache_bsize;
103
104#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_FB_VGA16) || \
105 defined(CONFIG_FB_VGA16_MODULE) || defined(CONFIG_FB_VESA)
106struct screen_info screen_info = {
107 0, 25, /* orig-x, orig-y */
108 0, /* unused */
109 0, /* orig-video-page */
110 0, /* orig-video-mode */
111 80, /* orig-video-cols */
112 0,0,0, /* ega_ax, ega_bx, ega_cx */
113 25, /* orig-video-lines */
114 1, /* orig-video-isVGA */
115 16 /* orig-video-points */
116};
117#endif /* CONFIG_VGA_CONSOLE || CONFIG_FB_VGA16 || CONFIG_FB_VESA */
118
119void machine_restart(char *cmd)
120{
121#ifdef CONFIG_NVRAM
122 nvram_sync();
123#endif
124 ppc_md.restart(cmd);
125}
126
127void machine_power_off(void)
128{
129#ifdef CONFIG_NVRAM
130 nvram_sync();
131#endif
132 ppc_md.power_off();
133}
134
135void machine_halt(void)
136{
137#ifdef CONFIG_NVRAM
138 nvram_sync();
139#endif
140 ppc_md.halt();
141}
142
143void (*pm_power_off)(void) = machine_power_off;
144
145#ifdef CONFIG_TAU
146extern u32 cpu_temp(unsigned long cpu);
147extern u32 cpu_temp_both(unsigned long cpu);
148#endif /* CONFIG_TAU */
149
150int show_cpuinfo(struct seq_file *m, void *v)
151{
152 int i = (int) v - 1;
153 unsigned int pvr;
154 unsigned short maj, min;
155 unsigned long lpj;
156
157 if (i >= NR_CPUS) {
158 /* Show summary information */
159#ifdef CONFIG_SMP
160 unsigned long bogosum = 0;
161 for (i = 0; i < NR_CPUS; ++i)
162 if (cpu_online(i))
163 bogosum += cpu_data[i].loops_per_jiffy;
164 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
165 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
166#endif /* CONFIG_SMP */
167
168 if (ppc_md.show_cpuinfo != NULL)
169 ppc_md.show_cpuinfo(m);
170 return 0;
171 }
172
173#ifdef CONFIG_SMP
174 if (!cpu_online(i))
175 return 0;
176 pvr = cpu_data[i].pvr;
177 lpj = cpu_data[i].loops_per_jiffy;
178#else
179 pvr = mfspr(SPRN_PVR);
180 lpj = loops_per_jiffy;
181#endif
182
183 seq_printf(m, "processor\t: %d\n", i);
184 seq_printf(m, "cpu\t\t: ");
185
186 if (cur_cpu_spec->pvr_mask)
187 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
188 else
189 seq_printf(m, "unknown (%08x)", pvr);
190#ifdef CONFIG_ALTIVEC
191 if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
192 seq_printf(m, ", altivec supported");
193#endif
194 seq_printf(m, "\n");
195
196#ifdef CONFIG_TAU
197 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
198#ifdef CONFIG_TAU_AVERAGE
199 /* more straightforward, but potentially misleading */
200 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
201 cpu_temp(i));
202#else
203 /* show the actual temp sensor range */
204 u32 temp;
205 temp = cpu_temp_both(i);
206 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
207 temp & 0xff, temp >> 16);
208#endif
209 }
210#endif /* CONFIG_TAU */
211
212 if (ppc_md.show_percpuinfo != NULL)
213 ppc_md.show_percpuinfo(m, i);
214
215 /* If we are a Freescale core do a simple check so
216 * we dont have to keep adding cases in the future */
217 if (PVR_VER(pvr) & 0x8000) {
218 maj = PVR_MAJ(pvr);
219 min = PVR_MIN(pvr);
220 } else {
221 switch (PVR_VER(pvr)) {
222 case 0x0020: /* 403 family */
223 maj = PVR_MAJ(pvr) + 1;
224 min = PVR_MIN(pvr);
225 break;
226 case 0x1008: /* 740P/750P ?? */
227 maj = ((pvr >> 8) & 0xFF) - 1;
228 min = pvr & 0xFF;
229 break;
230 default:
231 maj = (pvr >> 8) & 0xFF;
232 min = pvr & 0xFF;
233 break;
234 }
235 }
236
237 seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
238 maj, min, PVR_VER(pvr), PVR_REV(pvr));
239
240 seq_printf(m, "bogomips\t: %lu.%02lu\n",
241 lpj / (500000/HZ), (lpj / (5000/HZ)) % 100);
242
243#if USES_PPC_SYS
244 if (cur_ppc_sys_spec->ppc_sys_name)
245 seq_printf(m, "chipset\t\t: %s\n",
246 cur_ppc_sys_spec->ppc_sys_name);
247#endif
248
249#ifdef CONFIG_SMP
250 seq_printf(m, "\n");
251#endif
252
253 return 0;
254}
255
256static void *c_start(struct seq_file *m, loff_t *pos)
257{
258 int i = *pos;
259
260 return i <= NR_CPUS? (void *) (i + 1): NULL;
261}
262
263static void *c_next(struct seq_file *m, void *v, loff_t *pos)
264{
265 ++*pos;
266 return c_start(m, pos);
267}
268
269static void c_stop(struct seq_file *m, void *v)
270{
271}
272
273struct seq_operations cpuinfo_op = {
274 .start =c_start,
275 .next = c_next,
276 .stop = c_stop,
277 .show = show_cpuinfo,
278};
279
280/*
281 * We're called here very early in the boot. We determine the machine
282 * type and call the appropriate low-level setup functions.
283 * -- Cort <cort@fsmlabs.com>
284 *
285 * Note that the kernel may be running at an address which is different
286 * from the address that it was linked at, so we must use RELOC/PTRRELOC
287 * to access static data (including strings). -- paulus
288 */
289unsigned long __init early_init(unsigned long dt_ptr)
290{
291 unsigned long offset = reloc_offset();
292
293 /* First zero the BSS -- use memset_io, some platforms don't have
294 * caches on yet */
295 memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);
296
297 /*
298 * Identify the CPU type and fix up code sections
299 * that depend on which cpu we have.
300 */
301 identify_cpu(offset, 0);
302 do_cpu_ftr_fixups(offset);
303
304 return KERNELBASE + offset;
305}
306
307#ifdef CONFIG_PPC_OF
308/*
309 * Assume here that all clock rates are the same in a
310 * smp system. -- Cort
311 */
312int
313of_show_percpuinfo(struct seq_file *m, int i)
314{
315 struct device_node *cpu_node;
316 u32 *fp;
317 int s;
318
319 cpu_node = find_type_devices("cpu");
320 if (!cpu_node)
321 return 0;
322 for (s = 0; s < i && cpu_node->next; s++)
323 cpu_node = cpu_node->next;
324 fp = (u32 *)get_property(cpu_node, "clock-frequency", NULL);
325 if (fp)
326 seq_printf(m, "clock\t\t: %dMHz\n", *fp / 1000000);
327 return 0;
328}
329
330void __init
331intuit_machine_type(void)
332{
333 char *model;
334 struct device_node *root;
335
336 /* ask the OF info if we're a chrp or pmac */
337 root = find_path_device("/");
338 if (root != 0) {
339 /* assume pmac unless proven to be chrp -- Cort */
340 _machine = _MACH_Pmac;
341 model = get_property(root, "device_type", NULL);
342 if (model && !strncmp("chrp", model, 4))
343 _machine = _MACH_chrp;
344 else {
345 model = get_property(root, "model", NULL);
346 if (model && !strncmp(model, "IBM", 3))
347 _machine = _MACH_chrp;
348 }
349 }
350}
351#endif
352
353#ifdef CONFIG_PPC_MULTIPLATFORM
354/*
355 * The PPC_MULTIPLATFORM version of platform_init...
356 */
357void __init platform_init(void)
358{
359 /* if we didn't get any bootinfo telling us what we are... */
360 if (_machine == 0) {
361 /* prep boot loader tells us if we're prep or not */
362 if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
363 _machine = _MACH_prep;
364 }
365
366#ifdef CONFIG_PPC_PREP
367 /* not much more to do here, if prep */
368 if (_machine == _MACH_prep) {
369 prep_init();
370 return;
371 }
372#endif
373
374#ifdef CONFIG_ADB
375 if (strstr(cmd_line, "adb_sync")) {
376 extern int __adb_probe_sync;
377 __adb_probe_sync = 1;
378 }
379#endif /* CONFIG_ADB */
380
381 switch (_machine) {
382#ifdef CONFIG_PPC_PMAC
383 case _MACH_Pmac:
384 pmac_init();
385 break;
386#endif
387#ifdef CONFIG_PPC_CHRP
388 case _MACH_chrp:
389 chrp_init();
390 break;
391#endif
392 }
393}
394
395#ifdef CONFIG_SERIAL_CORE_CONSOLE
396extern char *of_stdout_device;
397
398static int __init set_preferred_console(void)
399{
400 struct device_node *prom_stdout;
401 char *name;
402 int offset = 0;
403
404 if (of_stdout_device == NULL)
405 return -ENODEV;
406
407 /* The user has requested a console so this is already set up. */
408 if (strstr(saved_command_line, "console="))
409 return -EBUSY;
410
411 prom_stdout = find_path_device(of_stdout_device);
412 if (!prom_stdout)
413 return -ENODEV;
414
415 name = (char *)get_property(prom_stdout, "name", NULL);
416 if (!name)
417 return -ENODEV;
418
419 if (strcmp(name, "serial") == 0) {
420 int i;
421 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
422 if (i > 8) {
423 switch (reg[1]) {
424 case 0x3f8:
425 offset = 0;
426 break;
427 case 0x2f8:
428 offset = 1;
429 break;
430 case 0x898:
431 offset = 2;
432 break;
433 case 0x890:
434 offset = 3;
435 break;
436 default:
437 /* We dont recognise the serial port */
438 return -ENODEV;
439 }
440 }
441 } else if (strcmp(name, "ch-a") == 0)
442 offset = 0;
443 else if (strcmp(name, "ch-b") == 0)
444 offset = 1;
445 else
446 return -ENODEV;
447 return add_preferred_console("ttyS", offset, NULL);
448}
449console_initcall(set_preferred_console);
450#endif /* CONFIG_SERIAL_CORE_CONSOLE */
451#endif /* CONFIG_PPC_MULTIPLATFORM */
452
453/*
454 * Find out what kind of machine we're on and save any data we need
455 * from the early boot process (devtree is copied on pmac by prom_init()).
456 * This is called very early on the boot process, after a minimal
457 * MMU environment has been set up but before MMU_init is called.
458 */
459void __init machine_init(unsigned long dt_ptr, unsigned long phys)
460{
461 early_init_devtree(__va(dt_ptr));
462
463#ifdef CONFIG_CMDLINE
464 strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
465#endif /* CONFIG_CMDLINE */
466
467 platform_init();
468
469#ifdef CONFIG_6xx
470 ppc_md.power_save = ppc6xx_idle;
471#endif
472
473 if (ppc_md.progress)
474 ppc_md.progress("id mach(): done", 0x200);
475}
476
477#ifdef CONFIG_BOOKE_WDT
478/* Checks wdt=x and wdt_period=xx command-line option */
479int __init early_parse_wdt(char *p)
480{
481 if (p && strncmp(p, "0", 1) != 0)
482 booke_wdt_enabled = 1;
483
484 return 0;
485}
486early_param("wdt", early_parse_wdt);
487
488int __init early_parse_wdt_period (char *p)
489{
490 if (p)
491 booke_wdt_period = simple_strtoul(p, NULL, 0);
492
493 return 0;
494}
495early_param("wdt_period", early_parse_wdt_period);
496#endif /* CONFIG_BOOKE_WDT */
497
498/* Checks "l2cr=xxxx" command-line option */
499int __init ppc_setup_l2cr(char *str)
500{
501 if (cpu_has_feature(CPU_FTR_L2CR)) {
502 unsigned long val = simple_strtoul(str, NULL, 0);
503 printk(KERN_INFO "l2cr set to %lx\n", val);
504 _set_L2CR(0); /* force invalidate by disable cache */
505 _set_L2CR(val); /* and enable it */
506 }
507 return 1;
508}
509__setup("l2cr=", ppc_setup_l2cr);
510
511#ifdef CONFIG_GENERIC_NVRAM
512
513/* Generic nvram hooks used by drivers/char/gen_nvram.c */
514unsigned char nvram_read_byte(int addr)
515{
516 if (ppc_md.nvram_read_val)
517 return ppc_md.nvram_read_val(addr);
518 return 0xff;
519}
520EXPORT_SYMBOL(nvram_read_byte);
521
522void nvram_write_byte(unsigned char val, int addr)
523{
524 if (ppc_md.nvram_write_val)
525 ppc_md.nvram_write_val(addr, val);
526}
527EXPORT_SYMBOL(nvram_write_byte);
528
529void nvram_sync(void)
530{
531 if (ppc_md.nvram_sync)
532 ppc_md.nvram_sync();
533}
534EXPORT_SYMBOL(nvram_sync);
535
536#endif /* CONFIG_NVRAM */
537
538static struct cpu cpu_devices[NR_CPUS];
539
540int __init ppc_init(void)
541{
542 int i;
543
544 /* clear the progress line */
545 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
546
547 /* register CPU devices */
548 for (i = 0; i < NR_CPUS; i++)
549 if (cpu_possible(i))
550 register_cpu(&cpu_devices[i], i, NULL);
551
552 /* call platform init */
553 if (ppc_md.init != NULL) {
554 ppc_md.init();
555 }
556 return 0;
557}
558
559arch_initcall(ppc_init);
560
561/* Warning, IO base is not yet inited */
562void __init setup_arch(char **cmdline_p)
563{
564 extern char *klimit;
565 extern void do_init_bootmem(void);
566
567 /* so udelay does something sensible, assume <= 1000 bogomips */
568 loops_per_jiffy = 500000000 / HZ;
569
570 unflatten_device_tree();
571 finish_device_tree();
572
573#ifdef CONFIG_BOOTX_TEXT
574 init_boot_display();
575#endif
576
577#ifdef CONFIG_PPC_MULTIPLATFORM
578 /* This could be called "early setup arch", it must be done
579 * now because xmon need it
580 */
581 if (_machine == _MACH_Pmac)
582 pmac_feature_init(); /* New cool way */
583#endif
584
585#ifdef CONFIG_XMON
586 xmon_map_scc();
587 if (strstr(cmd_line, "xmon"))
588 xmon(NULL);
589#endif /* CONFIG_XMON */
590 if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
591
592#if defined(CONFIG_KGDB)
593 if (ppc_md.kgdb_map_scc)
594 ppc_md.kgdb_map_scc();
595 set_debug_traps();
596 if (strstr(cmd_line, "gdb")) {
597 if (ppc_md.progress)
598 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
599 printk("kgdb breakpoint activated\n");
600 breakpoint();
601 }
602#endif
603
604 /*
605 * Set cache line size based on type of cpu as a default.
606 * Systems with OF can look in the properties on the cpu node(s)
607 * for a possibly more accurate value.
608 */
609 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
610 dcache_bsize = cur_cpu_spec->dcache_bsize;
611 icache_bsize = cur_cpu_spec->icache_bsize;
612 ucache_bsize = 0;
613 } else
614 ucache_bsize = dcache_bsize = icache_bsize
615 = cur_cpu_spec->dcache_bsize;
616
617 /* reboot on panic */
618 panic_timeout = 180;
619
620 init_mm.start_code = PAGE_OFFSET;
621 init_mm.end_code = (unsigned long) _etext;
622 init_mm.end_data = (unsigned long) _edata;
623 init_mm.brk = (unsigned long) klimit;
624
625 /* Save unparsed command line copy for /proc/cmdline */
626 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
627 *cmdline_p = cmd_line;
628
629 parse_early_param();
630
631 /* set up the bootmem stuff with available memory */
632 do_init_bootmem();
633 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
634
635#ifdef CONFIG_PPC_OCP
636 /* Initialize OCP device list */
637 ocp_early_init();
638 if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
639#endif
640
641#ifdef CONFIG_DUMMY_CONSOLE
642 conswitchp = &dummy_con;
643#endif
644
645 ppc_md.setup_arch();
646 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
647
648 paging_init();
649
650 /* this is for modules since _machine can be a define -- Cort */
651 ppc_md.ppc_machine = _machine;
652}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
new file mode 100644
index 000000000000..0312422881ae
--- /dev/null
+++ b/arch/powerpc/kernel/setup_64.c
@@ -0,0 +1,1307 @@
1/*
2 *
3 * Common boot and setup code.
4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#undef DEBUG
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/sched.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/reboot.h>
22#include <linux/delay.h>
23#include <linux/initrd.h>
24#include <linux/ide.h>
25#include <linux/seq_file.h>
26#include <linux/ioport.h>
27#include <linux/console.h>
28#include <linux/utsname.h>
29#include <linux/tty.h>
30#include <linux/root_dev.h>
31#include <linux/notifier.h>
32#include <linux/cpu.h>
33#include <linux/unistd.h>
34#include <linux/serial.h>
35#include <linux/serial_8250.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/processor.h>
39#include <asm/pgtable.h>
40#include <asm/bootinfo.h>
41#include <asm/smp.h>
42#include <asm/elf.h>
43#include <asm/machdep.h>
44#include <asm/paca.h>
45#include <asm/ppcdebug.h>
46#include <asm/time.h>
47#include <asm/cputable.h>
48#include <asm/sections.h>
49#include <asm/btext.h>
50#include <asm/nvram.h>
51#include <asm/setup.h>
52#include <asm/system.h>
53#include <asm/rtas.h>
54#include <asm/iommu.h>
55#include <asm/serial.h>
56#include <asm/cache.h>
57#include <asm/page.h>
58#include <asm/mmu.h>
59#include <asm/lmb.h>
60#include <asm/iSeries/ItLpNaca.h>
61#include <asm/firmware.h>
62#include <asm/systemcfg.h>
63
64#ifdef DEBUG
65#define DBG(fmt...) udbg_printf(fmt)
66#else
67#define DBG(fmt...)
68#endif
69
70/*
71 * Here are some early debugging facilities. You can enable one
72 * but your kernel will not boot on anything else if you do so
73 */
74
75/* This one is for use on LPAR machines that support an HVC console
76 * on vterm 0
77 */
78extern void udbg_init_debug_lpar(void);
79/* This one is for use on Apple G5 machines
80 */
81extern void udbg_init_pmac_realmode(void);
82/* That's RTAS panel debug */
83extern void call_rtas_display_status_delay(unsigned char c);
84/* Here's maple real mode debug */
85extern void udbg_init_maple_realmode(void);
86
87#define EARLY_DEBUG_INIT() do {} while(0)
88
89#if 0
90#define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
91#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
92#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
93#define EARLY_DEBUG_INIT() \
94 do { udbg_putc = call_rtas_display_status_delay; } while(0)
95#endif
96
97/* extern void *stab; */
98extern unsigned long klimit;
99
100extern void mm_init_ppc64(void);
101extern void stab_initialize(unsigned long stab);
102extern void htab_initialize(void);
103extern void early_init_devtree(void *flat_dt);
104extern void unflatten_device_tree(void);
105
106extern void smp_release_cpus(void);
107
/* This platform always boots via Open Firmware / a flattened device-tree. */
int have_of = 1;
/* Logical and physical ids of the cpu we booted on (physical id comes
 * from the device-tree; see setup_cpu_maps). */
int boot_cpuid = 0;
int boot_cpuid_phys = 0;
/* Boot device, for ROOT_DEV style decisions. */
dev_t boot_dev;
/* Hash page-table size as told to us by the firmware/early boot code. */
u64 ppc64_pft_size;

/* Cache geometry filled in by initialize_cache_info(). */
struct ppc64_caches ppc64_caches;
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

/* The main machine-dep calls structure, copied from the matching
 * entry of machines[] in early_setup().
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);

#ifdef CONFIG_MAGIC_SYSRQ
unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */


/* Panic notifier: forwards panics to ppc_md.panic (registered in
 * setup_arch() only when the platform provides a handler). */
static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block ppc64_panic_block = {
	.notifier_call = ppc64_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

/*
 * Perhaps we can put the pmac screen_info[] here
 * on pmac as well so we don't need the ifdef's.
 * Until we get multiple-console support in here
 * that is. -- Cort
 * Maybe tie it to serial consoles, since this is really what
 * these processors use on existing boards. -- Dan
 */
struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};
157
158#ifdef CONFIG_SMP
159
160static int smt_enabled_cmdline;
161
162/* Look for ibm,smt-enabled OF option */
163static void check_smt_enabled(void)
164{
165 struct device_node *dn;
166 char *smt_option;
167
168 /* Allow the command line to overrule the OF option */
169 if (smt_enabled_cmdline)
170 return;
171
172 dn = of_find_node_by_path("/options");
173
174 if (dn) {
175 smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
176
177 if (smt_option) {
178 if (!strcmp(smt_option, "on"))
179 smt_enabled_at_boot = 1;
180 else if (!strcmp(smt_option, "off"))
181 smt_enabled_at_boot = 0;
182 }
183 }
184}
185
186/* Look for smt-enabled= cmdline option */
187static int __init early_smt_enabled(char *p)
188{
189 smt_enabled_cmdline = 1;
190
191 if (!p)
192 return 0;
193
194 if (!strcmp(p, "on") || !strcmp(p, "1"))
195 smt_enabled_at_boot = 1;
196 else if (!strcmp(p, "off") || !strcmp(p, "0"))
197 smt_enabled_at_boot = 0;
198
199 return 0;
200}
201early_param("smt-enabled", early_smt_enabled);
202
203/**
204 * setup_cpu_maps - initialize the following cpu maps:
205 * cpu_possible_map
206 * cpu_present_map
207 * cpu_sibling_map
208 *
209 * Having the possible map set up early allows us to restrict allocations
210 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
211 *
212 * We do not initialize the online map here; cpus set their own bits in
213 * cpu_online_map as they come up.
214 *
215 * This function is valid only for Open Firmware systems. finish_device_tree
216 * must be called before using this.
217 *
218 * While we're here, we may as well set the "physical" cpu ids in the paca.
219 */
static void __init setup_cpu_maps(void)
{
	struct device_node *dn = NULL;
	int cpu = 0;
	int swap_cpuid = 0;

	check_smt_enabled();

	/* Walk every "cpu" device-tree node.  A node can describe several
	 * SMT threads: one per u32 in ibm,ppc-interrupt-server#s.  Older
	 * firmware has only a single-valued "reg" property; len keeps its
	 * sizeof(u32) default in that case so nthreads works out to 1.
	 * NOTE(review): assumes every cpu node carries at least one of
	 * these properties (intserv is dereferenced unchecked) — confirm.
	 */
	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
		u32 *intserv;
		int j, len = sizeof(u32), nthreads;

		intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
					      &len);
		if (!intserv)
			intserv = (u32 *)get_property(dn, "reg", NULL);

		nthreads = len / sizeof(u32);

		/* One logical cpu per thread: mark it present/possible and
		 * record the firmware (physical) id in the paca. */
		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
			cpu_set(cpu, cpu_present_map);
			set_hard_smp_processor_id(cpu, intserv[j]);

			/* Remember which logical id ended up with the boot
			 * cpu's physical id, for the swap below. */
			if (intserv[j] == boot_cpuid_phys)
				swap_cpuid = cpu;
			cpu_set(cpu, cpu_possible_map);
			cpu++;
		}
	}

	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
	 * boot cpu is logical 0.
	 */
	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
		u32 tmp;
		tmp = get_hard_smp_processor_id(0);
		set_hard_smp_processor_id(0, boot_cpuid_phys);
		set_hard_smp_processor_id(swap_cpuid, tmp);
	}

	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		unsigned int *ireg;

		num_addr_cell = prom_n_addr_cells(dn);
		num_size_cell = prom_n_size_cells(dn);

		ireg = (unsigned int *)
			get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		/* The cpu count sits after the address and size cells of
		 * the ibm,lrdr-capacity property. */
		maxcpus = ireg[num_addr_cell + num_size_cell];

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= 2;

		if (maxcpus > NR_CPUS) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %d.\n",
			       maxcpus, NR_CPUS);
			maxcpus = NR_CPUS;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			cpu_set(cpu, cpu_possible_map);
	out:
		of_node_put(dn);
	}

	/*
	 * Do the sibling map; assume only two threads per processor.
	 * With SMT, thread pairs are (2n, 2n+1), hence the xor with 1.
	 */
	for_each_cpu(cpu) {
		cpu_set(cpu, cpu_sibling_map[cpu]);
		if (cpu_has_feature(CPU_FTR_SMT))
			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
	}

	systemcfg->processorCount = num_present_cpus();
}
311#endif /* CONFIG_SMP */
312
313extern struct machdep_calls pSeries_md;
314extern struct machdep_calls pmac_md;
315extern struct machdep_calls maple_md;
316extern struct machdep_calls bpa_md;
317extern struct machdep_calls iseries_md;
318
319/* Ultimately, stuff them in an elf section like initcalls... */
320static struct machdep_calls __initdata *machines[] = {
321#ifdef CONFIG_PPC_PSERIES
322 &pSeries_md,
323#endif /* CONFIG_PPC_PSERIES */
324#ifdef CONFIG_PPC_PMAC
325 &pmac_md,
326#endif /* CONFIG_PPC_PMAC */
327#ifdef CONFIG_PPC_MAPLE
328 &maple_md,
329#endif /* CONFIG_PPC_MAPLE */
330#ifdef CONFIG_PPC_BPA
331 &bpa_md,
332#endif
333#ifdef CONFIG_PPC_ISERIES
334 &iseries_md,
335#endif
336 NULL
337};
338
339/*
340 * Early initialization entry point. This is called by head.S
341 * with MMU translation disabled. We rely on the "feature" of
342 * the CPU that ignores the top 2 bits of the address in real
343 * mode so we can access kernel globals normally provided we
344 * only toy with things in the RMO region. From here, we do
345 * some early parsing of the device-tree to setup out LMB
346 * data structures, and allocate & initialize the hash table
347 * and segment tables so we can start running with translation
348 * enabled.
349 *
350 * It is this function which will call the probe() callback of
351 * the various platform types and copy the matching one to the
352 * global ppc_md structure. Your platform can eventually do
353 * some very early initializations from the probe() routine, but
354 * this is not recommended, be very careful as, for example, the
355 * device-tree is not accessible via normal means at this point.
356 */
357
void __init early_setup(unsigned long dt_ptr)
{
	struct paca_struct *lpaca = get_paca();
	/* NOTE(review): 'mach' is static, presumably to keep it off the
	 * stack this early in boot — confirm before changing. */
	static struct machdep_calls **mach;

	/*
	 * Enable early debugging if any specified (see top of
	 * this file)
	 */
	EARLY_DEBUG_INIT();

	DBG(" -> early_setup()\n");

	/*
	 * Fill the default DBG level (do we want to keep
	 * that old mechanism around forever ?)
	 */
	ppcdbg_initialize();

	/*
	 * Do early initializations using the flattened device
	 * tree, like retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 * dt_ptr is a physical address; __va() makes it usable here.
	 */
	early_init_devtree(__va(dt_ptr));

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type for platform %x...\n",
	    systemcfg->platform);

	for (mach = machines; *mach; mach++) {
		if ((*mach)->probe(systemcfg->platform))
			break;
	}
	/* What can we do if we didn't find ?  Nothing sane: just spin so
	 * the early-debug output (if any) stays on screen. */
	if (*mach == NULL) {
		DBG("No suitable machine found !\n");
		for (;;);
	}
	/* Structure copy: from here on ppc_md carries the platform hooks. */
	ppc_md = **mach;

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize stab / SLB management (iSeries firmware does this
	 * for us, hence the feature check)
	 */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		stab_initialize(lpaca->stab_real);

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory
	 */
	htab_initialize();

	DBG(" <- early_setup()\n");
}
418
419
420/*
421 * Initialize some remaining members of the ppc64_caches and systemcfg structures
422 * (at least until we get rid of them completely). This is mostly some
423 * cache informations about the CPU that will be used by cache flush
424 * routines and/or provided to userland
425 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
		num_cpus += 1;

		/* We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */

		/* Only the first cpu node is inspected; the loop keeps
		 * running just to count the nodes. */
		if ( num_cpus == 1 ) {
			u32 *sizep, *lsizep;
			u32 size, lsize;
			const char *dc, *ic;

			/* Then read cache informations.  PowerMac firmware
			 * names the line-size properties differently. */
			if (systemcfg->platform == PLATFORM_POWERMAC) {
				dc = "d-cache-block-size";
				ic = "i-cache-block-size";
			} else {
				dc = "d-cache-line-size";
				ic = "i-cache-line-size";
			}

			/* D-cache: size defaults to 0, line size to the
			 * cputable value, when the properties are absent. */
			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = (u32 *)get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *) get_property(np, dc, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			/* (pointer compared against 0, i.e. NULL) */
			if (sizep == 0 || lsizep == 0)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->dcache_size = ppc64_caches.dsize = size;
			systemcfg->dcache_line_size =
				ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			/* Same procedure for the I-cache. */
			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = (u32 *)get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *)get_property(np, ic, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == 0 || lsizep == 0)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->icache_size = ppc64_caches.isize = size;
			systemcfg->icache_line_size =
				ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;
	systemcfg->processor = mfspr(SPRN_PVR);

	DBG(" <- initialize_cache_info()\n");
}
500
501static void __init check_for_initrd(void)
502{
503#ifdef CONFIG_BLK_DEV_INITRD
504 u64 *prop;
505
506 DBG(" -> check_for_initrd()\n");
507
508 if (of_chosen) {
509 prop = (u64 *)get_property(of_chosen,
510 "linux,initrd-start", NULL);
511 if (prop != NULL) {
512 initrd_start = (unsigned long)__va(*prop);
513 prop = (u64 *)get_property(of_chosen,
514 "linux,initrd-end", NULL);
515 if (prop != NULL) {
516 initrd_end = (unsigned long)__va(*prop);
517 initrd_below_start_ok = 1;
518 } else
519 initrd_start = 0;
520 }
521 }
522
523 /* If we were passed an initrd, set the ROOT_DEV properly if the values
524 * look sensible. If not, clear initrd reference.
525 */
526 if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
527 initrd_end > initrd_start)
528 ROOT_DEV = Root_RAM0;
529 else
530 initrd_start = initrd_end = 0;
531
532 if (initrd_start)
533 printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
534
535 DBG(" <- check_for_initrd()\n");
536#endif /* CONFIG_BLK_DEV_INITRD */
537}
538
539/*
540 * Do some initial setup of the system. The parameters are those which
541 * were passed in from the bootloader.
542 */
543void __init setup_system(void)
544{
545 DBG(" -> setup_system()\n");
546
547 /*
548 * Unflatten the device-tree passed by prom_init or kexec
549 */
550 unflatten_device_tree();
551
552 /*
553 * Fill the ppc64_caches & systemcfg structures with informations
554 * retreived from the device-tree. Need to be called before
555 * finish_device_tree() since the later requires some of the
556 * informations filled up here to properly parse the interrupt
557 * tree.
558 * It also sets up the cache line sizes which allows to call
559 * routines like flush_icache_range (used by the hash init
560 * later on).
561 */
562 initialize_cache_info();
563
564#ifdef CONFIG_PPC_RTAS
565 /*
566 * Initialize RTAS if available
567 */
568 rtas_initialize();
569#endif /* CONFIG_PPC_RTAS */
570
571 /*
572 * Check if we have an initrd provided via the device-tree
573 */
574 check_for_initrd();
575
576 /*
577 * Do some platform specific early initializations, that includes
578 * setting up the hash table pointers. It also sets up some interrupt-mapping
579 * related options that will be used by finish_device_tree()
580 */
581 ppc_md.init_early();
582
583 /*
584 * "Finish" the device-tree, that is do the actual parsing of
585 * some of the properties like the interrupt map
586 */
587 finish_device_tree();
588
589#ifdef CONFIG_BOOTX_TEXT
590 init_boot_display();
591#endif
592
593 /*
594 * Initialize xmon
595 */
596#ifdef CONFIG_XMON_DEFAULT
597 xmon_init(1);
598#endif
599 /*
600 * Register early console
601 */
602 register_early_udbg_console();
603
604 /* Save unparsed command line copy for /proc/cmdline */
605 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
606
607 parse_early_param();
608
609#ifdef CONFIG_SMP
610 /*
611 * iSeries has already initialized the cpu maps at this point.
612 */
613 setup_cpu_maps();
614
615 /* Release secondary cpus out of their spinloops at 0x60 now that
616 * we can map physical -> logical CPU ids
617 */
618 smp_release_cpus();
619#endif
620
621 printk("Starting Linux PPC64 %s\n", system_utsname.version);
622
623 printk("-----------------------------------------------------\n");
624 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
625 printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
626 printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
627 printk("systemcfg = 0x%p\n", systemcfg);
628 printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
629 printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
630 printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
631 printk("ppc64_caches.dcache_line_size = 0x%x\n",
632 ppc64_caches.dline_size);
633 printk("ppc64_caches.icache_line_size = 0x%x\n",
634 ppc64_caches.iline_size);
635 printk("htab_address = 0x%p\n", htab_address);
636 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
637 printk("-----------------------------------------------------\n");
638
639 mm_init_ppc64();
640
641 DBG(" <- setup_system()\n");
642}
643
644/* also used by kexec */
/* Flush NVRAM (if the platform has a sync hook) before the machine goes
 * down; shared by restart/halt/power-off and the kexec path. */
void machine_shutdown(void)
{
	if (ppc_md.nvram_sync)
		ppc_md.nvram_sync();
}
650
/* Reboot the machine via the platform hook.  The code after the
 * ppc_md.restart() call is a fallback — NOTE(review): presumably the
 * hook normally does not return; confirm per platform. */
void machine_restart(char *cmd)
{
	machine_shutdown();
	ppc_md.restart(cmd);
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	/* Spin forever with interrupts off: nothing left to do. */
	while (1) ;
}
662
/* Power the machine off via the platform hook; the tail is a fallback
 * in case the hook returns (same pattern as machine_restart). */
void machine_power_off(void)
{
	machine_shutdown();
	ppc_md.power_off();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);
676
/* Halt the machine via the platform hook; the tail is a fallback in
 * case the hook returns (same pattern as machine_restart). */
void machine_halt(void)
{
	machine_shutdown();
	ppc_md.halt();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}
688
/* Panic notifier callback: hand the panic string to the platform.
 * Registered (in setup_arch) at priority INT_MIN so it runs last. */
static int ppc64_panic_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
	ppc_md.panic((char *)ptr);  /* May not return */
	return NOTIFY_DONE;
}
695
696
697#ifdef CONFIG_SMP
698DEFINE_PER_CPU(unsigned int, pvr);
699#endif
700
701static int show_cpuinfo(struct seq_file *m, void *v)
702{
703 unsigned long cpu_id = (unsigned long)v - 1;
704 unsigned int pvr;
705 unsigned short maj;
706 unsigned short min;
707
708 if (cpu_id == NR_CPUS) {
709 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
710
711 if (ppc_md.show_cpuinfo != NULL)
712 ppc_md.show_cpuinfo(m);
713
714 return 0;
715 }
716
717 /* We only show online cpus: disable preempt (overzealous, I
718 * knew) to prevent cpu going down. */
719 preempt_disable();
720 if (!cpu_online(cpu_id)) {
721 preempt_enable();
722 return 0;
723 }
724
725#ifdef CONFIG_SMP
726 pvr = per_cpu(pvr, cpu_id);
727#else
728 pvr = mfspr(SPRN_PVR);
729#endif
730 maj = (pvr >> 8) & 0xFF;
731 min = pvr & 0xFF;
732
733 seq_printf(m, "processor\t: %lu\n", cpu_id);
734 seq_printf(m, "cpu\t\t: ");
735
736 if (cur_cpu_spec->pvr_mask)
737 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
738 else
739 seq_printf(m, "unknown (%08x)", pvr);
740
741#ifdef CONFIG_ALTIVEC
742 if (cpu_has_feature(CPU_FTR_ALTIVEC))
743 seq_printf(m, ", altivec supported");
744#endif /* CONFIG_ALTIVEC */
745
746 seq_printf(m, "\n");
747
748 /*
749 * Assume here that all clock rates are the same in a
750 * smp system. -- Cort
751 */
752 seq_printf(m, "clock\t\t: %lu.%06luMHz\n", ppc_proc_freq / 1000000,
753 ppc_proc_freq % 1000000);
754
755 seq_printf(m, "revision\t: %hd.%hd\n\n", maj, min);
756
757 preempt_enable();
758 return 0;
759}
760
761static void *c_start(struct seq_file *m, loff_t *pos)
762{
763 return *pos <= NR_CPUS ? (void *)((*pos)+1) : NULL;
764}
765static void *c_next(struct seq_file *m, void *v, loff_t *pos)
766{
767 ++*pos;
768 return c_start(m, pos);
769}
/* seq_file stop hook: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
773struct seq_operations cpuinfo_op = {
774 .start =c_start,
775 .next = c_next,
776 .stop = c_stop,
777 .show = show_cpuinfo,
778};
779
/*
 * These three variables are used to save values passed to us by prom_init()
 * via the device tree. The TCE variables are needed because with a memory_limit
 * in force we may need to explicitly map the TCE area at the top of RAM.
 */
unsigned long memory_limit;
unsigned long tce_alloc_start;
unsigned long tce_alloc_end;
788
789#ifdef CONFIG_PPC_ISERIES
790/*
791 * On iSeries we just parse the mem=X option from the command line.
792 * On pSeries it's a bit more complicated, see prom_init_mem()
793 */
794static int __init early_parsemem(char *p)
795{
796 if (!p)
797 return 0;
798
799 memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
800
801 return 0;
802}
803early_param("mem", early_parsemem);
804#endif /* CONFIG_PPC_ISERIES */
805
806#ifdef CONFIG_PPC_MULTIPLATFORM
807static int __init set_preferred_console(void)
808{
809 struct device_node *prom_stdout = NULL;
810 char *name;
811 u32 *spd;
812 int offset = 0;
813
814 DBG(" -> set_preferred_console()\n");
815
816 /* The user has requested a console so this is already set up. */
817 if (strstr(saved_command_line, "console=")) {
818 DBG(" console was specified !\n");
819 return -EBUSY;
820 }
821
822 if (!of_chosen) {
823 DBG(" of_chosen is NULL !\n");
824 return -ENODEV;
825 }
826 /* We are getting a weird phandle from OF ... */
827 /* ... So use the full path instead */
828 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
829 if (name == NULL) {
830 DBG(" no linux,stdout-path !\n");
831 return -ENODEV;
832 }
833 prom_stdout = of_find_node_by_path(name);
834 if (!prom_stdout) {
835 DBG(" can't find stdout package %s !\n", name);
836 return -ENODEV;
837 }
838 DBG("stdout is %s\n", prom_stdout->full_name);
839
840 name = (char *)get_property(prom_stdout, "name", NULL);
841 if (!name) {
842 DBG(" stdout package has no name !\n");
843 goto not_found;
844 }
845 spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
846
847 if (0)
848 ;
849#ifdef CONFIG_SERIAL_8250_CONSOLE
850 else if (strcmp(name, "serial") == 0) {
851 int i;
852 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
853 if (i > 8) {
854 switch (reg[1]) {
855 case 0x3f8:
856 offset = 0;
857 break;
858 case 0x2f8:
859 offset = 1;
860 break;
861 case 0x898:
862 offset = 2;
863 break;
864 case 0x890:
865 offset = 3;
866 break;
867 default:
868 /* We dont recognise the serial port */
869 goto not_found;
870 }
871 }
872 }
873#endif /* CONFIG_SERIAL_8250_CONSOLE */
874#ifdef CONFIG_PPC_PSERIES
875 else if (strcmp(name, "vty") == 0) {
876 u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
877 char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
878
879 if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
880 /* Host Virtual Serial Interface */
881 int offset;
882 switch (reg[0]) {
883 case 0x30000000:
884 offset = 0;
885 break;
886 case 0x30000001:
887 offset = 1;
888 break;
889 default:
890 goto not_found;
891 }
892 of_node_put(prom_stdout);
893 DBG("Found hvsi console at offset %d\n", offset);
894 return add_preferred_console("hvsi", offset, NULL);
895 } else {
896 /* pSeries LPAR virtual console */
897 of_node_put(prom_stdout);
898 DBG("Found hvc console\n");
899 return add_preferred_console("hvc", 0, NULL);
900 }
901 }
902#endif /* CONFIG_PPC_PSERIES */
903#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
904 else if (strcmp(name, "ch-a") == 0)
905 offset = 0;
906 else if (strcmp(name, "ch-b") == 0)
907 offset = 1;
908#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
909 else
910 goto not_found;
911 of_node_put(prom_stdout);
912
913 DBG("Found serial console at ttyS%d\n", offset);
914
915 if (spd) {
916 static char __initdata opt[16];
917 sprintf(opt, "%d", *spd);
918 return add_preferred_console("ttyS", offset, opt);
919 } else
920 return add_preferred_console("ttyS", offset, NULL);
921
922 not_found:
923 DBG("No preferred console found !\n");
924 of_node_put(prom_stdout);
925 return -ENODEV;
926}
927console_initcall(set_preferred_console);
928#endif /* CONFIG_PPC_MULTIPLATFORM */
929
930#ifdef CONFIG_IRQSTACKS
/* Allocate per-cpu soft/hard IRQ stacks from LMB during early boot. */
static void __init irqstack_early_init(void)
{
	unsigned int i;

	/*
	 * interrupt stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them.
	 */
	for_each_cpu(i) {
		/* lmb_alloc_base(size, align, max_addr): THREAD_SIZE bytes,
		 * THREAD_SIZE-aligned, capped below 0x10000000 (256MB). */
		softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
		hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
	}
}
946#else
947#define irqstack_early_init()
948#endif
949
950/*
951 * Stack space used when we detect a bad kernel stack pointer, and
952 * early in SMP boots before relocation is enabled.
953 */
static void __init emergency_stack_init(void)
{
	unsigned long limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(0x10000000UL, lmb.rmo_size);

	/* + PAGE_SIZE: point the saved sp at the top of the allocated
	 * page, since the stack grows downward. */
	for_each_cpu(i)
		paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
						limit)) + PAGE_SIZE;
}
974
975/*
976 * Called from setup_arch to initialize the bitmap of available
977 * syscalls in the systemcfg page
978 */
void __init setup_syscall_map(void)
{
	unsigned int i, count64 = 0, count32 = 0;
	/* NOTE(review): declared as a pointer but indexed like an array —
	 * relies on the actual layout of the 64-bit syscall table symbol;
	 * verify against its assembly definition. */
	extern unsigned long *sys_call_table;
	extern unsigned long sys_ni_syscall;


	/* The table interleaves entries: [i*2] is the 64-bit handler for
	 * syscall i, [i*2+1] the 32-bit one.  A slot differing from
	 * sys_ni_syscall counts as implemented and sets bit i (MSB-first
	 * within each 32-bit word) in the corresponding systemcfg map. */
	for (i = 0; i < __NR_syscalls; i++) {
		if (sys_call_table[i*2] != sys_ni_syscall) {
			count64++;
			systemcfg->syscall_map_64[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		}
		if (sys_call_table[i*2+1] != sys_ni_syscall) {
			count32++;
			systemcfg->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		}
	}
	printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
	       count32, count64);
}
1001
1002/*
1003 * Called into from start_kernel, after lock_kernel has been called.
1004 * Initializes bootmem, which is unsed to manage page allocation until
1005 * mem_init is called.
1006 */
void __init setup_arch(char **cmdline_p)
{
	extern void do_init_bootmem(void);

	ppc64_boot_msg(0x12, "Setup Arch");

	/* Hand the (already early-parsed) command line back to init/main.c */
	*cmdline_p = cmd_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	/* reboot on panic */
	panic_timeout = 180;

	/* Only hook the panic notifier when the platform can handle it. */
	if (ppc_md.panic)
		notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);

	/* Describe the kernel image to the initial mm. */
	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

	/* Early per-cpu stacks: must precede bootmem (they come from LMB). */
	irqstack_early_init();
	emergency_stack_init();

	stabs_alloc();

	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	sparse_init();

	/* initialize the syscall map in systemcfg */
	setup_syscall_map();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	ppc_md.setup_arch();

	/* Use the default idle loop if the platform hasn't provided one. */
	if (NULL == ppc_md.idle_loop) {
		ppc_md.idle_loop = default_idle;
		printk(KERN_INFO "Using default idle loop\n");
	}

	paging_init();
	ppc64_boot_msg(0x15, "Setup Done");
}
1061
1062
1063/* ToDo: do something useful if ppc_md is not yet setup. */
1064#define PPC64_LINUX_FUNCTION 0x0f000000
1065#define PPC64_IPL_MESSAGE 0xc0000000
1066#define PPC64_TERM_MESSAGE 0xb0000000
1067
1068static void ppc64_do_msg(unsigned int src, const char *msg)
1069{
1070 if (ppc_md.progress) {
1071 char buf[128];
1072
1073 sprintf(buf, "%08X\n", src);
1074 ppc_md.progress(buf, 0);
1075 snprintf(buf, 128, "%s", msg);
1076 ppc_md.progress(buf, 0);
1077 }
1078}
1079
1080/* Print a boot progress message. */
/* Print a boot progress message, both through the platform progress hook
 * (tagged as an IPL message) and to the kernel log. */
void ppc64_boot_msg(unsigned int src, const char *msg)
{
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
	printk("[boot]%04x %s\n", src, msg);
}
1086
1087/* Print a termination message (print only -- does not stop the kernel) */
/* Print a termination message (print only -- does not stop the kernel). */
void ppc64_terminate_msg(unsigned int src, const char *msg)
{
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
	printk("[terminate]%04x %s\n", src, msg);
}
1093
1094#ifndef CONFIG_PPC_ISERIES
1095/*
1096 * This function can be used by platforms to "find" legacy serial ports.
1097 * It works for "serial" nodes under an "isa" node, and will try to
1098 * respect the "ibm,aix-loc" property if any. It works with up to 8
1099 * ports.
1100 */
1101
1102#define MAX_LEGACY_SERIAL_PORTS 8
1103static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
1104static unsigned int old_serial_count;
1105
1106void __init generic_find_legacy_serial_ports(u64 *physport,
1107 unsigned int *default_speed)
1108{
1109 struct device_node *np;
1110 u32 *sizeprop;
1111
1112 struct isa_reg_property {
1113 u32 space;
1114 u32 address;
1115 u32 size;
1116 };
1117 struct pci_reg_property {
1118 struct pci_address addr;
1119 u32 size_hi;
1120 u32 size_lo;
1121 };
1122
1123 DBG(" -> generic_find_legacy_serial_port()\n");
1124
1125 *physport = 0;
1126 if (default_speed)
1127 *default_speed = 0;
1128
1129 np = of_find_node_by_path("/");
1130 if (!np)
1131 return;
1132
1133 /* First fill our array */
1134 for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
1135 struct device_node *isa, *pci;
1136 struct isa_reg_property *reg;
1137 unsigned long phys_size, addr_size, io_base;
1138 u32 *rangesp;
1139 u32 *interrupts, *clk, *spd;
1140 char *typep;
1141 int index, rlen, rentsize;
1142
1143 /* Ok, first check if it's under an "isa" parent */
1144 isa = of_get_parent(np);
1145 if (!isa || strcmp(isa->name, "isa")) {
1146 DBG("%s: no isa parent found\n", np->full_name);
1147 continue;
1148 }
1149
1150 /* Now look for an "ibm,aix-loc" property that gives us ordering
1151 * if any...
1152 */
1153 typep = (char *)get_property(np, "ibm,aix-loc", NULL);
1154
1155 /* Get the ISA port number */
1156 reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
1157 if (reg == NULL)
1158 goto next_port;
1159 /* We assume the interrupt number isn't translated ... */
1160 interrupts = (u32 *)get_property(np, "interrupts", NULL);
1161 /* get clock freq. if present */
1162 clk = (u32 *)get_property(np, "clock-frequency", NULL);
1163 /* get default speed if present */
1164 spd = (u32 *)get_property(np, "current-speed", NULL);
1165 /* Default to locate at end of array */
1166 index = old_serial_count; /* end of the array by default */
1167
1168 /* If we have a location index, then use it */
1169 if (typep && *typep == 'S') {
1170 index = simple_strtol(typep+1, NULL, 0) - 1;
1171 /* if index is out of range, use end of array instead */
1172 if (index >= MAX_LEGACY_SERIAL_PORTS)
1173 index = old_serial_count;
1174 /* if our index is still out of range, that mean that
1175 * array is full, we could scan for a free slot but that
1176 * make little sense to bother, just skip the port
1177 */
1178 if (index >= MAX_LEGACY_SERIAL_PORTS)
1179 goto next_port;
1180 if (index >= old_serial_count)
1181 old_serial_count = index + 1;
1182 /* Check if there is a port who already claimed our slot */
1183 if (serial_ports[index].iobase != 0) {
1184 /* if we still have some room, move it, else override */
1185 if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
1186 DBG("Moved legacy port %d -> %d\n", index,
1187 old_serial_count);
1188 serial_ports[old_serial_count++] =
1189 serial_ports[index];
1190 } else {
1191 DBG("Replacing legacy port %d\n", index);
1192 }
1193 }
1194 }
1195 if (index >= MAX_LEGACY_SERIAL_PORTS)
1196 goto next_port;
1197 if (index >= old_serial_count)
1198 old_serial_count = index + 1;
1199
1200 /* Now fill the entry */
1201 memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
1202 serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
1203 serial_ports[index].iobase = reg->address;
1204 serial_ports[index].irq = interrupts ? interrupts[0] : 0;
1205 serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;
1206
1207 DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
1208 index,
1209 serial_ports[index].iobase,
1210 serial_ports[index].irq,
1211 serial_ports[index].uartclk);
1212
1213 /* Get phys address of IO reg for port 1 */
1214 if (index != 0)
1215 goto next_port;
1216
1217 pci = of_get_parent(isa);
1218 if (!pci) {
1219 DBG("%s: no pci parent found\n", np->full_name);
1220 goto next_port;
1221 }
1222
1223 rangesp = (u32 *)get_property(pci, "ranges", &rlen);
1224 if (rangesp == NULL) {
1225 of_node_put(pci);
1226 goto next_port;
1227 }
1228 rlen /= 4;
1229
1230 /* we need the #size-cells of the PCI bridge node itself */
1231 phys_size = 1;
1232 sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
1233 if (sizeprop != NULL)
1234 phys_size = *sizeprop;
1235 /* we need the parent #addr-cells */
1236 addr_size = prom_n_addr_cells(pci);
1237 rentsize = 3 + addr_size + phys_size;
1238 io_base = 0;
1239 for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
1240 if (((rangesp[0] >> 24) & 0x3) != 1)
1241 continue; /* not IO space */
1242 io_base = rangesp[3];
1243 if (addr_size == 2)
1244 io_base = (io_base << 32) | rangesp[4];
1245 }
1246 if (io_base != 0) {
1247 *physport = io_base + reg->address;
1248 if (default_speed && spd)
1249 *default_speed = *spd;
1250 }
1251 of_node_put(pci);
1252 next_port:
1253 of_node_put(isa);
1254 }
1255
1256 DBG(" <- generic_find_legacy_serial_port()\n");
1257}
1258
/* Platform device exposing serial_ports[] to the 8250 driver; registered
 * from serial_dev_init() below. */
static struct platform_device serial_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = serial_ports,
	},
};
1266
1267static int __init serial_dev_init(void)
1268{
1269 return platform_device_register(&serial_device);
1270}
1271arch_initcall(serial_dev_init);
1272
1273#endif /* CONFIG_PPC_ISERIES */
1274
1275int check_legacy_ioport(unsigned long base_port)
1276{
1277 if (ppc_md.check_legacy_ioport == NULL)
1278 return 0;
1279 return ppc_md.check_legacy_ioport(base_port);
1280}
1281EXPORT_SYMBOL(check_legacy_ioport);
1282
1283#ifdef CONFIG_XMON
1284static int __init early_xmon(char *p)
1285{
1286 /* ensure xmon is enabled */
1287 if (p) {
1288 if (strncmp(p, "on", 2) == 0)
1289 xmon_init(1);
1290 if (strncmp(p, "off", 3) == 0)
1291 xmon_init(0);
1292 if (strncmp(p, "early", 5) != 0)
1293 return 0;
1294 }
1295 xmon_init(1);
1296 debugger(NULL);
1297
1298 return 0;
1299}
1300early_param("xmon", early_xmon);
1301#endif
1302
1303void cpu_die(void)
1304{
1305 if (ppc_md.cpu_die)
1306 ppc_md.cpu_die();
1307}
diff --git a/arch/ppc64/kernel/signal32.c b/arch/powerpc/kernel/signal_32.c
index a8b7a5a56bb4..92452b2db26a 100644
--- a/arch/ppc64/kernel/signal32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1,56 +1,353 @@
1/* 1/*
2 * signal32.c: Support 32bit signal syscalls. 2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 * 3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Copyright (C) 2001 IBM 6 * Copyright (C) 2001 IBM
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
7 * 9 *
8 * These routines maintain argument size conversion between 32bit and 64bit 10 * Derived from "arch/i386/kernel/signal.c"
9 * environment. 11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
10 * 13 *
11 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 15 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 16 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 17 * 2 of the License, or (at your option) any later version.
15 */ 18 */
16 19
17#include <linux/config.h> 20#include <linux/config.h>
18#include <linux/sched.h> 21#include <linux/sched.h>
19#include <linux/mm.h> 22#include <linux/mm.h>
20#include <linux/smp.h> 23#include <linux/smp.h>
21#include <linux/smp_lock.h> 24#include <linux/smp_lock.h>
22#include <linux/kernel.h> 25#include <linux/kernel.h>
23#include <linux/signal.h> 26#include <linux/signal.h>
24#include <linux/syscalls.h>
25#include <linux/errno.h> 27#include <linux/errno.h>
26#include <linux/elf.h> 28#include <linux/elf.h>
29#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
27#include <linux/compat.h> 31#include <linux/compat.h>
28#include <linux/ptrace.h> 32#include <linux/ptrace.h>
29#include <asm/ppc32.h> 33#else
34#include <linux/wait.h>
35#include <linux/ptrace.h>
36#include <linux/unistd.h>
37#include <linux/stddef.h>
38#include <linux/tty.h>
39#include <linux/binfmts.h>
40#include <linux/suspend.h>
41#endif
42
30#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/cacheflush.h>
45#ifdef CONFIG_PPC64
46#include <asm/ppc32.h>
31#include <asm/ppcdebug.h> 47#include <asm/ppcdebug.h>
32#include <asm/unistd.h> 48#include <asm/unistd.h>
33#include <asm/cacheflush.h>
34#include <asm/vdso.h> 49#include <asm/vdso.h>
50#else
51#include <asm/ucontext.h>
52#include <asm/pgtable.h>
53#endif
35 54
36#define DEBUG_SIG 0 55#undef DEBUG_SIG
37 56
38#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 57#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
39 58
40#define GP_REGS_SIZE32 min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32)) 59#ifdef CONFIG_PPC64
60#define do_signal do_signal32
61#define sys_sigsuspend compat_sys_sigsuspend
62#define sys_rt_sigsuspend compat_sys_rt_sigsuspend
63#define sys_rt_sigreturn compat_sys_rt_sigreturn
64#define sys_sigaction compat_sys_sigaction
65#define sys_swapcontext compat_sys_swapcontext
66#define sys_sigreturn compat_sys_sigreturn
67
68#define old_sigaction old_sigaction32
69#define sigcontext sigcontext32
70#define mcontext mcontext32
71#define ucontext ucontext32
72
73/*
74 * Returning 0 means we return to userspace via
75 * ret_from_except and thus restore all user
76 * registers from *regs. This is what we need
77 * to do when a signal has been delivered.
78 */
79#define sigreturn_exit(regs) return 0
80
81#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
82#undef __SIGNAL_FRAMESIZE
83#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
84#undef ELF_NVRREG
85#define ELF_NVRREG ELF_NVRREG32
86
87/*
88 * Functions for flipping sigsets (thanks to brain dead generic
89 * implementation that makes things simple for little endian only)
90 */
91static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
92{
93 compat_sigset_t cset;
94
95 switch (_NSIG_WORDS) {
96 case 4: cset.sig[5] = set->sig[3] & 0xffffffffull;
97 cset.sig[7] = set->sig[3] >> 32;
98 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
99 cset.sig[5] = set->sig[2] >> 32;
100 case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
101 cset.sig[3] = set->sig[1] >> 32;
102 case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
103 cset.sig[1] = set->sig[0] >> 32;
104 }
105 return copy_to_user(uset, &cset, sizeof(*uset));
106}
107
108static inline int get_sigset_t(sigset_t *set,
109 const compat_sigset_t __user *uset)
110{
111 compat_sigset_t s32;
112
113 if (copy_from_user(&s32, uset, sizeof(*uset)))
114 return -EFAULT;
115
116 /*
117 * Swap the 2 words of the 64-bit sigset_t (they are stored
118 * in the "wrong" endian in 32-bit user storage).
119 */
120 switch (_NSIG_WORDS) {
121 case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
122 case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
123 case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
124 case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
125 }
126 return 0;
127}
128
129static inline int get_old_sigaction(struct k_sigaction *new_ka,
130 struct old_sigaction __user *act)
131{
132 compat_old_sigset_t mask;
133 compat_uptr_t handler, restorer;
134
135 if (get_user(handler, &act->sa_handler) ||
136 __get_user(restorer, &act->sa_restorer) ||
137 __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
138 __get_user(mask, &act->sa_mask))
139 return -EFAULT;
140 new_ka->sa.sa_handler = compat_ptr(handler);
141 new_ka->sa.sa_restorer = compat_ptr(restorer);
142 siginitset(&new_ka->sa.sa_mask, mask);
143 return 0;
144}
145
146static inline compat_uptr_t to_user_ptr(void *kp)
147{
148 return (compat_uptr_t)(u64)kp;
149}
150
151#define from_user_ptr(p) compat_ptr(p)
152
153static inline int save_general_regs(struct pt_regs *regs,
154 struct mcontext __user *frame)
155{
156 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
157 int i;
158
159 for (i = 0; i <= PT_RESULT; i ++)
160 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
161 return -EFAULT;
162 return 0;
163}
164
165static inline int restore_general_regs(struct pt_regs *regs,
166 struct mcontext __user *sr)
167{
168 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
169 int i;
170
171 for (i = 0; i <= PT_RESULT; i++) {
172 if ((i == PT_MSR) || (i == PT_SOFTE))
173 continue;
174 if (__get_user(gregs[i], &sr->mc_gregs[i]))
175 return -EFAULT;
176 }
177 return 0;
178}
179
180#else /* CONFIG_PPC64 */
181
182extern void sigreturn_exit(struct pt_regs *);
183
184#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
185
186static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
187{
188 return copy_to_user(uset, set, sizeof(*uset));
189}
190
191static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
192{
193 return copy_from_user(set, uset, sizeof(*uset));
194}
195
196static inline int get_old_sigaction(struct k_sigaction *new_ka,
197 struct old_sigaction __user *act)
198{
199 old_sigset_t mask;
200
201 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
202 __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
203 __get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
204 return -EFAULT;
205 __get_user(new_ka->sa.sa_flags, &act->sa_flags);
206 __get_user(mask, &act->sa_mask);
207 siginitset(&new_ka->sa.sa_mask, mask);
208 return 0;
209}
210
211#define to_user_ptr(p) (p)
212#define from_user_ptr(p) (p)
213
214static inline int save_general_regs(struct pt_regs *regs,
215 struct mcontext __user *frame)
216{
217 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
218}
219
220static inline int restore_general_regs(struct pt_regs *regs,
221 struct mcontext __user *sr)
222{
223 /* copy up to but not including MSR */
224 if (__copy_from_user(regs, &sr->mc_gregs,
225 PT_MSR * sizeof(elf_greg_t)))
226 return -EFAULT;
227 /* copy from orig_r3 (the word after the MSR) up to the end */
228 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
229 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
230 return -EFAULT;
231 return 0;
232}
233
234#endif /* CONFIG_PPC64 */
235
236int do_signal(sigset_t *oldset, struct pt_regs *regs);
237
238/*
239 * Atomically swap in the new signal mask, and wait for a signal.
240 */
241long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
242 struct pt_regs *regs)
243{
244 sigset_t saveset;
245
246 mask &= _BLOCKABLE;
247 spin_lock_irq(&current->sighand->siglock);
248 saveset = current->blocked;
249 siginitset(&current->blocked, mask);
250 recalc_sigpending();
251 spin_unlock_irq(&current->sighand->siglock);
252
253 regs->result = -EINTR;
254 regs->gpr[3] = EINTR;
255 regs->ccr |= 0x10000000;
256 while (1) {
257 current->state = TASK_INTERRUPTIBLE;
258 schedule();
259 if (do_signal(&saveset, regs))
260 sigreturn_exit(regs);
261 }
262}
263
264long sys_rt_sigsuspend(
265#ifdef CONFIG_PPC64
266 compat_sigset_t __user *unewset,
267#else
268 sigset_t __user *unewset,
269#endif
270 size_t sigsetsize, int p3, int p4,
271 int p6, int p7, struct pt_regs *regs)
272{
273 sigset_t saveset, newset;
274
275 /* XXX: Don't preclude handling different sized sigset_t's. */
276 if (sigsetsize != sizeof(sigset_t))
277 return -EINVAL;
278
279 if (get_sigset_t(&newset, unewset))
280 return -EFAULT;
281 sigdelsetmask(&newset, ~_BLOCKABLE);
282
283 spin_lock_irq(&current->sighand->siglock);
284 saveset = current->blocked;
285 current->blocked = newset;
286 recalc_sigpending();
287 spin_unlock_irq(&current->sighand->siglock);
288
289 regs->result = -EINTR;
290 regs->gpr[3] = EINTR;
291 regs->ccr |= 0x10000000;
292 while (1) {
293 current->state = TASK_INTERRUPTIBLE;
294 schedule();
295 if (do_signal(&saveset, regs))
296 sigreturn_exit(regs);
297 }
298}
299
300#ifdef CONFIG_PPC32
301long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
302 int r6, int r7, int r8, struct pt_regs *regs)
303{
304 return do_sigaltstack(uss, uoss, regs->gpr[1]);
305}
306#endif
307
308long sys_sigaction(int sig, struct old_sigaction __user *act,
309 struct old_sigaction __user *oact)
310{
311 struct k_sigaction new_ka, old_ka;
312 int ret;
313
314#ifdef CONFIG_PPC64
315 if (sig < 0)
316 sig = -sig;
317#endif
318
319 if (act) {
320 if (get_old_sigaction(&new_ka, act))
321 return -EFAULT;
322 }
323
324 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
325 if (!ret && oact) {
326 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
327 __put_user(to_user_ptr(old_ka.sa.sa_handler),
328 &oact->sa_handler) ||
329 __put_user(to_user_ptr(old_ka.sa.sa_restorer),
330 &oact->sa_restorer) ||
331 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
332 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
333 return -EFAULT;
334 }
335
336 return ret;
337}
41 338
42/* 339/*
43 * When we have signals to deliver, we set up on the 340 * When we have signals to deliver, we set up on the
44 * user stack, going down from the original stack pointer: 341 * user stack, going down from the original stack pointer:
45 * a sigregs32 struct 342 * a sigregs struct
46 * a sigcontext32 struct 343 * a sigcontext struct
47 * a gap of __SIGNAL_FRAMESIZE32 bytes 344 * a gap of __SIGNAL_FRAMESIZE bytes
48 * 345 *
49 * Each of these things must be a multiple of 16 bytes in size. 346 * Each of these things must be a multiple of 16 bytes in size.
50 * 347 *
51 */ 348 */
52struct sigregs32 { 349struct sigregs {
53 struct mcontext32 mctx; /* all the register values */ 350 struct mcontext mctx; /* all the register values */
54 /* 351 /*
55 * Programs using the rs6000/xcoff abi can save up to 19 gp 352 * Programs using the rs6000/xcoff abi can save up to 19 gp
56 * regs and 18 fp regs below sp before decrementing it. 353 * regs and 18 fp regs below sp before decrementing it.
@@ -64,17 +361,21 @@ struct sigregs32 {
64/* 361/*
65 * When we have rt signals to deliver, we set up on the 362 * When we have rt signals to deliver, we set up on the
66 * user stack, going down from the original stack pointer: 363 * user stack, going down from the original stack pointer:
67 * one rt_sigframe32 struct (siginfo + ucontext + ABI gap) 364 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
68 * a gap of __SIGNAL_FRAMESIZE32+16 bytes 365 * a gap of __SIGNAL_FRAMESIZE+16 bytes
69 * (the +16 is to get the siginfo and ucontext32 in the same 366 * (the +16 is to get the siginfo and ucontext in the same
70 * positions as in older kernels). 367 * positions as in older kernels).
71 * 368 *
72 * Each of these things must be a multiple of 16 bytes in size. 369 * Each of these things must be a multiple of 16 bytes in size.
73 * 370 *
74 */ 371 */
75struct rt_sigframe32 { 372struct rt_sigframe {
76 compat_siginfo_t info; 373#ifdef CONFIG_PPC64
77 struct ucontext32 uc; 374 compat_siginfo_t info;
375#else
376 struct siginfo info;
377#endif
378 struct ucontext uc;
78 /* 379 /*
79 * Programs using the rs6000/xcoff abi can save up to 19 gp 380 * Programs using the rs6000/xcoff abi can save up to 19 gp
80 * regs and 18 fp regs below sp before decrementing it. 381 * regs and 18 fp regs below sp before decrementing it.
@@ -82,66 +383,24 @@ struct rt_sigframe32 {
82 int abigap[56]; 383 int abigap[56];
83}; 384};
84 385
85
86/*
87 * Common utility functions used by signal and context support
88 *
89 */
90
91/*
92 * Restore the user process's signal mask
93 * (implemented in signal.c)
94 */
95extern void restore_sigmask(sigset_t *set);
96
97/*
98 * Functions for flipping sigsets (thanks to brain dead generic
99 * implementation that makes things simple for little endian only
100 */
101static inline void compat_from_sigset(compat_sigset_t *compat, sigset_t *set)
102{
103 switch (_NSIG_WORDS) {
104 case 4: compat->sig[5] = set->sig[3] & 0xffffffffull ;
105 compat->sig[7] = set->sig[3] >> 32;
106 case 3: compat->sig[4] = set->sig[2] & 0xffffffffull ;
107 compat->sig[5] = set->sig[2] >> 32;
108 case 2: compat->sig[2] = set->sig[1] & 0xffffffffull ;
109 compat->sig[3] = set->sig[1] >> 32;
110 case 1: compat->sig[0] = set->sig[0] & 0xffffffffull ;
111 compat->sig[1] = set->sig[0] >> 32;
112 }
113}
114
115static inline void sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
116{
117 switch (_NSIG_WORDS) {
118 case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
119 case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
120 case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
121 case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
122 }
123}
124
125
126/* 386/*
127 * Save the current user registers on the user stack. 387 * Save the current user registers on the user stack.
128 * We only save the altivec registers if the process has used 388 * We only save the altivec/spe registers if the process has used
129 * altivec instructions at some point. 389 * altivec/spe instructions at some point.
130 */ 390 */
131static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame, int sigret) 391static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
392 int sigret)
132{ 393{
133 elf_greg_t64 *gregs = (elf_greg_t64 *)regs; 394#ifdef CONFIG_PPC32
134 int i, err = 0; 395 CHECK_FULL_REGS(regs);
135 396#endif
136 /* Make sure floating point registers are stored in regs */ 397 /* Make sure floating point registers are stored in regs */
137 flush_fp_to_thread(current); 398 flush_fp_to_thread(current);
138 399
139 /* save general and floating-point registers */ 400 /* save general and floating-point registers */
140 for (i = 0; i <= PT_RESULT; i ++) 401 if (save_general_regs(regs, frame) ||
141 err |= __put_user((unsigned int)gregs[i], &frame->mc_gregs[i]); 402 __copy_to_user(&frame->mc_fregs, current->thread.fpr,
142 err |= __copy_to_user(&frame->mc_fregs, current->thread.fpr, 403 ELF_NFPREG * sizeof(double)))
143 ELF_NFPREG * sizeof(double));
144 if (err)
145 return 1; 404 return 1;
146 405
147 current->thread.fpscr = 0; /* turn off all fp exceptions */ 406 current->thread.fpscr = 0; /* turn off all fp exceptions */
@@ -151,7 +410,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
151 if (current->thread.used_vr) { 410 if (current->thread.used_vr) {
152 flush_altivec_to_thread(current); 411 flush_altivec_to_thread(current);
153 if (__copy_to_user(&frame->mc_vregs, current->thread.vr, 412 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
154 ELF_NVRREG32 * sizeof(vector128))) 413 ELF_NVRREG * sizeof(vector128)))
155 return 1; 414 return 1;
156 /* set MSR_VEC in the saved MSR value to indicate that 415 /* set MSR_VEC in the saved MSR value to indicate that
157 frame->mc_vregs contains valid data */ 416 frame->mc_vregs contains valid data */
@@ -169,6 +428,25 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
169 return 1; 428 return 1;
170#endif /* CONFIG_ALTIVEC */ 429#endif /* CONFIG_ALTIVEC */
171 430
431#ifdef CONFIG_SPE
432 /* save spe registers */
433 if (current->thread.used_spe) {
434 flush_spe_to_thread(current);
435 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
436 ELF_NEVRREG * sizeof(u32)))
437 return 1;
438 /* set MSR_SPE in the saved MSR value to indicate that
439 frame->mc_vregs contains valid data */
440 if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
441 return 1;
442 }
443 /* else assert((regs->msr & MSR_SPE) == 0) */
444
445 /* We always copy to/from spefscr */
446 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
447 return 1;
448#endif /* CONFIG_SPE */
449
172 if (sigret) { 450 if (sigret) {
173 /* Set up the sigreturn trampoline: li r0,sigret; sc */ 451 /* Set up the sigreturn trampoline: li r0,sigret; sc */
174 if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) 452 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -186,13 +464,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
186 * (except for MSR). 464 * (except for MSR).
187 */ 465 */
188static long restore_user_regs(struct pt_regs *regs, 466static long restore_user_regs(struct pt_regs *regs,
189 struct mcontext32 __user *sr, int sig) 467 struct mcontext __user *sr, int sig)
190{ 468{
191 elf_greg_t64 *gregs = (elf_greg_t64 *)regs; 469 long err;
192 int i;
193 long err = 0;
194 unsigned int save_r2 = 0; 470 unsigned int save_r2 = 0;
195#ifdef CONFIG_ALTIVEC 471#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
196 unsigned long msr; 472 unsigned long msr;
197#endif 473#endif
198 474
@@ -202,11 +478,7 @@ static long restore_user_regs(struct pt_regs *regs,
202 */ 478 */
203 if (!sig) 479 if (!sig)
204 save_r2 = (unsigned int)regs->gpr[2]; 480 save_r2 = (unsigned int)regs->gpr[2];
205 for (i = 0; i <= PT_RESULT; i++) { 481 err = restore_general_regs(regs, sr);
206 if ((i == PT_MSR) || (i == PT_SOFTE))
207 continue;
208 err |= __get_user(gregs[i], &sr->mc_gregs[i]);
209 }
210 if (!sig) 482 if (!sig)
211 regs->gpr[2] = (unsigned long) save_r2; 483 regs->gpr[2] = (unsigned long) save_r2;
212 if (err) 484 if (err)
@@ -229,135 +501,51 @@ static long restore_user_regs(struct pt_regs *regs,
229 sizeof(sr->mc_vregs))) 501 sizeof(sr->mc_vregs)))
230 return 1; 502 return 1;
231 } else if (current->thread.used_vr) 503 } else if (current->thread.used_vr)
232 memset(current->thread.vr, 0, ELF_NVRREG32 * sizeof(vector128)); 504 memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
233 505
234 /* Always get VRSAVE back */ 506 /* Always get VRSAVE back */
235 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) 507 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
236 return 1; 508 return 1;
237#endif /* CONFIG_ALTIVEC */ 509#endif /* CONFIG_ALTIVEC */
238 510
511#ifdef CONFIG_SPE
512 /* force the process to reload the spe registers from
513 current->thread when it next does spe instructions */
514 regs->msr &= ~MSR_SPE;
515 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
516 /* restore spe registers from the stack */
517 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
518 ELF_NEVRREG * sizeof(u32)))
519 return 1;
520 } else if (current->thread.used_spe)
521 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
522
523 /* Always get SPEFSCR back */
524 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
525 return 1;
526#endif /* CONFIG_SPE */
527
239#ifndef CONFIG_SMP 528#ifndef CONFIG_SMP
240 preempt_disable(); 529 preempt_disable();
241 if (last_task_used_math == current) 530 if (last_task_used_math == current)
242 last_task_used_math = NULL; 531 last_task_used_math = NULL;
243 if (last_task_used_altivec == current) 532 if (last_task_used_altivec == current)
244 last_task_used_altivec = NULL; 533 last_task_used_altivec = NULL;
534#ifdef CONFIG_SPE
535 if (last_task_used_spe == current)
536 last_task_used_spe = NULL;
537#endif
245 preempt_enable(); 538 preempt_enable();
246#endif 539#endif
247 return 0; 540 return 0;
248} 541}
249 542
250 543#ifdef CONFIG_PPC64
251/* 544long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
252 * Start of nonRT signal support
253 *
254 * sigset_t is 32 bits for non-rt signals
255 *
256 * System Calls
257 * sigaction sys32_sigaction
258 * sigreturn sys32_sigreturn
259 *
260 * Note sigsuspend has no special 32 bit routine - uses the 64 bit routine
261 *
262 * Other routines
263 * setup_frame32
264 */
265
266/*
267 * Atomically swap in the new signal mask, and wait for a signal.
268 */
269long sys32_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
270 struct pt_regs *regs)
271{
272 sigset_t saveset;
273
274 mask &= _BLOCKABLE;
275 spin_lock_irq(&current->sighand->siglock);
276 saveset = current->blocked;
277 siginitset(&current->blocked, mask);
278 recalc_sigpending();
279 spin_unlock_irq(&current->sighand->siglock);
280
281 regs->result = -EINTR;
282 regs->gpr[3] = EINTR;
283 regs->ccr |= 0x10000000;
284 while (1) {
285 current->state = TASK_INTERRUPTIBLE;
286 schedule();
287 if (do_signal32(&saveset, regs))
288 /*
289 * Returning 0 means we return to userspace via
290 * ret_from_except and thus restore all user
291 * registers from *regs. This is what we need
292 * to do when a signal has been delivered.
293 */
294 return 0;
295 }
296}
297
298long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
299 struct old_sigaction32 __user *oact)
300{
301 struct k_sigaction new_ka, old_ka;
302 int ret;
303
304 if (sig < 0)
305 sig = -sig;
306
307 if (act) {
308 compat_old_sigset_t mask;
309 compat_uptr_t handler, restorer;
310
311 if (get_user(handler, &act->sa_handler) ||
312 __get_user(restorer, &act->sa_restorer) ||
313 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
314 __get_user(mask, &act->sa_mask))
315 return -EFAULT;
316 new_ka.sa.sa_handler = compat_ptr(handler);
317 new_ka.sa.sa_restorer = compat_ptr(restorer);
318 siginitset(&new_ka.sa.sa_mask, mask);
319 }
320
321 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
322 if (!ret && oact) {
323 if (put_user((long)old_ka.sa.sa_handler, &oact->sa_handler) ||
324 __put_user((long)old_ka.sa.sa_restorer, &oact->sa_restorer) ||
325 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
326 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
327 return -EFAULT;
328 }
329
330 return ret;
331}
332
333
334
335/*
336 * Start of RT signal support
337 *
338 * sigset_t is 64 bits for rt signals
339 *
340 * System Calls
341 * sigaction sys32_rt_sigaction
342 * sigpending sys32_rt_sigpending
343 * sigprocmask sys32_rt_sigprocmask
344 * sigreturn sys32_rt_sigreturn
345 * sigqueueinfo sys32_rt_sigqueueinfo
346 * sigsuspend sys32_rt_sigsuspend
347 *
348 * Other routines
349 * setup_rt_frame32
350 * copy_siginfo_to_user32
351 * siginfo32to64
352 */
353
354
355long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
356 struct sigaction32 __user *oact, size_t sigsetsize) 545 struct sigaction32 __user *oact, size_t sigsetsize)
357{ 546{
358 struct k_sigaction new_ka, old_ka; 547 struct k_sigaction new_ka, old_ka;
359 int ret; 548 int ret;
360 compat_sigset_t set32;
361 549
362 /* XXX: Don't preclude handling different sized sigset_t's. */ 550 /* XXX: Don't preclude handling different sized sigset_t's. */
363 if (sigsetsize != sizeof(compat_sigset_t)) 551 if (sigsetsize != sizeof(compat_sigset_t))
@@ -368,9 +556,7 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
368 556
369 ret = get_user(handler, &act->sa_handler); 557 ret = get_user(handler, &act->sa_handler);
370 new_ka.sa.sa_handler = compat_ptr(handler); 558 new_ka.sa.sa_handler = compat_ptr(handler);
371 ret |= __copy_from_user(&set32, &act->sa_mask, 559 ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
372 sizeof(compat_sigset_t));
373 sigset_from_compat(&new_ka.sa.sa_mask, &set32);
374 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); 560 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
375 if (ret) 561 if (ret)
376 return -EFAULT; 562 return -EFAULT;
@@ -378,10 +564,8 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
378 564
379 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 565 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
380 if (!ret && oact) { 566 if (!ret && oact) {
381 compat_from_sigset(&set32, &old_ka.sa.sa_mask);
382 ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler); 567 ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
383 ret |= __copy_to_user(&oact->sa_mask, &set32, 568 ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
384 sizeof(compat_sigset_t));
385 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 569 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
386 } 570 }
387 return ret; 571 return ret;
@@ -394,41 +578,37 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
394 * of a signed int (msr in 32-bit mode) and the register representation 578 * of a signed int (msr in 32-bit mode) and the register representation
395 * of a signed int (msr in 64-bit mode) is performed. 579 * of a signed int (msr in 64-bit mode) is performed.
396 */ 580 */
397long sys32_rt_sigprocmask(u32 how, compat_sigset_t __user *set, 581long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
398 compat_sigset_t __user *oset, size_t sigsetsize) 582 compat_sigset_t __user *oset, size_t sigsetsize)
399{ 583{
400 sigset_t s; 584 sigset_t s;
401 sigset_t __user *up; 585 sigset_t __user *up;
402 compat_sigset_t s32;
403 int ret; 586 int ret;
404 mm_segment_t old_fs = get_fs(); 587 mm_segment_t old_fs = get_fs();
405 588
406 if (set) { 589 if (set) {
407 if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) 590 if (get_sigset_t(&s, set))
408 return -EFAULT; 591 return -EFAULT;
409 sigset_from_compat(&s, &s32);
410 } 592 }
411 593
412 set_fs(KERNEL_DS); 594 set_fs(KERNEL_DS);
413 /* This is valid because of the set_fs() */ 595 /* This is valid because of the set_fs() */
414 up = (sigset_t __user *) &s; 596 up = (sigset_t __user *) &s;
415 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL, 597 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
416 sigsetsize); 598 sigsetsize);
417 set_fs(old_fs); 599 set_fs(old_fs);
418 if (ret) 600 if (ret)
419 return ret; 601 return ret;
420 if (oset) { 602 if (oset) {
421 compat_from_sigset(&s32, &s); 603 if (put_sigset_t(oset, &s))
422 if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
423 return -EFAULT; 604 return -EFAULT;
424 } 605 }
425 return 0; 606 return 0;
426} 607}
427 608
428long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize) 609long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
429{ 610{
430 sigset_t s; 611 sigset_t s;
431 compat_sigset_t s32;
432 int ret; 612 int ret;
433 mm_segment_t old_fs = get_fs(); 613 mm_segment_t old_fs = get_fs();
434 614
@@ -437,8 +617,7 @@ long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
437 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); 617 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
438 set_fs(old_fs); 618 set_fs(old_fs);
439 if (!ret) { 619 if (!ret) {
440 compat_from_sigset(&s32, &s); 620 if (put_sigset_t(set, &s))
441 if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
442 return -EFAULT; 621 return -EFAULT;
443 } 622 }
444 return ret; 623 return ret;
@@ -500,6 +679,8 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
500 return err; 679 return err;
501} 680}
502 681
682#define copy_siginfo_to_user copy_siginfo_to_user32
683
503/* 684/*
504 * Note: it is necessary to treat pid and sig as unsigned ints, with the 685 * Note: it is necessary to treat pid and sig as unsigned ints, with the
505 * corresponding cast to a signed int to insure that the proper conversion 686 * corresponding cast to a signed int to insure that the proper conversion
@@ -507,12 +688,12 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
507 * (msr in 32-bit mode) and the register representation of a signed int 688 * (msr in 32-bit mode) and the register representation of a signed int
508 * (msr in 64-bit mode) is performed. 689 * (msr in 64-bit mode) is performed.
509 */ 690 */
510long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo) 691long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
511{ 692{
512 siginfo_t info; 693 siginfo_t info;
513 int ret; 694 int ret;
514 mm_segment_t old_fs = get_fs(); 695 mm_segment_t old_fs = get_fs();
515 696
516 if (copy_from_user (&info, uinfo, 3*sizeof(int)) || 697 if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
517 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32)) 698 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32))
518 return -EFAULT; 699 return -EFAULT;
@@ -522,58 +703,14 @@ long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
522 set_fs (old_fs); 703 set_fs (old_fs);
523 return ret; 704 return ret;
524} 705}
525
526int sys32_rt_sigsuspend(compat_sigset_t __user * unewset, size_t sigsetsize, int p3,
527 int p4, int p6, int p7, struct pt_regs *regs)
528{
529 sigset_t saveset, newset;
530 compat_sigset_t s32;
531
532 /* XXX: Don't preclude handling different sized sigset_t's. */
533 if (sigsetsize != sizeof(sigset_t))
534 return -EINVAL;
535
536 if (copy_from_user(&s32, unewset, sizeof(s32)))
537 return -EFAULT;
538
539 /*
540 * Swap the 2 words of the 64-bit sigset_t (they are stored
541 * in the "wrong" endian in 32-bit user storage).
542 */
543 sigset_from_compat(&newset, &s32);
544
545 sigdelsetmask(&newset, ~_BLOCKABLE);
546 spin_lock_irq(&current->sighand->siglock);
547 saveset = current->blocked;
548 current->blocked = newset;
549 recalc_sigpending();
550 spin_unlock_irq(&current->sighand->siglock);
551
552 regs->result = -EINTR;
553 regs->gpr[3] = EINTR;
554 regs->ccr |= 0x10000000;
555 while (1) {
556 current->state = TASK_INTERRUPTIBLE;
557 schedule();
558 if (do_signal32(&saveset, regs))
559 /*
560 * Returning 0 means we return to userspace via
561 * ret_from_except and thus restore all user
562 * registers from *regs. This is what we need
563 * to do when a signal has been delivered.
564 */
565 return 0;
566 }
567}
568
569/* 706/*
570 * Start Alternate signal stack support 707 * Start Alternate signal stack support
571 * 708 *
572 * System Calls 709 * System Calls
573 * sigaltatck sys32_sigaltstack 710 * sigaltatck compat_sys_sigaltstack
574 */ 711 */
575 712
576int sys32_sigaltstack(u32 __new, u32 __old, int r5, 713int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
577 int r6, int r7, int r8, struct pt_regs *regs) 714 int r6, int r7, int r8, struct pt_regs *regs)
578{ 715{
579 stack_32_t __user * newstack = (stack_32_t __user *)(long) __new; 716 stack_32_t __user * newstack = (stack_32_t __user *)(long) __new;
@@ -615,76 +752,94 @@ int sys32_sigaltstack(u32 __new, u32 __old, int r5,
615 return -EFAULT; 752 return -EFAULT;
616 return ret; 753 return ret;
617} 754}
755#endif /* CONFIG_PPC64 */
756
618 757
758/*
759 * Restore the user process's signal mask
760 */
761#ifdef CONFIG_PPC64
762extern void restore_sigmask(sigset_t *set);
763#else /* CONFIG_PPC64 */
764static void restore_sigmask(sigset_t *set)
765{
766 sigdelsetmask(set, ~_BLOCKABLE);
767 spin_lock_irq(&current->sighand->siglock);
768 current->blocked = *set;
769 recalc_sigpending();
770 spin_unlock_irq(&current->sighand->siglock);
771}
772#endif
619 773
620/* 774/*
621 * Set up a signal frame for a "real-time" signal handler 775 * Set up a signal frame for a "real-time" signal handler
622 * (one which gets siginfo). 776 * (one which gets siginfo).
623 */ 777 */
624static int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, 778static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
625 siginfo_t *info, sigset_t *oldset, 779 siginfo_t *info, sigset_t *oldset,
626 struct pt_regs * regs, unsigned long newsp) 780 struct pt_regs *regs, unsigned long newsp)
627{ 781{
628 struct rt_sigframe32 __user *rt_sf; 782 struct rt_sigframe __user *rt_sf;
629 struct mcontext32 __user *frame; 783 struct mcontext __user *frame;
630 unsigned long origsp = newsp; 784 unsigned long origsp = newsp;
631 compat_sigset_t c_oldset;
632 785
633 /* Set up Signal Frame */ 786 /* Set up Signal Frame */
634 /* Put a Real Time Context onto stack */ 787 /* Put a Real Time Context onto stack */
635 newsp -= sizeof(*rt_sf); 788 newsp -= sizeof(*rt_sf);
636 rt_sf = (struct rt_sigframe32 __user *)newsp; 789 rt_sf = (struct rt_sigframe __user *)newsp;
637 790
638 /* create a stack frame for the caller of the handler */ 791 /* create a stack frame for the caller of the handler */
639 newsp -= __SIGNAL_FRAMESIZE32 + 16; 792 newsp -= __SIGNAL_FRAMESIZE + 16;
640 793
641 if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp)) 794 if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
642 goto badframe; 795 goto badframe;
643 796
644 compat_from_sigset(&c_oldset, oldset);
645
646 /* Put the siginfo & fill in most of the ucontext */ 797 /* Put the siginfo & fill in most of the ucontext */
647 if (copy_siginfo_to_user32(&rt_sf->info, info) 798 if (copy_siginfo_to_user(&rt_sf->info, info)
648 || __put_user(0, &rt_sf->uc.uc_flags) 799 || __put_user(0, &rt_sf->uc.uc_flags)
649 || __put_user(0, &rt_sf->uc.uc_link) 800 || __put_user(0, &rt_sf->uc.uc_link)
650 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp) 801 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
651 || __put_user(sas_ss_flags(regs->gpr[1]), 802 || __put_user(sas_ss_flags(regs->gpr[1]),
652 &rt_sf->uc.uc_stack.ss_flags) 803 &rt_sf->uc.uc_stack.ss_flags)
653 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size) 804 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
654 || __put_user((u32)(u64)&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs) 805 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
655 || __copy_to_user(&rt_sf->uc.uc_sigmask, &c_oldset, sizeof(c_oldset))) 806 &rt_sf->uc.uc_regs)
807 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
656 goto badframe; 808 goto badframe;
657 809
658 /* Save user registers on the stack */ 810 /* Save user registers on the stack */
659 frame = &rt_sf->uc.uc_mcontext; 811 frame = &rt_sf->uc.uc_mcontext;
660 if (put_user(regs->gpr[1], (u32 __user *)newsp)) 812#ifdef CONFIG_PPC64
661 goto badframe;
662
663 if (vdso32_rt_sigtramp && current->thread.vdso_base) { 813 if (vdso32_rt_sigtramp && current->thread.vdso_base) {
664 if (save_user_regs(regs, frame, 0)) 814 if (save_user_regs(regs, frame, 0))
665 goto badframe; 815 goto badframe;
666 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp; 816 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
667 } else { 817 } else
818#endif
819 {
668 if (save_user_regs(regs, frame, __NR_rt_sigreturn)) 820 if (save_user_regs(regs, frame, __NR_rt_sigreturn))
669 goto badframe; 821 goto badframe;
670 regs->link = (unsigned long) frame->tramp; 822 regs->link = (unsigned long) frame->tramp;
671 } 823 }
672 regs->gpr[1] = (unsigned long) newsp; 824 if (put_user(regs->gpr[1], (u32 __user *)newsp))
825 goto badframe;
826 regs->gpr[1] = newsp;
673 regs->gpr[3] = sig; 827 regs->gpr[3] = sig;
674 regs->gpr[4] = (unsigned long) &rt_sf->info; 828 regs->gpr[4] = (unsigned long) &rt_sf->info;
675 regs->gpr[5] = (unsigned long) &rt_sf->uc; 829 regs->gpr[5] = (unsigned long) &rt_sf->uc;
676 regs->gpr[6] = (unsigned long) rt_sf; 830 regs->gpr[6] = (unsigned long) rt_sf;
677 regs->nip = (unsigned long) ka->sa.sa_handler; 831 regs->nip = (unsigned long) ka->sa.sa_handler;
678 regs->trap = 0; 832 regs->trap = 0;
833#ifdef CONFIG_PPC64
679 regs->result = 0; 834 regs->result = 0;
680 835
681 if (test_thread_flag(TIF_SINGLESTEP)) 836 if (test_thread_flag(TIF_SINGLESTEP))
682 ptrace_notify(SIGTRAP); 837 ptrace_notify(SIGTRAP);
683 838#endif
684 return 1; 839 return 1;
685 840
686badframe: 841badframe:
687#if DEBUG_SIG 842#ifdef DEBUG_SIG
688 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", 843 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
689 regs, frame, newsp); 844 regs, frame, newsp);
690#endif 845#endif
@@ -692,46 +847,50 @@ badframe:
692 return 0; 847 return 0;
693} 848}
694 849
695static long do_setcontext32(struct ucontext32 __user *ucp, struct pt_regs *regs, int sig) 850static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
696{ 851{
697 compat_sigset_t c_set;
698 sigset_t set; 852 sigset_t set;
699 u32 mcp; 853 struct mcontext __user *mcp;
854
855 if (get_sigset_t(&set, &ucp->uc_sigmask))
856 return -EFAULT;
857#ifdef CONFIG_PPC64
858 {
859 u32 cmcp;
700 860
701 if (__copy_from_user(&c_set, &ucp->uc_sigmask, sizeof(c_set)) 861 if (__get_user(cmcp, &ucp->uc_regs))
702 || __get_user(mcp, &ucp->uc_regs)) 862 return -EFAULT;
863 mcp = (struct mcontext __user *)(u64)cmcp;
864 }
865#else
866 if (__get_user(mcp, &ucp->uc_regs))
703 return -EFAULT; 867 return -EFAULT;
704 sigset_from_compat(&set, &c_set); 868#endif
705 restore_sigmask(&set); 869 restore_sigmask(&set);
706 if (restore_user_regs(regs, (struct mcontext32 __user *)(u64)mcp, sig)) 870 if (restore_user_regs(regs, mcp, sig))
707 return -EFAULT; 871 return -EFAULT;
708 872
709 return 0; 873 return 0;
710} 874}
711 875
712/* 876long sys_swapcontext(struct ucontext __user *old_ctx,
713 * Handle {get,set,swap}_context operations for 32 bits processes 877 struct ucontext __user *new_ctx,
714 */
715
716long sys32_swapcontext(struct ucontext32 __user *old_ctx,
717 struct ucontext32 __user *new_ctx,
718 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) 878 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
719{ 879{
720 unsigned char tmp; 880 unsigned char tmp;
721 compat_sigset_t c_set;
722 881
723 /* Context size is for future use. Right now, we only make sure 882 /* Context size is for future use. Right now, we only make sure
724 * we are passed something we understand 883 * we are passed something we understand
725 */ 884 */
726 if (ctx_size < sizeof(struct ucontext32)) 885 if (ctx_size < sizeof(struct ucontext))
727 return -EINVAL; 886 return -EINVAL;
728 887
729 if (old_ctx != NULL) { 888 if (old_ctx != NULL) {
730 compat_from_sigset(&c_set, &current->blocked);
731 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx)) 889 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
732 || save_user_regs(regs, &old_ctx->uc_mcontext, 0) 890 || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
733 || __copy_to_user(&old_ctx->uc_sigmask, &c_set, sizeof(c_set)) 891 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
734 || __put_user((u32)(u64)&old_ctx->uc_mcontext, &old_ctx->uc_regs)) 892 || __put_user(to_user_ptr(&old_ctx->uc_mcontext),
893 &old_ctx->uc_regs))
735 return -EFAULT; 894 return -EFAULT;
736 } 895 }
737 if (new_ctx == NULL) 896 if (new_ctx == NULL)
@@ -752,27 +911,26 @@ long sys32_swapcontext(struct ucontext32 __user *old_ctx,
752 * or if another thread unmaps the region containing the context. 911 * or if another thread unmaps the region containing the context.
753 * We kill the task with a SIGSEGV in this situation. 912 * We kill the task with a SIGSEGV in this situation.
754 */ 913 */
755 if (do_setcontext32(new_ctx, regs, 0)) 914 if (do_setcontext(new_ctx, regs, 0))
756 do_exit(SIGSEGV); 915 do_exit(SIGSEGV);
757 916 sigreturn_exit(regs);
917 /* doesn't actually return back to here */
758 return 0; 918 return 0;
759} 919}
760 920
761long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, 921long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
762 struct pt_regs *regs) 922 struct pt_regs *regs)
763{ 923{
764 struct rt_sigframe32 __user *rt_sf; 924 struct rt_sigframe __user *rt_sf;
765 int ret;
766
767 925
768 /* Always make any pending restarted system calls return -EINTR */ 926 /* Always make any pending restarted system calls return -EINTR */
769 current_thread_info()->restart_block.fn = do_no_restart_syscall; 927 current_thread_info()->restart_block.fn = do_no_restart_syscall;
770 928
771 rt_sf = (struct rt_sigframe32 __user *) 929 rt_sf = (struct rt_sigframe __user *)
772 (regs->gpr[1] + __SIGNAL_FRAMESIZE32 + 16); 930 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
773 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) 931 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
774 goto bad; 932 goto bad;
775 if (do_setcontext32(&rt_sf->uc, regs, 1)) 933 if (do_setcontext(&rt_sf->uc, regs, 1))
776 goto bad; 934 goto bad;
777 935
778 /* 936 /*
@@ -781,62 +939,165 @@ long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
781 * signal return. But other architectures do this and we have 939 * signal return. But other architectures do this and we have
782 * always done it up until now so it is probably better not to 940 * always done it up until now so it is probably better not to
783 * change it. -- paulus 941 * change it. -- paulus
784 * We use the sys32_ version that does the 32/64 bits conversion 942 */
943#ifdef CONFIG_PPC64
944 /*
945 * We use the compat_sys_ version that does the 32/64 bits conversion
785 * and takes userland pointer directly. What about error checking ? 946 * and takes userland pointer directly. What about error checking ?
786 * nobody does any... 947 * nobody does any...
787 */ 948 */
788 sys32_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs); 949 compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
789 950 return (int)regs->result;
790 ret = regs->result; 951#else
791 952 do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
792 return ret; 953 sigreturn_exit(regs); /* doesn't return here */
954 return 0;
955#endif
793 956
794 bad: 957 bad:
795 force_sig(SIGSEGV, current); 958 force_sig(SIGSEGV, current);
796 return 0; 959 return 0;
797} 960}
798 961
962#ifdef CONFIG_PPC32
963int sys_debug_setcontext(struct ucontext __user *ctx,
964 int ndbg, struct sig_dbg_op __user *dbg,
965 int r6, int r7, int r8,
966 struct pt_regs *regs)
967{
968 struct sig_dbg_op op;
969 int i;
970 unsigned long new_msr = regs->msr;
971#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
972 unsigned long new_dbcr0 = current->thread.dbcr0;
973#endif
974
975 for (i=0; i<ndbg; i++) {
976 if (__copy_from_user(&op, dbg, sizeof(op)))
977 return -EFAULT;
978 switch (op.dbg_type) {
979 case SIG_DBG_SINGLE_STEPPING:
980#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
981 if (op.dbg_value) {
982 new_msr |= MSR_DE;
983 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
984 } else {
985 new_msr &= ~MSR_DE;
986 new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
987 }
988#else
989 if (op.dbg_value)
990 new_msr |= MSR_SE;
991 else
992 new_msr &= ~MSR_SE;
993#endif
994 break;
995 case SIG_DBG_BRANCH_TRACING:
996#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
997 return -EINVAL;
998#else
999 if (op.dbg_value)
1000 new_msr |= MSR_BE;
1001 else
1002 new_msr &= ~MSR_BE;
1003#endif
1004 break;
1005
1006 default:
1007 return -EINVAL;
1008 }
1009 }
1010
1011 /* We wait until here to actually install the values in the
1012 registers so if we fail in the above loop, it will not
1013 affect the contents of these registers. After this point,
1014 failure is a problem, anyway, and it's very unlikely unless
1015 the user is really doing something wrong. */
1016 regs->msr = new_msr;
1017#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1018 current->thread.dbcr0 = new_dbcr0;
1019#endif
1020
1021 /*
1022 * If we get a fault copying the context into the kernel's
1023 * image of the user's registers, we can't just return -EFAULT
1024 * because the user's registers will be corrupted. For instance
1025 * the NIP value may have been updated but not some of the
1026 * other registers. Given that we have done the access_ok
1027 * and successfully read the first and last bytes of the region
1028 * above, this should only happen in an out-of-memory situation
1029 * or if another thread unmaps the region containing the context.
1030 * We kill the task with a SIGSEGV in this situation.
1031 */
1032 if (do_setcontext(ctx, regs, 1)) {
1033 force_sig(SIGSEGV, current);
1034 goto out;
1035 }
1036
1037 /*
1038 * It's not clear whether or why it is desirable to save the
1039 * sigaltstack setting on signal delivery and restore it on
1040 * signal return. But other architectures do this and we have
1041 * always done it up until now so it is probably better not to
1042 * change it. -- paulus
1043 */
1044 do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1045
1046 sigreturn_exit(regs);
1047 /* doesn't actually return back to here */
1048
1049 out:
1050 return 0;
1051}
1052#endif
799 1053
800/* 1054/*
801 * OK, we're invoking a handler 1055 * OK, we're invoking a handler
802 */ 1056 */
803static int handle_signal32(unsigned long sig, struct k_sigaction *ka, 1057static int handle_signal(unsigned long sig, struct k_sigaction *ka,
804 siginfo_t *info, sigset_t *oldset, 1058 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
805 struct pt_regs * regs, unsigned long newsp) 1059 unsigned long newsp)
806{ 1060{
807 struct sigcontext32 __user *sc; 1061 struct sigcontext __user *sc;
808 struct sigregs32 __user *frame; 1062 struct sigregs __user *frame;
809 unsigned long origsp = newsp; 1063 unsigned long origsp = newsp;
810 1064
811 /* Set up Signal Frame */ 1065 /* Set up Signal Frame */
812 newsp -= sizeof(struct sigregs32); 1066 newsp -= sizeof(struct sigregs);
813 frame = (struct sigregs32 __user *) newsp; 1067 frame = (struct sigregs __user *) newsp;
814 1068
815 /* Put a sigcontext on the stack */ 1069 /* Put a sigcontext on the stack */
816 newsp -= sizeof(*sc); 1070 newsp -= sizeof(*sc);
817 sc = (struct sigcontext32 __user *) newsp; 1071 sc = (struct sigcontext __user *) newsp;
818 1072
819 /* create a stack frame for the caller of the handler */ 1073 /* create a stack frame for the caller of the handler */
820 newsp -= __SIGNAL_FRAMESIZE32; 1074 newsp -= __SIGNAL_FRAMESIZE;
821 1075
822 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp)) 1076 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
823 goto badframe; 1077 goto badframe;
824 1078
825#if _NSIG != 64 1079#if _NSIG != 64
826#error "Please adjust handle_signal32()" 1080#error "Please adjust handle_signal()"
827#endif 1081#endif
828 if (__put_user((u32)(u64)ka->sa.sa_handler, &sc->handler) 1082 if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
829 || __put_user(oldset->sig[0], &sc->oldmask) 1083 || __put_user(oldset->sig[0], &sc->oldmask)
1084#ifdef CONFIG_PPC64
830 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3]) 1085 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
831 || __put_user((u32)(u64)frame, &sc->regs) 1086#else
1087 || __put_user(oldset->sig[1], &sc->_unused[3])
1088#endif
1089 || __put_user(to_user_ptr(frame), &sc->regs)
832 || __put_user(sig, &sc->signal)) 1090 || __put_user(sig, &sc->signal))
833 goto badframe; 1091 goto badframe;
834 1092
1093#ifdef CONFIG_PPC64
835 if (vdso32_sigtramp && current->thread.vdso_base) { 1094 if (vdso32_sigtramp && current->thread.vdso_base) {
836 if (save_user_regs(regs, &frame->mctx, 0)) 1095 if (save_user_regs(regs, &frame->mctx, 0))
837 goto badframe; 1096 goto badframe;
838 regs->link = current->thread.vdso_base + vdso32_sigtramp; 1097 regs->link = current->thread.vdso_base + vdso32_sigtramp;
839 } else { 1098 } else
1099#endif
1100 {
840 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn)) 1101 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
841 goto badframe; 1102 goto badframe;
842 regs->link = (unsigned long) frame->mctx.tramp; 1103 regs->link = (unsigned long) frame->mctx.tramp;
@@ -844,22 +1105,24 @@ static int handle_signal32(unsigned long sig, struct k_sigaction *ka,
844 1105
845 if (put_user(regs->gpr[1], (u32 __user *)newsp)) 1106 if (put_user(regs->gpr[1], (u32 __user *)newsp))
846 goto badframe; 1107 goto badframe;
847 regs->gpr[1] = (unsigned long) newsp; 1108 regs->gpr[1] = newsp;
848 regs->gpr[3] = sig; 1109 regs->gpr[3] = sig;
849 regs->gpr[4] = (unsigned long) sc; 1110 regs->gpr[4] = (unsigned long) sc;
850 regs->nip = (unsigned long) ka->sa.sa_handler; 1111 regs->nip = (unsigned long) ka->sa.sa_handler;
851 regs->trap = 0; 1112 regs->trap = 0;
1113#ifdef CONFIG_PPC64
852 regs->result = 0; 1114 regs->result = 0;
853 1115
854 if (test_thread_flag(TIF_SINGLESTEP)) 1116 if (test_thread_flag(TIF_SINGLESTEP))
855 ptrace_notify(SIGTRAP); 1117 ptrace_notify(SIGTRAP);
1118#endif
856 1119
857 return 1; 1120 return 1;
858 1121
859badframe: 1122badframe:
860#if DEBUG_SIG 1123#ifdef DEBUG_SIG
861 printk("badframe in handle_signal, regs=%p frame=%x newsp=%x\n", 1124 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
862 regs, frame, *newspp); 1125 regs, frame, newsp);
863#endif 1126#endif
864 force_sigsegv(sig, current); 1127 force_sigsegv(sig, current);
865 return 0; 1128 return 0;
@@ -868,65 +1131,69 @@ badframe:
868/* 1131/*
869 * Do a signal return; undo the signal stack. 1132 * Do a signal return; undo the signal stack.
870 */ 1133 */
871long sys32_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, 1134long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
872 struct pt_regs *regs) 1135 struct pt_regs *regs)
873{ 1136{
874 struct sigcontext32 __user *sc; 1137 struct sigcontext __user *sc;
875 struct sigcontext32 sigctx; 1138 struct sigcontext sigctx;
876 struct mcontext32 __user *sr; 1139 struct mcontext __user *sr;
877 sigset_t set; 1140 sigset_t set;
878 int ret;
879 1141
880 /* Always make any pending restarted system calls return -EINTR */ 1142 /* Always make any pending restarted system calls return -EINTR */
881 current_thread_info()->restart_block.fn = do_no_restart_syscall; 1143 current_thread_info()->restart_block.fn = do_no_restart_syscall;
882 1144
883 sc = (struct sigcontext32 __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32); 1145 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
884 if (copy_from_user(&sigctx, sc, sizeof(sigctx))) 1146 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
885 goto badframe; 1147 goto badframe;
886 1148
1149#ifdef CONFIG_PPC64
887 /* 1150 /*
888 * Note that PPC32 puts the upper 32 bits of the sigmask in the 1151 * Note that PPC32 puts the upper 32 bits of the sigmask in the
889 * unused part of the signal stackframe 1152 * unused part of the signal stackframe
890 */ 1153 */
891 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32); 1154 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1155#else
1156 set.sig[0] = sigctx.oldmask;
1157 set.sig[1] = sigctx._unused[3];
1158#endif
892 restore_sigmask(&set); 1159 restore_sigmask(&set);
893 1160
894 sr = (struct mcontext32 __user *)(u64)sigctx.regs; 1161 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
895 if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) 1162 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
896 || restore_user_regs(regs, sr, 1)) 1163 || restore_user_regs(regs, sr, 1))
897 goto badframe; 1164 goto badframe;
898 1165
899 ret = regs->result; 1166#ifdef CONFIG_PPC64
900 return ret; 1167 return (int)regs->result;
1168#else
1169 sigreturn_exit(regs); /* doesn't return */
1170 return 0;
1171#endif
901 1172
902badframe: 1173badframe:
903 force_sig(SIGSEGV, current); 1174 force_sig(SIGSEGV, current);
904 return 0; 1175 return 0;
905} 1176}
906 1177
907
908
909/*
910 * Start of do_signal32 routine
911 *
912 * This routine gets control when a pending signal needs to be processed
913 * in the 32 bit target thread -
914 *
915 * It handles both rt and non-rt signals
916 */
917
918/* 1178/*
919 * Note that 'init' is a special process: it doesn't get signals it doesn't 1179 * Note that 'init' is a special process: it doesn't get signals it doesn't
920 * want to handle. Thus you cannot kill init even with a SIGKILL even by 1180 * want to handle. Thus you cannot kill init even with a SIGKILL even by
921 * mistake. 1181 * mistake.
922 */ 1182 */
923 1183int do_signal(sigset_t *oldset, struct pt_regs *regs)
924int do_signal32(sigset_t *oldset, struct pt_regs *regs)
925{ 1184{
926 siginfo_t info; 1185 siginfo_t info;
1186 struct k_sigaction ka;
927 unsigned int frame, newsp; 1187 unsigned int frame, newsp;
928 int signr, ret; 1188 int signr, ret;
929 struct k_sigaction ka; 1189
1190#ifdef CONFIG_PPC32
1191 if (try_to_freeze()) {
1192 signr = 0;
1193 if (!signal_pending(current))
1194 goto no_signal;
1195 }
1196#endif
930 1197
931 if (!oldset) 1198 if (!oldset)
932 oldset = &current->blocked; 1199 oldset = &current->blocked;
@@ -934,7 +1201,9 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
934 newsp = frame = 0; 1201 newsp = frame = 0;
935 1202
936 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 1203 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
937 1204#ifdef CONFIG_PPC32
1205no_signal:
1206#endif
938 if (TRAP(regs) == 0x0C00 /* System Call! */ 1207 if (TRAP(regs) == 0x0C00 /* System Call! */
939 && regs->ccr & 0x10000000 /* error signalled */ 1208 && regs->ccr & 0x10000000 /* error signalled */
940 && ((ret = regs->gpr[3]) == ERESTARTSYS 1209 && ((ret = regs->gpr[3]) == ERESTARTSYS
@@ -964,12 +1233,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
964 return 0; /* no signals delivered */ 1233 return 0; /* no signals delivered */
965 1234
966 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size 1235 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
967 && (!on_sig_stack(regs->gpr[1]))) 1236 && !on_sig_stack(regs->gpr[1]))
968 newsp = (current->sas_ss_sp + current->sas_ss_size); 1237 newsp = current->sas_ss_sp + current->sas_ss_size;
969 else 1238 else
970 newsp = regs->gpr[1]; 1239 newsp = regs->gpr[1];
971 newsp &= ~0xfUL; 1240 newsp &= ~0xfUL;
972 1241
1242#ifdef CONFIG_PPC64
973 /* 1243 /*
974 * Reenable the DABR before delivering the signal to 1244 * Reenable the DABR before delivering the signal to
975 * user space. The DABR will have been cleared if it 1245 * user space. The DABR will have been cleared if it
@@ -977,12 +1247,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
977 */ 1247 */
978 if (current->thread.dabr) 1248 if (current->thread.dabr)
979 set_dabr(current->thread.dabr); 1249 set_dabr(current->thread.dabr);
1250#endif
980 1251
981 /* Whee! Actually deliver the signal. */ 1252 /* Whee! Actually deliver the signal. */
982 if (ka.sa.sa_flags & SA_SIGINFO) 1253 if (ka.sa.sa_flags & SA_SIGINFO)
983 ret = handle_rt_signal32(signr, &ka, &info, oldset, regs, newsp); 1254 ret = handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
984 else 1255 else
985 ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp); 1256 ret = handle_signal(signr, &ka, &info, oldset, regs, newsp);
986 1257
987 if (ret) { 1258 if (ret) {
988 spin_lock_irq(&current->sighand->siglock); 1259 spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index e93c13458910..a8210ed5c686 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -53,8 +53,7 @@
53#include <asm/time.h> 53#include <asm/time.h>
54#include <asm/mmu_context.h> 54#include <asm/mmu_context.h>
55#include <asm/systemcfg.h> 55#include <asm/systemcfg.h>
56 56#include <asm/ppc-pci.h>
57#include "pci.h"
58 57
59/* readdir & getdents */ 58/* readdir & getdents */
60#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de))) 59#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
@@ -114,96 +113,6 @@ out:
114 return error; 113 return error;
115} 114}
116 115
117struct linux_dirent32 {
118 u32 d_ino;
119 u32 d_off;
120 unsigned short d_reclen;
121 char d_name[1];
122};
123
124struct getdents_callback32 {
125 struct linux_dirent32 __user * current_dir;
126 struct linux_dirent32 __user * previous;
127 int count;
128 int error;
129};
130
131static int filldir(void * __buf, const char * name, int namlen, off_t offset,
132 ino_t ino, unsigned int d_type)
133{
134 struct linux_dirent32 __user * dirent;
135 struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
136 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
137
138 buf->error = -EINVAL; /* only used if we fail.. */
139 if (reclen > buf->count)
140 return -EINVAL;
141 dirent = buf->previous;
142 if (dirent) {
143 if (__put_user(offset, &dirent->d_off))
144 goto efault;
145 }
146 dirent = buf->current_dir;
147 if (__put_user(ino, &dirent->d_ino))
148 goto efault;
149 if (__put_user(reclen, &dirent->d_reclen))
150 goto efault;
151 if (copy_to_user(dirent->d_name, name, namlen))
152 goto efault;
153 if (__put_user(0, dirent->d_name + namlen))
154 goto efault;
155 if (__put_user(d_type, (char __user *) dirent + reclen - 1))
156 goto efault;
157 buf->previous = dirent;
158 dirent = (void __user *)dirent + reclen;
159 buf->current_dir = dirent;
160 buf->count -= reclen;
161 return 0;
162efault:
163 buf->error = -EFAULT;
164 return -EFAULT;
165}
166
167asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 __user *dirent,
168 unsigned int count)
169{
170 struct file * file;
171 struct linux_dirent32 __user * lastdirent;
172 struct getdents_callback32 buf;
173 int error;
174
175 error = -EFAULT;
176 if (!access_ok(VERIFY_WRITE, dirent, count))
177 goto out;
178
179 error = -EBADF;
180 file = fget(fd);
181 if (!file)
182 goto out;
183
184 buf.current_dir = dirent;
185 buf.previous = NULL;
186 buf.count = count;
187 buf.error = 0;
188
189 error = vfs_readdir(file, (filldir_t)filldir, &buf);
190 if (error < 0)
191 goto out_putf;
192 error = buf.error;
193 lastdirent = buf.previous;
194 if (lastdirent) {
195 if (put_user(file->f_pos, &lastdirent->d_off))
196 error = -EFAULT;
197 else
198 error = count - buf.count;
199 }
200
201out_putf:
202 fput(file);
203out:
204 return error;
205}
206
207asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp, 116asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
208 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 117 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
209 compat_uptr_t tvp_x) 118 compat_uptr_t tvp_x)
@@ -248,7 +157,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
248 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 157 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
249 * and the register representation of a signed int (msr in 64-bit mode) is performed. 158 * and the register representation of a signed int (msr in 64-bit mode) is performed.
250 */ 159 */
251asmlinkage long sys32_sysfs(u32 option, u32 arg1, u32 arg2) 160asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
252{ 161{
253 return sys_sysfs((int)option, arg1, arg2); 162 return sys_sysfs((int)option, arg1, arg2);
254} 163}
@@ -270,7 +179,7 @@ struct timex32 {
270extern int do_adjtimex(struct timex *); 179extern int do_adjtimex(struct timex *);
271extern void ppc_adjtimex(void); 180extern void ppc_adjtimex(void);
272 181
273asmlinkage long sys32_adjtimex(struct timex32 __user *utp) 182asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
274{ 183{
275 struct timex txc; 184 struct timex txc;
276 int ret; 185 int ret;
@@ -329,7 +238,7 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
329 return ret; 238 return ret;
330} 239}
331 240
332asmlinkage long sys32_pause(void) 241asmlinkage long compat_sys_pause(void)
333{ 242{
334 current->state = TASK_INTERRUPTIBLE; 243 current->state = TASK_INTERRUPTIBLE;
335 schedule(); 244 schedule();
@@ -375,7 +284,7 @@ struct sysinfo32 {
375 char _f[20-2*sizeof(int)-sizeof(int)]; 284 char _f[20-2*sizeof(int)-sizeof(int)];
376}; 285};
377 286
378asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info) 287asmlinkage long compat_sys_sysinfo(struct sysinfo32 __user *info)
379{ 288{
380 struct sysinfo s; 289 struct sysinfo s;
381 int ret, err; 290 int ret, err;
@@ -432,7 +341,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
432 sorts of things, like timeval and itimerval. */ 341 sorts of things, like timeval and itimerval. */
433extern struct timezone sys_tz; 342extern struct timezone sys_tz;
434 343
435asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) 344asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
436{ 345{
437 if (tv) { 346 if (tv) {
438 struct timeval ktv; 347 struct timeval ktv;
@@ -450,7 +359,7 @@ asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct time
450 359
451 360
452 361
453asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) 362asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
454{ 363{
455 struct timespec kts; 364 struct timespec kts;
456 struct timezone ktz; 365 struct timezone ktz;
@@ -468,7 +377,7 @@ asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct time
468} 377}
469 378
470#ifdef CONFIG_SYSVIPC 379#ifdef CONFIG_SYSVIPC
471long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, 380long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
472 u32 fifth) 381 u32 fifth)
473{ 382{
474 int version; 383 int version;
@@ -539,7 +448,7 @@ long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
539 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 448 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
540 * and the register representation of a signed int (msr in 64-bit mode) is performed. 449 * and the register representation of a signed int (msr in 64-bit mode) is performed.
541 */ 450 */
542asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count) 451asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
543{ 452{
544 mm_segment_t old_fs = get_fs(); 453 mm_segment_t old_fs = get_fs();
545 int ret; 454 int ret;
@@ -561,7 +470,7 @@ asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offs
561 return ret; 470 return ret;
562} 471}
563 472
564asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count) 473asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
565{ 474{
566 mm_segment_t old_fs = get_fs(); 475 mm_segment_t old_fs = get_fs();
567 int ret; 476 int ret;
@@ -583,7 +492,7 @@ asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *off
583 return ret; 492 return ret;
584} 493}
585 494
586long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2, 495long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
587 unsigned long a3, unsigned long a4, unsigned long a5, 496 unsigned long a3, unsigned long a4, unsigned long a5,
588 struct pt_regs *regs) 497 struct pt_regs *regs)
589{ 498{
@@ -610,58 +519,12 @@ out:
610 return error; 519 return error;
611} 520}
612 521
613/* Set up a thread for executing a new program. */
614void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
615{
616 set_fs(USER_DS);
617
618 /*
619 * If we exec out of a kernel thread then thread.regs will not be
620 * set. Do it now.
621 */
622 if (!current->thread.regs) {
623 unsigned long childregs = (unsigned long)current->thread_info +
624 THREAD_SIZE;
625 childregs -= sizeof(struct pt_regs);
626 current->thread.regs = (struct pt_regs *)childregs;
627 }
628
629 /*
630 * ELF_PLAT_INIT already clears all registers but it also sets r2.
631 * So just clear r2 here.
632 */
633 regs->gpr[2] = 0;
634
635 regs->nip = nip;
636 regs->gpr[1] = sp;
637 regs->msr = MSR_USER32;
638#ifndef CONFIG_SMP
639 if (last_task_used_math == current)
640 last_task_used_math = 0;
641#endif /* CONFIG_SMP */
642 current->thread.fpscr = 0;
643 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
644#ifdef CONFIG_ALTIVEC
645#ifndef CONFIG_SMP
646 if (last_task_used_altivec == current)
647 last_task_used_altivec = 0;
648#endif /* CONFIG_SMP */
649 memset(current->thread.vr, 0, sizeof(current->thread.vr));
650 current->thread.vscr.u[0] = 0;
651 current->thread.vscr.u[1] = 0;
652 current->thread.vscr.u[2] = 0;
653 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
654 current->thread.vrsave = 0;
655 current->thread.used_vr = 0;
656#endif /* CONFIG_ALTIVEC */
657}
658
659/* Note: it is necessary to treat option as an unsigned int, 522/* Note: it is necessary to treat option as an unsigned int,
660 * with the corresponding cast to a signed int to insure that the 523 * with the corresponding cast to a signed int to insure that the
661 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 524 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
662 * and the register representation of a signed int (msr in 64-bit mode) is performed. 525 * and the register representation of a signed int (msr in 64-bit mode) is performed.
663 */ 526 */
664asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5) 527asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
665{ 528{
666 return sys_prctl((int)option, 529 return sys_prctl((int)option,
667 (unsigned long) arg2, 530 (unsigned long) arg2,
@@ -675,7 +538,7 @@ asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
675 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 538 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
676 * and the register representation of a signed int (msr in 64-bit mode) is performed. 539 * and the register representation of a signed int (msr in 64-bit mode) is performed.
677 */ 540 */
678asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval) 541asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
679{ 542{
680 struct timespec t; 543 struct timespec t;
681 int ret; 544 int ret;
@@ -690,7 +553,7 @@ asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __us
690 return ret; 553 return ret;
691} 554}
692 555
693asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf) 556asmlinkage int compat_sys_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
694{ 557{
695 return sys_pciconfig_read((unsigned long) bus, 558 return sys_pciconfig_read((unsigned long) bus,
696 (unsigned long) dfn, 559 (unsigned long) dfn,
@@ -699,7 +562,7 @@ asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf
699 compat_ptr(ubuf)); 562 compat_ptr(ubuf));
700} 563}
701 564
702asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf) 565asmlinkage int compat_sys_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
703{ 566{
704 return sys_pciconfig_write((unsigned long) bus, 567 return sys_pciconfig_write((unsigned long) bus,
705 (unsigned long) dfn, 568 (unsigned long) dfn,
@@ -708,7 +571,7 @@ asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubu
708 compat_ptr(ubuf)); 571 compat_ptr(ubuf));
709} 572}
710 573
711asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn) 574asmlinkage int compat_sys_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
712{ 575{
713 return sys_pciconfig_iobase(which, in_bus, in_devfn); 576 return sys_pciconfig_iobase(which, in_bus, in_devfn);
714} 577}
@@ -719,7 +582,7 @@ asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
719 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 582 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
720 * and the register representation of a signed int (msr in 64-bit mode) is performed. 583 * and the register representation of a signed int (msr in 64-bit mode) is performed.
721 */ 584 */
722asmlinkage long sys32_access(const char __user * filename, u32 mode) 585asmlinkage long compat_sys_access(const char __user * filename, u32 mode)
723{ 586{
724 return sys_access(filename, (int)mode); 587 return sys_access(filename, (int)mode);
725} 588}
@@ -730,7 +593,7 @@ asmlinkage long sys32_access(const char __user * filename, u32 mode)
730 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 593 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
731 * and the register representation of a signed int (msr in 64-bit mode) is performed. 594 * and the register representation of a signed int (msr in 64-bit mode) is performed.
732 */ 595 */
733asmlinkage long sys32_creat(const char __user * pathname, u32 mode) 596asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode)
734{ 597{
735 return sys_creat(pathname, (int)mode); 598 return sys_creat(pathname, (int)mode);
736} 599}
@@ -741,7 +604,7 @@ asmlinkage long sys32_creat(const char __user * pathname, u32 mode)
741 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 604 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
742 * and the register representation of a signed int (msr in 64-bit mode) is performed. 605 * and the register representation of a signed int (msr in 64-bit mode) is performed.
743 */ 606 */
744asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options) 607asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
745{ 608{
746 return sys_waitpid((int)pid, stat_addr, (int)options); 609 return sys_waitpid((int)pid, stat_addr, (int)options);
747} 610}
@@ -752,7 +615,7 @@ asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 opti
752 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 615 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
753 * and the register representation of a signed int (msr in 64-bit mode) is performed. 616 * and the register representation of a signed int (msr in 64-bit mode) is performed.
754 */ 617 */
755asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist) 618asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist)
756{ 619{
757 return sys_getgroups((int)gidsetsize, grouplist); 620 return sys_getgroups((int)gidsetsize, grouplist);
758} 621}
@@ -763,7 +626,7 @@ asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist)
763 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 626 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
764 * and the register representation of a signed int (msr in 64-bit mode) is performed. 627 * and the register representation of a signed int (msr in 64-bit mode) is performed.
765 */ 628 */
766asmlinkage long sys32_getpgid(u32 pid) 629asmlinkage long compat_sys_getpgid(u32 pid)
767{ 630{
768 return sys_getpgid((int)pid); 631 return sys_getpgid((int)pid);
769} 632}
@@ -775,7 +638,7 @@ asmlinkage long sys32_getpgid(u32 pid)
775 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 638 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
776 * and the register representation of a signed int (msr in 64-bit mode) is performed. 639 * and the register representation of a signed int (msr in 64-bit mode) is performed.
777 */ 640 */
778asmlinkage long sys32_getsid(u32 pid) 641asmlinkage long compat_sys_getsid(u32 pid)
779{ 642{
780 return sys_getsid((int)pid); 643 return sys_getsid((int)pid);
781} 644}
@@ -786,7 +649,7 @@ asmlinkage long sys32_getsid(u32 pid)
786 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 649 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
787 * and the register representation of a signed int (msr in 64-bit mode) is performed. 650 * and the register representation of a signed int (msr in 64-bit mode) is performed.
788 */ 651 */
789asmlinkage long sys32_kill(u32 pid, u32 sig) 652asmlinkage long compat_sys_kill(u32 pid, u32 sig)
790{ 653{
791 return sys_kill((int)pid, (int)sig); 654 return sys_kill((int)pid, (int)sig);
792} 655}
@@ -797,12 +660,12 @@ asmlinkage long sys32_kill(u32 pid, u32 sig)
797 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 660 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
798 * and the register representation of a signed int (msr in 64-bit mode) is performed. 661 * and the register representation of a signed int (msr in 64-bit mode) is performed.
799 */ 662 */
800asmlinkage long sys32_mkdir(const char __user * pathname, u32 mode) 663asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode)
801{ 664{
802 return sys_mkdir(pathname, (int)mode); 665 return sys_mkdir(pathname, (int)mode);
803} 666}
804 667
805long sys32_nice(u32 increment) 668long compat_sys_nice(u32 increment)
806{ 669{
807 /* sign extend increment */ 670 /* sign extend increment */
808 return sys_nice((int)increment); 671 return sys_nice((int)increment);
@@ -819,7 +682,7 @@ off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
819 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 682 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
820 * and the register representation of a signed int (msr in 64-bit mode) is performed. 683 * and the register representation of a signed int (msr in 64-bit mode) is performed.
821 */ 684 */
822asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32 bufsiz) 685asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz)
823{ 686{
824 return sys_readlink(path, buf, (int)bufsiz); 687 return sys_readlink(path, buf, (int)bufsiz);
825} 688}
@@ -829,7 +692,7 @@ asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32
829 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 692 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
830 * and the register representation of a signed int (msr in 64-bit mode) is performed. 693 * and the register representation of a signed int (msr in 64-bit mode) is performed.
831 */ 694 */
832asmlinkage long sys32_sched_get_priority_max(u32 policy) 695asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
833{ 696{
834 return sys_sched_get_priority_max((int)policy); 697 return sys_sched_get_priority_max((int)policy);
835} 698}
@@ -840,7 +703,7 @@ asmlinkage long sys32_sched_get_priority_max(u32 policy)
840 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 703 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
841 * and the register representation of a signed int (msr in 64-bit mode) is performed. 704 * and the register representation of a signed int (msr in 64-bit mode) is performed.
842 */ 705 */
843asmlinkage long sys32_sched_get_priority_min(u32 policy) 706asmlinkage long compat_sys_sched_get_priority_min(u32 policy)
844{ 707{
845 return sys_sched_get_priority_min((int)policy); 708 return sys_sched_get_priority_min((int)policy);
846} 709}
@@ -851,7 +714,7 @@ asmlinkage long sys32_sched_get_priority_min(u32 policy)
851 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 714 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
852 * and the register representation of a signed int (msr in 64-bit mode) is performed. 715 * and the register representation of a signed int (msr in 64-bit mode) is performed.
853 */ 716 */
854asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param) 717asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param)
855{ 718{
856 return sys_sched_getparam((int)pid, param); 719 return sys_sched_getparam((int)pid, param);
857} 720}
@@ -862,7 +725,7 @@ asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param)
862 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 725 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
863 * and the register representation of a signed int (msr in 64-bit mode) is performed. 726 * and the register representation of a signed int (msr in 64-bit mode) is performed.
864 */ 727 */
865asmlinkage long sys32_sched_getscheduler(u32 pid) 728asmlinkage long compat_sys_sched_getscheduler(u32 pid)
866{ 729{
867 return sys_sched_getscheduler((int)pid); 730 return sys_sched_getscheduler((int)pid);
868} 731}
@@ -873,7 +736,7 @@ asmlinkage long sys32_sched_getscheduler(u32 pid)
873 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 736 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
874 * and the register representation of a signed int (msr in 64-bit mode) is performed. 737 * and the register representation of a signed int (msr in 64-bit mode) is performed.
875 */ 738 */
876asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param) 739asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param)
877{ 740{
878 return sys_sched_setparam((int)pid, param); 741 return sys_sched_setparam((int)pid, param);
879} 742}
@@ -884,7 +747,7 @@ asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param)
884 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 747 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
885 * and the register representation of a signed int (msr in 64-bit mode) is performed. 748 * and the register representation of a signed int (msr in 64-bit mode) is performed.
886 */ 749 */
887asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param) 750asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
888{ 751{
889 return sys_sched_setscheduler((int)pid, (int)policy, param); 752 return sys_sched_setscheduler((int)pid, (int)policy, param);
890} 753}
@@ -895,7 +758,7 @@ asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param
895 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 758 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
896 * and the register representation of a signed int (msr in 64-bit mode) is performed. 759 * and the register representation of a signed int (msr in 64-bit mode) is performed.
897 */ 760 */
898asmlinkage long sys32_setdomainname(char __user *name, u32 len) 761asmlinkage long compat_sys_setdomainname(char __user *name, u32 len)
899{ 762{
900 return sys_setdomainname(name, (int)len); 763 return sys_setdomainname(name, (int)len);
901} 764}
@@ -906,13 +769,13 @@ asmlinkage long sys32_setdomainname(char __user *name, u32 len)
906 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 769 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
907 * and the register representation of a signed int (msr in 64-bit mode) is performed. 770 * and the register representation of a signed int (msr in 64-bit mode) is performed.
908 */ 771 */
909asmlinkage long sys32_setgroups(u32 gidsetsize, gid_t __user *grouplist) 772asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist)
910{ 773{
911 return sys_setgroups((int)gidsetsize, grouplist); 774 return sys_setgroups((int)gidsetsize, grouplist);
912} 775}
913 776
914 777
915asmlinkage long sys32_sethostname(char __user *name, u32 len) 778asmlinkage long compat_sys_sethostname(char __user *name, u32 len)
916{ 779{
917 /* sign extend len */ 780 /* sign extend len */
918 return sys_sethostname(name, (int)len); 781 return sys_sethostname(name, (int)len);
@@ -924,30 +787,30 @@ asmlinkage long sys32_sethostname(char __user *name, u32 len)
924 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 787 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
925 * and the register representation of a signed int (msr in 64-bit mode) is performed. 788 * and the register representation of a signed int (msr in 64-bit mode) is performed.
926 */ 789 */
927asmlinkage long sys32_setpgid(u32 pid, u32 pgid) 790asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid)
928{ 791{
929 return sys_setpgid((int)pid, (int)pgid); 792 return sys_setpgid((int)pid, (int)pgid);
930} 793}
931 794
932long sys32_getpriority(u32 which, u32 who) 795long compat_sys_getpriority(u32 which, u32 who)
933{ 796{
934 /* sign extend which and who */ 797 /* sign extend which and who */
935 return sys_getpriority((int)which, (int)who); 798 return sys_getpriority((int)which, (int)who);
936} 799}
937 800
938long sys32_setpriority(u32 which, u32 who, u32 niceval) 801long compat_sys_setpriority(u32 which, u32 who, u32 niceval)
939{ 802{
940 /* sign extend which, who and niceval */ 803 /* sign extend which, who and niceval */
941 return sys_setpriority((int)which, (int)who, (int)niceval); 804 return sys_setpriority((int)which, (int)who, (int)niceval);
942} 805}
943 806
944long sys32_ioprio_get(u32 which, u32 who) 807long compat_sys_ioprio_get(u32 which, u32 who)
945{ 808{
946 /* sign extend which and who */ 809 /* sign extend which and who */
947 return sys_ioprio_get((int)which, (int)who); 810 return sys_ioprio_get((int)which, (int)who);
948} 811}
949 812
950long sys32_ioprio_set(u32 which, u32 who, u32 ioprio) 813long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio)
951{ 814{
952 /* sign extend which, who and ioprio */ 815 /* sign extend which, who and ioprio */
953 return sys_ioprio_set((int)which, (int)who, (int)ioprio); 816 return sys_ioprio_set((int)which, (int)who, (int)ioprio);
@@ -958,12 +821,12 @@ long sys32_ioprio_set(u32 which, u32 who, u32 ioprio)
958 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 821 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
959 * and the register representation of a signed int (msr in 64-bit mode) is performed. 822 * and the register representation of a signed int (msr in 64-bit mode) is performed.
960 */ 823 */
961asmlinkage long sys32_ssetmask(u32 newmask) 824asmlinkage long compat_sys_ssetmask(u32 newmask)
962{ 825{
963 return sys_ssetmask((int) newmask); 826 return sys_ssetmask((int) newmask);
964} 827}
965 828
966asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len) 829asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len)
967{ 830{
968 /* sign extend len */ 831 /* sign extend len */
969 return sys_syslog(type, buf, (int)len); 832 return sys_syslog(type, buf, (int)len);
@@ -975,7 +838,7 @@ asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len)
975 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 838 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
976 * and the register representation of a signed int (msr in 64-bit mode) is performed. 839 * and the register representation of a signed int (msr in 64-bit mode) is performed.
977 */ 840 */
978asmlinkage long sys32_umask(u32 mask) 841asmlinkage long compat_sys_umask(u32 mask)
979{ 842{
980 return sys_umask((int)mask); 843 return sys_umask((int)mask);
981} 844}
@@ -991,7 +854,7 @@ struct __sysctl_args32 {
991 u32 __unused[4]; 854 u32 __unused[4];
992}; 855};
993 856
994asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) 857asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
995{ 858{
996 struct __sysctl_args32 tmp; 859 struct __sysctl_args32 tmp;
997 int error; 860 int error;
@@ -1032,55 +895,7 @@ asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
1032} 895}
1033#endif 896#endif
1034 897
1035asmlinkage int sys32_uname(struct old_utsname __user * name) 898unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
1036{
1037 int err = 0;
1038
1039 down_read(&uts_sem);
1040 if (copy_to_user(name, &system_utsname, sizeof(*name)))
1041 err = -EFAULT;
1042 up_read(&uts_sem);
1043 if (!err && personality(current->personality) == PER_LINUX32) {
1044 /* change "ppc64" to "ppc" */
1045 if (__put_user(0, name->machine + 3)
1046 || __put_user(0, name->machine + 4))
1047 err = -EFAULT;
1048 }
1049 return err;
1050}
1051
1052asmlinkage int sys32_olduname(struct oldold_utsname __user * name)
1053{
1054 int error;
1055
1056 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
1057 return -EFAULT;
1058
1059 down_read(&uts_sem);
1060 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
1061 error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
1062 error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
1063 error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
1064 error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
1065 error |= __put_user(0,name->release+__OLD_UTS_LEN);
1066 error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
1067 error |= __put_user(0,name->version+__OLD_UTS_LEN);
1068 error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
1069 error |= __put_user(0,name->machine+__OLD_UTS_LEN);
1070 if (personality(current->personality) == PER_LINUX32) {
1071 /* change "ppc64" to "ppc" */
1072 error |= __put_user(0, name->machine + 3);
1073 error |= __put_user(0, name->machine + 4);
1074 }
1075
1076 up_read(&uts_sem);
1077
1078 error = error ? -EFAULT : 0;
1079
1080 return error;
1081}
1082
1083unsigned long sys32_mmap2(unsigned long addr, size_t len,
1084 unsigned long prot, unsigned long flags, 899 unsigned long prot, unsigned long flags,
1085 unsigned long fd, unsigned long pgoff) 900 unsigned long fd, unsigned long pgoff)
1086{ 901{
@@ -1088,29 +903,7 @@ unsigned long sys32_mmap2(unsigned long addr, size_t len,
1088 return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); 903 return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
1089} 904}
1090 905
1091int get_compat_timeval(struct timeval *tv, struct compat_timeval __user *ctv) 906long compat_sys_tgkill(u32 tgid, u32 pid, int sig)
1092{
1093 return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
1094 __get_user(tv->tv_sec, &ctv->tv_sec) ||
1095 __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
1096}
1097
1098asmlinkage long sys32_utimes(char __user *filename, struct compat_timeval __user *tvs)
1099{
1100 struct timeval ktvs[2], *ptr;
1101
1102 ptr = NULL;
1103 if (tvs) {
1104 if (get_compat_timeval(&ktvs[0], &tvs[0]) ||
1105 get_compat_timeval(&ktvs[1], &tvs[1]))
1106 return -EFAULT;
1107 ptr = ktvs;
1108 }
1109
1110 return do_utimes(filename, ptr);
1111}
1112
1113long sys32_tgkill(u32 tgid, u32 pid, int sig)
1114{ 907{
1115 /* sign extend tgid, pid */ 908 /* sign extend tgid, pid */
1116 return sys_tgkill((int)tgid, (int)pid, sig); 909 return sys_tgkill((int)tgid, (int)pid, sig);
@@ -1121,30 +914,30 @@ long sys32_tgkill(u32 tgid, u32 pid, int sig)
1121 * The 32 bit ABI passes long longs in an odd even register pair. 914 * The 32 bit ABI passes long longs in an odd even register pair.
1122 */ 915 */
1123 916
1124compat_ssize_t sys32_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, 917compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
1125 u32 reg6, u32 poshi, u32 poslo) 918 u32 reg6, u32 poshi, u32 poslo)
1126{ 919{
1127 return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); 920 return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
1128} 921}
1129 922
1130compat_ssize_t sys32_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count, 923compat_ssize_t compat_sys_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count,
1131 u32 reg6, u32 poshi, u32 poslo) 924 u32 reg6, u32 poshi, u32 poslo)
1132{ 925{
1133 return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); 926 return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
1134} 927}
1135 928
1136compat_ssize_t sys32_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count) 929compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
1137{ 930{
1138 return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count); 931 return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
1139} 932}
1140 933
1141asmlinkage int sys32_truncate64(const char __user * path, u32 reg4, 934asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
1142 unsigned long high, unsigned long low) 935 unsigned long high, unsigned long low)
1143{ 936{
1144 return sys_truncate(path, (high << 32) | low); 937 return sys_truncate(path, (high << 32) | low);
1145} 938}
1146 939
1147asmlinkage int sys32_ftruncate64(unsigned int fd, u32 reg4, unsigned long high, 940asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
1148 unsigned long low) 941 unsigned long low)
1149{ 942{
1150 return sys_ftruncate(fd, (high << 32) | low); 943 return sys_ftruncate(fd, (high << 32) | low);
@@ -1164,13 +957,6 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
1164 advice); 957 advice);
1165} 958}
1166 959
1167long ppc32_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
1168 u32 len_high, u32 len_low)
1169{
1170 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
1171 (u64)len_high << 32 | len_low, advice);
1172}
1173
1174long ppc32_timer_create(clockid_t clock, 960long ppc32_timer_create(clockid_t clock,
1175 struct compat_sigevent __user *ev32, 961 struct compat_sigevent __user *ev32,
1176 timer_t __user *timer_id) 962 timer_t __user *timer_id)
@@ -1203,7 +989,7 @@ long ppc32_timer_create(clockid_t clock,
1203 return err; 989 return err;
1204} 990}
1205 991
1206asmlinkage long sys32_add_key(const char __user *_type, 992asmlinkage long compat_sys_add_key(const char __user *_type,
1207 const char __user *_description, 993 const char __user *_description,
1208 const void __user *_payload, 994 const void __user *_payload,
1209 u32 plen, 995 u32 plen,
@@ -1212,7 +998,7 @@ asmlinkage long sys32_add_key(const char __user *_type,
1212 return sys_add_key(_type, _description, _payload, plen, ringid); 998 return sys_add_key(_type, _description, _payload, plen, ringid);
1213} 999}
1214 1000
1215asmlinkage long sys32_request_key(const char __user *_type, 1001asmlinkage long compat_sys_request_key(const char __user *_type,
1216 const char __user *_description, 1002 const char __user *_description,
1217 const char __user *_callout_info, 1003 const char __user *_callout_info,
1218 u32 destringid) 1004 u32 destringid)
diff --git a/arch/ppc64/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 05f16633bd2c..f72ced11212d 100644
--- a/arch/ppc64/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/sys_ppc.c 2 * Implementation of various system calls for Linux/PowerPC
3 * 3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 5 *
7 * Derived from "arch/i386/kernel/sys_i386.c" 6 * Derived from "arch/i386/kernel/sys_i386.c"
@@ -52,9 +51,8 @@ extern unsigned long wall_jiffies;
52 * 51 *
53 * This is really horribly ugly. 52 * This is really horribly ugly.
54 */ 53 */
55asmlinkage int 54int sys_ipc(uint call, int first, unsigned long second, long third,
56sys_ipc (uint call, int first, unsigned long second, long third, 55 void __user *ptr, long fifth)
57 void __user *ptr, long fifth)
58{ 56{
59 int version, ret; 57 int version, ret;
60 58
@@ -88,7 +86,7 @@ sys_ipc (uint call, int first, unsigned long second, long third,
88 } 86 }
89 case MSGSND: 87 case MSGSND:
90 ret = sys_msgsnd(first, (struct msgbuf __user *)ptr, 88 ret = sys_msgsnd(first, (struct msgbuf __user *)ptr,
91 (size_t)second, third); 89 (size_t)second, third);
92 break; 90 break;
93 case MSGRCV: 91 case MSGRCV:
94 switch (version) { 92 switch (version) {
@@ -113,41 +111,29 @@ sys_ipc (uint call, int first, unsigned long second, long third,
113 } 111 }
114 break; 112 break;
115 case MSGGET: 113 case MSGGET:
116 ret = sys_msgget ((key_t)first, (int)second); 114 ret = sys_msgget((key_t)first, (int)second);
117 break; 115 break;
118 case MSGCTL: 116 case MSGCTL:
119 ret = sys_msgctl(first, (int)second, 117 ret = sys_msgctl(first, (int)second,
120 (struct msqid_ds __user *)ptr); 118 (struct msqid_ds __user *)ptr);
121 break; 119 break;
122 case SHMAT: 120 case SHMAT: {
123 switch (version) { 121 ulong raddr;
124 default: { 122 ret = do_shmat(first, (char __user *)ptr, (int)second, &raddr);
125 ulong raddr; 123 if (ret)
126 ret = do_shmat(first, (char __user *) ptr,
127 (int)second, &raddr);
128 if (ret)
129 break;
130 ret = put_user (raddr, (ulong __user *) third);
131 break;
132 }
133 case 1: /* iBCS2 emulator entry point */
134 ret = -EINVAL;
135 if (!segment_eq(get_fs(), get_ds()))
136 break;
137 ret = do_shmat(first, (char __user *)ptr,
138 (int)second, (ulong *)third);
139 break; 124 break;
140 } 125 ret = put_user(raddr, (ulong __user *) third);
141 break; 126 break;
142 case SHMDT: 127 }
143 ret = sys_shmdt ((char __user *)ptr); 128 case SHMDT:
129 ret = sys_shmdt((char __user *)ptr);
144 break; 130 break;
145 case SHMGET: 131 case SHMGET:
146 ret = sys_shmget (first, (size_t)second, third); 132 ret = sys_shmget(first, (size_t)second, third);
147 break; 133 break;
148 case SHMCTL: 134 case SHMCTL:
149 ret = sys_shmctl(first, (int)second, 135 ret = sys_shmctl(first, (int)second,
150 (struct shmid_ds __user *)ptr); 136 (struct shmid_ds __user *)ptr);
151 break; 137 break;
152 } 138 }
153 139
@@ -158,43 +144,89 @@ sys_ipc (uint call, int first, unsigned long second, long third,
158 * sys_pipe() is the normal C calling standard for creating 144 * sys_pipe() is the normal C calling standard for creating
159 * a pipe. It's not the way unix traditionally does this, though. 145 * a pipe. It's not the way unix traditionally does this, though.
160 */ 146 */
161asmlinkage int sys_pipe(int __user *fildes) 147int sys_pipe(int __user *fildes)
162{ 148{
163 int fd[2]; 149 int fd[2];
164 int error; 150 int error;
165 151
166 error = do_pipe(fd); 152 error = do_pipe(fd);
167 if (!error) { 153 if (!error) {
168 if (copy_to_user(fildes, fd, 2*sizeof(int))) 154 if (copy_to_user(fildes, fd, 2*sizeof(int)))
169 error = -EFAULT; 155 error = -EFAULT;
170 } 156 }
171
172 return error; 157 return error;
173} 158}
174 159
175unsigned long sys_mmap(unsigned long addr, size_t len, 160static inline unsigned long do_mmap2(unsigned long addr, size_t len,
176 unsigned long prot, unsigned long flags, 161 unsigned long prot, unsigned long flags,
177 unsigned long fd, off_t offset) 162 unsigned long fd, unsigned long off, int shift)
178{ 163{
179 struct file * file = NULL; 164 struct file * file = NULL;
180 unsigned long ret = -EBADF; 165 unsigned long ret = -EINVAL;
181 166
167 if (shift) {
168 if (off & ((1 << shift) - 1))
169 goto out;
170 off >>= shift;
171 }
172
173 ret = -EBADF;
182 if (!(flags & MAP_ANONYMOUS)) { 174 if (!(flags & MAP_ANONYMOUS)) {
183 if (!(file = fget(fd))) 175 if (!(file = fget(fd)))
184 goto out; 176 goto out;
185 } 177 }
186 178
187 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 179 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
180
188 down_write(&current->mm->mmap_sem); 181 down_write(&current->mm->mmap_sem);
189 ret = do_mmap(file, addr, len, prot, flags, offset); 182 ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
190 up_write(&current->mm->mmap_sem); 183 up_write(&current->mm->mmap_sem);
191 if (file) 184 if (file)
192 fput(file); 185 fput(file);
193
194out: 186out:
195 return ret; 187 return ret;
196} 188}
197 189
190unsigned long sys_mmap2(unsigned long addr, size_t len,
191 unsigned long prot, unsigned long flags,
192 unsigned long fd, unsigned long pgoff)
193{
194 return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
195}
196
197unsigned long sys_mmap(unsigned long addr, size_t len,
198 unsigned long prot, unsigned long flags,
199 unsigned long fd, off_t offset)
200{
201 return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
202}
203
204#ifdef CONFIG_PPC32
205/*
206 * Due to some executables calling the wrong select we sometimes
207 * get wrong args. This determines how the args are being passed
208 * (a single ptr to them all args passed) then calls
209 * sys_select() with the appropriate args. -- Cort
210 */
211int
212ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
213{
214 if ( (unsigned long)n >= 4096 )
215 {
216 unsigned long __user *buffer = (unsigned long __user *)n;
217 if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
218 || __get_user(n, buffer)
219 || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
220 || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
221 || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
222 || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
223 return -EFAULT;
224 }
225 return sys_select(n, inp, outp, exp, tvp);
226}
227#endif
228
229#ifdef CONFIG_PPC64
198long ppc64_personality(unsigned long personality) 230long ppc64_personality(unsigned long personality)
199{ 231{
200 long ret; 232 long ret;
@@ -207,8 +239,25 @@ long ppc64_personality(unsigned long personality)
207 ret = PER_LINUX; 239 ret = PER_LINUX;
208 return ret; 240 return ret;
209} 241}
242#endif
243
244#ifdef CONFIG_PPC64
245#define OVERRIDE_MACHINE (personality(current->personality) == PER_LINUX32)
246#else
247#define OVERRIDE_MACHINE 0
248#endif
249
250static inline int override_machine(char *mach)
251{
252 if (OVERRIDE_MACHINE) {
253 /* change ppc64 to ppc */
254 if (__put_user(0, mach+3) || __put_user(0, mach+4))
255 return -EFAULT;
256 }
257 return 0;
258}
210 259
211long ppc64_newuname(struct new_utsname __user * name) 260long ppc_newuname(struct new_utsname __user * name)
212{ 261{
213 int err = 0; 262 int err = 0;
214 263
@@ -216,16 +265,54 @@ long ppc64_newuname(struct new_utsname __user * name)
216 if (copy_to_user(name, &system_utsname, sizeof(*name))) 265 if (copy_to_user(name, &system_utsname, sizeof(*name)))
217 err = -EFAULT; 266 err = -EFAULT;
218 up_read(&uts_sem); 267 up_read(&uts_sem);
219 if (!err && personality(current->personality) == PER_LINUX32) { 268 if (!err)
220 /* change ppc64 to ppc */ 269 err = override_machine(name->machine);
221 if (__put_user(0, name->machine + 3)
222 || __put_user(0, name->machine + 4))
223 err = -EFAULT;
224 }
225 return err; 270 return err;
226} 271}
227 272
228asmlinkage time_t sys64_time(time_t __user * tloc) 273int sys_uname(struct old_utsname __user *name)
274{
275 int err = 0;
276
277 down_read(&uts_sem);
278 if (copy_to_user(name, &system_utsname, sizeof(*name)))
279 err = -EFAULT;
280 up_read(&uts_sem);
281 if (!err)
282 err = override_machine(name->machine);
283 return err;
284}
285
286int sys_olduname(struct oldold_utsname __user *name)
287{
288 int error;
289
290 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
291 return -EFAULT;
292
293 down_read(&uts_sem);
294 error = __copy_to_user(&name->sysname, &system_utsname.sysname,
295 __OLD_UTS_LEN);
296 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
297 error |= __copy_to_user(&name->nodename, &system_utsname.nodename,
298 __OLD_UTS_LEN);
299 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
300 error |= __copy_to_user(&name->release, &system_utsname.release,
301 __OLD_UTS_LEN);
302 error |= __put_user(0, name->release + __OLD_UTS_LEN);
303 error |= __copy_to_user(&name->version, &system_utsname.version,
304 __OLD_UTS_LEN);
305 error |= __put_user(0, name->version + __OLD_UTS_LEN);
306 error |= __copy_to_user(&name->machine, &system_utsname.machine,
307 __OLD_UTS_LEN);
308 error |= override_machine(name->machine);
309 up_read(&uts_sem);
310
311 return error? -EFAULT: 0;
312}
313
314#ifdef CONFIG_PPC64
315time_t sys64_time(time_t __user * tloc)
229{ 316{
230 time_t secs; 317 time_t secs;
231 time_t usecs; 318 time_t usecs;
@@ -247,6 +334,14 @@ asmlinkage time_t sys64_time(time_t __user * tloc)
247 334
248 return secs; 335 return secs;
249} 336}
337#endif
338
339long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
340 u32 len_high, u32 len_low)
341{
342 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
343 (u64)len_high << 32 | len_low, advice);
344}
250 345
251void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5, 346void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
252 unsigned long r6, unsigned long r7, unsigned long r8, 347 unsigned long r6, unsigned long r7, unsigned long r8,
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
new file mode 100644
index 000000000000..65eaea91b499
--- /dev/null
+++ b/arch/powerpc/kernel/systbl.S
@@ -0,0 +1,321 @@
1/*
2 * This file contains the table of syscall-handling functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <asm/ppc_asm.h>
19
20#ifdef CONFIG_PPC64
21#define SYSCALL(func) .llong .sys_##func,.sys_##func
22#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func
23#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func
24#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall
25#define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func
26#define SYSX(f, f3264, f32) .llong .f,.f3264
27#else
28#define SYSCALL(func) .long sys_##func
29#define COMPAT_SYS(func) .long sys_##func
30#define PPC_SYS(func) .long ppc_##func
31#define OLDSYS(func) .long sys_##func
32#define SYS32ONLY(func) .long sys_##func
33#define SYSX(f, f3264, f32) .long f32
34#endif
35
36#ifdef CONFIG_PPC64
37#define sys_sigpending sys_ni_syscall
38#define sys_old_getrlimit sys_ni_syscall
39#else
40#define ppc_rtas sys_ni_syscall
41#endif
42
43_GLOBAL(sys_call_table)
44SYSCALL(restart_syscall)
45SYSCALL(exit)
46PPC_SYS(fork)
47SYSCALL(read)
48SYSCALL(write)
49COMPAT_SYS(open)
50SYSCALL(close)
51COMPAT_SYS(waitpid)
52COMPAT_SYS(creat)
53SYSCALL(link)
54SYSCALL(unlink)
55COMPAT_SYS(execve)
56SYSCALL(chdir)
57SYSX(sys64_time,compat_sys_time,sys_time)
58SYSCALL(mknod)
59SYSCALL(chmod)
60SYSCALL(lchown)
61SYSCALL(ni_syscall)
62OLDSYS(stat)
63SYSX(sys_lseek,ppc32_lseek,sys_lseek)
64SYSCALL(getpid)
65COMPAT_SYS(mount)
66SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
67SYSCALL(setuid)
68SYSCALL(getuid)
69COMPAT_SYS(stime)
70COMPAT_SYS(ptrace)
71SYSCALL(alarm)
72OLDSYS(fstat)
73COMPAT_SYS(pause)
74COMPAT_SYS(utime)
75SYSCALL(ni_syscall)
76SYSCALL(ni_syscall)
77COMPAT_SYS(access)
78COMPAT_SYS(nice)
79SYSCALL(ni_syscall)
80SYSCALL(sync)
81COMPAT_SYS(kill)
82SYSCALL(rename)
83COMPAT_SYS(mkdir)
84SYSCALL(rmdir)
85SYSCALL(dup)
86SYSCALL(pipe)
87COMPAT_SYS(times)
88SYSCALL(ni_syscall)
89SYSCALL(brk)
90SYSCALL(setgid)
91SYSCALL(getgid)
92SYSCALL(signal)
93SYSCALL(geteuid)
94SYSCALL(getegid)
95SYSCALL(acct)
96SYSCALL(umount)
97SYSCALL(ni_syscall)
98COMPAT_SYS(ioctl)
99COMPAT_SYS(fcntl)
100SYSCALL(ni_syscall)
101COMPAT_SYS(setpgid)
102SYSCALL(ni_syscall)
103SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
104COMPAT_SYS(umask)
105SYSCALL(chroot)
106SYSCALL(ustat)
107SYSCALL(dup2)
108SYSCALL(getppid)
109SYSCALL(getpgrp)
110SYSCALL(setsid)
111SYS32ONLY(sigaction)
112SYSCALL(sgetmask)
113COMPAT_SYS(ssetmask)
114SYSCALL(setreuid)
115SYSCALL(setregid)
116SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
117COMPAT_SYS(sigpending)
118COMPAT_SYS(sethostname)
119COMPAT_SYS(setrlimit)
120COMPAT_SYS(old_getrlimit)
121COMPAT_SYS(getrusage)
122COMPAT_SYS(gettimeofday)
123COMPAT_SYS(settimeofday)
124COMPAT_SYS(getgroups)
125COMPAT_SYS(setgroups)
126SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
127SYSCALL(symlink)
128OLDSYS(lstat)
129COMPAT_SYS(readlink)
130SYSCALL(uselib)
131SYSCALL(swapon)
132SYSCALL(reboot)
133SYSX(sys_ni_syscall,old32_readdir,old_readdir)
134SYSCALL(mmap)
135SYSCALL(munmap)
136SYSCALL(truncate)
137SYSCALL(ftruncate)
138SYSCALL(fchmod)
139SYSCALL(fchown)
140COMPAT_SYS(getpriority)
141COMPAT_SYS(setpriority)
142SYSCALL(ni_syscall)
143COMPAT_SYS(statfs)
144COMPAT_SYS(fstatfs)
145SYSCALL(ni_syscall)
146COMPAT_SYS(socketcall)
147COMPAT_SYS(syslog)
148COMPAT_SYS(setitimer)
149COMPAT_SYS(getitimer)
150COMPAT_SYS(newstat)
151COMPAT_SYS(newlstat)
152COMPAT_SYS(newfstat)
153SYSX(sys_ni_syscall,sys_uname,sys_uname)
154SYSCALL(ni_syscall)
155SYSCALL(vhangup)
156SYSCALL(ni_syscall)
157SYSCALL(ni_syscall)
158COMPAT_SYS(wait4)
159SYSCALL(swapoff)
160COMPAT_SYS(sysinfo)
161COMPAT_SYS(ipc)
162SYSCALL(fsync)
163SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
164PPC_SYS(clone)
165COMPAT_SYS(setdomainname)
166PPC_SYS(newuname)
167SYSCALL(ni_syscall)
168COMPAT_SYS(adjtimex)
169SYSCALL(mprotect)
170SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
171SYSCALL(ni_syscall)
172SYSCALL(init_module)
173SYSCALL(delete_module)
174SYSCALL(ni_syscall)
175SYSCALL(quotactl)
176COMPAT_SYS(getpgid)
177SYSCALL(fchdir)
178SYSCALL(bdflush)
179COMPAT_SYS(sysfs)
180SYSX(ppc64_personality,ppc64_personality,sys_personality)
181SYSCALL(ni_syscall)
182SYSCALL(setfsuid)
183SYSCALL(setfsgid)
184SYSCALL(llseek)
185COMPAT_SYS(getdents)
186SYSX(sys_select,ppc32_select,ppc_select)
187SYSCALL(flock)
188SYSCALL(msync)
189COMPAT_SYS(readv)
190COMPAT_SYS(writev)
191COMPAT_SYS(getsid)
192SYSCALL(fdatasync)
193COMPAT_SYS(sysctl)
194SYSCALL(mlock)
195SYSCALL(munlock)
196SYSCALL(mlockall)
197SYSCALL(munlockall)
198COMPAT_SYS(sched_setparam)
199COMPAT_SYS(sched_getparam)
200COMPAT_SYS(sched_setscheduler)
201COMPAT_SYS(sched_getscheduler)
202SYSCALL(sched_yield)
203COMPAT_SYS(sched_get_priority_max)
204COMPAT_SYS(sched_get_priority_min)
205COMPAT_SYS(sched_rr_get_interval)
206COMPAT_SYS(nanosleep)
207SYSCALL(mremap)
208SYSCALL(setresuid)
209SYSCALL(getresuid)
210SYSCALL(ni_syscall)
211SYSCALL(poll)
212COMPAT_SYS(nfsservctl)
213SYSCALL(setresgid)
214SYSCALL(getresgid)
215COMPAT_SYS(prctl)
216SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
217COMPAT_SYS(rt_sigaction)
218COMPAT_SYS(rt_sigprocmask)
219COMPAT_SYS(rt_sigpending)
220COMPAT_SYS(rt_sigtimedwait)
221COMPAT_SYS(rt_sigqueueinfo)
222SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
223COMPAT_SYS(pread64)
224COMPAT_SYS(pwrite64)
225SYSCALL(chown)
226SYSCALL(getcwd)
227SYSCALL(capget)
228SYSCALL(capset)
229COMPAT_SYS(sigaltstack)
230SYSX(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
231SYSCALL(ni_syscall)
232SYSCALL(ni_syscall)
233PPC_SYS(vfork)
234COMPAT_SYS(getrlimit)
235COMPAT_SYS(readahead)
236SYS32ONLY(mmap2)
237SYS32ONLY(truncate64)
238SYS32ONLY(ftruncate64)
239SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
240SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
241SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
242COMPAT_SYS(pciconfig_read)
243COMPAT_SYS(pciconfig_write)
244COMPAT_SYS(pciconfig_iobase)
245SYSCALL(ni_syscall)
246SYSCALL(getdents64)
247SYSCALL(pivot_root)
248SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
249SYSCALL(madvise)
250SYSCALL(mincore)
251SYSCALL(gettid)
252SYSCALL(tkill)
253SYSCALL(setxattr)
254SYSCALL(lsetxattr)
255SYSCALL(fsetxattr)
256SYSCALL(getxattr)
257SYSCALL(lgetxattr)
258SYSCALL(fgetxattr)
259SYSCALL(listxattr)
260SYSCALL(llistxattr)
261SYSCALL(flistxattr)
262SYSCALL(removexattr)
263SYSCALL(lremovexattr)
264SYSCALL(fremovexattr)
265COMPAT_SYS(futex)
266COMPAT_SYS(sched_setaffinity)
267COMPAT_SYS(sched_getaffinity)
268SYSCALL(ni_syscall)
269SYSCALL(ni_syscall)
270SYS32ONLY(sendfile64)
271COMPAT_SYS(io_setup)
272SYSCALL(io_destroy)
273COMPAT_SYS(io_getevents)
274COMPAT_SYS(io_submit)
275SYSCALL(io_cancel)
276SYSCALL(set_tid_address)
277SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
278SYSCALL(exit_group)
279SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
280SYSCALL(epoll_create)
281SYSCALL(epoll_ctl)
282SYSCALL(epoll_wait)
283SYSCALL(remap_file_pages)
284SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
285COMPAT_SYS(timer_settime)
286COMPAT_SYS(timer_gettime)
287SYSCALL(timer_getoverrun)
288SYSCALL(timer_delete)
289COMPAT_SYS(clock_settime)
290COMPAT_SYS(clock_gettime)
291COMPAT_SYS(clock_getres)
292COMPAT_SYS(clock_nanosleep)
293SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
294COMPAT_SYS(tgkill)
295COMPAT_SYS(utimes)
296COMPAT_SYS(statfs64)
297COMPAT_SYS(fstatfs64)
298SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
299PPC_SYS(rtas)
300OLDSYS(debug_setcontext)
301SYSCALL(ni_syscall)
302SYSCALL(ni_syscall)
303COMPAT_SYS(mbind)
304COMPAT_SYS(get_mempolicy)
305COMPAT_SYS(set_mempolicy)
306COMPAT_SYS(mq_open)
307SYSCALL(mq_unlink)
308COMPAT_SYS(mq_timedsend)
309COMPAT_SYS(mq_timedreceive)
310COMPAT_SYS(mq_notify)
311COMPAT_SYS(mq_getsetattr)
312COMPAT_SYS(kexec_load)
313COMPAT_SYS(add_key)
314COMPAT_SYS(request_key)
315COMPAT_SYS(keyctl)
316COMPAT_SYS(waitid)
317COMPAT_SYS(ioprio_set)
318COMPAT_SYS(ioprio_get)
319SYSCALL(inotify_init)
320SYSCALL(inotify_add_watch)
321SYSCALL(inotify_rm_watch)
diff --git a/arch/ppc64/kernel/time.c b/arch/powerpc/kernel/time.c
index b56c6a324e17..b635c7de6698 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1,5 +1,4 @@
1/* 1/*
2 *
3 * Common time routines among all ppc machines. 2 * Common time routines among all ppc machines.
4 * 3 *
5 * Written by Cort Dougan (cort@cs.nmt.edu) to merge 4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
@@ -44,29 +43,32 @@
44#include <linux/interrupt.h> 43#include <linux/interrupt.h>
45#include <linux/timex.h> 44#include <linux/timex.h>
46#include <linux/kernel_stat.h> 45#include <linux/kernel_stat.h>
47#include <linux/mc146818rtc.h>
48#include <linux/time.h> 46#include <linux/time.h>
49#include <linux/init.h> 47#include <linux/init.h>
50#include <linux/profile.h> 48#include <linux/profile.h>
51#include <linux/cpu.h> 49#include <linux/cpu.h>
52#include <linux/security.h> 50#include <linux/security.h>
51#include <linux/percpu.h>
52#include <linux/rtc.h>
53 53
54#include <asm/io.h> 54#include <asm/io.h>
55#include <asm/processor.h> 55#include <asm/processor.h>
56#include <asm/nvram.h> 56#include <asm/nvram.h>
57#include <asm/cache.h> 57#include <asm/cache.h>
58#include <asm/machdep.h> 58#include <asm/machdep.h>
59#ifdef CONFIG_PPC_ISERIES
60#include <asm/iSeries/ItLpQueue.h>
61#include <asm/iSeries/HvCallXm.h>
62#endif
63#include <asm/uaccess.h> 59#include <asm/uaccess.h>
64#include <asm/time.h> 60#include <asm/time.h>
65#include <asm/ppcdebug.h>
66#include <asm/prom.h> 61#include <asm/prom.h>
67#include <asm/sections.h> 62#include <asm/irq.h>
63#include <asm/div64.h>
64#ifdef CONFIG_PPC64
68#include <asm/systemcfg.h> 65#include <asm/systemcfg.h>
69#include <asm/firmware.h> 66#include <asm/firmware.h>
67#endif
68#ifdef CONFIG_PPC_ISERIES
69#include <asm/iSeries/ItLpQueue.h>
70#include <asm/iSeries/HvCallXm.h>
71#endif
70 72
71u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 73u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
72 74
@@ -81,27 +83,37 @@ unsigned long iSeries_recal_tb = 0;
81static unsigned long first_settimeofday = 1; 83static unsigned long first_settimeofday = 1;
82#endif 84#endif
83 85
86/* The decrementer counts down by 128 every 128ns on a 601. */
87#define DECREMENTER_COUNT_601 (1000000000 / HZ)
88
84#define XSEC_PER_SEC (1024*1024) 89#define XSEC_PER_SEC (1024*1024)
85 90
91#ifdef CONFIG_PPC64
92#define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
93#else
94/* compute ((xsec << 12) * max) >> 32 */
95#define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
96#endif
97
86unsigned long tb_ticks_per_jiffy; 98unsigned long tb_ticks_per_jiffy;
87unsigned long tb_ticks_per_usec = 100; /* sane default */ 99unsigned long tb_ticks_per_usec = 100; /* sane default */
88EXPORT_SYMBOL(tb_ticks_per_usec); 100EXPORT_SYMBOL(tb_ticks_per_usec);
89unsigned long tb_ticks_per_sec; 101unsigned long tb_ticks_per_sec;
90unsigned long tb_to_xs; 102u64 tb_to_xs;
91unsigned tb_to_us; 103unsigned tb_to_us;
92unsigned long processor_freq; 104unsigned long processor_freq;
93DEFINE_SPINLOCK(rtc_lock); 105DEFINE_SPINLOCK(rtc_lock);
94EXPORT_SYMBOL_GPL(rtc_lock); 106EXPORT_SYMBOL_GPL(rtc_lock);
95 107
96unsigned long tb_to_ns_scale; 108u64 tb_to_ns_scale;
97unsigned long tb_to_ns_shift; 109unsigned tb_to_ns_shift;
98 110
99struct gettimeofday_struct do_gtod; 111struct gettimeofday_struct do_gtod;
100 112
101extern unsigned long wall_jiffies; 113extern unsigned long wall_jiffies;
102extern int smp_tb_synchronized;
103 114
104extern struct timezone sys_tz; 115extern struct timezone sys_tz;
116static long timezone_offset;
105 117
106void ppc_adjtimex(void); 118void ppc_adjtimex(void);
107 119
@@ -110,6 +122,10 @@ static unsigned adjusting_time = 0;
110unsigned long ppc_proc_freq; 122unsigned long ppc_proc_freq;
111unsigned long ppc_tb_freq; 123unsigned long ppc_tb_freq;
112 124
125#ifdef CONFIG_PPC32 /* XXX for now */
126#define boot_cpuid 0
127#endif
128
113static __inline__ void timer_check_rtc(void) 129static __inline__ void timer_check_rtc(void)
114{ 130{
115 /* 131 /*
@@ -129,30 +145,30 @@ static __inline__ void timer_check_rtc(void)
129 * seconds like on Intel to avoid problems with non UTC clocks. 145 * seconds like on Intel to avoid problems with non UTC clocks.
130 */ 146 */
131 if (ntp_synced() && 147 if (ntp_synced() &&
132 xtime.tv_sec - last_rtc_update >= 659 && 148 xtime.tv_sec - last_rtc_update >= 659 &&
133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && 149 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
134 jiffies - wall_jiffies == 1) { 150 jiffies - wall_jiffies == 1) {
135 struct rtc_time tm; 151 struct rtc_time tm;
136 to_tm(xtime.tv_sec+1, &tm); 152 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
137 tm.tm_year -= 1900; 153 tm.tm_year -= 1900;
138 tm.tm_mon -= 1; 154 tm.tm_mon -= 1;
139 if (ppc_md.set_rtc_time(&tm) == 0) 155 if (ppc_md.set_rtc_time(&tm) == 0)
140 last_rtc_update = xtime.tv_sec+1; 156 last_rtc_update = xtime.tv_sec + 1;
141 else 157 else
142 /* Try again one minute later */ 158 /* Try again one minute later */
143 last_rtc_update += 60; 159 last_rtc_update += 60;
144 } 160 }
145} 161}
146 162
147/* 163/*
148 * This version of gettimeofday has microsecond resolution. 164 * This version of gettimeofday has microsecond resolution.
149 */ 165 */
150static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val) 166static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
151{ 167{
152 unsigned long sec, usec, tb_ticks; 168 unsigned long sec, usec;
153 unsigned long xsec, tb_xsec; 169 u64 tb_ticks, xsec;
154 struct gettimeofday_vars * temp_varp; 170 struct gettimeofday_vars *temp_varp;
155 unsigned long temp_tb_to_xs, temp_stamp_xsec; 171 u64 temp_tb_to_xs, temp_stamp_xsec;
156 172
157 /* 173 /*
158 * These calculations are faster (gets rid of divides) 174 * These calculations are faster (gets rid of divides)
@@ -164,11 +180,10 @@ static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
164 tb_ticks = tb_val - temp_varp->tb_orig_stamp; 180 tb_ticks = tb_val - temp_varp->tb_orig_stamp;
165 temp_tb_to_xs = temp_varp->tb_to_xs; 181 temp_tb_to_xs = temp_varp->tb_to_xs;
166 temp_stamp_xsec = temp_varp->stamp_xsec; 182 temp_stamp_xsec = temp_varp->stamp_xsec;
167 tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs ); 183 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
168 xsec = temp_stamp_xsec + tb_xsec;
169 sec = xsec / XSEC_PER_SEC; 184 sec = xsec / XSEC_PER_SEC;
170 xsec -= sec * XSEC_PER_SEC; 185 usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
171 usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC; 186 usec = SCALE_XSEC(usec, 1000000);
172 187
173 tv->tv_sec = sec; 188 tv->tv_sec = sec;
174 tv->tv_usec = usec; 189 tv->tv_usec = usec;
@@ -185,6 +200,8 @@ EXPORT_SYMBOL(do_gettimeofday);
185 200
186static inline void timer_sync_xtime(unsigned long cur_tb) 201static inline void timer_sync_xtime(unsigned long cur_tb)
187{ 202{
203#ifdef CONFIG_PPC64
204 /* why do we do this? */
188 struct timeval my_tv; 205 struct timeval my_tv;
189 206
190 __do_gettimeofday(&my_tv, cur_tb); 207 __do_gettimeofday(&my_tv, cur_tb);
@@ -193,47 +210,74 @@ static inline void timer_sync_xtime(unsigned long cur_tb)
193 xtime.tv_sec = my_tv.tv_sec; 210 xtime.tv_sec = my_tv.tv_sec;
194 xtime.tv_nsec = my_tv.tv_usec * 1000; 211 xtime.tv_nsec = my_tv.tv_usec * 1000;
195 } 212 }
213#endif
196} 214}
197 215
198/* 216/*
199 * When the timebase - tb_orig_stamp gets too big, we do a manipulation 217 * There are two copies of tb_to_xs and stamp_xsec so that no
200 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the 218 * lock is needed to access and use these values in
201 * difference tb - tb_orig_stamp small enough to always fit inside a 219 * do_gettimeofday. We alternate the copies and as long as a
202 * 32 bits number. This is a requirement of our fast 32 bits userland 220 * reasonable time elapses between changes, there will never
203 * implementation in the vdso. If we "miss" a call to this function 221 * be inconsistent values. ntpd has a minimum of one minute
204 * (interrupt latency, CPU locked in a spinlock, ...) and we end up 222 * between updates.
205 * with a too big difference, then the vdso will fallback to calling
206 * the syscall
207 */ 223 */
208static __inline__ void timer_recalc_offset(unsigned long cur_tb) 224static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
225 u64 new_tb_to_xs)
209{ 226{
210 struct gettimeofday_vars * temp_varp;
211 unsigned temp_idx; 227 unsigned temp_idx;
212 unsigned long offset, new_stamp_xsec, new_tb_orig_stamp; 228 struct gettimeofday_vars *temp_varp;
213
214 if (((cur_tb - do_gtod.varp->tb_orig_stamp) & 0x80000000u) == 0)
215 return;
216 229
217 temp_idx = (do_gtod.var_idx == 0); 230 temp_idx = (do_gtod.var_idx == 0);
218 temp_varp = &do_gtod.vars[temp_idx]; 231 temp_varp = &do_gtod.vars[temp_idx];
219 232
220 new_tb_orig_stamp = cur_tb; 233 temp_varp->tb_to_xs = new_tb_to_xs;
221 offset = new_tb_orig_stamp - do_gtod.varp->tb_orig_stamp; 234 temp_varp->tb_orig_stamp = new_tb_stamp;
222 new_stamp_xsec = do_gtod.varp->stamp_xsec + mulhdu(offset, do_gtod.varp->tb_to_xs);
223
224 temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs;
225 temp_varp->tb_orig_stamp = new_tb_orig_stamp;
226 temp_varp->stamp_xsec = new_stamp_xsec; 235 temp_varp->stamp_xsec = new_stamp_xsec;
227 smp_mb(); 236 smp_mb();
228 do_gtod.varp = temp_varp; 237 do_gtod.varp = temp_varp;
229 do_gtod.var_idx = temp_idx; 238 do_gtod.var_idx = temp_idx;
230 239
240#ifdef CONFIG_PPC64
241 /*
242 * tb_update_count is used to allow the userspace gettimeofday code
243 * to assure itself that it sees a consistent view of the tb_to_xs and
244 * stamp_xsec variables. It reads the tb_update_count, then reads
245 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
246 * the two values of tb_update_count match and are even then the
247 * tb_to_xs and stamp_xsec values are consistent. If not, then it
248 * loops back and reads them again until this criteria is met.
249 */
231 ++(systemcfg->tb_update_count); 250 ++(systemcfg->tb_update_count);
232 smp_wmb(); 251 smp_wmb();
233 systemcfg->tb_orig_stamp = new_tb_orig_stamp; 252 systemcfg->tb_orig_stamp = new_tb_stamp;
234 systemcfg->stamp_xsec = new_stamp_xsec; 253 systemcfg->stamp_xsec = new_stamp_xsec;
254 systemcfg->tb_to_xs = new_tb_to_xs;
235 smp_wmb(); 255 smp_wmb();
236 ++(systemcfg->tb_update_count); 256 ++(systemcfg->tb_update_count);
257#endif
258}
259
260/*
261 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
262 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
263 * difference tb - tb_orig_stamp small enough to always fit inside a
264 * 32 bits number. This is a requirement of our fast 32 bits userland
265 * implementation in the vdso. If we "miss" a call to this function
266 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
267 * with a too big difference, then the vdso will fallback to calling
268 * the syscall
269 */
270static __inline__ void timer_recalc_offset(u64 cur_tb)
271{
272 unsigned long offset;
273 u64 new_stamp_xsec;
274
275 offset = cur_tb - do_gtod.varp->tb_orig_stamp;
276 if ((offset & 0x80000000u) == 0)
277 return;
278 new_stamp_xsec = do_gtod.varp->stamp_xsec
279 + mulhdu(offset, do_gtod.varp->tb_to_xs);
280 update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
237} 281}
238 282
239#ifdef CONFIG_SMP 283#ifdef CONFIG_SMP
@@ -313,26 +357,46 @@ static void iSeries_tb_recal(void)
313 * call will not be needed) 357 * call will not be needed)
314 */ 358 */
315 359
316unsigned long tb_last_stamp __cacheline_aligned_in_smp; 360u64 tb_last_stamp __cacheline_aligned_in_smp;
361
362/*
363 * Note that on ppc32 this only stores the bottom 32 bits of
364 * the timebase value, but that's enough to tell when a jiffy
365 * has passed.
366 */
367DEFINE_PER_CPU(unsigned long, last_jiffy);
317 368
318/* 369/*
319 * timer_interrupt - gets called when the decrementer overflows, 370 * timer_interrupt - gets called when the decrementer overflows,
320 * with interrupts disabled. 371 * with interrupts disabled.
321 */ 372 */
322int timer_interrupt(struct pt_regs * regs) 373void timer_interrupt(struct pt_regs * regs)
323{ 374{
324 int next_dec; 375 int next_dec;
325 unsigned long cur_tb; 376 int cpu = smp_processor_id();
326 struct paca_struct *lpaca = get_paca(); 377 unsigned long ticks;
327 unsigned long cpu = smp_processor_id(); 378
379#ifdef CONFIG_PPC32
380 if (atomic_read(&ppc_n_lost_interrupts) != 0)
381 do_IRQ(regs);
382#endif
328 383
329 irq_enter(); 384 irq_enter();
330 385
331 profile_tick(CPU_PROFILING, regs); 386 profile_tick(CPU_PROFILING, regs);
332 387
333 lpaca->lppaca.int_dword.fields.decr_int = 0; 388#ifdef CONFIG_PPC_ISERIES
389 get_paca()->lppaca.int_dword.fields.decr_int = 0;
390#endif
391
392 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
393 >= tb_ticks_per_jiffy) {
394 /* Update last_jiffy */
395 per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
396 /* Handle RTCL overflow on 601 */
397 if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
398 per_cpu(last_jiffy, cpu) -= 1000000000;
334 399
335 while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
336 /* 400 /*
337 * We cannot disable the decrementer, so in the period 401 * We cannot disable the decrementer, so in the period
338 * between this cpu's being marked offline in cpu_online_map 402 * between this cpu's being marked offline in cpu_online_map
@@ -342,27 +406,26 @@ int timer_interrupt(struct pt_regs * regs)
342 */ 406 */
343 if (!cpu_is_offline(cpu)) 407 if (!cpu_is_offline(cpu))
344 update_process_times(user_mode(regs)); 408 update_process_times(user_mode(regs));
409
345 /* 410 /*
346 * No need to check whether cpu is offline here; boot_cpuid 411 * No need to check whether cpu is offline here; boot_cpuid
347 * should have been fixed up by now. 412 * should have been fixed up by now.
348 */ 413 */
349 if (cpu == boot_cpuid) { 414 if (cpu != boot_cpuid)
350 write_seqlock(&xtime_lock); 415 continue;
351 tb_last_stamp = lpaca->next_jiffy_update_tb; 416
352 timer_recalc_offset(lpaca->next_jiffy_update_tb); 417 write_seqlock(&xtime_lock);
353 do_timer(regs); 418 tb_last_stamp += tb_ticks_per_jiffy;
354 timer_sync_xtime(lpaca->next_jiffy_update_tb); 419 timer_recalc_offset(tb_last_stamp);
355 timer_check_rtc(); 420 do_timer(regs);
356 write_sequnlock(&xtime_lock); 421 timer_sync_xtime(tb_last_stamp);
357 if ( adjusting_time && (time_adjust == 0) ) 422 timer_check_rtc();
358 ppc_adjtimex(); 423 write_sequnlock(&xtime_lock);
359 } 424 if (adjusting_time && (time_adjust == 0))
360 lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy; 425 ppc_adjtimex();
361 } 426 }
362 427
363 next_dec = lpaca->next_jiffy_update_tb - cur_tb; 428 next_dec = tb_ticks_per_jiffy - ticks;
364 if (next_dec > lpaca->default_decr)
365 next_dec = lpaca->default_decr;
366 set_dec(next_dec); 429 set_dec(next_dec);
367 430
368#ifdef CONFIG_PPC_ISERIES 431#ifdef CONFIG_PPC_ISERIES
@@ -370,16 +433,46 @@ int timer_interrupt(struct pt_regs * regs)
370 process_hvlpevents(regs); 433 process_hvlpevents(regs);
371#endif 434#endif
372 435
436#ifdef CONFIG_PPC64
373 /* collect purr register values often, for accurate calculations */ 437 /* collect purr register values often, for accurate calculations */
374 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 438 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
375 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 439 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
376 cu->current_tb = mfspr(SPRN_PURR); 440 cu->current_tb = mfspr(SPRN_PURR);
377 } 441 }
442#endif
378 443
379 irq_exit(); 444 irq_exit();
445}
446
447void wakeup_decrementer(void)
448{
449 int i;
450
451 set_dec(tb_ticks_per_jiffy);
452 /*
453 * We don't expect this to be called on a machine with a 601,
454 * so using get_tbl is fine.
455 */
456 tb_last_stamp = get_tb();
457 for_each_cpu(i)
458 per_cpu(last_jiffy, i) = tb_last_stamp;
459}
380 460
381 return 1; 461#ifdef CONFIG_SMP
462void __init smp_space_timers(unsigned int max_cpus)
463{
464 int i;
465 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
466 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
467
468 for_each_cpu(i) {
469 if (i != boot_cpuid) {
470 previous_tb += offset;
471 per_cpu(last_jiffy, i) = previous_tb;
472 }
473 }
382} 474}
475#endif
383 476
384/* 477/*
385 * Scheduler clock - returns current time in nanosec units. 478 * Scheduler clock - returns current time in nanosec units.
@@ -398,23 +491,24 @@ int do_settimeofday(struct timespec *tv)
398 time_t wtm_sec, new_sec = tv->tv_sec; 491 time_t wtm_sec, new_sec = tv->tv_sec;
399 long wtm_nsec, new_nsec = tv->tv_nsec; 492 long wtm_nsec, new_nsec = tv->tv_nsec;
400 unsigned long flags; 493 unsigned long flags;
401 unsigned long delta_xsec;
402 long int tb_delta; 494 long int tb_delta;
403 unsigned long new_xsec; 495 u64 new_xsec;
404 496
405 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 497 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
406 return -EINVAL; 498 return -EINVAL;
407 499
408 write_seqlock_irqsave(&xtime_lock, flags); 500 write_seqlock_irqsave(&xtime_lock, flags);
409 /* Updating the RTC is not the job of this code. If the time is 501
410 * stepped under NTP, the RTC will be update after STA_UNSYNC 502 /*
411 * is cleared. Tool like clock/hwclock either copy the RTC 503 * Updating the RTC is not the job of this code. If the time is
504 * stepped under NTP, the RTC will be updated after STA_UNSYNC
505 * is cleared. Tools like clock/hwclock either copy the RTC
412 * to the system time, in which case there is no point in writing 506 * to the system time, in which case there is no point in writing
413 * to the RTC again, or write to the RTC but then they don't call 507 * to the RTC again, or write to the RTC but then they don't call
414 * settimeofday to perform this operation. 508 * settimeofday to perform this operation.
415 */ 509 */
416#ifdef CONFIG_PPC_ISERIES 510#ifdef CONFIG_PPC_ISERIES
417 if ( first_settimeofday ) { 511 if (first_settimeofday) {
418 iSeries_tb_recal(); 512 iSeries_tb_recal();
419 first_settimeofday = 0; 513 first_settimeofday = 0;
420 } 514 }
@@ -422,7 +516,7 @@ int do_settimeofday(struct timespec *tv)
422 tb_delta = tb_ticks_since(tb_last_stamp); 516 tb_delta = tb_ticks_since(tb_last_stamp);
423 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; 517 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
424 518
425 new_nsec -= tb_delta / tb_ticks_per_usec / 1000; 519 new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta);
426 520
427 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); 521 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
428 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); 522 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
@@ -437,28 +531,15 @@ int do_settimeofday(struct timespec *tv)
437 531
438 ntp_clear(); 532 ntp_clear();
439 533
440 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp), 534 new_xsec = (u64)new_nsec * XSEC_PER_SEC;
441 do_gtod.varp->tb_to_xs ); 535 do_div(new_xsec, NSEC_PER_SEC);
442 536 new_xsec += (u64)new_sec * XSEC_PER_SEC;
443 new_xsec = (new_nsec * XSEC_PER_SEC) / NSEC_PER_SEC; 537 update_gtod(tb_last_stamp, new_xsec, do_gtod.varp->tb_to_xs);
444 new_xsec += new_sec * XSEC_PER_SEC;
445 if ( new_xsec > delta_xsec ) {
446 do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
447 systemcfg->stamp_xsec = new_xsec - delta_xsec;
448 }
449 else {
450 /* This is only for the case where the user is setting the time
451 * way back to a time such that the boot time would have been
452 * before 1970 ... eg. we booted ten days ago, and we are setting
453 * the time to Jan 5, 1970 */
454 do_gtod.varp->stamp_xsec = new_xsec;
455 do_gtod.varp->tb_orig_stamp = tb_last_stamp;
456 systemcfg->stamp_xsec = new_xsec;
457 systemcfg->tb_orig_stamp = tb_last_stamp;
458 }
459 538
539#ifdef CONFIG_PPC64
460 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest; 540 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
461 systemcfg->tz_dsttime = sys_tz.tz_dsttime; 541 systemcfg->tz_dsttime = sys_tz.tz_dsttime;
542#endif
462 543
463 write_sequnlock_irqrestore(&xtime_lock, flags); 544 write_sequnlock_irqrestore(&xtime_lock, flags);
464 clock_was_set(); 545 clock_was_set();
@@ -467,11 +548,9 @@ int do_settimeofday(struct timespec *tv)
467 548
468EXPORT_SYMBOL(do_settimeofday); 549EXPORT_SYMBOL(do_settimeofday);
469 550
470#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA)
471void __init generic_calibrate_decr(void) 551void __init generic_calibrate_decr(void)
472{ 552{
473 struct device_node *cpu; 553 struct device_node *cpu;
474 struct div_result divres;
475 unsigned int *fp; 554 unsigned int *fp;
476 int node_found; 555 int node_found;
477 556
@@ -510,33 +589,51 @@ void __init generic_calibrate_decr(void)
510 "(not found)\n"); 589 "(not found)\n");
511 590
512 of_node_put(cpu); 591 of_node_put(cpu);
592}
513 593
514 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n", 594unsigned long get_boot_time(void)
515 ppc_tb_freq/1000000, ppc_tb_freq%1000000); 595{
516 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n", 596 struct rtc_time tm;
517 ppc_proc_freq/1000000, ppc_proc_freq%1000000);
518
519 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
520 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
521 tb_ticks_per_usec = ppc_tb_freq / 1000000;
522 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
523 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
524 tb_to_xs = divres.result_low;
525 597
526 setup_default_decr(); 598 if (ppc_md.get_boot_time)
599 return ppc_md.get_boot_time();
600 if (!ppc_md.get_rtc_time)
601 return 0;
602 ppc_md.get_rtc_time(&tm);
603 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
604 tm.tm_hour, tm.tm_min, tm.tm_sec);
527} 605}
528#endif
529 606
607/* This function is only called on the boot processor */
530void __init time_init(void) 608void __init time_init(void)
531{ 609{
532 /* This function is only called on the boot processor */
533 unsigned long flags; 610 unsigned long flags;
534 struct rtc_time tm; 611 unsigned long tm = 0;
535 struct div_result res; 612 struct div_result res;
536 unsigned long scale, shift; 613 u64 scale;
614 unsigned shift;
615
616 if (ppc_md.time_init != NULL)
617 timezone_offset = ppc_md.time_init();
537 618
538 ppc_md.calibrate_decr(); 619 ppc_md.calibrate_decr();
539 620
621 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
622 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
623 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
624 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
625
626 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
627 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
628 tb_ticks_per_usec = ppc_tb_freq / 1000000;
629 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
630 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
631 tb_to_xs = res.result_low;
632
633#ifdef CONFIG_PPC64
634 get_paca()->default_decr = tb_ticks_per_jiffy;
635#endif
636
540 /* 637 /*
541 * Compute scale factor for sched_clock. 638 * Compute scale factor for sched_clock.
542 * The calibrate_decr() function has set tb_ticks_per_sec, 639 * The calibrate_decr() function has set tb_ticks_per_sec,
@@ -559,29 +656,37 @@ void __init time_init(void)
559#ifdef CONFIG_PPC_ISERIES 656#ifdef CONFIG_PPC_ISERIES
560 if (!piranha_simulator) 657 if (!piranha_simulator)
561#endif 658#endif
562 ppc_md.get_boot_time(&tm); 659 tm = get_boot_time();
563 660
564 write_seqlock_irqsave(&xtime_lock, flags); 661 write_seqlock_irqsave(&xtime_lock, flags);
565 xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 662 xtime.tv_sec = tm;
566 tm.tm_hour, tm.tm_min, tm.tm_sec); 663 xtime.tv_nsec = 0;
567 tb_last_stamp = get_tb(); 664 tb_last_stamp = get_tb();
568 do_gtod.varp = &do_gtod.vars[0]; 665 do_gtod.varp = &do_gtod.vars[0];
569 do_gtod.var_idx = 0; 666 do_gtod.var_idx = 0;
570 do_gtod.varp->tb_orig_stamp = tb_last_stamp; 667 do_gtod.varp->tb_orig_stamp = tb_last_stamp;
571 get_paca()->next_jiffy_update_tb = tb_last_stamp + tb_ticks_per_jiffy; 668 __get_cpu_var(last_jiffy) = tb_last_stamp;
572 do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 669 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
573 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 670 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
574 do_gtod.varp->tb_to_xs = tb_to_xs; 671 do_gtod.varp->tb_to_xs = tb_to_xs;
575 do_gtod.tb_to_us = tb_to_us; 672 do_gtod.tb_to_us = tb_to_us;
673#ifdef CONFIG_PPC64
576 systemcfg->tb_orig_stamp = tb_last_stamp; 674 systemcfg->tb_orig_stamp = tb_last_stamp;
577 systemcfg->tb_update_count = 0; 675 systemcfg->tb_update_count = 0;
578 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; 676 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
579 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 677 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
580 systemcfg->tb_to_xs = tb_to_xs; 678 systemcfg->tb_to_xs = tb_to_xs;
679#endif
581 680
582 time_freq = 0; 681 time_freq = 0;
583 682
584 xtime.tv_nsec = 0; 683 /* If platform provided a timezone (pmac), we correct the time */
684 if (timezone_offset) {
685 sys_tz.tz_minuteswest = -timezone_offset / 60;
686 sys_tz.tz_dsttime = 0;
687 xtime.tv_sec -= timezone_offset;
688 }
689
585 last_rtc_update = xtime.tv_sec; 690 last_rtc_update = xtime.tv_sec;
586 set_normalized_timespec(&wall_to_monotonic, 691 set_normalized_timespec(&wall_to_monotonic,
587 -xtime.tv_sec, -xtime.tv_nsec); 692 -xtime.tv_sec, -xtime.tv_nsec);
@@ -604,25 +709,28 @@ void __init time_init(void)
604 709
605void ppc_adjtimex(void) 710void ppc_adjtimex(void)
606{ 711{
607 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, new_tb_to_xs, new_xsec, new_stamp_xsec; 712#ifdef CONFIG_PPC64
713 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
714 new_tb_to_xs, new_xsec, new_stamp_xsec;
608 unsigned long tb_ticks_per_sec_delta; 715 unsigned long tb_ticks_per_sec_delta;
609 long delta_freq, ltemp; 716 long delta_freq, ltemp;
610 struct div_result divres; 717 struct div_result divres;
611 unsigned long flags; 718 unsigned long flags;
612 struct gettimeofday_vars * temp_varp;
613 unsigned temp_idx;
614 long singleshot_ppm = 0; 719 long singleshot_ppm = 0;
615 720
616 /* Compute parts per million frequency adjustment to accomplish the time adjustment 721 /*
617 implied by time_offset to be applied over the elapsed time indicated by time_constant. 722 * Compute parts per million frequency adjustment to
618 Use SHIFT_USEC to get it into the same units as time_freq. */ 723 * accomplish the time adjustment implied by time_offset to be
724 * applied over the elapsed time indicated by time_constant.
725 * Use SHIFT_USEC to get it into the same units as
726 * time_freq.
727 */
619 if ( time_offset < 0 ) { 728 if ( time_offset < 0 ) {
620 ltemp = -time_offset; 729 ltemp = -time_offset;
621 ltemp <<= SHIFT_USEC - SHIFT_UPDATE; 730 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
622 ltemp >>= SHIFT_KG + time_constant; 731 ltemp >>= SHIFT_KG + time_constant;
623 ltemp = -ltemp; 732 ltemp = -ltemp;
624 } 733 } else {
625 else {
626 ltemp = time_offset; 734 ltemp = time_offset;
627 ltemp <<= SHIFT_USEC - SHIFT_UPDATE; 735 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
628 ltemp >>= SHIFT_KG + time_constant; 736 ltemp >>= SHIFT_KG + time_constant;
@@ -639,7 +747,10 @@ void ppc_adjtimex(void)
639 747
640 adjusting_time = 1; 748 adjusting_time = 1;
641 749
642 /* Compute parts per million frequency adjustment to match time_adjust */ 750 /*
751 * Compute parts per million frequency adjustment
752 * to match time_adjust
753 */
643 singleshot_ppm = tickadj * HZ; 754 singleshot_ppm = tickadj * HZ;
644 /* 755 /*
645 * The adjustment should be tickadj*HZ to match the code in 756 * The adjustment should be tickadj*HZ to match the code in
@@ -647,7 +758,7 @@ void ppc_adjtimex(void)
647 * large. 3/4 of tickadj*HZ seems about right 758 * large. 3/4 of tickadj*HZ seems about right
648 */ 759 */
649 singleshot_ppm -= singleshot_ppm / 4; 760 singleshot_ppm -= singleshot_ppm / 4;
650 /* Use SHIFT_USEC to get it into the same units as time_freq */ 761 /* Use SHIFT_USEC to get it into the same units as time_freq */
651 singleshot_ppm <<= SHIFT_USEC; 762 singleshot_ppm <<= SHIFT_USEC;
652 if ( time_adjust < 0 ) 763 if ( time_adjust < 0 )
653 singleshot_ppm = -singleshot_ppm; 764 singleshot_ppm = -singleshot_ppm;
@@ -663,7 +774,10 @@ void ppc_adjtimex(void)
663 /* Add up all of the frequency adjustments */ 774 /* Add up all of the frequency adjustments */
664 delta_freq = time_freq + ltemp + singleshot_ppm; 775 delta_freq = time_freq + ltemp + singleshot_ppm;
665 776
666 /* Compute a new value for tb_ticks_per_sec based on the frequency adjustment */ 777 /*
778 * Compute a new value for tb_ticks_per_sec based on
779 * the frequency adjustment
780 */
667 den = 1000000 * (1 << (SHIFT_USEC - 8)); 781 den = 1000000 * (1 << (SHIFT_USEC - 8));
668 if ( delta_freq < 0 ) { 782 if ( delta_freq < 0 ) {
669 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; 783 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
@@ -678,61 +792,37 @@ void ppc_adjtimex(void)
678 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); 792 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
679 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); 793 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
680#endif 794#endif
681 795
682 /* Compute a new value of tb_to_xs (used to convert tb to microseconds and a new value of 796 /*
683 stamp_xsec which is the time (in 1/2^20 second units) corresponding to tb_orig_stamp. This 797 * Compute a new value of tb_to_xs (used to convert tb to
684 new value of stamp_xsec compensates for the change in frequency (implied by the new tb_to_xs) 798 * microseconds) and a new value of stamp_xsec which is the
685 which guarantees that the current time remains the same */ 799 * time (in 1/2^20 second units) corresponding to
800 * tb_orig_stamp. This new value of stamp_xsec compensates
801 * for the change in frequency (implied by the new tb_to_xs)
802 * which guarantees that the current time remains the same.
803 */
686 write_seqlock_irqsave( &xtime_lock, flags ); 804 write_seqlock_irqsave( &xtime_lock, flags );
687 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; 805 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
688 div128_by_32( 1024*1024, 0, new_tb_ticks_per_sec, &divres ); 806 div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
689 new_tb_to_xs = divres.result_low; 807 new_tb_to_xs = divres.result_low;
690 new_xsec = mulhdu( tb_ticks, new_tb_to_xs ); 808 new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
691 809
692 old_xsec = mulhdu( tb_ticks, do_gtod.varp->tb_to_xs ); 810 old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
693 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; 811 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
694 812
695 /* There are two copies of tb_to_xs and stamp_xsec so that no lock is needed to access and use these 813 update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
696 values in do_gettimeofday. We alternate the copies and as long as a reasonable time elapses between
697 changes, there will never be inconsistent values. ntpd has a minimum of one minute between updates */
698
699 temp_idx = (do_gtod.var_idx == 0);
700 temp_varp = &do_gtod.vars[temp_idx];
701
702 temp_varp->tb_to_xs = new_tb_to_xs;
703 temp_varp->stamp_xsec = new_stamp_xsec;
704 temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp;
705 smp_mb();
706 do_gtod.varp = temp_varp;
707 do_gtod.var_idx = temp_idx;
708
709 /*
710 * tb_update_count is used to allow the problem state gettimeofday code
711 * to assure itself that it sees a consistent view of the tb_to_xs and
712 * stamp_xsec variables. It reads the tb_update_count, then reads
713 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
714 * the two values of tb_update_count match and are even then the
715 * tb_to_xs and stamp_xsec values are consistent. If not, then it
716 * loops back and reads them again until this criteria is met.
717 */
718 ++(systemcfg->tb_update_count);
719 smp_wmb();
720 systemcfg->tb_to_xs = new_tb_to_xs;
721 systemcfg->stamp_xsec = new_stamp_xsec;
722 smp_wmb();
723 ++(systemcfg->tb_update_count);
724 814
725 write_sequnlock_irqrestore( &xtime_lock, flags ); 815 write_sequnlock_irqrestore( &xtime_lock, flags );
726 816#endif /* CONFIG_PPC64 */
727} 817}
728 818
729 819
730#define TICK_SIZE tick
731#define FEBRUARY 2 820#define FEBRUARY 2
732#define STARTOFTIME 1970 821#define STARTOFTIME 1970
733#define SECDAY 86400L 822#define SECDAY 86400L
734#define SECYR (SECDAY * 365) 823#define SECYR (SECDAY * 365)
735#define leapyear(year) ((year) % 4 == 0) 824#define leapyear(year) ((year) % 4 == 0 && \
825 ((year) % 100 != 0 || (year) % 400 == 0))
736#define days_in_year(a) (leapyear(a) ? 366 : 365) 826#define days_in_year(a) (leapyear(a) ? 366 : 365)
737#define days_in_month(a) (month_days[(a) - 1]) 827#define days_in_month(a) (month_days[(a) - 1])
738 828
@@ -750,37 +840,25 @@ void GregorianDay(struct rtc_time * tm)
750 int day; 840 int day;
751 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; 841 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
752 842
753 lastYear=tm->tm_year-1; 843 lastYear = tm->tm_year - 1;
754 844
755 /* 845 /*
756 * Number of leap corrections to apply up to end of last year 846 * Number of leap corrections to apply up to end of last year
757 */ 847 */
758 leapsToDate = lastYear/4 - lastYear/100 + lastYear/400; 848 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
759 849
760 /* 850 /*
761 * This year is a leap year if it is divisible by 4 except when it is 851 * This year is a leap year if it is divisible by 4 except when it is
762 * divisible by 100 unless it is divisible by 400 852 * divisible by 100 unless it is divisible by 400
763 * 853 *
764 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be 854 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
765 */ 855 */
766 if((tm->tm_year%4==0) && 856 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
767 ((tm->tm_year%100!=0) || (tm->tm_year%400==0)) &&
768 (tm->tm_mon>2))
769 {
770 /*
771 * We are past Feb. 29 in a leap year
772 */
773 day=1;
774 }
775 else
776 {
777 day=0;
778 }
779 857
780 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + 858 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
781 tm->tm_mday; 859 tm->tm_mday;
782 860
783 tm->tm_wday=day%7; 861 tm->tm_wday = day % 7;
784} 862}
785 863
786void to_tm(int tim, struct rtc_time * tm) 864void to_tm(int tim, struct rtc_time * tm)
@@ -826,14 +904,16 @@ void to_tm(int tim, struct rtc_time * tm)
826 * oscillators and the precision with which the timebase frequency 904 * oscillators and the precision with which the timebase frequency
827 * is measured but does not harm. 905 * is measured but does not harm.
828 */ 906 */
829unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) { 907unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
908{
830 unsigned mlt=0, tmp, err; 909 unsigned mlt=0, tmp, err;
831 /* No concern for performance, it's done once: use a stupid 910 /* No concern for performance, it's done once: use a stupid
832 * but safe and compact method to find the multiplier. 911 * but safe and compact method to find the multiplier.
833 */ 912 */
834 913
835 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) { 914 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
836 if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp; 915 if (mulhwu(inscale, mlt|tmp) < outscale)
916 mlt |= tmp;
837 } 917 }
838 918
839 /* We might still be off by 1 for the best approximation. 919 /* We might still be off by 1 for the best approximation.
@@ -843,39 +923,41 @@ unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
843 * some might have been forgotten in the test however. 923 * some might have been forgotten in the test however.
844 */ 924 */
845 925
846 err = inscale*(mlt+1); 926 err = inscale * (mlt+1);
847 if (err <= inscale/2) mlt++; 927 if (err <= inscale/2)
928 mlt++;
848 return mlt; 929 return mlt;
849 } 930}
850 931
851/* 932/*
852 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit 933 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
853 * result. 934 * result.
854 */ 935 */
855 936void div128_by_32(u64 dividend_high, u64 dividend_low,
856void div128_by_32( unsigned long dividend_high, unsigned long dividend_low, 937 unsigned divisor, struct div_result *dr)
857 unsigned divisor, struct div_result *dr )
858{ 938{
859 unsigned long a,b,c,d, w,x,y,z, ra,rb,rc; 939 unsigned long a, b, c, d;
940 unsigned long w, x, y, z;
941 u64 ra, rb, rc;
860 942
861 a = dividend_high >> 32; 943 a = dividend_high >> 32;
862 b = dividend_high & 0xffffffff; 944 b = dividend_high & 0xffffffff;
863 c = dividend_low >> 32; 945 c = dividend_low >> 32;
864 d = dividend_low & 0xffffffff; 946 d = dividend_low & 0xffffffff;
865 947
866 w = a/divisor; 948 w = a / divisor;
867 ra = (a - (w * divisor)) << 32; 949 ra = ((u64)(a - (w * divisor)) << 32) + b;
868 950
869 x = (ra + b)/divisor; 951 rb = ((u64) do_div(ra, divisor) << 32) + c;
870 rb = ((ra + b) - (x * divisor)) << 32; 952 x = ra;
871 953
872 y = (rb + c)/divisor; 954 rc = ((u64) do_div(rb, divisor) << 32) + d;
873 rc = ((rb + c) - (y * divisor)) << 32; 955 y = rb;
874 956
875 z = (rc + d)/divisor; 957 do_div(rc, divisor);
958 z = rc;
876 959
877 dr->result_high = (w << 32) + x; 960 dr->result_high = ((u64)w << 32) + x;
878 dr->result_low = (y << 32) + z; 961 dr->result_low = ((u64)y << 32) + z;
879 962
880} 963}
881
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
new file mode 100644
index 000000000000..f87580382da4
--- /dev/null
+++ b/arch/powerpc/kernel/traps.c
@@ -0,0 +1,1101 @@
1/*
2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@samba.org)
11 */
12
13/*
14 * This file handles the architecture-dependent parts of hardware exceptions
15 */
16
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/user.h>
27#include <linux/a.out.h>
28#include <linux/interrupt.h>
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/prctl.h>
32#include <linux/delay.h>
33#include <linux/kprobes.h>
34
35#include <asm/kdebug.h>
36#include <asm/pgtable.h>
37#include <asm/uaccess.h>
38#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/machdep.h>
41#include <asm/rtas.h>
42#include <asm/xmon.h>
43#include <asm/pmc.h>
44#ifdef CONFIG_PPC32
45#include <asm/reg.h>
46#endif
47#ifdef CONFIG_PMAC_BACKLIGHT
48#include <asm/backlight.h>
49#endif
50#ifdef CONFIG_PPC64
51#include <asm/firmware.h>
52#include <asm/processor.h>
53#include <asm/systemcfg.h>
54#endif
55
56#ifdef CONFIG_PPC64 /* XXX */
57#define _IO_BASE pci_io_base
58#endif
59
60#ifdef CONFIG_DEBUGGER
61int (*__debugger)(struct pt_regs *regs);
62int (*__debugger_ipi)(struct pt_regs *regs);
63int (*__debugger_bpt)(struct pt_regs *regs);
64int (*__debugger_sstep)(struct pt_regs *regs);
65int (*__debugger_iabr_match)(struct pt_regs *regs);
66int (*__debugger_dabr_match)(struct pt_regs *regs);
67int (*__debugger_fault_handler)(struct pt_regs *regs);
68
69EXPORT_SYMBOL(__debugger);
70EXPORT_SYMBOL(__debugger_ipi);
71EXPORT_SYMBOL(__debugger_bpt);
72EXPORT_SYMBOL(__debugger_sstep);
73EXPORT_SYMBOL(__debugger_iabr_match);
74EXPORT_SYMBOL(__debugger_dabr_match);
75EXPORT_SYMBOL(__debugger_fault_handler);
76#endif
77
78struct notifier_block *powerpc_die_chain;
79static DEFINE_SPINLOCK(die_notifier_lock);
80
81int register_die_notifier(struct notifier_block *nb)
82{
83 int err = 0;
84 unsigned long flags;
85
86 spin_lock_irqsave(&die_notifier_lock, flags);
87 err = notifier_chain_register(&powerpc_die_chain, nb);
88 spin_unlock_irqrestore(&die_notifier_lock, flags);
89 return err;
90}
91
92/*
93 * Trap & Exception support
94 */
95
96static DEFINE_SPINLOCK(die_lock);
97
98int die(const char *str, struct pt_regs *regs, long err)
99{
100 static int die_counter;
101 int nl = 0;
102
103 if (debugger(regs))
104 return 1;
105
106 console_verbose();
107 spin_lock_irq(&die_lock);
108 bust_spinlocks(1);
109#ifdef CONFIG_PMAC_BACKLIGHT
110 if (_machine == _MACH_Pmac) {
111 set_backlight_enable(1);
112 set_backlight_level(BACKLIGHT_MAX);
113 }
114#endif
115 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
116#ifdef CONFIG_PREEMPT
117 printk("PREEMPT ");
118 nl = 1;
119#endif
120#ifdef CONFIG_SMP
121 printk("SMP NR_CPUS=%d ", NR_CPUS);
122 nl = 1;
123#endif
124#ifdef CONFIG_DEBUG_PAGEALLOC
125 printk("DEBUG_PAGEALLOC ");
126 nl = 1;
127#endif
128#ifdef CONFIG_NUMA
129 printk("NUMA ");
130 nl = 1;
131#endif
132#ifdef CONFIG_PPC64
133 switch (systemcfg->platform) {
134 case PLATFORM_PSERIES:
135 printk("PSERIES ");
136 nl = 1;
137 break;
138 case PLATFORM_PSERIES_LPAR:
139 printk("PSERIES LPAR ");
140 nl = 1;
141 break;
142 case PLATFORM_ISERIES_LPAR:
143 printk("ISERIES LPAR ");
144 nl = 1;
145 break;
146 case PLATFORM_POWERMAC:
147 printk("POWERMAC ");
148 nl = 1;
149 break;
150 case PLATFORM_BPA:
151 printk("BPA ");
152 nl = 1;
153 break;
154 }
155#endif
156 if (nl)
157 printk("\n");
158 print_modules();
159 show_regs(regs);
160 bust_spinlocks(0);
161 spin_unlock_irq(&die_lock);
162
163 if (in_interrupt())
164 panic("Fatal exception in interrupt");
165
166 if (panic_on_oops) {
167#ifdef CONFIG_PPC64
168 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
169 ssleep(5);
170#endif
171 panic("Fatal exception");
172 }
173 do_exit(err);
174
175 return 0;
176}
177
178void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
179{
180 siginfo_t info;
181
182 if (!user_mode(regs)) {
183 if (die("Exception in kernel mode", regs, signr))
184 return;
185 }
186
187 memset(&info, 0, sizeof(info));
188 info.si_signo = signr;
189 info.si_code = code;
190 info.si_addr = (void __user *) addr;
191 force_sig_info(signr, &info, current);
192
193 /*
194 * Init gets no signals that it doesn't have a handler for.
195 * That's all very well, but if it has caused a synchronous
196 * exception and we ignore the resulting signal, it will just
197 * generate the same exception over and over again and we get
198 * nowhere. Better to kill it and let the kernel panic.
199 */
200 if (current->pid == 1) {
201 __sighandler_t handler;
202
203 spin_lock_irq(&current->sighand->siglock);
204 handler = current->sighand->action[signr-1].sa.sa_handler;
205 spin_unlock_irq(&current->sighand->siglock);
206 if (handler == SIG_DFL) {
207 /* init has generated a synchronous exception
208 and it doesn't have a handler for the signal */
209 printk(KERN_CRIT "init has generated signal %d "
210 "but has no handler for it\n", signr);
211 do_exit(signr);
212 }
213 }
214}
215
#ifdef CONFIG_PPC64
/*
 * Handle a System Reset exception: offer it to the platform hook,
 * then die.  Must panic if the interrupt is not recoverable.
 */
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls want it first */
	if (ppc_md.system_reset_exception)
		ppc_md.system_reset_exception(regs);

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif
232
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
/* Returns 1 if the machine check was a fixed-up I/O access (nip is
 * redirected to the exception-table fixup), 0 otherwise. */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_PMAC
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	/* 601 (no SRR1 top bits) or MC/TEA cause bits set, and the
	 * faulting address is covered by the exception table. */
	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			/* step back to the load/store; RB holds the port */
			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			/* mark recoverable and resume at the fixup stub */
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC_PMAC */
	return 0;
}
281
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs) ((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs) ((regs)->dsisr)
#else
/* e500 reports machine-check causes in the MCSR SPR instead */
#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
#endif
#define REASON_FP ESR_FP
#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED ESR_PPR
#define REASON_TRAP ESR_PTR

/* single-step stuff: BookE single-steps via DBCR0, not MSR_SE */
#define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs) ((regs)->msr)
#define get_mc_reason(regs) ((regs)->msr)
#define REASON_FP 0x100000
#define REASON_ILLEGAL 0x80000
#define REASON_PRIVILEGED 0x40000
#define REASON_TRAP 0x20000

#define single_stepping(regs) ((regs)->msr & MSR_SE)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#endif
313
/*
 * "Fall-back" implementation for configurations which don't provide
 * platform-specific machine check info.  Platforms override this weak
 * symbol to print extra state (e.g. bus error registers).
 */
void __attribute__ ((weak)) platform_machine_check(struct pt_regs *regs)
{
	/* intentionally empty */
}
322
323void machine_check_exception(struct pt_regs *regs)
324{
325#ifdef CONFIG_PPC64
326 int recover = 0;
327
328 /* See if any machine dependent calls */
329 if (ppc_md.machine_check_exception)
330 recover = ppc_md.machine_check_exception(regs);
331
332 if (recover)
333 return;
334#else
335 unsigned long reason = get_mc_reason(regs);
336
337 if (user_mode(regs)) {
338 regs->msr |= MSR_RI;
339 _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
340 return;
341 }
342
343#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
344 /* the qspan pci read routines can cause machine checks -- Cort */
345 bad_page_fault(regs, regs->dar, SIGBUS);
346 return;
347#endif
348
349 if (debugger_fault_handler(regs)) {
350 regs->msr |= MSR_RI;
351 return;
352 }
353
354 if (check_io_access(regs))
355 return;
356
357#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
358 if (reason & ESR_IMCP) {
359 printk("Instruction");
360 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
361 } else
362 printk("Data");
363 printk(" machine check in kernel mode.\n");
364#elif defined(CONFIG_440A)
365 printk("Machine check in kernel mode.\n");
366 if (reason & ESR_IMCP){
367 printk("Instruction Synchronous Machine Check exception\n");
368 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
369 }
370 else {
371 u32 mcsr = mfspr(SPRN_MCSR);
372 if (mcsr & MCSR_IB)
373 printk("Instruction Read PLB Error\n");
374 if (mcsr & MCSR_DRB)
375 printk("Data Read PLB Error\n");
376 if (mcsr & MCSR_DWB)
377 printk("Data Write PLB Error\n");
378 if (mcsr & MCSR_TLBP)
379 printk("TLB Parity Error\n");
380 if (mcsr & MCSR_ICP){
381 flush_instruction_cache();
382 printk("I-Cache Parity Error\n");
383 }
384 if (mcsr & MCSR_DCSP)
385 printk("D-Cache Search Parity Error\n");
386 if (mcsr & MCSR_DCFP)
387 printk("D-Cache Flush Parity Error\n");
388 if (mcsr & MCSR_IMPE)
389 printk("Machine Check exception is imprecise\n");
390
391 /* Clear MCSR */
392 mtspr(SPRN_MCSR, mcsr);
393 }
394#elif defined (CONFIG_E500)
395 printk("Machine check in kernel mode.\n");
396 printk("Caused by (from MCSR=%lx): ", reason);
397
398 if (reason & MCSR_MCP)
399 printk("Machine Check Signal\n");
400 if (reason & MCSR_ICPERR)
401 printk("Instruction Cache Parity Error\n");
402 if (reason & MCSR_DCP_PERR)
403 printk("Data Cache Push Parity Error\n");
404 if (reason & MCSR_DCPERR)
405 printk("Data Cache Parity Error\n");
406 if (reason & MCSR_GL_CI)
407 printk("Guarded Load or Cache-Inhibited stwcx.\n");
408 if (reason & MCSR_BUS_IAERR)
409 printk("Bus - Instruction Address Error\n");
410 if (reason & MCSR_BUS_RAERR)
411 printk("Bus - Read Address Error\n");
412 if (reason & MCSR_BUS_WAERR)
413 printk("Bus - Write Address Error\n");
414 if (reason & MCSR_BUS_IBERR)
415 printk("Bus - Instruction Data Error\n");
416 if (reason & MCSR_BUS_RBERR)
417 printk("Bus - Read Data Bus Error\n");
418 if (reason & MCSR_BUS_WBERR)
419 printk("Bus - Read Data Bus Error\n");
420 if (reason & MCSR_BUS_IPERR)
421 printk("Bus - Instruction Parity Error\n");
422 if (reason & MCSR_BUS_RPERR)
423 printk("Bus - Read Parity Error\n");
424#elif defined (CONFIG_E200)
425 printk("Machine check in kernel mode.\n");
426 printk("Caused by (from MCSR=%lx): ", reason);
427
428 if (reason & MCSR_MCP)
429 printk("Machine Check Signal\n");
430 if (reason & MCSR_CP_PERR)
431 printk("Cache Push Parity Error\n");
432 if (reason & MCSR_CPERR)
433 printk("Cache Parity Error\n");
434 if (reason & MCSR_EXCP_ERR)
435 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
436 if (reason & MCSR_BUS_IRERR)
437 printk("Bus - Read Bus Error on instruction fetch\n");
438 if (reason & MCSR_BUS_DRERR)
439 printk("Bus - Read Bus Error on data load\n");
440 if (reason & MCSR_BUS_WRERR)
441 printk("Bus - Write Bus Error on buffered store or cache line push\n");
442#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
443 printk("Machine check in kernel mode.\n");
444 printk("Caused by (from SRR1=%lx): ", reason);
445 switch (reason & 0x601F0000) {
446 case 0x80000:
447 printk("Machine check signal\n");
448 break;
449 case 0: /* for 601 */
450 case 0x40000:
451 case 0x140000: /* 7450 MSS error and TEA */
452 printk("Transfer error ack signal\n");
453 break;
454 case 0x20000:
455 printk("Data parity error signal\n");
456 break;
457 case 0x10000:
458 printk("Address parity error signal\n");
459 break;
460 case 0x20000000:
461 printk("L1 Data Cache error\n");
462 break;
463 case 0x40000000:
464 printk("L1 Instruction Cache error\n");
465 break;
466 case 0x00100000:
467 printk("L2 data cache parity error\n");
468 break;
469 default:
470 printk("Unknown values in msr\n");
471 }
472#endif /* CONFIG_4xx */
473
474 /*
475 * Optional platform-provided routine to print out
476 * additional info, e.g. bus error registers.
477 */
478 platform_machine_check(regs);
479#endif /* CONFIG_PPC64 */
480
481 if (debugger_fault_handler(regs))
482 return;
483 die("Machine check", regs, SIGBUS);
484
485 /* Must die if the interrupt is not recoverable */
486 if (!(regs->msr & MSR_RI))
487 panic("Unrecoverable Machine check");
488}
489
490void SMIException(struct pt_regs *regs)
491{
492 die("System Management Interrupt", regs, SIGABRT);
493}
494
495void unknown_exception(struct pt_regs *regs)
496{
497 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
498 regs->nip, regs->msr, regs->trap);
499
500 _exception(SIGTRAP, regs, 0, 0);
501}
502
503void instruction_breakpoint_exception(struct pt_regs *regs)
504{
505 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
506 5, SIGTRAP) == NOTIFY_STOP)
507 return;
508 if (debugger_iabr_match(regs))
509 return;
510 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
511}
512
513void RunModeException(struct pt_regs *regs)
514{
515 _exception(SIGTRAP, regs, 0, 0);
516}
517
518void __kprobes single_step_exception(struct pt_regs *regs)
519{
520 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
521
522 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
523 5, SIGTRAP) == NOTIFY_STOP)
524 return;
525 if (debugger_sstep(regs))
526 return;
527
528 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
529}
530
531/*
532 * After we have successfully emulated an instruction, we have to
533 * check if the instruction was being single-stepped, and if so,
534 * pretend we got a single-step exception. This was pointed out
535 * by Kumar Gala. -- paulus
536 */
537static void emulate_single_step(struct pt_regs *regs)
538{
539 if (single_stepping(regs)) {
540 clear_single_step(regs);
541 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
542 }
543}
544
545static void parse_fpe(struct pt_regs *regs)
546{
547 int code = 0;
548 unsigned long fpscr;
549
550 flush_fp_to_thread(current);
551
552 fpscr = current->thread.fpscr;
553
554 /* Invalid operation */
555 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
556 code = FPE_FLTINV;
557
558 /* Overflow */
559 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
560 code = FPE_FLTOVF;
561
562 /* Underflow */
563 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
564 code = FPE_FLTUND;
565
566 /* Divide by zero */
567 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
568 code = FPE_FLTDIV;
569
570 /* Inexact result */
571 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
572 code = FPE_FLTRES;
573
574 _exception(SIGFPE, regs, code, regs->nip);
575}
576
/*
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 *
 */
/* mfspr rD,PVR: mask keeps opcode + SPR field, frees the rD field */
#define INST_MFSPR_PVR 0x7c1f42a6
#define INST_MFSPR_PVR_MASK 0xfc1fffff

/* dcba: data cache block allocate (no-op to emulate) */
#define INST_DCBA 0x7c0005ec
#define INST_DCBA_MASK 0x7c0007fe

/* mcrxr: move XER[0-3] into a CR field */
#define INST_MCRXR 0x7c000400
#define INST_MCRXR_MASK 0x7c0007fe

/* load/store string instructions (lswi/lswx/stswi/stswx) */
#define INST_STRING 0x7c00042a
#define INST_STRING_MASK 0x7c0007fe
#define INST_STRING_GEN_MASK 0x7c00067e
#define INST_LSWI 0x7c0004aa
#define INST_LSWX 0x7c00042a
#define INST_STSWI 0x7c0005aa
#define INST_STSWX 0x7c00052a
604
/*
 * Emulate one load/store string instruction (lswi/lswx/stswi/stswx)
 * byte by byte against user memory.
 * Returns 0 on success, -EFAULT on a faulting user access, -EINVAL for
 * an invalid or unrecognized form.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;	/* target/source register */
	u8 rA = (instword >> 16) & 0x1f;	/* base address register */
	u8 NB_RB = (instword >> 11) & 0x1f;	/* byte count (NB) or index reg (RB) */
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;				/* byte position within current reg */

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	/* rA == 0 means a base of zero, per the architecture */
	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
		case INST_LSWX:
		case INST_STSWX:
			/* indexed forms: add RB, byte count from XER[25-31] */
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case INST_LSWI:
		case INST_STSWI:
			/* immediate forms: NB == 0 encodes 32 bytes */
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		/* bytes fill each register from the most-significant end */
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & INST_STRING_MASK)) {
			case INST_LSWX:
			case INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case INST_STSWI:
			case INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register; registers
		 * wrap from r31 back to r0 */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
672
673static int emulate_instruction(struct pt_regs *regs)
674{
675 u32 instword;
676 u32 rd;
677
678 if (!user_mode(regs))
679 return -EINVAL;
680 CHECK_FULL_REGS(regs);
681
682 if (get_user(instword, (u32 __user *)(regs->nip)))
683 return -EFAULT;
684
685 /* Emulate the mfspr rD, PVR. */
686 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
687 rd = (instword >> 21) & 0x1f;
688 regs->gpr[rd] = mfspr(SPRN_PVR);
689 return 0;
690 }
691
692 /* Emulating the dcba insn is just a no-op. */
693 if ((instword & INST_DCBA_MASK) == INST_DCBA)
694 return 0;
695
696 /* Emulate the mcrxr insn. */
697 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
698 int shift = (instword >> 21) & 0x1c;
699 unsigned long msk = 0xf0000000UL >> shift;
700
701 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
702 regs->xer &= ~0xf0000000UL;
703 return 0;
704 }
705
706 /* Emulate load/store string insn. */
707 if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
708 return emulate_string_inst(regs, instword);
709
710 return -EINVAL;
711}
712
713/*
714 * Look through the list of trap instructions that are used for BUG(),
715 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
716 * that the exception was caused by a trap instruction of some kind.
717 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
718 * otherwise.
719 */
720extern struct bug_entry __start___bug_table[], __stop___bug_table[];
721
722#ifndef CONFIG_MODULES
723#define module_find_bug(x) NULL
724#endif
725
726struct bug_entry *find_bug(unsigned long bugaddr)
727{
728 struct bug_entry *bug;
729
730 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
731 if (bugaddr == bug->bug_addr)
732 return bug;
733 return module_find_bug(bugaddr);
734}
735
736static int check_bug_trap(struct pt_regs *regs)
737{
738 struct bug_entry *bug;
739 unsigned long addr;
740
741 if (regs->msr & MSR_PR)
742 return 0; /* not in kernel */
743 addr = regs->nip; /* address of trap instruction */
744 if (addr < PAGE_OFFSET)
745 return 0;
746 bug = find_bug(regs->nip);
747 if (bug == NULL)
748 return 0;
749 if (bug->line & BUG_WARNING_TRAP) {
750 /* this is a WARN_ON rather than BUG/BUG_ON */
751#ifdef CONFIG_XMON
752 xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
753 bug->function, bug->file,
754 bug->line & ~BUG_WARNING_TRAP);
755#endif /* CONFIG_XMON */
756 printk(KERN_ERR "Badness in %s at %s:%d\n",
757 bug->function, bug->file,
758 bug->line & ~BUG_WARNING_TRAP);
759 dump_stack();
760 return 1;
761 }
762#ifdef CONFIG_XMON
763 xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
764 bug->function, bug->file, bug->line);
765 xmon(regs);
766#endif /* CONFIG_XMON */
767 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
768 bug->function, bug->file, bug->line);
769
770 return 0;
771}
772
/*
 * Program check exception: FP-enabled exception, trap instruction
 * (breakpoint/BUG/WARN), or illegal/privileged instruction.  The
 * REASON_* decoding depends on the core family (see macros above).
 */
void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurences etc. -dgibson 31/Mar/2003 */
	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
		emulate_single_step(regs);
		return;
	}
#endif /* CONFIG_MATH_EMULATION */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* trap exception: kprobes, debugger, BUG/WARN, then user */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
		    == NOTIFY_STOP)
			return;
		if (debugger_bpt(regs))
			return;
		if (check_bug_trap(regs)) {
			/* WARN_ON: skip the trap instruction and resume */
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			/* emulated: advance past it, honor single-step */
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	/* Could not emulate: deliver SIGILL with the precise cause. */
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
830
831void alignment_exception(struct pt_regs *regs)
832{
833 int fixed;
834
835 fixed = fix_alignment(regs);
836
837 if (fixed == 1) {
838 regs->nip += 4; /* skip over emulated instruction */
839 emulate_single_step(regs);
840 return;
841 }
842
843 /* Operand address was bad */
844 if (fixed == -EFAULT) {
845 if (user_mode(regs))
846 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
847 else
848 /* Search exception table */
849 bad_page_fault(regs, regs->dar, SIGSEGV);
850 return;
851 }
852 _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
853}
854
855void StackOverflow(struct pt_regs *regs)
856{
857 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
858 current, regs->gpr[1]);
859 debugger(regs);
860 show_regs(regs);
861 panic("kernel stack overflow");
862}
863
864void nonrecoverable_exception(struct pt_regs *regs)
865{
866 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
867 regs->nip, regs->msr);
868 debugger(regs);
869 die("nonrecoverable exception", regs, SIGKILL);
870}
871
872void trace_syscall(struct pt_regs *regs)
873{
874 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
875 current, current->pid, regs->nip, regs->link, regs->gpr[0],
876 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
877}
878
879void kernel_fp_unavailable_exception(struct pt_regs *regs)
880{
881 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
882 "%lx at %lx\n", regs->trap, regs->nip);
883 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
884}
885
886void altivec_unavailable_exception(struct pt_regs *regs)
887{
888#if !defined(CONFIG_ALTIVEC)
889 if (user_mode(regs)) {
890 /* A user program has executed an altivec instruction,
891 but this kernel doesn't support altivec. */
892 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
893 return;
894 }
895#endif
896 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
897 "%lx at %lx\n", regs->trap, regs->nip);
898 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
899}
900
#ifdef CONFIG_PPC64
/* PMU interrupt handler installed by the perf/oprofile code. */
extern perf_irq_t perf_irq;
#endif

#if defined(CONFIG_PPC64) || defined(CONFIG_E500)
/* Performance monitor interrupt: delegate to the registered handler. */
void performance_monitor_exception(struct pt_regs *regs)
{
	perf_irq(regs);
}
#endif
911
#ifdef CONFIG_8xx
/*
 * 8xx software-emulation trap: the core has no FPU, so FP (and some
 * other) instructions trap here to be emulated.  Kernel-mode hits are
 * fatal; user-mode failures turn into the matching signal.
 */
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
#else
	errcode = Soft_emulate_8xx(regs);
#endif

	if (errcode == 0) {
		emulate_single_step(regs);
		return;
	}

	/* Map the emulator's failure code onto a signal. */
	if (errcode > 0)
		_exception(SIGFPE, regs, 0, 0);
	else if (errcode == -EFAULT)
		_exception(SIGSEGV, regs, 0, 0);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */
942
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

/*
 * BookE/40x debug exception.  Only the instruction-completion
 * (single-step) event is handled: turn stepping off, then deliver
 * SIGTRAP (or hand kernel-mode steps to the debugger).
 */
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (!(debug_status & DBSR_IC))	/* instruction completion? */
		return;

	regs->msr &= ~MSR_DE;
	if (user_mode(regs)) {
		current->thread.dbcr0 &= ~DBCR0_IC;
	} else {
		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);
		if (debugger_sstep(regs))
			return;
	}
	_exception(SIGTRAP, regs, TRAP_TRACE, 0);
}
#endif /* CONFIG_40x || CONFIG_BOOKE */
963
964#if !defined(CONFIG_TAU_INT)
965void TAUException(struct pt_regs *regs)
966{
967 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
968 regs->nip, regs->msr, regs->trap, print_tainted());
969}
970#endif /* CONFIG_INT_TAU */
971
#ifdef CONFIG_ALTIVEC
/*
 * Altivec assist: the hardware wants software help with a denormal
 * operand (Java mode).  Emulate the instruction; if we can't, fall
 * back to setting the non-Java bit in the VSCR.
 */
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;	/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
		return;
	}

	/* didn't recognize the instruction */
	/* XXX quick hack for now: set the non-Java bit in the VSCR */
	if (printk_ratelimit())
		printk(KERN_ERR "Unrecognized altivec instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	current->thread.vscr.u[3] |= 0x10000;
}
#endif /* CONFIG_ALTIVEC */
1005
#ifdef CONFIG_FSL_BOOKE
/*
 * Cache-locking exception (e500).  We treat cache locking instructions
 * from the user as priv ops; in the future we could try to do
 * something smarter.
 */
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	if (error_code & (ESR_DLK | ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
}
#endif /* CONFIG_FSL_BOOKE */
1019
#ifdef CONFIG_SPE
/*
 * SPE floating-point exception.  Decode the SPEFSCR against the task's
 * fpexc_mode (only exceptions the task enabled are reported), set the
 * sticky status bit where the hardware does not, and deliver SIGFPE.
 */
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr = current->thread.spefscr;
	int fpexc_mode = current->thread.fpexc_mode;
	int code = 0;

	/* Hardware does not neccessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	} else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	} else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) {
		code = FPE_FLTDIV;
	} else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	} else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) &&
		   (fpexc_mode & PR_FP_EXC_RES)) {
		code = FPE_FLTRES;
	}

	/* Write back the (possibly updated) sticky bits. */
	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
}
#endif
1055
1056/*
1057 * We enter here if we get an unrecoverable exception, that is, one
1058 * that happened at a point where the RI (recoverable interrupt) bit
1059 * in the MSR is 0. This indicates that SRR0/1 are live, and that
1060 * we therefore lost state by taking this exception.
1061 */
1062void unrecoverable_exception(struct pt_regs *regs)
1063{
1064 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1065 regs->trap, regs->nip);
1066 die("Unrecoverable exception", regs, SIGABRT);
1067}
1068
#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception: just disables further
 * watchdog interrupts and spins until a reboot occurs.  Boards may
 * override this weak symbol with their own handler.
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
}

/* Watchdog exception entry point: log and delegate. */
void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
1087
1088/*
1089 * We enter here if we discover during exception entry that we are
1090 * running in supervisor mode with a userspace value in the stack pointer.
1091 */
1092void kernel_bad_stack(struct pt_regs *regs)
1093{
1094 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1095 regs->gpr[1], regs->nip);
1096 die("Bad kernel stack pointer", regs, SIGABRT);
1097}
1098
1099void __init trap_init(void)
1100{
1101}
diff --git a/arch/ppc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 604d0947cb20..604d0947cb20 100644
--- a/arch/ppc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
diff --git a/arch/ppc64/kernel/vector.S b/arch/powerpc/kernel/vector.S
index b79d33e4001e..66b3d03c5fa5 100644
--- a/arch/ppc64/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,11 +1,26 @@
1#include <linux/config.h>
1#include <asm/ppc_asm.h> 2#include <asm/ppc_asm.h>
2#include <asm/processor.h> 3#include <asm/reg.h>
3 4
4/* 5/*
5 * The routines below are in assembler so we can closely control the 6 * The routines below are in assembler so we can closely control the
6 * usage of floating-point registers. These routines must be called 7 * usage of floating-point registers. These routines must be called
7 * with preempt disabled. 8 * with preempt disabled.
8 */ 9 */
10#ifdef CONFIG_PPC32
11 .data
12fpzero:
13 .long 0
14fpone:
15 .long 0x3f800000 /* 1.0 in single-precision FP */
16fphalf:
17 .long 0x3f000000 /* 0.5 in single-precision FP */
18
19#define LDCONST(fr, name) \
20 lis r11,name@ha; \
21 lfs fr,name@l(r11)
22#else
23
9 .section ".toc","aw" 24 .section ".toc","aw"
10fpzero: 25fpzero:
11 .tc FD_0_0[TC],0 26 .tc FD_0_0[TC],0
@@ -14,32 +29,42 @@ fpone:
14fphalf: 29fphalf:
15 .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */ 30 .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */
16 31
32#define LDCONST(fr, name) \
33 lfd fr,name@toc(r2)
34#endif
35
17 .text 36 .text
18/* 37/*
19 * Internal routine to enable floating point and set FPSCR to 0. 38 * Internal routine to enable floating point and set FPSCR to 0.
20 * Don't call it from C; it doesn't use the normal calling convention. 39 * Don't call it from C; it doesn't use the normal calling convention.
21 */ 40 */
22fpenable: 41fpenable:
42#ifdef CONFIG_PPC32
43 stwu r1,-64(r1)
44#else
45 stdu r1,-64(r1)
46#endif
23 mfmsr r10 47 mfmsr r10
24 ori r11,r10,MSR_FP 48 ori r11,r10,MSR_FP
25 mtmsr r11 49 mtmsr r11
26 isync 50 isync
27 stfd fr31,-8(r1) 51 stfd fr0,24(r1)
28 stfd fr0,-16(r1) 52 stfd fr1,16(r1)
29 stfd fr1,-24(r1) 53 stfd fr31,8(r1)
54 LDCONST(fr1, fpzero)
30 mffs fr31 55 mffs fr31
31 lfd fr1,fpzero@toc(r2)
32 mtfsf 0xff,fr1 56 mtfsf 0xff,fr1
33 blr 57 blr
34 58
35fpdisable: 59fpdisable:
36 mtlr r12 60 mtlr r12
37 mtfsf 0xff,fr31 61 mtfsf 0xff,fr31
38 lfd fr1,-24(r1) 62 lfd fr31,8(r1)
39 lfd fr0,-16(r1) 63 lfd fr1,16(r1)
40 lfd fr31,-8(r1) 64 lfd fr0,24(r1)
41 mtmsr r10 65 mtmsr r10
42 isync 66 isync
67 addi r1,r1,64
43 blr 68 blr
44 69
45/* 70/*
@@ -82,7 +107,7 @@ _GLOBAL(vsubfp)
82_GLOBAL(vmaddfp) 107_GLOBAL(vmaddfp)
83 mflr r12 108 mflr r12
84 bl fpenable 109 bl fpenable
85 stfd fr2,-32(r1) 110 stfd fr2,32(r1)
86 li r0,4 111 li r0,4
87 mtctr r0 112 mtctr r0
88 li r7,0 113 li r7,0
@@ -93,7 +118,7 @@ _GLOBAL(vmaddfp)
93 stfsx fr0,r3,r7 118 stfsx fr0,r3,r7
94 addi r7,r7,4 119 addi r7,r7,4
95 bdnz 1b 120 bdnz 1b
96 lfd fr2,-32(r1) 121 lfd fr2,32(r1)
97 b fpdisable 122 b fpdisable
98 123
99/* 124/*
@@ -102,7 +127,7 @@ _GLOBAL(vmaddfp)
102_GLOBAL(vnmsubfp) 127_GLOBAL(vnmsubfp)
103 mflr r12 128 mflr r12
104 bl fpenable 129 bl fpenable
105 stfd fr2,-32(r1) 130 stfd fr2,32(r1)
106 li r0,4 131 li r0,4
107 mtctr r0 132 mtctr r0
108 li r7,0 133 li r7,0
@@ -113,7 +138,7 @@ _GLOBAL(vnmsubfp)
113 stfsx fr0,r3,r7 138 stfsx fr0,r3,r7
114 addi r7,r7,4 139 addi r7,r7,4
115 bdnz 1b 140 bdnz 1b
116 lfd fr2,-32(r1) 141 lfd fr2,32(r1)
117 b fpdisable 142 b fpdisable
118 143
119/* 144/*
@@ -124,7 +149,7 @@ _GLOBAL(vrefp)
124 mflr r12 149 mflr r12
125 bl fpenable 150 bl fpenable
126 li r0,4 151 li r0,4
127 lfd fr1,fpone@toc(r2) 152 LDCONST(fr1, fpone)
128 mtctr r0 153 mtctr r0
129 li r6,0 154 li r6,0
1301: lfsx fr0,r4,r6 1551: lfsx fr0,r4,r6
@@ -143,13 +168,13 @@ _GLOBAL(vrefp)
143_GLOBAL(vrsqrtefp) 168_GLOBAL(vrsqrtefp)
144 mflr r12 169 mflr r12
145 bl fpenable 170 bl fpenable
146 stfd fr2,-32(r1) 171 stfd fr2,32(r1)
147 stfd fr3,-40(r1) 172 stfd fr3,40(r1)
148 stfd fr4,-48(r1) 173 stfd fr4,48(r1)
149 stfd fr5,-56(r1) 174 stfd fr5,56(r1)
150 li r0,4 175 li r0,4
151 lfd fr4,fpone@toc(r2) 176 LDCONST(fr4, fpone)
152 lfd fr5,fphalf@toc(r2) 177 LDCONST(fr5, fphalf)
153 mtctr r0 178 mtctr r0
154 li r6,0 179 li r6,0
1551: lfsx fr0,r4,r6 1801: lfsx fr0,r4,r6
@@ -165,8 +190,8 @@ _GLOBAL(vrsqrtefp)
165 stfsx fr1,r3,r6 190 stfsx fr1,r3,r6
166 addi r6,r6,4 191 addi r6,r6,4
167 bdnz 1b 192 bdnz 1b
168 lfd fr5,-56(r1) 193 lfd fr5,56(r1)
169 lfd fr4,-48(r1) 194 lfd fr4,48(r1)
170 lfd fr3,-40(r1) 195 lfd fr3,40(r1)
171 lfd fr2,-32(r1) 196 lfd fr2,32(r1)
172 b fpdisable 197 b fpdisable
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..d4dfcfbce272
--- /dev/null
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -0,0 +1,279 @@
1#include <linux/config.h>
2#ifdef CONFIG_PPC64
3#include <asm/page.h>
4#else
5#define PAGE_SIZE 4096
6#endif
7#include <asm-generic/vmlinux.lds.h>
8
9#ifdef CONFIG_PPC64
10OUTPUT_ARCH(powerpc:common64)
11jiffies = jiffies_64;
12#else
13OUTPUT_ARCH(powerpc:common)
14jiffies = jiffies_64 + 4;
15#endif
16SECTIONS
17{
18 /* Sections to be discarded. */
19 /DISCARD/ : {
20 *(.exitcall.exit)
21 *(.exit.data)
22 }
23
24
25 /* Read-only sections, merged into text segment: */
26#ifdef CONFIG_PPC32
27 . = + SIZEOF_HEADERS;
28 .interp : { *(.interp) }
29 .hash : { *(.hash) }
30 .dynsym : { *(.dynsym) }
31 .dynstr : { *(.dynstr) }
32 .rel.text : { *(.rel.text) }
33 .rela.text : { *(.rela.text) }
34 .rel.data : { *(.rel.data) }
35 .rela.data : { *(.rela.data) }
36 .rel.rodata : { *(.rel.rodata) }
37 .rela.rodata : { *(.rela.rodata) }
38 .rel.got : { *(.rel.got) }
39 .rela.got : { *(.rela.got) }
40 .rel.ctors : { *(.rel.ctors) }
41 .rela.ctors : { *(.rela.ctors) }
42 .rel.dtors : { *(.rel.dtors) }
43 .rela.dtors : { *(.rela.dtors) }
44 .rel.bss : { *(.rel.bss) }
45 .rela.bss : { *(.rela.bss) }
46 .rel.plt : { *(.rel.plt) }
47 .rela.plt : { *(.rela.plt) }
48/* .init : { *(.init) } =0*/
49 .plt : { *(.plt) }
50#endif
51 .text : {
52 *(.text .text.*)
53 SCHED_TEXT
54 LOCK_TEXT
55 KPROBES_TEXT
56 *(.fixup)
57#ifdef CONFIG_PPC32
58 *(.got1)
59 __got2_start = .;
60 *(.got2)
61 __got2_end = .;
62#else
63 . = ALIGN(PAGE_SIZE);
64 _etext = .;
65#endif
66 }
67#ifdef CONFIG_PPC32
68 _etext = .;
69 PROVIDE (etext = .);
70
71 RODATA
72 .fini : { *(.fini) } =0
73 .ctors : { *(.ctors) }
74 .dtors : { *(.dtors) }
75
76 .fixup : { *(.fixup) }
77#endif
78
79 __ex_table : {
80 __start___ex_table = .;
81 *(__ex_table)
82 __stop___ex_table = .;
83 }
84
85 __bug_table : {
86 __start___bug_table = .;
87 *(__bug_table)
88 __stop___bug_table = .;
89 }
90
91#ifdef CONFIG_PPC64
92 __ftr_fixup : {
93 __start___ftr_fixup = .;
94 *(__ftr_fixup)
95 __stop___ftr_fixup = .;
96 }
97
98 RODATA
99#endif
100
101#ifdef CONFIG_PPC32
102 /* Read-write section, merged into data segment: */
103 . = ALIGN(PAGE_SIZE);
104 _sdata = .;
105 .data :
106 {
107 *(.data)
108 *(.data1)
109 *(.sdata)
110 *(.sdata2)
111 *(.got.plt) *(.got)
112 *(.dynamic)
113 CONSTRUCTORS
114 }
115
116 . = ALIGN(PAGE_SIZE);
117 __nosave_begin = .;
118 .data_nosave : { *(.data.nosave) }
119 . = ALIGN(PAGE_SIZE);
120 __nosave_end = .;
121
122 . = ALIGN(32);
123 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
124
125 _edata = .;
126 PROVIDE (edata = .);
127
128 . = ALIGN(8192);
129 .data.init_task : { *(.data.init_task) }
130#endif
131
132 /* will be freed after init */
133 . = ALIGN(PAGE_SIZE);
134 __init_begin = .;
135 .init.text : {
136 _sinittext = .;
137 *(.init.text)
138 _einittext = .;
139 }
140#ifdef CONFIG_PPC32
141 /* .exit.text is discarded at runtime, not link time,
142 to deal with references from __bug_table */
143 .exit.text : { *(.exit.text) }
144#endif
145 .init.data : {
146 *(.init.data);
147 __vtop_table_begin = .;
148 *(.vtop_fixup);
149 __vtop_table_end = .;
150 __ptov_table_begin = .;
151 *(.ptov_fixup);
152 __ptov_table_end = .;
153 }
154
155 . = ALIGN(16);
156 .init.setup : {
157 __setup_start = .;
158 *(.init.setup)
159 __setup_end = .;
160 }
161
162 .initcall.init : {
163 __initcall_start = .;
164 *(.initcall1.init)
165 *(.initcall2.init)
166 *(.initcall3.init)
167 *(.initcall4.init)
168 *(.initcall5.init)
169 *(.initcall6.init)
170 *(.initcall7.init)
171 __initcall_end = .;
172 }
173
174 .con_initcall.init : {
175 __con_initcall_start = .;
176 *(.con_initcall.init)
177 __con_initcall_end = .;
178 }
179
180 SECURITY_INIT
181
182#ifdef CONFIG_PPC32
183 __start___ftr_fixup = .;
184 __ftr_fixup : { *(__ftr_fixup) }
185 __stop___ftr_fixup = .;
186#else
187 . = ALIGN(PAGE_SIZE);
188 .init.ramfs : {
189 __initramfs_start = .;
190 *(.init.ramfs)
191 __initramfs_end = .;
192 }
193#endif
194
195#ifdef CONFIG_PPC32
196 . = ALIGN(32);
197#endif
198 .data.percpu : {
199 __per_cpu_start = .;
200 *(.data.percpu)
201 __per_cpu_end = .;
202 }
203
204 . = ALIGN(PAGE_SIZE);
205#ifdef CONFIG_PPC64
206 . = ALIGN(16384);
207 __init_end = .;
208 /* freed after init ends here */
209
210 /* Read/write sections */
211 . = ALIGN(PAGE_SIZE);
212 . = ALIGN(16384);
213 _sdata = .;
214 /* The initial task and kernel stack */
215 .data.init_task : {
216 *(.data.init_task)
217 }
218
219 . = ALIGN(PAGE_SIZE);
220 .data.page_aligned : {
221 *(.data.page_aligned)
222 }
223
224 .data.cacheline_aligned : {
225 *(.data.cacheline_aligned)
226 }
227
228 .data : {
229 *(.data .data.rel* .toc1)
230 *(.branch_lt)
231 }
232
233 .opd : {
234 *(.opd)
235 }
236
237 .got : {
238 __toc_start = .;
239 *(.got)
240 *(.toc)
241 . = ALIGN(PAGE_SIZE);
242 _edata = .;
243 }
244
245 . = ALIGN(PAGE_SIZE);
246#else
247 __initramfs_start = .;
248 .init.ramfs : {
249 *(.init.ramfs)
250 }
251 __initramfs_end = .;
252
253 . = ALIGN(4096);
254 __init_end = .;
255
256 . = ALIGN(4096);
257 _sextratext = .;
258 _eextratext = .;
259
260 __bss_start = .;
261#endif
262
263 .bss : {
264 __bss_start = .;
265 *(.sbss) *(.scommon)
266 *(.dynbss)
267 *(.bss)
268 *(COMMON)
269 __bss_stop = .;
270 }
271
272#ifdef CONFIG_PPC64
273 . = ALIGN(PAGE_SIZE);
274#endif
275 _end = . ;
276#ifdef CONFIG_PPC32
277 PROVIDE (end = .);
278#endif
279}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
new file mode 100644
index 000000000000..30367a0237dd
--- /dev/null
+++ b/arch/powerpc/lib/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for ppc-specific library files..
3#
4
5obj-y := strcase.o string.o
6obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
7obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o memcpy_64.o \
8 usercopy_64.o sstep.o checksum_64.o mem_64.o
9obj-$(CONFIG_PPC_ISERIES) += e2a.o
10ifeq ($(CONFIG_PPC64),y)
11obj-$(CONFIG_SMP) += locks.o
12endif
13
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
new file mode 100644
index 000000000000..7874e8a80455
--- /dev/null
+++ b/arch/powerpc/lib/checksum_32.S
@@ -0,0 +1,225 @@
1/*
2 * This file contains assembly-language implementations
3 * of IP-style 1's complement checksum routines.
4 *
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
13 */
14
15#include <linux/sys.h>
16#include <asm/processor.h>
17#include <asm/errno.h>
18#include <asm/ppc_asm.h>
19
20 .text
21
22/*
23 * ip_fast_csum(buf, len) -- Optimized for IP header
24 * len is in words and is always >= 5.
25 */
26_GLOBAL(ip_fast_csum)
27 lwz r0,0(r3)
28 lwzu r5,4(r3)
29 addic. r4,r4,-2
30 addc r0,r0,r5
31 mtctr r4
32 blelr-
331: lwzu r4,4(r3)
34 adde r0,r0,r4
35 bdnz 1b
36 addze r0,r0 /* add in final carry */
37 rlwinm r3,r0,16,0,31 /* fold two halves together */
38 add r3,r0,r3
39 not r3,r3
40 srwi r3,r3,16
41 blr
42
43/*
44 * Compute checksum of TCP or UDP pseudo-header:
45 * csum_tcpudp_magic(saddr, daddr, len, proto, sum)
46 */
47_GLOBAL(csum_tcpudp_magic)
48 rlwimi r5,r6,16,0,15 /* put proto in upper half of len */
49 addc r0,r3,r4 /* add 4 32-bit words together */
50 adde r0,r0,r5
51 adde r0,r0,r7
52 addze r0,r0 /* add in final carry */
53 rlwinm r3,r0,16,0,31 /* fold two halves together */
54 add r3,r0,r3
55 not r3,r3
56 srwi r3,r3,16
57 blr
58
59/*
60 * computes the checksum of a memory block at buff, length len,
61 * and adds in "sum" (32-bit)
62 *
63 * csum_partial(buff, len, sum)
64 */
65_GLOBAL(csum_partial)
66 addic r0,r5,0
67 subi r3,r3,4
68 srwi. r6,r4,2
69 beq 3f /* if we're doing < 4 bytes */
70 andi. r5,r3,2 /* Align buffer to longword boundary */
71 beq+ 1f
72 lhz r5,4(r3) /* do 2 bytes to get aligned */
73 addi r3,r3,2
74 subi r4,r4,2
75 addc r0,r0,r5
76 srwi. r6,r4,2 /* # words to do */
77 beq 3f
781: mtctr r6
792: lwzu r5,4(r3) /* the bdnz has zero overhead, so it should */
80 adde r0,r0,r5 /* be unnecessary to unroll this loop */
81 bdnz 2b
82 andi. r4,r4,3
833: cmpwi 0,r4,2
84 blt+ 4f
85 lhz r5,4(r3)
86 addi r3,r3,2
87 subi r4,r4,2
88 adde r0,r0,r5
894: cmpwi 0,r4,1
90 bne+ 5f
91 lbz r5,4(r3)
92 slwi r5,r5,8 /* Upper byte of word */
93 adde r0,r0,r5
945: addze r3,r0 /* add in final carry */
95 blr
96
97/*
98 * Computes the checksum of a memory block at src, length len,
99 * and adds in "sum" (32-bit), while copying the block to dst.
100 * If an access exception occurs on src or dst, it stores -EFAULT
101 * to *src_err or *dst_err respectively, and (for an error on
102 * src) zeroes the rest of dst.
103 *
104 * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
105 */
106_GLOBAL(csum_partial_copy_generic)
107 addic r0,r6,0
108 subi r3,r3,4
109 subi r4,r4,4
110 srwi. r6,r5,2
111 beq 3f /* if we're doing < 4 bytes */
112 andi. r9,r4,2 /* Align dst to longword boundary */
113 beq+ 1f
11481: lhz r6,4(r3) /* do 2 bytes to get aligned */
115 addi r3,r3,2
116 subi r5,r5,2
11791: sth r6,4(r4)
118 addi r4,r4,2
119 addc r0,r0,r6
120 srwi. r6,r5,2 /* # words to do */
121 beq 3f
1221: srwi. r6,r5,4 /* # groups of 4 words to do */
123 beq 10f
124 mtctr r6
12571: lwz r6,4(r3)
12672: lwz r9,8(r3)
12773: lwz r10,12(r3)
12874: lwzu r11,16(r3)
129 adde r0,r0,r6
13075: stw r6,4(r4)
131 adde r0,r0,r9
13276: stw r9,8(r4)
133 adde r0,r0,r10
13477: stw r10,12(r4)
135 adde r0,r0,r11
13678: stwu r11,16(r4)
137 bdnz 71b
13810: rlwinm. r6,r5,30,30,31 /* # words left to do */
139 beq 13f
140 mtctr r6
14182: lwzu r9,4(r3)
14292: stwu r9,4(r4)
143 adde r0,r0,r9
144 bdnz 82b
14513: andi. r5,r5,3
1463: cmpwi 0,r5,2
147 blt+ 4f
14883: lhz r6,4(r3)
149 addi r3,r3,2
150 subi r5,r5,2
15193: sth r6,4(r4)
152 addi r4,r4,2
153 adde r0,r0,r6
1544: cmpwi 0,r5,1
155 bne+ 5f
15684: lbz r6,4(r3)
15794: stb r6,4(r4)
158 slwi r6,r6,8 /* Upper byte of word */
159 adde r0,r0,r6
1605: addze r3,r0 /* add in final carry */
161 blr
162
163/* These shouldn't go in the fixup section, since that would
164 cause the ex_table addresses to get out of order. */
165
166src_error_4:
167 mfctr r6 /* update # bytes remaining from ctr */
168 rlwimi r5,r6,4,0,27
169 b 79f
170src_error_1:
171 li r6,0
172 subi r5,r5,2
17395: sth r6,4(r4)
174 addi r4,r4,2
17579: srwi. r6,r5,2
176 beq 3f
177 mtctr r6
178src_error_2:
179 li r6,0
18096: stwu r6,4(r4)
181 bdnz 96b
1823: andi. r5,r5,3
183 beq src_error
184src_error_3:
185 li r6,0
186 mtctr r5
187 addi r4,r4,3
18897: stbu r6,1(r4)
189 bdnz 97b
190src_error:
191 cmpwi 0,r7,0
192 beq 1f
193 li r6,-EFAULT
194 stw r6,0(r7)
1951: addze r3,r0
196 blr
197
198dst_error:
199 cmpwi 0,r8,0
200 beq 1f
201 li r6,-EFAULT
202 stw r6,0(r8)
2031: addze r3,r0
204 blr
205
206.section __ex_table,"a"
207 .long 81b,src_error_1
208 .long 91b,dst_error
209 .long 71b,src_error_4
210 .long 72b,src_error_4
211 .long 73b,src_error_4
212 .long 74b,src_error_4
213 .long 75b,dst_error
214 .long 76b,dst_error
215 .long 77b,dst_error
216 .long 78b,dst_error
217 .long 82b,src_error_2
218 .long 92b,dst_error
219 .long 83b,src_error_3
220 .long 93b,dst_error
221 .long 84b,src_error_3
222 .long 94b,dst_error
223 .long 95b,dst_error
224 .long 96b,dst_error
225 .long 97b,dst_error
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
new file mode 100644
index 000000000000..ef96c6c58efc
--- /dev/null
+++ b/arch/powerpc/lib/checksum_64.S
@@ -0,0 +1,229 @@
1/*
2 * This file contains assembly-language implementations
3 * of IP-style 1's complement checksum routines.
4 *
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
13 */
14
15#include <linux/sys.h>
16#include <asm/processor.h>
17#include <asm/errno.h>
18#include <asm/ppc_asm.h>
19
20/*
21 * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
22 * len is in words and is always >= 5.
23 *
24 * In practice len == 5, but this is not guaranteed. So this code does not
25 * attempt to use doubleword instructions.
26 */
27_GLOBAL(ip_fast_csum)
28 lwz r0,0(r3)
29 lwzu r5,4(r3)
30 addic. r4,r4,-2
31 addc r0,r0,r5
32 mtctr r4
33 blelr-
341: lwzu r4,4(r3)
35 adde r0,r0,r4
36 bdnz 1b
37 addze r0,r0 /* add in final carry */
38 rldicl r4,r0,32,0 /* fold two 32-bit halves together */
39 add r0,r0,r4
40 srdi r0,r0,32
41 rlwinm r3,r0,16,0,31 /* fold two halves together */
42 add r3,r0,r3
43 not r3,r3
44 srwi r3,r3,16
45 blr
46
47/*
48 * Compute checksum of TCP or UDP pseudo-header:
49 * csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
50 * No real gain trying to do this specially for 64 bit, but
51 * the 32 bit addition may spill into the upper bits of
52 * the doubleword so we still must fold it down from 64.
53 */
54_GLOBAL(csum_tcpudp_magic)
55 rlwimi r5,r6,16,0,15 /* put proto in upper half of len */
56 addc r0,r3,r4 /* add 4 32-bit words together */
57 adde r0,r0,r5
58 adde r0,r0,r7
59 rldicl r4,r0,32,0 /* fold 64 bit value */
60 add r0,r4,r0
61 srdi r0,r0,32
62 rlwinm r3,r0,16,0,31 /* fold two halves together */
63 add r3,r0,r3
64 not r3,r3
65 srwi r3,r3,16
66 blr
67
68/*
69 * Computes the checksum of a memory block at buff, length len,
70 * and adds in "sum" (32-bit).
71 *
72 * This code assumes at least halfword alignment, though the length
73 * can be any number of bytes. The sum is accumulated in r5.
74 *
75 * csum_partial(r3=buff, r4=len, r5=sum)
76 */
77_GLOBAL(csum_partial)
78 subi r3,r3,8 /* we'll offset by 8 for the loads */
79 srdi. r6,r4,3 /* divide by 8 for doubleword count */
80 addic r5,r5,0 /* clear carry */
81 beq 3f /* if we're doing < 8 bytes */
82 andi. r0,r3,2 /* aligned on a word boundary already? */
83 beq+ 1f
84 lhz r6,8(r3) /* do 2 bytes to get aligned */
85 addi r3,r3,2
86 subi r4,r4,2
87 addc r5,r5,r6
88 srdi. r6,r4,3 /* recompute number of doublewords */
89 beq 3f /* any left? */
901: mtctr r6
912: ldu r6,8(r3) /* main sum loop */
92 adde r5,r5,r6
93 bdnz 2b
94 andi. r4,r4,7 /* compute bytes left to sum after doublewords */
953: cmpwi 0,r4,4 /* is at least a full word left? */
96 blt 4f
97 lwz r6,8(r3) /* sum this word */
98 addi r3,r3,4
99 subi r4,r4,4
100 adde r5,r5,r6
1014: cmpwi 0,r4,2 /* is at least a halfword left? */
102 blt+ 5f
103 lhz r6,8(r3) /* sum this halfword */
104 addi r3,r3,2
105 subi r4,r4,2
106 adde r5,r5,r6
1075: cmpwi 0,r4,1 /* is at least a byte left? */
108 bne+ 6f
109 lbz r6,8(r3) /* sum this byte */
110 slwi r6,r6,8 /* this byte is assumed to be the upper byte of a halfword */
111 adde r5,r5,r6
1126: addze r5,r5 /* add in final carry */
113 rldicl r4,r5,32,0 /* fold two 32-bit halves together */
114 add r3,r4,r5
115 srdi r3,r3,32
116 blr
117
118/*
119 * Computes the checksum of a memory block at src, length len,
120 * and adds in "sum" (32-bit), while copying the block to dst.
121 * If an access exception occurs on src or dst, it stores -EFAULT
122 * to *src_err or *dst_err respectively, and (for an error on
123 * src) zeroes the rest of dst.
124 *
125 * This code needs to be reworked to take advantage of 64 bit sum+copy.
126 * However, due to tokenring halfword alignment problems this will be very
127 * tricky. For now we'll leave it until we instrument it somehow.
128 *
129 * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
130 */
131_GLOBAL(csum_partial_copy_generic)
132 addic r0,r6,0
133 subi r3,r3,4
134 subi r4,r4,4
135 srwi. r6,r5,2
136 beq 3f /* if we're doing < 4 bytes */
137 andi. r9,r4,2 /* Align dst to longword boundary */
138 beq+ 1f
13981: lhz r6,4(r3) /* do 2 bytes to get aligned */
140 addi r3,r3,2
141 subi r5,r5,2
14291: sth r6,4(r4)
143 addi r4,r4,2
144 addc r0,r0,r6
145 srwi. r6,r5,2 /* # words to do */
146 beq 3f
1471: mtctr r6
14882: lwzu r6,4(r3) /* the bdnz has zero overhead, so it should */
14992: stwu r6,4(r4) /* be unnecessary to unroll this loop */
150 adde r0,r0,r6
151 bdnz 82b
152 andi. r5,r5,3
1533: cmpwi 0,r5,2
154 blt+ 4f
15583: lhz r6,4(r3)
156 addi r3,r3,2
157 subi r5,r5,2
15893: sth r6,4(r4)
159 addi r4,r4,2
160 adde r0,r0,r6
1614: cmpwi 0,r5,1
162 bne+ 5f
16384: lbz r6,4(r3)
16494: stb r6,4(r4)
165 slwi r6,r6,8 /* Upper byte of word */
166 adde r0,r0,r6
1675: addze r3,r0 /* add in final carry (unlikely with 64-bit regs) */
168 rldicl r4,r3,32,0 /* fold 64 bit value */
169 add r3,r4,r3
170 srdi r3,r3,32
171 blr
172
173/* These shouldn't go in the fixup section, since that would
174 cause the ex_table addresses to get out of order. */
175
176 .globl src_error_1
177src_error_1:
178 li r6,0
179 subi r5,r5,2
18095: sth r6,4(r4)
181 addi r4,r4,2
182 srwi. r6,r5,2
183 beq 3f
184 mtctr r6
185 .globl src_error_2
186src_error_2:
187 li r6,0
18896: stwu r6,4(r4)
189 bdnz 96b
1903: andi. r5,r5,3
191 beq src_error
192 .globl src_error_3
193src_error_3:
194 li r6,0
195 mtctr r5
196 addi r4,r4,3
19797: stbu r6,1(r4)
198 bdnz 97b
199 .globl src_error
200src_error:
201 cmpdi 0,r7,0
202 beq 1f
203 li r6,-EFAULT
204 stw r6,0(r7)
2051: addze r3,r0
206 blr
207
208 .globl dst_error
209dst_error:
210 cmpdi 0,r8,0
211 beq 1f
212 li r6,-EFAULT
213 stw r6,0(r8)
2141: addze r3,r0
215 blr
216
217.section __ex_table,"a"
218 .align 3
219 .llong 81b,src_error_1
220 .llong 91b,dst_error
221 .llong 82b,src_error_2
222 .llong 92b,dst_error
223 .llong 83b,src_error_3
224 .llong 93b,dst_error
225 .llong 84b,src_error_3
226 .llong 94b,dst_error
227 .llong 95b,dst_error
228 .llong 96b,dst_error
229 .llong 97b,dst_error
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
new file mode 100644
index 000000000000..bee51414812e
--- /dev/null
+++ b/arch/powerpc/lib/copy_32.S
@@ -0,0 +1,543 @@
1/*
2 * Memory copy functions for 32-bit PowerPC.
3 *
4 * Copyright (C) 1996-2005 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <asm/processor.h>
13#include <asm/cache.h>
14#include <asm/errno.h>
15#include <asm/ppc_asm.h>
16
17#define COPY_16_BYTES \
18 lwz r7,4(r4); \
19 lwz r8,8(r4); \
20 lwz r9,12(r4); \
21 lwzu r10,16(r4); \
22 stw r7,4(r6); \
23 stw r8,8(r6); \
24 stw r9,12(r6); \
25 stwu r10,16(r6)
26
27#define COPY_16_BYTES_WITHEX(n) \
288 ## n ## 0: \
29 lwz r7,4(r4); \
308 ## n ## 1: \
31 lwz r8,8(r4); \
328 ## n ## 2: \
33 lwz r9,12(r4); \
348 ## n ## 3: \
35 lwzu r10,16(r4); \
368 ## n ## 4: \
37 stw r7,4(r6); \
388 ## n ## 5: \
39 stw r8,8(r6); \
408 ## n ## 6: \
41 stw r9,12(r6); \
428 ## n ## 7: \
43 stwu r10,16(r6)
44
45#define COPY_16_BYTES_EXCODE(n) \
469 ## n ## 0: \
47 addi r5,r5,-(16 * n); \
48 b 104f; \
499 ## n ## 1: \
50 addi r5,r5,-(16 * n); \
51 b 105f; \
52.section __ex_table,"a"; \
53 .align 2; \
54 .long 8 ## n ## 0b,9 ## n ## 0b; \
55 .long 8 ## n ## 1b,9 ## n ## 0b; \
56 .long 8 ## n ## 2b,9 ## n ## 0b; \
57 .long 8 ## n ## 3b,9 ## n ## 0b; \
58 .long 8 ## n ## 4b,9 ## n ## 1b; \
59 .long 8 ## n ## 5b,9 ## n ## 1b; \
60 .long 8 ## n ## 6b,9 ## n ## 1b; \
61 .long 8 ## n ## 7b,9 ## n ## 1b; \
62 .text
63
64 .text
65 .stabs "arch/powerpc/lib/",N_SO,0,0,0f
66 .stabs "copy32.S",N_SO,0,0,0f
670:
68
69CACHELINE_BYTES = L1_CACHE_BYTES
70LG_CACHELINE_BYTES = L1_CACHE_SHIFT
71CACHELINE_MASK = (L1_CACHE_BYTES-1)
72
73/*
74 * Use dcbz on the complete cache lines in the destination
75 * to set them to zero. This requires that the destination
76 * area is cacheable. -- paulus
77 */
78_GLOBAL(cacheable_memzero)
79 mr r5,r4
80 li r4,0
81 addi r6,r3,-4
82 cmplwi 0,r5,4
83 blt 7f
84 stwu r4,4(r6)
85 beqlr
86 andi. r0,r6,3
87 add r5,r0,r5
88 subf r6,r0,r6
89 clrlwi r7,r6,32-LG_CACHELINE_BYTES
90 add r8,r7,r5
91 srwi r9,r8,LG_CACHELINE_BYTES
92 addic. r9,r9,-1 /* total number of complete cachelines */
93 ble 2f
94 xori r0,r7,CACHELINE_MASK & ~3
95 srwi. r0,r0,2
96 beq 3f
97 mtctr r0
984: stwu r4,4(r6)
99 bdnz 4b
1003: mtctr r9
101 li r7,4
102#if !defined(CONFIG_8xx)
10310: dcbz r7,r6
104#else
10510: stw r4, 4(r6)
106 stw r4, 8(r6)
107 stw r4, 12(r6)
108 stw r4, 16(r6)
109#if CACHE_LINE_SIZE >= 32
110 stw r4, 20(r6)
111 stw r4, 24(r6)
112 stw r4, 28(r6)
113 stw r4, 32(r6)
114#endif /* CACHE_LINE_SIZE */
115#endif
116 addi r6,r6,CACHELINE_BYTES
117 bdnz 10b
118 clrlwi r5,r8,32-LG_CACHELINE_BYTES
119 addi r5,r5,4
1202: srwi r0,r5,2
121 mtctr r0
122 bdz 6f
1231: stwu r4,4(r6)
124 bdnz 1b
1256: andi. r5,r5,3
1267: cmpwi 0,r5,0
127 beqlr
128 mtctr r5
129 addi r6,r6,3
1308: stbu r4,1(r6)
131 bdnz 8b
132 blr
133
134_GLOBAL(memset)
135 rlwimi r4,r4,8,16,23
136 rlwimi r4,r4,16,0,15
137 addi r6,r3,-4
138 cmplwi 0,r5,4
139 blt 7f
140 stwu r4,4(r6)
141 beqlr
142 andi. r0,r6,3
143 add r5,r0,r5
144 subf r6,r0,r6
145 srwi r0,r5,2
146 mtctr r0
147 bdz 6f
1481: stwu r4,4(r6)
149 bdnz 1b
1506: andi. r5,r5,3
1517: cmpwi 0,r5,0
152 beqlr
153 mtctr r5
154 addi r6,r6,3
1558: stbu r4,1(r6)
156 bdnz 8b
157 blr
158
159/*
160 * This version uses dcbz on the complete cache lines in the
161 * destination area to reduce memory traffic. This requires that
162 * the destination area is cacheable.
163 * We only use this version if the source and dest don't overlap.
164 * -- paulus.
165 */
166_GLOBAL(cacheable_memcpy)
167 add r7,r3,r5 /* test if the src & dst overlap */
168 add r8,r4,r5
169 cmplw 0,r4,r7
170 cmplw 1,r3,r8
171 crand 0,0,4 /* cr0.lt &= cr1.lt */
172 blt memcpy /* if regions overlap */
173
174 addi r4,r4,-4
175 addi r6,r3,-4
176 neg r0,r3
177 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
178 beq 58f
179
180 cmplw 0,r5,r0 /* is this more than total to do? */
181 blt 63f /* if not much to do */
182 andi. r8,r0,3 /* get it word-aligned first */
183 subf r5,r0,r5
184 mtctr r8
185 beq+ 61f
18670: lbz r9,4(r4) /* do some bytes */
187 stb r9,4(r6)
188 addi r4,r4,1
189 addi r6,r6,1
190 bdnz 70b
19161: srwi. r0,r0,2
192 mtctr r0
193 beq 58f
19472: lwzu r9,4(r4) /* do some words */
195 stwu r9,4(r6)
196 bdnz 72b
197
19858: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
199 clrlwi r5,r5,32-LG_CACHELINE_BYTES
200 li r11,4
201 mtctr r0
202 beq 63f
20353:
204#if !defined(CONFIG_8xx)
205 dcbz r11,r6
206#endif
207 COPY_16_BYTES
208#if L1_CACHE_BYTES >= 32
209 COPY_16_BYTES
210#if L1_CACHE_BYTES >= 64
211 COPY_16_BYTES
212 COPY_16_BYTES
213#if L1_CACHE_BYTES >= 128
214 COPY_16_BYTES
215 COPY_16_BYTES
216 COPY_16_BYTES
217 COPY_16_BYTES
218#endif
219#endif
220#endif
221 bdnz 53b
222
22363: srwi. r0,r5,2
224 mtctr r0
225 beq 64f
22630: lwzu r0,4(r4)
227 stwu r0,4(r6)
228 bdnz 30b
229
23064: andi. r0,r5,3
231 mtctr r0
232 beq+ 65f
23340: lbz r0,4(r4)
234 stb r0,4(r6)
235 addi r4,r4,1
236 addi r6,r6,1
237 bdnz 40b
23865: blr
239
240_GLOBAL(memmove)
241 cmplw 0,r3,r4
242 bgt backwards_memcpy
243 /* fall through */
244
245_GLOBAL(memcpy)
246 srwi. r7,r5,3
247 addi r6,r3,-4
248 addi r4,r4,-4
249 beq 2f /* if less than 8 bytes to do */
250 andi. r0,r6,3 /* get dest word aligned */
251 mtctr r7
252 bne 5f
2531: lwz r7,4(r4)
254 lwzu r8,8(r4)
255 stw r7,4(r6)
256 stwu r8,8(r6)
257 bdnz 1b
258 andi. r5,r5,7
2592: cmplwi 0,r5,4
260 blt 3f
261 lwzu r0,4(r4)
262 addi r5,r5,-4
263 stwu r0,4(r6)
2643: cmpwi 0,r5,0
265 beqlr
266 mtctr r5
267 addi r4,r4,3
268 addi r6,r6,3
2694: lbzu r0,1(r4)
270 stbu r0,1(r6)
271 bdnz 4b
272 blr
2735: subfic r0,r0,4
274 mtctr r0
2756: lbz r7,4(r4)
276 addi r4,r4,1
277 stb r7,4(r6)
278 addi r6,r6,1
279 bdnz 6b
280 subf r5,r0,r5
281 rlwinm. r7,r5,32-3,3,31
282 beq 2b
283 mtctr r7
284 b 1b
285
286_GLOBAL(backwards_memcpy)
287 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
288 add r6,r3,r5
289 add r4,r4,r5
290 beq 2f
291 andi. r0,r6,3
292 mtctr r7
293 bne 5f
2941: lwz r7,-4(r4)
295 lwzu r8,-8(r4)
296 stw r7,-4(r6)
297 stwu r8,-8(r6)
298 bdnz 1b
299 andi. r5,r5,7
3002: cmplwi 0,r5,4
301 blt 3f
302 lwzu r0,-4(r4)
303 subi r5,r5,4
304 stwu r0,-4(r6)
3053: cmpwi 0,r5,0
306 beqlr
307 mtctr r5
3084: lbzu r0,-1(r4)
309 stbu r0,-1(r6)
310 bdnz 4b
311 blr
3125: mtctr r0
3136: lbzu r7,-1(r4)
314 stbu r7,-1(r6)
315 bdnz 6b
316 subf r5,r0,r5
317 rlwinm. r7,r5,32-3,3,31
318 beq 2b
319 mtctr r7
320 b 1b
321
322_GLOBAL(__copy_tofrom_user)
323 addi r4,r4,-4
324 addi r6,r3,-4
325 neg r0,r3
326 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
327 beq 58f
328
329 cmplw 0,r5,r0 /* is this more than total to do? */
330 blt 63f /* if not much to do */
331 andi. r8,r0,3 /* get it word-aligned first */
332 mtctr r8
333 beq+ 61f
33470: lbz r9,4(r4) /* do some bytes */
33571: stb r9,4(r6)
336 addi r4,r4,1
337 addi r6,r6,1
338 bdnz 70b
33961: subf r5,r0,r5
340 srwi. r0,r0,2
341 mtctr r0
342 beq 58f
34372: lwzu r9,4(r4) /* do some words */
34473: stwu r9,4(r6)
345 bdnz 72b
346
347 .section __ex_table,"a"
348 .align 2
349 .long 70b,100f
350 .long 71b,101f
351 .long 72b,102f
352 .long 73b,103f
353 .text
354
35558: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
356 clrlwi r5,r5,32-LG_CACHELINE_BYTES
357 li r11,4
358 beq 63f
359
360#ifdef CONFIG_8xx
361 /* Don't use prefetch on 8xx */
362 mtctr r0
363 li r0,0
36453: COPY_16_BYTES_WITHEX(0)
365 bdnz 53b
366
367#else /* not CONFIG_8xx */
368 /* Here we decide how far ahead to prefetch the source */
369 li r3,4
370 cmpwi r0,1
371 li r7,0
372 ble 114f
373 li r7,1
374#if MAX_COPY_PREFETCH > 1
375 /* Heuristically, for large transfers we prefetch
376 MAX_COPY_PREFETCH cachelines ahead. For small transfers
377 we prefetch 1 cacheline ahead. */
378 cmpwi r0,MAX_COPY_PREFETCH
379 ble 112f
380 li r7,MAX_COPY_PREFETCH
381112: mtctr r7
382111: dcbt r3,r4
383 addi r3,r3,CACHELINE_BYTES
384 bdnz 111b
385#else
386 dcbt r3,r4
387 addi r3,r3,CACHELINE_BYTES
388#endif /* MAX_COPY_PREFETCH > 1 */
389
390114: subf r8,r7,r0
391 mr r0,r7
392 mtctr r8
393
39453: dcbt r3,r4
39554: dcbz r11,r6
396 .section __ex_table,"a"
397 .align 2
398 .long 54b,105f
399 .text
400/* the main body of the cacheline loop */
401 COPY_16_BYTES_WITHEX(0)
402#if L1_CACHE_BYTES >= 32
403 COPY_16_BYTES_WITHEX(1)
404#if L1_CACHE_BYTES >= 64
405 COPY_16_BYTES_WITHEX(2)
406 COPY_16_BYTES_WITHEX(3)
407#if L1_CACHE_BYTES >= 128
408 COPY_16_BYTES_WITHEX(4)
409 COPY_16_BYTES_WITHEX(5)
410 COPY_16_BYTES_WITHEX(6)
411 COPY_16_BYTES_WITHEX(7)
412#endif
413#endif
414#endif
415 bdnz 53b
416 cmpwi r0,0
417 li r3,4
418 li r7,0
419 bne 114b
420#endif /* CONFIG_8xx */
421
42263: srwi. r0,r5,2
423 mtctr r0
424 beq 64f
42530: lwzu r0,4(r4)
42631: stwu r0,4(r6)
427 bdnz 30b
428
42964: andi. r0,r5,3
430 mtctr r0
431 beq+ 65f
43240: lbz r0,4(r4)
43341: stb r0,4(r6)
434 addi r4,r4,1
435 addi r6,r6,1
436 bdnz 40b
43765: li r3,0
438 blr
439
440/* read fault, initial single-byte copy */
441100: li r9,0
442 b 90f
443/* write fault, initial single-byte copy */
444101: li r9,1
44590: subf r5,r8,r5
446 li r3,0
447 b 99f
448/* read fault, initial word copy */
449102: li r9,0
450 b 91f
451/* write fault, initial word copy */
452103: li r9,1
45391: li r3,2
454 b 99f
455
456/*
457 * this stuff handles faults in the cacheline loop and branches to either
458 * 104f (if in read part) or 105f (if in write part), after updating r5
459 */
460 COPY_16_BYTES_EXCODE(0)
461#if L1_CACHE_BYTES >= 32
462 COPY_16_BYTES_EXCODE(1)
463#if L1_CACHE_BYTES >= 64
464 COPY_16_BYTES_EXCODE(2)
465 COPY_16_BYTES_EXCODE(3)
466#if L1_CACHE_BYTES >= 128
467 COPY_16_BYTES_EXCODE(4)
468 COPY_16_BYTES_EXCODE(5)
469 COPY_16_BYTES_EXCODE(6)
470 COPY_16_BYTES_EXCODE(7)
471#endif
472#endif
473#endif
474
475/* read fault in cacheline loop */
476104: li r9,0
477 b 92f
478/* fault on dcbz (effectively a write fault) */
479/* or write fault in cacheline loop */
480105: li r9,1
48192: li r3,LG_CACHELINE_BYTES
482 mfctr r8
483 add r0,r0,r8
484 b 106f
485/* read fault in final word loop */
486108: li r9,0
487 b 93f
488/* write fault in final word loop */
489109: li r9,1
49093: andi. r5,r5,3
491 li r3,2
492 b 99f
493/* read fault in final byte loop */
494110: li r9,0
495 b 94f
496/* write fault in final byte loop */
497111: li r9,1
49894: li r5,0
499 li r3,0
500/*
501 * At this stage the number of bytes not copied is
502 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
503 */
50499: mfctr r0
505106: slw r3,r0,r3
506 add. r3,r3,r5
507 beq 120f /* shouldn't happen */
508 cmpwi 0,r9,0
509 bne 120f
510/* for a read fault, first try to continue the copy one byte at a time */
511 mtctr r3
512130: lbz r0,4(r4)
513131: stb r0,4(r6)
514 addi r4,r4,1
515 addi r6,r6,1
516 bdnz 130b
517/* then clear out the destination: r3 bytes starting at 4(r6) */
518132: mfctr r3
519 srwi. r0,r3,2
520 li r9,0
521 mtctr r0
522 beq 113f
523112: stwu r9,4(r6)
524 bdnz 112b
525113: andi. r0,r3,3
526 mtctr r0
527 beq 120f
528114: stb r9,4(r6)
529 addi r6,r6,1
530 bdnz 114b
531120: blr
532
533 .section __ex_table,"a"
534 .align 2
535 .long 30b,108b
536 .long 31b,109b
537 .long 40b,110b
538 .long 41b,111b
539 .long 130b,132b
540 .long 131b,120b
541 .long 112b,120b
542 .long 114b,120b
543 .text
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
new file mode 100644
index 000000000000..733d61618bbf
--- /dev/null
+++ b/arch/powerpc/lib/copypage_64.S
@@ -0,0 +1,121 @@
1/*
2 * arch/ppc64/lib/copypage.S
3 *
4 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14_GLOBAL(copy_page)
15 std r31,-8(1)
16 std r30,-16(1)
17 std r29,-24(1)
18 std r28,-32(1)
19 std r27,-40(1)
20 std r26,-48(1)
21 std r25,-56(1)
22 std r24,-64(1)
23 std r23,-72(1)
24 std r22,-80(1)
25 std r21,-88(1)
26 std r20,-96(1)
27 li r5,4096/32 - 1
28 addi r3,r3,-8
29 li r12,5
300: addi r5,r5,-24
31 mtctr r12
32 ld r22,640(4)
33 ld r21,512(4)
34 ld r20,384(4)
35 ld r11,256(4)
36 ld r9,128(4)
37 ld r7,0(4)
38 ld r25,648(4)
39 ld r24,520(4)
40 ld r23,392(4)
41 ld r10,264(4)
42 ld r8,136(4)
43 ldu r6,8(4)
44 cmpwi r5,24
451: std r22,648(3)
46 std r21,520(3)
47 std r20,392(3)
48 std r11,264(3)
49 std r9,136(3)
50 std r7,8(3)
51 ld r28,648(4)
52 ld r27,520(4)
53 ld r26,392(4)
54 ld r31,264(4)
55 ld r30,136(4)
56 ld r29,8(4)
57 std r25,656(3)
58 std r24,528(3)
59 std r23,400(3)
60 std r10,272(3)
61 std r8,144(3)
62 std r6,16(3)
63 ld r22,656(4)
64 ld r21,528(4)
65 ld r20,400(4)
66 ld r11,272(4)
67 ld r9,144(4)
68 ld r7,16(4)
69 std r28,664(3)
70 std r27,536(3)
71 std r26,408(3)
72 std r31,280(3)
73 std r30,152(3)
74 stdu r29,24(3)
75 ld r25,664(4)
76 ld r24,536(4)
77 ld r23,408(4)
78 ld r10,280(4)
79 ld r8,152(4)
80 ldu r6,24(4)
81 bdnz 1b
82 std r22,648(3)
83 std r21,520(3)
84 std r20,392(3)
85 std r11,264(3)
86 std r9,136(3)
87 std r7,8(3)
88 addi r4,r4,640
89 addi r3,r3,648
90 bge 0b
91 mtctr r5
92 ld r7,0(4)
93 ld r8,8(4)
94 ldu r9,16(4)
953: ld r10,8(4)
96 std r7,8(3)
97 ld r7,16(4)
98 std r8,16(3)
99 ld r8,24(4)
100 std r9,24(3)
101 ldu r9,32(4)
102 stdu r10,32(3)
103 bdnz 3b
1044: ld r10,8(4)
105 std r7,8(3)
106 std r8,16(3)
107 std r9,24(3)
108 std r10,32(3)
1099: ld r20,-96(1)
110 ld r21,-88(1)
111 ld r22,-80(1)
112 ld r23,-72(1)
113 ld r24,-64(1)
114 ld r25,-56(1)
115 ld r26,-48(1)
116 ld r27,-40(1)
117 ld r28,-32(1)
118 ld r29,-24(1)
119 ld r30,-16(1)
120 ld r31,-8(1)
121 blr
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
new file mode 100644
index 000000000000..a0b3fbbd6fb1
--- /dev/null
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -0,0 +1,576 @@
1/*
2 * arch/ppc64/lib/copyuser.S
3 *
4 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14 .align 7
15_GLOBAL(__copy_tofrom_user)
16 /* first check for a whole page copy on a page boundary */
17 cmpldi cr1,r5,16
18 cmpdi cr6,r5,4096
19 or r0,r3,r4
20 neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
21 andi. r0,r0,4095
22 std r3,-24(r1)
23 crand cr0*4+2,cr0*4+2,cr6*4+2
24 std r4,-16(r1)
25 std r5,-8(r1)
26 dcbt 0,r4
27 beq .Lcopy_page
28 andi. r6,r6,7
29 mtcrf 0x01,r5
30 blt cr1,.Lshort_copy
31 bne .Ldst_unaligned
32.Ldst_aligned:
33 andi. r0,r4,7
34 addi r3,r3,-16
35 bne .Lsrc_unaligned
36 srdi r7,r5,4
3720: ld r9,0(r4)
38 addi r4,r4,-8
39 mtctr r7
40 andi. r5,r5,7
41 bf cr7*4+0,22f
42 addi r3,r3,8
43 addi r4,r4,8
44 mr r8,r9
45 blt cr1,72f
4621: ld r9,8(r4)
4770: std r8,8(r3)
4822: ldu r8,16(r4)
4971: stdu r9,16(r3)
50 bdnz 21b
5172: std r8,8(r3)
52 beq+ 3f
53 addi r3,r3,16
5423: ld r9,8(r4)
55.Ldo_tail:
56 bf cr7*4+1,1f
57 rotldi r9,r9,32
5873: stw r9,0(r3)
59 addi r3,r3,4
601: bf cr7*4+2,2f
61 rotldi r9,r9,16
6274: sth r9,0(r3)
63 addi r3,r3,2
642: bf cr7*4+3,3f
65 rotldi r9,r9,8
6675: stb r9,0(r3)
673: li r3,0
68 blr
69
70.Lsrc_unaligned:
71 srdi r6,r5,3
72 addi r5,r5,-16
73 subf r4,r0,r4
74 srdi r7,r5,4
75 sldi r10,r0,3
76 cmpldi cr6,r6,3
77 andi. r5,r5,7
78 mtctr r7
79 subfic r11,r10,64
80 add r5,r5,r0
81 bt cr7*4+0,28f
82
8324: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
8425: ld r0,8(r4)
85 sld r6,r9,r10
8626: ldu r9,16(r4)
87 srd r7,r0,r11
88 sld r8,r0,r10
89 or r7,r7,r6
90 blt cr6,79f
9127: ld r0,8(r4)
92 b 2f
93
9428: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
9529: ldu r9,8(r4)
96 sld r8,r0,r10
97 addi r3,r3,-8
98 blt cr6,5f
9930: ld r0,8(r4)
100 srd r12,r9,r11
101 sld r6,r9,r10
10231: ldu r9,16(r4)
103 or r12,r8,r12
104 srd r7,r0,r11
105 sld r8,r0,r10
106 addi r3,r3,16
107 beq cr6,78f
108
1091: or r7,r7,r6
11032: ld r0,8(r4)
11176: std r12,8(r3)
1122: srd r12,r9,r11
113 sld r6,r9,r10
11433: ldu r9,16(r4)
115 or r12,r8,r12
11677: stdu r7,16(r3)
117 srd r7,r0,r11
118 sld r8,r0,r10
119 bdnz 1b
120
12178: std r12,8(r3)
122 or r7,r7,r6
12379: std r7,16(r3)
1245: srd r12,r9,r11
125 or r12,r8,r12
12680: std r12,24(r3)
127 bne 6f
128 li r3,0
129 blr
1306: cmpwi cr1,r5,8
131 addi r3,r3,32
132 sld r9,r9,r10
133 ble cr1,.Ldo_tail
13434: ld r0,8(r4)
135 srd r7,r0,r11
136 or r9,r7,r9
137 b .Ldo_tail
138
139.Ldst_unaligned:
140 mtcrf 0x01,r6 /* put #bytes to 8B bdry into cr7 */
141 subf r5,r6,r5
142 li r7,0
143 cmpldi r1,r5,16
144 bf cr7*4+3,1f
14535: lbz r0,0(r4)
14681: stb r0,0(r3)
147 addi r7,r7,1
1481: bf cr7*4+2,2f
14936: lhzx r0,r7,r4
15082: sthx r0,r7,r3
151 addi r7,r7,2
1522: bf cr7*4+1,3f
15337: lwzx r0,r7,r4
15483: stwx r0,r7,r3
1553: mtcrf 0x01,r5
156 add r4,r6,r4
157 add r3,r6,r3
158 b .Ldst_aligned
159
160.Lshort_copy:
161 bf cr7*4+0,1f
16238: lwz r0,0(r4)
16339: lwz r9,4(r4)
164 addi r4,r4,8
16584: stw r0,0(r3)
16685: stw r9,4(r3)
167 addi r3,r3,8
1681: bf cr7*4+1,2f
16940: lwz r0,0(r4)
170 addi r4,r4,4
17186: stw r0,0(r3)
172 addi r3,r3,4
1732: bf cr7*4+2,3f
17441: lhz r0,0(r4)
175 addi r4,r4,2
17687: sth r0,0(r3)
177 addi r3,r3,2
1783: bf cr7*4+3,4f
17942: lbz r0,0(r4)
18088: stb r0,0(r3)
1814: li r3,0
182 blr
183
184/*
185 * exception handlers follow
186 * we have to return the number of bytes not copied
187 * for an exception on a load, we set the rest of the destination to 0
188 */
189
190136:
191137:
192 add r3,r3,r7
193 b 1f
194130:
195131:
196 addi r3,r3,8
197120:
198122:
199124:
200125:
201126:
202127:
203128:
204129:
205133:
206 addi r3,r3,8
207121:
208132:
209 addi r3,r3,8
210123:
211134:
212135:
213138:
214139:
215140:
216141:
217142:
218
219/*
220 * here we have had a fault on a load and r3 points to the first
221 * unmodified byte of the destination
222 */
2231: ld r6,-24(r1)
224 ld r4,-16(r1)
225 ld r5,-8(r1)
226 subf r6,r6,r3
227 add r4,r4,r6
228 subf r5,r6,r5 /* #bytes left to go */
229
230/*
231 * first see if we can copy any more bytes before hitting another exception
232 */
233 mtctr r5
23443: lbz r0,0(r4)
235 addi r4,r4,1
23689: stb r0,0(r3)
237 addi r3,r3,1
238 bdnz 43b
239 li r3,0 /* huh? all copied successfully this time? */
240 blr
241
242/*
243 * here we have trapped again, need to clear ctr bytes starting at r3
244 */
245143: mfctr r5
246 li r0,0
247 mr r4,r3
248 mr r3,r5 /* return the number of bytes not copied */
2491: andi. r9,r4,7
250 beq 3f
25190: stb r0,0(r4)
252 addic. r5,r5,-1
253 addi r4,r4,1
254 bne 1b
255 blr
2563: cmpldi cr1,r5,8
257 srdi r9,r5,3
258 andi. r5,r5,7
259 blt cr1,93f
260 mtctr r9
26191: std r0,0(r4)
262 addi r4,r4,8
263 bdnz 91b
26493: beqlr
265 mtctr r5
26692: stb r0,0(r4)
267 addi r4,r4,1
268 bdnz 92b
269 blr
270
271/*
272 * exception handlers for stores: we just need to work
273 * out how many bytes weren't copied
274 */
275182:
276183:
277 add r3,r3,r7
278 b 1f
279180:
280 addi r3,r3,8
281171:
282177:
283 addi r3,r3,8
284170:
285172:
286176:
287178:
288 addi r3,r3,4
289185:
290 addi r3,r3,4
291173:
292174:
293175:
294179:
295181:
296184:
297186:
298187:
299188:
300189:
3011:
302 ld r6,-24(r1)
303 ld r5,-8(r1)
304 add r6,r6,r5
305 subf r3,r3,r6 /* #bytes not copied */
306190:
307191:
308192:
309 blr /* #bytes not copied in r3 */
310
311 .section __ex_table,"a"
312 .align 3
313 .llong 20b,120b
314 .llong 21b,121b
315 .llong 70b,170b
316 .llong 22b,122b
317 .llong 71b,171b
318 .llong 72b,172b
319 .llong 23b,123b
320 .llong 73b,173b
321 .llong 74b,174b
322 .llong 75b,175b
323 .llong 24b,124b
324 .llong 25b,125b
325 .llong 26b,126b
326 .llong 27b,127b
327 .llong 28b,128b
328 .llong 29b,129b
329 .llong 30b,130b
330 .llong 31b,131b
331 .llong 32b,132b
332 .llong 76b,176b
333 .llong 33b,133b
334 .llong 77b,177b
335 .llong 78b,178b
336 .llong 79b,179b
337 .llong 80b,180b
338 .llong 34b,134b
339 .llong 35b,135b
340 .llong 81b,181b
341 .llong 36b,136b
342 .llong 82b,182b
343 .llong 37b,137b
344 .llong 83b,183b
345 .llong 38b,138b
346 .llong 39b,139b
347 .llong 84b,184b
348 .llong 85b,185b
349 .llong 40b,140b
350 .llong 86b,186b
351 .llong 41b,141b
352 .llong 87b,187b
353 .llong 42b,142b
354 .llong 88b,188b
355 .llong 43b,143b
356 .llong 89b,189b
357 .llong 90b,190b
358 .llong 91b,191b
359 .llong 92b,192b
360
361 .text
362
363/*
364 * Routine to copy a whole page of data, optimized for POWER4.
365 * On POWER4 it is more than 50% faster than the simple loop
366 * above (following the .Ldst_aligned label) but it runs slightly
367 * slower on POWER3.
368 */
369.Lcopy_page:
370 std r31,-32(1)
371 std r30,-40(1)
372 std r29,-48(1)
373 std r28,-56(1)
374 std r27,-64(1)
375 std r26,-72(1)
376 std r25,-80(1)
377 std r24,-88(1)
378 std r23,-96(1)
379 std r22,-104(1)
380 std r21,-112(1)
381 std r20,-120(1)
382 li r5,4096/32 - 1
383 addi r3,r3,-8
384 li r0,5
3850: addi r5,r5,-24
386 mtctr r0
38720: ld r22,640(4)
38821: ld r21,512(4)
38922: ld r20,384(4)
39023: ld r11,256(4)
39124: ld r9,128(4)
39225: ld r7,0(4)
39326: ld r25,648(4)
39427: ld r24,520(4)
39528: ld r23,392(4)
39629: ld r10,264(4)
39730: ld r8,136(4)
39831: ldu r6,8(4)
399 cmpwi r5,24
4001:
40132: std r22,648(3)
40233: std r21,520(3)
40334: std r20,392(3)
40435: std r11,264(3)
40536: std r9,136(3)
40637: std r7,8(3)
40738: ld r28,648(4)
40839: ld r27,520(4)
40940: ld r26,392(4)
41041: ld r31,264(4)
41142: ld r30,136(4)
41243: ld r29,8(4)
41344: std r25,656(3)
41445: std r24,528(3)
41546: std r23,400(3)
41647: std r10,272(3)
41748: std r8,144(3)
41849: std r6,16(3)
41950: ld r22,656(4)
42051: ld r21,528(4)
42152: ld r20,400(4)
42253: ld r11,272(4)
42354: ld r9,144(4)
42455: ld r7,16(4)
42556: std r28,664(3)
42657: std r27,536(3)
42758: std r26,408(3)
42859: std r31,280(3)
42960: std r30,152(3)
43061: stdu r29,24(3)
43162: ld r25,664(4)
43263: ld r24,536(4)
43364: ld r23,408(4)
43465: ld r10,280(4)
43566: ld r8,152(4)
43667: ldu r6,24(4)
437 bdnz 1b
43868: std r22,648(3)
43969: std r21,520(3)
44070: std r20,392(3)
44171: std r11,264(3)
44272: std r9,136(3)
44373: std r7,8(3)
44474: addi r4,r4,640
44575: addi r3,r3,648
446 bge 0b
447 mtctr r5
44876: ld r7,0(4)
44977: ld r8,8(4)
45078: ldu r9,16(4)
4513:
45279: ld r10,8(4)
45380: std r7,8(3)
45481: ld r7,16(4)
45582: std r8,16(3)
45683: ld r8,24(4)
45784: std r9,24(3)
45885: ldu r9,32(4)
45986: stdu r10,32(3)
460 bdnz 3b
4614:
46287: ld r10,8(4)
46388: std r7,8(3)
46489: std r8,16(3)
46590: std r9,24(3)
46691: std r10,32(3)
4679: ld r20,-120(1)
468 ld r21,-112(1)
469 ld r22,-104(1)
470 ld r23,-96(1)
471 ld r24,-88(1)
472 ld r25,-80(1)
473 ld r26,-72(1)
474 ld r27,-64(1)
475 ld r28,-56(1)
476 ld r29,-48(1)
477 ld r30,-40(1)
478 ld r31,-32(1)
479 li r3,0
480 blr
481
482/*
483 * on an exception, reset to the beginning and jump back into the
484 * standard __copy_tofrom_user
485 */
486100: ld r20,-120(1)
487 ld r21,-112(1)
488 ld r22,-104(1)
489 ld r23,-96(1)
490 ld r24,-88(1)
491 ld r25,-80(1)
492 ld r26,-72(1)
493 ld r27,-64(1)
494 ld r28,-56(1)
495 ld r29,-48(1)
496 ld r30,-40(1)
497 ld r31,-32(1)
498 ld r3,-24(r1)
499 ld r4,-16(r1)
500 li r5,4096
501 b .Ldst_aligned
502
503 .section __ex_table,"a"
504 .align 3
505 .llong 20b,100b
506 .llong 21b,100b
507 .llong 22b,100b
508 .llong 23b,100b
509 .llong 24b,100b
510 .llong 25b,100b
511 .llong 26b,100b
512 .llong 27b,100b
513 .llong 28b,100b
514 .llong 29b,100b
515 .llong 30b,100b
516 .llong 31b,100b
517 .llong 32b,100b
518 .llong 33b,100b
519 .llong 34b,100b
520 .llong 35b,100b
521 .llong 36b,100b
522 .llong 37b,100b
523 .llong 38b,100b
524 .llong 39b,100b
525 .llong 40b,100b
526 .llong 41b,100b
527 .llong 42b,100b
528 .llong 43b,100b
529 .llong 44b,100b
530 .llong 45b,100b
531 .llong 46b,100b
532 .llong 47b,100b
533 .llong 48b,100b
534 .llong 49b,100b
535 .llong 50b,100b
536 .llong 51b,100b
537 .llong 52b,100b
538 .llong 53b,100b
539 .llong 54b,100b
540 .llong 55b,100b
541 .llong 56b,100b
542 .llong 57b,100b
543 .llong 58b,100b
544 .llong 59b,100b
545 .llong 60b,100b
546 .llong 61b,100b
547 .llong 62b,100b
548 .llong 63b,100b
549 .llong 64b,100b
550 .llong 65b,100b
551 .llong 66b,100b
552 .llong 67b,100b
553 .llong 68b,100b
554 .llong 69b,100b
555 .llong 70b,100b
556 .llong 71b,100b
557 .llong 72b,100b
558 .llong 73b,100b
559 .llong 74b,100b
560 .llong 75b,100b
561 .llong 76b,100b
562 .llong 77b,100b
563 .llong 78b,100b
564 .llong 79b,100b
565 .llong 80b,100b
566 .llong 81b,100b
567 .llong 82b,100b
568 .llong 83b,100b
569 .llong 84b,100b
570 .llong 85b,100b
571 .llong 86b,100b
572 .llong 87b,100b
573 .llong 88b,100b
574 .llong 89b,100b
575 .llong 90b,100b
576 .llong 91b,100b
diff --git a/arch/powerpc/lib/div64.S b/arch/powerpc/lib/div64.S
new file mode 100644
index 000000000000..83d9832fd919
--- /dev/null
+++ b/arch/powerpc/lib/div64.S
@@ -0,0 +1,59 @@
1/*
2 * Divide a 64-bit unsigned number by a 32-bit unsigned number.
3 * This routine assumes that the top 32 bits of the dividend are
4 * non-zero to start with.
5 * On entry, r3 points to the dividend, which get overwritten with
6 * the 64-bit quotient, and r4 contains the divisor.
7 * On exit, r3 contains the remainder.
8 *
9 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16#include <asm/ppc_asm.h>
17#include <asm/processor.h>
18
/*
 * __div64_32(uint64_t *dividend [r3], uint32_t divisor [r4])
 * 64-by-32 unsigned divide using only 32-bit divide instructions.
 * *r3 is overwritten with the quotient; the remainder is returned in r3.
 * Quotient accumulates in r7 (hi) / r8 (lo); dividend lives in r5/r6.
 */
_GLOBAL(__div64_32)
	lwz	r5,0(r3)	# get the dividend into r5/r6
	lwz	r6,4(r3)
	cmplw	r5,r4
	li	r7,0
	li	r8,0
	blt	1f
	divwu	r7,r5,r4	# if dividend.hi >= divisor,
	mullw	r0,r7,r4	# quotient.hi = dividend.hi / divisor
	subf.	r5,r0,r5	# dividend.hi %= divisor
	beq	3f
1:	mr	r11,r5		# here dividend.hi != 0
	andis.	r0,r5,0xc000
	bne	2f
	cntlzw	r0,r5		# we are shifting the dividend right
	li	r10,-1		# to make it < 2^32, and shifting
	srw	r10,r10,r0	# the divisor right the same amount,
	addc	r9,r4,r10	# rounding up (so the estimate cannot
	andc	r11,r6,r10	# ever be too large, only too small)
	andc	r9,r9,r10
	addze	r9,r9
	or	r11,r5,r11
	rotlw	r9,r9,r0
	rotlw	r11,r11,r0
	divwu	r11,r11,r9	# then we divide the shifted quantities
2:	mullw	r10,r11,r4	# to get an estimate of the quotient,
	mulhwu	r9,r11,r4	# multiply the estimate by the divisor,
	subfc	r6,r10,r6	# take the product from the divisor,
	add	r8,r8,r11	# and add the estimate to the accumulated
	subfe.	r5,r9,r5	# quotient
	bne	1b		# repeat until dividend.hi becomes zero
3:	cmplw	r6,r4
	blt	4f
	divwu	r0,r6,r4	# perform the remaining 32-bit division
	mullw	r10,r0,r4	# and get the remainder
	add	r8,r8,r0
	subf	r6,r10,r6
4:	stw	r7,0(r3)	# return the quotient in *r3
	stw	r8,4(r3)
	mr	r3,r6		# return the remainder in r3
	blr
diff --git a/arch/powerpc/lib/e2a.c b/arch/powerpc/lib/e2a.c
new file mode 100644
index 000000000000..d2b834887920
--- /dev/null
+++ b/arch/powerpc/lib/e2a.c
@@ -0,0 +1,108 @@
1/*
2 * arch/ppc64/lib/e2a.c
3 *
4 * EBCDIC to ASCII conversion
5 *
6 * This function moved here from arch/ppc64/kernel/viopath.c
7 *
8 * (C) Copyright 2000-2004 IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25
26#include <linux/module.h>
27
/*
 * Convert a single EBCDIC character to ASCII.
 *
 * Only the EBCDIC digits and upper-case letters are mapped; any other
 * code point translates to a space (preserving the behavior of the
 * original 36-case switch).  The EBCDIC letters fall into three
 * contiguous runs (A-I, J-R, S-Z) and the digits into one (0-9), so a
 * range test plus an offset replaces the big switch.
 */
unsigned char e2a(unsigned char x)
{
	if (x >= 0xF0 && x <= 0xF9)		/* EBCDIC '0'..'9' */
		return '0' + (x - 0xF0);
	if (x >= 0xC1 && x <= 0xC9)		/* EBCDIC 'A'..'I' */
		return 'A' + (x - 0xC1);
	if (x >= 0xD1 && x <= 0xD9)		/* EBCDIC 'J'..'R' */
		return 'J' + (x - 0xD1);
	if (x >= 0xE2 && x <= 0xE9)		/* EBCDIC 'S'..'Z' */
		return 'S' + (x - 0xE2);
	return ' ';				/* everything else */
}
106EXPORT_SYMBOL(e2a);
107
108
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
new file mode 100644
index 000000000000..4b8c5ad5e7dc
--- /dev/null
+++ b/arch/powerpc/lib/locks.c
@@ -0,0 +1,95 @@
1/*
2 * Spin and read/write lock operations.
3 *
4 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
6 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
7 * Rework to support virtual processors
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/kernel.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/stringify.h>
20
21/* waiting for a spinlock... */
22#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
23#include <asm/hvcall.h>
24#include <asm/iSeries/HvCall.h>
25
/*
 * Give our virtual-cpu timeslice to the cpu that currently holds @lock.
 * Called while spinning in a shared-processor (virtualized) partition so
 * that we do not burn cycles while the lock holder is itself preempted
 * by the hypervisor.
 */
void __spin_yield(raw_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;
	struct paca_struct *holder_paca;

	/* Snapshot the lock word; zero means the lock is now free. */
	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	/* Low 16 bits of a held lock encode the holder's cpu number. */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	holder_paca = &paca[holder_cpu];
	yield_count = holder_paca->lppaca.yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	/* Re-check after the barrier: only confer if the same holder still
	 * owns the lock, otherwise the yield would target a stale owner. */
	if (lock->slock != lock_value)
		return;		/* something has changed */
#ifdef CONFIG_PPC_ISERIES
	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
		((u64)holder_cpu << 32) | yield_count);
#else
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
			   yield_count);
#endif
}
51
52/*
53 * Waiting for a read lock or a write lock on a rwlock...
54 * This turns out to be the same for read and write locks, since
55 * we only know the holder if it is write-locked.
56 */
/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 *
 * Mirrors __spin_yield(): confer our virtual-cpu timeslice to the
 * write-lock holder if it is currently preempted by the hypervisor.
 */
void __rw_yield(raw_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;
	struct paca_struct *holder_paca;

	/* Snapshot; a negative value marks a write-held lock. */
	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	/* Low 16 bits encode the writer's cpu number. */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	holder_paca = &paca[holder_cpu];
	yield_count = holder_paca->lppaca.yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	/* Re-check under the barrier so we don't confer to a stale holder. */
	if (rw->lock != lock_value)
		return;		/* something has changed */
#ifdef CONFIG_PPC_ISERIES
	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
		((u64)holder_cpu << 32) | yield_count);
#else
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
			   yield_count);
#endif
}
83#endif
84
/*
 * Busy-wait until @lock is observed free, WITHOUT acquiring it.
 * While spinning, drop hardware-thread priority, and on shared-processor
 * partitions confer our timeslice to the current holder.
 */
void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (lock->slock) {
		HMT_low();	/* reduce SMT thread priority while spinning */
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();		/* restore normal priority before returning */
}
94
95EXPORT_SYMBOL(__raw_spin_unlock_wait);
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
new file mode 100644
index 000000000000..68df20283ff5
--- /dev/null
+++ b/arch/powerpc/lib/mem_64.S
@@ -0,0 +1,119 @@
1/*
2 * String handling functions for PowerPC.
3 *
4 * Copyright (C) 1996 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/errno.h>
13#include <asm/ppc_asm.h>
14
/*
 * memset(void *s [r3], int c [r4], size_t n [r5]) for 64-bit PowerPC.
 * The fill byte is replicated across all 8 bytes of r4, the destination
 * is aligned to 8 bytes, then filled in 64-byte, 8-byte and single-byte
 * stages.  r3 is never modified, so the original pointer is returned.
 */
_GLOBAL(memset)
	neg	r0,r3
	rlwimi	r4,r4,8,16,23	/* replicate byte -> halfword */
	andi.	r0,r0,7		/* # bytes to be 8-byte aligned */
	rlwimi	r4,r4,16,0,15	/* halfword -> word */
	cmplw	cr1,r5,r0	/* do we get that far? */
	rldimi	r4,r4,32,0	/* word -> doubleword */
	mtcrf	1,r0		/* alignment bytes into cr7 for bf/bt tests */
	mr	r6,r3		/* r6 walks, r3 stays as return value */
	blt	cr1,8f
	beq+	3f		/* if already 8-byte aligned */
	subf	r5,r0,r5
	bf	31,1f
	stb	r4,0(r6)	/* 1 byte toward alignment */
	addi	r6,r6,1
1:	bf	30,2f
	sth	r4,0(r6)	/* 2 bytes toward alignment */
	addi	r6,r6,2
2:	bf	29,3f
	stw	r4,0(r6)	/* 4 bytes toward alignment */
	addi	r6,r6,4
3:	srdi.	r0,r5,6		/* number of full 64-byte blocks */
	clrldi	r5,r5,58	/* remainder modulo 64 */
	mtctr	r0
	beq	5f
4:	std	r4,0(r6)	/* unrolled 64-byte fill loop */
	std	r4,8(r6)
	std	r4,16(r6)
	std	r4,24(r6)
	std	r4,32(r6)
	std	r4,40(r6)
	std	r4,48(r6)
	std	r4,56(r6)
	addi	r6,r6,64
	bdnz	4b
5:	srwi.	r0,r5,3		/* remaining 8-byte doublewords */
	clrlwi	r5,r5,29	/* remainder modulo 8 */
	mtcrf	1,r0
	beq	8f
	bf	29,6f
	std	r4,0(r6)	/* 4 doublewords */
	std	r4,8(r6)
	std	r4,16(r6)
	std	r4,24(r6)
	addi	r6,r6,32
6:	bf	30,7f
	std	r4,0(r6)	/* 2 doublewords */
	std	r4,8(r6)
	addi	r6,r6,16
7:	bf	31,8f
	std	r4,0(r6)	/* 1 doubleword */
	addi	r6,r6,8
8:	cmpwi	r5,0		/* tail: 0-7 bytes left */
	mtcrf	1,r5
	beqlr+
	bf	29,9f
	stw	r4,0(r6)
	addi	r6,r6,4
9:	bf	30,10f
	sth	r4,0(r6)
	addi	r6,r6,2
10:	bflr	31
	stb	r4,0(r6)
	blr
79
/*
 * memmove(void *dest [r3], const void *src [r4], size_t n [r5]).
 * A forward copy is safe when dest <= src, even for overlapping
 * regions; otherwise copy descending via backwards_memcpy.
 */
_GLOBAL(memmove)
	cmplw	0,r3,r4
	bgt	.backwards_memcpy
	b	.memcpy
84
/*
 * backwards_memcpy(void *dest [r3], const void *src [r4], size_t n [r5]).
 * Copy n bytes from the highest address downwards, so overlapping
 * regions with dest > src are handled correctly.  r6/r4 are advanced to
 * one past the end of dest/src and all stores use negative offsets.
 */
_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 (count of 8-byte chunks) */
	add	r6,r3,r5		/* r6 = one past end of dest */
	add	r4,r4,r5		/* r4 = one past end of src */
	beq	2f
	andi.	r0,r6,3			/* dest end word-aligned? */
	mtctr	r7
	bne	5f			/* no: byte-copy to alignment first */
1:	lwz	r7,-4(r4)		/* main loop: 8 bytes per iteration */
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7			/* 0-7 tail bytes remain */
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)		/* one more word if >= 4 bytes left */
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)		/* final 1-3 bytes */
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	mtctr	r0			/* copy r0 bytes to word-align dest end */
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5		/* recompute chunk count and rejoin */
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
new file mode 100644
index 000000000000..9ccacdf5bcb9
--- /dev/null
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -0,0 +1,172 @@
1/*
2 * arch/ppc64/lib/memcpy.S
3 *
4 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14 .align 7
15_GLOBAL(memcpy)
16 mtcrf 0x01,r5
17 cmpldi cr1,r5,16
18 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
19 andi. r6,r6,7
20 dcbt 0,r4
21 blt cr1,.Lshort_copy
22 bne .Ldst_unaligned
23.Ldst_aligned:
24 andi. r0,r4,7
25 addi r3,r3,-16
26 bne .Lsrc_unaligned
27 srdi r7,r5,4
28 ld r9,0(r4)
29 addi r4,r4,-8
30 mtctr r7
31 andi. r5,r5,7
32 bf cr7*4+0,2f
33 addi r3,r3,8
34 addi r4,r4,8
35 mr r8,r9
36 blt cr1,3f
371: ld r9,8(r4)
38 std r8,8(r3)
392: ldu r8,16(r4)
40 stdu r9,16(r3)
41 bdnz 1b
423: std r8,8(r3)
43 beqlr
44 addi r3,r3,16
45 ld r9,8(r4)
46.Ldo_tail:
47 bf cr7*4+1,1f
48 rotldi r9,r9,32
49 stw r9,0(r3)
50 addi r3,r3,4
511: bf cr7*4+2,2f
52 rotldi r9,r9,16
53 sth r9,0(r3)
54 addi r3,r3,2
552: bf cr7*4+3,3f
56 rotldi r9,r9,8
57 stb r9,0(r3)
583: blr
59
60.Lsrc_unaligned:
61 srdi r6,r5,3
62 addi r5,r5,-16
63 subf r4,r0,r4
64 srdi r7,r5,4
65 sldi r10,r0,3
66 cmpdi cr6,r6,3
67 andi. r5,r5,7
68 mtctr r7
69 subfic r11,r10,64
70 add r5,r5,r0
71
72 bt cr7*4+0,0f
73
74 ld r9,0(r4) # 3+2n loads, 2+2n stores
75 ld r0,8(r4)
76 sld r6,r9,r10
77 ldu r9,16(r4)
78 srd r7,r0,r11
79 sld r8,r0,r10
80 or r7,r7,r6
81 blt cr6,4f
82 ld r0,8(r4)
83 # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
84 b 2f
85
860: ld r0,0(r4) # 4+2n loads, 3+2n stores
87 ldu r9,8(r4)
88 sld r8,r0,r10
89 addi r3,r3,-8
90 blt cr6,5f
91 ld r0,8(r4)
92 srd r12,r9,r11
93 sld r6,r9,r10
94 ldu r9,16(r4)
95 or r12,r8,r12
96 srd r7,r0,r11
97 sld r8,r0,r10
98 addi r3,r3,16
99 beq cr6,3f
100
101 # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1021: or r7,r7,r6
103 ld r0,8(r4)
104 std r12,8(r3)
1052: srd r12,r9,r11
106 sld r6,r9,r10
107 ldu r9,16(r4)
108 or r12,r8,r12
109 stdu r7,16(r3)
110 srd r7,r0,r11
111 sld r8,r0,r10
112 bdnz 1b
113
1143: std r12,8(r3)
115 or r7,r7,r6
1164: std r7,16(r3)
1175: srd r12,r9,r11
118 or r12,r8,r12
119 std r12,24(r3)
120 beqlr
121 cmpwi cr1,r5,8
122 addi r3,r3,32
123 sld r9,r9,r10
124 ble cr1,.Ldo_tail
125 ld r0,8(r4)
126 srd r7,r0,r11
127 or r9,r7,r9
128 b .Ldo_tail
129
130.Ldst_unaligned:
131 mtcrf 0x01,r6 # put #bytes to 8B bdry into cr7
132 subf r5,r6,r5
133 li r7,0
134 cmpldi r1,r5,16
135 bf cr7*4+3,1f
136 lbz r0,0(r4)
137 stb r0,0(r3)
138 addi r7,r7,1
1391: bf cr7*4+2,2f
140 lhzx r0,r7,r4
141 sthx r0,r7,r3
142 addi r7,r7,2
1432: bf cr7*4+1,3f
144 lwzx r0,r7,r4
145 stwx r0,r7,r3
1463: mtcrf 0x01,r5
147 add r4,r6,r4
148 add r3,r6,r3
149 b .Ldst_aligned
150
151.Lshort_copy:
152 bf cr7*4+0,1f
153 lwz r0,0(r4)
154 lwz r9,4(r4)
155 addi r4,r4,8
156 stw r0,0(r3)
157 stw r9,4(r3)
158 addi r3,r3,8
1591: bf cr7*4+1,2f
160 lwz r0,0(r4)
161 addi r4,r4,4
162 stw r0,0(r3)
163 addi r3,r3,4
1642: bf cr7*4+2,3f
165 lhz r0,0(r4)
166 addi r4,r4,2
167 sth r0,0(r3)
168 addi r3,r3,2
1693: bf cr7*4+3,4f
170 lbz r0,0(r4)
171 stb r0,0(r3)
1724: blr
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
new file mode 100644
index 000000000000..42c5de2c898f
--- /dev/null
+++ b/arch/powerpc/lib/rheap.c
@@ -0,0 +1,693 @@
1/*
2 * arch/ppc/syslib/rheap.c
3 *
4 * A Remote Heap. Remote means that we don't touch the memory that the
5 * heap points to. Normal heap implementations use the memory they manage
6 * to place their list. We cannot do that because the memory we manage may
7 * have special properties, for example it is uncachable or of different
8 * endianness.
9 *
10 * Author: Pantelis Antoniou <panto@intracom.gr>
11 *
12 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
13 * the terms of the GNU General Public License version 2. This program
14 * is licensed "as is" without any warranty of any kind, whether express
15 * or implied.
16 */
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/mm.h>
20#include <linux/slab.h>
21
22#include <asm/rheap.h>
23
24/*
25 * Fixup a list_head, needed when copying lists. If the pointers fall
26 * between s and e, apply the delta. This assumes that
27 * sizeof(struct list_head *) == sizeof(unsigned long *).
28 */
29static inline void fixup(unsigned long s, unsigned long e, int d,
30 struct list_head *l)
31{
32 unsigned long *pp;
33
34 pp = (unsigned long *)&l->next;
35 if (*pp >= s && *pp < e)
36 *pp += d;
37
38 pp = (unsigned long *)&l->prev;
39 if (*pp >= s && *pp < e)
40 *pp += d;
41}
42
43/* Grow the allocated blocks */
44static int grow(rh_info_t * info, int max_blocks)
45{
46 rh_block_t *block, *blk;
47 int i, new_blocks;
48 int delta;
49 unsigned long blks, blke;
50
51 if (max_blocks <= info->max_blocks)
52 return -EINVAL;
53
54 new_blocks = max_blocks - info->max_blocks;
55
56 block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
57 if (block == NULL)
58 return -ENOMEM;
59
60 if (info->max_blocks > 0) {
61
62 /* copy old block area */
63 memcpy(block, info->block,
64 sizeof(rh_block_t) * info->max_blocks);
65
66 delta = (char *)block - (char *)info->block;
67
68 /* and fixup list pointers */
69 blks = (unsigned long)info->block;
70 blke = (unsigned long)(info->block + info->max_blocks);
71
72 for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
73 fixup(blks, blke, delta, &blk->list);
74
75 fixup(blks, blke, delta, &info->empty_list);
76 fixup(blks, blke, delta, &info->free_list);
77 fixup(blks, blke, delta, &info->taken_list);
78
79 /* free the old allocated memory */
80 if ((info->flags & RHIF_STATIC_BLOCK) == 0)
81 kfree(info->block);
82 }
83
84 info->block = block;
85 info->empty_slots += new_blocks;
86 info->max_blocks = max_blocks;
87 info->flags &= ~RHIF_STATIC_BLOCK;
88
89 /* add all new blocks to the free list */
90 for (i = 0, blk = block + info->max_blocks; i < new_blocks; i++, blk++)
91 list_add(&blk->list, &info->empty_list);
92
93 return 0;
94}
95
96/*
97 * Assure at least the required amount of empty slots. If this function
98 * causes a grow in the block area then all pointers kept to the block
99 * area are invalid!
100 */
101static int assure_empty(rh_info_t * info, int slots)
102{
103 int max_blocks;
104
105 /* This function is not meant to be used to grow uncontrollably */
106 if (slots >= 4)
107 return -EINVAL;
108
109 /* Enough space */
110 if (info->empty_slots >= slots)
111 return 0;
112
113 /* Next 16 sized block */
114 max_blocks = ((info->max_blocks + slots) + 15) & ~15;
115
116 return grow(info, max_blocks);
117}
118
119static rh_block_t *get_slot(rh_info_t * info)
120{
121 rh_block_t *blk;
122
123 /* If no more free slots, and failure to extend. */
124 /* XXX: You should have called assure_empty before */
125 if (info->empty_slots == 0) {
126 printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
127 return NULL;
128 }
129
130 /* Get empty slot to use */
131 blk = list_entry(info->empty_list.next, rh_block_t, list);
132 list_del_init(&blk->list);
133 info->empty_slots--;
134
135 /* Initialize */
136 blk->start = NULL;
137 blk->size = 0;
138 blk->owner = NULL;
139
140 return blk;
141}
142
143static inline void release_slot(rh_info_t * info, rh_block_t * blk)
144{
145 list_add(&blk->list, &info->empty_list);
146 info->empty_slots++;
147}
148
/*
 * Insert @blkn into the free list, coalescing it with a free block that
 * ends exactly where it starts ("before") and/or one that starts exactly
 * where it ends ("after").  If coalescing happens, @blkn's descriptor is
 * returned to the empty pool; otherwise it is linked into the list.
 */
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = (unsigned long)blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = (unsigned long)blk->start;
		be = bs + blk->size;

		/* remember an insertion point for the no-coalesce case;
		 * NOTE(review): this takes the FIRST block with s >= bs in
		 * list order — verify this keeps the list address-sorted */
		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before != NULL && s != (unsigned long)before->start + before->size)
		before = NULL;

	if (after != NULL && e != (unsigned long)after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start = (int8_t *)after->start - size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}
229
230static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
231{
232 rh_block_t *blk;
233 struct list_head *l;
234
235 /* Find the block immediately before the given one (if any) */
236 list_for_each(l, &info->taken_list) {
237 blk = list_entry(l, rh_block_t, list);
238 if (blk->start > blkn->start) {
239 list_add_tail(&blkn->list, &blk->list);
240 return;
241 }
242 }
243
244 list_add_tail(&blkn->list, &info->taken_list);
245}
246
247/*
248 * Create a remote heap dynamically. Note that no memory for the blocks
249 * are allocated. It will upon the first allocation
250 */
251rh_info_t *rh_create(unsigned int alignment)
252{
253 rh_info_t *info;
254
255 /* Alignment must be a power of two */
256 if ((alignment & (alignment - 1)) != 0)
257 return ERR_PTR(-EINVAL);
258
259 info = kmalloc(sizeof(*info), GFP_KERNEL);
260 if (info == NULL)
261 return ERR_PTR(-ENOMEM);
262
263 info->alignment = alignment;
264
265 /* Initially everything as empty */
266 info->block = NULL;
267 info->max_blocks = 0;
268 info->empty_slots = 0;
269 info->flags = 0;
270
271 INIT_LIST_HEAD(&info->empty_list);
272 INIT_LIST_HEAD(&info->free_list);
273 INIT_LIST_HEAD(&info->taken_list);
274
275 return info;
276}
277
278/*
279 * Destroy a dynamically created remote heap. Deallocate only if the areas
280 * are not static
281 */
282void rh_destroy(rh_info_t * info)
283{
284 if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
285 kfree(info->block);
286
287 if ((info->flags & RHIF_STATIC_INFO) == 0)
288 kfree(info);
289}
290
291/*
292 * Initialize in place a remote heap info block. This is needed to support
293 * operation very early in the startup of the kernel, when it is not yet safe
294 * to call kmalloc.
295 */
296void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
297 rh_block_t * block)
298{
299 int i;
300 rh_block_t *blk;
301
302 /* Alignment must be a power of two */
303 if ((alignment & (alignment - 1)) != 0)
304 return;
305
306 info->alignment = alignment;
307
308 /* Initially everything as empty */
309 info->block = block;
310 info->max_blocks = max_blocks;
311 info->empty_slots = max_blocks;
312 info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
313
314 INIT_LIST_HEAD(&info->empty_list);
315 INIT_LIST_HEAD(&info->free_list);
316 INIT_LIST_HEAD(&info->taken_list);
317
318 /* Add all new blocks to the free list */
319 for (i = 0, blk = block; i < max_blocks; i++, blk++)
320 list_add(&blk->list, &info->empty_list);
321}
322
323/* Attach a free memory region, coalesces regions if adjuscent */
324int rh_attach_region(rh_info_t * info, void *start, int size)
325{
326 rh_block_t *blk;
327 unsigned long s, e, m;
328 int r;
329
330 /* The region must be aligned */
331 s = (unsigned long)start;
332 e = s + size;
333 m = info->alignment - 1;
334
335 /* Round start up */
336 s = (s + m) & ~m;
337
338 /* Round end down */
339 e = e & ~m;
340
341 /* Take final values */
342 start = (void *)s;
343 size = (int)(e - s);
344
345 /* Grow the blocks, if needed */
346 r = assure_empty(info, 1);
347 if (r < 0)
348 return r;
349
350 blk = get_slot(info);
351 blk->start = start;
352 blk->size = size;
353 blk->owner = NULL;
354
355 attach_free_block(info, blk);
356
357 return 0;
358}
359
/* Detach given address range, splits free block if needed. */
void *rh_detach_region(rh_info_t * info, void *start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* May need one fresh slot for the back fragment of a split */
	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;	/* reset so a failed search is detectable below */
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return (void *)s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		/* NOTE(review): `size` here is the caller's un-rounded value,
		 * while s/e above were rounded to the alignment; the two only
		 * agree when the caller passes an aligned range -- confirm
		 * callers do (compare rh_attach_region, which recomputes). */
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = (void *)e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return (void *)s;
}
427
428void *rh_alloc(rh_info_t * info, int size, const char *owner)
429{
430 struct list_head *l;
431 rh_block_t *blk;
432 rh_block_t *newblk;
433 void *start;
434
435 /* Validate size */
436 if (size <= 0)
437 return ERR_PTR(-EINVAL);
438
439 /* Align to configured alignment */
440 size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
441
442 if (assure_empty(info, 1) < 0)
443 return ERR_PTR(-ENOMEM);
444
445 blk = NULL;
446 list_for_each(l, &info->free_list) {
447 blk = list_entry(l, rh_block_t, list);
448 if (size <= blk->size)
449 break;
450 blk = NULL;
451 }
452
453 if (blk == NULL)
454 return ERR_PTR(-ENOMEM);
455
456 /* Just fits */
457 if (blk->size == size) {
458 /* Move from free list to taken list */
459 list_del(&blk->list);
460 blk->owner = owner;
461 start = blk->start;
462
463 attach_taken_block(info, blk);
464
465 return start;
466 }
467
468 newblk = get_slot(info);
469 newblk->start = blk->start;
470 newblk->size = size;
471 newblk->owner = owner;
472
473 /* blk still in free list, with updated start, size */
474 blk->start = (int8_t *)blk->start + size;
475 blk->size -= size;
476
477 start = newblk->start;
478
479 attach_taken_block(info, newblk);
480
481 return start;
482}
483
484/* allocate at precisely the given address */
485void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
486{
487 struct list_head *l;
488 rh_block_t *blk, *newblk1, *newblk2;
489 unsigned long s, e, m, bs, be;
490
491 /* Validate size */
492 if (size <= 0)
493 return ERR_PTR(-EINVAL);
494
495 /* The region must be aligned */
496 s = (unsigned long)start;
497 e = s + size;
498 m = info->alignment - 1;
499
500 /* Round start up */
501 s = (s + m) & ~m;
502
503 /* Round end down */
504 e = e & ~m;
505
506 if (assure_empty(info, 2) < 0)
507 return ERR_PTR(-ENOMEM);
508
509 blk = NULL;
510 list_for_each(l, &info->free_list) {
511 blk = list_entry(l, rh_block_t, list);
512 /* The range must lie entirely inside one free block */
513 bs = (unsigned long)blk->start;
514 be = (unsigned long)blk->start + blk->size;
515 if (s >= bs && e <= be)
516 break;
517 }
518
519 if (blk == NULL)
520 return ERR_PTR(-ENOMEM);
521
522 /* Perfect fit */
523 if (bs == s && be == e) {
524 /* Move from free list to taken list */
525 list_del(&blk->list);
526 blk->owner = owner;
527
528 start = blk->start;
529 attach_taken_block(info, blk);
530
531 return start;
532
533 }
534
535 /* blk still in free list, with updated start and/or size */
536 if (bs == s || be == e) {
537 if (bs == s)
538 blk->start = (int8_t *)blk->start + size;
539 blk->size -= size;
540
541 } else {
542 /* The front free fragment */
543 blk->size = s - bs;
544
545 /* The back free fragment */
546 newblk2 = get_slot(info);
547 newblk2->start = (void *)e;
548 newblk2->size = be - e;
549
550 list_add(&newblk2->list, &blk->list);
551 }
552
553 newblk1 = get_slot(info);
554 newblk1->start = (void *)s;
555 newblk1->size = e - s;
556 newblk1->owner = owner;
557
558 start = newblk1->start;
559 attach_taken_block(info, newblk1);
560
561 return start;
562}
563
564int rh_free(rh_info_t * info, void *start)
565{
566 rh_block_t *blk, *blk2;
567 struct list_head *l;
568 int size;
569
570 /* Linear search for block */
571 blk = NULL;
572 list_for_each(l, &info->taken_list) {
573 blk2 = list_entry(l, rh_block_t, list);
574 if (start < blk2->start)
575 break;
576 blk = blk2;
577 }
578
579 if (blk == NULL || start > (blk->start + blk->size))
580 return -EINVAL;
581
582 /* Remove from taken list */
583 list_del(&blk->list);
584
585 /* Get size of freed block */
586 size = blk->size;
587 attach_free_block(info, blk);
588
589 return size;
590}
591
592int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
593{
594 rh_block_t *blk;
595 struct list_head *l;
596 struct list_head *h;
597 int nr;
598
599 switch (what) {
600
601 case RHGS_FREE:
602 h = &info->free_list;
603 break;
604
605 case RHGS_TAKEN:
606 h = &info->taken_list;
607 break;
608
609 default:
610 return -EINVAL;
611 }
612
613 /* Linear search for block */
614 nr = 0;
615 list_for_each(l, h) {
616 blk = list_entry(l, rh_block_t, list);
617 if (stats != NULL && nr < max_stats) {
618 stats->start = blk->start;
619 stats->size = blk->size;
620 stats->owner = blk->owner;
621 stats++;
622 }
623 nr++;
624 }
625
626 return nr;
627}
628
629int rh_set_owner(rh_info_t * info, void *start, const char *owner)
630{
631 rh_block_t *blk, *blk2;
632 struct list_head *l;
633 int size;
634
635 /* Linear search for block */
636 blk = NULL;
637 list_for_each(l, &info->taken_list) {
638 blk2 = list_entry(l, rh_block_t, list);
639 if (start < blk2->start)
640 break;
641 blk = blk2;
642 }
643
644 if (blk == NULL || start > (blk->start + blk->size))
645 return -EINVAL;
646
647 blk->owner = owner;
648 size = blk->size;
649
650 return size;
651}
652
653void rh_dump(rh_info_t * info)
654{
655 static rh_stats_t st[32]; /* XXX maximum 32 blocks */
656 int maxnr;
657 int i, nr;
658
659 maxnr = sizeof(st) / sizeof(st[0]);
660
661 printk(KERN_INFO
662 "info @0x%p (%d slots empty / %d max)\n",
663 info, info->empty_slots, info->max_blocks);
664
665 printk(KERN_INFO " Free:\n");
666 nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
667 if (nr > maxnr)
668 nr = maxnr;
669 for (i = 0; i < nr; i++)
670 printk(KERN_INFO
671 " 0x%p-0x%p (%u)\n",
672 st[i].start, (int8_t *) st[i].start + st[i].size,
673 st[i].size);
674 printk(KERN_INFO "\n");
675
676 printk(KERN_INFO " Taken:\n");
677 nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
678 if (nr > maxnr)
679 nr = maxnr;
680 for (i = 0; i < nr; i++)
681 printk(KERN_INFO
682 " 0x%p-0x%p (%u) %s\n",
683 st[i].start, (int8_t *) st[i].start + st[i].size,
684 st[i].size, st[i].owner != NULL ? st[i].owner : "");
685 printk(KERN_INFO "\n");
686}
687
688void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
689{
690 printk(KERN_INFO
691 "blk @0x%p: 0x%p-0x%p (%u)\n",
692 blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
693}
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
new file mode 100644
index 000000000000..e79123d1485c
--- /dev/null
+++ b/arch/powerpc/lib/sstep.c
@@ -0,0 +1,141 @@
1/*
2 * Single-step support.
3 *
4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/kernel.h>
12#include <linux/ptrace.h>
13#include <asm/sstep.h>
14#include <asm/processor.h>
15
16extern char system_call_common[];
17
18/* Bits in SRR1 that are copied from MSR */
19#define MSR_MASK 0xffffffff87c0ffff
20
21/*
22 * Determine whether a conditional branch instruction would branch.
23 */
24static int branch_taken(unsigned int instr, struct pt_regs *regs)
25{
26 unsigned int bo = (instr >> 21) & 0x1f;
27 unsigned int bi;
28
29 if ((bo & 4) == 0) {
30 /* decrement counter */
31 --regs->ctr;
32 if (((bo >> 1) & 1) ^ (regs->ctr == 0))
33 return 0;
34 }
35 if ((bo & 0x10) == 0) {
36 /* check bit from CR */
37 bi = (instr >> 16) & 0x1f;
38 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
39 return 0;
40 }
41 return 1;
42}
43
44/*
45 * Emulate instructions that cause a transfer of control.
46 * Returns 1 if the step was emulated, 0 if not,
47 * or -1 if the instruction is one that should not be stepped,
48 * such as an rfid, or a mtmsrd that would clear MSR_RI.
49 */
50int emulate_step(struct pt_regs *regs, unsigned int instr)
51{
52 unsigned int opcode, rd;
53 unsigned long int imm;
54
55 opcode = instr >> 26;
56 switch (opcode) {
57 case 16: /* bc */
58 imm = (signed short)(instr & 0xfffc);
59 if ((instr & 2) == 0)
60 imm += regs->nip;
61 regs->nip += 4;
62 if ((regs->msr & MSR_SF) == 0)
63 regs->nip &= 0xffffffffUL;
64 if (instr & 1)
65 regs->link = regs->nip;
66 if (branch_taken(instr, regs))
67 regs->nip = imm;
68 return 1;
69 case 17: /* sc */
70 /*
71 * N.B. this uses knowledge about how the syscall
72 * entry code works. If that is changed, this will
73 * need to be changed also.
74 */
75 regs->gpr[9] = regs->gpr[13];
76 regs->gpr[11] = regs->nip + 4;
77 regs->gpr[12] = regs->msr & MSR_MASK;
78 regs->gpr[13] = (unsigned long) get_paca();
79 regs->nip = (unsigned long) &system_call_common;
80 regs->msr = MSR_KERNEL;
81 return 1;
82 case 18: /* b */
83 imm = instr & 0x03fffffc;
84 if (imm & 0x02000000)
85 imm -= 0x04000000;
86 if ((instr & 2) == 0)
87 imm += regs->nip;
88 if (instr & 1) {
89 regs->link = regs->nip + 4;
90 if ((regs->msr & MSR_SF) == 0)
91 regs->link &= 0xffffffffUL;
92 }
93 if ((regs->msr & MSR_SF) == 0)
94 imm &= 0xffffffffUL;
95 regs->nip = imm;
96 return 1;
97 case 19:
98 switch (instr & 0x7fe) {
99 case 0x20: /* bclr */
100 case 0x420: /* bcctr */
101 imm = (instr & 0x400)? regs->ctr: regs->link;
102 regs->nip += 4;
103 if ((regs->msr & MSR_SF) == 0) {
104 regs->nip &= 0xffffffffUL;
105 imm &= 0xffffffffUL;
106 }
107 if (instr & 1)
108 regs->link = regs->nip;
109 if (branch_taken(instr, regs))
110 regs->nip = imm;
111 return 1;
112 case 0x24: /* rfid, scary */
113 return -1;
114 }
115 case 31:
116 rd = (instr >> 21) & 0x1f;
117 switch (instr & 0x7fe) {
118 case 0xa6: /* mfmsr */
119 regs->gpr[rd] = regs->msr & MSR_MASK;
120 regs->nip += 4;
121 if ((regs->msr & MSR_SF) == 0)
122 regs->nip &= 0xffffffffUL;
123 return 1;
124 case 0x164: /* mtmsrd */
125 /* only MSR_EE and MSR_RI get changed if bit 15 set */
126 /* mtmsrd doesn't change MSR_HV and MSR_ME */
127 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
128 imm = (regs->msr & MSR_MASK & ~imm)
129 | (regs->gpr[rd] & imm);
130 if ((imm & MSR_RI) == 0)
131 /* can't step mtmsrd that would clear MSR_RI */
132 return -1;
133 regs->msr = imm;
134 regs->nip += 4;
135 if ((imm & MSR_SF) == 0)
136 regs->nip &= 0xffffffffUL;
137 return 1;
138 }
139 }
140 return 0;
141}
diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c
new file mode 100644
index 000000000000..36b521091bbc
--- /dev/null
+++ b/arch/powerpc/lib/strcase.c
@@ -0,0 +1,23 @@
1#include <linux/ctype.h>
2
/* Case-insensitive string compare; returns the difference of the first
 * pair of (lowercased) bytes that differ, or 0 if the strings match. */
int strcasecmp(const char *s1, const char *s2)
{
	int a, b;

	for (;;) {
		a = tolower(*s1++);
		b = tolower(*s2++);
		if (a != b || a == 0)
			return a - b;
	}
}
13
/* Case-insensitive compare of at most n bytes.  A zero (or negative)
 * length compares equal by definition -- the old do-while examined one
 * byte even when n == 0, which contradicts the usual strncasecmp
 * contract. */
int strncasecmp(const char *s1, const char *s2, int n)
{
	int c1, c2;

	if (n <= 0)
		return 0;
	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while ((--n > 0) && c1 == c2 && c1 != 0);
	return c1 - c2;
}
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
new file mode 100644
index 000000000000..b9ca84ed8927
--- /dev/null
+++ b/arch/powerpc/lib/string.S
@@ -0,0 +1,198 @@
1/*
2 * String handling functions for PowerPC.
3 *
4 * Copyright (C) 1996 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <asm/processor.h>
13#include <asm/errno.h>
14#include <asm/ppc_asm.h>
15
16 .section __ex_table,"a"
17#ifdef CONFIG_PPC64
18 .align 3
19#define EXTBL .llong
20#else
21 .align 2
22#define EXTBL .long
23#endif
24 .text
25
/* strcpy(dst, src): copy a NUL-terminated string; r3 (dst) is
 * preserved as the return value. */
_GLOBAL(strcpy)
	addi	r5,r3,-1	/* pre-decrement for the lbzu/stbu idiom */
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)	/* fetch next source byte */
	cmpwi	0,r0,0
	stbu	r0,1(r5)	/* store it, including the final NUL */
	bne	1b
	blr			/* r3 still holds the original dst */

/* This clears out any unused part of the destination buffer,
   just as the libc version does. -- paulus */
_GLOBAL(strncpy)
	cmpwi	0,r5,0		/* n == 0: nothing to do */
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	bnelr			/* if we didn't hit a null char, we're done */
	mfctr	r5
	cmpwi	0,r5,0		/* any space left in destination buffer? */
	beqlr			/* we know r0 == 0 here */
2:	stbu	r0,1(r6)	/* clear it out if so */
	bdnz	2b
	blr

/* strcat(dst, src): append src to the NUL-terminated string at dst. */
_GLOBAL(strcat)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)	/* scan dst for its terminating NUL */
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1	/* back up onto the NUL, then copy src */
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

/* strcmp(s1, s2): returns the byte difference at the first mismatch. */
_GLOBAL(strcmp)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0		/* cr1: did s1 just end? */
	lbzu	r0,1(r4)
	subf.	r3,r0,r3	/* r3 = *s1 - *s2, cr0 reflects it */
	beqlr	1		/* s1 exhausted: return the difference */
	beq	1b		/* bytes equal (cr0): keep scanning */
	blr			/* mismatch: difference already in r3 */

/* strlen(s): count bytes up to (not including) the NUL. */
_GLOBAL(strlen)
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)	/* scan for the terminating NUL */
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4	/* length = &NUL - start */
	blr
86
/* memcmp(a, b, n): byte-wise compare; n <= 0 compares equal. */
_GLOBAL(memcmp)
	cmpwi	0,r5,0		/* n <= 0: equal by definition */
	ble-	2f
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3	/* r3 = byte difference */
	bdnzt	2,1b		/* loop while bytes equal and count left */
	blr			/* r3 = 0 (all equal) or first difference */
2:	li	r3,0
	blr

/* memchr(p, c, n): return pointer to first byte equal to c, else NULL. */
_GLOBAL(memchr)
	cmpwi	0,r5,0		/* n <= 0: not found */
	ble-	2f
	mtctr	r5
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)	/* scan for the target byte */
	cmpw	0,r0,r4
	bdnzf	2,1b
	beqlr			/* found: r3 points at the match */
2:	li	r3,0		/* not found: return NULL */
	blr
112
/* __clear_user(addr, n): zero n bytes of user memory.  Returns the
 * number of bytes NOT cleared (0 on success).  Labels 90-92 are the
 * fault fixups wired up through the __ex_table entries below. */
_GLOBAL(__clear_user)
	addi	r6,r3,-4
	li	r3,0
	li	r5,0
	cmplwi	0,r4,4
	blt	7f		/* fewer than 4 bytes: byte loop only */
	/* clear a single word */
11:	stwu	r5,4(r6)
	beqlr
	/* clear word sized chunks */
	andi.	r0,r6,3		/* re-align the pointer to a word boundary */
	add	r4,r0,r4
	subf	r6,r0,r6
	srwi	r0,r4,2		/* r0 = whole words, r4 = trailing bytes */
	andi.	r4,r4,3
	mtctr	r0
	bdz	7f
1:	stwu	r5,4(r6)
	bdnz	1b
	/* clear byte sized chunks */
7:	cmpwi	0,r4,0
	beqlr
	mtctr	r4
	addi	r6,r6,3
8:	stbu	r5,1(r6)
	bdnz	8b
	blr
90:	mr	r3,r4		/* fault in first word: r4 bytes remain */
	blr
91:	mfctr	r3		/* fault in word loop: words left * 4 + tail */
	slwi	r3,r3,2
	add	r3,r3,r4
	blr
92:	mfctr	r3		/* fault in byte loop: ctr bytes remain */
	blr

	.section __ex_table,"a"
	EXTBL	11b,90b
	EXTBL	1b,91b
	EXTBL	8b,92b
	.text
154
/* __strncpy_from_user(dst, src, count): copy at most count bytes of a
 * user-space string.  Returns the number of bytes copied (excluding a
 * trailing NUL when one was found), or -EFAULT on an access fault
 * (fixup label 99 via __ex_table). */
_GLOBAL(__strncpy_from_user)
	addi	r6,r3,-1
	addi	r4,r4,-1
	cmpwi	0,r5,0		/* count == 0: copy nothing */
	beq	2f
	mtctr	r5
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	beq	3f		/* hit the NUL: r6 points at it */
2:	addi	r6,r6,1		/* count exhausted: include the last byte */
3:	subf	r3,r3,r6	/* bytes copied = cursor - dst */
	blr
99:	li	r3,-EFAULT	/* fault while reading user memory */
	blr

	.section __ex_table,"a"
	EXTBL	1b,99b
	.text
175
/* r3 = str, r4 = len (> 0), r5 = top (highest addr)
 * Returns the string length including the NUL, len + 1 if no NUL was
 * found within len bytes, or 0 on an access fault (fixup label 99). */
_GLOBAL(__strnlen_user)
	addi	r7,r3,-1
	subf	r6,r7,r5	/* top+1 - str */
	cmplw	0,r4,r6
	bge	0f
	mr	r6,r4
0:	mtctr	r6		/* ctr = min(len, top - str) */
1:	lbzu	r0,1(r7)	/* get next byte */
	cmpwi	0,r0,0
	bdnzf	2,1b		/* loop if --ctr != 0 && byte != 0 */
	addi	r7,r7,1
	subf	r3,r3,r7	/* number of bytes we have looked at */
	beqlr			/* return if we found a 0 byte */
	cmpw	0,r3,r4		/* did we look at all len bytes? */
	blt	99f		/* if not, must have hit top */
	addi	r3,r4,1		/* return len + 1 to indicate no null found */
	blr
99:	li	r3,0		/* bad address, return 0 */
	blr

	.section __ex_table,"a"
	EXTBL	1b,99b
diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
new file mode 100644
index 000000000000..5eea6f3c1e03
--- /dev/null
+++ b/arch/powerpc/lib/usercopy_64.c
@@ -0,0 +1,41 @@
1/*
2 * Functions which are too large to be inlined.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/module.h>
10#include <asm/uaccess.h>
11
12unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
13{
14 if (likely(access_ok(VERIFY_READ, from, n)))
15 n = __copy_from_user(to, from, n);
16 else
17 memset(to, 0, n);
18 return n;
19}
20
21unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
22{
23 if (likely(access_ok(VERIFY_WRITE, to, n)))
24 n = __copy_to_user(to, from, n);
25 return n;
26}
27
28unsigned long copy_in_user(void __user *to, const void __user *from,
29 unsigned long n)
30{
31 might_sleep();
32 if (likely(access_ok(VERIFY_READ, from, n) &&
33 access_ok(VERIFY_WRITE, to, n)))
34 n =__copy_tofrom_user(to, from, n);
35 return n;
36}
37
38EXPORT_SYMBOL(copy_from_user);
39EXPORT_SYMBOL(copy_to_user);
40EXPORT_SYMBOL(copy_in_user);
41
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
new file mode 100644
index 000000000000..3d79ce281b67
--- /dev/null
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -0,0 +1,120 @@
1/*
2 * Modifications by Matt Porter (mporter@mvista.com) to support
3 * PPC44x Book E processors.
4 *
5 * This file contains the routines for initializing the MMU
6 * on the 4xx series of chips.
7 * -- paulus
8 *
9 * Derived from arch/ppc/mm/init.c:
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 *
12 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
13 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
14 * Copyright (C) 1996 Paul Mackerras
15 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
16 *
17 * Derived from "arch/i386/mm/init.c"
18 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 *
25 */
26
27#include <linux/config.h>
28#include <linux/signal.h>
29#include <linux/sched.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/types.h>
34#include <linux/ptrace.h>
35#include <linux/mman.h>
36#include <linux/mm.h>
37#include <linux/swap.h>
38#include <linux/stddef.h>
39#include <linux/vmalloc.h>
40#include <linux/init.h>
41#include <linux/delay.h>
42#include <linux/highmem.h>
43
44#include <asm/pgalloc.h>
45#include <asm/prom.h>
46#include <asm/io.h>
47#include <asm/mmu_context.h>
48#include <asm/pgtable.h>
49#include <asm/mmu.h>
50#include <asm/uaccess.h>
51#include <asm/smp.h>
52#include <asm/bootx.h>
53#include <asm/machdep.h>
54#include <asm/setup.h>
55
56#include "mmu_decl.h"
57
58extern char etext[], _stext[];
59
60/* Used by the 44x TLB replacement exception handler.
61 * Just needed it declared someplace.
62 */
63unsigned int tlb_44x_index = 0;
64unsigned int tlb_44x_hwater = 62;
65
/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem.
 *
 * The three tlbwe instructions write the PAGEID, XLAT and ATTRIB words
 * of the given TLB slot: clrrwi ...,10 clears the low 10 bits of the
 * virtual and physical addresses (truncating them to the entry's
 * granularity), the entry is marked VALID|256M, and the attribute word
 * gets SW|SR|SX|G (supervisor write/read/execute, guarded -- per the
 * macro names; confirm against the 440 TLB word layout).
 */
static void __init
ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
{
	unsigned long attrib = 0;

	__asm__ __volatile__("\
	clrrwi	%2,%2,10\n\
	ori	%2,%2,%4\n\
	clrrwi	%1,%1,10\n\
	li	%0,0\n\
	ori	%0,%0,%5\n\
	tlbwe	%2,%3,%6\n\
	tlbwe	%1,%3,%7\n\
	tlbwe	%0,%3,%8"
	:
	: "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
	  "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
	  "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
	  "i" (PPC44x_TLB_PAGEID),
	  "i" (PPC44x_TLB_XLAT),
	  "i" (PPC44x_TLB_ATTRIB));
}
91
/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 * On 44x nothing needs setting up here beyond flushing the instruction
 * cache.
 */
void __init MMU_init_hw(void)
{
	flush_instruction_cache();
}
99
100unsigned long __init mmu_mapin_ram(void)
101{
102 unsigned int pinned_tlbs = 1;
103 int i;
104
105 /* Determine number of entries necessary to cover lowmem */
106 pinned_tlbs = (unsigned int)
107 (_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT);
108
109 /* Write upper watermark to save location */
110 tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
111
112 /* If necessary, set additional pinned TLBs */
113 if (pinned_tlbs > 1)
114 for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
115 unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
116 ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
117 }
118
119 return total_lowmem;
120}
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
new file mode 100644
index 000000000000..b7bcbc232f39
--- /dev/null
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -0,0 +1,141 @@
1/*
2 * This file contains the routines for initializing the MMU
3 * on the 4xx series of chips.
4 * -- paulus
5 *
6 * Derived from arch/ppc/mm/init.c:
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
10 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
11 * Copyright (C) 1996 Paul Mackerras
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * Derived from "arch/i386/mm/init.c"
15 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24#include <linux/config.h>
25#include <linux/signal.h>
26#include <linux/sched.h>
27#include <linux/kernel.h>
28#include <linux/errno.h>
29#include <linux/string.h>
30#include <linux/types.h>
31#include <linux/ptrace.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/swap.h>
35#include <linux/stddef.h>
36#include <linux/vmalloc.h>
37#include <linux/init.h>
38#include <linux/delay.h>
39#include <linux/highmem.h>
40
41#include <asm/pgalloc.h>
42#include <asm/prom.h>
43#include <asm/io.h>
44#include <asm/mmu_context.h>
45#include <asm/pgtable.h>
46#include <asm/mmu.h>
47#include <asm/uaccess.h>
48#include <asm/smp.h>
49#include <asm/bootx.h>
50#include <asm/machdep.h>
51#include <asm/setup.h>
52#include "mmu_decl.h"
53
54extern int __map_without_ltlbs;
/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 * For 4xx: zone protection, icache flush, and real-mode cacheability of
 * the low 512MB where the exception vectors and kernel live.
 */
void __init MMU_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the 4xx's zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB. For user access, zone 1 is used,
	 * for kernel access, zone 0 is used. We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE. This also allows
	 * kernel access as indicated in the PTE.
	 */

	mtspr(SPRN_ZPR, 0x10000000);

	flush_instruction_cache();

	/*
	 * Set up the real-mode cache parameters for the exception vector
	 * handlers (which are run in real-mode).
	 */

	mtspr(SPRN_DCWR, 0x00000000);	/* All caching is write-back */

	/*
	 * Cache instruction and data space where the exception
	 * vectors and the kernel live in real-mode.
	 */

	mtspr(SPRN_DCCR, 0xF0000000);	/* 512 MB of data space at 0x0. */
	mtspr(SPRN_ICCR, 0xF0000000);	/* 512 MB of instr. space at 0x0. */
}
92
93#define LARGE_PAGE_SIZE_16M (1<<24)
94#define LARGE_PAGE_SIZE_4M (1<<22)
95
/*
 * Map as much of lowmem as possible with pinned 16M (then 4M) large
 * pages, returning the number of bytes mapped this way.  Skipped
 * entirely when booting with __map_without_ltlbs set.
 */
unsigned long __init mmu_mapin_ram(void)
{
	unsigned long v, s;	/* v: virtual cursor, s: bytes mapped */
	phys_addr_t p;		/* p: physical cursor */

	v = KERNELBASE;
	p = PPC_MEMSTART;
	s = 0;

	if (__map_without_ltlbs) {
		return s;
	}

	/* Cover as much of lowmem as possible with 16M large pages */
	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;

		/* A 16M page is recorded in four consecutive pmd slots --
		 * presumably each pmd covers 4M; confirm against the 4xx
		 * pgtable layout. */
		spin_lock(&init_mm.page_table_lock);
		pmdp = pmd_offset(pgd_offset_k(v), v);
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;
		pmd_val(*pmdp++) = val;
		spin_unlock(&init_mm.page_table_lock);

		v += LARGE_PAGE_SIZE_16M;
		p += LARGE_PAGE_SIZE_16M;
		s += LARGE_PAGE_SIZE_16M;
	}

	/* Fill the remainder with 4M large pages where possible */
	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;

		spin_lock(&init_mm.page_table_lock);
		pmdp = pmd_offset(pgd_offset_k(v), v);
		pmd_val(*pmdp) = val;
		spin_unlock(&init_mm.page_table_lock);

		v += LARGE_PAGE_SIZE_4M;
		p += LARGE_PAGE_SIZE_4M;
		s += LARGE_PAGE_SIZE_4M;
	}

	return s;
}
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
new file mode 100644
index 000000000000..93441e7a2921
--- /dev/null
+++ b/arch/powerpc/mm/Makefile
@@ -0,0 +1,21 @@
1#
2# Makefile for the linux ppc-specific parts of the memory manager.
3#
4
5ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc
7endif
8
9obj-y := fault.o mem.o lmb.o
10obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
11hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
12obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
13 hash_utils_64.o hash_low_64.o tlb_64.o \
14 slb_low.o slb.o stab.o mmap.o imalloc.o \
15 $(hash-y)
16obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o tlb_32.o
17obj-$(CONFIG_40x) += 4xx_mmu.o
18obj-$(CONFIG_44x) += 44x_mmu.o
19obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
20obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
21obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/ppc64/mm/fault.c b/arch/powerpc/mm/fault.c
index be3f25cf3e9f..3df641fa789d 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/ppc/mm/fault.c 2 * arch/ppc/mm/fault.c
3 * 3 *
4 * PowerPC version 4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 6 *
7 * Derived from "arch/i386/mm/fault.c" 7 * Derived from "arch/i386/mm/fault.c"
@@ -24,10 +24,11 @@
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/ptrace.h>
27#include <linux/mman.h> 28#include <linux/mman.h>
28#include <linux/mm.h> 29#include <linux/mm.h>
29#include <linux/interrupt.h> 30#include <linux/interrupt.h>
30#include <linux/smp_lock.h> 31#include <linux/highmem.h>
31#include <linux/module.h> 32#include <linux/module.h>
32#include <linux/kprobes.h> 33#include <linux/kprobes.h>
33 34
@@ -37,6 +38,7 @@
37#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
38#include <asm/system.h> 39#include <asm/system.h>
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <asm/tlbflush.h>
40#include <asm/kdebug.h> 42#include <asm/kdebug.h>
41#include <asm/siginfo.h> 43#include <asm/siginfo.h>
42 44
@@ -101,10 +103,15 @@ static void do_dabr(struct pt_regs *regs, unsigned long error_code)
101} 103}
102 104
103/* 105/*
104 * The error_code parameter is 106 * For 600- and 800-family processors, the error_code parameter is DSISR
107 * for a data fault, SRR1 for an instruction fault. For 400-family processors
108 * the error_code parameter is ESR for a data fault, 0 for an instruction
109 * fault.
110 * For 64-bit processors, the error_code parameter is
105 * - DSISR for a non-SLB data access fault, 111 * - DSISR for a non-SLB data access fault,
106 * - SRR1 & 0x08000000 for a non-SLB instruction access fault 112 * - SRR1 & 0x08000000 for a non-SLB instruction access fault
107 * - 0 any SLB fault. 113 * - 0 any SLB fault.
114 *
108 * The return value is 0 if the fault was handled, or the signal 115 * The return value is 0 if the fault was handled, or the signal
109 * number if this is a kernel fault that can't be handled here. 116 * number if this is a kernel fault that can't be handled here.
110 */ 117 */
@@ -114,12 +121,25 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
114 struct vm_area_struct * vma; 121 struct vm_area_struct * vma;
115 struct mm_struct *mm = current->mm; 122 struct mm_struct *mm = current->mm;
116 siginfo_t info; 123 siginfo_t info;
117 unsigned long code = SEGV_MAPERR; 124 int code = SEGV_MAPERR;
118 unsigned long is_write = error_code & DSISR_ISSTORE; 125 int is_write = 0;
119 unsigned long trap = TRAP(regs); 126 int trap = TRAP(regs);
120 unsigned long is_exec = trap == 0x400; 127 int is_exec = trap == 0x400;
121 128
122 BUG_ON((trap == 0x380) || (trap == 0x480)); 129#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
130 /*
131 * Fortunately the bit assignments in SRR1 for an instruction
132 * fault and DSISR for a data fault are mostly the same for the
133 * bits we are interested in. But there are some bits which
134 * indicate errors in DSISR but can validly be set in SRR1.
135 */
136 if (trap == 0x400)
137 error_code &= 0x48200000;
138 else
139 is_write = error_code & DSISR_ISSTORE;
140#else
141 is_write = error_code & ESR_DST;
142#endif /* CONFIG_4xx || CONFIG_BOOKE */
123 143
124 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code, 144 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
125 11, SIGSEGV) == NOTIFY_STOP) 145 11, SIGSEGV) == NOTIFY_STOP)
@@ -134,10 +154,13 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
134 if (!user_mode(regs) && (address >= TASK_SIZE)) 154 if (!user_mode(regs) && (address >= TASK_SIZE))
135 return SIGSEGV; 155 return SIGSEGV;
136 156
157#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
137 if (error_code & DSISR_DABRMATCH) { 158 if (error_code & DSISR_DABRMATCH) {
159 /* DABR match */
138 do_dabr(regs, error_code); 160 do_dabr(regs, error_code);
139 return 0; 161 return 0;
140 } 162 }
163#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
141 164
142 if (in_atomic() || mm == NULL) { 165 if (in_atomic() || mm == NULL) {
143 if (!user_mode(regs)) 166 if (!user_mode(regs))
@@ -176,10 +199,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
176 vma = find_vma(mm, address); 199 vma = find_vma(mm, address);
177 if (!vma) 200 if (!vma)
178 goto bad_area; 201 goto bad_area;
179 202 if (vma->vm_start <= address)
180 if (vma->vm_start <= address) {
181 goto good_area; 203 goto good_area;
182 }
183 if (!(vma->vm_flags & VM_GROWSDOWN)) 204 if (!(vma->vm_flags & VM_GROWSDOWN))
184 goto bad_area; 205 goto bad_area;
185 206
@@ -214,35 +235,76 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
214 && (!user_mode(regs) || !store_updates_sp(regs))) 235 && (!user_mode(regs) || !store_updates_sp(regs)))
215 goto bad_area; 236 goto bad_area;
216 } 237 }
217
218 if (expand_stack(vma, address)) 238 if (expand_stack(vma, address))
219 goto bad_area; 239 goto bad_area;
220 240
221good_area: 241good_area:
222 code = SEGV_ACCERR; 242 code = SEGV_ACCERR;
243#if defined(CONFIG_6xx)
244 if (error_code & 0x95700000)
245 /* an error such as lwarx to I/O controller space,
246 address matching DABR, eciwx, etc. */
247 goto bad_area;
248#endif /* CONFIG_6xx */
249#if defined(CONFIG_8xx)
250 /* The MPC8xx seems to always set 0x80000000, which is
251 * "undefined". Of those that can be set, this is the only
252 * one which seems bad.
253 */
254 if (error_code & 0x10000000)
255 /* Guarded storage error. */
256 goto bad_area;
257#endif /* CONFIG_8xx */
223 258
224 if (is_exec) { 259 if (is_exec) {
260#ifdef CONFIG_PPC64
225 /* protection fault */ 261 /* protection fault */
226 if (error_code & DSISR_PROTFAULT) 262 if (error_code & DSISR_PROTFAULT)
227 goto bad_area; 263 goto bad_area;
228 if (!(vma->vm_flags & VM_EXEC)) 264 if (!(vma->vm_flags & VM_EXEC))
229 goto bad_area; 265 goto bad_area;
266#endif
267#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
268 pte_t *ptep;
269
270 /* Since 4xx/Book-E supports per-page execute permission,
271 * we lazily flush dcache to icache. */
272 ptep = NULL;
273 if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
274 struct page *page = pte_page(*ptep);
275
276 if (! test_bit(PG_arch_1, &page->flags)) {
277 flush_dcache_icache_page(page);
278 set_bit(PG_arch_1, &page->flags);
279 }
280 pte_update(ptep, 0, _PAGE_HWEXEC);
281 _tlbie(address);
282 pte_unmap(ptep);
283 up_read(&mm->mmap_sem);
284 return 0;
285 }
286 if (ptep != NULL)
287 pte_unmap(ptep);
288#endif
230 /* a write */ 289 /* a write */
231 } else if (is_write) { 290 } else if (is_write) {
232 if (!(vma->vm_flags & VM_WRITE)) 291 if (!(vma->vm_flags & VM_WRITE))
233 goto bad_area; 292 goto bad_area;
234 /* a read */ 293 /* a read */
235 } else { 294 } else {
236 if (!(vma->vm_flags & VM_READ)) 295 /* protection fault */
296 if (error_code & 0x08000000)
297 goto bad_area;
298 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
237 goto bad_area; 299 goto bad_area;
238 } 300 }
239 301
240 survive:
241 /* 302 /*
242 * If for any reason at all we couldn't handle the fault, 303 * If for any reason at all we couldn't handle the fault,
243 * make sure we exit gracefully rather than endlessly redo 304 * make sure we exit gracefully rather than endlessly redo
244 * the fault. 305 * the fault.
245 */ 306 */
307 survive:
246 switch (handle_mm_fault(mm, vma, address, is_write)) { 308 switch (handle_mm_fault(mm, vma, address, is_write)) {
247 309
248 case VM_FAULT_MINOR: 310 case VM_FAULT_MINOR:
@@ -268,15 +330,11 @@ bad_area:
268bad_area_nosemaphore: 330bad_area_nosemaphore:
269 /* User mode accesses cause a SIGSEGV */ 331 /* User mode accesses cause a SIGSEGV */
270 if (user_mode(regs)) { 332 if (user_mode(regs)) {
271 info.si_signo = SIGSEGV; 333 _exception(SIGSEGV, regs, code, address);
272 info.si_errno = 0;
273 info.si_code = code;
274 info.si_addr = (void __user *) address;
275 force_sig_info(SIGSEGV, &info, current);
276 return 0; 334 return 0;
277 } 335 }
278 336
279 if (trap == 0x400 && (error_code & DSISR_PROTFAULT) 337 if (is_exec && (error_code & DSISR_PROTFAULT)
280 && printk_ratelimit()) 338 && printk_ratelimit())
281 printk(KERN_CRIT "kernel tried to execute NX-protected" 339 printk(KERN_CRIT "kernel tried to execute NX-protected"
282 " page (%lx) - exploit attempt? (uid: %d)\n", 340 " page (%lx) - exploit attempt? (uid: %d)\n",
@@ -315,8 +373,8 @@ do_sigbus:
315 373
316/* 374/*
317 * bad_page_fault is called when we have a bad access from the kernel. 375 * bad_page_fault is called when we have a bad access from the kernel.
318 * It is called from do_page_fault above and from some of the procedures 376 * It is called from the DSI and ISI handlers in head.S and from some
319 * in traps.c. 377 * of the procedures in traps.c.
320 */ 378 */
321void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) 379void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
322{ 380{
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
new file mode 100644
index 000000000000..af9ca0eb6d55
--- /dev/null
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -0,0 +1,237 @@
1/*
2 * Modifications by Kumar Gala (kumar.gala@freescale.com) to support
3 * E500 Book E processors.
4 *
5 * Copyright 2004 Freescale Semiconductor, Inc
6 *
7 * This file contains the routines for initializing the MMU
8 * on the 4xx series of chips.
9 * -- paulus
10 *
11 * Derived from arch/ppc/mm/init.c:
12 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
13 *
14 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
15 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
16 * Copyright (C) 1996 Paul Mackerras
17 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
18 *
19 * Derived from "arch/i386/mm/init.c"
20 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 *
27 */
28
29#include <linux/config.h>
30#include <linux/signal.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/errno.h>
34#include <linux/string.h>
35#include <linux/types.h>
36#include <linux/ptrace.h>
37#include <linux/mman.h>
38#include <linux/mm.h>
39#include <linux/swap.h>
40#include <linux/stddef.h>
41#include <linux/vmalloc.h>
42#include <linux/init.h>
43#include <linux/delay.h>
44#include <linux/highmem.h>
45
46#include <asm/pgalloc.h>
47#include <asm/prom.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/bootx.h>
55#include <asm/machdep.h>
56#include <asm/setup.h>
57
58extern void loadcam_entry(unsigned int index);
59unsigned int tlbcam_index;
60unsigned int num_tlbcam_entries;
61static unsigned long __cam0, __cam1, __cam2;
62extern unsigned long total_lowmem;
63extern unsigned long __max_low_memory;
64#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
65
66#define NUM_TLBCAMS (16)
67
68struct tlbcam {
69 u32 MAS0;
70 u32 MAS1;
71 u32 MAS2;
72 u32 MAS3;
73 u32 MAS7;
74} TLBCAM[NUM_TLBCAMS];
75
76struct tlbcamrange {
77 unsigned long start;
78 unsigned long limit;
79 phys_addr_t phys;
80} tlbcam_addrs[NUM_TLBCAMS];
81
82extern unsigned int tlbcam_index;
83
84/*
85 * Return PA for this VA if it is mapped by a CAM, or 0
86 */
87unsigned long v_mapped_by_tlbcam(unsigned long va)
88{
89 int b;
90 for (b = 0; b < tlbcam_index; ++b)
91 if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
92 return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
93 return 0;
94}
95
96/*
97 * Return VA for a given PA or 0 if not mapped
98 */
99unsigned long p_mapped_by_tlbcam(unsigned long pa)
100{
101 int b;
102 for (b = 0; b < tlbcam_index; ++b)
103 if (pa >= tlbcam_addrs[b].phys
104 && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
105 +tlbcam_addrs[b].phys)
106 return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
107 return 0;
108}
109
110/*
111 * Set up one of the I/D BAT (block address translation) register pairs.
112 * The parameters are not checked; in particular size must be a power
113 * of 4 between 4k and 256M.
114 */
115void settlbcam(int index, unsigned long virt, phys_addr_t phys,
116 unsigned int size, int flags, unsigned int pid)
117{
118 unsigned int tsize, lz;
119
120 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
121 tsize = (21 - lz) / 2;
122
123#ifdef CONFIG_SMP
124 if ((flags & _PAGE_NO_CACHE) == 0)
125 flags |= _PAGE_COHERENT;
126#endif
127
128 TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
129 TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
130 TLBCAM[index].MAS2 = virt & PAGE_MASK;
131
132 TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
133 TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
134 TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
135 TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
136 TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
137
138 TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
139 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
140
141#ifndef CONFIG_KGDB /* want user access for breakpoints */
142 if (flags & _PAGE_USER) {
143 TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
144 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
145 }
146#else
147 TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
148 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
149#endif
150
151 tlbcam_addrs[index].start = virt;
152 tlbcam_addrs[index].limit = virt + size - 1;
153 tlbcam_addrs[index].phys = phys;
154
155 loadcam_entry(index);
156}
157
158void invalidate_tlbcam_entry(int index)
159{
160 TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
161 TLBCAM[index].MAS1 = ~MAS1_VALID;
162
163 loadcam_entry(index);
164}
165
166void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
167 unsigned long cam2)
168{
169 settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
170 tlbcam_index++;
171 if (cam1) {
172 tlbcam_index++;
173 settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
174 }
175 if (cam2) {
176 tlbcam_index++;
177 settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
178 }
179}
180
181/*
182 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
183 */
184void __init MMU_init_hw(void)
185{
186 flush_instruction_cache();
187}
188
189unsigned long __init mmu_mapin_ram(void)
190{
191 cam_mapin_ram(__cam0, __cam1, __cam2);
192
193 return __cam0 + __cam1 + __cam2;
194}
195
196
197void __init
198adjust_total_lowmem(void)
199{
200 unsigned long max_low_mem = MAX_LOW_MEM;
201 unsigned long cam_max = 0x10000000;
202 unsigned long ram;
203
204 /* adjust CAM size to max_low_mem */
205 if (max_low_mem < cam_max)
206 cam_max = max_low_mem;
207
208 /* adjust lowmem size to max_low_mem */
209 if (max_low_mem < total_lowmem)
210 ram = max_low_mem;
211 else
212 ram = total_lowmem;
213
214 /* Calculate CAM values */
215 __cam0 = 1UL << 2 * (__ilog2(ram) / 2);
216 if (__cam0 > cam_max)
217 __cam0 = cam_max;
218 ram -= __cam0;
219 if (ram) {
220 __cam1 = 1UL << 2 * (__ilog2(ram) / 2);
221 if (__cam1 > cam_max)
222 __cam1 = cam_max;
223 ram -= __cam1;
224 }
225 if (ram) {
226 __cam2 = 1UL << 2 * (__ilog2(ram) / 2);
227 if (__cam2 > cam_max)
228 __cam2 = cam_max;
229 ram -= __cam2;
230 }
231
232 printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
233 " CAM2=%ldMb residual: %ldMb\n",
234 __cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
235 (total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
236 __max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
237}
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
new file mode 100644
index 000000000000..12ccd7155bac
--- /dev/null
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -0,0 +1,618 @@
1/*
2 * arch/ppc/kernel/hashtable.S
3 *
4 * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
9 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
10 * Adapted for Power Macintosh by Paul Mackerras.
11 * Low-level exception handlers and MMU support
12 * rewritten by Paul Mackerras.
13 * Copyright (C) 1996 Paul Mackerras.
14 *
15 * This file contains low-level assembler routines for managing
16 * the PowerPC MMU hash table. (PPC 8xx processors don't use a
17 * hash table, so this file is not used on them.)
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <asm/reg.h>
28#include <asm/page.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/ppc_asm.h>
32#include <asm/thread_info.h>
33#include <asm/asm-offsets.h>
34
35#ifdef CONFIG_SMP
36 .comm mmu_hash_lock,4
37#endif /* CONFIG_SMP */
38
39/*
40 * Sync CPUs with hash_page taking & releasing the hash
41 * table lock
42 */
43#ifdef CONFIG_SMP
44 .text
45_GLOBAL(hash_page_sync)
46 lis r8,mmu_hash_lock@h
47 ori r8,r8,mmu_hash_lock@l
48 lis r0,0x0fff
49 b 10f
5011: lwz r6,0(r8)
51 cmpwi 0,r6,0
52 bne 11b
5310: lwarx r6,0,r8
54 cmpwi 0,r6,0
55 bne- 11b
56 stwcx. r0,0,r8
57 bne- 10b
58 isync
59 eieio
60 li r0,0
61 stw r0,0(r8)
62 blr
63#endif
64
65/*
66 * Load a PTE into the hash table, if possible.
67 * The address is in r4, and r3 contains an access flag:
68 * _PAGE_RW (0x400) if a write.
69 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
70 * SPRG3 contains the physical address of the current task's thread.
71 *
72 * Returns to the caller if the access is illegal or there is no
73 * mapping for the address. Otherwise it places an appropriate PTE
74 * in the hash table and returns from the exception.
75 * Uses r0, r3 - r8, ctr, lr.
76 */
77 .text
78_GLOBAL(hash_page)
79#ifdef CONFIG_PPC64BRIDGE
80 mfmsr r0
81 clrldi r0,r0,1 /* make sure it's in 32-bit mode */
82 MTMSRD(r0)
83 isync
84#endif
85 tophys(r7,0) /* gets -KERNELBASE into r7 */
86#ifdef CONFIG_SMP
87 addis r8,r7,mmu_hash_lock@h
88 ori r8,r8,mmu_hash_lock@l
89 lis r0,0x0fff
90 b 10f
9111: lwz r6,0(r8)
92 cmpwi 0,r6,0
93 bne 11b
9410: lwarx r6,0,r8
95 cmpwi 0,r6,0
96 bne- 11b
97 stwcx. r0,0,r8
98 bne- 10b
99 isync
100#endif
101 /* Get PTE (linux-style) and check access */
102 lis r0,KERNELBASE@h /* check if kernel address */
103 cmplw 0,r4,r0
104 mfspr r8,SPRN_SPRG3 /* current task's THREAD (phys) */
105 ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
106 lwz r5,PGDIR(r8) /* virt page-table root */
107 blt+ 112f /* assume user more likely */
108 lis r5,swapper_pg_dir@ha /* if kernel address, use */
109 addi r5,r5,swapper_pg_dir@l /* kernel page table */
110 rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
111112: add r5,r5,r7 /* convert to phys addr */
112 rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
113 lwz r8,0(r5) /* get pmd entry */
114 rlwinm. r8,r8,0,0,19 /* extract address of pte page */
115#ifdef CONFIG_SMP
116 beq- hash_page_out /* return if no mapping */
117#else
118 /* XXX it seems like the 601 will give a machine fault on the
119 rfi if its alignment is wrong (bottom 4 bits of address are
120 8 or 0xc) and we have had a not-taken conditional branch
121 to the address following the rfi. */
122 beqlr-
123#endif
124 rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */
125 rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
126 ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
127
128 /*
129 * Update the linux PTE atomically. We do the lwarx up-front
130 * because almost always, there won't be a permission violation
131 * and there won't already be an HPTE, and thus we will have
132 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
133 */
134retry:
135 lwarx r6,0,r8 /* get linux-style pte */
136 andc. r5,r3,r6 /* check access & ~permission */
137#ifdef CONFIG_SMP
138 bne- hash_page_out /* return if access not permitted */
139#else
140 bnelr-
141#endif
142 or r5,r0,r6 /* set accessed/dirty bits */
143 stwcx. r5,0,r8 /* attempt to update PTE */
144 bne- retry /* retry if someone got there first */
145
146 mfsrin r3,r4 /* get segment reg for segment */
147 mfctr r0
148 stw r0,_CTR(r11)
149 bl create_hpte /* add the hash table entry */
150
151#ifdef CONFIG_SMP
152 eieio
153 addis r8,r7,mmu_hash_lock@ha
154 li r0,0
155 stw r0,mmu_hash_lock@l(r8)
156#endif
157
158 /* Return from the exception */
159 lwz r5,_CTR(r11)
160 mtctr r5
161 lwz r0,GPR0(r11)
162 lwz r7,GPR7(r11)
163 lwz r8,GPR8(r11)
164 b fast_exception_return
165
166#ifdef CONFIG_SMP
167hash_page_out:
168 eieio
169 addis r8,r7,mmu_hash_lock@ha
170 li r0,0
171 stw r0,mmu_hash_lock@l(r8)
172 blr
173#endif /* CONFIG_SMP */
174
175/*
176 * Add an entry for a particular page to the hash table.
177 *
178 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
179 *
180 * We assume any necessary modifications to the pte (e.g. setting
181 * the accessed bit) have already been done and that there is actually
182 * a hash table in use (i.e. we're not on a 603).
183 */
184_GLOBAL(add_hash_page)
185 mflr r0
186 stw r0,4(r1)
187
188 /* Convert context and va to VSID */
189 mulli r3,r3,897*16 /* multiply context by context skew */
190 rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
191 mulli r0,r0,0x111 /* multiply by ESID skew */
192 add r3,r3,r0 /* note create_hpte trims to 24 bits */
193
194#ifdef CONFIG_SMP
195 rlwinm r8,r1,0,0,18 /* use cpu number to make tag */
196 lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */
197 oris r8,r8,12
198#endif /* CONFIG_SMP */
199
200 /*
201 * We disable interrupts here, even on UP, because we don't
202 * want to race with hash_page, and because we want the
203 * _PAGE_HASHPTE bit to be a reliable indication of whether
204 * the HPTE exists (or at least whether one did once).
205 * We also turn off the MMU for data accesses so that we
206 * we can't take a hash table miss (assuming the code is
207 * covered by a BAT). -- paulus
208 */
209 mfmsr r10
210 SYNC
211 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
212 rlwinm r0,r0,0,28,26 /* clear MSR_DR */
213 mtmsr r0
214 SYNC_601
215 isync
216
217 tophys(r7,0)
218
219#ifdef CONFIG_SMP
220 addis r9,r7,mmu_hash_lock@ha
221 addi r9,r9,mmu_hash_lock@l
22210: lwarx r0,0,r9 /* take the mmu_hash_lock */
223 cmpi 0,r0,0
224 bne- 11f
225 stwcx. r8,0,r9
226 beq+ 12f
22711: lwz r0,0(r9)
228 cmpi 0,r0,0
229 beq 10b
230 b 11b
23112: isync
232#endif
233
234 /*
235 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
236 * If _PAGE_HASHPTE was already set, we don't replace the existing
237 * HPTE, so we just unlock and return.
238 */
239 mr r8,r5
240 rlwimi r8,r4,22,20,29
2411: lwarx r6,0,r8
242 andi. r0,r6,_PAGE_HASHPTE
243 bne 9f /* if HASHPTE already set, done */
244 ori r5,r6,_PAGE_HASHPTE
245 stwcx. r5,0,r8
246 bne- 1b
247
248 bl create_hpte
249
2509:
251#ifdef CONFIG_SMP
252 eieio
253 li r0,0
254 stw r0,0(r9) /* clear mmu_hash_lock */
255#endif
256
257 /* reenable interrupts and DR */
258 mtmsr r10
259 SYNC_601
260 isync
261
262 lwz r0,4(r1)
263 mtlr r0
264 blr
265
266/*
267 * This routine adds a hardware PTE to the hash table.
268 * It is designed to be called with the MMU either on or off.
269 * r3 contains the VSID, r4 contains the virtual address,
270 * r5 contains the linux PTE, r6 contains the old value of the
271 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
272 * offset to be added to addresses (0 if the MMU is on,
273 * -KERNELBASE if it is off).
274 * On SMP, the caller should have the mmu_hash_lock held.
275 * We assume that the caller has (or will) set the _PAGE_HASHPTE
276 * bit in the linux PTE in memory. The value passed in r6 should
277 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
278 * this routine will skip the search for an existing HPTE.
279 * This procedure modifies r0, r3 - r6, r8, cr0.
280 * -- paulus.
281 *
282 * For speed, 4 of the instructions get patched once the size and
283 * physical address of the hash table are known. These definitions
284 * of Hash_base and Hash_bits below are just an example.
285 */
286Hash_base = 0xc0180000
287Hash_bits = 12 /* e.g. 256kB hash table */
288Hash_msk = (((1 << Hash_bits) - 1) * 64)
289
290#ifndef CONFIG_PPC64BRIDGE
291/* defines for the PTE format for 32-bit PPCs */
292#define PTE_SIZE 8
293#define PTEG_SIZE 64
294#define LG_PTEG_SIZE 6
295#define LDPTEu lwzu
296#define STPTE stw
297#define CMPPTE cmpw
298#define PTE_H 0x40
299#define PTE_V 0x80000000
300#define TST_V(r) rlwinm. r,r,0,0,0
301#define SET_V(r) oris r,r,PTE_V@h
302#define CLR_V(r,t) rlwinm r,r,0,1,31
303
304#else
305/* defines for the PTE format for 64-bit PPCs */
306#define PTE_SIZE 16
307#define PTEG_SIZE 128
308#define LG_PTEG_SIZE 7
309#define LDPTEu ldu
310#define STPTE std
311#define CMPPTE cmpd
312#define PTE_H 2
313#define PTE_V 1
314#define TST_V(r) andi. r,r,PTE_V
315#define SET_V(r) ori r,r,PTE_V
316#define CLR_V(r,t) li t,PTE_V; andc r,r,t
317#endif /* CONFIG_PPC64BRIDGE */
318
319#define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1)
320#define HASH_RIGHT 31-LG_PTEG_SIZE
321
322_GLOBAL(create_hpte)
323 /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
324 rlwinm r8,r5,32-10,31,31 /* _PAGE_RW -> PP lsb */
325 rlwinm r0,r5,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
326 and r8,r8,r0 /* writable if _RW & _DIRTY */
327 rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
328 rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
329 ori r8,r8,0xe14 /* clear out reserved bits and M */
330 andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
331BEGIN_FTR_SECTION
332 ori r8,r8,_PAGE_COHERENT /* set M (coherence required) */
333END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
334
335 /* Construct the high word of the PPC-style PTE (r5) */
336#ifndef CONFIG_PPC64BRIDGE
337 rlwinm r5,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
338 rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */
339#else /* CONFIG_PPC64BRIDGE */
340 clrlwi r3,r3,8 /* reduce vsid to 24 bits */
341 sldi r5,r3,12 /* shift vsid into position */
342 rlwimi r5,r4,16,20,24 /* put in API (abbrev page index) */
343#endif /* CONFIG_PPC64BRIDGE */
344 SET_V(r5) /* set V (valid) bit */
345
346 /* Get the address of the primary PTE group in the hash table (r3) */
347_GLOBAL(hash_page_patch_A)
348 addis r0,r7,Hash_base@h /* base address of hash table */
349 rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
350 rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
351 xor r3,r3,r0 /* make primary hash */
352 li r0,8 /* PTEs/group */
353
354 /*
355 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
356 * if it is clear, meaning that the HPTE isn't there already...
357 */
358 andi. r6,r6,_PAGE_HASHPTE
359 beq+ 10f /* no PTE: go look for an empty slot */
360 tlbie r4
361
362 addis r4,r7,htab_hash_searches@ha
363 lwz r6,htab_hash_searches@l(r4)
364 addi r6,r6,1 /* count how many searches we do */
365 stw r6,htab_hash_searches@l(r4)
366
367 /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
368 mtctr r0
369 addi r4,r3,-PTE_SIZE
3701: LDPTEu r6,PTE_SIZE(r4) /* get next PTE */
371 CMPPTE 0,r6,r5
372 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
373 beq+ found_slot
374
375 /* Search the secondary PTEG for a matching PTE */
376 ori r5,r5,PTE_H /* set H (secondary hash) bit */
377_GLOBAL(hash_page_patch_B)
378 xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
379 xori r4,r4,(-PTEG_SIZE & 0xffff)
380 addi r4,r4,-PTE_SIZE
381 mtctr r0
3822: LDPTEu r6,PTE_SIZE(r4)
383 CMPPTE 0,r6,r5
384 bdnzf 2,2b
385 beq+ found_slot
386 xori r5,r5,PTE_H /* clear H bit again */
387
388 /* Search the primary PTEG for an empty slot */
38910: mtctr r0
390 addi r4,r3,-PTE_SIZE /* search primary PTEG */
3911: LDPTEu r6,PTE_SIZE(r4) /* get next PTE */
392 TST_V(r6) /* test valid bit */
393 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
394 beq+ found_empty
395
396 /* update counter of times that the primary PTEG is full */
397 addis r4,r7,primary_pteg_full@ha
398 lwz r6,primary_pteg_full@l(r4)
399 addi r6,r6,1
400 stw r6,primary_pteg_full@l(r4)
401
402 /* Search the secondary PTEG for an empty slot */
403 ori r5,r5,PTE_H /* set H (secondary hash) bit */
404_GLOBAL(hash_page_patch_C)
405 xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
406 xori r4,r4,(-PTEG_SIZE & 0xffff)
407 addi r4,r4,-PTE_SIZE
408 mtctr r0
4092: LDPTEu r6,PTE_SIZE(r4)
410 TST_V(r6)
411 bdnzf 2,2b
412 beq+ found_empty
413 xori r5,r5,PTE_H /* clear H bit again */
414
415 /*
416 * Choose an arbitrary slot in the primary PTEG to overwrite.
417 * Since both the primary and secondary PTEGs are full, and we
418 * have no information that the PTEs in the primary PTEG are
419 * more important or useful than those in the secondary PTEG,
420 * and we know there is a definite (although small) speed
421 * advantage to putting the PTE in the primary PTEG, we always
422 * put the PTE in the primary PTEG.
423 */
424 addis r4,r7,next_slot@ha
425 lwz r6,next_slot@l(r4)
426 addi r6,r6,PTE_SIZE
427 andi. r6,r6,7*PTE_SIZE
428 stw r6,next_slot@l(r4)
429 add r4,r3,r6
430
431#ifndef CONFIG_SMP
432 /* Store PTE in PTEG */
433found_empty:
434 STPTE r5,0(r4)
435found_slot:
436 STPTE r8,PTE_SIZE/2(r4)
437
438#else /* CONFIG_SMP */
439/*
440 * Between the tlbie above and updating the hash table entry below,
441 * another CPU could read the hash table entry and put it in its TLB.
442 * There are 3 cases:
443 * 1. using an empty slot
444 * 2. updating an earlier entry to change permissions (i.e. enable write)
445 * 3. taking over the PTE for an unrelated address
446 *
447 * In each case it doesn't really matter if the other CPUs have the old
448 * PTE in their TLB. So we don't need to bother with another tlbie here,
449 * which is convenient as we've overwritten the register that had the
450 * address. :-) The tlbie above is mainly to make sure that this CPU comes
451 * and gets the new PTE from the hash table.
452 *
453 * We do however have to make sure that the PTE is never in an invalid
454 * state with the V bit set.
455 */
456found_empty:
457found_slot:
458 CLR_V(r5,r0) /* clear V (valid) bit in PTE */
459 STPTE r5,0(r4)
460 sync
461 TLBSYNC
462 STPTE r8,PTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
463 sync
464 SET_V(r5)
465 STPTE r5,0(r4) /* finally set V bit in PTE */
466#endif /* CONFIG_SMP */
467
468 sync /* make sure pte updates get to memory */
469 blr
470
471 .comm next_slot,4
472 .comm primary_pteg_full,4
473 .comm htab_hash_searches,4
474
475/*
476 * Flush the entry for a particular page from the hash table.
477 *
478 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
479 * int count)
480 *
481 * We assume that there is a hash table in use (Hash != 0).
482 */
483_GLOBAL(flush_hash_pages)
484 tophys(r7,0)
485
486 /*
487 * We disable interrupts here, even on UP, because we want
488 * the _PAGE_HASHPTE bit to be a reliable indication of
489 * whether the HPTE exists (or at least whether one did once).
490 * We also turn off the MMU for data accesses so that we
491 * we can't take a hash table miss (assuming the code is
492 * covered by a BAT). -- paulus
493 */
494 mfmsr r10
495 SYNC
496 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
497 rlwinm r0,r0,0,28,26 /* clear MSR_DR */
498 mtmsr r0
499 SYNC_601
500 isync
501
502 /* First find a PTE in the range that has _PAGE_HASHPTE set */
503 rlwimi r5,r4,22,20,29
5041: lwz r0,0(r5)
505 cmpwi cr1,r6,1
506 andi. r0,r0,_PAGE_HASHPTE
507 bne 2f
508 ble cr1,19f
509 addi r4,r4,0x1000
510 addi r5,r5,4
511 addi r6,r6,-1
512 b 1b
513
514 /* Convert context and va to VSID */
5152: mulli r3,r3,897*16 /* multiply context by context skew */
516 rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
517 mulli r0,r0,0x111 /* multiply by ESID skew */
518 add r3,r3,r0 /* note code below trims to 24 bits */
519
520 /* Construct the high word of the PPC-style PTE (r11) */
521#ifndef CONFIG_PPC64BRIDGE
522 rlwinm r11,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
523 rlwimi r11,r4,10,26,31 /* put in API (abbrev page index) */
524#else /* CONFIG_PPC64BRIDGE */
525 clrlwi r3,r3,8 /* reduce vsid to 24 bits */
526 sldi r11,r3,12 /* shift vsid into position */
527 rlwimi r11,r4,16,20,24 /* put in API (abbrev page index) */
528#endif /* CONFIG_PPC64BRIDGE */
529 SET_V(r11) /* set V (valid) bit */
530
531#ifdef CONFIG_SMP
532 addis r9,r7,mmu_hash_lock@ha
533 addi r9,r9,mmu_hash_lock@l
534 rlwinm r8,r1,0,0,18
535 add r8,r8,r7
536 lwz r8,TI_CPU(r8)
537 oris r8,r8,9
53810: lwarx r0,0,r9
539 cmpi 0,r0,0
540 bne- 11f
541 stwcx. r8,0,r9
542 beq+ 12f
54311: lwz r0,0(r9)
544 cmpi 0,r0,0
545 beq 10b
546 b 11b
54712: isync
548#endif
549
550 /*
551 * Check the _PAGE_HASHPTE bit in the linux PTE. If it is
552 * already clear, we're done (for this pte). If not,
553 * clear it (atomically) and proceed. -- paulus.
554 */
55533: lwarx r8,0,r5 /* fetch the pte */
556 andi. r0,r8,_PAGE_HASHPTE
557 beq 8f /* done if HASHPTE is already clear */
558 rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
559 stwcx. r8,0,r5 /* update the pte */
560 bne- 33b
561
562 /* Get the address of the primary PTE group in the hash table (r3) */
563_GLOBAL(flush_hash_patch_A)
564 addis r8,r7,Hash_base@h /* base address of hash table */
565 rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
566 rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
567 xor r8,r0,r8 /* make primary hash */
568
569 /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
570 li r0,8 /* PTEs/group */
571 mtctr r0
572 addi r12,r8,-PTE_SIZE
5731: LDPTEu r0,PTE_SIZE(r12) /* get next PTE */
574 CMPPTE 0,r0,r11
575 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
576 beq+ 3f
577
578 /* Search the secondary PTEG for a matching PTE */
579 ori r11,r11,PTE_H /* set H (secondary hash) bit */
580 li r0,8 /* PTEs/group */
581_GLOBAL(flush_hash_patch_B)
582 xoris r12,r8,Hash_msk>>16 /* compute secondary hash */
583 xori r12,r12,(-PTEG_SIZE & 0xffff)
584 addi r12,r12,-PTE_SIZE
585 mtctr r0
5862: LDPTEu r0,PTE_SIZE(r12)
587 CMPPTE 0,r0,r11
588 bdnzf 2,2b
589 xori r11,r11,PTE_H /* clear H again */
590 bne- 4f /* should rarely fail to find it */
591
5923: li r0,0
593 STPTE r0,0(r12) /* invalidate entry */
5944: sync
595 tlbie r4 /* in hw tlb too */
596 sync
597
5988: ble cr1,9f /* if all ptes checked */
59981: addi r6,r6,-1
600 addi r5,r5,4 /* advance to next pte */
601 addi r4,r4,0x1000
602 lwz r0,0(r5) /* check next pte */
603 cmpwi cr1,r6,1
604 andi. r0,r0,_PAGE_HASHPTE
605 bne 33b
606 bgt cr1,81b
607
6089:
609#ifdef CONFIG_SMP
610 TLBSYNC
611 li r0,0
612 stw r0,0(r9) /* clear mmu_hash_lock */
613#endif
614
61519: mtmsr r10
616 SYNC_601
617 isync
618 blr
diff --git a/arch/ppc64/mm/hash_low.S b/arch/powerpc/mm/hash_low_64.S
index ee5a5d36bfa8..d6ed9102eeea 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -10,7 +10,7 @@
10 * described in the kernel's COPYING file. 10 * described in the kernel's COPYING file.
11 */ 11 */
12 12
13#include <asm/processor.h> 13#include <asm/reg.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/page.h> 16#include <asm/page.h>
diff --git a/arch/ppc64/mm/hash_native.c b/arch/powerpc/mm/hash_native_64.c
index bfd385b7713c..174d14576c28 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -335,10 +335,9 @@ static void native_hpte_clear(void)
335 local_irq_restore(flags); 335 local_irq_restore(flags);
336} 336}
337 337
338static void native_flush_hash_range(unsigned long context, 338static void native_flush_hash_range(unsigned long number, int local)
339 unsigned long number, int local)
340{ 339{
341 unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn; 340 unsigned long va, vpn, hash, secondary, slot, flags, avpn;
342 int i, j; 341 int i, j;
343 hpte_t *hptep; 342 hpte_t *hptep;
344 unsigned long hpte_v; 343 unsigned long hpte_v;
@@ -349,13 +348,7 @@ static void native_flush_hash_range(unsigned long context,
349 348
350 j = 0; 349 j = 0;
351 for (i = 0; i < number; i++) { 350 for (i = 0; i < number; i++) {
352 if (batch->addr[i] < KERNELBASE) 351 va = batch->vaddr[j];
353 vsid = get_vsid(context, batch->addr[i]);
354 else
355 vsid = get_kernel_vsid(batch->addr[i]);
356
357 va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
358 batch->vaddr[j] = va;
359 if (large) 352 if (large)
360 vpn = va >> HPAGE_SHIFT; 353 vpn = va >> HPAGE_SHIFT;
361 else 354 else
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/powerpc/mm/hash_utils_64.c
index 09475c8edf7c..6e9e05cce02c 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -78,7 +78,7 @@ extern unsigned long dart_tablebase;
78hpte_t *htab_address; 78hpte_t *htab_address;
79unsigned long htab_hash_mask; 79unsigned long htab_hash_mask;
80 80
81extern unsigned long _SDR1; 81unsigned long _SDR1;
82 82
83#define KB (1024) 83#define KB (1024)
84#define MB (1024*KB) 84#define MB (1024*KB)
@@ -90,7 +90,6 @@ static inline void loop_forever(void)
90 ; 90 ;
91} 91}
92 92
93#ifdef CONFIG_PPC_MULTIPLATFORM
94static inline void create_pte_mapping(unsigned long start, unsigned long end, 93static inline void create_pte_mapping(unsigned long start, unsigned long end,
95 unsigned long mode, int large) 94 unsigned long mode, int large)
96{ 95{
@@ -111,7 +110,7 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
111 unsigned long vpn, hash, hpteg; 110 unsigned long vpn, hash, hpteg;
112 unsigned long vsid = get_kernel_vsid(addr); 111 unsigned long vsid = get_kernel_vsid(addr);
113 unsigned long va = (vsid << 28) | (addr & 0xfffffff); 112 unsigned long va = (vsid << 28) | (addr & 0xfffffff);
114 int ret; 113 int ret = -1;
115 114
116 if (large) 115 if (large)
117 vpn = va >> HPAGE_SHIFT; 116 vpn = va >> HPAGE_SHIFT;
@@ -129,16 +128,25 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
129 128
130 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 129 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
131 130
131#ifdef CONFIG_PPC_ISERIES
132 if (systemcfg->platform & PLATFORM_ISERIES_LPAR)
133 ret = iSeries_hpte_bolt_or_insert(hpteg, va,
134 virt_to_abs(addr) >> PAGE_SHIFT,
135 vflags, tmp_mode);
136 else
137#endif
132#ifdef CONFIG_PPC_PSERIES 138#ifdef CONFIG_PPC_PSERIES
133 if (systemcfg->platform & PLATFORM_LPAR) 139 if (systemcfg->platform & PLATFORM_LPAR)
134 ret = pSeries_lpar_hpte_insert(hpteg, va, 140 ret = pSeries_lpar_hpte_insert(hpteg, va,
135 virt_to_abs(addr) >> PAGE_SHIFT, 141 virt_to_abs(addr) >> PAGE_SHIFT,
136 vflags, tmp_mode); 142 vflags, tmp_mode);
137 else 143 else
138#endif /* CONFIG_PPC_PSERIES */ 144#endif
145#ifdef CONFIG_PPC_MULTIPLATFORM
139 ret = native_hpte_insert(hpteg, va, 146 ret = native_hpte_insert(hpteg, va,
140 virt_to_abs(addr) >> PAGE_SHIFT, 147 virt_to_abs(addr) >> PAGE_SHIFT,
141 vflags, tmp_mode); 148 vflags, tmp_mode);
149#endif
142 150
143 if (ret == -1) { 151 if (ret == -1) {
144 ppc64_terminate_msg(0x20, "create_pte_mapping"); 152 ppc64_terminate_msg(0x20, "create_pte_mapping");
@@ -147,6 +155,27 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
147 } 155 }
148} 156}
149 157
158static unsigned long get_hashtable_size(void)
159{
160 unsigned long rnd_mem_size, pteg_count;
161
162 /* If hash size wasn't obtained in prom.c, we calculate it now based on
163 * the total RAM size
164 */
165 if (ppc64_pft_size)
166 return 1UL << ppc64_pft_size;
167
168 /* round mem_size up to next power of 2 */
169 rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
170 if (rnd_mem_size < systemcfg->physicalMemorySize)
171 rnd_mem_size <<= 1;
172
173 /* # pages / 2 */
174 pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
175
176 return pteg_count << 7;
177}
178
150void __init htab_initialize(void) 179void __init htab_initialize(void)
151{ 180{
152 unsigned long table, htab_size_bytes; 181 unsigned long table, htab_size_bytes;
@@ -162,7 +191,7 @@ void __init htab_initialize(void)
162 * Calculate the required size of the htab. We want the number of 191 * Calculate the required size of the htab. We want the number of
163 * PTEGs to equal one half the number of real pages. 192 * PTEGs to equal one half the number of real pages.
164 */ 193 */
165 htab_size_bytes = 1UL << ppc64_pft_size; 194 htab_size_bytes = get_hashtable_size();
166 pteg_count = htab_size_bytes >> 7; 195 pteg_count = htab_size_bytes >> 7;
167 196
168 /* For debug, make the HTAB 1/8 as big as it normally would be. */ 197 /* For debug, make the HTAB 1/8 as big as it normally would be. */
@@ -261,7 +290,6 @@ void __init htab_initialize(void)
261} 290}
262#undef KB 291#undef KB
263#undef MB 292#undef MB
264#endif /* CONFIG_PPC_MULTIPLATFORM */
265 293
266/* 294/*
267 * Called by asm hashtable.S for doing lazy icache flush 295 * Called by asm hashtable.S for doing lazy icache flush
@@ -355,18 +383,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
355 return ret; 383 return ret;
356} 384}
357 385
358void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte, 386void flush_hash_page(unsigned long va, pte_t pte, int local)
359 int local)
360{ 387{
361 unsigned long vsid, vpn, va, hash, secondary, slot; 388 unsigned long vpn, hash, secondary, slot;
362 unsigned long huge = pte_huge(pte); 389 unsigned long huge = pte_huge(pte);
363 390
364 if (ea < KERNELBASE)
365 vsid = get_vsid(context, ea);
366 else
367 vsid = get_kernel_vsid(ea);
368
369 va = (vsid << 28) | (ea & 0x0fffffff);
370 if (huge) 391 if (huge)
371 vpn = va >> HPAGE_SHIFT; 392 vpn = va >> HPAGE_SHIFT;
372 else 393 else
@@ -381,17 +402,17 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
381 ppc_md.hpte_invalidate(slot, va, huge, local); 402 ppc_md.hpte_invalidate(slot, va, huge, local);
382} 403}
383 404
384void flush_hash_range(unsigned long context, unsigned long number, int local) 405void flush_hash_range(unsigned long number, int local)
385{ 406{
386 if (ppc_md.flush_hash_range) { 407 if (ppc_md.flush_hash_range) {
387 ppc_md.flush_hash_range(context, number, local); 408 ppc_md.flush_hash_range(number, local);
388 } else { 409 } else {
389 int i; 410 int i;
390 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 411 struct ppc64_tlb_batch *batch =
412 &__get_cpu_var(ppc64_tlb_batch);
391 413
392 for (i = 0; i < number; i++) 414 for (i = 0; i < number; i++)
393 flush_hash_page(context, batch->addr[i], batch->pte[i], 415 flush_hash_page(batch->vaddr[i], batch->pte[i], local);
394 local);
395 } 416 }
396} 417}
397 418
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0ea0994ed974..0ea0994ed974 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
diff --git a/arch/ppc64/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index c65b87b92756..c65b87b92756 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
new file mode 100644
index 000000000000..aa6a5440cec1
--- /dev/null
+++ b/arch/powerpc/mm/init_32.c
@@ -0,0 +1,252 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
10 *
11 * Derived from "arch/i386/mm/init.c"
12 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/stddef.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/initrd.h>
34#include <linux/pagemap.h>
35
36#include <asm/pgalloc.h>
37#include <asm/prom.h>
38#include <asm/io.h>
39#include <asm/mmu_context.h>
40#include <asm/pgtable.h>
41#include <asm/mmu.h>
42#include <asm/smp.h>
43#include <asm/machdep.h>
44#include <asm/btext.h>
45#include <asm/tlb.h>
46#include <asm/bootinfo.h>
47#include <asm/prom.h>
48#include <asm/lmb.h>
49#include <asm/sections.h>
50
51#include "mmu_decl.h"
52
53#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
54/* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. */
55#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
56#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
57#endif
58#endif
59#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
60
61DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
62
63unsigned long total_memory;
64unsigned long total_lowmem;
65
66unsigned long ppc_memstart;
67unsigned long ppc_memoffset = PAGE_OFFSET;
68
69int boot_mapsize;
70#ifdef CONFIG_PPC_PMAC
71unsigned long agp_special_page;
72EXPORT_SYMBOL(agp_special_page);
73#endif
74
75#ifdef CONFIG_HIGHMEM
76pte_t *kmap_pte;
77pgprot_t kmap_prot;
78
79EXPORT_SYMBOL(kmap_prot);
80EXPORT_SYMBOL(kmap_pte);
81#endif
82
83void MMU_init(void);
84
85/* XXX should be in current.h -- paulus */
86extern struct task_struct *current_set[NR_CPUS];
87
88char *klimit = _end;
89struct device_node *memory_node;
90
91extern int init_bootmem_done;
92
93/*
94 * this tells the system to map all of ram with the segregs
95 * (i.e. page tables) instead of the bats.
96 * -- Cort
97 */
98int __map_without_bats;
99int __map_without_ltlbs;
100
101/* max amount of low RAM to map in */
102unsigned long __max_low_memory = MAX_LOW_MEM;
103
104/*
105 * limit of what is accessible with initial MMU setup -
106 * 256MB usually, but only 16MB on 601.
107 */
108unsigned long __initial_memory_limit = 0x10000000;
109
110/*
111 * Check for command-line options that affect what MMU_init will do.
112 */
113void MMU_setup(void)
114{
115 /* Check for nobats option (used in mapin_ram). */
116 if (strstr(cmd_line, "nobats")) {
117 __map_without_bats = 1;
118 }
119
120 if (strstr(cmd_line, "noltlbs")) {
121 __map_without_ltlbs = 1;
122 }
123}
124
125/*
126 * MMU_init sets up the basic memory mappings for the kernel,
127 * including both RAM and possibly some I/O regions,
128 * and sets up the page tables and the MMU hardware ready to go.
129 */
130void __init MMU_init(void)
131{
132 if (ppc_md.progress)
133 ppc_md.progress("MMU:enter", 0x111);
134
135 /* 601 can only access 16MB at the moment */
136 if (PVR_VER(mfspr(SPRN_PVR)) == 1)
137 __initial_memory_limit = 0x01000000;
138
139 /* parse args from command line */
140 MMU_setup();
141
142 if (lmb.memory.cnt > 1) {
143 lmb.memory.cnt = 1;
144 lmb_analyze();
145 printk(KERN_WARNING "Only using first contiguous memory region");
146 }
147
148 total_memory = lmb_end_of_DRAM();
149 total_lowmem = total_memory;
150
151#ifdef CONFIG_FSL_BOOKE
152 /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
153 * entries, so we need to adjust lowmem to match the amount we can map
154 * in the fixed entries */
155 adjust_total_lowmem();
156#endif /* CONFIG_FSL_BOOKE */
157 if (total_lowmem > __max_low_memory) {
158 total_lowmem = __max_low_memory;
159#ifndef CONFIG_HIGHMEM
160 total_memory = total_lowmem;
161#endif /* CONFIG_HIGHMEM */
162 }
163
164 /* Initialize the MMU hardware */
165 if (ppc_md.progress)
166 ppc_md.progress("MMU:hw init", 0x300);
167 MMU_init_hw();
168
169 /* Map in all of RAM starting at KERNELBASE */
170 if (ppc_md.progress)
171 ppc_md.progress("MMU:mapin", 0x301);
172 mapin_ram();
173
174#ifdef CONFIG_HIGHMEM
175 ioremap_base = PKMAP_BASE;
176#else
177 ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
178#endif /* CONFIG_HIGHMEM */
179 ioremap_bot = ioremap_base;
180
181 /* Map in I/O resources */
182 if (ppc_md.progress)
183 ppc_md.progress("MMU:setio", 0x302);
184 if (ppc_md.setup_io_mappings)
185 ppc_md.setup_io_mappings();
186
187 /* Initialize the context management stuff */
188 mmu_context_init();
189
190 if (ppc_md.progress)
191 ppc_md.progress("MMU:exit", 0x211);
192}
193
194/* This is only called until mem_init is done. */
195void __init *early_get_page(void)
196{
197 void *p;
198
199 if (init_bootmem_done) {
200 p = alloc_bootmem_pages(PAGE_SIZE);
201 } else {
202 p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
203 __initial_memory_limit));
204 }
205 return p;
206}
207
208/* Free up now-unused memory */
209static void free_sec(unsigned long start, unsigned long end, const char *name)
210{
211 unsigned long cnt = 0;
212
213 while (start < end) {
214 ClearPageReserved(virt_to_page(start));
215 set_page_count(virt_to_page(start), 1);
216 free_page(start);
217 cnt++;
218 start += PAGE_SIZE;
219 }
220 if (cnt) {
221 printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
222 totalram_pages += cnt;
223 }
224}
225
226void free_initmem(void)
227{
228#define FREESEC(TYPE) \
229 free_sec((unsigned long)(&__ ## TYPE ## _begin), \
230 (unsigned long)(&__ ## TYPE ## _end), \
231 #TYPE);
232
233 printk ("Freeing unused kernel memory:");
234 FREESEC(init);
235 printk("\n");
236 ppc_md.progress = NULL;
237#undef FREESEC
238}
239
240#ifdef CONFIG_BLK_DEV_INITRD
241void free_initrd_mem(unsigned long start, unsigned long end)
242{
243 if (start < end)
244 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
245 for (; start < end; start += PAGE_SIZE) {
246 ClearPageReserved(virt_to_page(start));
247 set_page_count(virt_to_page(start), 1);
248 free_page(start);
249 totalram_pages++;
250 }
251}
252#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
new file mode 100644
index 000000000000..b0fc822ec29f
--- /dev/null
+++ b/arch/powerpc/mm/init_64.c
@@ -0,0 +1,223 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * Dave Engebretsen <engebret@us.ibm.com>
14 * Rework for PPC64 port.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/string.h>
29#include <linux/types.h>
30#include <linux/mman.h>
31#include <linux/mm.h>
32#include <linux/swap.h>
33#include <linux/stddef.h>
34#include <linux/vmalloc.h>
35#include <linux/init.h>
36#include <linux/delay.h>
37#include <linux/bootmem.h>
38#include <linux/highmem.h>
39#include <linux/idr.h>
40#include <linux/nodemask.h>
41#include <linux/module.h>
42
43#include <asm/pgalloc.h>
44#include <asm/page.h>
45#include <asm/prom.h>
46#include <asm/lmb.h>
47#include <asm/rtas.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/machdep.h>
55#include <asm/tlb.h>
56#include <asm/eeh.h>
57#include <asm/processor.h>
58#include <asm/mmzone.h>
59#include <asm/cputable.h>
60#include <asm/ppcdebug.h>
61#include <asm/sections.h>
62#include <asm/system.h>
63#include <asm/iommu.h>
64#include <asm/abs_addr.h>
65#include <asm/vdso.h>
66#include <asm/imalloc.h>
67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
76unsigned long klimit = (unsigned long)_end;
77
78/* max amount of RAM to use */
79unsigned long __max_memory;
80
81/* info on what we think the IO hole is */
82unsigned long io_hole_start;
83unsigned long io_hole_size;
84
85/*
86 * Do very early mm setup.
87 */
88void __init mm_init_ppc64(void)
89{
90#ifndef CONFIG_PPC_ISERIES
91 unsigned long i;
92#endif
93
94 ppc64_boot_msg(0x100, "MM Init");
95
96 /* This is the story of the IO hole... please, keep seated,
97 * unfortunately, we are out of oxygen masks at the moment.
98 * So we need some rough way to tell where your big IO hole
99 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
100 * that area as well, on POWER4 we don't have one, etc...
101 * We need that as a "hint" when sizing the TCE table on POWER3
102 * So far, the simplest way that seem work well enough for us it
103 * to just assume that the first discontinuity in our physical
104 * RAM layout is the IO hole. That may not be correct in the future
105 * (and isn't on iSeries but then we don't care ;)
106 */
107
108#ifndef CONFIG_PPC_ISERIES
109 for (i = 1; i < lmb.memory.cnt; i++) {
110 unsigned long base, prevbase, prevsize;
111
112 prevbase = lmb.memory.region[i-1].base;
113 prevsize = lmb.memory.region[i-1].size;
114 base = lmb.memory.region[i].base;
115 if (base > (prevbase + prevsize)) {
116 io_hole_start = prevbase + prevsize;
117 io_hole_size = base - (prevbase + prevsize);
118 break;
119 }
120 }
121#endif /* CONFIG_PPC_ISERIES */
122 if (io_hole_start)
123 printk("IO Hole assumed to be %lx -> %lx\n",
124 io_hole_start, io_hole_start + io_hole_size - 1);
125
126 ppc64_boot_msg(0x100, "MM Init Done");
127}
128
129void free_initmem(void)
130{
131 unsigned long addr;
132
133 addr = (unsigned long)__init_begin;
134 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
135 memset((void *)addr, 0xcc, PAGE_SIZE);
136 ClearPageReserved(virt_to_page(addr));
137 set_page_count(virt_to_page(addr), 1);
138 free_page(addr);
139 totalram_pages++;
140 }
141 printk ("Freeing unused kernel memory: %luk freed\n",
142 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
143}
144
145#ifdef CONFIG_BLK_DEV_INITRD
146void free_initrd_mem(unsigned long start, unsigned long end)
147{
148 if (start < end)
149 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
150 for (; start < end; start += PAGE_SIZE) {
151 ClearPageReserved(virt_to_page(start));
152 set_page_count(virt_to_page(start), 1);
153 free_page(start);
154 totalram_pages++;
155 }
156}
157#endif
158
159static struct kcore_list kcore_vmem;
160
161static int __init setup_kcore(void)
162{
163 int i;
164
165 for (i=0; i < lmb.memory.cnt; i++) {
166 unsigned long base, size;
167 struct kcore_list *kcore_mem;
168
169 base = lmb.memory.region[i].base;
170 size = lmb.memory.region[i].size;
171
172 /* GFP_ATOMIC to avoid might_sleep warnings during boot */
173 kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
174 if (!kcore_mem)
175 panic("mem_init: kmalloc failed\n");
176
177 kclist_add(kcore_mem, __va(base), size);
178 }
179
180 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
181
182 return 0;
183}
184module_init(setup_kcore);
185
186static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
187{
188 memset(addr, 0, kmem_cache_size(cache));
189}
190
191static const int pgtable_cache_size[2] = {
192 PTE_TABLE_SIZE, PMD_TABLE_SIZE
193};
194static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
195 "pgd_pte_cache", "pud_pmd_cache",
196};
197
198kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
199
200void pgtable_cache_init(void)
201{
202 int i;
203
204 BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
205 BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
206 BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
207 BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
208
209 for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
210 int size = pgtable_cache_size[i];
211 const char *name = pgtable_cache_name[i];
212
213 pgtable_cache[i] = kmem_cache_create(name,
214 size, size,
215 SLAB_HWCACHE_ALIGN
216 | SLAB_MUST_HWCACHE_ALIGN,
217 zero_ctor,
218 NULL);
219 if (! pgtable_cache[i])
220 panic("pgtable_cache_init(): could not create %s!\n",
221 name);
222 }
223}
diff --git a/arch/ppc64/kernel/lmb.c b/arch/powerpc/mm/lmb.c
index 5adaca2ddc9d..9b5aa6808eb8 100644
--- a/arch/ppc64/kernel/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Procedures for interfacing to Open Firmware. 2 * Procedures for maintaining information about logical memory blocks.
3 * 3 *
4 * Peter Bergner, IBM Corp. June 2001. 4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner. 5 * Copyright (C) 2001 Peter Bergner.
@@ -18,7 +18,9 @@
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/prom.h> 19#include <asm/prom.h>
20#include <asm/lmb.h> 20#include <asm/lmb.h>
21#include <asm/abs_addr.h> 21#ifdef CONFIG_PPC32
22#include "mmu_decl.h" /* for __max_low_memory */
23#endif
22 24
23struct lmb lmb; 25struct lmb lmb;
24 26
@@ -54,16 +56,14 @@ void lmb_dump_all(void)
54#endif /* DEBUG */ 56#endif /* DEBUG */
55} 57}
56 58
57static unsigned long __init 59static unsigned long __init lmb_addrs_overlap(unsigned long base1,
58lmb_addrs_overlap(unsigned long base1, unsigned long size1, 60 unsigned long size1, unsigned long base2, unsigned long size2)
59 unsigned long base2, unsigned long size2)
60{ 61{
61 return ((base1 < (base2+size2)) && (base2 < (base1+size1))); 62 return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
62} 63}
63 64
64static long __init 65static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
65lmb_addrs_adjacent(unsigned long base1, unsigned long size1, 66 unsigned long base2, unsigned long size2)
66 unsigned long base2, unsigned long size2)
67{ 67{
68 if (base2 == base1 + size1) 68 if (base2 == base1 + size1)
69 return 1; 69 return 1;
@@ -73,8 +73,8 @@ lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
73 return 0; 73 return 0;
74} 74}
75 75
76static long __init 76static long __init lmb_regions_adjacent(struct lmb_region *rgn,
77lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2) 77 unsigned long r1, unsigned long r2)
78{ 78{
79 unsigned long base1 = rgn->region[r1].base; 79 unsigned long base1 = rgn->region[r1].base;
80 unsigned long size1 = rgn->region[r1].size; 80 unsigned long size1 = rgn->region[r1].size;
@@ -85,8 +85,8 @@ lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
85} 85}
86 86
87/* Assumption: base addr of region 1 < base addr of region 2 */ 87/* Assumption: base addr of region 1 < base addr of region 2 */
88static void __init 88static void __init lmb_coalesce_regions(struct lmb_region *rgn,
89lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2) 89 unsigned long r1, unsigned long r2)
90{ 90{
91 unsigned long i; 91 unsigned long i;
92 92
@@ -99,8 +99,7 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
99} 99}
100 100
101/* This routine called with relocation disabled. */ 101/* This routine called with relocation disabled. */
102void __init 102void __init lmb_init(void)
103lmb_init(void)
104{ 103{
105 /* Create a dummy zero size LMB which will get coalesced away later. 104 /* Create a dummy zero size LMB which will get coalesced away later.
106 * This simplifies the lmb_add() code below... 105 * This simplifies the lmb_add() code below...
@@ -115,9 +114,8 @@ lmb_init(void)
115 lmb.reserved.cnt = 1; 114 lmb.reserved.cnt = 1;
116} 115}
117 116
118/* This routine called with relocation disabled. */ 117/* This routine may be called with relocation disabled. */
119void __init 118void __init lmb_analyze(void)
120lmb_analyze(void)
121{ 119{
122 int i; 120 int i;
123 121
@@ -128,8 +126,8 @@ lmb_analyze(void)
128} 126}
129 127
130/* This routine called with relocation disabled. */ 128/* This routine called with relocation disabled. */
131static long __init 129static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
132lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size) 130 unsigned long size)
133{ 131{
134 unsigned long i, coalesced = 0; 132 unsigned long i, coalesced = 0;
135 long adjacent; 133 long adjacent;
@@ -158,18 +156,17 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
158 coalesced++; 156 coalesced++;
159 } 157 }
160 158
161 if ( coalesced ) { 159 if (coalesced)
162 return coalesced; 160 return coalesced;
163 } else if ( rgn->cnt >= MAX_LMB_REGIONS ) { 161 if (rgn->cnt >= MAX_LMB_REGIONS)
164 return -1; 162 return -1;
165 }
166 163
167 /* Couldn't coalesce the LMB, so add it to the sorted table. */ 164 /* Couldn't coalesce the LMB, so add it to the sorted table. */
168 for (i=rgn->cnt-1; i >= 0; i--) { 165 for (i = rgn->cnt-1; i >= 0; i--) {
169 if (base < rgn->region[i].base) { 166 if (base < rgn->region[i].base) {
170 rgn->region[i+1].base = rgn->region[i].base; 167 rgn->region[i+1].base = rgn->region[i].base;
171 rgn->region[i+1].size = rgn->region[i].size; 168 rgn->region[i+1].size = rgn->region[i].size;
172 } else { 169 } else {
173 rgn->region[i+1].base = base; 170 rgn->region[i+1].base = base;
174 rgn->region[i+1].size = size; 171 rgn->region[i+1].size = size;
175 break; 172 break;
@@ -180,30 +177,28 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
180 return 0; 177 return 0;
181} 178}
182 179
183/* This routine called with relocation disabled. */ 180/* This routine may be called with relocation disabled. */
184long __init 181long __init lmb_add(unsigned long base, unsigned long size)
185lmb_add(unsigned long base, unsigned long size)
186{ 182{
187 struct lmb_region *_rgn = &(lmb.memory); 183 struct lmb_region *_rgn = &(lmb.memory);
188 184
189 /* On pSeries LPAR systems, the first LMB is our RMO region. */ 185 /* On pSeries LPAR systems, the first LMB is our RMO region. */
190 if ( base == 0 ) 186 if (base == 0)
191 lmb.rmo_size = size; 187 lmb.rmo_size = size;
192 188
193 return lmb_add_region(_rgn, base, size); 189 return lmb_add_region(_rgn, base, size);
194 190
195} 191}
196 192
197long __init 193long __init lmb_reserve(unsigned long base, unsigned long size)
198lmb_reserve(unsigned long base, unsigned long size)
199{ 194{
200 struct lmb_region *_rgn = &(lmb.reserved); 195 struct lmb_region *_rgn = &(lmb.reserved);
201 196
202 return lmb_add_region(_rgn, base, size); 197 return lmb_add_region(_rgn, base, size);
203} 198}
204 199
205long __init 200long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
206lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size) 201 unsigned long size)
207{ 202{
208 unsigned long i; 203 unsigned long i;
209 204
@@ -218,39 +213,44 @@ lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long si
218 return (i < rgn->cnt) ? i : -1; 213 return (i < rgn->cnt) ? i : -1;
219} 214}
220 215
221unsigned long __init 216unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
222lmb_alloc(unsigned long size, unsigned long align)
223{ 217{
224 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); 218 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
225} 219}
226 220
227unsigned long __init 221unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
228lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr) 222 unsigned long max_addr)
229{ 223{
230 long i, j; 224 long i, j;
231 unsigned long base = 0; 225 unsigned long base = 0;
232 226
233 for (i=lmb.memory.cnt-1; i >= 0; i--) { 227#ifdef CONFIG_PPC32
228 /* On 32-bit, make sure we allocate lowmem */
229 if (max_addr == LMB_ALLOC_ANYWHERE)
230 max_addr = __max_low_memory;
231#endif
232 for (i = lmb.memory.cnt-1; i >= 0; i--) {
234 unsigned long lmbbase = lmb.memory.region[i].base; 233 unsigned long lmbbase = lmb.memory.region[i].base;
235 unsigned long lmbsize = lmb.memory.region[i].size; 234 unsigned long lmbsize = lmb.memory.region[i].size;
236 235
237 if ( max_addr == LMB_ALLOC_ANYWHERE ) 236 if (max_addr == LMB_ALLOC_ANYWHERE)
238 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align); 237 base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
239 else if ( lmbbase < max_addr ) 238 else if (lmbbase < max_addr) {
240 base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align); 239 base = min(lmbbase + lmbsize, max_addr);
241 else 240 base = _ALIGN_DOWN(base - size, align);
241 } else
242 continue; 242 continue;
243 243
244 while ( (lmbbase <= base) && 244 while ((lmbbase <= base) &&
245 ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) { 245 ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
246 base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align); 246 base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
247 } 247 align);
248 248
249 if ( (base != 0) && (lmbbase <= base) ) 249 if ((base != 0) && (lmbbase <= base))
250 break; 250 break;
251 } 251 }
252 252
253 if ( i < 0 ) 253 if (i < 0)
254 return 0; 254 return 0;
255 255
256 lmb_add_region(&lmb.reserved, base, size); 256 lmb_add_region(&lmb.reserved, base, size);
@@ -259,14 +259,12 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
259} 259}
260 260
261/* You must call lmb_analyze() before this. */ 261/* You must call lmb_analyze() before this. */
262unsigned long __init 262unsigned long __init lmb_phys_mem_size(void)
263lmb_phys_mem_size(void)
264{ 263{
265 return lmb.memory.size; 264 return lmb.memory.size;
266} 265}
267 266
268unsigned long __init 267unsigned long __init lmb_end_of_DRAM(void)
269lmb_end_of_DRAM(void)
270{ 268{
271 int idx = lmb.memory.cnt - 1; 269 int idx = lmb.memory.cnt - 1;
272 270
@@ -277,9 +275,8 @@ lmb_end_of_DRAM(void)
277 * Truncate the lmb list to memory_limit if it's set 275 * Truncate the lmb list to memory_limit if it's set
278 * You must call lmb_analyze() after this. 276 * You must call lmb_analyze() after this.
279 */ 277 */
280void __init lmb_enforce_memory_limit(void) 278void __init lmb_enforce_memory_limit(unsigned long memory_limit)
281{ 279{
282 extern unsigned long memory_limit;
283 unsigned long i, limit; 280 unsigned long i, limit;
284 281
285 if (! memory_limit) 282 if (! memory_limit)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
new file mode 100644
index 000000000000..5e9206715f09
--- /dev/null
+++ b/arch/powerpc/mm/mem.c
@@ -0,0 +1,484 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
10 *
11 * Derived from "arch/i386/mm/init.c"
12 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/stddef.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/initrd.h>
34#include <linux/pagemap.h>
35
36#include <asm/pgalloc.h>
37#include <asm/prom.h>
38#include <asm/io.h>
39#include <asm/mmu_context.h>
40#include <asm/pgtable.h>
41#include <asm/mmu.h>
42#include <asm/smp.h>
43#include <asm/machdep.h>
44#include <asm/btext.h>
45#include <asm/tlb.h>
46#include <asm/bootinfo.h>
47#include <asm/prom.h>
48#include <asm/lmb.h>
49#include <asm/sections.h>
50#ifdef CONFIG_PPC64
51#include <asm/vdso.h>
52#endif
53
54#include "mmu_decl.h"
55
56#ifndef CPU_FTR_COHERENT_ICACHE
57#define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */
58#define CPU_FTR_NOEXECUTE 0
59#endif
60
61int init_bootmem_done;
62int mem_init_done;
63
64/*
65 * This is called by /dev/mem to know if a given address has to
66 * be mapped non-cacheable or not
67 */
68int page_is_ram(unsigned long pfn)
69{
70 unsigned long paddr = (pfn << PAGE_SHIFT);
71
72#ifndef CONFIG_PPC64 /* XXX for now */
73 return paddr < __pa(high_memory);
74#else
75 int i;
76 for (i=0; i < lmb.memory.cnt; i++) {
77 unsigned long base;
78
79 base = lmb.memory.region[i].base;
80
81 if ((paddr >= base) &&
82 (paddr < (base + lmb.memory.region[i].size))) {
83 return 1;
84 }
85 }
86
87 return 0;
88#endif
89}
90EXPORT_SYMBOL(page_is_ram);
91
92pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
93 unsigned long size, pgprot_t vma_prot)
94{
95 if (ppc_md.phys_mem_access_prot)
96 return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
97
98 if (!page_is_ram(addr >> PAGE_SHIFT))
99 vma_prot = __pgprot(pgprot_val(vma_prot)
100 | _PAGE_GUARDED | _PAGE_NO_CACHE);
101 return vma_prot;
102}
103EXPORT_SYMBOL(phys_mem_access_prot);
104
105void show_mem(void)
106{
107 unsigned long total = 0, reserved = 0;
108 unsigned long shared = 0, cached = 0;
109 unsigned long highmem = 0;
110 struct page *page;
111 pg_data_t *pgdat;
112 unsigned long i;
113
114 printk("Mem-info:\n");
115 show_free_areas();
116 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
117 for_each_pgdat(pgdat) {
118 for (i = 0; i < pgdat->node_spanned_pages; i++) {
119 page = pgdat_page_nr(pgdat, i);
120 total++;
121 if (PageHighMem(page))
122 highmem++;
123 if (PageReserved(page))
124 reserved++;
125 else if (PageSwapCache(page))
126 cached++;
127 else if (page_count(page))
128 shared += page_count(page) - 1;
129 }
130 }
131 printk("%ld pages of RAM\n", total);
132#ifdef CONFIG_HIGHMEM
133 printk("%ld pages of HIGHMEM\n", highmem);
134#endif
135 printk("%ld reserved pages\n", reserved);
136 printk("%ld pages shared\n", shared);
137 printk("%ld pages swap cached\n", cached);
138}
139
140/*
141 * Initialize the bootmem system and give it all the memory we
142 * have available. If we are using highmem, we only put the
143 * lowmem into the bootmem system.
144 */
145#ifndef CONFIG_NEED_MULTIPLE_NODES
146void __init do_init_bootmem(void)
147{
148 unsigned long i;
149 unsigned long start, bootmap_pages;
150 unsigned long total_pages;
151 int boot_mapsize;
152
153 max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
154#ifdef CONFIG_HIGHMEM
155 total_pages = total_lowmem >> PAGE_SHIFT;
156#endif
157
158 /*
159 * Find an area to use for the bootmem bitmap. Calculate the size of
160 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
161 * Add 1 additional page in case the address isn't page-aligned.
162 */
163 bootmap_pages = bootmem_bootmap_pages(total_pages);
164
165 start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
166 BUG_ON(!start);
167
168 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
169
170 /* Add all physical memory to the bootmem map, mark each area
171 * present.
172 */
173 for (i = 0; i < lmb.memory.cnt; i++) {
174 unsigned long base = lmb.memory.region[i].base;
175 unsigned long size = lmb_size_bytes(&lmb.memory, i);
176#ifdef CONFIG_HIGHMEM
177 if (base >= total_lowmem)
178 continue;
179 if (base + size > total_lowmem)
180 size = total_lowmem - base;
181#endif
182 free_bootmem(base, size);
183 }
184
185 /* reserve the sections we're already using */
186 for (i = 0; i < lmb.reserved.cnt; i++)
187 reserve_bootmem(lmb.reserved.region[i].base,
188 lmb_size_bytes(&lmb.reserved, i));
189
190 /* XXX need to clip this if using highmem? */
191 for (i = 0; i < lmb.memory.cnt; i++)
192 memory_present(0, lmb_start_pfn(&lmb.memory, i),
193 lmb_end_pfn(&lmb.memory, i));
194 init_bootmem_done = 1;
195}
196
197/*
198 * paging_init() sets up the page tables - in fact we've already done this.
199 */
200void __init paging_init(void)
201{
202 unsigned long zones_size[MAX_NR_ZONES];
203 unsigned long zholes_size[MAX_NR_ZONES];
204 unsigned long total_ram = lmb_phys_mem_size();
205 unsigned long top_of_ram = lmb_end_of_DRAM();
206
207#ifdef CONFIG_HIGHMEM
208 map_page(PKMAP_BASE, 0, 0); /* XXX gross */
209 pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
210 (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
211 map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
212 kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
213 (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
214 kmap_prot = PAGE_KERNEL;
215#endif /* CONFIG_HIGHMEM */
216
217 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
218 top_of_ram, total_ram);
219 printk(KERN_INFO "Memory hole size: %ldMB\n",
220 (top_of_ram - total_ram) >> 20);
221 /*
222 * All pages are DMA-able so we put them all in the DMA zone.
223 */
224 memset(zones_size, 0, sizeof(zones_size));
225 memset(zholes_size, 0, sizeof(zholes_size));
226
227 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
228 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
229
230#ifdef CONFIG_HIGHMEM
231 zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
232 zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
233 zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
234#else
235 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
236 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
237#endif /* CONFIG_HIGHMEM */
238
239 free_area_init_node(0, NODE_DATA(0), zones_size,
240 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
241}
242#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
243
244void __init mem_init(void)
245{
246#ifdef CONFIG_NEED_MULTIPLE_NODES
247 int nid;
248#endif
249 pg_data_t *pgdat;
250 unsigned long i;
251 struct page *page;
252 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
253
254 num_physpages = max_pfn; /* RAM is assumed contiguous */
255 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
256
257#ifdef CONFIG_NEED_MULTIPLE_NODES
258 for_each_online_node(nid) {
259 if (NODE_DATA(nid)->node_spanned_pages != 0) {
260 printk("freeing bootmem node %x\n", nid);
261 totalram_pages +=
262 free_all_bootmem_node(NODE_DATA(nid));
263 }
264 }
265#else
266 max_mapnr = num_physpages;
267 totalram_pages += free_all_bootmem();
268#endif
269 for_each_pgdat(pgdat) {
270 for (i = 0; i < pgdat->node_spanned_pages; i++) {
271 page = pgdat_page_nr(pgdat, i);
272 if (PageReserved(page))
273 reservedpages++;
274 }
275 }
276
277 codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
278 datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
279 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
280 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
281
282#ifdef CONFIG_HIGHMEM
283 {
284 unsigned long pfn, highmem_mapnr;
285
286 highmem_mapnr = total_lowmem >> PAGE_SHIFT;
287 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
288 struct page *page = pfn_to_page(pfn);
289
290 ClearPageReserved(page);
291 set_page_count(page, 1);
292 __free_page(page);
293 totalhigh_pages++;
294 }
295 totalram_pages += totalhigh_pages;
296 printk(KERN_INFO "High memory: %luk\n",
297 totalhigh_pages << (PAGE_SHIFT-10));
298 }
299#endif /* CONFIG_HIGHMEM */
300
301 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
302 "%luk reserved, %luk data, %luk bss, %luk init)\n",
303 (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
304 num_physpages << (PAGE_SHIFT-10),
305 codesize >> 10,
306 reservedpages << (PAGE_SHIFT-10),
307 datasize >> 10,
308 bsssize >> 10,
309 initsize >> 10);
310
311 mem_init_done = 1;
312
313#ifdef CONFIG_PPC64
314 /* Initialize the vDSO */
315 vdso_init();
316#endif
317}
318
319/*
320 * This is called when a page has been modified by the kernel.
321 * It just marks the page as not i-cache clean. We do the i-cache
322 * flush later when the page is given to a user process, if necessary.
323 */
324void flush_dcache_page(struct page *page)
325{
326 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
327 return;
328 /* avoid an atomic op if possible */
329 if (test_bit(PG_arch_1, &page->flags))
330 clear_bit(PG_arch_1, &page->flags);
331}
332EXPORT_SYMBOL(flush_dcache_page);
333
334void flush_dcache_icache_page(struct page *page)
335{
336#ifdef CONFIG_BOOKE
337 void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
338 __flush_dcache_icache(start);
339 kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
340#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
341 /* On 8xx there is no need to kmap since highmem is not supported */
342 __flush_dcache_icache(page_address(page));
343#else
344 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
345#endif
346
347}
348void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
349{
350 clear_page(page);
351
352 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
353 return;
354 /*
355 * We shouldnt have to do this, but some versions of glibc
356 * require it (ld.so assumes zero filled pages are icache clean)
357 * - Anton
358 */
359
360 /* avoid an atomic op if possible */
361 if (test_bit(PG_arch_1, &pg->flags))
362 clear_bit(PG_arch_1, &pg->flags);
363}
364EXPORT_SYMBOL(clear_user_page);
365
366void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
367 struct page *pg)
368{
369 copy_page(vto, vfrom);
370
371 /*
372 * We should be able to use the following optimisation, however
373 * there are two problems.
374 * Firstly a bug in some versions of binutils meant PLT sections
375 * were not marked executable.
376 * Secondly the first word in the GOT section is blrl, used
377 * to establish the GOT address. Until recently the GOT was
378 * not marked executable.
379 * - Anton
380 */
381#if 0
382 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
383 return;
384#endif
385
386 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
387 return;
388
389 /* avoid an atomic op if possible */
390 if (test_bit(PG_arch_1, &pg->flags))
391 clear_bit(PG_arch_1, &pg->flags);
392}
393
394void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
395 unsigned long addr, int len)
396{
397 unsigned long maddr;
398
399 maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
400 flush_icache_range(maddr, maddr + len);
401 kunmap(page);
402}
403EXPORT_SYMBOL(flush_icache_user_range);
404
405/*
406 * This is called at the end of handling a user page fault, when the
407 * fault has been handled by updating a PTE in the linux page tables.
408 * We use it to preload an HPTE into the hash table corresponding to
409 * the updated linux PTE.
410 *
411 * This must always be called with the mm->page_table_lock held
412 */
413void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
414 pte_t pte)
415{
416 /* handle i-cache coherency */
417 unsigned long pfn = pte_pfn(pte);
418#ifdef CONFIG_PPC32
419 pmd_t *pmd;
420#else
421 unsigned long vsid;
422 void *pgdir;
423 pte_t *ptep;
424 int local = 0;
425 cpumask_t tmp;
426 unsigned long flags;
427#endif
428
429 /* handle i-cache coherency */
430 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
431 !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
432 pfn_valid(pfn)) {
433 struct page *page = pfn_to_page(pfn);
434 if (!PageReserved(page)
435 && !test_bit(PG_arch_1, &page->flags)) {
436 if (vma->vm_mm == current->active_mm) {
437#ifdef CONFIG_8xx
438 /* On 8xx, cache control instructions (particularly
439 * "dcbst" from flush_dcache_icache) fault as write
440 * operation if there is an unpopulated TLB entry
441 * for the address in question. To workaround that,
442 * we invalidate the TLB here, thus avoiding dcbst
443 * misbehaviour.
444 */
445 _tlbie(address);
446#endif
447 __flush_dcache_icache((void *) address);
448 } else
449 flush_dcache_icache_page(page);
450 set_bit(PG_arch_1, &page->flags);
451 }
452 }
453
454#ifdef CONFIG_PPC_STD_MMU
455 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
456 if (!pte_young(pte) || address >= TASK_SIZE)
457 return;
458#ifdef CONFIG_PPC32
459 if (Hash == 0)
460 return;
461 pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
462 if (!pmd_none(*pmd))
463 add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
464#else
465 pgdir = vma->vm_mm->pgd;
466 if (pgdir == NULL)
467 return;
468
469 ptep = find_linux_pte(pgdir, address);
470 if (!ptep)
471 return;
472
473 vsid = get_vsid(vma->vm_mm->context.id, address);
474
475 local_irq_save(flags);
476 tmp = cpumask_of_cpu(smp_processor_id());
477 if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
478 local = 1;
479
480 __hash_page(address, 0, vsid, ptep, 0x300, local);
481 local_irq_restore(flags);
482#endif
483#endif
484}
diff --git a/arch/ppc64/mm/mmap.c b/arch/powerpc/mm/mmap.c
index fe65f522aff3..fe65f522aff3 100644
--- a/arch/ppc64/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
new file mode 100644
index 000000000000..a8816e0f6a86
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -0,0 +1,86 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU substantially follows the
4 * architecture specification. This includes the 6xx, 7xx, 7xxx,
5 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/mm.h>
28#include <linux/init.h>
29
30#include <asm/mmu_context.h>
31#include <asm/tlbflush.h>
32
33mm_context_t next_mmu_context;
34unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
35#ifdef FEW_CONTEXTS
36atomic_t nr_free_contexts;
37struct mm_struct *context_mm[LAST_CONTEXT+1];
38void steal_context(void);
39#endif /* FEW_CONTEXTS */
40
41/*
42 * Initialize the context management stuff.
43 */
44void __init
45mmu_context_init(void)
46{
47 /*
48 * Some processors have too few contexts to reserve one for
49 * init_mm, and require using context 0 for a normal task.
50 * Other processors reserve the use of context zero for the kernel.
51 * This code assumes FIRST_CONTEXT < 32.
52 */
53 context_map[0] = (1 << FIRST_CONTEXT) - 1;
54 next_mmu_context = FIRST_CONTEXT;
55#ifdef FEW_CONTEXTS
56 atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
57#endif /* FEW_CONTEXTS */
58}
59
60#ifdef FEW_CONTEXTS
61/*
62 * Steal a context from a task that has one at the moment.
63 * This is only used on 8xx and 4xx and we presently assume that
64 * they don't do SMP. If they do then this will have to check
65 * whether the MM we steal is in use.
66 * We also assume that this is only used on systems that don't
67 * use an MMU hash table - this is true for 8xx and 4xx.
68 * This isn't an LRU system, it just frees up each context in
69 * turn (sort-of pseudo-random replacement :). This would be the
70 * place to implement an LRU scheme if anyone was motivated to do it.
71 * -- paulus
72 */
73void
74steal_context(void)
75{
76 struct mm_struct *mm;
77
78 /* free up context `next_mmu_context' */
79 /* if we shouldn't free context 0, don't... */
80 if (next_mmu_context < FIRST_CONTEXT)
81 next_mmu_context = FIRST_CONTEXT;
82 mm = context_mm[next_mmu_context];
83 flush_tlb_mm(mm);
84 destroy_context(mm);
85}
86#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
new file mode 100644
index 000000000000..714a84dd8d5d
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -0,0 +1,63 @@
1/*
2 * MMU context allocation for 64-bit kernels.
3 *
4 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/config.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/string.h>
18#include <linux/types.h>
19#include <linux/mm.h>
20#include <linux/spinlock.h>
21#include <linux/idr.h>
22
23#include <asm/mmu_context.h>
24
25static DEFINE_SPINLOCK(mmu_context_lock);
26static DEFINE_IDR(mmu_context_idr);
27
28int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
29{
30 int index;
31 int err;
32
33again:
34 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
35 return -ENOMEM;
36
37 spin_lock(&mmu_context_lock);
38 err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
39 spin_unlock(&mmu_context_lock);
40
41 if (err == -EAGAIN)
42 goto again;
43 else if (err)
44 return err;
45
46 if (index > MAX_CONTEXT) {
47 idr_remove(&mmu_context_idr, index);
48 return -ENOMEM;
49 }
50
51 mm->context.id = index;
52
53 return 0;
54}
55
56void destroy_context(struct mm_struct *mm)
57{
58 spin_lock(&mmu_context_lock);
59 idr_remove(&mmu_context_idr, mm->context.id);
60 spin_unlock(&mmu_context_lock);
61
62 mm->context.id = NO_CONTEXT;
63}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
new file mode 100644
index 000000000000..a4d7a327c0e5
--- /dev/null
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -0,0 +1,87 @@
1/*
2 * Declarations of procedures and variables shared between files
3 * in arch/ppc/mm/.
4 *
5 * Derived from arch/ppc/mm/init.c:
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *
8 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
9 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
10 * Copyright (C) 1996 Paul Mackerras
11 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
12 *
13 * Derived from "arch/i386/mm/init.c"
14 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22#include <asm/tlbflush.h>
23#include <asm/mmu.h>
24
25#ifdef CONFIG_PPC32
26extern void mapin_ram(void);
27extern int map_page(unsigned long va, phys_addr_t pa, int flags);
28extern void setbat(int index, unsigned long virt, unsigned long phys,
29 unsigned int size, int flags);
30extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
31 unsigned int size, int flags, unsigned int pid);
32extern void invalidate_tlbcam_entry(int index);
33
34extern int __map_without_bats;
35extern unsigned long ioremap_base;
36extern unsigned long ioremap_bot;
37extern unsigned int rtas_data, rtas_size;
38
39extern PTE *Hash, *Hash_end;
40extern unsigned long Hash_size, Hash_mask;
41
42extern unsigned int num_tlbcam_entries;
43#endif
44
45extern unsigned long __max_low_memory;
46extern unsigned long __initial_memory_limit;
47extern unsigned long total_memory;
48extern unsigned long total_lowmem;
49
50/* ...and now those things that may be slightly different between processor
51 * architectures. -- Dan
52 */
53#if defined(CONFIG_8xx)
54#define flush_HPTE(X, va, pg) _tlbie(va)
55#define MMU_init_hw() do { } while(0)
56#define mmu_mapin_ram() (0UL)
57
58#elif defined(CONFIG_4xx)
59#define flush_HPTE(X, va, pg) _tlbie(va)
60extern void MMU_init_hw(void);
61extern unsigned long mmu_mapin_ram(void);
62
63#elif defined(CONFIG_FSL_BOOKE)
64#define flush_HPTE(X, va, pg) _tlbie(va)
65extern void MMU_init_hw(void);
66extern unsigned long mmu_mapin_ram(void);
67extern void adjust_total_lowmem(void);
68
69#elif defined(CONFIG_PPC32)
70/* anything 32-bit except 4xx or 8xx */
71extern void MMU_init_hw(void);
72extern unsigned long mmu_mapin_ram(void);
73
74/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
75 * which includes all new 82xx processors. We need tlbie/tlbsync here
76 * in that case (I think). -- Dan.
77 */
78static inline void flush_HPTE(unsigned context, unsigned long va,
79 unsigned long pdval)
80{
81 if ((Hash != 0) &&
82 cpu_has_feature(CPU_FTR_HPTE_TABLE))
83 flush_hash_pages(0, va, pdval, 1);
84 else
85 _tlbie(va);
86}
87#endif
diff --git a/arch/ppc64/mm/numa.c b/arch/powerpc/mm/numa.c
index cb864b8f2750..cb864b8f2750 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
new file mode 100644
index 000000000000..5792e533916f
--- /dev/null
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -0,0 +1,469 @@
1/*
2 * This file contains the routines setting up the linux page tables.
3 * -- paulus
4 *
5 * Derived from arch/ppc/mm/init.c:
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *
8 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
9 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
10 * Copyright (C) 1996 Paul Mackerras
11 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
12 *
13 * Derived from "arch/i386/mm/init.c"
14 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/mm.h>
28#include <linux/vmalloc.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31
32#include <asm/pgtable.h>
33#include <asm/pgalloc.h>
34#include <asm/io.h>
35
36#include "mmu_decl.h"
37
38unsigned long ioremap_base;
39unsigned long ioremap_bot;
40int io_bat_index;
41
42#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
43#define HAVE_BATS 1
44#endif
45
46#if defined(CONFIG_FSL_BOOKE)
47#define HAVE_TLBCAM 1
48#endif
49
50extern char etext[], _stext[];
51
52#ifdef CONFIG_SMP
53extern void hash_page_sync(void);
54#endif
55
56#ifdef HAVE_BATS
57extern unsigned long v_mapped_by_bats(unsigned long va);
58extern unsigned long p_mapped_by_bats(unsigned long pa);
59void setbat(int index, unsigned long virt, unsigned long phys,
60 unsigned int size, int flags);
61
62#else /* !HAVE_BATS */
63#define v_mapped_by_bats(x) (0UL)
64#define p_mapped_by_bats(x) (0UL)
65#endif /* HAVE_BATS */
66
67#ifdef HAVE_TLBCAM
68extern unsigned int tlbcam_index;
69extern unsigned long v_mapped_by_tlbcam(unsigned long va);
70extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
71#else /* !HAVE_TLBCAM */
72#define v_mapped_by_tlbcam(x) (0UL)
73#define p_mapped_by_tlbcam(x) (0UL)
74#endif /* HAVE_TLBCAM */
75
76#ifdef CONFIG_PTE_64BIT
77/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
78#define PGDIR_ORDER 1
79#else
80#define PGDIR_ORDER 0
81#endif
82
83pgd_t *pgd_alloc(struct mm_struct *mm)
84{
85 pgd_t *ret;
86
87 ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
88 return ret;
89}
90
91void pgd_free(pgd_t *pgd)
92{
93 free_pages((unsigned long)pgd, PGDIR_ORDER);
94}
95
96pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
97{
98 pte_t *pte;
99 extern int mem_init_done;
100 extern void *early_get_page(void);
101
102 if (mem_init_done) {
103 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
104 } else {
105 pte = (pte_t *)early_get_page();
106 if (pte)
107 clear_page(pte);
108 }
109 return pte;
110}
111
112struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
113{
114 struct page *ptepage;
115
116#ifdef CONFIG_HIGHPTE
117 int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
118#else
119 int flags = GFP_KERNEL | __GFP_REPEAT;
120#endif
121
122 ptepage = alloc_pages(flags, 0);
123 if (ptepage)
124 clear_highpage(ptepage);
125 return ptepage;
126}
127
128void pte_free_kernel(pte_t *pte)
129{
130#ifdef CONFIG_SMP
131 hash_page_sync();
132#endif
133 free_page((unsigned long)pte);
134}
135
136void pte_free(struct page *ptepage)
137{
138#ifdef CONFIG_SMP
139 hash_page_sync();
140#endif
141 __free_page(ptepage);
142}
143
144#ifndef CONFIG_PHYS_64BIT
145void __iomem *
146ioremap(phys_addr_t addr, unsigned long size)
147{
148 return __ioremap(addr, size, _PAGE_NO_CACHE);
149}
150#else /* CONFIG_PHYS_64BIT */
151void __iomem *
152ioremap64(unsigned long long addr, unsigned long size)
153{
154 return __ioremap(addr, size, _PAGE_NO_CACHE);
155}
156
157void __iomem *
158ioremap(phys_addr_t addr, unsigned long size)
159{
160 phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
161
162 return ioremap64(addr64, size);
163}
164#endif /* CONFIG_PHYS_64BIT */
165
166void __iomem *
167__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
168{
169 unsigned long v, i;
170 phys_addr_t p;
171 int err;
172
173 /*
174 * Choose an address to map it to.
175 * Once the vmalloc system is running, we use it.
176 * Before then, we use space going down from ioremap_base
177 * (ioremap_bot records where we're up to).
178 */
179 p = addr & PAGE_MASK;
180 size = PAGE_ALIGN(addr + size) - p;
181
182 /*
183 * If the address lies within the first 16 MB, assume it's in ISA
184 * memory space
185 */
186 if (p < 16*1024*1024)
187 p += _ISA_MEM_BASE;
188
189 /*
190 * Don't allow anybody to remap normal RAM that we're using.
191 * mem_init() sets high_memory so only do the check after that.
192 */
193 if (mem_init_done && (p < virt_to_phys(high_memory))) {
194 printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
195 __builtin_return_address(0));
196 return NULL;
197 }
198
199 if (size == 0)
200 return NULL;
201
202 /*
203 * Is it already mapped? Perhaps overlapped by a previous
204 * BAT mapping. If the whole area is mapped then we're done,
205 * otherwise remap it since we want to keep the virt addrs for
206 * each request contiguous.
207 *
208 * We make the assumption here that if the bottom and top
209 * of the range we want are mapped then it's mapped to the
210 * same virt address (and this is contiguous).
211 * -- Cort
212 */
213 if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
214 goto out;
215
216 if ((v = p_mapped_by_tlbcam(p)))
217 goto out;
218
219 if (mem_init_done) {
220 struct vm_struct *area;
221 area = get_vm_area(size, VM_IOREMAP);
222 if (area == 0)
223 return NULL;
224 v = (unsigned long) area->addr;
225 } else {
226 v = (ioremap_bot -= size);
227 }
228
229 if ((flags & _PAGE_PRESENT) == 0)
230 flags |= _PAGE_KERNEL;
231 if (flags & _PAGE_NO_CACHE)
232 flags |= _PAGE_GUARDED;
233
234 /*
235 * Should check if it is a candidate for a BAT mapping
236 */
237
238 err = 0;
239 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
240 err = map_page(v+i, p+i, flags);
241 if (err) {
242 if (mem_init_done)
243 vunmap((void *)v);
244 return NULL;
245 }
246
247out:
248 return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
249}
250
251void iounmap(volatile void __iomem *addr)
252{
253 /*
254 * If mapped by BATs then there is nothing to do.
255 * Calling vfree() generates a benign warning.
256 */
257 if (v_mapped_by_bats((unsigned long)addr)) return;
258
259 if (addr > high_memory && (unsigned long) addr < ioremap_bot)
260 vunmap((void *) (PAGE_MASK & (unsigned long)addr));
261}
262
263void __iomem *ioport_map(unsigned long port, unsigned int len)
264{
265 return (void __iomem *) (port + _IO_BASE);
266}
267
268void ioport_unmap(void __iomem *addr)
269{
270 /* Nothing to do */
271}
272EXPORT_SYMBOL(ioport_map);
273EXPORT_SYMBOL(ioport_unmap);
274
275int
276map_page(unsigned long va, phys_addr_t pa, int flags)
277{
278 pmd_t *pd;
279 pte_t *pg;
280 int err = -ENOMEM;
281
282 spin_lock(&init_mm.page_table_lock);
283 /* Use upper 10 bits of VA to index the first level map */
284 pd = pmd_offset(pgd_offset_k(va), va);
285 /* Use middle 10 bits of VA to index the second-level map */
286 pg = pte_alloc_kernel(&init_mm, pd, va);
287 if (pg != 0) {
288 err = 0;
289 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
290 if (mem_init_done)
291 flush_HPTE(0, va, pmd_val(*pd));
292 }
293 spin_unlock(&init_mm.page_table_lock);
294 return err;
295}
296
297/*
298 * Map in all of physical memory starting at KERNELBASE.
299 */
300void __init mapin_ram(void)
301{
302 unsigned long v, p, s, f;
303
304 s = mmu_mapin_ram();
305 v = KERNELBASE + s;
306 p = PPC_MEMSTART + s;
307 for (; s < total_lowmem; s += PAGE_SIZE) {
308 if ((char *) v >= _stext && (char *) v < etext)
309 f = _PAGE_RAM_TEXT;
310 else
311 f = _PAGE_RAM;
312 map_page(v, p, f);
313 v += PAGE_SIZE;
314 p += PAGE_SIZE;
315 }
316}
317
318/* is x a power of 2? */
319#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
320
321/* is x a power of 4? */
322#define is_power_of_4(x) ((x) != 0 && (((x) & (x-1)) == 0) && (ffs(x) & 1))
323
324/*
325 * Set up a mapping for a block of I/O.
326 * virt, phys, size must all be page-aligned.
327 * This should only be called before ioremap is called.
328 */
329void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
330 unsigned int size, int flags)
331{
332 int i;
333
334 if (virt > KERNELBASE && virt < ioremap_bot)
335 ioremap_bot = ioremap_base = virt;
336
337#ifdef HAVE_BATS
338 /*
339 * Use a BAT for this if possible...
340 */
341 if (io_bat_index < 2 && is_power_of_2(size)
342 && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
343 setbat(io_bat_index, virt, phys, size, flags);
344 ++io_bat_index;
345 return;
346 }
347#endif /* HAVE_BATS */
348
349#ifdef HAVE_TLBCAM
350 /*
351 * Use a CAM for this if possible...
352 */
353 if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
354 && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
355 settlbcam(tlbcam_index, virt, phys, size, flags, 0);
356 ++tlbcam_index;
357 return;
358 }
359#endif /* HAVE_TLBCAM */
360
361 /* No BATs available, put it in the page tables. */
362 for (i = 0; i < size; i += PAGE_SIZE)
363 map_page(virt + i, phys + i, flags);
364}
365
366/* Scan the real Linux page tables and return a PTE pointer for
367 * a virtual address in a context.
368 * Returns true (1) if PTE was found, zero otherwise. The pointer to
369 * the PTE pointer is unmodified if PTE is not found.
370 */
371int
372get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
373{
374 pgd_t *pgd;
375 pmd_t *pmd;
376 pte_t *pte;
377 int retval = 0;
378
379 pgd = pgd_offset(mm, addr & PAGE_MASK);
380 if (pgd) {
381 pmd = pmd_offset(pgd, addr & PAGE_MASK);
382 if (pmd_present(*pmd)) {
383 pte = pte_offset_map(pmd, addr & PAGE_MASK);
384 if (pte) {
385 retval = 1;
386 *ptep = pte;
387 /* XXX caller needs to do pte_unmap, yuck */
388 }
389 }
390 }
391 return(retval);
392}
393
394/* Find physical address for this virtual address. Normally used by
395 * I/O functions, but anyone can call it.
396 */
397unsigned long iopa(unsigned long addr)
398{
399 unsigned long pa;
400
401 /* I don't know why this won't work on PMacs or CHRP. It
402 * appears there is some bug, or there is some implicit
403 * mapping done not properly represented by BATs or in page
404 * tables.......I am actively working on resolving this, but
405 * can't hold up other stuff. -- Dan
406 */
407 pte_t *pte;
408 struct mm_struct *mm;
409
410 /* Check the BATs */
411 pa = v_mapped_by_bats(addr);
412 if (pa)
413 return pa;
414
415 /* Allow mapping of user addresses (within the thread)
416 * for DMA if necessary.
417 */
418 if (addr < TASK_SIZE)
419 mm = current->mm;
420 else
421 mm = &init_mm;
422
423 pa = 0;
424 if (get_pteptr(mm, addr, &pte)) {
425 pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
426 pte_unmap(pte);
427 }
428
429 return(pa);
430}
431
432/* This is will find the virtual address for a physical one....
433 * Swiped from APUS, could be dangerous :-).
434 * This is only a placeholder until I really find a way to make this
435 * work. -- Dan
436 */
437unsigned long
438mm_ptov (unsigned long paddr)
439{
440 unsigned long ret;
441#if 0
442 if (paddr < 16*1024*1024)
443 ret = ZTWO_VADDR(paddr);
444 else {
445 int i;
446
447 for (i = 0; i < kmap_chunk_count;){
448 unsigned long phys = kmap_chunks[i++];
449 unsigned long size = kmap_chunks[i++];
450 unsigned long virt = kmap_chunks[i++];
451 if (paddr >= phys
452 && paddr < (phys + size)){
453 ret = virt + paddr - phys;
454 goto exit;
455 }
456 }
457
458 ret = (unsigned long) __va(paddr);
459 }
460exit:
461#ifdef DEBUGPV
462 printk ("PTOV(%lx)=%lx\n", paddr, ret);
463#endif
464#else
465 ret = (unsigned long)paddr + KERNELBASE;
466#endif
467 return ret;
468}
469
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
new file mode 100644
index 000000000000..484d24f9208b
--- /dev/null
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -0,0 +1,349 @@
1/*
2 * This file contains ioremap and related functions for 64-bit machines.
3 *
4 * Derived from arch/ppc64/mm/init.c
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
8 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
9 * Copyright (C) 1996 Paul Mackerras
10 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
11 *
12 * Derived from "arch/i386/mm/init.c"
13 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
14 *
15 * Dave Engebretsen <engebret@us.ibm.com>
16 * Rework for PPC64 port.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/config.h>
26#include <linux/signal.h>
27#include <linux/sched.h>
28#include <linux/kernel.h>
29#include <linux/errno.h>
30#include <linux/string.h>
31#include <linux/types.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/swap.h>
35#include <linux/stddef.h>
36#include <linux/vmalloc.h>
37#include <linux/init.h>
38#include <linux/delay.h>
39#include <linux/bootmem.h>
40#include <linux/highmem.h>
41#include <linux/idr.h>
42#include <linux/nodemask.h>
43#include <linux/module.h>
44
45#include <asm/pgalloc.h>
46#include <asm/page.h>
47#include <asm/prom.h>
48#include <asm/lmb.h>
49#include <asm/rtas.h>
50#include <asm/io.h>
51#include <asm/mmu_context.h>
52#include <asm/pgtable.h>
53#include <asm/mmu.h>
54#include <asm/uaccess.h>
55#include <asm/smp.h>
56#include <asm/machdep.h>
57#include <asm/tlb.h>
58#include <asm/eeh.h>
59#include <asm/processor.h>
60#include <asm/mmzone.h>
61#include <asm/cputable.h>
62#include <asm/ppcdebug.h>
63#include <asm/sections.h>
64#include <asm/system.h>
65#include <asm/iommu.h>
66#include <asm/abs_addr.h>
67#include <asm/vdso.h>
68#include <asm/imalloc.h>
69
70unsigned long ioremap_bot = IMALLOC_BASE;
71static unsigned long phbs_io_bot = PHBS_IO_BASE;
72
73#ifdef CONFIG_PPC_ISERIES
74
75void __iomem *ioremap(unsigned long addr, unsigned long size)
76{
77 return (void __iomem *)addr;
78}
79
80extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
81 unsigned long flags)
82{
83 return (void __iomem *)addr;
84}
85
86void iounmap(volatile void __iomem *addr)
87{
88 return;
89}
90
91#else
92
93/*
94 * map_io_page currently only called by __ioremap
95 * map_io_page adds an entry to the ioremap page table
96 * and adds an entry to the HPT, possibly bolting it
97 */
98static int map_io_page(unsigned long ea, unsigned long pa, int flags)
99{
100 pgd_t *pgdp;
101 pud_t *pudp;
102 pmd_t *pmdp;
103 pte_t *ptep;
104 unsigned long vsid;
105
106 if (mem_init_done) {
107 spin_lock(&init_mm.page_table_lock);
108 pgdp = pgd_offset_k(ea);
109 pudp = pud_alloc(&init_mm, pgdp, ea);
110 if (!pudp)
111 return -ENOMEM;
112 pmdp = pmd_alloc(&init_mm, pudp, ea);
113 if (!pmdp)
114 return -ENOMEM;
115 ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
116 if (!ptep)
117 return -ENOMEM;
118 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
119 __pgprot(flags)));
120 spin_unlock(&init_mm.page_table_lock);
121 } else {
122 unsigned long va, vpn, hash, hpteg;
123
124 /*
125 * If the mm subsystem is not fully up, we cannot create a
126 * linux page table entry for this mapping. Simply bolt an
127 * entry in the hardware page table.
128 */
129 vsid = get_kernel_vsid(ea);
130 va = (vsid << 28) | (ea & 0xFFFFFFF);
131 vpn = va >> PAGE_SHIFT;
132
133 hash = hpt_hash(vpn, 0);
134
135 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
136
137 /* Panic if a pte grpup is full */
138 if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
139 HPTE_V_BOLTED,
140 _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
141 == -1) {
142 panic("map_io_page: could not insert mapping");
143 }
144 }
145 return 0;
146}
147
148
149static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
150 unsigned long ea, unsigned long size,
151 unsigned long flags)
152{
153 unsigned long i;
154
155 if ((flags & _PAGE_PRESENT) == 0)
156 flags |= pgprot_val(PAGE_KERNEL);
157
158 for (i = 0; i < size; i += PAGE_SIZE)
159 if (map_io_page(ea+i, pa+i, flags))
160 return NULL;
161
162 return (void __iomem *) (ea + (addr & ~PAGE_MASK));
163}
164
165
166void __iomem *
167ioremap(unsigned long addr, unsigned long size)
168{
169 return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
170}
171
172void __iomem * __ioremap(unsigned long addr, unsigned long size,
173 unsigned long flags)
174{
175 unsigned long pa, ea;
176 void __iomem *ret;
177
178 /*
179 * Choose an address to map it to.
180 * Once the imalloc system is running, we use it.
181 * Before that, we map using addresses going
182 * up from ioremap_bot. imalloc will use
183 * the addresses from ioremap_bot through
184 * IMALLOC_END
185 *
186 */
187 pa = addr & PAGE_MASK;
188 size = PAGE_ALIGN(addr + size) - pa;
189
190 if (size == 0)
191 return NULL;
192
193 if (mem_init_done) {
194 struct vm_struct *area;
195 area = im_get_free_area(size);
196 if (area == NULL)
197 return NULL;
198 ea = (unsigned long)(area->addr);
199 ret = __ioremap_com(addr, pa, ea, size, flags);
200 if (!ret)
201 im_free(area->addr);
202 } else {
203 ea = ioremap_bot;
204 ret = __ioremap_com(addr, pa, ea, size, flags);
205 if (ret)
206 ioremap_bot += size;
207 }
208 return ret;
209}
210
211#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
212
213int __ioremap_explicit(unsigned long pa, unsigned long ea,
214 unsigned long size, unsigned long flags)
215{
216 struct vm_struct *area;
217 void __iomem *ret;
218
219 /* For now, require page-aligned values for pa, ea, and size */
220 if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
221 !IS_PAGE_ALIGNED(size)) {
222 printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
223 return 1;
224 }
225
226 if (!mem_init_done) {
227 /* Two things to consider in this case:
228 * 1) No records will be kept (imalloc, etc) that the region
229 * has been remapped
230 * 2) It won't be easy to iounmap() the region later (because
231 * of 1)
232 */
233 ;
234 } else {
235 area = im_get_area(ea, size,
236 IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
237 if (area == NULL) {
238 /* Expected when PHB-dlpar is in play */
239 return 1;
240 }
241 if (ea != (unsigned long) area->addr) {
242 printk(KERN_ERR "unexpected addr return from "
243 "im_get_area\n");
244 return 1;
245 }
246 }
247
248 ret = __ioremap_com(pa, pa, ea, size, flags);
249 if (ret == NULL) {
250 printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
251 return 1;
252 }
253 if (ret != (void *) ea) {
254 printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
255 return 1;
256 }
257
258 return 0;
259}
260
261/*
262 * Unmap an IO region and remove it from imalloc'd list.
263 * Access to IO memory should be serialized by driver.
264 * This code is modeled after vmalloc code - unmap_vm_area()
265 *
266 * XXX what about calls before mem_init_done (ie python_countermeasures())
267 */
268void iounmap(volatile void __iomem *token)
269{
270 void *addr;
271
272 if (!mem_init_done)
273 return;
274
275 addr = (void *) ((unsigned long __force) token & PAGE_MASK);
276
277 im_free(addr);
278}
279
280static int iounmap_subset_regions(unsigned long addr, unsigned long size)
281{
282 struct vm_struct *area;
283
284 /* Check whether subsets of this region exist */
285 area = im_get_area(addr, size, IM_REGION_SUPERSET);
286 if (area == NULL)
287 return 1;
288
289 while (area) {
290 iounmap((void __iomem *) area->addr);
291 area = im_get_area(addr, size,
292 IM_REGION_SUPERSET);
293 }
294
295 return 0;
296}
297
298int iounmap_explicit(volatile void __iomem *start, unsigned long size)
299{
300 struct vm_struct *area;
301 unsigned long addr;
302 int rc;
303
304 addr = (unsigned long __force) start & PAGE_MASK;
305
306 /* Verify that the region either exists or is a subset of an existing
307 * region. In the latter case, split the parent region to create
308 * the exact region
309 */
310 area = im_get_area(addr, size,
311 IM_REGION_EXISTS | IM_REGION_SUBSET);
312 if (area == NULL) {
313 /* Determine whether subset regions exist. If so, unmap */
314 rc = iounmap_subset_regions(addr, size);
315 if (rc) {
316 printk(KERN_ERR
317 "%s() cannot unmap nonexistent range 0x%lx\n",
318 __FUNCTION__, addr);
319 return 1;
320 }
321 } else {
322 iounmap((void __iomem *) area->addr);
323 }
324 /*
325 * FIXME! This can't be right:
326 iounmap(area->addr);
327 * Maybe it should be "iounmap(area);"
328 */
329 return 0;
330}
331
332#endif
333
334EXPORT_SYMBOL(ioremap);
335EXPORT_SYMBOL(__ioremap);
336EXPORT_SYMBOL(iounmap);
337
338void __iomem * reserve_phb_iospace(unsigned long size)
339{
340 void __iomem *virt_addr;
341
342 if (phbs_io_bot >= IMALLOC_BASE)
343 panic("reserve_phb_iospace(): phb io space overflow\n");
344
345 virt_addr = (void __iomem *) phbs_io_bot;
346 phbs_io_bot += size;
347
348 return virt_addr;
349}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
new file mode 100644
index 000000000000..cef9e83cc7e9
--- /dev/null
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -0,0 +1,285 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU substantially follows the
4 * architecture specification. This includes the 6xx, 7xx, 7xxx,
5 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/kernel.h>
28#include <linux/mm.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31
32#include <asm/prom.h>
33#include <asm/mmu.h>
34#include <asm/machdep.h>
35#include <asm/lmb.h>
36
37#include "mmu_decl.h"
38
39PTE *Hash, *Hash_end;
40unsigned long Hash_size, Hash_mask;
41unsigned long _SDR1;
42
43union ubat { /* BAT register values to be loaded */
44 BAT bat;
45#ifdef CONFIG_PPC64BRIDGE
46 u64 word[2];
47#else
48 u32 word[2];
49#endif
50} BATS[4][2]; /* 4 pairs of IBAT, DBAT */
51
52struct batrange { /* stores address ranges mapped by BATs */
53 unsigned long start;
54 unsigned long limit;
55 unsigned long phys;
56} bat_addrs[4];
57
58/*
59 * Return PA for this VA if it is mapped by a BAT, or 0
60 */
61unsigned long v_mapped_by_bats(unsigned long va)
62{
63 int b;
64 for (b = 0; b < 4; ++b)
65 if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
66 return bat_addrs[b].phys + (va - bat_addrs[b].start);
67 return 0;
68}
69
70/*
71 * Return VA for a given PA or 0 if not mapped
72 */
73unsigned long p_mapped_by_bats(unsigned long pa)
74{
75 int b;
76 for (b = 0; b < 4; ++b)
77 if (pa >= bat_addrs[b].phys
78 && pa < (bat_addrs[b].limit-bat_addrs[b].start)
79 +bat_addrs[b].phys)
80 return bat_addrs[b].start+(pa-bat_addrs[b].phys);
81 return 0;
82}
83
84unsigned long __init mmu_mapin_ram(void)
85{
86#ifdef CONFIG_POWER4
87 return 0;
88#else
89 unsigned long tot, bl, done;
90 unsigned long max_size = (256<<20);
91 unsigned long align;
92
93 if (__map_without_bats)
94 return 0;
95
96 /* Set up BAT2 and if necessary BAT3 to cover RAM. */
97
98 /* Make sure we don't map a block larger than the
99 smallest alignment of the physical address. */
100 /* alignment of PPC_MEMSTART */
101 align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
102 /* set BAT block size to MIN(max_size, align) */
103 if (align && align < max_size)
104 max_size = align;
105
106 tot = total_lowmem;
107 for (bl = 128<<10; bl < max_size; bl <<= 1) {
108 if (bl * 2 > tot)
109 break;
110 }
111
112 setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
113 done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
114 if ((done < tot) && !bat_addrs[3].limit) {
115 /* use BAT3 to cover a bit more */
116 tot -= done;
117 for (bl = 128<<10; bl < max_size; bl <<= 1)
118 if (bl * 2 > tot)
119 break;
120 setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
121 done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
122 }
123
124 return done;
125#endif
126}
127
128/*
129 * Set up one of the I/D BAT (block address translation) register pairs.
130 * The parameters are not checked; in particular size must be a power
131 * of 2 between 128k and 256M.
132 */
133void __init setbat(int index, unsigned long virt, unsigned long phys,
134 unsigned int size, int flags)
135{
136 unsigned int bl;
137 int wimgxpp;
138 union ubat *bat = BATS[index];
139
140 if (((flags & _PAGE_NO_CACHE) == 0) &&
141 cpu_has_feature(CPU_FTR_NEED_COHERENT))
142 flags |= _PAGE_COHERENT;
143
144 bl = (size >> 17) - 1;
145 if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
146 /* 603, 604, etc. */
147 /* Do DBAT first */
148 wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
149 | _PAGE_COHERENT | _PAGE_GUARDED);
150 wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
151 bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
152 bat[1].word[1] = phys | wimgxpp;
153#ifndef CONFIG_KGDB /* want user access for breakpoints */
154 if (flags & _PAGE_USER)
155#endif
156 bat[1].bat.batu.vp = 1;
157 if (flags & _PAGE_GUARDED) {
158 /* G bit must be zero in IBATs */
159 bat[0].word[0] = bat[0].word[1] = 0;
160 } else {
161 /* make IBAT same as DBAT */
162 bat[0] = bat[1];
163 }
164 } else {
165 /* 601 cpu */
166 if (bl > BL_8M)
167 bl = BL_8M;
168 wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
169 | _PAGE_COHERENT);
170 wimgxpp |= (flags & _PAGE_RW)?
171 ((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
172 bat->word[0] = virt | wimgxpp | 4; /* Ks=0, Ku=1 */
173 bat->word[1] = phys | bl | 0x40; /* V=1 */
174 }
175
176 bat_addrs[index].start = virt;
177 bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
178 bat_addrs[index].phys = phys;
179}
180
181/*
182 * Initialize the hash table and patch the instructions in hashtable.S.
183 */
184void __init MMU_init_hw(void)
185{
186 unsigned int hmask, mb, mb2;
187 unsigned int n_hpteg, lg_n_hpteg;
188
189 extern unsigned int hash_page_patch_A[];
190 extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
191 extern unsigned int hash_page[];
192 extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
193
194 if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
195 /*
196 * Put a blr (procedure return) instruction at the
197 * start of hash_page, since we can still get DSI
198 * exceptions on a 603.
199 */
200 hash_page[0] = 0x4e800020;
201 flush_icache_range((unsigned long) &hash_page[0],
202 (unsigned long) &hash_page[1]);
203 return;
204 }
205
206 if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
207
208#ifdef CONFIG_PPC64BRIDGE
209#define LG_HPTEG_SIZE 7 /* 128 bytes per HPTEG */
210#define SDR1_LOW_BITS (lg_n_hpteg - 11)
211#define MIN_N_HPTEG 2048 /* min 256kB hash table */
212#else
213#define LG_HPTEG_SIZE 6 /* 64 bytes per HPTEG */
214#define SDR1_LOW_BITS ((n_hpteg - 1) >> 10)
215#define MIN_N_HPTEG 1024 /* min 64kB hash table */
216#endif
217
218 /*
219 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
220 * This is less than the recommended amount, but then
221 * Linux ain't AIX.
222 */
223 n_hpteg = total_memory / (PAGE_SIZE * 8);
224 if (n_hpteg < MIN_N_HPTEG)
225 n_hpteg = MIN_N_HPTEG;
226 lg_n_hpteg = __ilog2(n_hpteg);
227 if (n_hpteg & (n_hpteg - 1)) {
228 ++lg_n_hpteg; /* round up if not power of 2 */
229 n_hpteg = 1 << lg_n_hpteg;
230 }
231 Hash_size = n_hpteg << LG_HPTEG_SIZE;
232
233 /*
234 * Find some memory for the hash table.
235 */
236 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
237 Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
238 __initial_memory_limit));
239 cacheable_memzero(Hash, Hash_size);
240 _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
241
242 Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
243
244 printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
245 total_memory >> 20, Hash_size >> 10, Hash);
246
247
248 /*
249 * Patch up the instructions in hashtable.S:create_hpte
250 */
251 if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
252 Hash_mask = n_hpteg - 1;
253 hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
254 mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
255 if (lg_n_hpteg > 16)
256 mb2 = 16 - LG_HPTEG_SIZE;
257
258 hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
259 | ((unsigned int)(Hash) >> 16);
260 hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
261 hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
262 hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
263 hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
264
265 /*
266 * Ensure that the locations we've patched have been written
267 * out from the data cache and invalidated in the instruction
268 * cache, on those machines with split caches.
269 */
270 flush_icache_range((unsigned long) &hash_page_patch_A[0],
271 (unsigned long) &hash_page_patch_C[1]);
272
273 /*
274 * Patch up the instructions in hashtable.S:flush_hash_page
275 */
276 flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
277 | ((unsigned int)(Hash) >> 16);
278 flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
279 flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
280 flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
281 flush_icache_range((unsigned long) &flush_hash_patch_A[0],
282 (unsigned long) &flush_hash_patch_B[1]);
283
284 if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
285}
diff --git a/arch/ppc64/mm/slb.c b/arch/powerpc/mm/slb.c
index 0473953f6a37..0473953f6a37 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
diff --git a/arch/ppc64/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index a3a03da503bc..a3a03da503bc 100644
--- a/arch/ppc64/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
diff --git a/arch/ppc64/mm/stab.c b/arch/powerpc/mm/stab.c
index 1b83f002bf27..1b83f002bf27 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
new file mode 100644
index 000000000000..6c3dc3c44c86
--- /dev/null
+++ b/arch/powerpc/mm/tlb_32.c
@@ -0,0 +1,183 @@
1/*
2 * This file contains the routines for TLB flushing.
3 * On machines where the MMU uses a hash table to store virtual to
4 * physical translations, these routines flush entries from the
5 * hash table also.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/kernel.h>
28#include <linux/mm.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31#include <asm/tlbflush.h>
32#include <asm/tlb.h>
33
34#include "mmu_decl.h"
35
36/*
37 * Called when unmapping pages to flush entries from the TLB/hash table.
38 */
39void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
40{
41 unsigned long ptephys;
42
43 if (Hash != 0) {
44 ptephys = __pa(ptep) & PAGE_MASK;
45 flush_hash_pages(mm->context, addr, ptephys, 1);
46 }
47}
48
49/*
50 * Called by ptep_set_access_flags, must flush on CPUs for which the
51 * DSI handler can't just "fixup" the TLB on a write fault
52 */
53void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
54{
55 if (Hash != 0)
56 return;
57 _tlbie(addr);
58}
59
60/*
61 * Called at the end of a mmu_gather operation to make sure the
62 * TLB flush is completely done.
63 */
64void tlb_flush(struct mmu_gather *tlb)
65{
66 if (Hash == 0) {
67 /*
68 * 603 needs to flush the whole TLB here since
69 * it doesn't use a hash table.
70 */
71 _tlbia();
72 }
73}
74
75/*
76 * TLB flushing:
77 *
78 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
79 * - flush_tlb_page(vma, vmaddr) flushes one page
80 * - flush_tlb_range(vma, start, end) flushes a range of pages
81 * - flush_tlb_kernel_range(start, end) flushes kernel pages
82 *
83 * since the hardware hash table functions as an extension of the
84 * tlb as far as the linux tables are concerned, flush it too.
85 * -- Cort
86 */
87
88/*
89 * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
90 * the cache operations on the bus. Hence we need to use an IPI
91 * to get the other CPU(s) to invalidate their TLBs.
92 */
93#ifdef CONFIG_SMP_750
94#define FINISH_FLUSH smp_send_tlb_invalidate(0)
95#else
96#define FINISH_FLUSH do { } while (0)
97#endif
98
99static void flush_range(struct mm_struct *mm, unsigned long start,
100 unsigned long end)
101{
102 pmd_t *pmd;
103 unsigned long pmd_end;
104 int count;
105 unsigned int ctx = mm->context;
106
107 if (Hash == 0) {
108 _tlbia();
109 return;
110 }
111 start &= PAGE_MASK;
112 if (start >= end)
113 return;
114 end = (end - 1) | ~PAGE_MASK;
115 pmd = pmd_offset(pgd_offset(mm, start), start);
116 for (;;) {
117 pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
118 if (pmd_end > end)
119 pmd_end = end;
120 if (!pmd_none(*pmd)) {
121 count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
122 flush_hash_pages(ctx, start, pmd_val(*pmd), count);
123 }
124 if (pmd_end == end)
125 break;
126 start = pmd_end + 1;
127 ++pmd;
128 }
129}
130
131/*
132 * Flush kernel TLB entries in the given range
133 */
134void flush_tlb_kernel_range(unsigned long start, unsigned long end)
135{
136 flush_range(&init_mm, start, end);
137 FINISH_FLUSH;
138}
139
140/*
141 * Flush all the (user) entries for the address space described by mm.
142 */
143void flush_tlb_mm(struct mm_struct *mm)
144{
145 struct vm_area_struct *mp;
146
147 if (Hash == 0) {
148 _tlbia();
149 return;
150 }
151
152 for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
153 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
154 FINISH_FLUSH;
155}
156
157void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
158{
159 struct mm_struct *mm;
160 pmd_t *pmd;
161
162 if (Hash == 0) {
163 _tlbie(vmaddr);
164 return;
165 }
166 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
167 pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
168 if (!pmd_none(*pmd))
169 flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
170 FINISH_FLUSH;
171}
172
173/*
174 * For each address in the range, find the pte for the address
175 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
176 * the corresponding HPTE.
177 */
178void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
179 unsigned long end)
180{
181 flush_range(vma->vm_mm, start, end);
182 FINISH_FLUSH;
183}
diff --git a/arch/ppc64/mm/tlb.c b/arch/powerpc/mm/tlb_64.c
index 21fbffb23a43..09ab81a10f4f 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -128,12 +128,10 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
128void hpte_update(struct mm_struct *mm, unsigned long addr, 128void hpte_update(struct mm_struct *mm, unsigned long addr,
129 unsigned long pte, int wrprot) 129 unsigned long pte, int wrprot)
130{ 130{
131 int i;
132 unsigned long context = 0;
133 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 131 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
132 unsigned long vsid;
133 int i;
134 134
135 if (REGION_ID(addr) == USER_REGION_ID)
136 context = mm->context.id;
137 i = batch->index; 135 i = batch->index;
138 136
139 /* 137 /*
@@ -143,19 +141,21 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
143 * up scanning and resetting referenced bits then our batch context 141 * up scanning and resetting referenced bits then our batch context
144 * will change mid stream. 142 * will change mid stream.
145 */ 143 */
146 if (i != 0 && (context != batch->context || 144 if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) {
147 batch->large != pte_huge(pte))) {
148 flush_tlb_pending(); 145 flush_tlb_pending();
149 i = 0; 146 i = 0;
150 } 147 }
151
152 if (i == 0) { 148 if (i == 0) {
153 batch->context = context;
154 batch->mm = mm; 149 batch->mm = mm;
155 batch->large = pte_huge(pte); 150 batch->large = pte_huge(pte);
156 } 151 }
152 if (addr < KERNELBASE) {
153 vsid = get_vsid(mm->context.id, addr);
154 WARN_ON(vsid == 0);
155 } else
156 vsid = get_kernel_vsid(addr);
157 batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
157 batch->pte[i] = __pte(pte); 158 batch->pte[i] = __pte(pte);
158 batch->addr[i] = addr;
159 batch->index = ++i; 159 batch->index = ++i;
160 if (i >= PPC64_TLB_BATCH_NR) 160 if (i >= PPC64_TLB_BATCH_NR)
161 flush_tlb_pending(); 161 flush_tlb_pending();
@@ -177,10 +177,9 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
177 local = 1; 177 local = 1;
178 178
179 if (i == 1) 179 if (i == 1)
180 flush_hash_page(batch->context, batch->addr[0], batch->pte[0], 180 flush_hash_page(batch->vaddr[0], batch->pte[0], local);
181 local);
182 else 181 else
183 flush_hash_range(batch->context, i, local); 182 flush_hash_range(i, local);
184 batch->index = 0; 183 batch->index = 0;
185 put_cpu(); 184 put_cpu();
186} 185}
diff --git a/arch/ppc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
index 19d37730b664..19d37730b664 100644
--- a/arch/ppc/oprofile/Kconfig
+++ b/arch/powerpc/oprofile/Kconfig
diff --git a/arch/ppc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index e2218d32a4eb..0782d0cca89c 100644
--- a/arch/ppc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -7,8 +7,5 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
7 timer_int.o ) 7 timer_int.o )
8 8
9oprofile-y := $(DRIVER_OBJS) common.o 9oprofile-y := $(DRIVER_OBJS) common.o
10 10oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
11ifeq ($(CONFIG_FSL_BOOKE),y) 11oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
12 oprofile-y += op_model_fsl_booke.o
13endif
14
diff --git a/arch/ppc64/oprofile/common.c b/arch/powerpc/oprofile/common.c
index e5f572710aa0..af2c05d20ba5 100644
--- a/arch/ppc64/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -1,5 +1,9 @@
1/* 1/*
2 * PPC 64 oprofile support:
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM 3 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
4 * PPC 32 oprofile support: (based on PPC 64 support)
5 * Copyright (C) Freescale Semiconductor, Inc 2004
6 * Author: Andy Fleming
3 * 7 *
4 * Based on alpha version. 8 * Based on alpha version.
5 * 9 *
@@ -10,6 +14,9 @@
10 */ 14 */
11 15
12#include <linux/oprofile.h> 16#include <linux/oprofile.h>
17#ifndef __powerpc64__
18#include <linux/slab.h>
19#endif /* ! __powerpc64__ */
13#include <linux/init.h> 20#include <linux/init.h>
14#include <linux/smp.h> 21#include <linux/smp.h>
15#include <linux/errno.h> 22#include <linux/errno.h>
@@ -19,17 +26,21 @@
19#include <asm/cputable.h> 26#include <asm/cputable.h>
20#include <asm/oprofile_impl.h> 27#include <asm/oprofile_impl.h>
21 28
22static struct op_ppc64_model *model; 29static struct op_powerpc_model *model;
23 30
24static struct op_counter_config ctr[OP_MAX_COUNTER]; 31static struct op_counter_config ctr[OP_MAX_COUNTER];
25static struct op_system_config sys; 32static struct op_system_config sys;
26 33
34#ifndef __powerpc64__
35static char *cpu_type;
36#endif /* ! __powerpc64__ */
37
27static void op_handle_interrupt(struct pt_regs *regs) 38static void op_handle_interrupt(struct pt_regs *regs)
28{ 39{
29 model->handle_interrupt(regs, ctr); 40 model->handle_interrupt(regs, ctr);
30} 41}
31 42
32static int op_ppc64_setup(void) 43static int op_powerpc_setup(void)
33{ 44{
34 int err; 45 int err;
35 46
@@ -42,41 +53,49 @@ static int op_ppc64_setup(void)
42 model->reg_setup(ctr, &sys, model->num_counters); 53 model->reg_setup(ctr, &sys, model->num_counters);
43 54
44 /* Configure the registers on all cpus. */ 55 /* Configure the registers on all cpus. */
56#ifdef __powerpc64__
45 on_each_cpu(model->cpu_setup, NULL, 0, 1); 57 on_each_cpu(model->cpu_setup, NULL, 0, 1);
58#else /* __powerpc64__ */
59#if 0
60 /* FIXME: Make multi-cpu work */
61 on_each_cpu(model->reg_setup, NULL, 0, 1);
62#endif
63#endif /* __powerpc64__ */
46 64
47 return 0; 65 return 0;
48} 66}
49 67
50static void op_ppc64_shutdown(void) 68static void op_powerpc_shutdown(void)
51{ 69{
52 release_pmc_hardware(); 70 release_pmc_hardware();
53} 71}
54 72
55static void op_ppc64_cpu_start(void *dummy) 73static void op_powerpc_cpu_start(void *dummy)
56{ 74{
57 model->start(ctr); 75 model->start(ctr);
58} 76}
59 77
60static int op_ppc64_start(void) 78static int op_powerpc_start(void)
61{ 79{
62 on_each_cpu(op_ppc64_cpu_start, NULL, 0, 1); 80 on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
63 return 0; 81 return 0;
64} 82}
65 83
66static inline void op_ppc64_cpu_stop(void *dummy) 84static inline void op_powerpc_cpu_stop(void *dummy)
67{ 85{
68 model->stop(); 86 model->stop();
69} 87}
70 88
71static void op_ppc64_stop(void) 89static void op_powerpc_stop(void)
72{ 90{
73 on_each_cpu(op_ppc64_cpu_stop, NULL, 0, 1); 91 on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
74} 92}
75 93
76static int op_ppc64_create_files(struct super_block *sb, struct dentry *root) 94static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
77{ 95{
78 int i; 96 int i;
79 97
98#ifdef __powerpc64__
80 /* 99 /*
81 * There is one mmcr0, mmcr1 and mmcra for setting the events for 100 * There is one mmcr0, mmcr1 and mmcra for setting the events for
82 * all of the counters. 101 * all of the counters.
@@ -84,6 +103,7 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
84 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); 103 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
85 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); 104 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
86 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); 105 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
106#endif /* __powerpc64__ */
87 107
88 for (i = 0; i < model->num_counters; ++i) { 108 for (i = 0; i < model->num_counters; ++i) {
89 struct dentry *dir; 109 struct dentry *dir;
@@ -95,44 +115,70 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
95 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); 115 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
96 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); 116 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
97 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); 117 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
118#ifdef __powerpc64__
98 /* 119 /*
 99	 * We don't support per counter user/kernel selection, but 120	 * We don't support per counter user/kernel selection, but
100 * we leave the entries because userspace expects them 121 * we leave the entries because userspace expects them
101 */ 122 */
123#endif /* __powerpc64__ */
102 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); 124 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
103 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); 125 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
126
127#ifndef __powerpc64__
128 /* FIXME: Not sure if this is used */
129#endif /* ! __powerpc64__ */
104 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); 130 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
105 } 131 }
106 132
107 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel); 133 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
108 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user); 134 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
135#ifdef __powerpc64__
109 oprofilefs_create_ulong(sb, root, "backtrace_spinlocks", 136 oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
110 &sys.backtrace_spinlocks); 137 &sys.backtrace_spinlocks);
138#endif /* __powerpc64__ */
111 139
112 /* Default to tracing both kernel and user */ 140 /* Default to tracing both kernel and user */
113 sys.enable_kernel = 1; 141 sys.enable_kernel = 1;
114 sys.enable_user = 1; 142 sys.enable_user = 1;
115 143#ifdef __powerpc64__
116 /* Turn on backtracing through spinlocks by default */ 144 /* Turn on backtracing through spinlocks by default */
117 sys.backtrace_spinlocks = 1; 145 sys.backtrace_spinlocks = 1;
146#endif /* __powerpc64__ */
118 147
119 return 0; 148 return 0;
120} 149}
121 150
122int __init oprofile_arch_init(struct oprofile_operations *ops) 151int __init oprofile_arch_init(struct oprofile_operations *ops)
123{ 152{
153#ifndef __powerpc64__
154#ifdef CONFIG_FSL_BOOKE
155 model = &op_model_fsl_booke;
156#else
157 return -ENODEV;
158#endif
159
160 cpu_type = kmalloc(32, GFP_KERNEL);
161 if (NULL == cpu_type)
162 return -ENOMEM;
163
164 sprintf(cpu_type, "ppc/%s", cur_cpu_spec->cpu_name);
165
166 model->num_counters = cur_cpu_spec->num_pmcs;
167
168 ops->cpu_type = cpu_type;
169#else /* __powerpc64__ */
124 if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type) 170 if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
125 return -ENODEV; 171 return -ENODEV;
126
127 model = cur_cpu_spec->oprofile_model; 172 model = cur_cpu_spec->oprofile_model;
128 model->num_counters = cur_cpu_spec->num_pmcs; 173 model->num_counters = cur_cpu_spec->num_pmcs;
129 174
130 ops->cpu_type = cur_cpu_spec->oprofile_cpu_type; 175 ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
131 ops->create_files = op_ppc64_create_files; 176#endif /* __powerpc64__ */
132 ops->setup = op_ppc64_setup; 177 ops->create_files = op_powerpc_create_files;
133 ops->shutdown = op_ppc64_shutdown; 178 ops->setup = op_powerpc_setup;
134 ops->start = op_ppc64_start; 179 ops->shutdown = op_powerpc_shutdown;
135 ops->stop = op_ppc64_stop; 180 ops->start = op_powerpc_start;
181 ops->stop = op_powerpc_stop;
136 182
137 printk(KERN_INFO "oprofile: using %s performance monitoring.\n", 183 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
138 ops->cpu_type); 184 ops->cpu_type);
@@ -142,4 +188,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
142 188
143void oprofile_arch_exit(void) 189void oprofile_arch_exit(void)
144{ 190{
191#ifndef __powerpc64__
192 kfree(cpu_type);
193 cpu_type = NULL;
194#endif /* ! __powerpc64__ */
145} 195}
diff --git a/arch/ppc/oprofile/op_model_fsl_booke.c b/arch/powerpc/oprofile/op_model_fsl_booke.c
index fc9c859358c6..86124a94c9af 100644
--- a/arch/ppc/oprofile/op_model_fsl_booke.c
+++ b/arch/powerpc/oprofile/op_model_fsl_booke.c
@@ -24,9 +24,8 @@
24#include <asm/cputable.h> 24#include <asm/cputable.h>
25#include <asm/reg_booke.h> 25#include <asm/reg_booke.h>
26#include <asm/page.h> 26#include <asm/page.h>
27#include <asm/perfmon.h> 27#include <asm/pmc.h>
28 28#include <asm/oprofile_impl.h>
29#include "op_impl.h"
30 29
31static unsigned long reset_value[OP_MAX_COUNTER]; 30static unsigned long reset_value[OP_MAX_COUNTER];
32 31
@@ -176,7 +175,7 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs,
176 pmc_start_ctrs(1); 175 pmc_start_ctrs(1);
177} 176}
178 177
179struct op_ppc32_model op_model_fsl_booke = { 178struct op_powerpc_model op_model_fsl_booke = {
180 .reg_setup = fsl_booke_reg_setup, 179 .reg_setup = fsl_booke_reg_setup,
181 .start = fsl_booke_start, 180 .start = fsl_booke_start,
182 .stop = fsl_booke_stop, 181 .stop = fsl_booke_stop,
diff --git a/arch/ppc64/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 32b2bb5625fe..886449315847 100644
--- a/arch/ppc64/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -300,7 +300,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
300 mtspr(SPRN_MMCR0, mmcr0); 300 mtspr(SPRN_MMCR0, mmcr0);
301} 301}
302 302
303struct op_ppc64_model op_model_power4 = { 303struct op_powerpc_model op_model_power4 = {
304 .reg_setup = power4_reg_setup, 304 .reg_setup = power4_reg_setup,
305 .cpu_setup = power4_cpu_setup, 305 .cpu_setup = power4_cpu_setup,
306 .start = power4_start, 306 .start = power4_start,
diff --git a/arch/ppc64/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index 08c5b333f5c4..e010b85996e8 100644
--- a/arch/ppc64/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -209,7 +209,7 @@ static void rs64_handle_interrupt(struct pt_regs *regs,
209 mtspr(SPRN_MMCR0, mmcr0); 209 mtspr(SPRN_MMCR0, mmcr0);
210} 210}
211 211
212struct op_ppc64_model op_model_rs64 = { 212struct op_powerpc_model op_model_rs64 = {
213 .reg_setup = rs64_reg_setup, 213 .reg_setup = rs64_reg_setup,
214 .cpu_setup = rs64_cpu_setup, 214 .cpu_setup = rs64_cpu_setup,
215 .start = rs64_start, 215 .start = rs64_start,
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
new file mode 100644
index 000000000000..ed39d6a3d22a
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/Kconfig
@@ -0,0 +1,280 @@
1config 4xx
2 bool
3 depends on 40x || 44x
4 default y
5
6config WANT_EARLY_SERIAL
7 bool
8 select SERIAL_8250
9 default n
10
11menu "AMCC 4xx options"
12 depends on 4xx
13
14choice
15 prompt "Machine Type"
16 depends on 40x
17 default WALNUT
18
19config BUBINGA
20 bool "Bubinga"
21 select WANT_EARLY_SERIAL
22 help
23 This option enables support for the IBM 405EP evaluation board.
24
25config CPCI405
26 bool "CPCI405"
27 help
28 This option enables support for the CPCI405 board.
29
30config EP405
31 bool "EP405/EP405PC"
32 help
33 This option enables support for the EP405/EP405PC boards.
34
35config REDWOOD_5
36 bool "Redwood-5"
37 help
38 This option enables support for the IBM STB04 evaluation board.
39
40config REDWOOD_6
41 bool "Redwood-6"
42 help
43 This option enables support for the IBM STBx25xx evaluation board.
44
45config SYCAMORE
46 bool "Sycamore"
47 help
48 This option enables support for the IBM PPC405GPr evaluation board.
49
50config WALNUT
51 bool "Walnut"
52 help
53 This option enables support for the IBM PPC405GP evaluation board.
54
55config XILINX_ML300
56 bool "Xilinx-ML300"
57 help
58 This option enables support for the Xilinx ML300 evaluation board.
59
60endchoice
61
62choice
63 prompt "Machine Type"
64 depends on 44x
65 default EBONY
66
67config BAMBOO
68 bool "Bamboo"
69 select WANT_EARLY_SERIAL
70 help
71 This option enables support for the IBM PPC440EP evaluation board.
72
73config EBONY
74 bool "Ebony"
75 select WANT_EARLY_SERIAL
76 help
77 This option enables support for the IBM PPC440GP evaluation board.
78
79config LUAN
80 bool "Luan"
81 select WANT_EARLY_SERIAL
82 help
83 This option enables support for the IBM PPC440SP evaluation board.
84
85config OCOTEA
86 bool "Ocotea"
87 select WANT_EARLY_SERIAL
88 help
89 This option enables support for the IBM PPC440GX evaluation board.
90
91endchoice
92
93config EP405PC
94 bool "EP405PC Support"
95 depends on EP405
96
97
98# It's often necessary to know the specific 4xx processor type.
 99# Fortunately, it is implied (so far) from the board type, so we
100# don't need to ask more redundant questions.
101config NP405H
102 bool
103 depends on ASH
104 default y
105
106config 440EP
107 bool
108 depends on BAMBOO
109 select PPC_FPU
110 default y
111
112config 440GP
113 bool
114 depends on EBONY
115 default y
116
117config 440GX
118 bool
119 depends on OCOTEA
120 default y
121
122config 440SP
123 bool
124 depends on LUAN
125 default y
126
127config 440
128 bool
129 depends on 440GP || 440SP || 440EP
130 default y
131
132config 440A
133 bool
134 depends on 440GX
135 default y
136
137config IBM440EP_ERR42
138 bool
139 depends on 440EP
140 default y
141
142# All 405-based cores up until the 405GPR and 405EP have this errata.
143config IBM405_ERR77
144 bool
145 depends on 40x && !403GCX && !405GPR && !405EP
146 default y
147
148# All 40x-based cores, up until the 405GPR and 405EP have this errata.
149config IBM405_ERR51
150 bool
151 depends on 40x && !405GPR && !405EP
152 default y
153
154config BOOKE
155 bool
156 depends on 44x
157 default y
158
159config IBM_OCP
160 bool
161 depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
162 default y
163
164config XILINX_OCP
165 bool
166 depends on XILINX_ML300
167 default y
168
169config IBM_EMAC4
170 bool
171 depends on 440GX || 440SP
172 default y
173
174config BIOS_FIXUP
175 bool
176 depends on BUBINGA || EP405 || SYCAMORE || WALNUT
177 default y
178
179# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
180config 403GCX
181 bool
182	depends on OAK
183 default y
184
185config 405EP
186 bool
187 depends on BUBINGA
188 default y
189
190config 405GP
191 bool
192 depends on CPCI405 || EP405 || WALNUT
193 default y
194
195config 405GPR
196 bool
197 depends on SYCAMORE
198 default y
199
200config VIRTEX_II_PRO
201 bool
202 depends on XILINX_ML300
203 default y
204
205config STB03xxx
206 bool
207 depends on REDWOOD_5 || REDWOOD_6
208 default y
209
210config EMBEDDEDBOOT
211 bool
212 depends on EP405 || XILINX_ML300
213 default y
214
215config IBM_OPENBIOS
216 bool
217 depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
218 default y
219
220config PPC4xx_DMA
221 bool "PPC4xx DMA controller support"
222 depends on 4xx
223
224config PPC4xx_EDMA
225 bool
226 depends on !STB03xxx && PPC4xx_DMA
227 default y
228
229config PPC_GEN550
230 bool
231 depends on 4xx
232 default y
233
234choice
235 prompt "TTYS0 device and default console"
236 depends on 40x
237 default UART0_TTYS0
238
239config UART0_TTYS0
240 bool "UART0"
241
242config UART0_TTYS1
243 bool "UART1"
244
245endchoice
246
247config SERIAL_SICC
248 bool "SICC Serial port support"
249 depends on STB03xxx
250
251config UART1_DFLT_CONSOLE
252 bool
253 depends on SERIAL_SICC && UART0_TTYS1
254 default y
255
256config SERIAL_SICC_CONSOLE
257 bool
258 depends on SERIAL_SICC && UART0_TTYS1
259 default y
260endmenu
261
262
263menu "IBM 40x options"
264 depends on 40x
265
266config SERIAL_SICC
267 bool "SICC Serial port"
268 depends on STB03xxx
269
270config UART1_DFLT_CONSOLE
271 bool
272 depends on SERIAL_SICC && UART0_TTYS1
273 default y
274
275config SERIAL_SICC_CONSOLE
276 bool
277 depends on SERIAL_SICC && UART0_TTYS1
278 default y
279
280endmenu
diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile
new file mode 100644
index 000000000000..79ff6b1e887c
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/Makefile
@@ -0,0 +1 @@
# empty makefile so make clean works \ No newline at end of file
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
new file mode 100644
index 000000000000..c5bc2821d991
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -0,0 +1,86 @@
1config 85xx
2 bool
3 depends on E500
4 default y
5
6config PPC_INDIRECT_PCI_BE
7 bool
8 depends on 85xx
9 default y
10
11menu "Freescale 85xx options"
12 depends on E500
13
14choice
15 prompt "Machine Type"
16 depends on 85xx
17 default MPC8540_ADS
18
19config MPC8540_ADS
20 bool "Freescale MPC8540 ADS"
21 help
22 This option enables support for the MPC 8540 ADS evaluation board.
23
24config MPC8548_CDS
25 bool "Freescale MPC8548 CDS"
26 help
 27	  This option enables support for the MPC8548 CDS evaluation board.
28
29config MPC8555_CDS
30 bool "Freescale MPC8555 CDS"
31 help
 32	  This option enables support for the MPC8555 CDS evaluation board.
33
34config MPC8560_ADS
35 bool "Freescale MPC8560 ADS"
36 help
37 This option enables support for the MPC 8560 ADS evaluation board.
38
39config SBC8560
40 bool "WindRiver PowerQUICC III SBC8560"
41 help
42 This option enables support for the WindRiver PowerQUICC III
43 SBC8560 board.
44
45config STX_GP3
46 bool "Silicon Turnkey Express GP3"
47 help
48 This option enables support for the Silicon Turnkey Express GP3
49 board.
50
51endchoice
52
53# It's often necessary to know the specific 85xx processor type.
54# Fortunately, it is implied (so far) from the board type, so we
55# don't need to ask more redundant questions.
56config MPC8540
57 bool
58 depends on MPC8540_ADS
59 default y
60
61config MPC8548
62 bool
63 depends on MPC8548_CDS
64 default y
65
66config MPC8555
67 bool
68 depends on MPC8555_CDS
69 default y
70
71config MPC8560
72 bool
73 depends on SBC8560 || MPC8560_ADS || STX_GP3
74 default y
75
76config 85xx_PCI2
 77	bool "Support for 2nd PCI host controller"
78 depends on MPC8555_CDS
79 default y
80
81config PPC_GEN550
82 bool
83 depends on MPC8540 || SBC8560 || MPC8555
84 default y
85
86endmenu
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
new file mode 100644
index 000000000000..6407197ffd89
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -0,0 +1 @@
# empty makefile so make clean works
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
new file mode 100644
index 000000000000..c8c0ba3cf8e8
--- /dev/null
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -0,0 +1,352 @@
1config FADS
2 bool
3
4choice
5 prompt "8xx Machine Type"
6 depends on 8xx
7 default RPXLITE
8
9config RPXLITE
10 bool "RPX-Lite"
11 ---help---
12 Single-board computers based around the PowerPC MPC8xx chips and
13 intended for embedded applications. The following types are
14 supported:
15
16 RPX-Lite:
17 Embedded Planet RPX Lite. PC104 form-factor SBC based on the MPC823.
18
19 RPX-Classic:
20 Embedded Planet RPX Classic Low-fat. Credit-card-size SBC based on
21 the MPC 860
22
23 BSE-IP:
24 Bright Star Engineering ip-Engine.
25
26 TQM823L:
27 TQM850L:
28 TQM855L:
29 TQM860L:
30 MPC8xx based family of mini modules, half credit card size,
31 up to 64 MB of RAM, 8 MB Flash, (Fast) Ethernet, 2 x serial ports,
32 2 x CAN bus interface, ...
33 Manufacturer: TQ Components, www.tq-group.de
34 Date of Release: October (?) 1999
35 End of Life: not yet :-)
36 URL:
37 - module: <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>
38 - starter kit: <http://www.denx.de/PDF/STK8xxLHWM201.pdf>
39 - images: <http://www.denx.de/embedded-ppc-en.html>
40
41 FPS850L:
42 FingerPrint Sensor System (based on TQM850L)
43 Manufacturer: IKENDI AG, <http://www.ikendi.com/>
44 Date of Release: November 1999
45 End of life: end 2000 ?
46 URL: see TQM850L
47
48 IVMS8:
49 MPC860 based board used in the "Integrated Voice Mail System",
50 Small Version (8 voice channels)
51 Manufacturer: Speech Design, <http://www.speech-design.de/>
52 Date of Release: December 2000 (?)
53 End of life: -
54 URL: <http://www.speech-design.de/>
55
56 IVML24:
57 MPC860 based board used in the "Integrated Voice Mail System",
58 Large Version (24 voice channels)
59 Manufacturer: Speech Design, <http://www.speech-design.de/>
60 Date of Release: March 2001 (?)
61 End of life: -
62 URL: <http://www.speech-design.de/>
63
64 HERMES:
65 Hermes-Pro ISDN/LAN router with integrated 8 x hub
66 Manufacturer: Multidata Gesellschaft fur Datentechnik und Informatik
67 <http://www.multidata.de/>
68 Date of Release: 2000 (?)
69 End of life: -
70 URL: <http://www.multidata.de/english/products/hpro.htm>
71
72 IP860:
73 VMEBus IP (Industry Pack) carrier board with MPC860
74 Manufacturer: MicroSys GmbH, <http://www.microsys.de/>
75 Date of Release: ?
76 End of life: -
77 URL: <http://www.microsys.de/html/ip860.html>
78
79 PCU_E:
80 PCU = Peripheral Controller Unit, Extended
81 Manufacturer: Siemens AG, ICN (Information and Communication Networks)
82 <http://www.siemens.de/page/1,3771,224315-1-999_2_226207-0,00.html>
83 Date of Release: April 2001
84 End of life: August 2001
85 URL: n. a.
86
87config RPXCLASSIC
88 bool "RPX-Classic"
89 help
90 The RPX-Classic is a single-board computer based on the Motorola
91 MPC860. It features 16MB of DRAM and a variable amount of flash,
92 I2C EEPROM, thermal monitoring, a PCMCIA slot, a DIP switch and two
93 LEDs. Variants with Ethernet ports exist. Say Y here to support it
94 directly.
95
96config BSEIP
97 bool "BSE-IP"
98 help
99 Say Y here to support the Bright Star Engineering ipEngine SBC.
100 This is a credit-card-sized device featuring a MPC823 processor,
101 26MB DRAM, 4MB flash, Ethernet, a 16K-gate FPGA, USB, an LCD/video
102 controller, and two RS232 ports.
103
104config MPC8XXFADS
105 bool "FADS"
106 select FADS
107
108config MPC86XADS
109 bool "MPC86XADS"
110 help
111 MPC86x Application Development System by Freescale Semiconductor.
112 The MPC86xADS is meant to serve as a platform for s/w and h/w
113 development around the MPC86X processor families.
114 select FADS
115
116config MPC885ADS
117 bool "MPC885ADS"
118 help
119 Freescale Semiconductor MPC885 Application Development System (ADS).
120 Also known as DUET.
121 The MPC885ADS is meant to serve as a platform for s/w and h/w
122 development around the MPC885 processor family.
123
124config TQM823L
125 bool "TQM823L"
126 help
127 Say Y here to support the TQM823L, one of an MPC8xx-based family of
128 mini SBCs (half credit-card size) from TQ Components first released
129 in late 1999. Technical references are at
130 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
131 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
132 <http://www.denx.de/embedded-ppc-en.html>.
133
134config TQM850L
135 bool "TQM850L"
136 help
137 Say Y here to support the TQM850L, one of an MPC8xx-based family of
138 mini SBCs (half credit-card size) from TQ Components first released
139 in late 1999. Technical references are at
140 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
141 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
142 <http://www.denx.de/embedded-ppc-en.html>.
143
144config TQM855L
145 bool "TQM855L"
146 help
147 Say Y here to support the TQM855L, one of an MPC8xx-based family of
148 mini SBCs (half credit-card size) from TQ Components first released
149 in late 1999. Technical references are at
150 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
151 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
152 <http://www.denx.de/embedded-ppc-en.html>.
153
154config TQM860L
155 bool "TQM860L"
156 help
157 Say Y here to support the TQM860L, one of an MPC8xx-based family of
158 mini SBCs (half credit-card size) from TQ Components first released
159 in late 1999. Technical references are at
160 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
161 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
162 <http://www.denx.de/embedded-ppc-en.html>.
163
164config FPS850L
165 bool "FPS850L"
166
167config IVMS8
168 bool "IVMS8"
169 help
170 Say Y here to support the Integrated Voice-Mail Small 8-channel SBC
171 from Speech Design, released March 2001. The manufacturer's website
172 is at <http://www.speech-design.de/>.
173
174config IVML24
175 bool "IVML24"
176 help
177 Say Y here to support the Integrated Voice-Mail Large 24-channel SBC
178 from Speech Design, released March 2001. The manufacturer's website
179 is at <http://www.speech-design.de/>.
180
181config HERMES_PRO
182 bool "HERMES"
183
184config IP860
185 bool "IP860"
186
187config LWMON
188 bool "LWMON"
189
190config PCU_E
191 bool "PCU_E"
192
193config CCM
194 bool "CCM"
195
196config LANTEC
197 bool "LANTEC"
198
199config MBX
200 bool "MBX"
201 help
202 MBX is a line of Motorola single-board computer based around the
203 MPC821 and MPC860 processors, and intended for embedded-controller
204 applications. Say Y here to support these boards directly.
205
206config WINCEPT
207 bool "WinCept"
208 help
209 The Wincept 100/110 is a Motorola single-board computer based on the
210 MPC821 PowerPC, introduced in 1998 and designed to be used in
211 thin-client machines. Say Y to support it directly.
212
213endchoice
214
215#
216# MPC8xx Communication options
217#
218
219menu "MPC8xx CPM Options"
220 depends on 8xx
221
222config SCC_ENET
223 bool "CPM SCC Ethernet"
224 depends on NET_ETHERNET
225 help
226 Enable Ethernet support via the Motorola MPC8xx serial
227 communications controller.
228
229choice
230 prompt "SCC used for Ethernet"
231 depends on SCC_ENET
232 default SCC1_ENET
233
234config SCC1_ENET
235 bool "SCC1"
236 help
237 Use MPC8xx serial communications controller 1 to drive Ethernet
238 (default).
239
240config SCC2_ENET
241 bool "SCC2"
242 help
243 Use MPC8xx serial communications controller 2 to drive Ethernet.
244
245config SCC3_ENET
246 bool "SCC3"
247 help
248 Use MPC8xx serial communications controller 3 to drive Ethernet.
249
250endchoice
251
252config FEC_ENET
253 bool "860T FEC Ethernet"
254 depends on NET_ETHERNET
255 help
256 Enable Ethernet support via the Fast Ethernet Controller (FCC) on
257 the Motorola MPC8260.
258
259config USE_MDIO
260 bool "Use MDIO for PHY configuration"
261 depends on FEC_ENET
262 help
263 On some boards the hardware configuration of the ethernet PHY can be
264 used without any software interaction over the MDIO interface, so
265 all MII code can be omitted. Say N here if unsure or if you don't
266 need link status reports.
267
268config FEC_AM79C874
269 bool "Support AMD79C874 PHY"
270 depends on USE_MDIO
271
272config FEC_LXT970
273 bool "Support LXT970 PHY"
274 depends on USE_MDIO
275
276config FEC_LXT971
277 bool "Support LXT971 PHY"
278 depends on USE_MDIO
279
280config FEC_QS6612
281 bool "Support QS6612 PHY"
282 depends on USE_MDIO
283
284config ENET_BIG_BUFFERS
285 bool "Use Big CPM Ethernet Buffers"
286 depends on SCC_ENET || FEC_ENET
287 help
288 Allocate large buffers for MPC8xx Ethernet. Increases throughput
289 and decreases the likelihood of dropped packets, but costs memory.
290
291config HTDMSOUND
292 bool "Embedded Planet HIOX Audio"
293 depends on SOUND=y
294
295# This doesn't really belong here, but it is convenient to ask
296# 8xx specific questions.
297comment "Generic MPC8xx Options"
298
299config 8xx_COPYBACK
300 bool "Copy-Back Data Cache (else Writethrough)"
301 help
302 Saying Y here will cause the cache on an MPC8xx processor to be used
303 in Copy-Back mode. If you say N here, it is used in Writethrough
304 mode.
305
306 If in doubt, say Y here.
307
308config 8xx_CPU6
309 bool "CPU6 Silicon Errata (860 Pre Rev. C)"
310 help
311 MPC860 CPUs, prior to Rev C have some bugs in the silicon, which
312 require workarounds for Linux (and most other OSes to work). If you
313 get a BUG() very early in boot, this might fix the problem. For
314 more details read the document entitled "MPC860 Family Device Errata
315 Reference" on Motorola's website. This option also incurs a
316 performance hit.
317
318 If in doubt, say N here.
319
320choice
321 prompt "Microcode patch selection"
322 default NO_UCODE_PATCH
323 help
324 Help not implemented yet, coming soon.
325
326config NO_UCODE_PATCH
327 bool "None"
328
329config USB_SOF_UCODE_PATCH
330 bool "USB SOF patch"
331 help
332 Help not implemented yet, coming soon.
333
334config I2C_SPI_UCODE_PATCH
335 bool "I2C/SPI relocation patch"
336 help
337 Help not implemented yet, coming soon.
338
339config I2C_SPI_SMC1_UCODE_PATCH
340 bool "I2C/SPI/SMC1 relocation patch"
341 help
342 Help not implemented yet, coming soon.
343
344endchoice
345
346config UCODE_PATCH
347 bool
348 default y
349 depends on !NO_UCODE_PATCH
350
351endmenu
352
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
new file mode 100644
index 000000000000..509622da5408
--- /dev/null
+++ b/arch/powerpc/platforms/Makefile
@@ -0,0 +1,11 @@
1ifeq ($(CONFIG_PPC_MERGE),y)
2obj-$(CONFIG_PPC_PMAC) += powermac/
3else
4ifeq ($(CONFIG_PPC64),y)
5obj-$(CONFIG_PPC_PMAC) += powermac/
6endif
7endif
8obj-$(CONFIG_4xx) += 4xx/
9obj-$(CONFIG_85xx) += 85xx/
10obj-$(CONFIG_PPC_PSERIES) += pseries/
11obj-$(CONFIG_PPC_ISERIES) += iseries/
diff --git a/arch/powerpc/platforms/apus/Kconfig b/arch/powerpc/platforms/apus/Kconfig
new file mode 100644
index 000000000000..6bde3bffed86
--- /dev/null
+++ b/arch/powerpc/platforms/apus/Kconfig
@@ -0,0 +1,130 @@
1
2config AMIGA
3 bool
4 depends on APUS
5 default y
6 help
7 This option enables support for the Amiga series of computers.
8
9config ZORRO
10 bool
11 depends on APUS
12 default y
13 help
14 This enables support for the Zorro bus in the Amiga. If you have
15 expansion cards in your Amiga that conform to the Amiga
16 AutoConfig(tm) specification, say Y, otherwise N. Note that even
17 expansion cards that do not fit in the Zorro slots but fit in e.g.
18 the CPU slot may fall in this category, so you have to say Y to let
19 Linux use these.
20
21config ABSTRACT_CONSOLE
22 bool
23 depends on APUS
24 default y
25
26config APUS_FAST_EXCEPT
27 bool
28 depends on APUS
29 default y
30
31config AMIGA_PCMCIA
32 bool "Amiga 1200/600 PCMCIA support"
33 depends on APUS && EXPERIMENTAL
34 help
35 Include support in the kernel for pcmcia on Amiga 1200 and Amiga
36 600. If you intend to use pcmcia cards say Y; otherwise say N.
37
38config AMIGA_BUILTIN_SERIAL
39 tristate "Amiga builtin serial support"
40 depends on APUS
41 help
42 If you want to use your Amiga's built-in serial port in Linux,
43 answer Y.
44
45 To compile this driver as a module, choose M here.
46
47config GVPIOEXT
48 tristate "GVP IO-Extender support"
49 depends on APUS
50 help
51 If you want to use a GVP IO-Extender serial card in Linux, say Y.
52 Otherwise, say N.
53
54config GVPIOEXT_LP
55 tristate "GVP IO-Extender parallel printer support"
56 depends on GVPIOEXT
57 help
58 Say Y to enable driving a printer from the parallel port on your
59 GVP IO-Extender card, N otherwise.
60
61config GVPIOEXT_PLIP
62 tristate "GVP IO-Extender PLIP support"
63 depends on GVPIOEXT
64 help
65 Say Y to enable doing IP over the parallel port on your GVP
66 IO-Extender card, N otherwise.
67
68config MULTIFACE_III_TTY
69 tristate "Multiface Card III serial support"
70 depends on APUS
71 help
72 If you want to use a Multiface III card's serial port in Linux,
73 answer Y.
74
75 To compile this driver as a module, choose M here.
76
77config A2232
78 tristate "Commodore A2232 serial support (EXPERIMENTAL)"
79 depends on EXPERIMENTAL && APUS
80 ---help---
81 This option supports the 2232 7-port serial card shipped with the
82 Amiga 2000 and other Zorro-bus machines, dating from 1989. At
83 a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
84 each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The
85 ports were connected with 8 pin DIN connectors on the card bracket,
86 for which 8 pin to DB25 adapters were supplied. The card also had
87 jumpers internally to toggle various pinning configurations.
88
89 This driver can be built as a module; but then "generic_serial"
90 will also be built as a module. This has to be loaded before
91 "ser_a2232". If you want to do this, answer M here.
92
93config WHIPPET_SERIAL
94 tristate "Hisoft Whippet PCMCIA serial support"
95 depends on AMIGA_PCMCIA
96 help
97 HiSoft has a web page at <http://www.hisoft.co.uk/>, but there
98 is no listing for the Whippet in their Amiga section.
99
100config APNE
101 tristate "PCMCIA NE2000 support"
102 depends on AMIGA_PCMCIA
103 help
104 If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
105 say N.
106
107 To compile this driver as a module, choose M here: the
108 module will be called apne.
109
110config SERIAL_CONSOLE
111 bool "Support for serial port console"
112 depends on APUS && (AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y)
113
114config HEARTBEAT
115 bool "Use power LED as a heartbeat"
116 depends on APUS
117 help
118 Use the power-on LED on your machine as a load meter. The exact
119 behavior is platform-dependent, but normally the flash frequency is
120 a hyperbolic function of the 5-minute load average.
121
122config PROC_HARDWARE
123 bool "/proc/hardware support"
124 depends on APUS
125
126source "drivers/zorro/Kconfig"
127
128config PCI_PERMEDIA
129 bool "PCI for Permedia2"
130 depends on !4xx && !8xx && APUS
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
new file mode 100644
index 000000000000..2d755b79d51f
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -0,0 +1,305 @@
1choice
2 prompt "Machine Type"
3 depends on EMBEDDED6xx
4
5config KATANA
6 bool "Artesyn-Katana"
7 help
8 Select KATANA if configuring an Artesyn KATANA 750i or 3750
9 cPCI board.
10
11config WILLOW
12 bool "Cogent-Willow"
13
14config CPCI690
15 bool "Force-CPCI690"
16 help
17 Select CPCI690 if configuring a Force CPCI690 cPCI board.
18
19config POWERPMC250
20 bool "Force-PowerPMC250"
21
22config CHESTNUT
23 bool "IBM 750FX Eval board or 750GX Eval board"
24 help
25 Select CHESTNUT if configuring an IBM 750FX Eval Board or a
26 IBM 750GX Eval board.
27
28config SPRUCE
29 bool "IBM-Spruce"
30
31config HDPU
32 bool "Sky-HDPU"
33 help
34 Select HDPU if configuring a Sky Computers Compute Blade.
35
36config HDPU_FEATURES
 37	depends on HDPU
38 tristate "HDPU-Features"
39 help
40 Select to enable HDPU enhanced features.
41
42config EV64260
43 bool "Marvell-EV64260BP"
44 help
45 Select EV64260 if configuring a Marvell (formerly Galileo)
46 EV64260BP Evaluation platform.
47
48config LOPEC
49 bool "Motorola-LoPEC"
50
51config MVME5100
52 bool "Motorola-MVME5100"
53
54config PPLUS
55 bool "Motorola-PowerPlus"
56
57config PRPMC750
58 bool "Motorola-PrPMC750"
59
60config PRPMC800
61 bool "Motorola-PrPMC800"
62
63config SANDPOINT
64 bool "Motorola-Sandpoint"
65 help
66 Select SANDPOINT if configuring for a Motorola Sandpoint X3
67 (any flavor).
68
69config RADSTONE_PPC7D
70 bool "Radstone Technology PPC7D board"
71
72config PAL4
73 bool "SBS-Palomar4"
74
75config GEMINI
76 bool "Synergy-Gemini"
77 depends on BROKEN
78 help
79 Select Gemini if configuring for a Synergy Microsystems' Gemini
80 series Single Board Computer. More information is available at:
81 <http://www.synergymicro.com/PressRel/97_10_15.html>.
82
83config EST8260
84 bool "EST8260"
85 ---help---
86 The EST8260 is a single-board computer manufactured by Wind River
87 Systems, Inc. (formerly Embedded Support Tools Corp.) and based on
88 the MPC8260. Wind River Systems has a website at
89 <http://www.windriver.com/>, but the EST8260 cannot be found on it
90 and has probably been discontinued or rebadged.
91
92config SBC82xx
93 bool "SBC82xx"
94 ---help---
95 SBC PowerQUICC II, single-board computer with MPC82xx CPU
96 Manufacturer: Wind River Systems, Inc.
97 Date of Release: May 2003
98 End of Life: -
99 URL: <http://www.windriver.com/>
100
101config SBS8260
102 bool "SBS8260"
103
104config RPX8260
105 bool "RPXSUPER"
106
107config TQM8260
108 bool "TQM8260"
109 ---help---
110 MPC8260 based module, little larger than credit card,
111 up to 128 MB global + 64 MB local RAM, 32 MB Flash,
112	  32 kB EEPROM, 256 kB L2 Cache, 10baseT + 100baseT Ethernet,
113 2 x serial ports, ...
114 Manufacturer: TQ Components, www.tq-group.de
115 Date of Release: June 2001
116 End of Life: not yet :-)
117 URL: <http://www.denx.de/PDF/TQM82xx_SPEC_Rev005.pdf>
118
119config ADS8272
120 bool "ADS8272"
121
122config PQ2FADS
123 bool "Freescale-PQ2FADS"
124 help
125 Select PQ2FADS if you wish to configure for a Freescale
126 PQ2FADS board (-VR or -ZU).
127
128config LITE5200
129 bool "Freescale LITE5200 / (IceCube)"
130 select PPC_MPC52xx
131 help
132 Support for the LITE5200 dev board for the MPC5200 from Freescale.
133 This is for the LITE5200 version 2.0 board. Don't know if it changes
134 much but it's only been tested on this board version. I think this
135 board is also known as IceCube.
136
137config MPC834x_SYS
138 bool "Freescale MPC834x SYS"
139 help
140 This option enables support for the MPC 834x SYS evaluation board.
141
142	  Be aware that PCI buses can only function when SYS board is plugged
143	  into the PIB (Platform IO Board) board from Freescale which provides
144	  3 PCI slots. The PIB's PCI initialization is the bootloader's
145	  responsibility.
146
147config EV64360
148 bool "Marvell-EV64360BP"
149 help
150 Select EV64360 if configuring a Marvell EV64360BP Evaluation
151 platform.
152endchoice
153
154config PQ2ADS
155 bool
156 depends on ADS8272
157 default y
158
159config TQM8xxL
160 bool
161 depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
162 default y
163
164config PPC_MPC52xx
165 bool
166
167config 8260
168 bool "CPM2 Support" if WILLOW
169 depends on 6xx
170 default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx || PQ2FADS
171 help
172 The MPC8260 is a typical embedded CPU made by Motorola. Selecting
173 this option means that you wish to build a kernel for a machine with
174 an 8260 class CPU.
175
176config 8272
177 bool
178 depends on 6xx
179 default y if ADS8272
180 select 8260
181 help
182 The MPC8272 CPM has a different internal dpram setup than other CPM2
183 devices
184
185config 83xx
186 bool
187 default y if MPC834x_SYS
188
189config MPC834x
190 bool
191 default y if MPC834x_SYS
192
193config CPM2
194 bool
195 depends on 8260 || MPC8560 || MPC8555
196 default y
197 help
198 The CPM2 (Communications Processor Module) is a coprocessor on
199 embedded CPUs made by Motorola. Selecting this option means that
200 you wish to build a kernel for a machine with a CPM2 coprocessor
201 on it (826x, 827x, 8560).
202
203config PPC_GEN550
204 bool
205 depends on SANDPOINT || SPRUCE || PPLUS || \
206 PRPMC750 || PRPMC800 || LOPEC || \
207 (EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
208 83xx
209 default y
210
211config FORCE
212 bool
213 depends on 6xx && POWERPMC250
214 default y
215
216config GT64260
217 bool
218 depends on EV64260 || CPCI690
219 default y
220
221config MV64360 # Really MV64360 & MV64460
222 bool
223 depends on CHESTNUT || KATANA || RADSTONE_PPC7D || HDPU || EV64360
224 default y
225
226config MV64X60
227 bool
228 depends on (GT64260 || MV64360)
229 default y
230
231menu "Set bridge options"
232 depends on MV64X60
233
234config NOT_COHERENT_CACHE
235 bool "Turn off Cache Coherency"
236 default n
237 help
238 Some 64x60 bridges lock up when trying to enforce cache coherency.
239 When this option is selected, cache coherency will be turned off.
240 Note that this can cause other problems (e.g., stale data being
241 speculatively loaded via a cached mapping). Use at your own risk.
242
243config MV64X60_BASE
244 hex "Set bridge base used by firmware"
245 default "0xf1000000"
246 help
247 A firmware can leave the base address of the bridge's registers at
248 a non-standard location. If so, set this value to reflect the
249 address of that non-standard location.
250
251config MV64X60_NEW_BASE
252 hex "Set bridge base used by kernel"
253 default "0xf1000000"
254 help
255 If the current base address of the bridge's registers is not where
256 you want it, set this value to the address that you want it moved to.
257
258endmenu
259
260config NONMONARCH_SUPPORT
261 bool "Enable Non-Monarch Support"
262 depends on PRPMC800
263
264config HARRIER
265 bool
266 depends on PRPMC800
267 default y
268
269config EPIC_SERIAL_MODE
270 bool
271 depends on 6xx && (LOPEC || SANDPOINT)
272 default y
273
274config MPC10X_BRIDGE
275 bool
276 depends on POWERPMC250 || LOPEC || SANDPOINT
277 default y
278
279config MPC10X_OPENPIC
280 bool
281 depends on POWERPMC250 || LOPEC || SANDPOINT
282 default y
283
284config MPC10X_STORE_GATHERING
285 bool "Enable MPC10x store gathering"
286 depends on MPC10X_BRIDGE
287
288config SANDPOINT_ENABLE_UART1
289 bool "Enable DUART mode on Sandpoint"
290 depends on SANDPOINT
291 help
292 If this option is enabled then the MPC824x processor will run
293 in DUART mode instead of UART mode.
294
295config HARRIER_STORE_GATHERING
296 bool "Enable Harrier store gathering"
297 depends on HARRIER
298
299config MVME5100_IPMC761_PRESENT
300 bool "MVME5100 configured with an IPMC761"
301 depends on MVME5100
302
303config SPRUCE_BAUD_33M
304 bool "Spruce baud clock support"
305 depends on SPRUCE
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
new file mode 100644
index 000000000000..3d957a30c8c2
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -0,0 +1,31 @@
1
2menu "iSeries device drivers"
3 depends on PPC_ISERIES
4
5config VIOCONS
6 tristate "iSeries Virtual Console Support"
7
8config VIODASD
9 tristate "iSeries Virtual I/O disk support"
10 help
11 If you are running on an iSeries system and you want to use
12 virtual disks created and managed by OS/400, say Y.
13
14config VIOCD
15 tristate "iSeries Virtual I/O CD support"
16 help
17 If you are running Linux on an IBM iSeries system and you want to
18 read a CD drive owned by OS/400, say Y here.
19
20config VIOTAPE
21 tristate "iSeries Virtual Tape Support"
22 help
23 If you are running Linux on an iSeries system and you want Linux
24 to read and/or write a tape drive owned by OS/400, say Y here.
25
26endmenu
27
28config VIOPATH
29 bool
30 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
31 default y
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
new file mode 100644
index 000000000000..127b465308be
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -0,0 +1,9 @@
1EXTRA_CFLAGS += -mno-minimal-toc
2
3obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \
4 hvcall.o proc.o htab.o iommu.o misc.o
5obj-$(CONFIG_PCI) += pci.o irq.o vpdinfo.o
6obj-$(CONFIG_IBMVIO) += vio.o
7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_VIOPATH) += viopath.o
9obj-$(CONFIG_MODULES) += ksyms.o
diff --git a/include/asm-ppc64/iSeries/HvCallHpt.h b/arch/powerpc/platforms/iseries/call_hpt.h
index 43a1969230b8..321f3bb7a8f5 100644
--- a/include/asm-ppc64/iSeries/HvCallHpt.h
+++ b/arch/powerpc/platforms/iseries/call_hpt.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvCallHpt.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _HVCALLHPT_H 18#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
20#define _HVCALLHPT_H 19#define _PLATFORMS_ISERIES_CALL_HPT_H
21 20
22/* 21/*
23 * This file contains the "hypervisor call" interface which is used to 22 * This file contains the "hypervisor call" interface which is used to
@@ -99,4 +98,4 @@ static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, hpte_t *hpte)
99 HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r); 98 HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
100} 99}
101 100
102#endif /* _HVCALLHPT_H */ 101#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
diff --git a/include/asm-ppc64/iSeries/HvCallPci.h b/arch/powerpc/platforms/iseries/call_pci.h
index c8d675c40f5e..a86e065b9577 100644
--- a/include/asm-ppc64/iSeries/HvCallPci.h
+++ b/arch/powerpc/platforms/iseries/call_pci.h
@@ -22,8 +22,8 @@
22 * Created, Jan 9, 2001 22 * Created, Jan 9, 2001
23 */ 23 */
24 24
25#ifndef _HVCALLPCI_H 25#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
26#define _HVCALLPCI_H 26#define _PLATFORMS_ISERIES_CALL_PCI_H
27 27
28#include <asm/iSeries/HvCallSc.h> 28#include <asm/iSeries/HvCallSc.h>
29#include <asm/iSeries/HvTypes.h> 29#include <asm/iSeries/HvTypes.h>
@@ -126,25 +126,6 @@ enum HvCallPci_VpdType {
126#define HvCallPciUnmaskInterrupts HvCallPci + 49 126#define HvCallPciUnmaskInterrupts HvCallPci + 49
127#define HvCallPciGetBusUnitInfo HvCallPci + 50 127#define HvCallPciGetBusUnitInfo HvCallPci + 50
128 128
129static inline u64 HvCallPci_configLoad8(u16 busNumber, u8 subBusNumber,
130 u8 deviceId, u32 offset, u8 *value)
131{
132 struct HvCallPci_DsaAddr dsa;
133 struct HvCallPci_LoadReturn retVal;
134
135 *((u64*)&dsa) = 0;
136
137 dsa.busNumber = busNumber;
138 dsa.subBusNumber = subBusNumber;
139 dsa.deviceId = deviceId;
140
141 HvCall3Ret16(HvCallPciConfigLoad8, &retVal, *(u64 *)&dsa, offset, 0);
142
143 *value = retVal.value;
144
145 return retVal.rc;
146}
147
148static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber, 129static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
149 u8 deviceId, u32 offset, u16 *value) 130 u8 deviceId, u32 offset, u16 *value)
150{ 131{
@@ -164,25 +145,6 @@ static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
164 return retVal.rc; 145 return retVal.rc;
165} 146}
166 147
167static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
168 u8 deviceId, u32 offset, u32 *value)
169{
170 struct HvCallPci_DsaAddr dsa;
171 struct HvCallPci_LoadReturn retVal;
172
173 *((u64*)&dsa) = 0;
174
175 dsa.busNumber = busNumber;
176 dsa.subBusNumber = subBusNumber;
177 dsa.deviceId = deviceId;
178
179 HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
180
181 *value = retVal.value;
182
183 return retVal.rc;
184}
185
186static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber, 148static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
187 u8 deviceId, u32 offset, u8 value) 149 u8 deviceId, u32 offset, u8 value)
188{ 150{
@@ -197,186 +159,6 @@ static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
197 return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0); 159 return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
198} 160}
199 161
200static inline u64 HvCallPci_configStore16(u16 busNumber, u8 subBusNumber,
201 u8 deviceId, u32 offset, u16 value)
202{
203 struct HvCallPci_DsaAddr dsa;
204
205 *((u64*)&dsa) = 0;
206
207 dsa.busNumber = busNumber;
208 dsa.subBusNumber = subBusNumber;
209 dsa.deviceId = deviceId;
210
211 return HvCall4(HvCallPciConfigStore16, *(u64 *)&dsa, offset, value, 0);
212}
213
214static inline u64 HvCallPci_configStore32(u16 busNumber, u8 subBusNumber,
215 u8 deviceId, u32 offset, u32 value)
216{
217 struct HvCallPci_DsaAddr dsa;
218
219 *((u64*)&dsa) = 0;
220
221 dsa.busNumber = busNumber;
222 dsa.subBusNumber = subBusNumber;
223 dsa.deviceId = deviceId;
224
225 return HvCall4(HvCallPciConfigStore32, *(u64 *)&dsa, offset, value, 0);
226}
227
228static inline u64 HvCallPci_barLoad8(u16 busNumberParm, u8 subBusParm,
229 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
230 u8 *valueParm)
231{
232 struct HvCallPci_DsaAddr dsa;
233 struct HvCallPci_LoadReturn retVal;
234
235 *((u64*)&dsa) = 0;
236
237 dsa.busNumber = busNumberParm;
238 dsa.subBusNumber = subBusParm;
239 dsa.deviceId = deviceIdParm;
240 dsa.barNumber = barNumberParm;
241
242 HvCall3Ret16(HvCallPciBarLoad8, &retVal, *(u64 *)&dsa, offsetParm, 0);
243
244 *valueParm = retVal.value;
245
246 return retVal.rc;
247}
248
249static inline u64 HvCallPci_barLoad16(u16 busNumberParm, u8 subBusParm,
250 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
251 u16 *valueParm)
252{
253 struct HvCallPci_DsaAddr dsa;
254 struct HvCallPci_LoadReturn retVal;
255
256 *((u64*)&dsa) = 0;
257
258 dsa.busNumber = busNumberParm;
259 dsa.subBusNumber = subBusParm;
260 dsa.deviceId = deviceIdParm;
261 dsa.barNumber = barNumberParm;
262
263 HvCall3Ret16(HvCallPciBarLoad16, &retVal, *(u64 *)&dsa, offsetParm, 0);
264
265 *valueParm = retVal.value;
266
267 return retVal.rc;
268}
269
270static inline u64 HvCallPci_barLoad32(u16 busNumberParm, u8 subBusParm,
271 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
272 u32 *valueParm)
273{
274 struct HvCallPci_DsaAddr dsa;
275 struct HvCallPci_LoadReturn retVal;
276
277 *((u64*)&dsa) = 0;
278
279 dsa.busNumber = busNumberParm;
280 dsa.subBusNumber = subBusParm;
281 dsa.deviceId = deviceIdParm;
282 dsa.barNumber = barNumberParm;
283
284 HvCall3Ret16(HvCallPciBarLoad32, &retVal, *(u64 *)&dsa, offsetParm, 0);
285
286 *valueParm = retVal.value;
287
288 return retVal.rc;
289}
290
291static inline u64 HvCallPci_barLoad64(u16 busNumberParm, u8 subBusParm,
292 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
293 u64 *valueParm)
294{
295 struct HvCallPci_DsaAddr dsa;
296 struct HvCallPci_LoadReturn retVal;
297
298 *((u64*)&dsa) = 0;
299
300 dsa.busNumber = busNumberParm;
301 dsa.subBusNumber = subBusParm;
302 dsa.deviceId = deviceIdParm;
303 dsa.barNumber = barNumberParm;
304
305 HvCall3Ret16(HvCallPciBarLoad64, &retVal, *(u64 *)&dsa, offsetParm, 0);
306
307 *valueParm = retVal.value;
308
309 return retVal.rc;
310}
311
312static inline u64 HvCallPci_barStore8(u16 busNumberParm, u8 subBusParm,
313 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
314 u8 valueParm)
315{
316 struct HvCallPci_DsaAddr dsa;
317
318 *((u64*)&dsa) = 0;
319
320 dsa.busNumber = busNumberParm;
321 dsa.subBusNumber = subBusParm;
322 dsa.deviceId = deviceIdParm;
323 dsa.barNumber = barNumberParm;
324
325 return HvCall4(HvCallPciBarStore8, *(u64 *)&dsa, offsetParm,
326 valueParm, 0);
327}
328
329static inline u64 HvCallPci_barStore16(u16 busNumberParm, u8 subBusParm,
330 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
331 u16 valueParm)
332{
333 struct HvCallPci_DsaAddr dsa;
334
335 *((u64*)&dsa) = 0;
336
337 dsa.busNumber = busNumberParm;
338 dsa.subBusNumber = subBusParm;
339 dsa.deviceId = deviceIdParm;
340 dsa.barNumber = barNumberParm;
341
342 return HvCall4(HvCallPciBarStore16, *(u64 *)&dsa, offsetParm,
343 valueParm, 0);
344}
345
346static inline u64 HvCallPci_barStore32(u16 busNumberParm, u8 subBusParm,
347 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
348 u32 valueParm)
349{
350 struct HvCallPci_DsaAddr dsa;
351
352 *((u64*)&dsa) = 0;
353
354 dsa.busNumber = busNumberParm;
355 dsa.subBusNumber = subBusParm;
356 dsa.deviceId = deviceIdParm;
357 dsa.barNumber = barNumberParm;
358
359 return HvCall4(HvCallPciBarStore32, *(u64 *)&dsa, offsetParm,
360 valueParm, 0);
361}
362
363static inline u64 HvCallPci_barStore64(u16 busNumberParm, u8 subBusParm,
364 u8 deviceIdParm, u8 barNumberParm, u64 offsetParm,
365 u64 valueParm)
366{
367 struct HvCallPci_DsaAddr dsa;
368
369 *((u64*)&dsa) = 0;
370
371 dsa.busNumber = busNumberParm;
372 dsa.subBusNumber = subBusParm;
373 dsa.deviceId = deviceIdParm;
374 dsa.barNumber = barNumberParm;
375
376 return HvCall4(HvCallPciBarStore64, *(u64 *)&dsa, offsetParm,
377 valueParm, 0);
378}
379
380static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm, 162static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
381 u8 deviceIdParm) 163 u8 deviceIdParm)
382{ 164{
@@ -437,20 +219,6 @@ static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
437 return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask); 219 return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
438} 220}
439 221
440static inline u64 HvCallPci_setSlotReset(u16 busNumberParm, u8 subBusParm,
441 u8 deviceIdParm, u64 onNotOff)
442{
443 struct HvCallPci_DsaAddr dsa;
444
445 *((u64*)&dsa) = 0;
446
447 dsa.busNumber = busNumberParm;
448 dsa.subBusNumber = subBusParm;
449 dsa.deviceId = deviceIdParm;
450
451 return HvCall2(HvCallPciSetSlotReset, *(u64*)&dsa, onNotOff);
452}
453
454static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm, 222static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
455 u8 deviceNumberParm, u64 parms, u32 sizeofParms) 223 u8 deviceNumberParm, u64 parms, u32 sizeofParms)
456{ 224{
@@ -519,15 +287,4 @@ static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
519 return xRc & 0xFFFF; 287 return xRc & 0xFFFF;
520} 288}
521 289
522static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm, 290#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
523 u16 sizeParm)
524{
525 u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
526 sizeParm, HvCallPci_BusAdapterVpd);
527 if (xRc == -1)
528 return -1;
529 else
530 return xRc & 0xFFFF;
531}
532
533#endif /* _HVCALLPCI_H */
diff --git a/include/asm-ppc64/iSeries/HvCallSm.h b/arch/powerpc/platforms/iseries/call_sm.h
index 8a3dbb071a43..ef223166cf22 100644
--- a/include/asm-ppc64/iSeries/HvCallSm.h
+++ b/arch/powerpc/platforms/iseries/call_sm.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvCallSm.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _HVCALLSM_H 18#ifndef _ISERIES_CALL_SM_H
20#define _HVCALLSM_H 19#define _ISERIES_CALL_SM_H
21 20
22/* 21/*
23 * This file contains the "hypervisor call" interface which is used to 22 * This file contains the "hypervisor call" interface which is used to
@@ -35,4 +34,4 @@ static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
35 return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap); 34 return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
36} 35}
37 36
38#endif /* _HVCALLSM_H */ 37#endif /* _ISERIES_CALL_SM_H */
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/powerpc/platforms/iseries/htab.c
index 073b76661747..b3c6c3374ca6 100644
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -1,10 +1,10 @@
1/* 1/*
2 * iSeries hashtable management. 2 * iSeries hashtable management.
3 * Derived from pSeries_htab.c 3 * Derived from pSeries_htab.c
4 * 4 *
5 * SMP scalability work: 5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM 6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -14,11 +14,13 @@
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/mmu_context.h> 16#include <asm/mmu_context.h>
17#include <asm/iSeries/HvCallHpt.h>
18#include <asm/abs_addr.h> 17#include <asm/abs_addr.h>
19#include <linux/spinlock.h> 18#include <linux/spinlock.h>
20 19
21static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED}; 20#include "call_hpt.h"
21
22static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp =
23 { [0 ... 63] = SPIN_LOCK_UNLOCKED};
22 24
23/* 25/*
24 * Very primitive algorithm for picking up a lock 26 * Very primitive algorithm for picking up a lock
@@ -84,6 +86,25 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
84 return (secondary << 3) | (slot & 7); 86 return (secondary << 3) | (slot & 7);
85} 87}
86 88
89long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
90 unsigned long va, unsigned long prpn, unsigned long vflags,
91 unsigned long rflags)
92{
93 long slot;
94 hpte_t lhpte;
95
96 slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
97
98 if (lhpte.v & HPTE_V_VALID) {
99 /* Bolt the existing HPTE */
100 HvCallHpt_setSwBits(slot, 0x10, 0);
101 HvCallHpt_setPp(slot, PP_RWXX);
102 return 0;
103 }
104
105 return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
106}
107
87static unsigned long iSeries_hpte_getword0(unsigned long slot) 108static unsigned long iSeries_hpte_getword0(unsigned long slot)
88{ 109{
89 hpte_t hpte; 110 hpte_t hpte;
@@ -107,7 +128,7 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
107 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset); 128 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
108 129
109 if (! (hpte_v & HPTE_V_BOLTED)) { 130 if (! (hpte_v & HPTE_V_BOLTED)) {
110 HvCallHpt_invalidateSetSwBitsGet(hpte_group + 131 HvCallHpt_invalidateSetSwBitsGet(hpte_group +
111 slot_offset, 0, 0); 132 slot_offset, 0, 0);
112 iSeries_hunlock(hpte_group); 133 iSeries_hunlock(hpte_group);
113 return i; 134 return i;
@@ -124,9 +145,9 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
124 145
125/* 146/*
126 * The HyperVisor expects the "flags" argument in this form: 147 * The HyperVisor expects the "flags" argument in this form:
127 * bits 0..59 : reserved 148 * bits 0..59 : reserved
128 * bit 60 : N 149 * bit 60 : N
129 * bits 61..63 : PP2,PP1,PP0 150 * bits 61..63 : PP2,PP1,PP0
130 */ 151 */
131static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp, 152static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
132 unsigned long va, int large, int local) 153 unsigned long va, int large, int local)
@@ -152,7 +173,7 @@ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
152} 173}
153 174
154/* 175/*
155 * Functions used to find the PTE for a particular virtual address. 176 * Functions used to find the PTE for a particular virtual address.
156 * Only used during boot when bolting pages. 177 * Only used during boot when bolting pages.
157 * 178 *
158 * Input : vpn : virtual page number 179 * Input : vpn : virtual page number
@@ -170,7 +191,7 @@ static long iSeries_hpte_find(unsigned long vpn)
170 * 0x00000000xxxxxxxx : Entry found in primary group, slot x 191 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
171 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x 192 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
172 */ 193 */
173 slot = HvCallHpt_findValid(&hpte, vpn); 194 slot = HvCallHpt_findValid(&hpte, vpn);
174 if (hpte.v & HPTE_V_VALID) { 195 if (hpte.v & HPTE_V_VALID) {
175 if (slot < 0) { 196 if (slot < 0) {
176 slot &= 0x7fffffffffffffff; 197 slot &= 0x7fffffffffffffff;
@@ -197,7 +218,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
197 vsid = get_kernel_vsid(ea); 218 vsid = get_kernel_vsid(ea);
198 va = (vsid << 28) | (ea & 0x0fffffff); 219 va = (vsid << 28) | (ea & 0x0fffffff);
199 vpn = va >> PAGE_SHIFT; 220 vpn = va >> PAGE_SHIFT;
200 slot = iSeries_hpte_find(vpn); 221 slot = iSeries_hpte_find(vpn);
201 if (slot == -1) 222 if (slot == -1)
202 panic("updateboltedpp: Could not find page to bolt\n"); 223 panic("updateboltedpp: Could not find page to bolt\n");
203 HvCallHpt_setPp(slot, newpp); 224 HvCallHpt_setPp(slot, newpp);
@@ -215,7 +236,7 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
215 iSeries_hlock(slot); 236 iSeries_hlock(slot);
216 237
217 hpte_v = iSeries_hpte_getword0(slot); 238 hpte_v = iSeries_hpte_getword0(slot);
218 239
219 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID)) 240 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
220 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0); 241 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
221 242
@@ -230,7 +251,7 @@ void hpte_init_iSeries(void)
230 ppc_md.hpte_updatepp = iSeries_hpte_updatepp; 251 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
231 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp; 252 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
232 ppc_md.hpte_insert = iSeries_hpte_insert; 253 ppc_md.hpte_insert = iSeries_hpte_insert;
233 ppc_md.hpte_remove = iSeries_hpte_remove; 254 ppc_md.hpte_remove = iSeries_hpte_remove;
234 255
235 htab_finish_init(); 256 htab_finish_init();
236} 257}
diff --git a/arch/ppc64/kernel/hvCall.S b/arch/powerpc/platforms/iseries/hvcall.S
index 4c699eab1b95..07ae6ad5f49f 100644
--- a/arch/ppc64/kernel/hvCall.S
+++ b/arch/powerpc/platforms/iseries/hvcall.S
@@ -1,7 +1,4 @@
1/* 1/*
2 * arch/ppc64/kernel/hvCall.S
3 *
4 *
5 * This file contains the code to perform calls to the 2 * This file contains the code to perform calls to the
6 * iSeries LPAR hypervisor 3 * iSeries LPAR hypervisor
7 * 4 *
@@ -13,15 +10,16 @@
13 10
14#include <asm/ppc_asm.h> 11#include <asm/ppc_asm.h>
15#include <asm/processor.h> 12#include <asm/processor.h>
13#include <asm/ptrace.h> /* XXX for STACK_FRAME_OVERHEAD */
16 14
17 .text 15 .text
18 16
19/* 17/*
20 * Hypervisor call 18 * Hypervisor call
21 * 19 *
22 * Invoke the iSeries hypervisor via the System Call instruction 20 * Invoke the iSeries hypervisor via the System Call instruction
23 * Parameters are passed to this routine in registers r3 - r10 21 * Parameters are passed to this routine in registers r3 - r10
24 * 22 *
25 * r3 contains the HV function to be called 23 * r3 contains the HV function to be called
26 * r4-r10 contain the operands to the hypervisor function 24 * r4-r10 contain the operands to the hypervisor function
27 * 25 *
@@ -41,11 +39,11 @@ _GLOBAL(HvCall7)
41 mfcr r0 39 mfcr r0
42 std r0,-8(r1) 40 std r0,-8(r1)
43 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1) 41 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
44 42
45 /* r0 = 0xffffffffffffffff indicates a hypervisor call */ 43 /* r0 = 0xffffffffffffffff indicates a hypervisor call */
46 44
47 li r0,-1 45 li r0,-1
48 46
49 /* Invoke the hypervisor */ 47 /* Invoke the hypervisor */
50 48
51 sc 49 sc
@@ -55,7 +53,7 @@ _GLOBAL(HvCall7)
55 mtcrf 0xff,r0 53 mtcrf 0xff,r0
56 54
57 /* return to caller, return value in r3 */ 55 /* return to caller, return value in r3 */
58 56
59 blr 57 blr
60 58
61_GLOBAL(HvCall0Ret16) 59_GLOBAL(HvCall0Ret16)
@@ -92,7 +90,5 @@ _GLOBAL(HvCall7Ret16)
92 ld r0,-8(r1) 90 ld r0,-8(r1)
93 mtcrf 0xff,r0 91 mtcrf 0xff,r0
94 ld r31,-16(r1) 92 ld r31,-16(r1)
95
96 blr
97
98 93
94 blr
diff --git a/arch/ppc64/kernel/HvCall.c b/arch/powerpc/platforms/iseries/hvlog.c
index b772e65b57a2..f61e2e9ac9ec 100644
--- a/arch/ppc64/kernel/HvCall.c
+++ b/arch/powerpc/platforms/iseries/hvlog.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvCall.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
diff --git a/arch/ppc64/kernel/HvLpConfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c
index cb1d6473203c..dc28621aea0d 100644
--- a/arch/ppc64/kernel/HvLpConfig.c
+++ b/arch/powerpc/platforms/iseries/hvlpconfig.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvLpConfig.c
3 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation 2 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
diff --git a/arch/ppc64/kernel/iSeries_iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index f8ff1bb054dc..1db26d8be640 100644
--- a/arch/ppc64/kernel/iSeries_iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * arch/ppc64/kernel/iSeries_iommu.c
3 *
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
5 * 3 *
6 * Rewrite, cleanup: 4 * Rewrite, cleanup:
@@ -30,9 +28,11 @@
30#include <linux/list.h> 28#include <linux/list.h>
31 29
32#include <asm/iommu.h> 30#include <asm/iommu.h>
31#include <asm/tce.h>
33#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/abs_addr.h>
34#include <asm/pci-bridge.h>
34#include <asm/iSeries/HvCallXm.h> 35#include <asm/iSeries/HvCallXm.h>
35#include <asm/iSeries/iSeries_pci.h>
36 36
37extern struct list_head iSeries_Global_Device_List; 37extern struct list_head iSeries_Global_Device_List;
38 38
@@ -90,15 +90,16 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
90 */ 90 */
91static struct iommu_table *iommu_table_find(struct iommu_table * tbl) 91static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
92{ 92{
93 struct iSeries_Device_Node *dp; 93 struct pci_dn *pdn;
94 94
95 list_for_each_entry(dp, &iSeries_Global_Device_List, Device_List) { 95 list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
96 if ((dp->iommu_table != NULL) && 96 struct iommu_table *it = pdn->iommu_table;
97 (dp->iommu_table->it_type == TCE_PCI) && 97 if ((it != NULL) &&
98 (dp->iommu_table->it_offset == tbl->it_offset) && 98 (it->it_type == TCE_PCI) &&
99 (dp->iommu_table->it_index == tbl->it_index) && 99 (it->it_offset == tbl->it_offset) &&
100 (dp->iommu_table->it_size == tbl->it_size)) 100 (it->it_index == tbl->it_index) &&
101 return dp->iommu_table; 101 (it->it_size == tbl->it_size))
102 return it;
102 } 103 }
103 return NULL; 104 return NULL;
104} 105}
@@ -112,7 +113,7 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
112 * 2. TCE table per Bus. 113 * 2. TCE table per Bus.
113 * 3. TCE Table per IOA. 114 * 3. TCE Table per IOA.
114 */ 115 */
115static void iommu_table_getparms(struct iSeries_Device_Node* dn, 116static void iommu_table_getparms(struct pci_dn *pdn,
116 struct iommu_table* tbl) 117 struct iommu_table* tbl)
117{ 118{
118 struct iommu_table_cb *parms; 119 struct iommu_table_cb *parms;
@@ -123,11 +124,11 @@ static void iommu_table_getparms(struct iSeries_Device_Node* dn,
123 124
124 memset(parms, 0, sizeof(*parms)); 125 memset(parms, 0, sizeof(*parms));
125 126
126 parms->itc_busno = ISERIES_BUS(dn); 127 parms->itc_busno = pdn->busno;
127 parms->itc_slotno = dn->LogicalSlot; 128 parms->itc_slotno = pdn->LogicalSlot;
128 parms->itc_virtbus = 0; 129 parms->itc_virtbus = 0;
129 130
130 HvCallXm_getTceTableParms(ISERIES_HV_ADDR(parms)); 131 HvCallXm_getTceTableParms(iseries_hv_addr(parms));
131 132
132 if (parms->itc_size == 0) 133 if (parms->itc_size == 0)
133 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); 134 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
@@ -144,18 +145,19 @@ static void iommu_table_getparms(struct iSeries_Device_Node* dn,
144} 145}
145 146
146 147
147void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn) 148void iommu_devnode_init_iSeries(struct device_node *dn)
148{ 149{
149 struct iommu_table *tbl; 150 struct iommu_table *tbl;
151 struct pci_dn *pdn = PCI_DN(dn);
150 152
151 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 153 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
152 154
153 iommu_table_getparms(dn, tbl); 155 iommu_table_getparms(pdn, tbl);
154 156
155 /* Look for existing tce table */ 157 /* Look for existing tce table */
156 dn->iommu_table = iommu_table_find(tbl); 158 pdn->iommu_table = iommu_table_find(tbl);
157 if (dn->iommu_table == NULL) 159 if (pdn->iommu_table == NULL)
158 dn->iommu_table = iommu_init_table(tbl); 160 pdn->iommu_table = iommu_init_table(tbl);
159 else 161 else
160 kfree(tbl); 162 kfree(tbl);
161} 163}
diff --git a/include/asm-ppc64/iSeries/ItIplParmsReal.h b/arch/powerpc/platforms/iseries/ipl_parms.h
index ae3417dc599e..77c135ddbf1b 100644
--- a/include/asm-ppc64/iSeries/ItIplParmsReal.h
+++ b/arch/powerpc/platforms/iseries/ipl_parms.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItIplParmsReal.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _ITIPLPARMSREAL_H 18#ifndef _ISERIES_IPL_PARMS_H
20#define _ITIPLPARMSREAL_H 19#define _ISERIES_IPL_PARMS_H
21 20
22/* 21/*
23 * This struct maps the IPL Parameters DMA'd from the SP. 22 * This struct maps the IPL Parameters DMA'd from the SP.
@@ -68,4 +67,4 @@ struct ItIplParmsReal {
68 67
69extern struct ItIplParmsReal xItIplParmsReal; 68extern struct ItIplParmsReal xItIplParmsReal;
70 69
71#endif /* _ITIPLPARMSREAL_H */ 70#endif /* _ISERIES_IPL_PARMS_H */
diff --git a/arch/ppc64/kernel/iSeries_irq.c b/arch/powerpc/platforms/iseries/irq.c
index 77376c1bd611..937ac99b9d33 100644
--- a/arch/ppc64/kernel/iSeries_irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -38,9 +38,10 @@
38#include <asm/ppcdebug.h> 38#include <asm/ppcdebug.h>
39#include <asm/iSeries/HvTypes.h> 39#include <asm/iSeries/HvTypes.h>
40#include <asm/iSeries/HvLpEvent.h> 40#include <asm/iSeries/HvLpEvent.h>
41#include <asm/iSeries/HvCallPci.h>
42#include <asm/iSeries/HvCallXm.h> 41#include <asm/iSeries/HvCallXm.h>
43#include <asm/iSeries/iSeries_irq.h> 42
43#include "irq.h"
44#include "call_pci.h"
44 45
45/* This maps virtual irq numbers to real irqs */ 46/* This maps virtual irq numbers to real irqs */
46unsigned int virt_irq_to_real_map[NR_IRQS]; 47unsigned int virt_irq_to_real_map[NR_IRQS];
@@ -351,3 +352,15 @@ int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
351 irq_desc[virtirq].handler = &iSeries_IRQ_handler; 352 irq_desc[virtirq].handler = &iSeries_IRQ_handler;
352 return virtirq; 353 return virtirq;
353} 354}
355
356int virt_irq_create_mapping(unsigned int real_irq)
357{
358 BUG(); /* Don't call this on iSeries, yet */
359
360 return 0;
361}
362
363void virt_irq_init(void)
364{
365 return;
366}
diff --git a/include/asm-ppc64/iSeries/iSeries_irq.h b/arch/powerpc/platforms/iseries/irq.h
index 6c9767ac1302..5f643f16ecc0 100644
--- a/include/asm-ppc64/iSeries/iSeries_irq.h
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -1,8 +1,8 @@
1#ifndef __ISERIES_IRQ_H__ 1#ifndef _ISERIES_IRQ_H
2#define __ISERIES_IRQ_H__ 2#define _ISERIES_IRQ_H
3 3
4extern void iSeries_init_IRQ(void); 4extern void iSeries_init_IRQ(void);
5extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId); 5extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
6extern void iSeries_activate_IRQs(void); 6extern void iSeries_activate_IRQs(void);
7 7
8#endif /* __ISERIES_IRQ_H__ */ 8#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
new file mode 100644
index 000000000000..f271b3539721
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/ksyms.c
@@ -0,0 +1,27 @@
1/*
2 * (C) 2001-2005 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/module.h>
10
11#include <asm/hw_irq.h>
12#include <asm/iSeries/HvCallSc.h>
13
14EXPORT_SYMBOL(HvCall0);
15EXPORT_SYMBOL(HvCall1);
16EXPORT_SYMBOL(HvCall2);
17EXPORT_SYMBOL(HvCall3);
18EXPORT_SYMBOL(HvCall4);
19EXPORT_SYMBOL(HvCall5);
20EXPORT_SYMBOL(HvCall6);
21EXPORT_SYMBOL(HvCall7);
22
23#ifdef CONFIG_SMP
24EXPORT_SYMBOL(local_get_flags);
25EXPORT_SYMBOL(local_irq_disable);
26EXPORT_SYMBOL(local_irq_restore);
27#endif
diff --git a/arch/ppc64/kernel/LparData.c b/arch/powerpc/platforms/iseries/lpardata.c
index 0a9c23ca2f0c..ed2ffee6f731 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/powerpc/platforms/iseries/lpardata.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright 2001 Mike Corrigan, IBM Corp 2 * Copyright 2001 Mike Corrigan, IBM Corp
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
@@ -19,18 +19,18 @@
19#include <asm/lppaca.h> 19#include <asm/lppaca.h>
20#include <asm/iSeries/ItLpRegSave.h> 20#include <asm/iSeries/ItLpRegSave.h>
21#include <asm/paca.h> 21#include <asm/paca.h>
22#include <asm/iSeries/HvReleaseData.h>
23#include <asm/iSeries/LparMap.h> 22#include <asm/iSeries/LparMap.h>
24#include <asm/iSeries/ItVpdAreas.h>
25#include <asm/iSeries/ItIplParmsReal.h>
26#include <asm/iSeries/ItExtVpdPanel.h> 23#include <asm/iSeries/ItExtVpdPanel.h>
27#include <asm/iSeries/ItLpQueue.h> 24#include <asm/iSeries/ItLpQueue.h>
28#include <asm/iSeries/IoHriProcessorVpd.h>
29#include <asm/iSeries/ItSpCommArea.h>
30 25
26#include "vpd_areas.h"
27#include "spcomm_area.h"
28#include "ipl_parms.h"
29#include "processor_vpd.h"
30#include "release_data.h"
31 31
32/* The HvReleaseData is the root of the information shared between 32/* The HvReleaseData is the root of the information shared between
33 * the hypervisor and Linux. 33 * the hypervisor and Linux.
34 */ 34 */
35struct HvReleaseData hvReleaseData = { 35struct HvReleaseData hvReleaseData = {
36 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */ 36 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
@@ -79,7 +79,7 @@ extern void trap_0e_iSeries(void);
79extern void performance_monitor_iSeries(void); 79extern void performance_monitor_iSeries(void);
80extern void data_access_slb_iSeries(void); 80extern void data_access_slb_iSeries(void);
81extern void instruction_access_slb_iSeries(void); 81extern void instruction_access_slb_iSeries(void);
82 82
83struct ItLpNaca itLpNaca = { 83struct ItLpNaca itLpNaca = {
84 .xDesc = 0xd397d581, /* "LpNa" ebcdic */ 84 .xDesc = 0xd397d581, /* "LpNa" ebcdic */
85 .xSize = 0x0400, /* size of ItLpNaca */ 85 .xSize = 0x0400, /* size of ItLpNaca */
@@ -106,7 +106,7 @@ struct ItLpNaca itLpNaca = {
106 .xLoadAreaChunks = 0, /* chunks for load area */ 106 .xLoadAreaChunks = 0, /* chunks for load area */
107 .xPaseSysCallCRMask = 0, /* PASE mask */ 107 .xPaseSysCallCRMask = 0, /* PASE mask */
108 .xSlicSegmentTablePtr = 0, /* seg table */ 108 .xSlicSegmentTablePtr = 0, /* seg table */
109 .xOldLpQueue = { 0 }, /* Old LP Queue */ 109 .xOldLpQueue = { 0 }, /* Old LP Queue */
110 .xInterruptHdlr = { 110 .xInterruptHdlr = {
111 (u64)system_reset_iSeries, /* 0x100 System Reset */ 111 (u64)system_reset_iSeries, /* 0x100 System Reset */
112 (u64)machine_check_iSeries, /* 0x200 Machine Check */ 112 (u64)machine_check_iSeries, /* 0x200 Machine Check */
@@ -134,7 +134,7 @@ struct ItLpNaca itLpNaca = {
134EXPORT_SYMBOL(itLpNaca); 134EXPORT_SYMBOL(itLpNaca);
135 135
136/* May be filled in by the hypervisor so cannot end up in the BSS */ 136/* May be filled in by the hypervisor so cannot end up in the BSS */
137struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data"))); 137struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
138 138
139/* May be filled in by the hypervisor so cannot end up in the BSS */ 139/* May be filled in by the hypervisor so cannot end up in the BSS */
140struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data"))); 140struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
@@ -151,7 +151,7 @@ struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
151 .xPVR = 0x3600 151 .xPVR = 0x3600
152 } 152 }
153}; 153};
154 154
155/* Space for Main Store Vpd 27,200 bytes */ 155/* Space for Main Store Vpd 27,200 bytes */
156/* May be filled in by the hypervisor so cannot end up in the BSS */ 156/* May be filled in by the hypervisor so cannot end up in the BSS */
157u64 xMsVpd[3400] __attribute__((__section__(".data"))); 157u64 xMsVpd[3400] __attribute__((__section__(".data")));
@@ -197,7 +197,7 @@ struct ItVpdAreas itVpdAreas = {
197 26992, /* 7 length of MS VPD */ 197 26992, /* 7 length of MS VPD */
198 0, /* 8 */ 198 0, /* 8 */
199 sizeof(struct ItLpNaca),/* 9 length of LP Naca */ 199 sizeof(struct ItLpNaca),/* 9 length of LP Naca */
200 0, /* 10 */ 200 0, /* 10 */
201 256, /* 11 length of Recovery Log Buf */ 201 256, /* 11 length of Recovery Log Buf */
202 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */ 202 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
203 0,0,0, /* 13 - 15 */ 203 0,0,0, /* 13 - 15 */
@@ -207,7 +207,7 @@ struct ItVpdAreas itVpdAreas = {
207 0,0 /* 24 - 25 */ 207 0,0 /* 24 - 25 */
208 }, 208 },
209 .xSlicVpdAdrs = { /* VPD addresses */ 209 .xSlicVpdAdrs = { /* VPD addresses */
210 0,0,0, /* 0 - 2 */ 210 0,0,0, /* 0 - 2 */
211 &xItExtVpdPanel, /* 3 Extended VPD */ 211 &xItExtVpdPanel, /* 3 Extended VPD */
212 &paca[0], /* 4 first Paca */ 212 &paca[0], /* 4 first Paca */
213 0, /* 5 */ 213 0, /* 5 */
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/powerpc/platforms/iseries/lpevents.c
index 4231861288a3..f8b4155b0481 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItLpQueue.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -14,11 +13,14 @@
14#include <linux/bootmem.h> 13#include <linux/bootmem.h>
15#include <linux/seq_file.h> 14#include <linux/seq_file.h>
16#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/module.h>
17
17#include <asm/system.h> 18#include <asm/system.h>
18#include <asm/paca.h> 19#include <asm/paca.h>
19#include <asm/iSeries/ItLpQueue.h> 20#include <asm/iSeries/ItLpQueue.h>
20#include <asm/iSeries/HvLpEvent.h> 21#include <asm/iSeries/HvLpEvent.h>
21#include <asm/iSeries/HvCallEvent.h> 22#include <asm/iSeries/HvCallEvent.h>
23#include <asm/iSeries/ItLpNaca.h>
22 24
23/* 25/*
24 * The LpQueue is used to pass event data from the hypervisor to 26 * The LpQueue is used to pass event data from the hypervisor to
@@ -43,7 +45,8 @@ static char *event_types[HvLpEvent_Type_NumTypes] = {
43}; 45};
44 46
45/* Array of LpEvent handler functions */ 47/* Array of LpEvent handler functions */
46extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes]; 48static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
49static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
47 50
48static struct HvLpEvent * get_next_hvlpevent(void) 51static struct HvLpEvent * get_next_hvlpevent(void)
49{ 52{
@@ -199,6 +202,70 @@ void setup_hvlpevent_queue(void)
199 hvlpevent_queue.xIndex = 0; 202 hvlpevent_queue.xIndex = 0;
200} 203}
201 204
205/* Register a handler for an LpEvent type */
206int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
207{
208 if (eventType < HvLpEvent_Type_NumTypes) {
209 lpEventHandler[eventType] = handler;
210 return 0;
211 }
212 return 1;
213}
214EXPORT_SYMBOL(HvLpEvent_registerHandler);
215
216int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
217{
218 might_sleep();
219
220 if (eventType < HvLpEvent_Type_NumTypes) {
221 if (!lpEventHandlerPaths[eventType]) {
222 lpEventHandler[eventType] = NULL;
223 /*
224 * We now sleep until all other CPUs have scheduled.
225 * This ensures that the deletion is seen by all
226 * other CPUs, and that the deleted handler isn't
227 * still running on another CPU when we return.
228 */
229 synchronize_rcu();
230 return 0;
231 }
232 }
233 return 1;
234}
235EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
236
237/*
238 * lpIndex is the partition index of the target partition.
239 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
240 * indicates to use our partition index - for the other types.
241 */
242int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
243{
244 if ((eventType < HvLpEvent_Type_NumTypes) &&
245 lpEventHandler[eventType]) {
246 if (lpIndex == 0)
247 lpIndex = itLpNaca.xLpIndex;
248 HvCallEvent_openLpEventPath(lpIndex, eventType);
249 ++lpEventHandlerPaths[eventType];
250 return 0;
251 }
252 return 1;
253}
254
255int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
256{
257 if ((eventType < HvLpEvent_Type_NumTypes) &&
258 lpEventHandler[eventType] &&
259 lpEventHandlerPaths[eventType]) {
260 if (lpIndex == 0)
261 lpIndex = itLpNaca.xLpIndex;
262 HvCallEvent_closeLpEventPath(lpIndex, eventType);
263 --lpEventHandlerPaths[eventType];
264 return 0;
265 }
266 return 1;
267}
268
202static int proc_lpevents_show(struct seq_file *m, void *v) 269static int proc_lpevents_show(struct seq_file *m, void *v)
203{ 270{
204 int cpu, i; 271 int cpu, i;
diff --git a/include/asm-ppc64/iSeries/IoHriMainStore.h b/arch/powerpc/platforms/iseries/main_store.h
index 45ed3ea67d06..74f6889f834f 100644
--- a/include/asm-ppc64/iSeries/IoHriMainStore.h
+++ b/arch/powerpc/platforms/iseries/main_store.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * IoHriMainStore.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -17,8 +16,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19 18
20#ifndef _IOHRIMAINSTORE_H 19#ifndef _ISERIES_MAIN_STORE_H
21#define _IOHRIMAINSTORE_H 20#define _ISERIES_MAIN_STORE_H
22 21
23/* Main Store Vpd for Condor,iStar,sStar */ 22/* Main Store Vpd for Condor,iStar,sStar */
24struct IoHriMainStoreSegment4 { 23struct IoHriMainStoreSegment4 {
@@ -163,4 +162,4 @@ struct IoHriMainStoreSegment5 {
163 162
164extern u64 xMsVpd[]; 163extern u64 xMsVpd[];
165 164
166#endif /* _IOHRIMAINSTORE_H */ 165#endif /* _ISERIES_MAIN_STORE_H */
diff --git a/arch/ppc64/kernel/mf.c b/arch/powerpc/platforms/iseries/mf.c
index ef4a338ebd01..e5de31aa0015 100644
--- a/arch/ppc64/kernel/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1,29 +1,28 @@
1/* 1/*
2 * mf.c 2 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
3 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation 3 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
4 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation 4 *
5 * 5 * This modules exists as an interface between a Linux secondary partition
6 * This modules exists as an interface between a Linux secondary partition 6 * running on an iSeries and the primary partition's Virtual Service
7 * running on an iSeries and the primary partition's Virtual Service 7 * Processor (VSP) object. The VSP has final authority over powering on/off
8 * Processor (VSP) object. The VSP has final authority over powering on/off 8 * all partitions in the iSeries. It also provides miscellaneous low-level
9 * all partitions in the iSeries. It also provides miscellaneous low-level 9 * machine facility type operations.
10 * machine facility type operations. 10 *
11 * 11 *
12 * 12 * This program is free software; you can redistribute it and/or modify
13 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by
14 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or
15 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version.
16 * (at your option) any later version. 16 *
17 * 17 * This program is distributed in the hope that it will be useful,
18 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details.
21 * GNU General Public License for more details. 21 *
22 * 22 * You should have received a copy of the GNU General Public License
23 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software
24 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 */
26 */
27 26
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/errno.h> 28#include <linux/errno.h>
@@ -33,15 +32,21 @@
33#include <linux/delay.h> 32#include <linux/delay.h>
34#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
35#include <linux/bcd.h> 34#include <linux/bcd.h>
35#include <linux/rtc.h>
36 36
37#include <asm/time.h> 37#include <asm/time.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <asm/paca.h> 39#include <asm/paca.h>
40#include <asm/abs_addr.h>
40#include <asm/iSeries/vio.h> 41#include <asm/iSeries/vio.h>
41#include <asm/iSeries/mf.h> 42#include <asm/iSeries/mf.h>
42#include <asm/iSeries/HvLpConfig.h> 43#include <asm/iSeries/HvLpConfig.h>
43#include <asm/iSeries/ItLpQueue.h> 44#include <asm/iSeries/ItLpQueue.h>
44 45
46#include "setup.h"
47
48extern int piranha_simulator;
49
45/* 50/*
46 * This is the structure layout for the Machine Facilites LPAR event 51 * This is the structure layout for the Machine Facilites LPAR event
47 * flows. 52 * flows.
@@ -1061,10 +1066,10 @@ static void mf_getSrcHistory(char *buffer, int size)
1061 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); 1066 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
1062 ev->event.data.vsp_cmd.result_code = 0xFF; 1067 ev->event.data.vsp_cmd.result_code = 0xFF;
1063 ev->event.data.vsp_cmd.reserved = 0; 1068 ev->event.data.vsp_cmd.reserved = 0;
1064 ev->event.data.vsp_cmd.sub_data.page[0] = ISERIES_HV_ADDR(pages[0]); 1069 ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
1065 ev->event.data.vsp_cmd.sub_data.page[1] = ISERIES_HV_ADDR(pages[1]); 1070 ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
1066 ev->event.data.vsp_cmd.sub_data.page[2] = ISERIES_HV_ADDR(pages[2]); 1071 ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
1067 ev->event.data.vsp_cmd.sub_data.page[3] = ISERIES_HV_ADDR(pages[3]); 1072 ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
1068 mb(); 1073 mb();
1069 if (signal_event(ev) != 0) 1074 if (signal_event(ev) != 0)
1070 return; 1075 return;
@@ -1279,3 +1284,38 @@ static int __init mf_proc_init(void)
1279__initcall(mf_proc_init); 1284__initcall(mf_proc_init);
1280 1285
1281#endif /* CONFIG_PROC_FS */ 1286#endif /* CONFIG_PROC_FS */
1287
1288/*
1289 * Get the RTC from the virtual service processor
1290 * This requires flowing LpEvents to the primary partition
1291 */
1292void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
1293{
1294 if (piranha_simulator)
1295 return;
1296
1297 mf_get_rtc(rtc_tm);
1298 rtc_tm->tm_mon--;
1299}
1300
1301/*
1302 * Set the RTC in the virtual service processor
1303 * This requires flowing LpEvents to the primary partition
1304 */
1305int iSeries_set_rtc_time(struct rtc_time *tm)
1306{
1307 mf_set_rtc(tm);
1308 return 0;
1309}
1310
1311unsigned long iSeries_get_boot_time(void)
1312{
1313 struct rtc_time tm;
1314
1315 if (piranha_simulator)
1316 return 0;
1317
1318 mf_get_boot_rtc(&tm);
1319 return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
1320 tm.tm_hour, tm.tm_min, tm.tm_sec);
1321}
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
new file mode 100644
index 000000000000..09f14522e176
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -0,0 +1,55 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-2005 IBM Corp
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <asm/processor.h>
17#include <asm/asm-offsets.h>
18
19 .text
20
21/* unsigned long local_save_flags(void) */
22_GLOBAL(local_get_flags)
23 lbz r3,PACAPROCENABLED(r13)
24 blr
25
26/* unsigned long local_irq_disable(void) */
27_GLOBAL(local_irq_disable)
28 lbz r3,PACAPROCENABLED(r13)
29 li r4,0
30 stb r4,PACAPROCENABLED(r13)
31 blr /* Done */
32
33/* void local_irq_restore(unsigned long flags) */
34_GLOBAL(local_irq_restore)
35 lbz r5,PACAPROCENABLED(r13)
36 /* Check if things are setup the way we want _already_. */
37 cmpw 0,r3,r5
38 beqlr
39 /* are we enabling interrupts? */
40 cmpdi 0,r3,0
41 stb r3,PACAPROCENABLED(r13)
42 beqlr
43 /* Check pending interrupts */
44 /* A decrementer, IPI or PMC interrupt may have occurred
45 * while we were in the hypervisor (which enables) */
46 ld r4,PACALPPACA+LPPACAANYINT(r13)
47 cmpdi r4,0
48 beqlr
49
50 /*
51 * Handle pending interrupts in interrupt context
52 */
53 li r0,0x5555
54 sc
55 blr
diff --git a/arch/ppc64/kernel/iSeries_pci.c b/arch/powerpc/platforms/iseries/pci.c
index fbc273c32bcc..959e59fd9c11 100644
--- a/arch/ppc64/kernel/iSeries_pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -1,28 +1,26 @@
1/* 1/*
2 * iSeries_pci.c
3 *
4 * Copyright (C) 2001 Allan Trautman, IBM Corporation 2 * Copyright (C) 2001 Allan Trautman, IBM Corporation
5 * 3 *
6 * iSeries specific routines for PCI. 4 * iSeries specific routines for PCI.
7 * 5 *
8 * Based on code from pci.c and iSeries_pci.c 32bit 6 * Based on code from pci.c and iSeries_pci.c 32bit
9 * 7 *
10 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 11 * (at your option) any later version.
14 * 12 *
15 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 16 * GNU General Public License for more details.
19 * 17 *
20 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 21 */
24#include <linux/kernel.h> 22#include <linux/kernel.h>
25#include <linux/list.h> 23#include <linux/list.h>
26#include <linux/string.h> 24#include <linux/string.h>
27#include <linux/init.h> 25#include <linux/init.h>
28#include <linux/module.h> 26#include <linux/module.h>
@@ -36,21 +34,23 @@
36#include <asm/pci-bridge.h> 34#include <asm/pci-bridge.h>
37#include <asm/ppcdebug.h> 35#include <asm/ppcdebug.h>
38#include <asm/iommu.h> 36#include <asm/iommu.h>
37#include <asm/abs_addr.h>
39 38
40#include <asm/iSeries/HvCallPci.h>
41#include <asm/iSeries/HvCallXm.h> 39#include <asm/iSeries/HvCallXm.h>
42#include <asm/iSeries/iSeries_irq.h>
43#include <asm/iSeries/iSeries_pci.h>
44#include <asm/iSeries/mf.h> 40#include <asm/iSeries/mf.h>
45 41
42#include <asm/ppc-pci.h>
43
44#include "irq.h"
46#include "pci.h" 45#include "pci.h"
46#include "call_pci.h"
47 47
48extern unsigned long io_page_mask; 48extern unsigned long io_page_mask;
49 49
50/* 50/*
51 * Forward declares of prototypes. 51 * Forward declares of prototypes.
52 */ 52 */
53static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn); 53static struct device_node *find_Device_Node(int bus, int devfn);
54static void scan_PHB_slots(struct pci_controller *Phb); 54static void scan_PHB_slots(struct pci_controller *Phb);
55static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel); 55static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
56static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info); 56static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
@@ -68,7 +68,7 @@ static long Pci_Cfg_Write_Count;
68#endif 68#endif
69static long Pci_Error_Count; 69static long Pci_Error_Count;
70 70
71static int Pci_Retry_Max = 3; /* Only retry 3 times */ 71static int Pci_Retry_Max = 3; /* Only retry 3 times */
72static int Pci_Error_Flag = 1; /* Set Retry Error on. */ 72static int Pci_Error_Flag = 1; /* Set Retry Error on. */
73 73
74static struct pci_ops iSeries_pci_ops; 74static struct pci_ops iSeries_pci_ops;
@@ -87,7 +87,7 @@ static long current_iomm_table_entry;
87/* 87/*
88 * Lookup Tables. 88 * Lookup Tables.
89 */ 89 */
90static struct iSeries_Device_Node **iomm_table; 90static struct device_node **iomm_table;
91static u8 *iobar_table; 91static u8 *iobar_table;
92 92
93/* 93/*
@@ -179,7 +179,7 @@ static void allocate_device_bars(struct pci_dev *dev)
179 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) { 179 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
180 bar_res = &dev->resource[bar_num]; 180 bar_res = &dev->resource[bar_num];
181 iomm_table_allocate_entry(dev, bar_num); 181 iomm_table_allocate_entry(dev, bar_num);
182 } 182 }
183} 183}
184 184
185/* 185/*
@@ -201,29 +201,31 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
201/* 201/*
202 * build_device_node(u16 Bus, int SubBus, u8 DevFn) 202 * build_device_node(u16 Bus, int SubBus, u8 DevFn)
203 */ 203 */
204static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus, 204static struct device_node *build_device_node(HvBusNumber Bus,
205 HvSubBusNumber SubBus, int AgentId, int Function) 205 HvSubBusNumber SubBus, int AgentId, int Function)
206{ 206{
207 struct iSeries_Device_Node *node; 207 struct device_node *node;
208 struct pci_dn *pdn;
208 209
209 PPCDBG(PPCDBG_BUSWALK, 210 PPCDBG(PPCDBG_BUSWALK,
210 "-build_device_node 0x%02X.%02X.%02X Function: %02X\n", 211 "-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
211 Bus, SubBus, AgentId, Function); 212 Bus, SubBus, AgentId, Function);
212 213
213 node = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL); 214 node = kmalloc(sizeof(struct device_node), GFP_KERNEL);
214 if (node == NULL) 215 if (node == NULL)
215 return NULL; 216 return NULL;
216 217 memset(node, 0, sizeof(struct device_node));
217 memset(node, 0, sizeof(struct iSeries_Device_Node)); 218 pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
218 list_add_tail(&node->Device_List, &iSeries_Global_Device_List); 219 if (pdn == NULL) {
219#if 0 220 kfree(node);
220 node->DsaAddr = ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32); 221 return NULL;
221#endif 222 }
222 node->DsaAddr.DsaAddr = 0; 223 node->data = pdn;
223 node->DsaAddr.Dsa.busNumber = Bus; 224 pdn->node = node;
224 node->DsaAddr.Dsa.subBusNumber = SubBus; 225 list_add_tail(&pdn->Device_List, &iSeries_Global_Device_List);
225 node->DsaAddr.Dsa.deviceId = 0x10; 226 pdn->busno = Bus;
226 node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function); 227 pdn->bussubno = SubBus;
228 pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
227 return node; 229 return node;
228} 230}
229 231
@@ -278,28 +280,28 @@ unsigned long __init find_and_init_phbs(void)
278 280
279/* 281/*
280 * iSeries_pcibios_init 282 * iSeries_pcibios_init
281 * 283 *
282 * Chance to initialize and structures or variable before PCI Bus walk. 284 * Chance to initialize and structures or variable before PCI Bus walk.
283 */ 285 */
284void iSeries_pcibios_init(void) 286void iSeries_pcibios_init(void)
285{ 287{
286 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n"); 288 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
287 iomm_table_initialize(); 289 iomm_table_initialize();
288 find_and_init_phbs(); 290 find_and_init_phbs();
289 io_page_mask = -1; 291 io_page_mask = -1;
290 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n"); 292 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
291} 293}
292 294
293/* 295/*
294 * iSeries_pci_final_fixup(void) 296 * iSeries_pci_final_fixup(void)
295 */ 297 */
296void __init iSeries_pci_final_fixup(void) 298void __init iSeries_pci_final_fixup(void)
297{ 299{
298 struct pci_dev *pdev = NULL; 300 struct pci_dev *pdev = NULL;
299 struct iSeries_Device_Node *node; 301 struct device_node *node;
300 int DeviceCount = 0; 302 int DeviceCount = 0;
301 303
302 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n"); 304 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
303 305
304 /* Fix up at the device node and pci_dev relationship */ 306 /* Fix up at the device node and pci_dev relationship */
305 mf_display_src(0xC9000100); 307 mf_display_src(0xC9000100);
@@ -313,7 +315,7 @@ void __init iSeries_pci_final_fixup(void)
313 if (node != NULL) { 315 if (node != NULL) {
314 ++DeviceCount; 316 ++DeviceCount;
315 pdev->sysdata = (void *)node; 317 pdev->sysdata = (void *)node;
316 node->PciDev = pdev; 318 PCI_DN(node)->pcidev = pdev;
317 PPCDBG(PPCDBG_BUSWALK, 319 PPCDBG(PPCDBG_BUSWALK,
318 "pdev 0x%p <==> DevNode 0x%p\n", 320 "pdev 0x%p <==> DevNode 0x%p\n",
319 pdev, node); 321 pdev, node);
@@ -323,7 +325,7 @@ void __init iSeries_pci_final_fixup(void)
323 } else 325 } else
324 printk("PCI: Device Tree not found for 0x%016lX\n", 326 printk("PCI: Device Tree not found for 0x%016lX\n",
325 (unsigned long)pdev); 327 (unsigned long)pdev);
326 pdev->irq = node->Irq; 328 pdev->irq = PCI_DN(node)->Irq;
327 } 329 }
328 iSeries_activate_IRQs(); 330 iSeries_activate_IRQs();
329 mf_display_src(0xC9000200); 331 mf_display_src(0xC9000200);
@@ -332,24 +334,24 @@ void __init iSeries_pci_final_fixup(void)
332void pcibios_fixup_bus(struct pci_bus *PciBus) 334void pcibios_fixup_bus(struct pci_bus *PciBus)
333{ 335{
334 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n", 336 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
335 PciBus->number); 337 PciBus->number);
336} 338}
337 339
338void pcibios_fixup_resources(struct pci_dev *pdev) 340void pcibios_fixup_resources(struct pci_dev *pdev)
339{ 341{
340 PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev); 342 PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev);
341} 343}
342 344
343/* 345/*
344 * Loop through each node function to find usable EADs bridges. 346 * Loop through each node function to find usable EADs bridges.
345 */ 347 */
346static void scan_PHB_slots(struct pci_controller *Phb) 348static void scan_PHB_slots(struct pci_controller *Phb)
347{ 349{
348 struct HvCallPci_DeviceInfo *DevInfo; 350 struct HvCallPci_DeviceInfo *DevInfo;
349 HvBusNumber bus = Phb->local_number; /* System Bus */ 351 HvBusNumber bus = Phb->local_number; /* System Bus */
350 const HvSubBusNumber SubBus = 0; /* EADs is always 0. */ 352 const HvSubBusNumber SubBus = 0; /* EADs is always 0. */
351 int HvRc = 0; 353 int HvRc = 0;
352 int IdSel; 354 int IdSel;
353 const int MaxAgents = 8; 355 const int MaxAgents = 8;
354 356
355 DevInfo = (struct HvCallPci_DeviceInfo*) 357 DevInfo = (struct HvCallPci_DeviceInfo*)
@@ -358,11 +360,11 @@ static void scan_PHB_slots(struct pci_controller *Phb)
358 return; 360 return;
359 361
360 /* 362 /*
361 * Probe for EADs Bridges 363 * Probe for EADs Bridges
362 */ 364 */
363 for (IdSel = 1; IdSel < MaxAgents; ++IdSel) { 365 for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
364 HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel, 366 HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
365 ISERIES_HV_ADDR(DevInfo), 367 iseries_hv_addr(DevInfo),
366 sizeof(struct HvCallPci_DeviceInfo)); 368 sizeof(struct HvCallPci_DeviceInfo));
367 if (HvRc == 0) { 369 if (HvRc == 0) {
368 if (DevInfo->deviceType == HvCallPci_NodeDevice) 370 if (DevInfo->deviceType == HvCallPci_NodeDevice)
@@ -393,19 +395,19 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
393 395
394 /* Note: hvSubBus and irq is always be 0 at this level! */ 396 /* Note: hvSubBus and irq is always be 0 at this level! */
395 for (Function = 0; Function < 8; ++Function) { 397 for (Function = 0; Function < 8; ++Function) {
396 AgentId = ISERIES_PCI_AGENTID(IdSel, Function); 398 AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
397 HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0); 399 HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
398 if (HvRc == 0) { 400 if (HvRc == 0) {
399 printk("found device at bus %d idsel %d func %d (AgentId %x)\n", 401 printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
400 bus, IdSel, Function, AgentId); 402 bus, IdSel, Function, AgentId);
401 /* Connect EADs: 0x18.00.12 = 0x00 */ 403 /* Connect EADs: 0x18.00.12 = 0x00 */
402 PPCDBG(PPCDBG_BUSWALK, 404 PPCDBG(PPCDBG_BUSWALK,
403 "PCI:Connect EADs: 0x%02X.%02X.%02X\n", 405 "PCI:Connect EADs: 0x%02X.%02X.%02X\n",
404 bus, SubBus, AgentId); 406 bus, SubBus, AgentId);
405 HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId, 407 HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
406 ISERIES_HV_ADDR(BridgeInfo), 408 iseries_hv_addr(BridgeInfo),
407 sizeof(struct HvCallPci_BridgeInfo)); 409 sizeof(struct HvCallPci_BridgeInfo));
408 if (HvRc == 0) { 410 if (HvRc == 0) {
409 printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n", 411 printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
410 BridgeInfo->busUnitInfo.deviceType, 412 BridgeInfo->busUnitInfo.deviceType,
411 BridgeInfo->subBusNumber, 413 BridgeInfo->subBusNumber,
@@ -428,7 +430,7 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
428 printk("PCI: Invalid Bridge Configuration(0x%02X)", 430 printk("PCI: Invalid Bridge Configuration(0x%02X)",
429 BridgeInfo->busUnitInfo.deviceType); 431 BridgeInfo->busUnitInfo.deviceType);
430 } 432 }
431 } else if (HvRc != 0x000B) 433 } else if (HvRc != 0x000B)
432 pci_Log_Error("EADs Connect", 434 pci_Log_Error("EADs Connect",
433 bus, SubBus, AgentId, HvRc); 435 bus, SubBus, AgentId, HvRc);
434 } 436 }
@@ -441,7 +443,7 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
441static int scan_bridge_slot(HvBusNumber Bus, 443static int scan_bridge_slot(HvBusNumber Bus,
442 struct HvCallPci_BridgeInfo *BridgeInfo) 444 struct HvCallPci_BridgeInfo *BridgeInfo)
443{ 445{
444 struct iSeries_Device_Node *node; 446 struct device_node *node;
445 HvSubBusNumber SubBus = BridgeInfo->subBusNumber; 447 HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
446 u16 VendorId = 0; 448 u16 VendorId = 0;
447 int HvRc = 0; 449 int HvRc = 0;
@@ -451,16 +453,16 @@ static int scan_bridge_slot(HvBusNumber Bus,
451 HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function); 453 HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
452 454
453 /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */ 455 /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
454 Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel); 456 Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
455 PPCDBG(PPCDBG_BUSWALK, 457 PPCDBG(PPCDBG_BUSWALK,
456 "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n", 458 "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
457 Bus, 0, EADsIdSel, Irq); 459 Bus, 0, EADsIdSel, Irq);
458 460
459 /* 461 /*
460 * Connect all functions of any device found. 462 * Connect all functions of any device found.
461 */ 463 */
462 for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) { 464 for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
463 for (Function = 0; Function < 8; ++Function) { 465 for (Function = 0; Function < 8; ++Function) {
464 HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function); 466 HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
465 HvRc = HvCallXm_connectBusUnit(Bus, SubBus, 467 HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
466 AgentId, Irq); 468 AgentId, Irq);
@@ -484,15 +486,15 @@ static int scan_bridge_slot(HvBusNumber Bus,
484 "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n", 486 "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n",
485 Bus, SubBus, AgentId, VendorId, Irq); 487 Bus, SubBus, AgentId, VendorId, Irq);
486 HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId, 488 HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
487 PCI_INTERRUPT_LINE, Irq); 489 PCI_INTERRUPT_LINE, Irq);
488 if (HvRc != 0) 490 if (HvRc != 0)
489 pci_Log_Error("PciCfgStore Irq Failed!", 491 pci_Log_Error("PciCfgStore Irq Failed!",
490 Bus, SubBus, AgentId, HvRc); 492 Bus, SubBus, AgentId, HvRc);
491 493
492 ++DeviceCount; 494 ++DeviceCount;
493 node = build_device_node(Bus, SubBus, EADsIdSel, Function); 495 node = build_device_node(Bus, SubBus, EADsIdSel, Function);
494 node->Irq = Irq; 496 PCI_DN(node)->Irq = Irq;
495 node->LogicalSlot = BridgeInfo->logicalSlotNumber; 497 PCI_DN(node)->LogicalSlot = BridgeInfo->logicalSlotNumber;
496 498
497 } /* for (Function = 0; Function < 8; ++Function) */ 499 } /* for (Function = 0; Function < 8; ++Function) */
498 } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */ 500 } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
@@ -542,16 +544,13 @@ EXPORT_SYMBOL(iSeries_memcpy_fromio);
542/* 544/*
543 * Look down the chain to find the matching Device Device 545 * Look down the chain to find the matching Device Device
544 */ 546 */
545static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn) 547static struct device_node *find_Device_Node(int bus, int devfn)
546{ 548{
547 struct list_head *pos; 549 struct pci_dn *pdn;
548 550
549 list_for_each(pos, &iSeries_Global_Device_List) { 551 list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
550 struct iSeries_Device_Node *node = 552 if ((bus == pdn->busno) && (devfn == pdn->devfn))
551 list_entry(pos, struct iSeries_Device_Node, Device_List); 553 return pdn->node;
552
553 if ((bus == ISERIES_BUS(node)) && (devfn == node->DevFn))
554 return node;
555 } 554 }
556 return NULL; 555 return NULL;
557} 556}
@@ -562,12 +561,12 @@ static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn)
562 * Sanity Check Node PciDev to passed pci_dev 561 * Sanity Check Node PciDev to passed pci_dev
563 * If none is found, returns a NULL which the client must handle. 562 * If none is found, returns a NULL which the client must handle.
564 */ 563 */
565static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *pdev) 564static struct device_node *get_Device_Node(struct pci_dev *pdev)
566{ 565{
567 struct iSeries_Device_Node *node; 566 struct device_node *node;
568 567
569 node = pdev->sysdata; 568 node = pdev->sysdata;
570 if (node == NULL || node->PciDev != pdev) 569 if (node == NULL || PCI_DN(node)->pcidev != pdev)
571 node = find_Device_Node(pdev->bus->number, pdev->devfn); 570 node = find_Device_Node(pdev->bus->number, pdev->devfn);
572 return node; 571 return node;
573} 572}
@@ -595,7 +594,7 @@ static u64 hv_cfg_write_func[4] = {
595static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn, 594static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
596 int offset, int size, u32 *val) 595 int offset, int size, u32 *val)
597{ 596{
598 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn); 597 struct device_node *node = find_Device_Node(bus->number, devfn);
599 u64 fn; 598 u64 fn;
600 struct HvCallPci_LoadReturn ret; 599 struct HvCallPci_LoadReturn ret;
601 600
@@ -607,7 +606,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
607 } 606 }
608 607
609 fn = hv_cfg_read_func[(size - 1) & 3]; 608 fn = hv_cfg_read_func[(size - 1) & 3];
610 HvCall3Ret16(fn, &ret, node->DsaAddr.DsaAddr, offset, 0); 609 HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);
611 610
612 if (ret.rc != 0) { 611 if (ret.rc != 0) {
613 *val = ~0; 612 *val = ~0;
@@ -625,7 +624,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
625static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn, 624static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
626 int offset, int size, u32 val) 625 int offset, int size, u32 val)
627{ 626{
628 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn); 627 struct device_node *node = find_Device_Node(bus->number, devfn);
629 u64 fn; 628 u64 fn;
630 u64 ret; 629 u64 ret;
631 630
@@ -635,7 +634,7 @@ static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
635 return PCIBIOS_BAD_REGISTER_NUMBER; 634 return PCIBIOS_BAD_REGISTER_NUMBER;
636 635
637 fn = hv_cfg_write_func[(size - 1) & 3]; 636 fn = hv_cfg_write_func[(size - 1) & 3];
638 ret = HvCall4(fn, node->DsaAddr.DsaAddr, offset, val, 0); 637 ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);
639 638
640 if (ret != 0) 639 if (ret != 0)
641 return PCIBIOS_DEVICE_NOT_FOUND; 640 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -657,14 +656,16 @@ static struct pci_ops iSeries_pci_ops = {
657 * PCI: Device 23.90 ReadL Retry( 1) 656 * PCI: Device 23.90 ReadL Retry( 1)
658 * PCI: Device 23.90 ReadL Retry Successful(1) 657 * PCI: Device 23.90 ReadL Retry Successful(1)
659 */ 658 */
660static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode, 659static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
661 int *retry, u64 ret) 660 int *retry, u64 ret)
662{ 661{
663 if (ret != 0) { 662 if (ret != 0) {
663 struct pci_dn *pdn = PCI_DN(DevNode);
664
664 ++Pci_Error_Count; 665 ++Pci_Error_Count;
665 (*retry)++; 666 (*retry)++;
666 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n", 667 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
667 TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn, 668 TextHdr, pdn->busno, pdn->devfn,
668 *retry, (int)ret); 669 *retry, (int)ret);
669 /* 670 /*
670 * Bump the retry and check for retry count exceeded. 671 * Bump the retry and check for retry count exceeded.
@@ -687,14 +688,14 @@ static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
687 * Note: Make sure the passed variable end up on the stack to avoid 688 * Note: Make sure the passed variable end up on the stack to avoid
688 * the exposure of being device global. 689 * the exposure of being device global.
689 */ 690 */
690static inline struct iSeries_Device_Node *xlate_iomm_address( 691static inline struct device_node *xlate_iomm_address(
691 const volatile void __iomem *IoAddress, 692 const volatile void __iomem *IoAddress,
692 u64 *dsaptr, u64 *BarOffsetPtr) 693 u64 *dsaptr, u64 *BarOffsetPtr)
693{ 694{
694 unsigned long OrigIoAddr; 695 unsigned long OrigIoAddr;
695 unsigned long BaseIoAddr; 696 unsigned long BaseIoAddr;
696 unsigned long TableIndex; 697 unsigned long TableIndex;
697 struct iSeries_Device_Node *DevNode; 698 struct device_node *DevNode;
698 699
699 OrigIoAddr = (unsigned long __force)IoAddress; 700 OrigIoAddr = (unsigned long __force)IoAddress;
700 if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory)) 701 if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
@@ -705,7 +706,7 @@ static inline struct iSeries_Device_Node *xlate_iomm_address(
705 706
706 if (DevNode != NULL) { 707 if (DevNode != NULL) {
707 int barnum = iobar_table[TableIndex]; 708 int barnum = iobar_table[TableIndex];
708 *dsaptr = DevNode->DsaAddr.DsaAddr | (barnum << 24); 709 *dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
709 *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE; 710 *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
710 } else 711 } else
711 panic("PCI: Invalid PCI IoAddress detected!\n"); 712 panic("PCI: Invalid PCI IoAddress detected!\n");
@@ -727,7 +728,7 @@ u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
727 u64 dsa; 728 u64 dsa;
728 int retry = 0; 729 int retry = 0;
729 struct HvCallPci_LoadReturn ret; 730 struct HvCallPci_LoadReturn ret;
730 struct iSeries_Device_Node *DevNode = 731 struct device_node *DevNode =
731 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 732 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
732 733
733 if (DevNode == NULL) { 734 if (DevNode == NULL) {
@@ -757,7 +758,7 @@ u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
757 u64 dsa; 758 u64 dsa;
758 int retry = 0; 759 int retry = 0;
759 struct HvCallPci_LoadReturn ret; 760 struct HvCallPci_LoadReturn ret;
760 struct iSeries_Device_Node *DevNode = 761 struct device_node *DevNode =
761 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 762 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
762 763
763 if (DevNode == NULL) { 764 if (DevNode == NULL) {
@@ -788,7 +789,7 @@ u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
788 u64 dsa; 789 u64 dsa;
789 int retry = 0; 790 int retry = 0;
790 struct HvCallPci_LoadReturn ret; 791 struct HvCallPci_LoadReturn ret;
791 struct iSeries_Device_Node *DevNode = 792 struct device_node *DevNode =
792 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 793 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
793 794
794 if (DevNode == NULL) { 795 if (DevNode == NULL) {
@@ -826,7 +827,7 @@ void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
826 u64 dsa; 827 u64 dsa;
827 int retry = 0; 828 int retry = 0;
828 u64 rc; 829 u64 rc;
829 struct iSeries_Device_Node *DevNode = 830 struct device_node *DevNode =
830 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 831 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
831 832
832 if (DevNode == NULL) { 833 if (DevNode == NULL) {
@@ -854,7 +855,7 @@ void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
854 u64 dsa; 855 u64 dsa;
855 int retry = 0; 856 int retry = 0;
856 u64 rc; 857 u64 rc;
857 struct iSeries_Device_Node *DevNode = 858 struct device_node *DevNode =
858 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 859 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
859 860
860 if (DevNode == NULL) { 861 if (DevNode == NULL) {
@@ -882,7 +883,7 @@ void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
882 u64 dsa; 883 u64 dsa;
883 int retry = 0; 884 int retry = 0;
884 u64 rc; 885 u64 rc;
885 struct iSeries_Device_Node *DevNode = 886 struct device_node *DevNode =
886 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 887 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
887 888
888 if (DevNode == NULL) { 889 if (DevNode == NULL) {
diff --git a/include/asm-ppc64/iSeries/iSeries_pci.h b/arch/powerpc/platforms/iseries/pci.h
index 575f611f8b33..33a8489fde54 100644
--- a/include/asm-ppc64/iSeries/iSeries_pci.h
+++ b/arch/powerpc/platforms/iseries/pci.h
@@ -1,8 +1,8 @@
1#ifndef _ISERIES_64_PCI_H 1#ifndef _PLATFORMS_ISERIES_PCI_H
2#define _ISERIES_64_PCI_H 2#define _PLATFORMS_ISERIES_PCI_H
3 3
4/* 4/*
5 * File iSeries_pci.h created by Allan Trautman on Tue Feb 20, 2001. 5 * Created by Allan Trautman on Tue Feb 20, 2001.
6 * 6 *
7 * Define some useful macros for the iSeries pci routines. 7 * Define some useful macros for the iSeries pci routines.
8 * Copyright (C) 2001 Allan H Trautman, IBM Corporation 8 * Copyright (C) 2001 Allan H Trautman, IBM Corporation
@@ -30,23 +30,9 @@
30 * End Change Activity 30 * End Change Activity
31 */ 31 */
32 32
33#include <asm/iSeries/HvCallPci.h> 33#include <asm/pci-bridge.h>
34#include <asm/abs_addr.h>
35 34
36struct pci_dev; /* For Forward Reference */ 35struct pci_dev; /* For Forward Reference */
37struct iSeries_Device_Node;
38
39/*
40 * Gets iSeries Bus, SubBus, DevFn using iSeries_Device_Node structure
41 */
42
43#define ISERIES_BUS(DevPtr) DevPtr->DsaAddr.Dsa.busNumber
44#define ISERIES_SUBBUS(DevPtr) DevPtr->DsaAddr.Dsa.subBusNumber
45#define ISERIES_DEVICE(DevPtr) DevPtr->DsaAddr.Dsa.deviceId
46#define ISERIES_DSA(DevPtr) DevPtr->DsaAddr.DsaAddr
47#define ISERIES_DEVNODE(PciDev) ((struct iSeries_Device_Node *)PciDev->sysdata)
48
49#define EADsMaxAgents 7
50 36
51/* 37/*
52 * Decodes Linux DevFn to iSeries DevFn, bridge device, or function. 38 * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
@@ -62,27 +48,16 @@ struct iSeries_Device_Node;
62#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7) 48#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7)
63 49
64/* 50/*
65 * Converts Virtual Address to Real Address for Hypervisor calls 51 * Generate a Direct Select Address for the Hypervisor
66 */ 52 */
67#define ISERIES_HV_ADDR(virtaddr) \ 53static inline u64 iseries_ds_addr(struct device_node *node)
68 (0x8000000000000000 | virt_to_abs(virtaddr)) 54{
55 struct pci_dn *pdn = PCI_DN(node);
69 56
70/* 57 return ((u64)pdn->busno << 48) + ((u64)pdn->bussubno << 40)
71 * iSeries Device Information 58 + ((u64)0x10 << 32);
72 */ 59}
73struct iSeries_Device_Node {
74 struct list_head Device_List;
75 struct pci_dev *PciDev;
76 union HvDsaMap DsaAddr; /* Direct Select Address */
77 /* busNumber, subBusNumber, */
78 /* deviceId, barNumber */
79 int DevFn; /* Linux devfn */
80 int Irq; /* Assigned IRQ */
81 int Flags; /* Possible flags(disable/bist)*/
82 u8 LogicalSlot; /* Hv Slot Index for Tces */
83 struct iommu_table *iommu_table;/* Device TCE Table */
84};
85 60
86extern void iSeries_Device_Information(struct pci_dev*, int); 61extern void iSeries_Device_Information(struct pci_dev*, int);
87 62
88#endif /* _ISERIES_64_PCI_H */ 63#endif /* _PLATFORMS_ISERIES_PCI_H */
diff --git a/arch/ppc64/kernel/iSeries_proc.c b/arch/powerpc/platforms/iseries/proc.c
index 0fe3116eba29..6f1929cac66b 100644
--- a/arch/ppc64/kernel/iSeries_proc.c
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * iSeries_proc.c
3 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation 2 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation 3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
5 * 4 *
@@ -27,8 +26,9 @@
27#include <asm/lppaca.h> 26#include <asm/lppaca.h>
28#include <asm/iSeries/ItLpQueue.h> 27#include <asm/iSeries/ItLpQueue.h>
29#include <asm/iSeries/HvCallXm.h> 28#include <asm/iSeries/HvCallXm.h>
30#include <asm/iSeries/IoHriMainStore.h> 29
31#include <asm/iSeries/IoHriProcessorVpd.h> 30#include "processor_vpd.h"
31#include "main_store.h"
32 32
33static int __init iseries_proc_create(void) 33static int __init iseries_proc_create(void)
34{ 34{
@@ -68,12 +68,15 @@ static int proc_titantod_show(struct seq_file *m, void *v)
68 unsigned long tb_ticks = (tb0 - startTb); 68 unsigned long tb_ticks = (tb0 - startTb);
69 unsigned long titan_jiffies = titan_usec / (1000000/HZ); 69 unsigned long titan_jiffies = titan_usec / (1000000/HZ);
70 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ); 70 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
71 unsigned long titan_jiff_rem_usec = titan_usec - titan_jiff_usec; 71 unsigned long titan_jiff_rem_usec =
72 titan_usec - titan_jiff_usec;
72 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy; 73 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
73 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy; 74 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
74 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks; 75 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
75 unsigned long tb_jiff_rem_usec = tb_jiff_rem_ticks / tb_ticks_per_usec; 76 unsigned long tb_jiff_rem_usec =
76 unsigned long new_tb_ticks_per_jiffy = (tb_ticks * (1000000/HZ))/titan_usec; 77 tb_jiff_rem_ticks / tb_ticks_per_usec;
78 unsigned long new_tb_ticks_per_jiffy =
79 (tb_ticks * (1000000/HZ))/titan_usec;
77 80
78 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec); 81 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
79 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks); 82 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
diff --git a/include/asm-ppc64/iSeries/IoHriProcessorVpd.h b/arch/powerpc/platforms/iseries/processor_vpd.h
index 73b73d80b8b1..7ac5d0d0dbfa 100644
--- a/include/asm-ppc64/iSeries/IoHriProcessorVpd.h
+++ b/arch/powerpc/platforms/iseries/processor_vpd.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * IoHriProcessorVpd.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _IOHRIPROCESSORVPD_H 18#ifndef _ISERIES_PROCESSOR_VPD_H
20#define _IOHRIPROCESSORVPD_H 19#define _ISERIES_PROCESSOR_VPD_H
21 20
22#include <asm/types.h> 21#include <asm/types.h>
23 22
@@ -83,4 +82,4 @@ struct IoHriProcessorVpd {
83 82
84extern struct IoHriProcessorVpd xIoHriProcessorVpd[]; 83extern struct IoHriProcessorVpd xIoHriProcessorVpd[];
85 84
86#endif /* _IOHRIPROCESSORVPD_H */ 85#endif /* _ISERIES_PROCESSOR_VPD_H */
diff --git a/include/asm-ppc64/iSeries/HvReleaseData.h b/arch/powerpc/platforms/iseries/release_data.h
index c8162e5ccb21..c68b9c3e5caf 100644
--- a/include/asm-ppc64/iSeries/HvReleaseData.h
+++ b/arch/powerpc/platforms/iseries/release_data.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvReleaseData.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _HVRELEASEDATA_H 18#ifndef _ISERIES_RELEASE_DATA_H
20#define _HVRELEASEDATA_H 19#define _ISERIES_RELEASE_DATA_H
21 20
22/* 21/*
23 * This control block contains the critical information about the 22 * This control block contains the critical information about the
@@ -61,4 +60,4 @@ struct HvReleaseData {
61 60
62extern struct HvReleaseData hvReleaseData; 61extern struct HvReleaseData hvReleaseData;
63 62
64#endif /* _HVRELEASEDATA_H */ 63#endif /* _ISERIES_RELEASE_DATA_H */
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/powerpc/platforms/iseries/setup.c
index 3ffefbbc6623..b27901481782 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -2,8 +2,6 @@
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> 2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> 3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 * 4 *
5 * Module name: iSeries_setup.c
6 *
7 * Description: 5 * Description:
8 * Architecture- / platform-specific boot-time initialization code for 6 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and 7 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
@@ -42,26 +40,27 @@
42#include <asm/firmware.h> 40#include <asm/firmware.h>
43 41
44#include <asm/time.h> 42#include <asm/time.h>
45#include "iSeries_setup.h"
46#include <asm/naca.h> 43#include <asm/naca.h>
47#include <asm/paca.h> 44#include <asm/paca.h>
48#include <asm/cache.h> 45#include <asm/cache.h>
49#include <asm/sections.h> 46#include <asm/sections.h>
50#include <asm/abs_addr.h> 47#include <asm/abs_addr.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/iSeries/HvLpConfig.h> 48#include <asm/iSeries/HvLpConfig.h>
53#include <asm/iSeries/HvCallEvent.h> 49#include <asm/iSeries/HvCallEvent.h>
54#include <asm/iSeries/HvCallSm.h>
55#include <asm/iSeries/HvCallXm.h> 50#include <asm/iSeries/HvCallXm.h>
56#include <asm/iSeries/ItLpQueue.h> 51#include <asm/iSeries/ItLpQueue.h>
57#include <asm/iSeries/IoHriMainStore.h>
58#include <asm/iSeries/mf.h> 52#include <asm/iSeries/mf.h>
59#include <asm/iSeries/HvLpEvent.h> 53#include <asm/iSeries/HvLpEvent.h>
60#include <asm/iSeries/iSeries_irq.h>
61#include <asm/iSeries/IoHriProcessorVpd.h>
62#include <asm/iSeries/ItVpdAreas.h>
63#include <asm/iSeries/LparMap.h> 54#include <asm/iSeries/LparMap.h>
64 55
56#include "setup.h"
57#include "irq.h"
58#include "vpd_areas.h"
59#include "processor_vpd.h"
60#include "main_store.h"
61#include "call_sm.h"
62#include "call_hpt.h"
63
65extern void hvlog(char *fmt, ...); 64extern void hvlog(char *fmt, ...);
66 65
67#ifdef DEBUG 66#ifdef DEBUG
@@ -74,8 +73,8 @@ extern void hvlog(char *fmt, ...);
74extern void ppcdbg_initialize(void); 73extern void ppcdbg_initialize(void);
75 74
76static void build_iSeries_Memory_Map(void); 75static void build_iSeries_Memory_Map(void);
77static void setup_iSeries_cache_sizes(void); 76static void iseries_shared_idle(void);
78static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr); 77static void iseries_dedicated_idle(void);
79#ifdef CONFIG_PCI 78#ifdef CONFIG_PCI
80extern void iSeries_pci_final_fixup(void); 79extern void iSeries_pci_final_fixup(void);
81#else 80#else
@@ -83,14 +82,6 @@ static void iSeries_pci_final_fixup(void) { }
83#endif 82#endif
84 83
85/* Global Variables */ 84/* Global Variables */
86static unsigned long procFreqHz;
87static unsigned long procFreqMhz;
88static unsigned long procFreqMhzHundreths;
89
90static unsigned long tbFreqHz;
91static unsigned long tbFreqMhz;
92static unsigned long tbFreqMhzHundreths;
93
94int piranha_simulator; 85int piranha_simulator;
95 86
96extern int rd_size; /* Defined in drivers/block/rd.c */ 87extern int rd_size; /* Defined in drivers/block/rd.c */
@@ -319,6 +310,8 @@ static void __init iSeries_init_early(void)
319 310
320 ppcdbg_initialize(); 311 ppcdbg_initialize();
321 312
313 ppc64_interrupt_controller = IC_ISERIES;
314
322#if defined(CONFIG_BLK_DEV_INITRD) 315#if defined(CONFIG_BLK_DEV_INITRD)
323 /* 316 /*
324 * If the init RAM disk has been configured and there is 317 * If the init RAM disk has been configured and there is
@@ -341,12 +334,6 @@ static void __init iSeries_init_early(void)
341 iSeries_recal_titan = HvCallXm_loadTod(); 334 iSeries_recal_titan = HvCallXm_loadTod();
342 335
343 /* 336 /*
344 * Cache sizes must be initialized before hpte_init_iSeries is called
345 * as the later need them for flush_icache_range()
346 */
347 setup_iSeries_cache_sizes();
348
349 /*
350 * Initialize the hash table management pointers 337 * Initialize the hash table management pointers
351 */ 338 */
352 hpte_init_iSeries(); 339 hpte_init_iSeries();
@@ -356,12 +343,6 @@ static void __init iSeries_init_early(void)
356 */ 343 */
357 iommu_init_early_iSeries(); 344 iommu_init_early_iSeries();
358 345
359 /*
360 * Initialize the table which translate Linux physical addresses to
361 * AS/400 absolute addresses
362 */
363 build_iSeries_Memory_Map();
364
365 iSeries_get_cmdline(); 346 iSeries_get_cmdline();
366 347
367 /* Save unparsed command line copy for /proc/cmdline */ 348 /* Save unparsed command line copy for /proc/cmdline */
@@ -379,14 +360,6 @@ static void __init iSeries_init_early(void)
379 } 360 }
380 } 361 }
381 362
382 /* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
383 iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);
384
385 lmb_init();
386 lmb_add(0, systemcfg->physicalMemorySize);
387 lmb_analyze();
388 lmb_reserve(0, __pa(klimit));
389
390 /* Initialize machine-dependency vectors */ 363 /* Initialize machine-dependency vectors */
391#ifdef CONFIG_SMP 364#ifdef CONFIG_SMP
392 smp_init_iSeries(); 365 smp_init_iSeries();
@@ -457,7 +430,6 @@ static void __init build_iSeries_Memory_Map(void)
457 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize; 430 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
458 u32 nextPhysChunk; 431 u32 nextPhysChunk;
459 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages; 432 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
460 u32 num_ptegs;
461 u32 totalChunks,moreChunks; 433 u32 totalChunks,moreChunks;
462 u32 currChunk, thisChunk, absChunk; 434 u32 currChunk, thisChunk, absChunk;
463 u32 currDword; 435 u32 currDword;
@@ -520,10 +492,7 @@ static void __init build_iSeries_Memory_Map(void)
520 printk("HPT absolute addr = %016lx, size = %dK\n", 492 printk("HPT absolute addr = %016lx, size = %dK\n",
521 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256); 493 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
522 494
523 /* Fill in the hashed page table hash mask */ 495 ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE);
524 num_ptegs = hptSizePages *
525 (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
526 htab_hash_mask = num_ptegs - 1;
527 496
528 /* 497 /*
529 * The actual hashed page table is in the hypervisor, 498 * The actual hashed page table is in the hypervisor,
@@ -592,144 +561,33 @@ static void __init build_iSeries_Memory_Map(void)
592} 561}
593 562
594/* 563/*
595 * Set up the variables that describe the cache line sizes
596 * for this machine.
597 */
598static void __init setup_iSeries_cache_sizes(void)
599{
600 unsigned int i, n;
601 unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
602
603 systemcfg->icache_size =
604 ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
605 systemcfg->icache_line_size =
606 ppc64_caches.iline_size =
607 xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
608 systemcfg->dcache_size =
609 ppc64_caches.dsize =
610 xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
611 systemcfg->dcache_line_size =
612 ppc64_caches.dline_size =
613 xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
614 ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
615 ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;
616
617 i = ppc64_caches.iline_size;
618 n = 0;
619 while ((i = (i / 2)))
620 ++n;
621 ppc64_caches.log_iline_size = n;
622
623 i = ppc64_caches.dline_size;
624 n = 0;
625 while ((i = (i / 2)))
626 ++n;
627 ppc64_caches.log_dline_size = n;
628
629 printk("D-cache line size = %d\n",
630 (unsigned int)ppc64_caches.dline_size);
631 printk("I-cache line size = %d\n",
632 (unsigned int)ppc64_caches.iline_size);
633}
634
635/*
636 * Create a pte. Used during initialization only.
637 */
638static void iSeries_make_pte(unsigned long va, unsigned long pa,
639 int mode)
640{
641 hpte_t local_hpte, rhpte;
642 unsigned long hash, vpn;
643 long slot;
644
645 vpn = va >> PAGE_SHIFT;
646 hash = hpt_hash(vpn, 0);
647
648 local_hpte.r = pa | mode;
649 local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
650 | HPTE_V_BOLTED | HPTE_V_VALID;
651
652 slot = HvCallHpt_findValid(&rhpte, vpn);
653 if (slot < 0) {
654 /* Must find space in primary group */
655 panic("hash_page: hpte already exists\n");
656 }
657 HvCallHpt_addValidate(slot, 0, &local_hpte);
658}
659
660/*
661 * Bolt the kernel addr space into the HPT
662 */
663static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
664{
665 unsigned long pa;
666 unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
667 hpte_t hpte;
668
669 for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) {
670 unsigned long ea = (unsigned long)__va(pa);
671 unsigned long vsid = get_kernel_vsid(ea);
672 unsigned long va = (vsid << 28) | (pa & 0xfffffff);
673 unsigned long vpn = va >> PAGE_SHIFT;
674 unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
675
676 /* Make non-kernel text non-executable */
677 if (!in_kernel_text(ea))
678 mode_rw |= HW_NO_EXEC;
679
680 if (hpte.v & HPTE_V_VALID) {
681 /* HPTE exists, so just bolt it */
682 HvCallHpt_setSwBits(slot, 0x10, 0);
683 /* And make sure the pp bits are correct */
684 HvCallHpt_setPp(slot, PP_RWXX);
685 } else
686 /* No HPTE exists, so create a new bolted one */
687 iSeries_make_pte(va, phys_to_abs(pa), mode_rw);
688 }
689}
690
691/*
692 * Document me. 564 * Document me.
693 */ 565 */
694static void __init iSeries_setup_arch(void) 566static void __init iSeries_setup_arch(void)
695{ 567{
696 unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index; 568 unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
697 569
698 /* Add an eye catcher and the systemcfg layout version number */ 570 if (get_paca()->lppaca.shared_proc) {
699 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); 571 ppc_md.idle_loop = iseries_shared_idle;
700 systemcfg->version.major = SYSTEMCFG_MAJOR; 572 printk(KERN_INFO "Using shared processor idle loop\n");
701 systemcfg->version.minor = SYSTEMCFG_MINOR; 573 } else {
574 ppc_md.idle_loop = iseries_dedicated_idle;
575 printk(KERN_INFO "Using dedicated idle loop\n");
576 }
702 577
703 /* Setup the Lp Event Queue */ 578 /* Setup the Lp Event Queue */
704 setup_hvlpevent_queue(); 579 setup_hvlpevent_queue();
705 580
706 /* Compute processor frequency */
707 procFreqHz = ((1UL << 34) * 1000000) /
708 xIoHriProcessorVpd[procIx].xProcFreq;
709 procFreqMhz = procFreqHz / 1000000;
710 procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
711 ppc_proc_freq = procFreqHz;
712
713 /* Compute time base frequency */
714 tbFreqHz = ((1UL << 32) * 1000000) /
715 xIoHriProcessorVpd[procIx].xTimeBaseFreq;
716 tbFreqMhz = tbFreqHz / 1000000;
717 tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
718 ppc_tb_freq = tbFreqHz;
719
720 printk("Max logical processors = %d\n", 581 printk("Max logical processors = %d\n",
721 itVpdAreas.xSlicMaxLogicalProcs); 582 itVpdAreas.xSlicMaxLogicalProcs);
722 printk("Max physical processors = %d\n", 583 printk("Max physical processors = %d\n",
723 itVpdAreas.xSlicMaxPhysicalProcs); 584 itVpdAreas.xSlicMaxPhysicalProcs);
724 printk("Processor frequency = %lu.%02lu\n", procFreqMhz, 585
725 procFreqMhzHundreths);
726 printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
727 tbFreqMhzHundreths);
728 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR; 586 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
729 printk("Processor version = %x\n", systemcfg->processor); 587 printk("Processor version = %x\n", systemcfg->processor);
730} 588}
731 589
732static void iSeries_get_cpuinfo(struct seq_file *m) 590static void iSeries_show_cpuinfo(struct seq_file *m)
733{ 591{
734 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n"); 592 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
735} 593}
@@ -768,49 +626,6 @@ static void iSeries_halt(void)
768 mf_power_off(); 626 mf_power_off();
769} 627}
770 628
771/*
772 * void __init iSeries_calibrate_decr()
773 *
774 * Description:
775 * This routine retrieves the internal processor frequency from the VPD,
776 * and sets up the kernel timer decrementer based on that value.
777 *
778 */
779static void __init iSeries_calibrate_decr(void)
780{
781 unsigned long cyclesPerUsec;
782 struct div_result divres;
783
784 /* Compute decrementer (and TB) frequency in cycles/sec */
785 cyclesPerUsec = ppc_tb_freq / 1000000;
786
787 /*
788 * Set the amount to refresh the decrementer by. This
789 * is the number of decrementer ticks it takes for
790 * 1/HZ seconds.
791 */
792 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
793
794#if 0
795 /* TEST CODE FOR ADJTIME */
796 tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
797 /* END OF TEST CODE */
798#endif
799
800 /*
801 * tb_ticks_per_sec = freq; would give better accuracy
802 * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
803 * that jiffies (and xtime) will match the time returned
804 * by do_gettimeofday.
805 */
806 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
807 tb_ticks_per_usec = cyclesPerUsec;
808 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
809 div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
810 tb_to_xs = divres.result_low;
811 setup_default_decr();
812}
813
814static void __init iSeries_progress(char * st, unsigned short code) 629static void __init iSeries_progress(char * st, unsigned short code)
815{ 630{
816 printk("Progress: [%04x] - %s\n", (unsigned)code, st); 631 printk("Progress: [%04x] - %s\n", (unsigned)code, st);
@@ -878,7 +693,7 @@ static void yield_shared_processor(void)
878 process_iSeries_events(); 693 process_iSeries_events();
879} 694}
880 695
881static int iseries_shared_idle(void) 696static void iseries_shared_idle(void)
882{ 697{
883 while (1) { 698 while (1) {
884 while (!need_resched() && !hvlpevent_is_pending()) { 699 while (!need_resched() && !hvlpevent_is_pending()) {
@@ -900,11 +715,9 @@ static int iseries_shared_idle(void)
900 715
901 schedule(); 716 schedule();
902 } 717 }
903
904 return 0;
905} 718}
906 719
907static int iseries_dedicated_idle(void) 720static void iseries_dedicated_idle(void)
908{ 721{
909 long oldval; 722 long oldval;
910 723
@@ -934,44 +747,252 @@ static int iseries_dedicated_idle(void)
934 ppc64_runlatch_on(); 747 ppc64_runlatch_on();
935 schedule(); 748 schedule();
936 } 749 }
937
938 return 0;
939} 750}
940 751
941#ifndef CONFIG_PCI 752#ifndef CONFIG_PCI
942void __init iSeries_init_IRQ(void) { } 753void __init iSeries_init_IRQ(void) { }
943#endif 754#endif
944 755
945void __init iSeries_early_setup(void) 756static int __init iseries_probe(int platform)
946{ 757{
947 iSeries_fixup_klimit(); 758 return PLATFORM_ISERIES_LPAR == platform;
759}
948 760
949 ppc_md.setup_arch = iSeries_setup_arch; 761struct machdep_calls __initdata iseries_md = {
950 ppc_md.get_cpuinfo = iSeries_get_cpuinfo; 762 .setup_arch = iSeries_setup_arch,
951 ppc_md.init_IRQ = iSeries_init_IRQ; 763 .show_cpuinfo = iSeries_show_cpuinfo,
952 ppc_md.get_irq = iSeries_get_irq; 764 .init_IRQ = iSeries_init_IRQ,
953 ppc_md.init_early = iSeries_init_early, 765 .get_irq = iSeries_get_irq,
766 .init_early = iSeries_init_early,
767 .pcibios_fixup = iSeries_pci_final_fixup,
768 .restart = iSeries_restart,
769 .power_off = iSeries_power_off,
770 .halt = iSeries_halt,
771 .get_boot_time = iSeries_get_boot_time,
772 .set_rtc_time = iSeries_set_rtc_time,
773 .get_rtc_time = iSeries_get_rtc_time,
774 .calibrate_decr = generic_calibrate_decr,
775 .progress = iSeries_progress,
776 .probe = iseries_probe,
777 /* XXX Implement enable_pmcs for iSeries */
778};
954 779
955 ppc_md.pcibios_fixup = iSeries_pci_final_fixup; 780struct blob {
781 unsigned char data[PAGE_SIZE];
782 unsigned long next;
783};
956 784
957 ppc_md.restart = iSeries_restart; 785struct iseries_flat_dt {
958 ppc_md.power_off = iSeries_power_off; 786 struct boot_param_header header;
959 ppc_md.halt = iSeries_halt; 787 u64 reserve_map[2];
788 struct blob dt;
789 struct blob strings;
790};
960 791
961 ppc_md.get_boot_time = iSeries_get_boot_time; 792struct iseries_flat_dt iseries_dt;
962 ppc_md.set_rtc_time = iSeries_set_rtc_time;
963 ppc_md.get_rtc_time = iSeries_get_rtc_time;
964 ppc_md.calibrate_decr = iSeries_calibrate_decr;
965 ppc_md.progress = iSeries_progress;
966 793
967 /* XXX Implement enable_pmcs for iSeries */ 794void dt_init(struct iseries_flat_dt *dt)
795{
796 dt->header.off_mem_rsvmap =
797 offsetof(struct iseries_flat_dt, reserve_map);
798 dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt);
799 dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings);
800 dt->header.totalsize = sizeof(struct iseries_flat_dt);
801 dt->header.dt_strings_size = sizeof(struct blob);
968 802
969 if (get_paca()->lppaca.shared_proc) { 803 /* There is no notion of hardware cpu id on iSeries */
970 ppc_md.idle_loop = iseries_shared_idle; 804 dt->header.boot_cpuid_phys = smp_processor_id();
971 printk(KERN_INFO "Using shared processor idle loop\n"); 805
972 } else { 806 dt->dt.next = (unsigned long)&dt->dt.data;
973 ppc_md.idle_loop = iseries_dedicated_idle; 807 dt->strings.next = (unsigned long)&dt->strings.data;
974 printk(KERN_INFO "Using dedicated idle loop\n"); 808
809 dt->header.magic = OF_DT_HEADER;
810 dt->header.version = 0x10;
811 dt->header.last_comp_version = 0x10;
812
813 dt->reserve_map[0] = 0;
814 dt->reserve_map[1] = 0;
815}
816
817void dt_check_blob(struct blob *b)
818{
819 if (b->next >= (unsigned long)&b->next) {
820 DBG("Ran out of space in flat device tree blob!\n");
821 BUG();
822 }
823}
824
825void dt_push_u32(struct iseries_flat_dt *dt, u32 value)
826{
827 *((u32*)dt->dt.next) = value;
828 dt->dt.next += sizeof(u32);
829
830 dt_check_blob(&dt->dt);
831}
832
833void dt_push_u64(struct iseries_flat_dt *dt, u64 value)
834{
835 *((u64*)dt->dt.next) = value;
836 dt->dt.next += sizeof(u64);
837
838 dt_check_blob(&dt->dt);
839}
840
841unsigned long dt_push_bytes(struct blob *blob, char *data, int len)
842{
843 unsigned long start = blob->next - (unsigned long)blob->data;
844
845 memcpy((char *)blob->next, data, len);
846 blob->next = _ALIGN(blob->next + len, 4);
847
848 dt_check_blob(blob);
849
850 return start;
851}
852
853void dt_start_node(struct iseries_flat_dt *dt, char *name)
854{
855 dt_push_u32(dt, OF_DT_BEGIN_NODE);
856 dt_push_bytes(&dt->dt, name, strlen(name) + 1);
857}
858
859#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
860
861void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len)
862{
863 unsigned long offset;
864
865 dt_push_u32(dt, OF_DT_PROP);
866
867 /* Length of the data */
868 dt_push_u32(dt, len);
869
870 /* Put the property name in the string blob. */
871 offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1);
872
873 /* The offset of the properties name in the string blob. */
874 dt_push_u32(dt, (u32)offset);
875
876 /* The actual data. */
877 dt_push_bytes(&dt->dt, data, len);
878}
879
880void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data)
881{
882 dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for NULL */
883}
884
885void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data)
886{
887 dt_prop(dt, name, (char *)&data, sizeof(u32));
888}
889
890void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data)
891{
892 dt_prop(dt, name, (char *)&data, sizeof(u64));
893}
894
895void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
896{
897 dt_prop(dt, name, (char *)data, sizeof(u64) * n);
898}
899
900void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
901{
902 dt_prop(dt, name, NULL, 0);
903}
904
905void dt_cpus(struct iseries_flat_dt *dt)
906{
907 unsigned char buf[32];
908 unsigned char *p;
909 unsigned int i, index;
910 struct IoHriProcessorVpd *d;
911
912 /* yuck */
913 snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
914 p = strchr(buf, ' ');
915 if (!p) p = buf + strlen(buf);
916
917 dt_start_node(dt, "cpus");
918 dt_prop_u32(dt, "#address-cells", 1);
919 dt_prop_u32(dt, "#size-cells", 0);
920
921 for (i = 0; i < NR_CPUS; i++) {
922 if (paca[i].lppaca.dyn_proc_status >= 2)
923 continue;
924
925 snprintf(p, 32 - (p - buf), "@%d", i);
926 dt_start_node(dt, buf);
927
928 dt_prop_str(dt, "device_type", "cpu");
929
930 index = paca[i].lppaca.dyn_hv_phys_proc_index;
931 d = &xIoHriProcessorVpd[index];
932
933 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
934 dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
935
936 dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
937 dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
938
939 /* magic conversions to Hz copied from old code */
940 dt_prop_u32(dt, "clock-frequency",
941 ((1UL << 34) * 1000000) / d->xProcFreq);
942 dt_prop_u32(dt, "timebase-frequency",
943 ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
944
945 dt_prop_u32(dt, "reg", i);
946
947 dt_end_node(dt);
975 } 948 }
949
950 dt_end_node(dt);
951}
952
953void build_flat_dt(struct iseries_flat_dt *dt)
954{
955 u64 tmp[2];
956
957 dt_init(dt);
958
959 dt_start_node(dt, "");
960
961 dt_prop_u32(dt, "#address-cells", 2);
962 dt_prop_u32(dt, "#size-cells", 2);
963
964 /* /memory */
965 dt_start_node(dt, "memory@0");
966 dt_prop_str(dt, "name", "memory");
967 dt_prop_str(dt, "device_type", "memory");
968 tmp[0] = 0;
969 tmp[1] = systemcfg->physicalMemorySize;
970 dt_prop_u64_list(dt, "reg", tmp, 2);
971 dt_end_node(dt);
972
973 /* /chosen */
974 dt_start_node(dt, "chosen");
975 dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR);
976 dt_end_node(dt);
977
978 dt_cpus(dt);
979
980 dt_end_node(dt);
981
982 dt_push_u32(dt, OF_DT_END);
976} 983}
977 984
985void * __init iSeries_early_setup(void)
986{
987 iSeries_fixup_klimit();
988
989 /*
990 * Initialize the table which translate Linux physical addresses to
991 * AS/400 absolute addresses
992 */
993 build_iSeries_Memory_Map();
994
995 build_flat_dt(&iseries_dt);
996
997 return (void *) __pa(&iseries_dt);
998}
diff --git a/arch/ppc64/kernel/iSeries_setup.h b/arch/powerpc/platforms/iseries/setup.h
index c6eb29a245ac..5213044ec411 100644
--- a/arch/ppc64/kernel/iSeries_setup.h
+++ b/arch/powerpc/platforms/iseries/setup.h
@@ -2,8 +2,6 @@
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> 2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> 3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 * 4 *
5 * Module name: as400_setup.h
6 *
7 * Description: 5 * Description:
8 * Architecture- / platform-specific boot-time initialization code for 6 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and 7 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
@@ -19,7 +17,7 @@
19#ifndef __ISERIES_SETUP_H__ 17#ifndef __ISERIES_SETUP_H__
20#define __ISERIES_SETUP_H__ 18#define __ISERIES_SETUP_H__
21 19
22extern void iSeries_get_boot_time(struct rtc_time *tm); 20extern unsigned long iSeries_get_boot_time(void);
23extern int iSeries_set_rtc_time(struct rtc_time *tm); 21extern int iSeries_set_rtc_time(struct rtc_time *tm);
24extern void iSeries_get_rtc_time(struct rtc_time *tm); 22extern void iSeries_get_rtc_time(struct rtc_time *tm);
25 23
diff --git a/arch/ppc64/kernel/iSeries_smp.c b/arch/powerpc/platforms/iseries/smp.c
index f74386e31638..f720916682f6 100644
--- a/arch/ppc64/kernel/iSeries_smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -47,17 +47,17 @@
47 47
48static unsigned long iSeries_smp_message[NR_CPUS]; 48static unsigned long iSeries_smp_message[NR_CPUS];
49 49
50void iSeries_smp_message_recv( struct pt_regs * regs ) 50void iSeries_smp_message_recv(struct pt_regs *regs)
51{ 51{
52 int cpu = smp_processor_id(); 52 int cpu = smp_processor_id();
53 int msg; 53 int msg;
54 54
55 if ( num_online_cpus() < 2 ) 55 if (num_online_cpus() < 2)
56 return; 56 return;
57 57
58 for ( msg = 0; msg < 4; ++msg ) 58 for (msg = 0; msg < 4; msg++)
59 if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) ) 59 if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
60 smp_message_recv( msg, regs ); 60 smp_message_recv(msg, regs);
61} 61}
62 62
63static inline void smp_iSeries_do_message(int cpu, int msg) 63static inline void smp_iSeries_do_message(int cpu, int msg)
@@ -74,48 +74,22 @@ static void smp_iSeries_message_pass(int target, int msg)
74 smp_iSeries_do_message(target, msg); 74 smp_iSeries_do_message(target, msg);
75 else { 75 else {
76 for_each_online_cpu(i) { 76 for_each_online_cpu(i) {
77 if (target == MSG_ALL_BUT_SELF 77 if ((target == MSG_ALL_BUT_SELF) &&
78 && i == smp_processor_id()) 78 (i == smp_processor_id()))
79 continue; 79 continue;
80 smp_iSeries_do_message(i, msg); 80 smp_iSeries_do_message(i, msg);
81 } 81 }
82 } 82 }
83} 83}
84 84
85static int smp_iSeries_numProcs(void)
86{
87 unsigned np, i;
88
89 np = 0;
90 for (i=0; i < NR_CPUS; ++i) {
91 if (paca[i].lppaca.dyn_proc_status < 2) {
92 cpu_set(i, cpu_possible_map);
93 cpu_set(i, cpu_present_map);
94 cpu_set(i, cpu_sibling_map[i]);
95 ++np;
96 }
97 }
98 return np;
99}
100
101static int smp_iSeries_probe(void) 85static int smp_iSeries_probe(void)
102{ 86{
103 unsigned i; 87 return cpus_weight(cpu_possible_map);
104 unsigned np = 0;
105
106 for (i=0; i < NR_CPUS; ++i) {
107 if (paca[i].lppaca.dyn_proc_status < 2) {
108 /*paca[i].active = 1;*/
109 ++np;
110 }
111 }
112
113 return np;
114} 88}
115 89
116static void smp_iSeries_kick_cpu(int nr) 90static void smp_iSeries_kick_cpu(int nr)
117{ 91{
118 BUG_ON(nr < 0 || nr >= NR_CPUS); 92 BUG_ON((nr < 0) || (nr >= NR_CPUS));
119 93
120 /* Verify that our partition has a processor nr */ 94 /* Verify that our partition has a processor nr */
121 if (paca[nr].lppaca.dyn_proc_status >= 2) 95 if (paca[nr].lppaca.dyn_proc_status >= 2)
@@ -144,6 +118,4 @@ static struct smp_ops_t iSeries_smp_ops = {
144void __init smp_init_iSeries(void) 118void __init smp_init_iSeries(void)
145{ 119{
146 smp_ops = &iSeries_smp_ops; 120 smp_ops = &iSeries_smp_ops;
147 systemcfg->processorCount = smp_iSeries_numProcs();
148} 121}
149
diff --git a/include/asm-ppc64/iSeries/ItSpCommArea.h b/arch/powerpc/platforms/iseries/spcomm_area.h
index 5535f8271c9f..6e3b685115c9 100644
--- a/include/asm-ppc64/iSeries/ItSpCommArea.h
+++ b/arch/powerpc/platforms/iseries/spcomm_area.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItSpCommArea.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -17,8 +16,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19 18
20#ifndef _ITSPCOMMAREA_H 19#ifndef _ISERIES_SPCOMM_AREA_H
21#define _ITSPCOMMAREA_H 20#define _ISERIES_SPCOMM_AREA_H
22 21
23 22
24struct SpCommArea { 23struct SpCommArea {
@@ -34,4 +33,4 @@ struct SpCommArea {
34 33
35extern struct SpCommArea xSpCommArea; 34extern struct SpCommArea xSpCommArea;
36 35
37#endif /* _ITSPCOMMAREA_H */ 36#endif /* _ISERIES_SPCOMM_AREA_H */
diff --git a/arch/ppc64/kernel/iSeries_vio.c b/arch/powerpc/platforms/iseries/vio.c
index 6b754b0c8344..c0f7d2e9153f 100644
--- a/arch/ppc64/kernel/iSeries_vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -14,6 +14,7 @@
14 14
15#include <asm/vio.h> 15#include <asm/vio.h>
16#include <asm/iommu.h> 16#include <asm/iommu.h>
17#include <asm/tce.h>
17#include <asm/abs_addr.h> 18#include <asm/abs_addr.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/iSeries/vio.h> 20#include <asm/iSeries/vio.h>
diff --git a/arch/ppc64/kernel/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 2a6c4f01c45e..c0c767bd37f1 100644
--- a/arch/ppc64/kernel/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -1,5 +1,4 @@
1/* -*- linux-c -*- 1/* -*- linux-c -*-
2 * arch/ppc64/kernel/viopath.c
3 * 2 *
4 * iSeries Virtual I/O Message Path code 3 * iSeries Virtual I/O Message Path code
5 * 4 *
@@ -7,7 +6,7 @@
7 * Ryan Arnold <ryanarn@us.ibm.com> 6 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com> 7 * Colin Devilbiss <devilbis@us.ibm.com>
9 * 8 *
10 * (C) Copyright 2000-2003 IBM Corporation 9 * (C) Copyright 2000-2005 IBM Corporation
11 * 10 *
12 * This code is used by the iSeries virtual disk, cd, 11 * This code is used by the iSeries virtual disk, cd,
13 * tape, and console to communicate with OS/400 in another 12 * tape, and console to communicate with OS/400 in another
diff --git a/include/asm-ppc64/iSeries/ItVpdAreas.h b/arch/powerpc/platforms/iseries/vpd_areas.h
index 71b3ad24f95a..601e6dd860ed 100644
--- a/include/asm-ppc64/iSeries/ItVpdAreas.h
+++ b/arch/powerpc/platforms/iseries/vpd_areas.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItVpdAreas.h
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,8 +15,8 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 17 */
19#ifndef _ITVPDAREAS_H 18#ifndef _ISERIES_VPD_AREAS_H
20#define _ITVPDAREAS_H 19#define _ISERIES_VPD_AREAS_H
21 20
22/* 21/*
23 * This file defines the address and length of all of the VPD area passed to 22 * This file defines the address and length of all of the VPD area passed to
@@ -86,4 +85,4 @@ struct ItVpdAreas {
86 85
87extern struct ItVpdAreas itVpdAreas; 86extern struct ItVpdAreas itVpdAreas;
88 87
89#endif /* _ITVPDAREAS_H */ 88#endif /* _ISERIES_VPD_AREAS_H */
diff --git a/arch/ppc64/kernel/iSeries_VpdInfo.c b/arch/powerpc/platforms/iseries/vpdinfo.c
index 5d921792571f..9c318849dee7 100644
--- a/arch/ppc64/kernel/iSeries_VpdInfo.c
+++ b/arch/powerpc/platforms/iseries/vpdinfo.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb 2 2001.
3 *
4 * This code gets the card location of the hardware 2 * This code gets the card location of the hardware
5 * Copyright (C) 2001 <Allan H Trautman> <IBM Corp> 3 * Copyright (C) 2001 <Allan H Trautman> <IBM Corp>
6 * Copyright (C) 2005 Stephen Rothwel, IBM Corp 4 * Copyright (C) 2005 Stephen Rothwel, IBM Corp
@@ -29,12 +27,15 @@
29#include <linux/init.h> 27#include <linux/init.h>
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/pci.h> 29#include <linux/pci.h>
30
32#include <asm/types.h> 31#include <asm/types.h>
33#include <asm/resource.h> 32#include <asm/resource.h>
34 33#include <asm/abs_addr.h>
35#include <asm/iSeries/HvCallPci.h> 34#include <asm/pci-bridge.h>
36#include <asm/iSeries/HvTypes.h> 35#include <asm/iSeries/HvTypes.h>
37#include <asm/iSeries/iSeries_pci.h> 36
37#include "pci.h"
38#include "call_pci.h"
38 39
39/* 40/*
40 * Size of Bus VPD data 41 * Size of Bus VPD data
@@ -214,7 +215,7 @@ static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent,
214 printk("PCI: Bus VPD Buffer allocation failure.\n"); 215 printk("PCI: Bus VPD Buffer allocation failure.\n");
215 return; 216 return;
216 } 217 }
217 BusVpdLen = HvCallPci_getBusVpd(bus, ISERIES_HV_ADDR(BusVpdPtr), 218 BusVpdLen = HvCallPci_getBusVpd(bus, iseries_hv_addr(BusVpdPtr),
218 BUS_VPDSIZE); 219 BUS_VPDSIZE);
219 if (BusVpdLen == 0) { 220 if (BusVpdLen == 0) {
220 printk("PCI: Bus VPD Buffer zero length.\n"); 221 printk("PCI: Bus VPD Buffer zero length.\n");
@@ -242,7 +243,8 @@ out_free:
242 */ 243 */
243void __init iSeries_Device_Information(struct pci_dev *PciDev, int count) 244void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
244{ 245{
245 struct iSeries_Device_Node *DevNode = PciDev->sysdata; 246 struct device_node *DevNode = PciDev->sysdata;
247 struct pci_dn *pdn;
246 u16 bus; 248 u16 bus;
247 u8 frame; 249 u8 frame;
248 char card[4]; 250 char card[4];
@@ -255,8 +257,9 @@ void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
255 return; 257 return;
256 } 258 }
257 259
258 bus = ISERIES_BUS(DevNode); 260 pdn = PCI_DN(DevNode);
259 subbus = ISERIES_SUBBUS(DevNode); 261 bus = pdn->busno;
262 subbus = pdn->bussubno;
260 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus), 263 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
261 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)); 264 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
262 iSeries_Get_Location_Code(bus, agent, &frame, card); 265 iSeries_Get_Location_Code(bus, agent, &frame, card);
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
new file mode 100644
index 000000000000..4369676f1d54
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -0,0 +1,8 @@
1obj-y += pic.o setup.o time.o feature.o pci.o \
2 sleep.o low_i2c.o cache.o
3obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
4obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq.o
5obj-$(CONFIG_NVRAM) += nvram.o
6# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
7obj-$(CONFIG_PPC64) += nvram.o
8obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
new file mode 100644
index 000000000000..8be2f7d071f0
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -0,0 +1,202 @@
1/*
2 * Miscellaneous procedures for dealing with the PowerMac hardware.
3 * Contains support for the backlight.
4 *
5 * Copyright (C) 2000 Benjamin Herrenschmidt
6 *
7 */
8
9#include <linux/config.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/stddef.h>
13#include <linux/reboot.h>
14#include <linux/nvram.h>
15#include <linux/console.h>
16#include <asm/sections.h>
17#include <asm/ptrace.h>
18#include <asm/io.h>
19#include <asm/pgtable.h>
20#include <asm/system.h>
21#include <asm/prom.h>
22#include <asm/machdep.h>
23#include <asm/nvram.h>
24#include <asm/backlight.h>
25
26#include <linux/adb.h>
27#include <linux/pmu.h>
28
29static struct backlight_controller *backlighter;
30static void* backlighter_data;
31static int backlight_autosave;
32static int backlight_level = BACKLIGHT_MAX;
33static int backlight_enabled = 1;
34static int backlight_req_level = -1;
35static int backlight_req_enable = -1;
36
37static void backlight_callback(void *);
38static DECLARE_WORK(backlight_work, backlight_callback, NULL);
39
40void register_backlight_controller(struct backlight_controller *ctrler,
41 void *data, char *type)
42{
43 struct device_node* bk_node;
44 char *prop;
45 int valid = 0;
46
47 /* There's already a matching controller, bail out */
48 if (backlighter != NULL)
49 return;
50
51 bk_node = find_devices("backlight");
52
53#ifdef CONFIG_ADB_PMU
54 /* Special case for the old PowerBook since I can't test on it */
55 backlight_autosave = machine_is_compatible("AAPL,3400/2400")
56 || machine_is_compatible("AAPL,3500");
57 if ((backlight_autosave
58 || machine_is_compatible("AAPL,PowerBook1998")
59 || machine_is_compatible("PowerBook1,1"))
60 && !strcmp(type, "pmu"))
61 valid = 1;
62#endif
63 if (bk_node) {
64 prop = get_property(bk_node, "backlight-control", NULL);
65 if (prop && !strncmp(prop, type, strlen(type)))
66 valid = 1;
67 }
68 if (!valid)
69 return;
70 backlighter = ctrler;
71 backlighter_data = data;
72
73 if (bk_node && !backlight_autosave)
74 prop = get_property(bk_node, "bklt", NULL);
75 else
76 prop = NULL;
77 if (prop) {
78 backlight_level = ((*prop)+1) >> 1;
79 if (backlight_level > BACKLIGHT_MAX)
80 backlight_level = BACKLIGHT_MAX;
81 }
82
83#ifdef CONFIG_ADB_PMU
84 if (backlight_autosave) {
85 struct adb_request req;
86 pmu_request(&req, NULL, 2, 0xd9, 0);
87 while (!req.complete)
88 pmu_poll();
89 backlight_level = req.reply[0] >> 4;
90 }
91#endif
92 acquire_console_sem();
93 if (!backlighter->set_enable(1, backlight_level, data))
94 backlight_enabled = 1;
95 release_console_sem();
96
97 printk(KERN_INFO "Registered \"%s\" backlight controller,"
98 "level: %d/15\n", type, backlight_level);
99}
100EXPORT_SYMBOL(register_backlight_controller);
101
102void unregister_backlight_controller(struct backlight_controller
103 *ctrler, void *data)
104{
105 /* We keep the current backlight level (for now) */
106 if (ctrler == backlighter && data == backlighter_data)
107 backlighter = NULL;
108}
109EXPORT_SYMBOL(unregister_backlight_controller);
110
111static int __set_backlight_enable(int enable)
112{
113 int rc;
114
115 if (!backlighter)
116 return -ENODEV;
117 acquire_console_sem();
118 rc = backlighter->set_enable(enable, backlight_level,
119 backlighter_data);
120 if (!rc)
121 backlight_enabled = enable;
122 release_console_sem();
123 return rc;
124}
125int set_backlight_enable(int enable)
126{
127 if (!backlighter)
128 return -ENODEV;
129 backlight_req_enable = enable;
130 schedule_work(&backlight_work);
131 return 0;
132}
133
134EXPORT_SYMBOL(set_backlight_enable);
135
136int get_backlight_enable(void)
137{
138 if (!backlighter)
139 return -ENODEV;
140 return backlight_enabled;
141}
142EXPORT_SYMBOL(get_backlight_enable);
143
144static int __set_backlight_level(int level)
145{
146 int rc = 0;
147
148 if (!backlighter)
149 return -ENODEV;
150 if (level < BACKLIGHT_MIN)
151 level = BACKLIGHT_OFF;
152 if (level > BACKLIGHT_MAX)
153 level = BACKLIGHT_MAX;
154 acquire_console_sem();
155 if (backlight_enabled)
156 rc = backlighter->set_level(level, backlighter_data);
157 if (!rc)
158 backlight_level = level;
159 release_console_sem();
160 if (!rc && !backlight_autosave) {
161 level <<=1;
162 if (level & 0x10)
163 level |= 0x01;
164 // -- todo: save to property "bklt"
165 }
166 return rc;
167}
168int set_backlight_level(int level)
169{
170 if (!backlighter)
171 return -ENODEV;
172 backlight_req_level = level;
173 schedule_work(&backlight_work);
174 return 0;
175}
176
177EXPORT_SYMBOL(set_backlight_level);
178
179int get_backlight_level(void)
180{
181 if (!backlighter)
182 return -ENODEV;
183 return backlight_level;
184}
185EXPORT_SYMBOL(get_backlight_level);
186
187static void backlight_callback(void *dummy)
188{
189 int level, enable;
190
191 do {
192 level = backlight_req_level;
193 enable = backlight_req_enable;
194 mb();
195
196 if (level >= 0)
197 __set_backlight_level(level);
198 if (enable >= 0)
199 __set_backlight_enable(enable);
200 } while(cmpxchg(&backlight_req_level, level, -1) != level ||
201 cmpxchg(&backlight_req_enable, enable, -1) != enable);
202}
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
new file mode 100644
index 000000000000..fb977de6b704
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -0,0 +1,359 @@
1/*
2 * This file contains low-level cache management functions
3 * used for sleep and CPU speed changes on Apple machines.
4 * (In fact the only thing that is Apple-specific is that we assume
5 * that we can read from ROM at physical address 0xfff00000.)
6 *
7 * Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
8 * Benjamin Herrenschmidt (benh@kernel.crashing.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
17#include <linux/config.h>
18#include <asm/processor.h>
19#include <asm/ppc_asm.h>
20#include <asm/cputable.h>
21
22/*
23 * Flush and disable all data caches (dL1, L2, L3). This is used
24 * when going to sleep, when doing a PMU based cpufreq transition,
25 * or when "offlining" a CPU on SMP machines. This code is over
26 * paranoid, but I've had enough issues with various CPU revs and
 * bugs that I decided it was worth being over cautious
28 */
29
_GLOBAL(flush_disable_caches)
#ifndef CONFIG_6xx
	blr
#else
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1

/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mflr	r10

	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop DST streams */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Stop DPM */
	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Disp-flush L1. We have a weird problem here that I never
	 * totally figured out. On 750FX, using the ROM for the flush
	 * results in a non-working flush. We use that workaround for
	 * now until I finally understand what's going on. --BenH
	 */

	/* ROM base by default */
	lis	r4,0xfff0
	mfpvr	r3
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000
	bne+	1f
	/* RAM base on 750FX */
	li	r4,0
	/* NOTE(review): the base address chosen above appears to be
	 * overwritten by the "li r4,0x4000" below, so the flush loop
	 * always walks up from a low address -- confirm intent against
	 * mainline history.
	 */
1:	li	r4,0x4000
	mtctr	r4
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* Disable / invalidate / enable L1 data */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	sync
	isync
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)
	mtspr	SPRN_HID0,r3
	sync

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r3
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	/* disp-flush L2. The interesting thing here is that the L2 can be
	 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
	 * but that is probably fine. We disp-flush over 4Mb to be safe
	 */
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* now disable L2 */
	rlwinm	r5,r5,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r5
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync

	/* Wait for the invalidation to complete */
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31
	bne	1b

	/* Clear L2I */
	xoris	r4,r4,L2CR_L2I@h
	sync
	mtspr	SPRN_L2CR,r4
	sync

	/* now disable the L1 data cache */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0
	sync
	isync

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11		/* Turn back HID0[DPM] */
	mtspr	SPRN_HID0,r0
	sync

	/* restore DR and EE */
	sync
	mtmsr	r11
	isync

	mtlr	r10
	blr

/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few Mb to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
	 * it even with a big hammer...
	 */

	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwz	r0,0(r4)
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

	/* Flush and disable the L1 data cache */
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0	/* read from ROM for displacement flush */
	li	r4,0xfe		/* start with only way 0 unlocked */
	li	r5,128		/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)	/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30	/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff		/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

	/* Flush the L2 cache using the hardware assist */
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0		/* check if it is enabled first */
	bge	4f
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
	b	2f
	/* When disabling/locking L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r0	/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0	/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR	/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r3	/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	oris	r4,r3,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync
1:	mfspr	r4,SPRN_L2CR
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0		/* check if it is enabled */
	bge	6f
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0	/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0	/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR	/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3	/* disable the L3 cache */
	sync
	ori	r4,r3,L3CR_L3I
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0	/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11		/* restore DR and EE */
	isync
	blr
#endif	/* CONFIG_6xx */
diff --git a/arch/powerpc/platforms/powermac/cpufreq.c b/arch/powerpc/platforms/powermac/cpufreq.c
new file mode 100644
index 000000000000..c47f8b69725c
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/cpufreq.c
@@ -0,0 +1,726 @@
1/*
2 * arch/ppc/platforms/pmac_cpufreq.c
3 *
4 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * TODO: Need a big cleanup here. Basically, we need to have different
12 * cpufreq_driver structures for the different type of HW instead of the
13 * current mess. We also need to better deal with the detection of the
14 * type of machine.
15 *
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/kernel.h>
23#include <linux/delay.h>
24#include <linux/sched.h>
25#include <linux/adb.h>
26#include <linux/pmu.h>
27#include <linux/slab.h>
28#include <linux/cpufreq.h>
29#include <linux/init.h>
30#include <linux/sysdev.h>
31#include <linux/i2c.h>
32#include <linux/hardirq.h>
33#include <asm/prom.h>
34#include <asm/machdep.h>
35#include <asm/irq.h>
36#include <asm/pmac_feature.h>
37#include <asm/mmu_context.h>
38#include <asm/sections.h>
39#include <asm/cputable.h>
40#include <asm/time.h>
41#include <asm/system.h>
42#include <asm/mpic.h>
43#include <asm/keylargo.h>
44
45/* WARNING !!! This will cause calibrate_delay() to be called,
46 * but this is an __init function ! So you MUST go edit
47 * init/main.c to make it non-init before enabling DEBUG_FREQ
48 */
49#undef DEBUG_FREQ
50
51/*
52 * There is a problem with the core cpufreq code on SMP kernels,
53 * it won't recalculate the Bogomips properly
54 */
55#ifdef CONFIG_SMP
56#warning "WARNING, CPUFREQ not recommended on SMP kernels"
57#endif
58
59extern void low_choose_7447a_dfs(int dfs);
60extern void low_choose_750fx_pll(int pll);
61extern void low_sleep_handler(void);
62
63/*
64 * Currently, PowerMac cpufreq supports only high & low frequencies
65 * that are set by the firmware
66 */
67static unsigned int low_freq;
68static unsigned int hi_freq;
69static unsigned int cur_freq;
70static unsigned int sleep_freq;
71
72/*
 * Different models use different mechanisms to switch the frequency
74 */
75static int (*set_speed_proc)(int low_speed);
76static unsigned int (*get_speed_proc)(void);
77
78/*
79 * Some definitions used by the various speedprocs
80 */
81static u32 voltage_gpio;
82static u32 frequency_gpio;
83static u32 slew_done_gpio;
84static int no_schedule;
85static int has_cpu_l2lve;
86static int is_pmu_based;
87
88/* There are only two frequency states for each processor. Values
89 * are in kHz for the time being.
90 */
91#define CPUFREQ_HIGH 0
92#define CPUFREQ_LOW 1
93
/* Two-entry frequency table; .frequency is filled in by pmac_cpufreq_setup() */
static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
	{CPUFREQ_HIGH, 		0},
	{CPUFREQ_LOW,		0},
	{0,			CPUFREQ_TABLE_END},
};
99
/* sysfs attributes exported for this driver by the cpufreq core */
static struct freq_attr* pmac_cpu_freqs_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
104
105static inline void local_delay(unsigned long ms)
106{
107 if (no_schedule)
108 mdelay(ms);
109 else
110 msleep(ms);
111}
112
#ifdef DEBUG_FREQ
static inline void debug_calc_bogomips(void)
{
	/*
	 * Recalculate and display bogomips for debugging, then restore
	 * loops_per_jiffy so the cpufreq core's own calculation is
	 * unaffected.
	 */
	extern void calibrate_delay(void);

	unsigned long saved_lpj = loops_per_jiffy;

	calibrate_delay();
	loops_per_jiffy = saved_lpj;
}
#endif /* DEBUG_FREQ */
127
/* Switch CPU speed under 750FX CPU control (low_choose_750fx_pll()).
 *
 * Ordering is hardware-critical: the core voltage is raised before a
 * speed increase and lowered only after a speed decrease, and the L2
 * low-voltage bit in HID2 is adjusted to match on parts that have it.
 */
static int cpu_750fx_cpu_speed(int low_speed)
{
	u32 hid2;

	if (low_speed == 0) {
		/* ramping up, set voltage first */
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
		/* Make sure we sleep for at least 1ms */
		local_delay(10);

		/* tweak L2 for high voltage */
		if (has_cpu_l2lve) {
			hid2 = mfspr(SPRN_HID2);
			hid2 &= ~0x2000;
			mtspr(SPRN_HID2, hid2);
		}
	}
#ifdef CONFIG_6xx
	low_choose_750fx_pll(low_speed);
#endif
	if (low_speed == 1) {
		/* tweak L2 for low voltage */
		if (has_cpu_l2lve) {
			hid2 = mfspr(SPRN_HID2);
			hid2 |= 0x2000;
			mtspr(SPRN_HID2, hid2);
		}

		/* ramping down, set voltage last */
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
		local_delay(10);
	}

	return 0;
}
165
166static unsigned int cpu_750fx_get_cpu_speed(void)
167{
168 if (mfspr(SPRN_HID1) & HID1_PS)
169 return low_freq;
170 else
171 return hi_freq;
172}
173
/* Switch CPU speed using DFS (Dynamic Frequency Switching).
 *
 * Ordering is hardware-critical: voltage is raised before ramping up
 * and dropped only after ramping down.
 */
static int dfs_set_cpu_speed(int low_speed)
{
	if (low_speed == 0) {
		/* ramping up, set voltage first */
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
		/* Make sure we sleep for at least 1ms */
		local_delay(1);
	}

	/* set frequency */
#ifdef CONFIG_6xx
	low_choose_7447a_dfs(low_speed);
#endif
	/* let the frequency settle before touching the voltage */
	udelay(100);

	if (low_speed == 1) {
		/* ramping down, set voltage last */
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
		local_delay(1);
	}

	return 0;
}
198
199static unsigned int dfs_get_cpu_speed(void)
200{
201 if (mfspr(SPRN_HID1) & HID1_DFS)
202 return low_freq;
203 else
204 return hi_freq;
205}
206
207
/* Switch CPU speed using slewing GPIOs.
 *
 * Voltage is raised before a speed increase and lowered only after a
 * decrease; completion of the frequency slew is polled with a bounded
 * timeout.
 */
static int gpios_set_cpu_speed(int low_speed)
{
	int gpio, timeout = 0;

	/* If ramping up, set voltage first */
	if (low_speed == 0) {
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
		/* Delay is way too big but it's ok, we schedule */
		local_delay(10);
	}

	/* Set frequency */
	gpio = 	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
	/* Bit 0 of the frequency GPIO reads 1 at high speed (see
	 * pmac_cpufreq_init_MacRISC3); skip if already at the target */
	if (low_speed == ((gpio & 0x01) == 0))
		goto skip;

	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
			  low_speed ? 0x04 : 0x05);
	udelay(200);
	/* Poll the slew-done GPIO (bit 1), giving up after ~100 iterations */
	do {
		if (++timeout > 100)
			break;
		local_delay(1);
		gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
	} while((gpio & 0x02) == 0);
 skip:
	/* If ramping down, set voltage last */
	if (low_speed == 1) {
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
		/* Delay is way too big but it's ok, we schedule */
		local_delay(10);
	}

#ifdef DEBUG_FREQ
	debug_calc_bogomips();
#endif

	return 0;
}
249
/* Switch CPU speed under PMU control.
 *
 * The PMU applies the new speed when the CPU next sleeps, so after
 * sending the speed command we go through a full low-level sleep/wake
 * cycle (low_sleep_handler()).  Everything lost across that reset --
 * decrementer, FPU/AltiVec state, L2/L3 configuration, MMU context --
 * is saved and restored around it with all interrupt sources masked.
 */
static int pmu_set_cpu_speed(int low_speed)
{
	struct adb_request req;
	unsigned long save_l2cr;
	unsigned long save_l3cr;
	unsigned int pic_prio;
	unsigned long flags;

	preempt_disable();

#ifdef DEBUG_FREQ
	printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
#endif
	pmu_suspend();

	/* Disable all interrupt sources on openpic */
	pic_prio = mpic_cpu_get_priority();
	mpic_cpu_set_priority(0xf);

	/* Make sure the decrementer won't interrupt us */
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	/* Make sure any pending DEC interrupt occurring while we did
	 * the above didn't re-enable the DEC */
	mb();
	asm volatile("mtdec %0" : : "r" (0x7fffffff));

	/* We can now disable MSR_EE */
	local_irq_save(flags);

	/* Giveup the FPU & vec */
	enable_kernel_fp();

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */

	/* Save & disable L2 and L3 caches */
	save_l3cr = _get_L3CR();	/* (returns -1 if not available) */
	save_l2cr = _get_L2CR();	/* (returns -1 if not available) */

	/* Send the new speed command. My assumption is that this command
	 * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
	 */
	pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
	while (!req.complete)
		pmu_poll();

	/* Prepare the northbridge for the speed transition */
	pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1);

	/* Call low level code to backup CPU state and recover from
	 * hardware reset
	 */
	low_sleep_handler();

	/* Restore the northbridge */
	pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0);

	/* Restore L2 cache */
	if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
		_set_L2CR(save_l2cr);
	/* Restore L3 cache */
	if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
		_set_L3CR(save_l3cr);

	/* Restore userland MMU context */
	set_context(current->active_mm->context, current->active_mm->pgd);

#ifdef DEBUG_FREQ
	printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
#endif

	/* Restore low level PMU operations */
	pmu_unlock();

	/* Restore decrementer */
	wakeup_decrementer();

	/* Restore interrupts */
	mpic_cpu_set_priority(pic_prio);

	/* Let interrupts flow again ... */
	local_irq_restore(flags);

#ifdef DEBUG_FREQ
	debug_calc_bogomips();
#endif

	pmu_resume();

	preempt_enable();

	return 0;
}
347
/*
 * Core speed-switch helper.  Disables the L3 cache around a switch to
 * low speed and restores the saved L3CR on the way back up (the value
 * survives between calls in the static prev_l3cr).  Posts cpufreq
 * transition notifications when @notify is set.
 */
static int do_set_cpu_speed(int speed_mode, int notify)
{
	struct cpufreq_freqs freqs;
	unsigned long l3cr;
	/* L3CR saved across a high -> low -> high excursion */
	static unsigned long prev_l3cr;

	freqs.old = cur_freq;
	freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
	freqs.cpu = smp_processor_id();

	if (freqs.old == freqs.new)
		return 0;

	if (notify)
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	if (speed_mode == CPUFREQ_LOW &&
	    cpu_has_feature(CPU_FTR_L3CR)) {
		l3cr = _get_L3CR();
		if (l3cr & L3CR_L3E) {
			prev_l3cr = l3cr;
			_set_L3CR(0);
		}
	}
	set_speed_proc(speed_mode == CPUFREQ_LOW);
	if (speed_mode == CPUFREQ_HIGH &&
	    cpu_has_feature(CPU_FTR_L3CR)) {
		l3cr = _get_L3CR();
		if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
			_set_L3CR(prev_l3cr);
	}
	if (notify)
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;

	return 0;
}
384
/* cpufreq get() hook: report the cached current frequency (kHz). */
static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
{
	return cur_freq;
}
389
/* cpufreq verify() hook: clamp the policy to our two-entry table. */
static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
}
394
395static int pmac_cpufreq_target( struct cpufreq_policy *policy,
396 unsigned int target_freq,
397 unsigned int relation)
398{
399 unsigned int newstate = 0;
400
401 if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
402 target_freq, relation, &newstate))
403 return -EINVAL;
404
405 return do_set_cpu_speed(newstate, 1);
406}
407
408unsigned int pmac_get_one_cpufreq(int i)
409{
410 /* Supports only one CPU for now */
411 return (i == 0) ? cur_freq : 0;
412}
413
414static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
415{
416 if (policy->cpu != 0)
417 return -ENODEV;
418
419 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
420 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
421 policy->cur = cur_freq;
422
423 cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
424 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
425}
426
427static u32 read_gpio(struct device_node *np)
428{
429 u32 *reg = (u32 *)get_property(np, "reg", NULL);
430 u32 offset;
431
432 if (reg == NULL)
433 return 0;
434 /* That works for all keylargos but shall be fixed properly
435 * some day... The problem is that it seems we can't rely
436 * on the "reg" property of the GPIO nodes, they are either
437 * relative to the base of KeyLargo or to the base of the
438 * GPIO space, and the device-tree doesn't help.
439 */
440 offset = *reg;
441 if (offset < KEYLARGO_GPIO_LEVELS0)
442 offset += KEYLARGO_GPIO_LEVELS0;
443 return offset;
444}
445
446static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
447{
448 /* Ok, this could be made a bit smarter, but let's be robust for now. We
449 * always force a speed change to high speed before sleep, to make sure
450 * we have appropriate voltage and/or bus speed for the wakeup process,
451 * and to make sure our loops_per_jiffies are "good enough", that is will
452 * not cause too short delays if we sleep in low speed and wake in high
453 * speed..
454 */
455 no_schedule = 1;
456 sleep_freq = cur_freq;
457 if (cur_freq == low_freq && !is_pmu_based)
458 do_set_cpu_speed(CPUFREQ_HIGH, 0);
459 return 0;
460}
461
462static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
463{
464 /* If we resume, first check if we have a get() function */
465 if (get_speed_proc)
466 cur_freq = get_speed_proc();
467 else
468 cur_freq = 0;
469
470 /* We don't, hrm... we don't really know our speed here, best
471 * is that we force a switch to whatever it was, which is
472 * probably high speed due to our suspend() routine
473 */
474 do_set_cpu_speed(sleep_freq == low_freq ?
475 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
476
477 no_schedule = 0;
478 return 0;
479}
480
/* cpufreq driver operations for PowerMac; manages CPU 0 only. */
static struct cpufreq_driver pmac_cpufreq_driver = {
	.verify 	= pmac_cpufreq_verify,
	.target 	= pmac_cpufreq_target,
	.get		= pmac_cpufreq_get_speed,
	.init		= pmac_cpufreq_cpu_init,
	.suspend	= pmac_cpufreq_suspend,
	.resume		= pmac_cpufreq_resume,
	.flags		= CPUFREQ_PM_NO_WARN,
	.attr		= pmac_cpu_freqs_attr,
	.name		= "powermac",
	.owner		= THIS_MODULE,
};
493
494
495static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
496{
497 struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
498 "voltage-gpio");
499 struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
500 "frequency-gpio");
501 struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
502 "slewing-done");
503 u32 *value;
504
505 /*
506 * Check to see if it's GPIO driven or PMU only
507 *
508 * The way we extract the GPIO address is slightly hackish, but it
509 * works well enough for now. We need to abstract the whole GPIO
510 * stuff sooner or later anyway
511 */
512
513 if (volt_gpio_np)
514 voltage_gpio = read_gpio(volt_gpio_np);
515 if (freq_gpio_np)
516 frequency_gpio = read_gpio(freq_gpio_np);
517 if (slew_done_gpio_np)
518 slew_done_gpio = read_gpio(slew_done_gpio_np);
519
520 /* If we use the frequency GPIOs, calculate the min/max speeds based
521 * on the bus frequencies
522 */
523 if (frequency_gpio && slew_done_gpio) {
524 int lenp, rc;
525 u32 *freqs, *ratio;
526
527 freqs = (u32 *)get_property(cpunode, "bus-frequencies", &lenp);
528 lenp /= sizeof(u32);
529 if (freqs == NULL || lenp != 2) {
530 printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
531 return 1;
532 }
533 ratio = (u32 *)get_property(cpunode, "processor-to-bus-ratio*2", NULL);
534 if (ratio == NULL) {
535 printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
536 return 1;
537 }
538
539 /* Get the min/max bus frequencies */
540 low_freq = min(freqs[0], freqs[1]);
541 hi_freq = max(freqs[0], freqs[1]);
542
543 /* Grrrr.. It _seems_ that the device-tree is lying on the low bus
544 * frequency, it claims it to be around 84Mhz on some models while
545 * it appears to be approx. 101Mhz on all. Let's hack around here...
546 * fortunately, we don't need to be too precise
547 */
548 if (low_freq < 98000000)
549 low_freq = 101000000;
550
551 /* Convert those to CPU core clocks */
552 low_freq = (low_freq * (*ratio)) / 2000;
553 hi_freq = (hi_freq * (*ratio)) / 2000;
554
555 /* Now we get the frequencies, we read the GPIO to see what is out current
556 * speed
557 */
558 rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
559 cur_freq = (rc & 0x01) ? hi_freq : low_freq;
560
561 set_speed_proc = gpios_set_cpu_speed;
562 return 1;
563 }
564
565 /* If we use the PMU, look for the min & max frequencies in the
566 * device-tree
567 */
568 value = (u32 *)get_property(cpunode, "min-clock-frequency", NULL);
569 if (!value)
570 return 1;
571 low_freq = (*value) / 1000;
572 /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
573 * here */
574 if (low_freq < 100000)
575 low_freq *= 10;
576
577 value = (u32 *)get_property(cpunode, "max-clock-frequency", NULL);
578 if (!value)
579 return 1;
580 hi_freq = (*value) / 1000;
581 set_speed_proc = pmu_set_cpu_speed;
582 is_pmu_based = 1;
583
584 return 0;
585}
586
587static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
588{
589 struct device_node *volt_gpio_np;
590
591 if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
592 return 1;
593
594 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
595 if (volt_gpio_np)
596 voltage_gpio = read_gpio(volt_gpio_np);
597 if (!voltage_gpio){
598 printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
599 return 1;
600 }
601
602 /* OF only reports the high frequency */
603 hi_freq = cur_freq;
604 low_freq = cur_freq/2;
605
606 /* Read actual frequency from CPU */
607 cur_freq = dfs_get_cpu_speed();
608 set_speed_proc = dfs_set_cpu_speed;
609 get_speed_proc = dfs_get_cpu_speed;
610
611 return 0;
612}
613
614static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
615{
616 struct device_node *volt_gpio_np;
617 u32 pvr, *value;
618
619 if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
620 return 1;
621
622 hi_freq = cur_freq;
623 value = (u32 *)get_property(cpunode, "reduced-clock-frequency", NULL);
624 if (!value)
625 return 1;
626 low_freq = (*value) / 1000;
627
628 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
629 if (volt_gpio_np)
630 voltage_gpio = read_gpio(volt_gpio_np);
631
632 pvr = mfspr(SPRN_PVR);
633 has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
634
635 set_speed_proc = cpu_750fx_cpu_speed;
636 get_speed_proc = cpu_750fx_get_cpu_speed;
637 cur_freq = cpu_750fx_get_cpu_speed();
638
639 return 0;
640}
641
/* Currently, we support the following machines:
 *
 * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
 * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
 * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
 * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
 * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
 * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
 * - Recent MacRISC3 laptops
 * - All new machines with 7447A CPUs
 */
/*
 * Driver setup: identify the machine, pick the matching speed-switch
 * method and frequency pair, then register with the cpufreq core.
 * NOTE: branch order below matters -- 7447A-based MacRISC3 machines
 * must be recognized before the generic MacRISC3 case.
 */
static int __init pmac_cpufreq_setup(void)
{
	struct device_node	*cpunode;
	u32			*value;

	/* "nocpufreq" on the command line disables the driver entirely */
	if (strstr(cmd_line, "nocpufreq"))
		return 0;

	/* Assume only one CPU */
	cpunode = find_type_devices("cpu");
	if (!cpunode)
		goto out;

	/* Get current cpu clock freq */
	value = (u32 *)get_property(cpunode, "clock-frequency", NULL);
	if (!value)
		goto out;
	cur_freq = (*value) / 1000;

	/*  Check for 7447A based MacRISC3 */
	if (machine_is_compatible("MacRISC3") &&
	    get_property(cpunode, "dynamic-power-step", NULL) &&
	    PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
		pmac_cpufreq_init_7447A(cpunode);
	/* Check for other MacRISC3 machines */
	} else if (machine_is_compatible("PowerBook3,4") ||
		   machine_is_compatible("PowerBook3,5") ||
		   machine_is_compatible("MacRISC3")) {
		pmac_cpufreq_init_MacRISC3(cpunode);
	/* Else check for iBook2 500/600 */
	} else if (machine_is_compatible("PowerBook4,1")) {
		hi_freq = cur_freq;
		low_freq = 400000;
		set_speed_proc = pmu_set_cpu_speed;
		is_pmu_based = 1;
	}
	/* Else check for TiPb 550 */
	else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
		hi_freq = cur_freq;
		low_freq = 500000;
		set_speed_proc = pmu_set_cpu_speed;
		is_pmu_based = 1;
	}
	/* Else check for TiPb 400 & 500 */
	else if (machine_is_compatible("PowerBook3,2")) {
		/* We only know about the 400 MHz and the 500Mhz model
		 * they both have 300 MHz as low frequency
		 */
		if (cur_freq < 350000 || cur_freq > 550000)
			goto out;
		hi_freq = cur_freq;
		low_freq = 300000;
		set_speed_proc = pmu_set_cpu_speed;
		is_pmu_based = 1;
	}
	/* Else check for 750FX */
	else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
		pmac_cpufreq_init_750FX(cpunode);
out:
	if (set_speed_proc == NULL)
		return -ENODEV;

	pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
	pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;

	printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
	printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
	       low_freq/1000, hi_freq/1000, cur_freq/1000);

	return cpufreq_register_driver(&pmac_cpufreq_driver);
}

module_init(pmac_cpufreq_setup);
726
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
new file mode 100644
index 000000000000..10f1d942c661
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -0,0 +1,3063 @@
1/*
2 * arch/ppc/platforms/pmac_feature.c
3 *
4 * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
5 * Ben. Herrenschmidt (benh@kernel.crashing.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * TODO:
13 *
14 * - Replace mdelay with some schedule loop if possible
15 * - Shorten some obfuscated delays on some routines (like modem
16 * power)
17 * - Refcount some clocks (see darwin)
18 * - Split split split...
19 *
20 */
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28#include <linux/adb.h>
29#include <linux/pmu.h>
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <asm/sections.h>
33#include <asm/errno.h>
34#include <asm/ohare.h>
35#include <asm/heathrow.h>
36#include <asm/keylargo.h>
37#include <asm/uninorth.h>
38#include <asm/io.h>
39#include <asm/prom.h>
40#include <asm/machdep.h>
41#include <asm/pmac_feature.h>
42#include <asm/dbdma.h>
43#include <asm/pci-bridge.h>
44#include <asm/pmac_low_i2c.h>
45
/* Define DEBUG_FEATURE to get DBG() tracing of feature calls */
#undef DEBUG_FEATURE

#ifdef DEBUG_FEATURE
#define DBG(fmt...) printk(KERN_DEBUG fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_6xx
/* Defined elsewhere in the platform code -- TODO confirm provider */
extern int powersave_lowspeed;
#endif

/* Idle/nap control flag and the list of device nodes the K2 PCI code
 * must skip -- both defined elsewhere in the platform code. */
extern int powersave_nap;
extern struct device_node *k2_skiplist[2];


/*
 * We use a single global lock to protect accesses. Each driver has
 * to take care of its own locking
 */
static DEFINE_SPINLOCK(feature_lock);

/* Convenience wrappers around the global feature lock (irq-safe) */
#define LOCK(flags)	spin_lock_irqsave(&feature_lock, flags);
#define UNLOCK(flags)	spin_unlock_irqrestore(&feature_lock, flags);


/*
 * Instance of some macio stuffs
 */
struct macio_chip macio_chips[MAX_MACIO_CHIPS];
76
77struct macio_chip *macio_find(struct device_node *child, int type)
78{
79 while(child) {
80 int i;
81
82 for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
83 if (child == macio_chips[i].of_node &&
84 (!type || macio_chips[i].type == type))
85 return &macio_chips[i];
86 child = child->parent;
87 }
88 return NULL;
89}
90EXPORT_SYMBOL_GPL(macio_find);
91
/* Human-readable names for the macio chip types, indexed by the
 * macio type enum (index 0 == unknown chip). */
static const char *macio_names[] =
{
	"Unknown",
	"Grand Central",
	"OHare",
	"OHareII",
	"Heathrow",
	"Gatwick",
	"Paddington",
	"Keylargo",
	"Pangea",
	"Intrepid",
	"K2"
};
106
107
108
109/*
110 * Uninorth reg. access. Note that Uni-N regs are big endian
111 */
112
113#define UN_REG(r) (uninorth_base + ((r) >> 2))
114#define UN_IN(r) (in_be32(UN_REG(r)))
115#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
118
119static struct device_node *uninorth_node;
120static u32 __iomem *uninorth_base;
121static u32 uninorth_rev;
122static int uninorth_u3;
123static void __iomem *u3_ht;
124
125/*
126 * For each motherboard family, we have a table of functions pointers
127 * that handle the various features.
128 */
129
130typedef long (*feature_call)(struct device_node *node, long param, long value);
131
132struct feature_table_entry {
133 unsigned int selector;
134 feature_call function;
135};
136
137struct pmac_mb_def
138{
139 const char* model_string;
140 const char* model_name;
141 int model_id;
142 struct feature_table_entry* features;
143 unsigned long board_flags;
144};
145static struct pmac_mb_def pmac_mb;
146
147/*
148 * Here are the chip specific feature functions
149 */
150
151static inline int simple_feature_tweak(struct device_node *node, int type,
152 int reg, u32 mask, int value)
153{
154 struct macio_chip* macio;
155 unsigned long flags;
156
157 macio = macio_find(node, type);
158 if (!macio)
159 return -ENODEV;
160 LOCK(flags);
161 if (value)
162 MACIO_BIS(reg, mask);
163 else
164 MACIO_BIC(reg, mask);
165 (void)MACIO_IN32(reg);
166 UNLOCK(flags);
167
168 return 0;
169}
170
171#ifndef CONFIG_POWER4
172
/* Enable/disable an SCC serial channel on OHare and Heathrow-class
 * (Heathrow/Paddington/Gatwick) chips.  The channel is selected by the
 * node name ("ch-a"/"ch-b").  The low 12 bits of param select the mode
 * (e.g. PMAC_SCC_IRDA drives the IR LED via the PMU); PMAC_SCC_FLAG_XMON
 * locks the channel on so later disables fail with -EPERM (debugger use).
 */
static long ohare_htw_scc_enable(struct device_node *node, long param,
				long value)
{
	struct macio_chip* macio;
	unsigned long chan_mask;
	unsigned long fcr;
	unsigned long flags;
	int htw, trans;
	unsigned long rmask;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* Channel is identified by the OF node name */
	if (!strcmp(node->name, "ch-a"))
		chan_mask = MACIO_FLAG_SCCA_ON;
	else if (!strcmp(node->name, "ch-b"))
		chan_mask = MACIO_FLAG_SCCB_ON;
	else
		return -ENODEV;

	/* Heathrow-class chips use different reset/transceiver bits */
	htw = (macio->type == macio_heathrow || macio->type == macio_paddington
		|| macio->type == macio_gatwick);
	/* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */
	trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
		pmac_mb.model_id != PMAC_TYPE_YIKES);
	if (value) {
#ifdef CONFIG_ADB_PMU
		if ((param & 0xfff) == PMAC_SCC_IRDA)
			pmu_enable_irled(1);
#endif /* CONFIG_ADB_PMU */
		LOCK(flags);
		fcr = MACIO_IN32(OHARE_FCR);
		/* Check if scc cell need enabling */
		if (!(fcr & OH_SCC_ENABLE)) {
			fcr |= OH_SCC_ENABLE;
			if (htw) {
				/* Side effect: this will also power up the
				 * modem, but it's too messy to figure out on which
				 * ports this controls the transceiver and on which
				 * it controls the modem
				 */
				if (trans)
					fcr &= ~HRW_SCC_TRANS_EN_N;
				MACIO_OUT32(OHARE_FCR, fcr);
				fcr |= (rmask = HRW_RESET_SCC);
				MACIO_OUT32(OHARE_FCR, fcr);
			} else {
				fcr |= (rmask = OH_SCC_RESET);
				MACIO_OUT32(OHARE_FCR, fcr);
			}
			/* Hold the reset bit (rmask) for 15ms with the lock
			 * dropped, then clear it */
			UNLOCK(flags);
			(void)MACIO_IN32(OHARE_FCR);
			mdelay(15);
			LOCK(flags);
			fcr &= ~rmask;
			MACIO_OUT32(OHARE_FCR, fcr);
		}
		/* Route the requested channel's I/O pins */
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr |= OH_SCCA_IO;
		if (chan_mask & MACIO_FLAG_SCCB_ON)
			fcr |= OH_SCCB_IO;
		MACIO_OUT32(OHARE_FCR, fcr);
		macio->flags |= chan_mask;
		UNLOCK(flags);
		if (param & PMAC_SCC_FLAG_XMON)
			macio->flags |= MACIO_FLAG_SCC_LOCKED;
	} else {
		/* Refuse to power down a channel claimed by the debugger */
		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
			return -EPERM;
		LOCK(flags);
		fcr = MACIO_IN32(OHARE_FCR);
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr &= ~OH_SCCA_IO;
		if (chan_mask & MACIO_FLAG_SCCB_ON)
			fcr &= ~OH_SCCB_IO;
		MACIO_OUT32(OHARE_FCR, fcr);
		/* Power the whole SCC cell down once both channels are off */
		if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) {
			fcr &= ~OH_SCC_ENABLE;
			if (htw && trans)
				fcr |= HRW_SCC_TRANS_EN_N;
			MACIO_OUT32(OHARE_FCR, fcr);
		}
		macio->flags &= ~(chan_mask);
		UNLOCK(flags);
		mdelay(10);
#ifdef CONFIG_ADB_PMU
		if ((param & 0xfff) == PMAC_SCC_IRDA)
			pmu_enable_irled(0);
#endif /* CONFIG_ADB_PMU */
	}
	return 0;
}
265
266static long ohare_floppy_enable(struct device_node *node, long param,
267 long value)
268{
269 return simple_feature_tweak(node, macio_ohare,
270 OHARE_FCR, OH_FLOPPY_ENABLE, value);
271}
272
273static long ohare_mesh_enable(struct device_node *node, long param, long value)
274{
275 return simple_feature_tweak(node, macio_ohare,
276 OHARE_FCR, OH_MESH_ENABLE, value);
277}
278
279static long ohare_ide_enable(struct device_node *node, long param, long value)
280{
281 switch(param) {
282 case 0:
283 /* For some reason, setting the bit in set_initial_features()
284 * doesn't stick. I'm still investigating... --BenH.
285 */
286 if (value)
287 simple_feature_tweak(node, macio_ohare,
288 OHARE_FCR, OH_IOBUS_ENABLE, 1);
289 return simple_feature_tweak(node, macio_ohare,
290 OHARE_FCR, OH_IDE0_ENABLE, value);
291 case 1:
292 return simple_feature_tweak(node, macio_ohare,
293 OHARE_FCR, OH_BAY_IDE_ENABLE, value);
294 default:
295 return -ENODEV;
296 }
297}
298
299static long ohare_ide_reset(struct device_node *node, long param, long value)
300{
301 switch(param) {
302 case 0:
303 return simple_feature_tweak(node, macio_ohare,
304 OHARE_FCR, OH_IDE0_RESET_N, !value);
305 case 1:
306 return simple_feature_tweak(node, macio_ohare,
307 OHARE_FCR, OH_IDE1_RESET_N, !value);
308 default:
309 return -ENODEV;
310 }
311}
312
313static long ohare_sleep_state(struct device_node *node, long param, long value)
314{
315 struct macio_chip* macio = &macio_chips[0];
316
317 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
318 return -EPERM;
319 if (value == 1) {
320 MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE);
321 } else if (value == 0) {
322 MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
323 }
324
325 return 0;
326}
327
/* Power the internal modem up/down on Heathrow-class machines: drives
 * the modem reset GPIO (bit 0) through a timed pulse train and, on
 * machines where it is safe, the SCC transceiver enable bit.  All
 * mdelay(250) waits are done with the feature lock dropped. */
static long heathrow_modem_enable(struct device_node *node, long param,
				long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	macio = macio_find(node, macio_unknown);
	if (!macio)
		return -ENODEV;
	/* Current GPIO state with the reset bit (bit 0) cleared */
	gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1;
	if (!value) {
		/* Power down: hold reset low first */
		LOCK(flags);
		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
		mdelay(250);
	}
	/* Yosemite/Yikes must not have HRW_SCC_TRANS_EN_N touched (see
	 * ohare_htw_scc_enable) */
	if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
	    pmac_mb.model_id != PMAC_TYPE_YIKES) {
		LOCK(flags);
		if (value)
			MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
		else
			MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);
		mdelay(250);
	}
	if (value) {
		/* Power up: pulse reset high/low/high with 250ms spacing */
		LOCK(flags);
		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250);
	}
	return 0;
}
371
372static long heathrow_floppy_enable(struct device_node *node, long param,
373 long value)
374{
375 return simple_feature_tweak(node, macio_unknown,
376 HEATHROW_FCR,
377 HRW_SWIM_ENABLE|HRW_BAY_FLOPPY_ENABLE,
378 value);
379}
380
381static long heathrow_mesh_enable(struct device_node *node, long param,
382 long value)
383{
384 struct macio_chip* macio;
385 unsigned long flags;
386
387 macio = macio_find(node, macio_unknown);
388 if (!macio)
389 return -ENODEV;
390 LOCK(flags);
391 /* Set clear mesh cell enable */
392 if (value)
393 MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE);
394 else
395 MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE);
396 (void)MACIO_IN32(HEATHROW_FCR);
397 udelay(10);
398 /* Set/Clear termination power */
399 if (value)
400 MACIO_BIC(HEATHROW_MBCR, 0x04000000);
401 else
402 MACIO_BIS(HEATHROW_MBCR, 0x04000000);
403 (void)MACIO_IN32(HEATHROW_MBCR);
404 udelay(10);
405 UNLOCK(flags);
406
407 return 0;
408}
409
410static long heathrow_ide_enable(struct device_node *node, long param,
411 long value)
412{
413 switch(param) {
414 case 0:
415 return simple_feature_tweak(node, macio_unknown,
416 HEATHROW_FCR, HRW_IDE0_ENABLE, value);
417 case 1:
418 return simple_feature_tweak(node, macio_unknown,
419 HEATHROW_FCR, HRW_BAY_IDE_ENABLE, value);
420 default:
421 return -ENODEV;
422 }
423}
424
425static long heathrow_ide_reset(struct device_node *node, long param,
426 long value)
427{
428 switch(param) {
429 case 0:
430 return simple_feature_tweak(node, macio_unknown,
431 HEATHROW_FCR, HRW_IDE0_RESET_N, !value);
432 case 1:
433 return simple_feature_tweak(node, macio_unknown,
434 HEATHROW_FCR, HRW_IDE1_RESET_N, !value);
435 default:
436 return -ENODEV;
437 }
438}
439
440static long heathrow_bmac_enable(struct device_node *node, long param,
441 long value)
442{
443 struct macio_chip* macio;
444 unsigned long flags;
445
446 macio = macio_find(node, 0);
447 if (!macio)
448 return -ENODEV;
449 if (value) {
450 LOCK(flags);
451 MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
452 MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET);
453 UNLOCK(flags);
454 (void)MACIO_IN32(HEATHROW_FCR);
455 mdelay(10);
456 LOCK(flags);
457 MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET);
458 UNLOCK(flags);
459 (void)MACIO_IN32(HEATHROW_FCR);
460 mdelay(10);
461 } else {
462 LOCK(flags);
463 MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
464 UNLOCK(flags);
465 }
466 return 0;
467}
468
469static long heathrow_sound_enable(struct device_node *node, long param,
470 long value)
471{
472 struct macio_chip* macio;
473 unsigned long flags;
474
475 /* B&W G3 and Yikes don't support that properly (the
476 * sound appear to never come back after beeing shut down).
477 */
478 if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE ||
479 pmac_mb.model_id == PMAC_TYPE_YIKES)
480 return 0;
481
482 macio = macio_find(node, 0);
483 if (!macio)
484 return -ENODEV;
485 if (value) {
486 LOCK(flags);
487 MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
488 MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
489 UNLOCK(flags);
490 (void)MACIO_IN32(HEATHROW_FCR);
491 } else {
492 LOCK(flags);
493 MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
494 MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
495 UNLOCK(flags);
496 }
497 return 0;
498}
499
/* Chip state saved across a sleep/wakeup cycle.  For Heathrow,
 * save_fcr[0..1] hold the primary chip's FCRs and save_fcr[2..3] the
 * secondary (Gatwick) chip's; the GPIO/clock arrays are presumably for
 * the KeyLargo sleep code further down -- TODO confirm users. */
static u32 save_fcr[6];
static u32 save_mbcr;
static u32 save_gpio_levels[2];
static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
static u32 save_unin_clock_ctl;
static struct dbdma_regs save_dbdma[13];
static struct dbdma_regs save_alt_dbdma[13];
508
/* Snapshot the programmable registers of the chip's 13 DBDMA channels
 * into "save" so they can be re-loaded after sleep.  Channel registers
 * start at offset 0x8000 from the macio base, one 0x100 stride per
 * channel (macio->base is a word pointer, hence the >> 2). */
static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save)
{
	int i;

	/* Save state & config of DBDMA channels */
	for (i = 0; i < 13; i++) {
		volatile struct dbdma_regs __iomem * chan = (void __iomem *)
			(macio->base + ((0x8000+i*0x100)>>2));
		save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi);
		save[i].cmdptr = in_le32(&chan->cmdptr);
		save[i].intr_sel = in_le32(&chan->intr_sel);
		save[i].br_sel = in_le32(&chan->br_sel);
		save[i].wait_sel = in_le32(&chan->wait_sel);
	}
}
524
/* Reload the DBDMA channel registers saved by dbdma_save().  Each
 * channel is first stopped (status bits written to the clear half of
 * the control register) and busy-waited until inactive. */
static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save)
{
	int i;

	/* Restore state & config of DBDMA channels */
	for (i = 0; i < 13; i++) {
		volatile struct dbdma_regs __iomem * chan = (void __iomem *)
			(macio->base + ((0x8000+i*0x100)>>2));
		/* Clear all run/status bits, then wait for the channel
		 * to go idle before reprogramming it */
		out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);
		while (in_le32(&chan->status) & ACTIVE)
			mb();
		out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi);
		out_le32(&chan->cmdptr, save[i].cmdptr);
		out_le32(&chan->intr_sel, save[i].intr_sel);
		out_le32(&chan->br_sel, save[i].br_sel);
		out_le32(&chan->wait_sel, save[i].wait_sel);
	}
}
543
/* Save a Heathrow chip's state and power its cells down for sleep.
 * "secondary" selects the Gatwick save slots (save_fcr[2..3]) instead
 * of the primary ones (save_fcr[0..1] + MBCR). */
static void heathrow_sleep(struct macio_chip *macio, int secondary)
{
	if (secondary) {
		dbdma_save(macio, save_alt_dbdma);
		save_fcr[2] = MACIO_IN32(0x38);
		save_fcr[3] = MACIO_IN32(0x3c);
	} else {
		dbdma_save(macio, save_dbdma);
		save_fcr[0] = MACIO_IN32(0x38);
		save_fcr[1] = MACIO_IN32(0x3c);
		save_mbcr = MACIO_IN32(0x34);
		/* Make sure sound is shut down */
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		/* This seems to be necessary as well or the fan
		 * keeps coming up and battery drains fast */
		MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N);
		/* Make sure eth is down even if module or sleep
		 * won't work properly */
		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET);
	}
	/* Make sure modem is shut down */
	MACIO_OUT8(HRW_GPIO_MODEM_RESET,
		MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1);
	MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
	MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE);

	/* Let things settle */
	(void)MACIO_IN32(HEATHROW_FCR);
}
575
/* Restore the chip state saved by heathrow_sleep().  The FCRs are
 * written back with read-back + delay between each write; the primary
 * path forces HRW_IOBUS_ENABLE back on and also restores the MBCR. */
static void heathrow_wakeup(struct macio_chip *macio, int secondary)
{
	if (secondary) {
		MACIO_OUT32(0x38, save_fcr[2]);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x3c, save_fcr[3]);
		(void)MACIO_IN32(0x38);
		mdelay(10);
		dbdma_restore(macio, save_alt_dbdma);
	} else {
		MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x3c, save_fcr[1]);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x34, save_mbcr);
		(void)MACIO_IN32(0x38);
		mdelay(10);
		dbdma_restore(macio, save_dbdma);
	}
}
599
/* Platform sleep hook: value == 1 puts the chips to sleep (secondary
 * Gatwick first when present), value == 0 wakes them in reverse order.
 * Other values are silently ignored. */
static long heathrow_sleep_state(struct device_node *node, long param,
				long value)
{
	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
		return -EPERM;
	if (value == 1) {
		/* NOTE(review): the "secondary" calls pass &macio_chips[0],
		 * not &macio_chips[1], even though they are gated on the
		 * Gatwick being chip 1 -- looks suspicious, but verify
		 * against hardware before changing. */
		if (macio_chips[1].type == macio_gatwick)
			heathrow_sleep(&macio_chips[0], 1);
		heathrow_sleep(&macio_chips[0], 0);
	} else if (value == 0) {
		heathrow_wakeup(&macio_chips[0], 0);
		if (macio_chips[1].type == macio_gatwick)
			heathrow_wakeup(&macio_chips[0], 1);
	}
	return 0;
}
616
/* Enable/disable an SCC channel on Core99 (KeyLargo-class) chips.
 * The node name ("ch-a"/"ch-b") selects the channel.  The low 12 bits
 * of param select the mode (PMAC_SCC_I2S1 keeps channel A's line
 * drivers off; PMAC_SCC_IRDA configures the IrDA cell on channel B);
 * PMAC_SCC_FLAG_XMON locks the channel on for the debugger. */
static long core99_scc_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	unsigned long chan_mask;
	u32 fcr;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	if (!strcmp(node->name, "ch-a"))
		chan_mask = MACIO_FLAG_SCCA_ON;
	else if (!strcmp(node->name, "ch-b"))
		chan_mask = MACIO_FLAG_SCCB_ON;
	else
		return -ENODEV;

	if (value) {
		int need_reset_scc = 0;
		int need_reset_irda = 0;

		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		/* Check if scc cell need enabling */
		if (!(fcr & KL0_SCC_CELL_ENABLE)) {
			fcr |= KL0_SCC_CELL_ENABLE;
			need_reset_scc = 1;
		}
		if (chan_mask & MACIO_FLAG_SCCA_ON) {
			fcr |= KL0_SCCA_ENABLE;
			/* Don't enable line drivers for I2S modem */
			if ((param & 0xfff) == PMAC_SCC_I2S1)
				fcr &= ~KL0_SCC_A_INTF_ENABLE;
			else
				fcr |= KL0_SCC_A_INTF_ENABLE;
		}
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr |= KL0_SCCB_ENABLE;
			/* Perform irda specific inits */
			if ((param & 0xfff) == PMAC_SCC_IRDA) {
				fcr &= ~KL0_SCC_B_INTF_ENABLE;
				fcr |= KL0_IRDA_ENABLE;
				fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
				fcr |= KL0_IRDA_SOURCE1_SEL;
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
				need_reset_irda = 1;
			} else
				fcr |= KL0_SCC_B_INTF_ENABLE;
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		macio->flags |= chan_mask;
		/* Newly powered cells get a 15ms reset pulse; the lock is
		 * dropped around each delay */
		if (need_reset_scc) {
			MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
		}
		if (need_reset_irda) {
			MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
		}
		UNLOCK(flags);
		if (param & PMAC_SCC_FLAG_XMON)
			macio->flags |= MACIO_FLAG_SCC_LOCKED;
	} else {
		/* Refuse to power down a channel claimed by the debugger */
		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
			return -EPERM;
		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr &= ~KL0_SCCA_ENABLE;
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr &= ~KL0_SCCB_ENABLE;
			/* Perform irda specific clears */
			if ((param & 0xfff) == PMAC_SCC_IRDA) {
				fcr &= ~KL0_IRDA_ENABLE;
				fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
			}
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		/* Power the whole cell down once both channels are off */
		if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
			fcr &= ~KL0_SCC_CELL_ENABLE;
			MACIO_OUT32(KEYLARGO_FCR0, fcr);
		}
		macio->flags &= ~(chan_mask);
		UNLOCK(flags);
		mdelay(10);
	}
	return 0;
}
716
/* Power the internal modem up/down on KeyLargo.  A NULL node means
 * the internal USB modem, substituted by the KeyLargo node itself.
 * Power-up pulses the modem reset GPIO high/low/high with 250ms
 * spacing after dropping KL2_ALT_DATA_OUT; delays run unlocked. */
static long
core99_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_keylargo)
			return -ENODEV;
		node = macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* Base GPIO value: output enabled, data bit cleared */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		/* Power down: hold reset low first */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}
	LOCK(flags);
	if (value) {
		MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
	}
	if (value) {
		/* Reset pulse train: high / low / high, 250ms apart */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250);
	}
	return 0;
}
768
/* Pangea/Intrepid variant of the modem power sequence: same reset
 * pulse train as core99_modem_enable, but power is controlled via a
 * dedicated GPIO (KL_GPIO_MODEM_POWER, active when the data bit is
 * clear) instead of the FCR2 ALT_DATA_OUT bit. */
static long
pangea_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_pangea &&
		    macio_chips[0].type != macio_intrepid)
			return -ENODEV;
		node = macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* Base GPIO value: output enabled, data bit cleared */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		/* Power down: hold reset low first */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}
	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_MODEM_POWER,
			KEYLARGO_GPIO_OUTPUT_ENABLE);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_OUT8(KL_GPIO_MODEM_POWER,
			KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
		UNLOCK(flags);
	}
	if (value) {
		/* Reset pulse train: high / low / high, 250ms apart */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250);
	}
	return 0;
}
823
/* Gate the UniNorth ATA-100 (U2) cell clock, then on enable make the
 * controller's PCI function usable again.  Requires Uni-N rev >= 0x24. */
static long
core99_ata100_enable(struct device_node *node, long value)
{
	unsigned long flags;
	struct pci_dev *pdev = NULL;
	u8 pbus, pid;

	if (uninorth_rev < 0x24)
		return -ENODEV;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	(void)UN_IN(UNI_N_CLOCK_CNTL);
	UNLOCK(flags);
	udelay(20);

	if (value) {
		/* Re-enable the PCI device behind the node; quietly succeed
		 * if it can't be located.
		 * NOTE(review): pci_enable_device()'s return value is
		 * ignored here -- best-effort by design, it seems. */
		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
			pdev = pci_find_slot(pbus, pid);
		if (pdev == NULL)
			return 0;
		pci_enable_device(pdev);
		pci_set_master(pdev);
	}
	return 0;
}
853
854static long
855core99_ide_enable(struct device_node *node, long param, long value)
856{
857 /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
858 * based ata-100
859 */
860 switch(param) {
861 case 0:
862 return simple_feature_tweak(node, macio_unknown,
863 KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value);
864 case 1:
865 return simple_feature_tweak(node, macio_unknown,
866 KEYLARGO_FCR1, KL1_EIDE1_ENABLE, value);
867 case 2:
868 return simple_feature_tweak(node, macio_unknown,
869 KEYLARGO_FCR1, KL1_UIDE_ENABLE, value);
870 case 3:
871 return core99_ata100_enable(node, value);
872 default:
873 return -ENODEV;
874 }
875}
876
877static long
878core99_ide_reset(struct device_node *node, long param, long value)
879{
880 switch(param) {
881 case 0:
882 return simple_feature_tweak(node, macio_unknown,
883 KEYLARGO_FCR1, KL1_EIDE0_RESET_N, !value);
884 case 1:
885 return simple_feature_tweak(node, macio_unknown,
886 KEYLARGO_FCR1, KL1_EIDE1_RESET_N, !value);
887 case 2:
888 return simple_feature_tweak(node, macio_unknown,
889 KEYLARGO_FCR1, KL1_UIDE_RESET_N, !value);
890 default:
891 return -ENODEV;
892 }
893}
894
895static long
896core99_gmac_enable(struct device_node *node, long param, long value)
897{
898 unsigned long flags;
899
900 LOCK(flags);
901 if (value)
902 UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
903 else
904 UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
905 (void)UN_IN(UNI_N_CLOCK_CNTL);
906 UNLOCK(flags);
907 udelay(20);
908
909 return 0;
910}
911
/* Pulse the GMAC PHY reset GPIO: drive it low (output enabled, data 0)
 * for 10ms, then release the output.  Only KeyLargo-class chips. */
static long
core99_gmac_phy_reset(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
	UNLOCK(flags);
	mdelay(10);
	LOCK(flags);
	/* Output-enable deliberately left off here (see commented flag) */
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
		KEYLARGO_GPIO_OUTOUT_DATA);
	UNLOCK(flags);
	mdelay(10);

	return 0;
}
936
937static long
938core99_sound_chip_enable(struct device_node *node, long param, long value)
939{
940 struct macio_chip* macio;
941 unsigned long flags;
942
943 macio = macio_find(node, 0);
944 if (!macio)
945 return -ENODEV;
946
947 /* Do a better probe code, screamer G4 desktops &
948 * iMacs can do that too, add a recalibrate in
949 * the driver as well
950 */
951 if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
952 pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
953 LOCK(flags);
954 if (value)
955 MACIO_OUT8(KL_GPIO_SOUND_POWER,
956 KEYLARGO_GPIO_OUTPUT_ENABLE |
957 KEYLARGO_GPIO_OUTOUT_DATA);
958 else
959 MACIO_OUT8(KL_GPIO_SOUND_POWER,
960 KEYLARGO_GPIO_OUTPUT_ENABLE);
961 (void)MACIO_IN8(KL_GPIO_SOUND_POWER);
962 UNLOCK(flags);
963 }
964 return 0;
965}
966
/* Power the AirPort card slot up/down.  The power-up register dance is
 * a transcription of the Open Firmware enable-cardslot/init-wireless
 * words; magic offsets/values come from there and must not be "cleaned
 * up".  The node must be the macio itself or a direct child of it. */
static long
core99_airport_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	int state;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Hint: we allow passing of macio itself for the sake of the
	 * sleep code
	 */
	if (node != macio->of_node &&
	    (!node->parent || node->parent != macio->of_node))
		return -ENODEV;
	/* No-op when the slot is already in the requested state */
	state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
	if (value == state)
		return 0;
	if (value) {
		/* This code is a reproduction of OF enable-cardslot
		 * and init-wireless methods, slightly hacked until
		 * I got it working.
		 */
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);
		mdelay(10);
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);

		mdelay(10);

		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
		UNLOCK(flags);
		udelay(10);
		/* Raw macio offsets from the OF init-wireless method */
		MACIO_OUT32(0x1c000, 0);
		mdelay(1);
		MACIO_OUT8(0x1a3e0, 0x41);
		(void)MACIO_IN8(0x1a3e0);
		udelay(10);
		LOCK(flags);
		MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		UNLOCK(flags);
		mdelay(100);

		macio->flags |= MACIO_FLAG_AIRPORT_ON;
	} else {
		/* Power down: deselect the card and drop all slot GPIOs */
		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
		(void)MACIO_IN8(KL_GPIO_AIRPORT_4);
		UNLOCK(flags);

		macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
	}
	return 0;
}
1052
1053#ifdef CONFIG_SMP
1054static long
1055core99_reset_cpu(struct device_node *node, long param, long value)
1056{
1057 unsigned int reset_io = 0;
1058 unsigned long flags;
1059 struct macio_chip *macio;
1060 struct device_node *np;
1061 const int dflt_reset_lines[] = { KL_GPIO_RESET_CPU0,
1062 KL_GPIO_RESET_CPU1,
1063 KL_GPIO_RESET_CPU2,
1064 KL_GPIO_RESET_CPU3 };
1065
1066 macio = &macio_chips[0];
1067 if (macio->type != macio_keylargo)
1068 return -ENODEV;
1069
1070 np = find_path_device("/cpus");
1071 if (np == NULL)
1072 return -ENODEV;
1073 for (np = np->child; np != NULL; np = np->sibling) {
1074 u32 *num = (u32 *)get_property(np, "reg", NULL);
1075 u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
1076 if (num == NULL || rst == NULL)
1077 continue;
1078 if (param == *num) {
1079 reset_io = *rst;
1080 break;
1081 }
1082 }
1083 if (np == NULL || reset_io == 0)
1084 reset_io = dflt_reset_lines[param];
1085
1086 LOCK(flags);
1087 MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
1088 (void)MACIO_IN8(reset_io);
1089 udelay(1);
1090 MACIO_OUT8(reset_io, 0);
1091 (void)MACIO_IN8(reset_io);
1092 UNLOCK(flags);
1093
1094 return 0;
1095}
1096#endif /* CONFIG_SMP */
1097
1098static long
1099core99_usb_enable(struct device_node *node, long param, long value)
1100{
1101 struct macio_chip *macio;
1102 unsigned long flags;
1103 char *prop;
1104 int number;
1105 u32 reg;
1106
1107 macio = &macio_chips[0];
1108 if (macio->type != macio_keylargo && macio->type != macio_pangea &&
1109 macio->type != macio_intrepid)
1110 return -ENODEV;
1111
1112 prop = (char *)get_property(node, "AAPL,clock-id", NULL);
1113 if (!prop)
1114 return -ENODEV;
1115 if (strncmp(prop, "usb0u048", 8) == 0)
1116 number = 0;
1117 else if (strncmp(prop, "usb1u148", 8) == 0)
1118 number = 2;
1119 else if (strncmp(prop, "usb2u248", 8) == 0)
1120 number = 4;
1121 else
1122 return -ENODEV;
1123
1124 /* Sorry for the brute-force locking, but this is only used during
1125 * sleep and the timing seem to be critical
1126 */
1127 LOCK(flags);
1128 if (value) {
1129 /* Turn ON */
1130 if (number == 0) {
1131 MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
1132 (void)MACIO_IN32(KEYLARGO_FCR0);
1133 UNLOCK(flags);
1134 mdelay(1);
1135 LOCK(flags);
1136 MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
1137 } else if (number == 2) {
1138 MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
1139 UNLOCK(flags);
1140 (void)MACIO_IN32(KEYLARGO_FCR0);
1141 mdelay(1);
1142 LOCK(flags);
1143 MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
1144 } else if (number == 4) {
1145 MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
1146 UNLOCK(flags);
1147 (void)MACIO_IN32(KEYLARGO_FCR1);
1148 mdelay(1);
1149 LOCK(flags);
1150 MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
1151 }
1152 if (number < 4) {
1153 reg = MACIO_IN32(KEYLARGO_FCR4);
1154 reg &= ~(KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
1155 KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number));
1156 reg &= ~(KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
1157 KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1));
1158 MACIO_OUT32(KEYLARGO_FCR4, reg);
1159 (void)MACIO_IN32(KEYLARGO_FCR4);
1160 udelay(10);
1161 } else {
1162 reg = MACIO_IN32(KEYLARGO_FCR3);
1163 reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
1164 KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
1165 reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
1166 KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
1167 MACIO_OUT32(KEYLARGO_FCR3, reg);
1168 (void)MACIO_IN32(KEYLARGO_FCR3);
1169 udelay(10);
1170 }
1171 if (macio->type == macio_intrepid) {
1172 /* wait for clock stopped bits to clear */
1173 u32 test0 = 0, test1 = 0;
1174 u32 status0, status1;
1175 int timeout = 1000;
1176
1177 UNLOCK(flags);
1178 switch (number) {
1179 case 0:
1180 test0 = UNI_N_CLOCK_STOPPED_USB0;
1181 test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
1182 break;
1183 case 2:
1184 test0 = UNI_N_CLOCK_STOPPED_USB1;
1185 test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
1186 break;
1187 case 4:
1188 test0 = UNI_N_CLOCK_STOPPED_USB2;
1189 test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
1190 break;
1191 }
1192 do {
1193 if (--timeout <= 0) {
1194 printk(KERN_ERR "core99_usb_enable: "
1195 "Timeout waiting for clocks\n");
1196 break;
1197 }
1198 mdelay(1);
1199 status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
1200 status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
1201 } while ((status0 & test0) | (status1 & test1));
1202 LOCK(flags);
1203 }
1204 } else {
1205 /* Turn OFF */
1206 if (number < 4) {
1207 reg = MACIO_IN32(KEYLARGO_FCR4);
1208 reg |= KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
1209 KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number);
1210 reg |= KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
1211 KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1);
1212 MACIO_OUT32(KEYLARGO_FCR4, reg);
1213 (void)MACIO_IN32(KEYLARGO_FCR4);
1214 udelay(1);
1215 } else {
1216 reg = MACIO_IN32(KEYLARGO_FCR3);
1217 reg |= KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
1218 KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
1219 reg |= KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
1220 KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
1221 MACIO_OUT32(KEYLARGO_FCR3, reg);
1222 (void)MACIO_IN32(KEYLARGO_FCR3);
1223 udelay(1);
1224 }
1225 if (number == 0) {
1226 if (macio->type != macio_intrepid)
1227 MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
1228 (void)MACIO_IN32(KEYLARGO_FCR0);
1229 udelay(1);
1230 MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
1231 (void)MACIO_IN32(KEYLARGO_FCR0);
1232 } else if (number == 2) {
1233 if (macio->type != macio_intrepid)
1234 MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
1235 (void)MACIO_IN32(KEYLARGO_FCR0);
1236 udelay(1);
1237 MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
1238 (void)MACIO_IN32(KEYLARGO_FCR0);
1239 } else if (number == 4) {
1240 udelay(1);
1241 MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
1242 (void)MACIO_IN32(KEYLARGO_FCR1);
1243 }
1244 udelay(1);
1245 }
1246 UNLOCK(flags);
1247
1248 return 0;
1249}
1250
/*
 * PMAC_FTR_1394_ENABLE callback: gate the FireWire clock in the
 * UniNorth bridge.
 *
 * @node, @param: unused.
 * @value: non-zero enables the FW clock, zero disables it.
 *
 * Returns 0, or -ENODEV when the macio chip is not a
 * KeyLargo/Pangea/Intrepid or does not advertise FireWire support.
 */
static long
core99_firewire_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;
	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
		return -ENODEV;

	LOCK(flags);
	if (value) {
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
		(void)UN_IN(UNI_N_CLOCK_CNTL);	/* flush posted write */
	} else {
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
		(void)UN_IN(UNI_N_CLOCK_CNTL);	/* flush posted write */
	}
	UNLOCK(flags);
	/* give the clock some time to settle before callers proceed */
	mdelay(1);

	return 0;
}
1277
/*
 * PMAC_FTR_1394_CABLE_POWER callback: drive the FireWire cable power
 * GPIO.
 *
 * @node: may be NULL (core99_sleep() calls this with NULL, see the
 *        "Trick" comment below).
 * @param: unused.
 * @value: non-zero writes 0 to the GPIO (power on), zero writes 4
 *         (power off).
 *
 * Returns 0, or -ENODEV if the board or macio chip does not support
 * FW cable power control.
 */
static long
core99_firewire_cable_power(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	/* Trick: we allow NULL node */
	if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0)
		return -ENODEV;
	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;
	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 0);
		MACIO_IN8(KL_GPIO_FW_CABLE_POWER);	/* flush posted write */
		udelay(10);
	} else {
		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 4);
		MACIO_IN8(KL_GPIO_FW_CABLE_POWER); udelay(10);
	}
	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1308
1309static long
1310intrepid_aack_delay_enable(struct device_node *node, long param, long value)
1311{
1312 unsigned long flags;
1313
1314 if (uninorth_rev < 0xd2)
1315 return -ENODEV;
1316
1317 LOCK(flags);
1318 if (param)
1319 UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
1320 else
1321 UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
1322 UNLOCK(flags);
1323
1324 return 0;
1325}
1326
1327
1328#endif /* CONFIG_POWER4 */
1329
/*
 * PMAC_FTR_READ_GPIO callback: read one macio GPIO register.
 * @param is the register offset; @node and @value are unused.
 * Returns the raw 8-bit register contents.
 */
static long
core99_read_gpio(struct device_node *node, long param, long value)
{
	/* NOTE: "macio" looks unused but is presumably consumed by the
	 * MACIO_IN8() macro expansion — do not remove it. */
	struct macio_chip *macio = &macio_chips[0];

	return MACIO_IN8(param);
}
1337
1338
/*
 * PMAC_FTR_WRITE_GPIO callback: write one macio GPIO register.
 * @param is the register offset; only the low 8 bits of @value are
 * written. Always returns 0.
 */
static long
core99_write_gpio(struct device_node *node, long param, long value)
{
	/* NOTE: "macio" looks unused but is presumably consumed by the
	 * MACIO_OUT8() macro expansion — do not remove it. */
	struct macio_chip *macio = &macio_chips[0];

	MACIO_OUT8(param, (u8)(value & 0xff));
	return 0;
}
1347
1348#ifdef CONFIG_POWER4
/*
 * PMAC_FTR_GMAC_ENABLE callback for G5 (K2 macio): gate the GMAC
 * (ethernet) cell clock.
 *
 * The mb() barriers order the k2_skiplist[] update against the clock
 * change: when disabling, the node is put on the skiplist before the
 * clock is cut; when enabling, the clock comes up before the entry is
 * cleared. Returns 0, or -ENODEV when node is NULL.
 */
static long g5_gmac_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
		mb();
		k2_skiplist[0] = NULL;
	} else {
		k2_skiplist[0] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
	}

	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1373
/*
 * PMAC_FTR_1394_ENABLE callback for G5 (K2 macio): gate the FireWire
 * cell clock.
 *
 * Mirrors g5_gmac_enable(): the mb() barriers order the k2_skiplist[]
 * update against the clock change so the skiplist entry is always set
 * while the clock is off. Returns 0, or -ENODEV when node is NULL.
 */
static long g5_fw_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
		mb();
		k2_skiplist[1] = NULL;
	} else {
		k2_skiplist[1] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
	}

	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1398
1399static long g5_mpic_enable(struct device_node *node, long param, long value)
1400{
1401 unsigned long flags;
1402
1403 if (node->parent == NULL || strcmp(node->parent->name, "u3"))
1404 return 0;
1405
1406 LOCK(flags);
1407 UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
1408 UNLOCK(flags);
1409
1410 return 0;
1411}
1412
/*
 * PMAC_FTR_GMAC_PHY_RESET callback for G5: pulse the ethernet PHY
 * reset line via GPIO 29 on the K2 macio.
 *
 * Only performed for a "B5221"-compatible PHY (BCM5221, iMac G5);
 * other (combo) PHYs are left alone. Returns -ENODEV when the node
 * has no PHY child, 0 otherwise (including when no reset was needed).
 */
static long g5_eth_phy_reset(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	struct device_node *phy;
	int need_reset;

	/*
	 * We must not reset the combo PHYs, only the BCM5221 found in
	 * the iMac G5.
	 */
	phy = of_get_next_child(node, NULL);
	if (!phy)
		return -ENODEV;
	need_reset = device_is_compatible(phy, "B5221");
	of_node_put(phy);
	if (!need_reset)
		return 0;

	/* PHY reset is GPIO 29, not in device-tree unfortunately */
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
		   KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
	/* Thankfully, this is now always called at a time when we can
	 * schedule by sungem.
	 */
	msleep(10);
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);

	return 0;
}
1442
/*
 * PMAC_FTR_SOUND_CHIP_ENABLE callback for G5: bring up the I2S0 sound
 * cell (clocks first, then the cell, then release its reset).
 * Disabling (value == 0) is deliberately not implemented yet.
 */
static long g5_i2s_enable(struct device_node *node, long param, long value)
{
	/* Very crude implementation for now */
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (value == 0)
		return 0; /* don't disable yet */

	LOCK(flags);
	/* enable the clocks feeding the cell first */
	MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
		  KL3_I2S0_CLK18_ENABLE);
	udelay(10);
	/* then power the cell itself */
	MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
		  K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
	udelay(10);
	/* finally take it out of reset */
	MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
	UNLOCK(flags);
	udelay(10);

	return 0;
}
1465
1466
1467#ifdef CONFIG_SMP
/*
 * PMAC_FTR_RESET_CPU callback for G5 SMP: pulse the soft-reset GPIO of
 * the CPU whose "reg" property equals @param.
 *
 * The GPIO address is read from the matching cpu node's "soft-reset"
 * property under /cpus. Only implemented for the KeyLargo2 macio.
 * Returns 0 on success, -ENODEV when the chip, the /cpus node, or a
 * matching cpu entry with a reset GPIO cannot be found.
 */
static long g5_reset_cpu(struct device_node *node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip *macio;
	struct device_node *np;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo2)
		return -ENODEV;

	np = find_path_device("/cpus");
	if (np == NULL)
		return -ENODEV;
	/* walk the cpu nodes looking for the one matching "param" */
	for (np = np->child; np != NULL; np = np->sibling) {
		u32 *num = (u32 *)get_property(np, "reg", NULL);
		u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
		if (num == NULL || rst == NULL)
			continue;
		if (param == *num) {
			reset_io = *rst;
			break;
		}
	}
	if (np == NULL || reset_io == 0)
		return -ENODEV;

	/* drive the reset line high then low, flushing each write */
	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
1505#endif /* CONFIG_SMP */
1506
1507/*
1508 * This can be called from pmac_smp so isn't static
1509 *
1510 * This takes the second CPU off the bus on dual CPU machines
1511 * running UP
1512 */
1513void g5_phy_disable_cpu1(void)
1514{
1515 UN_OUT(U3_API_PHY_CONFIG_1, 0);
1516}
1517#endif /* CONFIG_POWER4 */
1518
1519#ifndef CONFIG_POWER4
1520
/*
 * Power down as many KeyLargo cells and clocks as possible.
 *
 * @sleep_mode: non-zero when preparing for suspend; additionally
 * suspends the USB reference and shuts down extra PLLs/clocks that
 * must stay up while the machine is running.
 *
 * The exact register order and the posted-write flushes are
 * deliberate; do not reorder.
 */
static void
keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	if (sleep_mode) {
		mdelay(1);
		MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
		(void)MACIO_IN32(KEYLARGO_FCR0);
		mdelay(1);
	}

	/* kill SCC and IrDA cells and their clocks */
	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
				KL0_SCC_CELL_ENABLE |
				KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
				KL0_IRDA_CLK19_ENABLE);

	MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
	MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);

	/* audio, I2S and IDE cells off */
	MACIO_BIC(KEYLARGO_FCR1,
		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
		KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
		KL1_UIDE_ENABLE);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
	MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	if (macio->rev >= 2) {
		/* only rev >= 2 chips get the 2x PLL shut down */
		temp |= KL3_SHUTDOWN_PLL2X;
		if (sleep_mode)
			temp |= KL3_SHUTDOWN_PLL_TOTAL;
	}

	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	if (sleep_mode)
		temp |= KL3_SHUTDOWN_PLLKW12;
	temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE
		| KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_VIA_CLK16_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
}
1574
/*
 * Pangea variant of keylargo_shutdown(): power down the cells the
 * Pangea combo chip exposes (SCC, USB, audio/I2S) and the KW PLLs.
 * @sleep_mode additionally stops the VIA and timer clocks.
 */
static void
pangea_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
				KL0_SCC_CELL_ENABLE |
				KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		KL1_UIDE_ENABLE);
	/* laptops also hold the Ultra-IDE cell in reset */
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE
		| KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
}
1608
/*
 * Intrepid variant of keylargo_shutdown(): stop the SCC and I2S cells
 * and their clocks. @sleep_mode additionally stops the timer and VIA
 * clocks. Note the USB2 cell disable is intentionally commented out.
 */
static void
intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
		  KL0_SCC_CELL_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		  /*KL1_USB2_CELL_ENABLE |*/
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE);
	/* laptops also hold the Ultra-IDE cell in reset */
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
		  KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(10);
}
1636
1637
/*
 * Enable or disable EMI clock spreading on machines that support it.
 *
 * Intrepid-based machines have a dedicated UniNorth register; some
 * PowerBook/iBook models instead need their i2c clock chip reprogrammed
 * with model-specific register values, keyed on the product ID found in
 * the device-tree.
 */
void pmac_tweak_clock_spreading(int enable)
{
	struct macio_chip *macio = &macio_chips[0];

	/* Hack for doing clock spreading on some machines PowerBooks and
	 * iBooks. This implements the "platform-do-clockspreading" OF
	 * property as decoded manually on various models. For safety, we also
	 * check the product ID in the device-tree in cases we'll whack the i2c
	 * chip to make reasonably sure we won't set wrong values in there
	 *
	 * Of course, ultimately, we have to implement a real parser for
	 * the platform-do-* stuff...
	 */

	if (macio->type == macio_intrepid) {
		if (enable)
			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
		else
			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
		mdelay(40);
	}

	/* NOTE: this while() is used as a "breakable if" — every path
	 * inside ends in break, it never actually loops. */
	while (machine_is_compatible("PowerBook5,2") ||
	       machine_is_compatible("PowerBook5,3") ||
	       machine_is_compatible("PowerBook6,2") ||
	       machine_is_compatible("PowerBook6,3")) {
		struct device_node *ui2c = of_find_node_by_type(NULL, "i2c");
		struct device_node *dt = of_find_node_by_name(NULL, "device-tree");
		u8 buffer[9];
		u32 *productID;
		int i, rc, changed = 0;

		if (dt == NULL)
			break;
		productID = (u32 *)get_property(dt, "pid#", NULL);
		if (productID == NULL)
			break;
		/* find the i2c bus that hangs off the uni-n node */
		while(ui2c) {
			struct device_node *p = of_get_parent(ui2c);
			if (p && !strcmp(p->name, "uni-n"))
				break;
			ui2c = of_find_node_by_type(ui2c, "i2c");
		}
		if (ui2c == NULL)
			break;
		DBG("Trying to bump clock speed for PID: %08x...\n", *productID);
		rc = pmac_low_i2c_open(ui2c, 1);
		if (rc != 0)
			break;
		/* read back the clock chip's current register block */
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
		DBG("read result: %d,", rc);
		if (rc != 0) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		for (i=0; i<9; i++)
			DBG(" %02x", buffer[i]);
		DBG("\n");

		/* patch in the model-specific magic values; only buffer[7]
		 * actually depends on "enable" */
		switch(*productID) {
		case 0x1182:	/* AlBook 12" rev 2 */
		case 0x1183:	/* iBook G4 12" */
			buffer[0] = (buffer[0] & 0x8f) | 0x70;
			buffer[2] = (buffer[2] & 0x7f) | 0x00;
			buffer[5] = (buffer[5] & 0x80) | 0x31;
			buffer[6] = (buffer[6] & 0x40) | 0xb0;
			buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba);
			buffer[8] = (buffer[8] & 0x00) | 0x30;
			changed = 1;
			break;
		case 0x3142:	/* AlBook 15" (ATI M10) */
		case 0x3143:	/* AlBook 17" (ATI M10) */
			buffer[0] = (buffer[0] & 0xaf) | 0x50;
			buffer[2] = (buffer[2] & 0x7f) | 0x00;
			buffer[5] = (buffer[5] & 0x80) | 0x31;
			buffer[6] = (buffer[6] & 0x40) | 0xb0;
			buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0);
			buffer[8] = (buffer[8] & 0x00) | 0x30;
			changed = 1;
			break;
		default:
			DBG("i2c-hwclock: Machine model not handled\n");
			break;
		}
		if (!changed) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		/* write the patched block, then read it back for the log */
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
		DBG("write result: %d,", rc);
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
		DBG("read result: %d,", rc);
		if (rc != 0) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		for (i=0; i<9; i++)
			DBG(" %02x", buffer[i]);
		pmac_low_i2c_close(ui2c);
		break;
	}
}
1743
1744
/*
 * Put a Core99 (KeyLargo/Pangea/Intrepid) machine to sleep:
 * power down peripherals drivers may have left on, save GPIO/FCR/DBDMA
 * state for core99_wake_up(), shut down the macio chip, then put the
 * UniNorth host bridge into its sleep state.
 *
 * Returns 0 on success, -ENODEV on unsupported macio chips.
 */
static int
core99_sleep(void)
{
	struct macio_chip *macio;
	int i;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/* We power off the wireless slot in case it was not done
	 * by the driver. We don't power it on automatically however
	 */
	if (macio->flags & MACIO_FLAG_AIRPORT_ON)
		core99_airport_enable(macio->of_node, 0, 0);

	/* We power off the FW cable. Should be done by the driver... */
	if (macio->flags & MACIO_FLAG_FW_SUPPORTED) {
		core99_firewire_enable(NULL, 0, 0);
		core99_firewire_cable_power(NULL, 0, 0);
	}

	/* We make sure int. modem is off (in case driver lost it) */
	if (macio->type == macio_keylargo)
		core99_modem_enable(macio->of_node, 0, 0);
	else
		pangea_modem_enable(macio->of_node, 0, 0);

	/* We make sure the sound is off as well */
	core99_sound_chip_enable(macio->of_node, 0, 0);

	/*
	 * Save various bits of KeyLargo
	 */

	/* Save the state of the various GPIOs */
	save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0);
	save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i);

	/* Save the FCRs (MBCR and FCR5 only exist on some chips) */
	if (macio->type == macio_keylargo)
		save_mbcr = MACIO_IN32(KEYLARGO_MBCR);
	save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0);
	save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1);
	save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2);
	save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3);
	save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4);
	if (macio->type == macio_pangea || macio->type == macio_intrepid)
		save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5);

	/* Save state & config of DBDMA channels */
	dbdma_save(macio, save_dbdma);

	/*
	 * Turn off as much as we can
	 */
	if (macio->type == macio_pangea)
		pangea_shutdown(macio, 1);
	else if (macio->type == macio_intrepid)
		intrepid_shutdown(macio, 1);
	else if (macio->type == macio_keylargo)
		keylargo_shutdown(macio, 1);

	/*
	 * Put the host bridge to sleep
	 */

	save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL);
	/* Note: do not switch GMAC off, driver does it when necessary, WOL must keep it
	 * enabled !
	 */
	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl &
	       ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/));
	udelay(100);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP);
	mdelay(10);

	/*
	 * FIXME: A bit of black magic with OpenPIC (don't ask me why)
	 */
	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
		MACIO_BIS(0x506e0, 0x00400000);
		MACIO_BIS(0x506e0, 0x80000000);
	}
	return 0;
}
1837
/*
 * Reverse of core99_sleep(): wake the UniNorth host bridge, then
 * restore the macio FCR/MBCR registers, DBDMA channel state and GPIO
 * levels saved at sleep time, in that order.
 *
 * Returns 0 on success, -ENODEV on unsupported macio chips.
 */
static int
core99_wake_up(void)
{
	struct macio_chip *macio;
	int i;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/*
	 * Wakeup the host bridge
	 */
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
	udelay(10);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
	udelay(10);

	/*
	 * Restore KeyLargo
	 */

	if (macio->type == macio_keylargo) {
		MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
		(void)MACIO_IN32(KEYLARGO_MBCR); udelay(10);
	}
	/* each FCR write is flushed and given time to take effect */
	MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
	(void)MACIO_IN32(KEYLARGO_FCR0); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
	(void)MACIO_IN32(KEYLARGO_FCR1); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
	(void)MACIO_IN32(KEYLARGO_FCR2); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
	(void)MACIO_IN32(KEYLARGO_FCR3); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
	(void)MACIO_IN32(KEYLARGO_FCR4); udelay(10);
	if (macio->type == macio_pangea || macio->type == macio_intrepid) {
		MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
		(void)MACIO_IN32(KEYLARGO_FCR5); udelay(10);
	}

	dbdma_restore(macio, save_dbdma);

	MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
	MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);

	/* FIXME more black magic with OpenPIC ... */
	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
		MACIO_BIC(0x506e0, 0x00400000);
		MACIO_BIC(0x506e0, 0x80000000);
	}

	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl);
	udelay(100);

	return 0;
}
1900
1901static long
1902core99_sleep_state(struct device_node *node, long param, long value)
1903{
1904 /* Param == 1 means to enter the "fake sleep" mode that is
1905 * used for CPU speed switch
1906 */
1907 if (param == 1) {
1908 if (value == 1) {
1909 UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
1910 UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2);
1911 } else {
1912 UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
1913 udelay(10);
1914 UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
1915 udelay(10);
1916 }
1917 return 0;
1918 }
1919 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
1920 return -EPERM;
1921
1922 if (value == 1)
1923 return core99_sleep();
1924 else if (value == 0)
1925 return core99_wake_up();
1926 return 0;
1927}
1928
1929#endif /* CONFIG_POWER4 */
1930
1931static long
1932generic_dev_can_wake(struct device_node *node, long param, long value)
1933{
1934 /* Todo: eventually check we are really dealing with on-board
1935 * video device ...
1936 */
1937
1938 if (pmac_mb.board_flags & PMAC_MB_MAY_SLEEP)
1939 pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP;
1940 return 0;
1941}
1942
1943static long generic_get_mb_info(struct device_node *node, long param, long value)
1944{
1945 switch(param) {
1946 case PMAC_MB_INFO_MODEL:
1947 return pmac_mb.model_id;
1948 case PMAC_MB_INFO_FLAGS:
1949 return pmac_mb.board_flags;
1950 case PMAC_MB_INFO_NAME:
1951 /* hack hack hack... but should work */
1952 *((const char **)value) = pmac_mb.model_name;
1953 return 0;
1954 }
1955 return -EINVAL;
1956}
1957
1958
1959/*
1960 * Table definitions
1961 */
1962
/* Features usable on any PowerMac machine, regardless of chipset.
 */
static struct feature_table_entry any_features[] = {
	{ PMAC_FTR_GET_MB_INFO,		generic_get_mb_info },
	{ PMAC_FTR_DEVICE_CAN_WAKE,	generic_dev_can_wake },
	{ 0, NULL }	/* terminator */
};
1970
1971#ifndef CONFIG_POWER4
1972
/* OHare based motherboards. Currently, we only use these on the
 * 2400,3400 and 3500 series powerbooks. Some older desktops seem
 * to have issues with turning on/off those asic cells
 */
static struct feature_table_entry ohare_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
	{ PMAC_FTR_SWIM3_ENABLE,	ohare_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		ohare_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		ohare_ide_enable},
	{ PMAC_FTR_IDE_RESET,		ohare_ide_reset},
	{ PMAC_FTR_SLEEP_STATE,		ohare_sleep_state },
	{ 0, NULL }	/* terminator */
};
1986
/* Heathrow desktop machines (Beige G3).
 * Separated as some features couldn't be properly tested
 * and the serial port control bits appear to confuse it.
 */
static struct feature_table_entry heathrow_desktop_features[] = {
	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
	{ 0, NULL }	/* terminator */
};
1999
/* Heathrow based laptop, that is the Wallstreet and mainstreet
 * powerbooks. Adds SCC/modem/sound and sleep support over the
 * desktop table.
 */
static struct feature_table_entry heathrow_laptop_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	heathrow_sound_enable },
	{ PMAC_FTR_SLEEP_STATE,		heathrow_sleep_state },
	{ 0, NULL }	/* terminator */
};
2015
/* Paddington based machines
 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
 * Same callbacks as heathrow_laptop_features.
 */
static struct feature_table_entry paddington_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	heathrow_sound_enable },
	{ PMAC_FTR_SLEEP_STATE,		heathrow_sleep_state },
	{ 0, NULL }	/* terminator */
};
2031
/* Core99 & MacRISC 2 machines (all machines released since the
 * iBook (included), that is all AGP machines, except pangea
 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
 * used on iBook2 & iMac "flow power".
 */
static struct feature_table_entry core99_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	core99_modem_enable },
	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU,		core99_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }	/* terminator */
};
2057
/* RackMac (Xserve-class) machines: like core99_features but without
 * modem, sound, airport or GPIO-less entries that don't apply.
 */
static struct feature_table_entry rackmac_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU,		core99_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }	/* terminator */
};
2077
/* Pangea features: like core99_features but with the pangea modem
 * callback and no SMP CPU reset.
 */
static struct feature_table_entry pangea_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	pangea_modem_enable },
	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }	/* terminator */
};
2097
/* Intrepid features: pangea_features plus the AACK delay control.
 */
static struct feature_table_entry intrepid_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	pangea_modem_enable },
	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ PMAC_FTR_AACK_DELAY_ENABLE,	intrepid_aack_delay_enable },
	{ 0, NULL }	/* terminator */
};
2118
2119#else /* CONFIG_POWER4 */
2120
/* G5 (PowerMac G5 / K2 macio) features.
 */
static struct feature_table_entry g5_features[] = {
	{ PMAC_FTR_GMAC_ENABLE,		g5_gmac_enable },
	{ PMAC_FTR_1394_ENABLE,		g5_fw_enable },
	{ PMAC_FTR_ENABLE_MPIC,		g5_mpic_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	g5_eth_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	g5_i2s_enable },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU,		g5_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }	/* terminator */
};
2136
2137#endif /* CONFIG_POWER4 */
2138
/* Table of known PowerMac motherboards.
 *
 * Each entry holds: the device-tree "model" string (also matched
 * against "compatible" as a fallback by probe_motherboard()), a
 * human-readable name, a PMAC_TYPE_* model id, the feature table to
 * use, and PMAC_MB_* board flags.  probe_motherboard() copies the
 * matching entry into the global pmac_mb descriptor.
 */
static struct pmac_mb_def pmac_mb_defs[] = {
#ifndef CONFIG_POWER4
	/*
	 * Desktops
	 */

	{ "AAPL,8500", "PowerMac 8500/8600",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,9500", "PowerMac 9500/9600",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,7200", "PowerMac 7200",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,7300", "PowerMac 7200/7300",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,7500", "PowerMac 7500",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,ShinerESB", "Apple Network Server",
	  PMAC_TYPE_ANS, NULL,
	  0
	},
	{ "AAPL,e407", "Alchemy",
	  PMAC_TYPE_ALCHEMY, NULL,
	  0
	},
	{ "AAPL,e411", "Gazelle",
	  PMAC_TYPE_GAZELLE, NULL,
	  0
	},
	{ "AAPL,Gossamer", "PowerMac G3 (Gossamer)",
	  PMAC_TYPE_GOSSAMER, heathrow_desktop_features,
	  0
	},
	{ "AAPL,PowerMac G3", "PowerMac G3 (Silk)",
	  PMAC_TYPE_SILK, heathrow_desktop_features,
	  0
	},
	{ "PowerMac1,1", "Blue&White G3",
	  PMAC_TYPE_YOSEMITE, paddington_features,
	  0
	},
	{ "PowerMac1,2", "PowerMac G4 PCI Graphics",
	  PMAC_TYPE_YIKES, paddington_features,
	  0
	},
	{ "PowerMac2,1", "iMac FireWire",
	  PMAC_TYPE_FW_IMAC, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac2,2", "iMac FireWire",
	  PMAC_TYPE_FW_IMAC, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac3,1", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_OLD_CORE99
	},
	{ "PowerMac3,2", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac3,3", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac3,4", "PowerMac G4 Silver",
	  PMAC_TYPE_QUICKSILVER, core99_features,
	  PMAC_MB_MAY_SLEEP
	},
	{ "PowerMac3,5", "PowerMac G4 Silver",
	  PMAC_TYPE_QUICKSILVER, core99_features,
	  PMAC_MB_MAY_SLEEP
	},
	{ "PowerMac3,6", "PowerMac G4 Windtunnel",
	  PMAC_TYPE_WINDTUNNEL, core99_features,
	  PMAC_MB_MAY_SLEEP,
	},
	{ "PowerMac4,1", "iMac \"Flower Power\"",
	  PMAC_TYPE_PANGEA_IMAC, pangea_features,
	  PMAC_MB_MAY_SLEEP
	},
	{ "PowerMac4,2", "Flat panel iMac",
	  PMAC_TYPE_FLAT_PANEL_IMAC, pangea_features,
	  PMAC_MB_CAN_SLEEP
	},
	{ "PowerMac4,4", "eMac",
	  PMAC_TYPE_EMAC, core99_features,
	  PMAC_MB_MAY_SLEEP
	},
	{ "PowerMac5,1", "PowerMac G4 Cube",
	  PMAC_TYPE_CUBE, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac6,1", "Flat panel iMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP,
	},
	{ "PowerMac6,3", "Flat panel iMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP,
	},
	{ "PowerMac6,4", "eMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP,
	},
	{ "PowerMac10,1", "Mac mini",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER,
	},
	{ "iMac,1", "iMac (first generation)",
	  PMAC_TYPE_ORIG_IMAC, paddington_features,
	  0
	},

	/*
	 * Xserve's
	 */

	{ "RackMac1,1", "XServe",
	  PMAC_TYPE_RACKMAC, rackmac_features,
	  0,
	},
	{ "RackMac1,2", "XServe rev. 2",
	  PMAC_TYPE_RACKMAC, rackmac_features,
	  0,
	},

	/*
	 * Laptops
	 */

	{ "AAPL,3400/2400", "PowerBook 3400",
	  PMAC_TYPE_HOOPER, ohare_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{ "AAPL,3500", "PowerBook 3500",
	  PMAC_TYPE_KANGA, ohare_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{ "AAPL,PowerBook1998", "PowerBook Wallstreet",
	  PMAC_TYPE_WALLSTREET, heathrow_laptop_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{ "PowerBook1,1", "PowerBook 101 (Lombard)",
	  PMAC_TYPE_101_PBOOK, paddington_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{ "PowerBook2,1", "iBook (first generation)",
	  PMAC_TYPE_ORIG_IBOOK, core99_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{ "PowerBook2,2", "iBook FireWire",
	  PMAC_TYPE_FW_IBOOK, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
	  PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{ "PowerBook3,1", "PowerBook Pismo",
	  PMAC_TYPE_PISMO, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
	  PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{ "PowerBook3,2", "PowerBook Titanium",
	  PMAC_TYPE_TITANIUM, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook3,3", "PowerBook Titanium II",
	  PMAC_TYPE_TITANIUM2, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook3,4", "PowerBook Titanium III",
	  PMAC_TYPE_TITANIUM3, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook3,5", "PowerBook Titanium IV",
	  PMAC_TYPE_TITANIUM4, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook4,1", "iBook 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook4,2", "iBook 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook4,3", "iBook 2 rev. 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook5,1", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,2", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,3", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,4", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,5", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,6", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,7", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,1", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,2", "PowerBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,3", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,4", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,5", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,7", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,8", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
#else /* CONFIG_POWER4 */
	{ "PowerMac7,2", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5, g5_features,
	  0,
	},
#ifdef CONFIG_PPC64
	{ "PowerMac7,3", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5, g5_features,
	  0,
	},
	{ "PowerMac8,1", "iMac G5",
	  PMAC_TYPE_IMAC_G5, g5_features,
	  0,
	},
	{ "PowerMac9,1", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
	  0,
	},
	{ "RackMac3,1", "XServe G5",
	  PMAC_TYPE_XSERVE_G5, g5_features,
	  0,
	},
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_POWER4 */
};
2418
2419/*
2420 * The toplevel feature_call callback
2421 */
2422long pmac_do_feature_call(unsigned int selector, ...)
2423{
2424 struct device_node *node;
2425 long param, value;
2426 int i;
2427 feature_call func = NULL;
2428 va_list args;
2429
2430 if (pmac_mb.features)
2431 for (i=0; pmac_mb.features[i].function; i++)
2432 if (pmac_mb.features[i].selector == selector) {
2433 func = pmac_mb.features[i].function;
2434 break;
2435 }
2436 if (!func)
2437 for (i=0; any_features[i].function; i++)
2438 if (any_features[i].selector == selector) {
2439 func = any_features[i].function;
2440 break;
2441 }
2442 if (!func)
2443 return -ENODEV;
2444
2445 va_start(args, selector);
2446 node = (struct device_node*)va_arg(args, void*);
2447 param = va_arg(args, long);
2448 value = va_arg(args, long);
2449 va_end(args);
2450
2451 return func(node, param, value);
2452}
2453
2454static int __init probe_motherboard(void)
2455{
2456 int i;
2457 struct macio_chip *macio = &macio_chips[0];
2458 const char *model = NULL;
2459 struct device_node *dt;
2460
2461 /* Lookup known motherboard type in device-tree. First try an
2462 * exact match on the "model" property, then try a "compatible"
2463 * match is none is found.
2464 */
2465 dt = find_devices("device-tree");
2466 if (dt != NULL)
2467 model = (const char *) get_property(dt, "model", NULL);
2468 for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
2469 if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
2470 pmac_mb = pmac_mb_defs[i];
2471 goto found;
2472 }
2473 }
2474 for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
2475 if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
2476 pmac_mb = pmac_mb_defs[i];
2477 goto found;
2478 }
2479 }
2480
2481 /* Fallback to selection depending on mac-io chip type */
2482 switch(macio->type) {
2483#ifndef CONFIG_POWER4
2484 case macio_grand_central:
2485 pmac_mb.model_id = PMAC_TYPE_PSURGE;
2486 pmac_mb.model_name = "Unknown PowerSurge";
2487 break;
2488 case macio_ohare:
2489 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE;
2490 pmac_mb.model_name = "Unknown OHare-based";
2491 break;
2492 case macio_heathrow:
2493 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW;
2494 pmac_mb.model_name = "Unknown Heathrow-based";
2495 pmac_mb.features = heathrow_desktop_features;
2496 break;
2497 case macio_paddington:
2498 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON;
2499 pmac_mb.model_name = "Unknown Paddington-based";
2500 pmac_mb.features = paddington_features;
2501 break;
2502 case macio_keylargo:
2503 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99;
2504 pmac_mb.model_name = "Unknown Keylargo-based";
2505 pmac_mb.features = core99_features;
2506 break;
2507 case macio_pangea:
2508 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA;
2509 pmac_mb.model_name = "Unknown Pangea-based";
2510 pmac_mb.features = pangea_features;
2511 break;
2512 case macio_intrepid:
2513 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID;
2514 pmac_mb.model_name = "Unknown Intrepid-based";
2515 pmac_mb.features = intrepid_features;
2516 break;
2517#else /* CONFIG_POWER4 */
2518 case macio_keylargo2:
2519 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
2520 pmac_mb.model_name = "Unknown K2-based";
2521 pmac_mb.features = g5_features;
2522 break;
2523#endif /* CONFIG_POWER4 */
2524 default:
2525 return -ENODEV;
2526 }
2527found:
2528#ifndef CONFIG_POWER4
2529 /* Fixup Hooper vs. Comet */
2530 if (pmac_mb.model_id == PMAC_TYPE_HOOPER) {
2531 u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4);
2532 if (!mach_id_ptr)
2533 return -ENODEV;
2534 /* Here, I used to disable the media-bay on comet. It
2535 * appears this is wrong, the floppy connector is actually
2536 * a kind of media-bay and works with the current driver.
2537 */
2538 if (__raw_readl(mach_id_ptr) & 0x20000000UL)
2539 pmac_mb.model_id = PMAC_TYPE_COMET;
2540 iounmap(mach_id_ptr);
2541 }
2542#endif /* CONFIG_POWER4 */
2543
2544#ifdef CONFIG_6xx
2545 /* Set default value of powersave_nap on machines that support it.
2546 * It appears that uninorth rev 3 has a problem with it, we don't
2547 * enable it on those. In theory, the flush-on-lock property is
2548 * supposed to be set when not supported, but I'm not very confident
2549 * that all Apple OF revs did it properly, I do it the paranoid way.
2550 */
2551 while (uninorth_base && uninorth_rev > 3) {
2552 struct device_node *np = find_path_device("/cpus");
2553 if (!np || !np->child) {
2554 printk(KERN_WARNING "Can't find CPU(s) in device tree !\n");
2555 break;
2556 }
2557 np = np->child;
2558 /* Nap mode not supported on SMP */
2559 if (np->sibling)
2560 break;
2561 /* Nap mode not supported if flush-on-lock property is present */
2562 if (get_property(np, "flush-on-lock", NULL))
2563 break;
2564 powersave_nap = 1;
2565 printk(KERN_INFO "Processor NAP mode on idle enabled.\n");
2566 break;
2567 }
2568
2569 /* On CPUs that support it (750FX), lowspeed by default during
2570 * NAP mode
2571 */
2572 powersave_lowspeed = 1;
2573#endif /* CONFIG_6xx */
2574#ifdef CONFIG_POWER4
2575 powersave_nap = 1;
2576#endif
2577 /* Check for "mobile" machine */
2578 if (model && (strncmp(model, "PowerBook", 9) == 0
2579 || strncmp(model, "iBook", 5) == 0))
2580 pmac_mb.board_flags |= PMAC_MB_MOBILE;
2581
2582
2583 printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
2584 return 0;
2585}
2586
2587/* Initialize the Core99 UniNorth host bridge and memory controller
2588 */
2589static void __init probe_uninorth(void)
2590{
2591 unsigned long actrl;
2592
2593 /* Locate core99 Uni-N */
2594 uninorth_node = of_find_node_by_name(NULL, "uni-n");
2595 /* Locate G5 u3 */
2596 if (uninorth_node == NULL) {
2597 uninorth_node = of_find_node_by_name(NULL, "u3");
2598 uninorth_u3 = 1;
2599 }
2600 if (uninorth_node && uninorth_node->n_addrs > 0) {
2601 unsigned long address = uninorth_node->addrs[0].address;
2602 uninorth_base = ioremap(address, 0x40000);
2603 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
2604 if (uninorth_u3)
2605 u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
2606 } else
2607 uninorth_node = NULL;
2608
2609 if (!uninorth_node)
2610 return;
2611
2612 printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n",
2613 uninorth_u3 ? "U3" : "UniNorth", uninorth_rev);
2614 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
2615
2616 /* Set the arbitrer QAck delay according to what Apple does
2617 */
2618 if (uninorth_rev < 0x11) {
2619 actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
2620 actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
2621 UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
2622 UN_OUT(UNI_N_ARB_CTRL, actrl);
2623 }
2624
2625 /* Some more magic as done by them in recent MacOS X on UniNorth
2626 * revs 1.5 to 2.O and Pangea. Seem to toggle the UniN Maxbus/PCI
2627 * memory timeout
2628 */
2629 if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0)
2630 UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
2631}
2632
2633static void __init probe_one_macio(const char *name, const char *compat, int type)
2634{
2635 struct device_node* node;
2636 int i;
2637 volatile u32 __iomem * base;
2638 u32* revp;
2639
2640 node = find_devices(name);
2641 if (!node || !node->n_addrs)
2642 return;
2643 if (compat)
2644 do {
2645 if (device_is_compatible(node, compat))
2646 break;
2647 node = node->next;
2648 } while (node);
2649 if (!node)
2650 return;
2651 for(i=0; i<MAX_MACIO_CHIPS; i++) {
2652 if (!macio_chips[i].of_node)
2653 break;
2654 if (macio_chips[i].of_node == node)
2655 return;
2656 }
2657 if (i >= MAX_MACIO_CHIPS) {
2658 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
2659 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
2660 return;
2661 }
2662 base = ioremap(node->addrs[0].address, node->addrs[0].size);
2663 if (!base) {
2664 printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
2665 return;
2666 }
2667 if (type == macio_keylargo) {
2668 u32 *did = (u32 *)get_property(node, "device-id", NULL);
2669 if (*did == 0x00000025)
2670 type = macio_pangea;
2671 if (*did == 0x0000003e)
2672 type = macio_intrepid;
2673 }
2674 macio_chips[i].of_node = node;
2675 macio_chips[i].type = type;
2676 macio_chips[i].base = base;
2677 macio_chips[i].flags = MACIO_FLAG_SCCB_ON | MACIO_FLAG_SCCB_ON;
2678 macio_chips[i].name = macio_names[type];
2679 revp = (u32 *)get_property(node, "revision-id", NULL);
2680 if (revp)
2681 macio_chips[i].rev = *revp;
2682 printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
2683 macio_names[type], macio_chips[i].rev, macio_chips[i].base);
2684}
2685
2686static int __init
2687probe_macios(void)
2688{
2689 /* Warning, ordering is important */
2690 probe_one_macio("gc", NULL, macio_grand_central);
2691 probe_one_macio("ohare", NULL, macio_ohare);
2692 probe_one_macio("pci106b,7", NULL, macio_ohareII);
2693 probe_one_macio("mac-io", "keylargo", macio_keylargo);
2694 probe_one_macio("mac-io", "paddington", macio_paddington);
2695 probe_one_macio("mac-io", "gatwick", macio_gatwick);
2696 probe_one_macio("mac-io", "heathrow", macio_heathrow);
2697 probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
2698
2699 /* Make sure the "main" macio chip appear first */
2700 if (macio_chips[0].type == macio_gatwick
2701 && macio_chips[1].type == macio_heathrow) {
2702 struct macio_chip temp = macio_chips[0];
2703 macio_chips[0] = macio_chips[1];
2704 macio_chips[1] = temp;
2705 }
2706 if (macio_chips[0].type == macio_ohareII
2707 && macio_chips[1].type == macio_ohare) {
2708 struct macio_chip temp = macio_chips[0];
2709 macio_chips[0] = macio_chips[1];
2710 macio_chips[1] = temp;
2711 }
2712 macio_chips[0].lbus.index = 0;
2713 macio_chips[1].lbus.index = 1;
2714
2715 return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
2716}
2717
2718static void __init
2719initial_serial_shutdown(struct device_node *np)
2720{
2721 int len;
2722 struct slot_names_prop {
2723 int count;
2724 char name[1];
2725 } *slots;
2726 char *conn;
2727 int port_type = PMAC_SCC_ASYNC;
2728 int modem = 0;
2729
2730 slots = (struct slot_names_prop *)get_property(np, "slot-names", &len);
2731 conn = get_property(np, "AAPL,connector", &len);
2732 if (conn && (strcmp(conn, "infrared") == 0))
2733 port_type = PMAC_SCC_IRDA;
2734 else if (device_is_compatible(np, "cobalt"))
2735 modem = 1;
2736 else if (slots && slots->count > 0) {
2737 if (strcmp(slots->name, "IrDA") == 0)
2738 port_type = PMAC_SCC_IRDA;
2739 else if (strcmp(slots->name, "Modem") == 0)
2740 modem = 1;
2741 }
2742 if (modem)
2743 pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0);
2744 pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, port_type, 0);
2745}
2746
/* Apply the initial feature state at boot: enable some cells needed
 * for PCI probing (GMAC, FireWire, ATA-100), and power down devices
 * that drivers will turn back on later (airport, sound, serial).
 */
static void __init
set_initial_features(void)
{
	struct device_node *np;

	/* That hack appears to be necessary for some StarMax motherboards
	 * but I'm not too sure it was audited for side-effects on other
	 * ohare based machines...
	 * Since I still have difficulties figuring the right way to
	 * differentiate them all and since that hack was there for a long
	 * time, I'll keep it around
	 */
	if (macio_chips[0].type == macio_ohare && !find_devices("via-pmu")) {
		struct macio_chip *macio = &macio_chips[0];
		MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES);
	} else if (macio_chips[0].type == macio_ohare) {
		struct macio_chip *macio = &macio_chips[0];
		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
	} else if (macio_chips[1].type == macio_ohare) {
		struct macio_chip *macio = &macio_chips[1];
		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
	}

#ifdef CONFIG_POWER4
	if (macio_chips[0].type == macio_keylargo2) {
#ifndef CONFIG_SMP
		/* On SMP machines running UP, we have the second CPU eating
		 * bus cycles. We need to take it off the bus. This is done
		 * from pmac_smp for SMP kernels running on one CPU.
		 * The second of_find_node_by_type() call continues the
		 * search from the first "cpu" node, so np is non-NULL here
		 * only when a second CPU node exists.
		 */
		np = of_find_node_by_type(NULL, "cpu");
		if (np != NULL)
			np = of_find_node_by_type(np, "cpu");
		if (np != NULL) {
			g5_phy_disable_cpu1();
			of_node_put(np);
		}
#endif /* CONFIG_SMP */
		/* Enable GMAC for now for PCI probing. It will be disabled
		 * later on after PCI probe
		 */
		np = of_find_node_by_name(NULL, "ethernet");
		while(np) {
			if (device_is_compatible(np, "K2-GMAC"))
				g5_gmac_enable(np, 0, 1);
			np = of_find_node_by_name(np, "ethernet");
		}

		/* Enable FW before PCI probe. Will be disabled later on
		 * Note: We should have a better way to check that we are
		 * dealing with uninorth internal cell and not a PCI cell
		 * on the external PCI. The code below works though.
		 */
		np = of_find_node_by_name(NULL, "firewire");
		while(np) {
			if (device_is_compatible(np, "pci106b,5811")) {
				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
				g5_fw_enable(np, 0, 1);
			}
			np = of_find_node_by_name(np, "firewire");
		}
	}
#else /* CONFIG_POWER4 */

	if (macio_chips[0].type == macio_keylargo ||
	    macio_chips[0].type == macio_pangea ||
	    macio_chips[0].type == macio_intrepid) {
		/* Enable GMAC for now for PCI probing. It will be disabled
		 * later on after PCI probe
		 */
		np = of_find_node_by_name(NULL, "ethernet");
		while(np) {
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && device_is_compatible(np, "gmac"))
				core99_gmac_enable(np, 0, 1);
			np = of_find_node_by_name(np, "ethernet");
		}

		/* Enable FW before PCI probe. Will be disabled later on
		 * Note: We should have a better way to check that we are
		 * dealing with uninorth internal cell and not a PCI cell
		 * on the external PCI. The code below works though.
		 */
		np = of_find_node_by_name(NULL, "firewire");
		while(np) {
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && (device_is_compatible(np, "pci106b,18") ||
			        device_is_compatible(np, "pci106b,30") ||
			        device_is_compatible(np, "pci11c1,5811"))) {
				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
				core99_firewire_enable(np, 0, 1);
			}
			np = of_find_node_by_name(np, "firewire");
		}

		/* Enable ATA-100 before PCI probe. */
		np = of_find_node_by_name(NULL, "ata-6");
		while(np) {
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && device_is_compatible(np, "kauai-ata")) {
				core99_ata100_enable(np, 1);
			}
			np = of_find_node_by_name(np, "ata-6");
		}

		/* Switch airport off */
		np = find_devices("radio");
		while(np) {
			if (np && np->parent == macio_chips[0].of_node) {
				macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
				core99_airport_enable(np, 0, 0);
			}
			np = np->next;
		}
	}

	/* On all machines that support sound PM, switch sound off */
	if (macio_chips[0].of_node)
		pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE,
				     macio_chips[0].of_node, 0, 0);

	/* While on some desktop G3s, we turn it back on */
	if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow
	    && (pmac_mb.model_id == PMAC_TYPE_GOSSAMER ||
		pmac_mb.model_id == PMAC_TYPE_SILK)) {
		struct macio_chip *macio = &macio_chips[0];
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
	}

	/* Some machine models need the clock chip to be properly setup for
	 * clock spreading now. This should be a platform function but we
	 * don't do these at the moment
	 */
	pmac_tweak_clock_spreading(1);

#endif /* CONFIG_POWER4 */

	/* On all machines, switch modem & serial ports off */
	np = find_devices("ch-a");
	while(np) {
		initial_serial_shutdown(np);
		np = np->next;
	}
	np = find_devices("ch-b");
	while(np) {
		initial_serial_shutdown(np);
		np = np->next;
	}
}
2900
2901void __init
2902pmac_feature_init(void)
2903{
2904 /* Detect the UniNorth memory controller */
2905 probe_uninorth();
2906
2907 /* Probe mac-io controllers */
2908 if (probe_macios()) {
2909 printk(KERN_WARNING "No mac-io chip found\n");
2910 return;
2911 }
2912
2913 /* Setup low-level i2c stuffs */
2914 pmac_init_low_i2c();
2915
2916 /* Probe machine type */
2917 if (probe_motherboard())
2918 printk(KERN_WARNING "Unknown PowerMac !\n");
2919
2920 /* Set some initial features (turn off some chips that will
2921 * be later turned on)
2922 */
2923 set_initial_features();
2924}
2925
/* Late (device_initcall-time) feature setup.
 * Currently a no-op: the OF resource reservation code below is
 * compiled out (#if 0) and kept for reference only.
 */
int __init pmac_feature_late_init(void)
{
#if 0
	struct device_node *np;

	/* Request some resources late */
	if (uninorth_node)
		request_OF_resource(uninorth_node, 0, NULL);
	np = find_devices("hammerhead");
	if (np)
		request_OF_resource(np, 0, NULL);
	np = find_devices("interrupt-controller");
	if (np)
		request_OF_resource(np, 0, NULL);
#endif
	return 0;
}

device_initcall(pmac_feature_late_init);
2945
2946#if 0
2947static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
2948{
2949 int freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
2950 int bits[8] = { 8,16,0,32,2,4,0,0 };
2951 int freq = (frq >> 8) & 0xf;
2952
2953 if (freqs[freq] == 0)
2954 printk("%s: Unknown HT link frequency %x\n", name, freq);
2955 else
2956 printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
2957 name, freqs[freq],
2958 bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
2959}
2960
2961void __init pmac_check_ht_link(void)
2962{
2963 u32 ufreq, freq, ucfg, cfg;
2964 struct device_node *pcix_node;
2965 u8 px_bus, px_devfn;
2966 struct pci_controller *px_hose;
2967
2968 (void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
2969 ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
2970 ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
2971 dump_HT_speeds("U3 HyperTransport", cfg, freq);
2972
2973 pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
2974 if (pcix_node == NULL) {
2975 printk("No PCI-X bridge found\n");
2976 return;
2977 }
2978 if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) {
2979 printk("PCI-X bridge found but not matched to pci\n");
2980 return;
2981 }
2982 px_hose = pci_find_hose_for_OF_device(pcix_node);
2983 if (px_hose == NULL) {
2984 printk("PCI-X bridge found but not matched to host\n");
2985 return;
2986 }
2987 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
2988 early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
2989 dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
2990 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
2991 early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
2992 dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
2993}
2994#endif /* 0 */
2995
2996/*
2997 * Early video resume hook
2998 */
2999
3000static void (*pmac_early_vresume_proc)(void *data);
3001static void *pmac_early_vresume_data;
3002
3003void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
3004{
3005 if (_machine != _MACH_Pmac)
3006 return;
3007 preempt_disable();
3008 pmac_early_vresume_proc = proc;
3009 pmac_early_vresume_data = data;
3010 preempt_enable();
3011}
3012EXPORT_SYMBOL(pmac_set_early_video_resume);
3013
3014void pmac_call_early_video_resume(void)
3015{
3016 if (pmac_early_vresume_proc)
3017 pmac_early_vresume_proc(pmac_early_vresume_data);
3018}
3019
3020/*
3021 * AGP related suspend/resume code
3022 */
3023
3024static struct pci_dev *pmac_agp_bridge;
3025static int (*pmac_agp_suspend)(struct pci_dev *bridge);
3026static int (*pmac_agp_resume)(struct pci_dev *bridge);
3027
3028void pmac_register_agp_pm(struct pci_dev *bridge,
3029 int (*suspend)(struct pci_dev *bridge),
3030 int (*resume)(struct pci_dev *bridge))
3031{
3032 if (suspend || resume) {
3033 pmac_agp_bridge = bridge;
3034 pmac_agp_suspend = suspend;
3035 pmac_agp_resume = resume;
3036 return;
3037 }
3038 if (bridge != pmac_agp_bridge)
3039 return;
3040 pmac_agp_suspend = pmac_agp_resume = NULL;
3041 return;
3042}
3043EXPORT_SYMBOL(pmac_register_agp_pm);
3044
3045void pmac_suspend_agp_for_card(struct pci_dev *dev)
3046{
3047 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
3048 return;
3049 if (pmac_agp_bridge->bus != dev->bus)
3050 return;
3051 pmac_agp_suspend(pmac_agp_bridge);
3052}
3053EXPORT_SYMBOL(pmac_suspend_agp_for_card);
3054
3055void pmac_resume_agp_for_card(struct pci_dev *dev)
3056{
3057 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
3058 return;
3059 if (pmac_agp_bridge->bus != dev->bus)
3060 return;
3061 pmac_agp_resume(pmac_agp_bridge);
3062}
3063EXPORT_SYMBOL(pmac_resume_agp_for_card);
diff --git a/arch/ppc64/kernel/pmac_low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f3f39e8e337a..f3f39e8e337a 100644
--- a/arch/ppc64/kernel/pmac_low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
diff --git a/arch/ppc64/kernel/pmac_nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index e32a902236e3..4042e2f06ee0 100644
--- a/arch/ppc64/kernel/pmac_nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -15,10 +15,13 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/stddef.h> 16#include <linux/stddef.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/nvram.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/adb.h>
24#include <linux/pmu.h>
22#include <linux/bootmem.h> 25#include <linux/bootmem.h>
23#include <linux/completion.h> 26#include <linux/completion.h>
24#include <linux/spinlock.h> 27#include <linux/spinlock.h>
@@ -72,20 +75,38 @@ struct core99_header {
72/* 75/*
73 * Read and write the non-volatile RAM on PowerMacs and CHRP machines. 76 * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
74 */ 77 */
78static int nvram_naddrs;
75static volatile unsigned char *nvram_data; 79static volatile unsigned char *nvram_data;
80static int is_core_99;
76static int core99_bank = 0; 81static int core99_bank = 0;
82static int nvram_partitions[3];
77// XXX Turn that into a sem 83// XXX Turn that into a sem
78static DEFINE_SPINLOCK(nv_lock); 84static DEFINE_SPINLOCK(nv_lock);
79 85
86extern int pmac_newworld;
80extern int system_running; 87extern int system_running;
81 88
82static int (*core99_write_bank)(int bank, u8* datas); 89static int (*core99_write_bank)(int bank, u8* datas);
83static int (*core99_erase_bank)(int bank); 90static int (*core99_erase_bank)(int bank);
84 91
85static char *nvram_image __pmacdata; 92static char *nvram_image;
86 93
87 94
88static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index) 95static unsigned char core99_nvram_read_byte(int addr)
96{
97 if (nvram_image == NULL)
98 return 0xff;
99 return nvram_image[addr];
100}
101
102static void core99_nvram_write_byte(int addr, unsigned char val)
103{
104 if (nvram_image == NULL)
105 return;
106 nvram_image[addr] = val;
107}
108
109static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
89{ 110{
90 int i; 111 int i;
91 112
@@ -103,7 +124,7 @@ static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index)
103 return count; 124 return count;
104} 125}
105 126
106static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index) 127static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
107{ 128{
108 int i; 129 int i;
109 130
@@ -121,14 +142,95 @@ static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index)
121 return count; 142 return count;
122} 143}
123 144
124static ssize_t __pmac core99_nvram_size(void) 145static ssize_t core99_nvram_size(void)
125{ 146{
126 if (nvram_image == NULL) 147 if (nvram_image == NULL)
127 return -ENODEV; 148 return -ENODEV;
128 return NVRAM_SIZE; 149 return NVRAM_SIZE;
129} 150}
130 151
131static u8 __pmac chrp_checksum(struct chrp_header* hdr) 152#ifdef CONFIG_PPC32
153static volatile unsigned char *nvram_addr;
154static int nvram_mult;
155
156static unsigned char direct_nvram_read_byte(int addr)
157{
158 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
159}
160
161static void direct_nvram_write_byte(int addr, unsigned char val)
162{
163 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
164}
165
166
167static unsigned char indirect_nvram_read_byte(int addr)
168{
169 unsigned char val;
170 unsigned long flags;
171
172 spin_lock_irqsave(&nv_lock, flags);
173 out_8(nvram_addr, addr >> 5);
174 val = in_8(&nvram_data[(addr & 0x1f) << 4]);
175 spin_unlock_irqrestore(&nv_lock, flags);
176
177 return val;
178}
179
180static void indirect_nvram_write_byte(int addr, unsigned char val)
181{
182 unsigned long flags;
183
184 spin_lock_irqsave(&nv_lock, flags);
185 out_8(nvram_addr, addr >> 5);
186 out_8(&nvram_data[(addr & 0x1f) << 4], val);
187 spin_unlock_irqrestore(&nv_lock, flags);
188}
189
190
191#ifdef CONFIG_ADB_PMU
192
193static void pmu_nvram_complete(struct adb_request *req)
194{
195 if (req->arg)
196 complete((struct completion *)req->arg);
197}
198
199static unsigned char pmu_nvram_read_byte(int addr)
200{
201 struct adb_request req;
202 DECLARE_COMPLETION(req_complete);
203
204 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
205 if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
206 (addr >> 8) & 0xff, addr & 0xff))
207 return 0xff;
208 if (system_state == SYSTEM_RUNNING)
209 wait_for_completion(&req_complete);
210 while (!req.complete)
211 pmu_poll();
212 return req.reply[0];
213}
214
215static void pmu_nvram_write_byte(int addr, unsigned char val)
216{
217 struct adb_request req;
218 DECLARE_COMPLETION(req_complete);
219
220 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
221 if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
222 (addr >> 8) & 0xff, addr & 0xff, val))
223 return;
224 if (system_state == SYSTEM_RUNNING)
225 wait_for_completion(&req_complete);
226 while (!req.complete)
227 pmu_poll();
228}
229
230#endif /* CONFIG_ADB_PMU */
231#endif /* CONFIG_PPC32 */
232
233static u8 chrp_checksum(struct chrp_header* hdr)
132{ 234{
133 u8 *ptr; 235 u8 *ptr;
134 u16 sum = hdr->signature; 236 u16 sum = hdr->signature;
@@ -139,7 +241,7 @@ static u8 __pmac chrp_checksum(struct chrp_header* hdr)
139 return sum; 241 return sum;
140} 242}
141 243
142static u32 __pmac core99_calc_adler(u8 *buffer) 244static u32 core99_calc_adler(u8 *buffer)
143{ 245{
144 int cnt; 246 int cnt;
145 u32 low, high; 247 u32 low, high;
@@ -161,7 +263,7 @@ static u32 __pmac core99_calc_adler(u8 *buffer)
161 return (high << 16) | low; 263 return (high << 16) | low;
162} 264}
163 265
164static u32 __pmac core99_check(u8* datas) 266static u32 core99_check(u8* datas)
165{ 267{
166 struct core99_header* hdr99 = (struct core99_header*)datas; 268 struct core99_header* hdr99 = (struct core99_header*)datas;
167 269
@@ -180,7 +282,7 @@ static u32 __pmac core99_check(u8* datas)
180 return hdr99->generation; 282 return hdr99->generation;
181} 283}
182 284
183static int __pmac sm_erase_bank(int bank) 285static int sm_erase_bank(int bank)
184{ 286{
185 int stat, i; 287 int stat, i;
186 unsigned long timeout; 288 unsigned long timeout;
@@ -194,7 +296,7 @@ static int __pmac sm_erase_bank(int bank)
194 timeout = 0; 296 timeout = 0;
195 do { 297 do {
196 if (++timeout > 1000000) { 298 if (++timeout > 1000000) {
197 printk(KERN_ERR "nvram: Sharp/Miron flash erase timeout !\n"); 299 printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
198 break; 300 break;
199 } 301 }
200 out_8(base, SM_FLASH_CMD_READ_STATUS); 302 out_8(base, SM_FLASH_CMD_READ_STATUS);
@@ -212,7 +314,7 @@ static int __pmac sm_erase_bank(int bank)
212 return 0; 314 return 0;
213} 315}
214 316
215static int __pmac sm_write_bank(int bank, u8* datas) 317static int sm_write_bank(int bank, u8* datas)
216{ 318{
217 int i, stat = 0; 319 int i, stat = 0;
218 unsigned long timeout; 320 unsigned long timeout;
@@ -247,7 +349,7 @@ static int __pmac sm_write_bank(int bank, u8* datas)
247 return 0; 349 return 0;
248} 350}
249 351
250static int __pmac amd_erase_bank(int bank) 352static int amd_erase_bank(int bank)
251{ 353{
252 int i, stat = 0; 354 int i, stat = 0;
253 unsigned long timeout; 355 unsigned long timeout;
@@ -294,7 +396,7 @@ static int __pmac amd_erase_bank(int bank)
294 return 0; 396 return 0;
295} 397}
296 398
297static int __pmac amd_write_bank(int bank, u8* datas) 399static int amd_write_bank(int bank, u8* datas)
298{ 400{
299 int i, stat = 0; 401 int i, stat = 0;
300 unsigned long timeout; 402 unsigned long timeout;
@@ -340,12 +442,49 @@ static int __pmac amd_write_bank(int bank, u8* datas)
340 return 0; 442 return 0;
341} 443}
342 444
445static void __init lookup_partitions(void)
446{
447 u8 buffer[17];
448 int i, offset;
449 struct chrp_header* hdr;
450
451 if (pmac_newworld) {
452 nvram_partitions[pmac_nvram_OF] = -1;
453 nvram_partitions[pmac_nvram_XPRAM] = -1;
454 nvram_partitions[pmac_nvram_NR] = -1;
455 hdr = (struct chrp_header *)buffer;
456
457 offset = 0;
458 buffer[16] = 0;
459 do {
460 for (i=0;i<16;i++)
461 buffer[i] = ppc_md.nvram_read_val(offset+i);
462 if (!strcmp(hdr->name, "common"))
463 nvram_partitions[pmac_nvram_OF] = offset + 0x10;
464 if (!strcmp(hdr->name, "APL,MacOS75")) {
465 nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
466 nvram_partitions[pmac_nvram_NR] = offset + 0x110;
467 }
468 offset += (hdr->len * 0x10);
469 } while(offset < NVRAM_SIZE);
470 } else {
471 nvram_partitions[pmac_nvram_OF] = 0x1800;
472 nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
473 nvram_partitions[pmac_nvram_NR] = 0x1400;
474 }
475 DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
476 DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
477 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
478}
343 479
344static int __pmac core99_nvram_sync(void) 480static void core99_nvram_sync(void)
345{ 481{
346 struct core99_header* hdr99; 482 struct core99_header* hdr99;
347 unsigned long flags; 483 unsigned long flags;
348 484
485 if (!is_core_99 || !nvram_data || !nvram_image)
486 return;
487
349 spin_lock_irqsave(&nv_lock, flags); 488 spin_lock_irqsave(&nv_lock, flags);
350 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE, 489 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
351 NVRAM_SIZE)) 490 NVRAM_SIZE))
@@ -370,32 +509,28 @@ static int __pmac core99_nvram_sync(void)
370 bail: 509 bail:
371 spin_unlock_irqrestore(&nv_lock, flags); 510 spin_unlock_irqrestore(&nv_lock, flags);
372 511
373 return 0; 512#ifdef DEBUG
513 mdelay(2000);
514#endif
374} 515}
375 516
376int __init pmac_nvram_init(void) 517static int __init core99_nvram_setup(struct device_node *dp)
377{ 518{
378 struct device_node *dp;
379 u32 gen_bank0, gen_bank1;
380 int i; 519 int i;
520 u32 gen_bank0, gen_bank1;
381 521
382 dp = find_devices("nvram"); 522 if (nvram_naddrs < 1) {
383 if (dp == NULL) { 523 printk(KERN_ERR "nvram: no address\n");
384 printk(KERN_ERR "Can't find NVRAM device\n"); 524 return -EINVAL;
385 return -ENODEV;
386 }
387 if (!device_is_compatible(dp, "nvram,flash")) {
388 printk(KERN_ERR "Incompatible type of NVRAM\n");
389 return -ENXIO;
390 } 525 }
391
392 nvram_image = alloc_bootmem(NVRAM_SIZE); 526 nvram_image = alloc_bootmem(NVRAM_SIZE);
393 if (nvram_image == NULL) { 527 if (nvram_image == NULL) {
394 printk(KERN_ERR "nvram: can't allocate ram image\n"); 528 printk(KERN_ERR "nvram: can't allocate ram image\n");
395 return -ENOMEM; 529 return -ENOMEM;
396 } 530 }
397 nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2); 531 nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
398 532 nvram_naddrs = 1; /* Make sure we get the correct case */
533
399 DBG("nvram: Checking bank 0...\n"); 534 DBG("nvram: Checking bank 0...\n");
400 535
401 gen_bank0 = core99_check((u8 *)nvram_data); 536 gen_bank0 = core99_check((u8 *)nvram_data);
@@ -408,11 +543,12 @@ int __init pmac_nvram_init(void)
408 for (i=0; i<NVRAM_SIZE; i++) 543 for (i=0; i<NVRAM_SIZE; i++)
409 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE]; 544 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
410 545
546 ppc_md.nvram_read_val = core99_nvram_read_byte;
547 ppc_md.nvram_write_val = core99_nvram_write_byte;
411 ppc_md.nvram_read = core99_nvram_read; 548 ppc_md.nvram_read = core99_nvram_read;
412 ppc_md.nvram_write = core99_nvram_write; 549 ppc_md.nvram_write = core99_nvram_write;
413 ppc_md.nvram_size = core99_nvram_size; 550 ppc_md.nvram_size = core99_nvram_size;
414 ppc_md.nvram_sync = core99_nvram_sync; 551 ppc_md.nvram_sync = core99_nvram_sync;
415
416 /* 552 /*
417 * Maybe we could be smarter here though making an exclusive list 553 * Maybe we could be smarter here though making an exclusive list
418 * of known flash chips is a bit nasty as older OF didn't provide us 554 * of known flash chips is a bit nasty as older OF didn't provide us
@@ -427,67 +563,81 @@ int __init pmac_nvram_init(void)
427 core99_erase_bank = sm_erase_bank; 563 core99_erase_bank = sm_erase_bank;
428 core99_write_bank = sm_write_bank; 564 core99_write_bank = sm_write_bank;
429 } 565 }
430
431 return 0; 566 return 0;
432} 567}
433 568
434int __pmac pmac_get_partition(int partition) 569int __init pmac_nvram_init(void)
435{ 570{
436 struct nvram_partition *part; 571 struct device_node *dp;
437 const char *name; 572 int err = 0;
438 int sig; 573
439 574 nvram_naddrs = 0;
440 switch(partition) { 575
441 case pmac_nvram_OF: 576 dp = find_devices("nvram");
442 name = "common"; 577 if (dp == NULL) {
443 sig = NVRAM_SIG_SYS; 578 printk(KERN_ERR "Can't find NVRAM device\n");
444 break;
445 case pmac_nvram_XPRAM:
446 name = "APL,MacOS75";
447 sig = NVRAM_SIG_OS;
448 break;
449 case pmac_nvram_NR:
450 default:
451 /* Oldworld stuff */
452 return -ENODEV; 579 return -ENODEV;
453 } 580 }
581 nvram_naddrs = dp->n_addrs;
582 is_core_99 = device_is_compatible(dp, "nvram,flash");
583 if (is_core_99)
584 err = core99_nvram_setup(dp);
585#ifdef CONFIG_PPC32
586 else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
587 nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
588 dp->addrs[0].size);
589 nvram_mult = 1;
590 ppc_md.nvram_read_val = direct_nvram_read_byte;
591 ppc_md.nvram_write_val = direct_nvram_write_byte;
592 } else if (nvram_naddrs == 1) {
593 nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
594 nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
595 ppc_md.nvram_read_val = direct_nvram_read_byte;
596 ppc_md.nvram_write_val = direct_nvram_write_byte;
597 } else if (nvram_naddrs == 2) {
598 nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
599 nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
600 ppc_md.nvram_read_val = indirect_nvram_read_byte;
601 ppc_md.nvram_write_val = indirect_nvram_write_byte;
602 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
603#ifdef CONFIG_ADB_PMU
604 nvram_naddrs = -1;
605 ppc_md.nvram_read_val = pmu_nvram_read_byte;
606 ppc_md.nvram_write_val = pmu_nvram_write_byte;
607#endif /* CONFIG_ADB_PMU */
608 }
609#endif
610 else {
611 printk(KERN_ERR "Incompatible type of NVRAM\n");
612 return -ENXIO;
613 }
614 lookup_partitions();
615 return err;
616}
454 617
455 part = nvram_find_partition(sig, name); 618int pmac_get_partition(int partition)
456 if (part == NULL) 619{
457 return 0; 620 return nvram_partitions[partition];
458
459 return part->index;
460} 621}
461 622
462u8 __pmac pmac_xpram_read(int xpaddr) 623u8 pmac_xpram_read(int xpaddr)
463{ 624{
464 int offset = pmac_get_partition(pmac_nvram_XPRAM); 625 int offset = pmac_get_partition(pmac_nvram_XPRAM);
465 loff_t index;
466 u8 buf;
467 ssize_t count;
468 626
469 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) 627 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
470 return 0xff; 628 return 0xff;
471 index = offset + xpaddr;
472 629
473 count = ppc_md.nvram_read(&buf, 1, &index); 630 return ppc_md.nvram_read_val(xpaddr + offset);
474 if (count != 1)
475 return 0xff;
476 return buf;
477} 631}
478 632
479void __pmac pmac_xpram_write(int xpaddr, u8 data) 633void pmac_xpram_write(int xpaddr, u8 data)
480{ 634{
481 int offset = pmac_get_partition(pmac_nvram_XPRAM); 635 int offset = pmac_get_partition(pmac_nvram_XPRAM);
482 loff_t index;
483 u8 buf;
484 636
485 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) 637 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
486 return; 638 return;
487 index = offset + xpaddr;
488 buf = data;
489 639
490 ppc_md.nvram_write(&buf, 1, &index); 640 ppc_md.nvram_write_val(xpaddr + offset, data);
491} 641}
492 642
493EXPORT_SYMBOL(pmac_get_partition); 643EXPORT_SYMBOL(pmac_get_partition);
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
new file mode 100644
index 000000000000..ebe22a2267d2
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -0,0 +1,1213 @@
1/*
2 * Support for PCI bridges found on Power Macintoshes.
3 *
4 * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
5 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/string.h>
17#include <linux/init.h>
18#include <linux/bootmem.h>
19
20#include <asm/sections.h>
21#include <asm/io.h>
22#include <asm/prom.h>
23#include <asm/pci-bridge.h>
24#include <asm/machdep.h>
25#include <asm/pmac_feature.h>
26#ifdef CONFIG_PPC64
27#include <asm/iommu.h>
28#include <asm/ppc-pci.h>
29#endif
30
31#undef DEBUG
32
33#ifdef DEBUG
34#define DBG(x...) printk(x)
35#else
36#define DBG(x...)
37#endif
38
39static int add_bridge(struct device_node *dev);
40
41/* XXX Could be per-controller, but I don't think we risk anything by
42 * assuming we won't have both UniNorth and Bandit */
43static int has_uninorth;
44#ifdef CONFIG_PPC64
45static struct pci_controller *u3_agp;
46static struct pci_controller *u3_ht;
47#endif /* CONFIG_PPC64 */
48
49extern u8 pci_cache_line_size;
50extern int pcibios_assign_bus_offset;
51
52struct device_node *k2_skiplist[2];
53
54/*
55 * Magic constants for enabling cache coherency in the bandit/PSX bridge.
56 */
57#define BANDIT_DEVID_2 8
58#define BANDIT_REVID 3
59
60#define BANDIT_DEVNUM 11
61#define BANDIT_MAGIC 0x50
62#define BANDIT_COHERENT 0x40
63
64static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
65{
66 for (; node != 0;node = node->sibling) {
67 int * bus_range;
68 unsigned int *class_code;
69 int len;
70
71 /* For PCI<->PCI bridges or CardBus bridges, we go down */
72 class_code = (unsigned int *) get_property(node, "class-code", NULL);
73 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
74 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
75 continue;
76 bus_range = (int *) get_property(node, "bus-range", &len);
77 if (bus_range != NULL && len > 2 * sizeof(int)) {
78 if (bus_range[1] > higher)
79 higher = bus_range[1];
80 }
81 higher = fixup_one_level_bus_range(node->child, higher);
82 }
83 return higher;
84}
85
86/* This routine fixes the "bus-range" property of all bridges in the
87 * system since they tend to have their "last" member wrong on macs
88 *
89 * Note that the bus numbers manipulated here are OF bus numbers, they
90 * are not Linux bus numbers.
91 */
92static void __init fixup_bus_range(struct device_node *bridge)
93{
94 int * bus_range;
95 int len;
96
97 /* Lookup the "bus-range" property for the hose */
98 bus_range = (int *) get_property(bridge, "bus-range", &len);
99 if (bus_range == NULL || len < 2 * sizeof(int)) {
100 printk(KERN_WARNING "Can't get bus-range for %s\n",
101 bridge->full_name);
102 return;
103 }
104 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
105}
106
107/*
108 * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
109 *
110 * The "Bandit" version is present in all early PCI PowerMacs,
111 * and up to the first ones using Grackle. Some machines may
112 * have 2 bandit controllers (2 PCI busses).
113 *
114 * "Chaos" is used in some "Bandit"-type machines as a bridge
115 * for the separate display bus. It is accessed the same
116 * way as bandit, but cannot be probed for devices. It therefore
117 * has its own config access functions.
118 *
119 * The "UniNorth" version is present in all Core99 machines
120 * (iBook, G4, new IMacs, and all the recent Apple machines).
121 * It contains 3 controllers in one ASIC.
122 *
123 * The U3 is the bridge used on G5 machines. It contains an
124 * AGP bus which is dealt with the old UniNorth access routines
125 * and a HyperTransport bus which uses its own set of access
126 * functions.
127 */
128
129#define MACRISC_CFA0(devfn, off) \
130 ((1 << (unsigned long)PCI_SLOT(dev_fn)) \
131 | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
132 | (((unsigned long)(off)) & 0xFCUL))
133
134#define MACRISC_CFA1(bus, devfn, off) \
135 ((((unsigned long)(bus)) << 16) \
136 |(((unsigned long)(devfn)) << 8) \
137 |(((unsigned long)(off)) & 0xFCUL) \
138 |1UL)
139
140static unsigned long macrisc_cfg_access(struct pci_controller* hose,
141 u8 bus, u8 dev_fn, u8 offset)
142{
143 unsigned int caddr;
144
145 if (bus == hose->first_busno) {
146 if (dev_fn < (11 << 3))
147 return 0;
148 caddr = MACRISC_CFA0(dev_fn, offset);
149 } else
150 caddr = MACRISC_CFA1(bus, dev_fn, offset);
151
152 /* Uninorth will return garbage if we don't read back the value ! */
153 do {
154 out_le32(hose->cfg_addr, caddr);
155 } while (in_le32(hose->cfg_addr) != caddr);
156
157 offset &= has_uninorth ? 0x07 : 0x03;
158 return ((unsigned long)hose->cfg_data) + offset;
159}
160
161static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
162 int offset, int len, u32 *val)
163{
164 struct pci_controller *hose;
165 unsigned long addr;
166
167 hose = pci_bus_to_host(bus);
168 if (hose == NULL)
169 return PCIBIOS_DEVICE_NOT_FOUND;
170
171 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
172 if (!addr)
173 return PCIBIOS_DEVICE_NOT_FOUND;
174 /*
175 * Note: the caller has already checked that offset is
176 * suitably aligned and that len is 1, 2 or 4.
177 */
178 switch (len) {
179 case 1:
180 *val = in_8((u8 *)addr);
181 break;
182 case 2:
183 *val = in_le16((u16 *)addr);
184 break;
185 default:
186 *val = in_le32((u32 *)addr);
187 break;
188 }
189 return PCIBIOS_SUCCESSFUL;
190}
191
192static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
193 int offset, int len, u32 val)
194{
195 struct pci_controller *hose;
196 unsigned long addr;
197
198 hose = pci_bus_to_host(bus);
199 if (hose == NULL)
200 return PCIBIOS_DEVICE_NOT_FOUND;
201
202 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
203 if (!addr)
204 return PCIBIOS_DEVICE_NOT_FOUND;
205 /*
206 * Note: the caller has already checked that offset is
207 * suitably aligned and that len is 1, 2 or 4.
208 */
209 switch (len) {
210 case 1:
211 out_8((u8 *)addr, val);
212 (void) in_8((u8 *)addr);
213 break;
214 case 2:
215 out_le16((u16 *)addr, val);
216 (void) in_le16((u16 *)addr);
217 break;
218 default:
219 out_le32((u32 *)addr, val);
220 (void) in_le32((u32 *)addr);
221 break;
222 }
223 return PCIBIOS_SUCCESSFUL;
224}
225
226static struct pci_ops macrisc_pci_ops =
227{
228 macrisc_read_config,
229 macrisc_write_config
230};
231
232#ifdef CONFIG_PPC32
233/*
234 * Verify that a specific (bus, dev_fn) exists on chaos
235 */
236static int
237chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
238{
239 struct device_node *np;
240 u32 *vendor, *device;
241
242 np = pci_busdev_to_OF_node(bus, devfn);
243 if (np == NULL)
244 return PCIBIOS_DEVICE_NOT_FOUND;
245
246 vendor = (u32 *)get_property(np, "vendor-id", NULL);
247 device = (u32 *)get_property(np, "device-id", NULL);
248 if (vendor == NULL || device == NULL)
249 return PCIBIOS_DEVICE_NOT_FOUND;
250
251 if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
252 && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
253 return PCIBIOS_BAD_REGISTER_NUMBER;
254
255 return PCIBIOS_SUCCESSFUL;
256}
257
258static int
259chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
260 int len, u32 *val)
261{
262 int result = chaos_validate_dev(bus, devfn, offset);
263 if (result == PCIBIOS_BAD_REGISTER_NUMBER)
264 *val = ~0U;
265 if (result != PCIBIOS_SUCCESSFUL)
266 return result;
267 return macrisc_read_config(bus, devfn, offset, len, val);
268}
269
270static int
271chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
272 int len, u32 val)
273{
274 int result = chaos_validate_dev(bus, devfn, offset);
275 if (result != PCIBIOS_SUCCESSFUL)
276 return result;
277 return macrisc_write_config(bus, devfn, offset, len, val);
278}
279
280static struct pci_ops chaos_pci_ops =
281{
282 chaos_read_config,
283 chaos_write_config
284};
285
286static void __init setup_chaos(struct pci_controller *hose,
287 struct reg_property *addr)
288{
289 /* assume a `chaos' bridge */
290 hose->ops = &chaos_pci_ops;
291 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
292 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
293}
294#else
295#define setup_chaos(hose, addr)
296#endif /* CONFIG_PPC32 */
297
298#ifdef CONFIG_PPC64
299/*
300 * These versions of U3 HyperTransport config space access ops do not
301 * implement self-view of the HT host yet
302 */
303
304/*
305 * This function deals with some "special cases" devices.
306 *
307 * 0 -> No special case
308 * 1 -> Skip the device but act as if the access was successfull
309 * (return 0xff's on reads, eventually, cache config space
310 * accesses in a later version)
311 * -1 -> Hide the device (unsuccessful acess)
312 */
313static int u3_ht_skip_device(struct pci_controller *hose,
314 struct pci_bus *bus, unsigned int devfn)
315{
316 struct device_node *busdn, *dn;
317 int i;
318
319 /* We only allow config cycles to devices that are in OF device-tree
320 * as we are apparently having some weird things going on with some
321 * revs of K2 on recent G5s
322 */
323 if (bus->self)
324 busdn = pci_device_to_OF_node(bus->self);
325 else
326 busdn = hose->arch_data;
327 for (dn = busdn->child; dn; dn = dn->sibling)
328 if (dn->data && PCI_DN(dn)->devfn == devfn)
329 break;
330 if (dn == NULL)
331 return -1;
332
333 /*
334 * When a device in K2 is powered down, we die on config
335 * cycle accesses. Fix that here.
336 */
337 for (i=0; i<2; i++)
338 if (k2_skiplist[i] == dn)
339 return 1;
340
341 return 0;
342}
343
344#define U3_HT_CFA0(devfn, off) \
345 ((((unsigned long)devfn) << 8) | offset)
346#define U3_HT_CFA1(bus, devfn, off) \
347 (U3_HT_CFA0(devfn, off) \
348 + (((unsigned long)bus) << 16) \
349 + 0x01000000UL)
350
351static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
352 u8 bus, u8 devfn, u8 offset)
353{
354 if (bus == hose->first_busno) {
355 /* For now, we don't self probe U3 HT bridge */
356 if (PCI_SLOT(devfn) == 0)
357 return 0;
358 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
359 } else
360 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
361}
362
363static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
364 int offset, int len, u32 *val)
365{
366 struct pci_controller *hose;
367 unsigned long addr;
368
369 hose = pci_bus_to_host(bus);
370 if (hose == NULL)
371 return PCIBIOS_DEVICE_NOT_FOUND;
372
373 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
374 if (!addr)
375 return PCIBIOS_DEVICE_NOT_FOUND;
376
377 switch (u3_ht_skip_device(hose, bus, devfn)) {
378 case 0:
379 break;
380 case 1:
381 switch (len) {
382 case 1:
383 *val = 0xff; break;
384 case 2:
385 *val = 0xffff; break;
386 default:
387 *val = 0xfffffffful; break;
388 }
389 return PCIBIOS_SUCCESSFUL;
390 default:
391 return PCIBIOS_DEVICE_NOT_FOUND;
392 }
393
394 /*
395 * Note: the caller has already checked that offset is
396 * suitably aligned and that len is 1, 2 or 4.
397 */
398 switch (len) {
399 case 1:
400 *val = in_8((u8 *)addr);
401 break;
402 case 2:
403 *val = in_le16((u16 *)addr);
404 break;
405 default:
406 *val = in_le32((u32 *)addr);
407 break;
408 }
409 return PCIBIOS_SUCCESSFUL;
410}
411
412static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
413 int offset, int len, u32 val)
414{
415 struct pci_controller *hose;
416 unsigned long addr;
417
418 hose = pci_bus_to_host(bus);
419 if (hose == NULL)
420 return PCIBIOS_DEVICE_NOT_FOUND;
421
422 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
423 if (!addr)
424 return PCIBIOS_DEVICE_NOT_FOUND;
425
426 switch (u3_ht_skip_device(hose, bus, devfn)) {
427 case 0:
428 break;
429 case 1:
430 return PCIBIOS_SUCCESSFUL;
431 default:
432 return PCIBIOS_DEVICE_NOT_FOUND;
433 }
434
435 /*
436 * Note: the caller has already checked that offset is
437 * suitably aligned and that len is 1, 2 or 4.
438 */
439 switch (len) {
440 case 1:
441 out_8((u8 *)addr, val);
442 (void) in_8((u8 *)addr);
443 break;
444 case 2:
445 out_le16((u16 *)addr, val);
446 (void) in_le16((u16 *)addr);
447 break;
448 default:
449 out_le32((u32 *)addr, val);
450 (void) in_le32((u32 *)addr);
451 break;
452 }
453 return PCIBIOS_SUCCESSFUL;
454}
455
456static struct pci_ops u3_ht_pci_ops =
457{
458 u3_ht_read_config,
459 u3_ht_write_config
460};
461#endif /* CONFIG_PPC64 */
462
463#ifdef CONFIG_PPC32
464/*
465 * For a bandit bridge, turn on cache coherency if necessary.
466 * N.B. we could clean this up using the hose ops directly.
467 */
468static void __init init_bandit(struct pci_controller *bp)
469{
470 unsigned int vendev, magic;
471 int rev;
472
473 /* read the word at offset 0 in config space for device 11 */
474 out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
475 udelay(2);
476 vendev = in_le32(bp->cfg_data);
477 if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
478 PCI_VENDOR_ID_APPLE) {
479 /* read the revision id */
480 out_le32(bp->cfg_addr,
481 (1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
482 udelay(2);
483 rev = in_8(bp->cfg_data);
484 if (rev != BANDIT_REVID)
485 printk(KERN_WARNING
486 "Unknown revision %d for bandit\n", rev);
487 } else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
488 printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
489 return;
490 }
491
492 /* read the word at offset 0x50 */
493 out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
494 udelay(2);
495 magic = in_le32(bp->cfg_data);
496 if ((magic & BANDIT_COHERENT) != 0)
497 return;
498 magic |= BANDIT_COHERENT;
499 udelay(2);
500 out_le32(bp->cfg_data, magic);
501 printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
502}
503
504/*
505 * Tweak the PCI-PCI bridge chip on the blue & white G3s.
506 */
507static void __init init_p2pbridge(void)
508{
509 struct device_node *p2pbridge;
510 struct pci_controller* hose;
511 u8 bus, devfn;
512 u16 val;
513
514 /* XXX it would be better here to identify the specific
515 PCI-PCI bridge chip we have. */
516 if ((p2pbridge = find_devices("pci-bridge")) == 0
517 || p2pbridge->parent == NULL
518 || strcmp(p2pbridge->parent->name, "pci") != 0)
519 return;
520 if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
521 DBG("Can't find PCI infos for PCI<->PCI bridge\n");
522 return;
523 }
524 /* Warning: At this point, we have not yet renumbered all busses.
525 * So we must use OF walking to find out hose
526 */
527 hose = pci_find_hose_for_OF_device(p2pbridge);
528 if (!hose) {
529 DBG("Can't find hose for PCI<->PCI bridge\n");
530 return;
531 }
532 if (early_read_config_word(hose, bus, devfn,
533 PCI_BRIDGE_CONTROL, &val) < 0) {
534 printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
535 return;
536 }
537 val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
538 early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
539}
540
541/*
542 * Some Apple desktop machines have a NEC PD720100A USB2 controller
543 * on the motherboard. Open Firmware, on these, will disable the
544 * EHCI part of it so it behaves like a pair of OHCI's. This fixup
545 * code re-enables it ;)
546 */
547static void __init fixup_nec_usb2(void)
548{
549 struct device_node *nec;
550
551 for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) {
552 struct pci_controller *hose;
553 u32 data, *prop;
554 u8 bus, devfn;
555
556 prop = (u32 *)get_property(nec, "vendor-id", NULL);
557 if (prop == NULL)
558 continue;
559 if (0x1033 != *prop)
560 continue;
561 prop = (u32 *)get_property(nec, "device-id", NULL);
562 if (prop == NULL)
563 continue;
564 if (0x0035 != *prop)
565 continue;
566 prop = (u32 *)get_property(nec, "reg", NULL);
567 if (prop == NULL)
568 continue;
569 devfn = (prop[0] >> 8) & 0xff;
570 bus = (prop[0] >> 16) & 0xff;
571 if (PCI_FUNC(devfn) != 0)
572 continue;
573 hose = pci_find_hose_for_OF_device(nec);
574 if (!hose)
575 continue;
576 early_read_config_dword(hose, bus, devfn, 0xe4, &data);
577 if (data & 1UL) {
578 printk("Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n");
579 data &= ~1UL;
580 early_write_config_dword(hose, bus, devfn, 0xe4, data);
581 early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE,
582 nec->intrs[0].line);
583 }
584 }
585}
586
587#define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \
588 | (((o) & ~3) << 24))
589
590#define GRACKLE_PICR1_STG 0x00000040
591#define GRACKLE_PICR1_LOOPSNOOP 0x00000010
592
593/* N.B. this is called before bridges is initialized, so we can't
594 use grackle_pcibios_{read,write}_config_dword. */
595static inline void grackle_set_stg(struct pci_controller* bp, int enable)
596{
597 unsigned int val;
598
599 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
600 val = in_le32(bp->cfg_data);
601 val = enable? (val | GRACKLE_PICR1_STG) :
602 (val & ~GRACKLE_PICR1_STG);
603 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
604 out_le32(bp->cfg_data, val);
605 (void)in_le32(bp->cfg_data);
606}
607
608static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
609{
610 unsigned int val;
611
612 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
613 val = in_le32(bp->cfg_data);
614 val = enable? (val | GRACKLE_PICR1_LOOPSNOOP) :
615 (val & ~GRACKLE_PICR1_LOOPSNOOP);
616 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
617 out_le32(bp->cfg_data, val);
618 (void)in_le32(bp->cfg_data);
619}
620
621void __init setup_grackle(struct pci_controller *hose)
622{
623 setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
624 if (machine_is_compatible("AAPL,PowerBook1998"))
625 grackle_set_loop_snoop(hose, 1);
626#if 0 /* Disabled for now, HW problems ??? */
627 grackle_set_stg(hose, 1);
628#endif
629}
630
631static void __init setup_bandit(struct pci_controller *hose,
632 struct reg_property *addr)
633{
634 hose->ops = &macrisc_pci_ops;
635 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
636 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
637 init_bandit(hose);
638}
639
640static int __init setup_uninorth(struct pci_controller *hose,
641 struct reg_property *addr)
642{
643 pci_assign_all_buses = 1;
644 has_uninorth = 1;
645 hose->ops = &macrisc_pci_ops;
646 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
647 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
648 /* We "know" that the bridge at f2000000 has the PCI slots. */
649 return addr->address == 0xf2000000;
650}
651#endif
652
653#ifdef CONFIG_PPC64
654static void __init setup_u3_agp(struct pci_controller* hose)
655{
656 /* On G5, we move AGP up to high bus number so we don't need
657 * to reassign bus numbers for HT. If we ever have P2P bridges
658 * on AGP, we'll have to move pci_assign_all_busses to the
659 * pci_controller structure so we enable it for AGP and not for
660 * HT childs.
661 * We hard code the address because of the different size of
662 * the reg address cell, we shall fix that by killing struct
663 * reg_property and using some accessor functions instead
664 */
665 hose->first_busno = 0xf0;
666 hose->last_busno = 0xff;
667 has_uninorth = 1;
668 hose->ops = &macrisc_pci_ops;
669 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
670 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
671
672 u3_agp = hose;
673}
674
/* Set up the U3 HyperTransport host bridge and carve its memory
 * resources so they don't overlap what the AGP bridge already owns.
 */
static void __init setup_u3_ht(struct pci_controller* hose)
{
	struct device_node *np = (struct device_node *)hose->arch_data;
	int i, cur;

	hose->ops = &u3_ht_pci_ops;

	/* We hard code the address because of the different size of
	 * the reg address cell, we shall fix that by killing struct
	 * reg_property and using some accessor functions instead
	 */
	hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);

	/*
	 * /ht node doesn't expose a "ranges" property, so we "remove" regions that
	 * have been allocated to AGP. So far, this version of the code doesn't assign
	 * any of the 0xfxxxxxxx "fine" memory regions to /ht.
	 * We need to fix that sooner or later by either parsing all child "ranges"
	 * properties or figuring out the U3 address space decoding logic and
	 * then read its configuration register (if any).
	 */
	hose->io_base_phys = 0xf4000000;
	hose->pci_io_size = 0x00400000;
	hose->io_resource.name = np->full_name;
	hose->io_resource.start = 0;
	hose->io_resource.end = 0x003fffff;
	hose->io_resource.flags = IORESOURCE_IO;
	hose->pci_mem_offset = 0;
	hose->first_busno = 0;
	hose->last_busno = 0xef;
	hose->mem_resources[0].name = np->full_name;
	hose->mem_resources[0].start = 0x80000000;
	hose->mem_resources[0].end = 0xefffffff;
	hose->mem_resources[0].flags = IORESOURCE_MEM;

	u3_ht = hose;

	if (u3_agp == NULL) {
		DBG("U3 has no AGP, using full resource range\n");
		return;
	}

	/* We "remove" the AGP resources from the resources allocated to HT, that
	 * is we create "holes". However, that code does assumptions that so far
	 * happen to be true (cross fingers...), typically that resources in the
	 * AGP node are properly ordered
	 */
	cur = 0;
	for (i=0; i<3; i++) {
		struct resource *res = &u3_agp->mem_resources[i];
		if (res->flags != IORESOURCE_MEM)
			continue;
		/* We don't care about "fine" resources */
		if (res->start >= 0xf0000000)
			continue;
		/* Check if it's just a matter of "shrinking" us in one direction */
		if (hose->mem_resources[cur].start == res->start) {
			DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
			    cur, hose->mem_resources[cur].start, res->end + 1);
			hose->mem_resources[cur].start = res->end + 1;
			continue;
		}
		if (hose->mem_resources[cur].end == res->end) {
			DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
			    cur, hose->mem_resources[cur].end, res->start - 1);
			hose->mem_resources[cur].end = res->start - 1;
			continue;
		}
		/* No, it's not the case, we need a hole */
		if (cur == 2) {
			/* not enough resources for a hole, we drop part of the range */
			printk(KERN_WARNING "Running out of resources for /ht host !\n");
			hose->mem_resources[cur].end = res->start - 1;
			continue;
		}
		/* Split mem_resources[cur] around the AGP window: the low
		 * part keeps index cur-1, the high part becomes index cur. */
		cur++;
		DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
		    cur-1, res->start - 1, cur, res->end + 1);
		hose->mem_resources[cur].name = np->full_name;
		hose->mem_resources[cur].flags = IORESOURCE_MEM;
		hose->mem_resources[cur].start = res->end + 1;
		hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
		hose->mem_resources[cur-1].end = res->start - 1;
	}
}
760
761/* XXX this needs to be converged between ppc32 and ppc64... */
762static struct pci_controller * __init pcibios_alloc_controller(void)
763{
764 struct pci_controller *hose;
765
766 hose = alloc_bootmem(sizeof(struct pci_controller));
767 if (hose)
768 pci_setup_pci_controller(hose);
769 return hose;
770}
771#endif
772
773/*
774 * We assume that if we have a G3 powermac, we have one bridge called
775 * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
776 * if we have one or more bandit or chaos bridges, we don't have a MPC106.
777 */
778static int __init add_bridge(struct device_node *dev)
779{
780 int len;
781 struct pci_controller *hose;
782#ifdef CONFIG_PPC32
783 struct reg_property *addr;
784#endif
785 char *disp_name;
786 int *bus_range;
787 int primary = 1;
788
789 DBG("Adding PCI host bridge %s\n", dev->full_name);
790
791#ifdef CONFIG_PPC32
792 /* XXX fix this */
793 addr = (struct reg_property *) get_property(dev, "reg", &len);
794 if (addr == NULL || len < sizeof(*addr)) {
795 printk(KERN_WARNING "Can't use %s: no address\n",
796 dev->full_name);
797 return -ENODEV;
798 }
799#endif
800 bus_range = (int *) get_property(dev, "bus-range", &len);
801 if (bus_range == NULL || len < 2 * sizeof(int)) {
802 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
803 dev->full_name);
804 }
805
806 hose = pcibios_alloc_controller();
807 if (!hose)
808 return -ENOMEM;
809 hose->arch_data = dev;
810 hose->first_busno = bus_range ? bus_range[0] : 0;
811 hose->last_busno = bus_range ? bus_range[1] : 0xff;
812
813 disp_name = NULL;
814#ifdef CONFIG_POWER4
815 if (device_is_compatible(dev, "u3-agp")) {
816 setup_u3_agp(hose);
817 disp_name = "U3-AGP";
818 primary = 0;
819 } else if (device_is_compatible(dev, "u3-ht")) {
820 setup_u3_ht(hose);
821 disp_name = "U3-HT";
822 primary = 1;
823 }
824 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
825 disp_name, hose->first_busno, hose->last_busno);
826#else
827 if (device_is_compatible(dev, "uni-north")) {
828 primary = setup_uninorth(hose, addr);
829 disp_name = "UniNorth";
830 } else if (strcmp(dev->name, "pci") == 0) {
831 /* XXX assume this is a mpc106 (grackle) */
832 setup_grackle(hose);
833 disp_name = "Grackle (MPC106)";
834 } else if (strcmp(dev->name, "bandit") == 0) {
835 setup_bandit(hose, addr);
836 disp_name = "Bandit";
837 } else if (strcmp(dev->name, "chaos") == 0) {
838 setup_chaos(hose, addr);
839 disp_name = "Chaos";
840 primary = 0;
841 }
842 printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n",
843 disp_name, addr->address, hose->first_busno, hose->last_busno);
844#endif
845 DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
846 hose, hose->cfg_addr, hose->cfg_data);
847
848 /* Interpret the "ranges" property */
849 /* This also maps the I/O region and sets isa_io/mem_base */
850 pci_process_bridge_OF_ranges(hose, dev, primary);
851
852 /* Fixup "bus-range" OF property */
853 fixup_bus_range(dev);
854
855 return 0;
856}
857
858static void __init
859pcibios_fixup_OF_interrupts(void)
860{
861 struct pci_dev* dev = NULL;
862
863 /*
864 * Open Firmware often doesn't initialize the
865 * PCI_INTERRUPT_LINE config register properly, so we
866 * should find the device node and apply the interrupt
867 * obtained from the OF device-tree
868 */
869 for_each_pci_dev(dev) {
870 struct device_node *node;
871 node = pci_device_to_OF_node(dev);
872 /* this is the node, see if it has interrupts */
873 if (node && node->n_intrs > 0)
874 dev->irq = node->intrs[0].line;
875 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
876 }
877}
878
879void __init
880pmac_pcibios_fixup(void)
881{
882 /* Fixup interrupts according to OF tree */
883 pcibios_fixup_OF_interrupts();
884}
885
886#ifdef CONFIG_PPC64
887static void __init pmac_fixup_phb_resources(void)
888{
889 struct pci_controller *hose, *tmp;
890
891 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
892 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
893 hose->global_number,
894 hose->io_resource.start, hose->io_resource.end);
895 }
896}
897#endif
898
/* Probe and set up all PowerMac PCI host bridges found at the root of
 * the device tree.  On ppc64 this also orders U3 AGP before HT, links
 * OF nodes to PHBs and maps PCI I/O space; on ppc32 it applies the
 * P2P-bridge and NEC USB quirks.
 */
void __init pmac_pci_init(void)
{
	struct device_node *np, *root;
	struct device_node *ht = NULL;

	root = of_find_node_by_path("/");
	if (root == NULL) {
		printk(KERN_CRIT "pmac_pci_init: can't find root "
		       "of device tree\n");
		return;
	}
	/* Walk the direct children of the root looking for bridges */
	for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
		if (np->name == NULL)
			continue;
		if (strcmp(np->name, "bandit") == 0
		    || strcmp(np->name, "chaos") == 0
		    || strcmp(np->name, "pci") == 0) {
			/* keep a reference on every bridge we adopted */
			if (add_bridge(np) == 0)
				of_node_get(np);
		}
		if (strcmp(np->name, "ht") == 0) {
			of_node_get(np);
			ht = np;
		}
	}
	of_node_put(root);

#ifdef CONFIG_PPC64
	/* Probe HT last as it relies on the agp resources to be already
	 * setup
	 */
	if (ht && add_bridge(ht) != 0)
		of_node_put(ht);

	/*
	 * We need to call pci_setup_phb_io for the HT bridge first
	 * so it gets the I/O port numbers starting at 0, and we
	 * need to call it for the AGP bridge after that so it gets
	 * small positive I/O port numbers.
	 */
	if (u3_ht)
		pci_setup_phb_io(u3_ht, 1);
	if (u3_agp)
		pci_setup_phb_io(u3_agp, 0);

	/*
	 * On ppc64, fixup the IO resources on our host bridges as
	 * the common code does it only for children of the host bridges
	 */
	pmac_fixup_phb_resources();

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
	 * assume there is no P2P bridge on the AGP bus, which should be a
	 * safe assumptions hopefully.
	 */
	if (u3_agp) {
		struct device_node *np = u3_agp->arch_data;
		PCI_DN(np)->busno = 0xf0;
		for (np = np->child; np; np = np->sibling)
			PCI_DN(np)->busno = 0xf0;
	}

	/* map in PCI I/O space */
	phbs_remap_io();

	/* pmac_check_ht_link(); */

	/* Tell pci.c to not use the common resource allocation mechanism */
	pci_probe_only = 1;

	/* Allow all IO */
	io_page_mask = -1;

#else /* CONFIG_PPC64 */
	init_p2pbridge();
	fixup_nec_usb2();

	/* We are still having some issues with the Xserve G4, enabling
	 * some offset between bus number and domains for now when we
	 * assign all busses should help for now
	 */
	if (pci_assign_all_buses)
		pcibios_assign_bus_offset = 0x10;
#endif
}
987
/* Per-device enable hook.  Rejects USB controllers that firmware has
 * hidden from the OF tree, powers Firewire/GMAC cells back up when a
 * driver claims them, and (when needed) re-initializes the command
 * register, latency timer and cache line size.  Returns 0 to allow the
 * device, -EINVAL to refuse it.
 */
int
pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
{
	struct device_node* node;
	int updatecfg = 0;
	int uninorth_child;

	node = pci_device_to_OF_node(dev);

	/* We don't want to enable USB controllers absent from the OF tree
	 * (iBook second controller)
	 */
	if (dev->vendor == PCI_VENDOR_ID_APPLE
	    && (dev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x10))
	    && !node) {
		printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
		       pci_name(dev));
		return -EINVAL;
	}

	if (!node)
		return 0;

	/* The power-up quirks below only apply to devices directly
	 * behind a UniNorth bridge */
	uninorth_child = node->parent &&
		device_is_compatible(node->parent, "uni-north");

	/* Firewire & GMAC were disabled after PCI probe, the driver is
	 * claiming them, we must re-enable them now.
	 */
	if (uninorth_child && !strcmp(node->name, "firewire") &&
	    (device_is_compatible(node, "pci106b,18") ||
	     device_is_compatible(node, "pci106b,30") ||
	     device_is_compatible(node, "pci11c1,5811"))) {
		pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
		pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
		updatecfg = 1;
	}
	if (uninorth_child && !strcmp(node->name, "ethernet") &&
	    device_is_compatible(node, "gmac")) {
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
		updatecfg = 1;
	}

	if (updatecfg) {
		u16 cmd;

		/*
		 * Make sure PCI is correctly configured
		 *
		 * We use old pci_bios versions of the function since, by
		 * default, gmac is not powered up, and so will be absent
		 * from the kernel initial PCI lookup.
		 *
		 * Should be replaced by 2.4 new PCI mechanisms and really
		 * register the device.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
			| PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
				      L1_CACHE_BYTES >> 2);
	}

	return 0;
}
1055
/* We power down some devices after they have been probed. They'll
 * be powered back on later on
 */
void __init pmac_pcibios_after_init(void)
{
	struct device_node* nd;

#ifdef CONFIG_BLK_DEV_IDE
	struct pci_dev *dev = NULL;

	/* OF fails to initialize IDE controllers on macs
	 * (and maybe other machines)
	 *
	 * Ideally, this should be moved to the IDE layer, but we need
	 * to check specifically with Andre Hedrick how to do it cleanly
	 * since the common IDE code seem to care about the fact that the
	 * BIOS may have disabled a controller.
	 *
	 * -- BenH
	 */
	for_each_pci_dev(dev) {
		if ((dev->class >> 16) == PCI_BASE_CLASS_STORAGE)
			pci_enable_device(dev);
	}
#endif /* CONFIG_BLK_DEV_IDE */

	/* Power down all UniNorth Firewire cells until a driver wants
	 * them (re-enabled in pmac_pci_enable_device_hook) */
	nd = find_devices("firewire");
	while (nd) {
		if (nd->parent && (device_is_compatible(nd, "pci106b,18") ||
				   device_is_compatible(nd, "pci106b,30") ||
				   device_is_compatible(nd, "pci11c1,5811"))
		    && device_is_compatible(nd->parent, "uni-north")) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
		}
		nd = nd->next;
	}
	/* Same for the GMAC ethernet cells */
	nd = find_devices("ethernet");
	while (nd) {
		if (nd->parent && device_is_compatible(nd, "gmac")
		    && device_is_compatible(nd->parent, "uni-north"))
			pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
		nd = nd->next;
	}
}
1101
1102#ifdef CONFIG_PPC32
1103void pmac_pci_fixup_cardbus(struct pci_dev* dev)
1104{
1105 if (_machine != _MACH_Pmac)
1106 return;
1107 /*
1108 * Fix the interrupt routing on the various cardbus bridges
1109 * used on powerbooks
1110 */
1111 if (dev->vendor != PCI_VENDOR_ID_TI)
1112 return;
1113 if (dev->device == PCI_DEVICE_ID_TI_1130 ||
1114 dev->device == PCI_DEVICE_ID_TI_1131) {
1115 u8 val;
1116 /* Enable PCI interrupt */
1117 if (pci_read_config_byte(dev, 0x91, &val) == 0)
1118 pci_write_config_byte(dev, 0x91, val | 0x30);
1119 /* Disable ISA interrupt mode */
1120 if (pci_read_config_byte(dev, 0x92, &val) == 0)
1121 pci_write_config_byte(dev, 0x92, val & ~0x06);
1122 }
1123 if (dev->device == PCI_DEVICE_ID_TI_1210 ||
1124 dev->device == PCI_DEVICE_ID_TI_1211 ||
1125 dev->device == PCI_DEVICE_ID_TI_1410 ||
1126 dev->device == PCI_DEVICE_ID_TI_1510) {
1127 u8 val;
1128 /* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
1129 signal out the MFUNC0 pin */
1130 if (pci_read_config_byte(dev, 0x8c, &val) == 0)
1131 pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2);
1132 /* Disable ISA interrupt mode */
1133 if (pci_read_config_byte(dev, 0x92, &val) == 0)
1134 pci_write_config_byte(dev, 0x92, val & ~0x06);
1135 }
1136}
1137
1138DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
1139
/* Force PCI ATA controllers on PowerMacs into fully native mode by
 * setting both "native" bits of the programming-interface byte.
 */
void pmac_pci_fixup_pciata(struct pci_dev* dev)
{
	u8 progif = 0;

	/*
	 * On PowerMacs, we try to switch any PCI ATA controller to
	 * fully native mode
	 */
	if (_machine != _MACH_Pmac)
		return;
	/* Some controllers don't have the class IDE */
	if (dev->vendor == PCI_VENDOR_ID_PROMISE)
		switch(dev->device) {
		case PCI_DEVICE_ID_PROMISE_20246:
		case PCI_DEVICE_ID_PROMISE_20262:
		case PCI_DEVICE_ID_PROMISE_20263:
		case PCI_DEVICE_ID_PROMISE_20265:
		case PCI_DEVICE_ID_PROMISE_20267:
		case PCI_DEVICE_ID_PROMISE_20268:
		case PCI_DEVICE_ID_PROMISE_20269:
		case PCI_DEVICE_ID_PROMISE_20270:
		case PCI_DEVICE_ID_PROMISE_20271:
		case PCI_DEVICE_ID_PROMISE_20275:
		case PCI_DEVICE_ID_PROMISE_20276:
		case PCI_DEVICE_ID_PROMISE_20277:
			goto good;
		}
	/* Others, check PCI class */
	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
		return;
 good:
	/* progif bits 0 and 2 are the primary/secondary native bits */
	pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
	if ((progif & 5) != 5) {
		printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev));
		(void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
		/* read back to verify the controller accepted the change */
		if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
		    (progif & 5) != 5)
			printk(KERN_ERR "Rewrite of PROGIF failed !\n");
	}
}
1180DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
1181#endif
1182
1183/*
1184 * Disable second function on K2-SATA, it's broken
1185 * and disable IO BARs on first one
1186 */
1187static void fixup_k2_sata(struct pci_dev* dev)
1188{
1189 int i;
1190 u16 cmd;
1191
1192 if (PCI_FUNC(dev->devfn) > 0) {
1193 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1194 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
1195 pci_write_config_word(dev, PCI_COMMAND, cmd);
1196 for (i = 0; i < 6; i++) {
1197 dev->resource[i].start = dev->resource[i].end = 0;
1198 dev->resource[i].flags = 0;
1199 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
1200 }
1201 } else {
1202 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1203 cmd &= ~PCI_COMMAND_IO;
1204 pci_write_config_word(dev, PCI_COMMAND, cmd);
1205 for (i = 0; i < 5; i++) {
1206 dev->resource[i].start = dev->resource[i].end = 0;
1207 dev->resource[i].flags = 0;
1208 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
1209 }
1210 }
1211}
1212DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
1213
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
new file mode 100644
index 000000000000..0eca17df239e
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -0,0 +1,682 @@
1/*
2 * Support for the interrupt controllers found on Power Macintosh,
3 * currently Apple's "Grand Central" interrupt controller in all
4 * it's incarnations. OpenPIC support used on newer machines is
5 * in a separate file
6 *
7 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
8 *
9 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 *
16 */
17
18#include <linux/config.h>
19#include <linux/stddef.h>
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/signal.h>
23#include <linux/pci.h>
24#include <linux/interrupt.h>
25#include <linux/sysdev.h>
26#include <linux/adb.h>
27#include <linux/pmu.h>
28#include <linux/module.h>
29
30#include <asm/sections.h>
31#include <asm/io.h>
32#include <asm/smp.h>
33#include <asm/prom.h>
34#include <asm/pci-bridge.h>
35#include <asm/time.h>
36#include <asm/pmac_feature.h>
37#include <asm/mpic.h>
38
39#include "pmac.h"
40
41/*
42 * XXX this should be in xmon.h, but putting it there means xmon.h
43 * has to include <linux/interrupt.h> (to get irqreturn_t), which
44 * causes all sorts of problems. -- paulus
45 */
46extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
47
48#ifdef CONFIG_PPC32
/* Register layout of one bank of the Grand Central / OHare / Heathrow
 * interrupt controller (little-endian MMIO, 32 irqs per bank). */
struct pmac_irq_hw {
	unsigned int	event;		/* latched interrupt events */
	unsigned int	enable;		/* interrupt enable mask */
	unsigned int	ack;		/* write to acknowledge */
	unsigned int	level;		/* current line levels */
};

/* Default addresses */
static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
	(struct pmac_irq_hw *) 0xf3000020,
	(struct pmac_irq_hw *) 0xf3000010,
	(struct pmac_irq_hw *) 0xf4000020,
	(struct pmac_irq_hw *) 0xf4000010,
};

/* Which irqs in each bank are level-triggered, per controller type */
#define GC_LEVEL_MASK		0x3ff00000
#define OHARE_LEVEL_MASK	0x1ff00000
#define HEATHROW_LEVEL_MASK	0x1ff00000

static int max_irqs;		/* total irqs incl. cascaded controller */
static int max_real_irqs;	/* irqs on the primary controller */
static u32 level_mask[4];	/* per-bank level-trigger masks */

/* Protects the hardware enable/ack register sequences below */
static DEFINE_SPINLOCK(pmac_pic_lock);

/* XXX here for now, should move to arch/powerpc/kernel/irq.c */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);

/* Spare interrupt_info entries handed out when fixing up the gatwick
 * device-tree nodes (see pmac_fix_gatwick_interrupts) */
#define GATWICK_IRQ_POOL_SIZE        10
static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
80
81/*
82 * Mark an irq as "lost". This is only used on the pmac
83 * since it can lose interrupts (see pmac_set_irq_mask).
84 * -- Cort
85 */
86void
87__set_lost(unsigned long irq_nr, int nokick)
88{
89 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
90 atomic_inc(&ppc_n_lost_interrupts);
91 if (!nokick)
92 set_dec(1);
93 }
94}
95
/* Mask an irq and acknowledge any pending event for it, spinning until
 * the controller has actually taken the new enable mask. */
static void
pmac_mask_and_ack_irq(unsigned int irq_nr)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;	/* bank index: 32 irqs per bank */
	unsigned long flags;

	if ((unsigned)irq_nr >= max_irqs)
		return;

	/* NOTE(review): the cached-mask and lost-irq bitmaps are updated
	 * before taking pmac_pic_lock -- presumably the atomic bitops
	 * make this safe, but confirm against other users of the lock. */
	clear_bit(irq_nr, ppc_cached_irq_mask);
	if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	spin_lock_irqsave(&pmac_pic_lock, flags);
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	do {
		/* make sure ack gets to controller before we enable
		   interrupts */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
120
/* Push the cached enable mask for irq_nr's bank out to the hardware,
 * spinning until the controller reflects it.  If a level interrupt was
 * already asserted while masked, mark it "lost" so it gets replayed
 * (setting the enable bit late doesn't re-latch the event). */
static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;	/* bank index: 32 irqs per bank */
	unsigned long flags;

	if ((unsigned)irq_nr >= max_irqs)
		return;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	/* enable unmasked interrupts */
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);

	do {
		/* make sure mask gets to controller before we
		   return to user */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));

	/*
	 * Unfortunately, setting the bit in the enable register
	 * when the device interrupt is already on *doesn't* set
	 * the bit in the flag register or request another interrupt.
	 */
	if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
		__set_lost((ulong)irq_nr, nokicklost);
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
150
151/* When an irq gets requested for the first client, if it's an
152 * edge interrupt, we clear any previous one on the controller
153 */
154static unsigned int pmac_startup_irq(unsigned int irq_nr)
155{
156 unsigned long bit = 1UL << (irq_nr & 0x1f);
157 int i = irq_nr >> 5;
158
159 if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
160 out_le32(&pmac_irq_hw[i]->ack, bit);
161 set_bit(irq_nr, ppc_cached_irq_mask);
162 pmac_set_irq_mask(irq_nr, 0);
163
164 return 0;
165}
166
167static void pmac_mask_irq(unsigned int irq_nr)
168{
169 clear_bit(irq_nr, ppc_cached_irq_mask);
170 pmac_set_irq_mask(irq_nr, 0);
171 mb();
172}
173
174static void pmac_unmask_irq(unsigned int irq_nr)
175{
176 set_bit(irq_nr, ppc_cached_irq_mask);
177 pmac_set_irq_mask(irq_nr, 0);
178}
179
180static void pmac_end_irq(unsigned int irq_nr)
181{
182 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
183 && irq_desc[irq_nr].action) {
184 set_bit(irq_nr, ppc_cached_irq_mask);
185 pmac_set_irq_mask(irq_nr, 1);
186 }
187}
188
189
/* irq_chip-style ops for the primary Grand Central/OHare/Heathrow PIC */
struct hw_interrupt_type pmac_pic = {
	.typename	= " PMAC-PIC ",
	.startup	= pmac_startup_irq,
	.enable		= pmac_unmask_irq,
	.disable	= pmac_mask_irq,
	.ack		= pmac_mask_and_ack_irq,
	.end		= pmac_end_irq,
};
198
/* Ops for the cascaded Gatwick controller -- same handlers as the
 * primary PIC, only the displayed type name differs */
struct hw_interrupt_type gatwick_pic = {
	.typename	= " GATWICK  ",
	.startup	= pmac_startup_irq,
	.enable		= pmac_unmask_irq,
	.disable	= pmac_mask_irq,
	.ack		= pmac_mask_and_ack_irq,
	.end		= pmac_end_irq,
};
207
/* Cascade handler: scan the Gatwick's banks (the ones above
 * max_real_irqs) for a pending interrupt and dispatch it. */
static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	int irq, bits;

	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		/* We must read level interrupts from the level register */
		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		/* dispatch the highest pending irq in this bank */
		irq += __ilog2(bits);
		__do_IRQ(irq, regs);
		return IRQ_HANDLED;
	}
	printk("gatwick irq not from gatwick pic\n");
	return IRQ_NONE;
}
227
/* Find the highest-priority pending interrupt on the primary PIC.
 * Returns the irq number, -2 for an already-handled SMP IPI, or a
 * negative value when nothing is pending. */
int
pmac_get_irq(struct pt_regs *regs)
{
	int irq;
	unsigned long bits = 0;

#ifdef CONFIG_SMP
	void psurge_smp_message_recv(struct pt_regs *);

	/* IPI's are a hack on the powersurge -- Cort */
	if ( smp_processor_id() != 0 ) {
		psurge_smp_message_recv(regs);
		return -2;	/* ignore, already handled */
	}
#endif /* CONFIG_SMP */
	/* scan banks high-to-low for pending (or "lost") interrupts */
	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		/* We must read level interrupts from the level register */
		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		break;
	}

	return irq;
}
257
258/* This routine will fix some missing interrupt values in the device tree
259 * on the gatwick mac-io controller used by some PowerBooks
260 */
261static void __init
262pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
263{
264 struct device_node *node;
265 int count;
266
267 memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
268 node = gw->child;
269 count = 0;
270 while(node)
271 {
272 /* Fix SCC */
273 if (strcasecmp(node->name, "escc") == 0)
274 if (node->child) {
275 if (node->child->n_intrs < 3) {
276 node->child->intrs = &gatwick_int_pool[count];
277 count += 3;
278 }
279 node->child->n_intrs = 3;
280 node->child->intrs[0].line = 15+irq_base;
281 node->child->intrs[1].line = 4+irq_base;
282 node->child->intrs[2].line = 5+irq_base;
283 printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
284 node->child->intrs[0].line,
285 node->child->intrs[1].line,
286 node->child->intrs[2].line);
287 }
288 /* Fix media-bay & left SWIM */
289 if (strcasecmp(node->name, "media-bay") == 0) {
290 struct device_node* ya_node;
291
292 if (node->n_intrs == 0)
293 node->intrs = &gatwick_int_pool[count++];
294 node->n_intrs = 1;
295 node->intrs[0].line = 29+irq_base;
296 printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
297 node->intrs[0].line);
298
299 ya_node = node->child;
300 while(ya_node)
301 {
302 if (strcasecmp(ya_node->name, "floppy") == 0) {
303 if (ya_node->n_intrs < 2) {
304 ya_node->intrs = &gatwick_int_pool[count];
305 count += 2;
306 }
307 ya_node->n_intrs = 2;
308 ya_node->intrs[0].line = 19+irq_base;
309 ya_node->intrs[1].line = 1+irq_base;
310 printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
311 ya_node->intrs[0].line, ya_node->intrs[1].line);
312 }
313 if (strcasecmp(ya_node->name, "ata4") == 0) {
314 if (ya_node->n_intrs < 2) {
315 ya_node->intrs = &gatwick_int_pool[count];
316 count += 2;
317 }
318 ya_node->n_intrs = 2;
319 ya_node->intrs[0].line = 14+irq_base;
320 ya_node->intrs[1].line = 3+irq_base;
321 printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
322 ya_node->intrs[0].line, ya_node->intrs[1].line);
323 }
324 ya_node = ya_node->sibling;
325 }
326 }
327 node = node->sibling;
328 }
329 if (count > 10) {
330 printk("WARNING !! Gatwick interrupt pool overflow\n");
331 printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
332 printk(" requested = %d\n", count);
333 }
334}
335
336/*
337 * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
338 * card which includes an ohare chip that acts as a second interrupt
339 * controller. If we find this second ohare, set it up and fix the
340 * interrupt value in the device tree for the ethernet chip.
341 */
342static int __init enable_second_ohare(void)
343{
344 unsigned char bus, devfn;
345 unsigned short cmd;
346 unsigned long addr;
347 struct device_node *irqctrler = find_devices("pci106b,7");
348 struct device_node *ether;
349
350 if (irqctrler == NULL || irqctrler->n_addrs <= 0)
351 return -1;
352 addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
353 pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
354 max_irqs = 64;
355 if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) {
356 struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler);
357 if (!hose)
358 printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
359 else {
360 early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
361 cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
362 cmd &= ~PCI_COMMAND_IO;
363 early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
364 }
365 }
366
367 /* Fix interrupt for the modem/ethernet combo controller. The number
368 in the device tree (27) is bogus (correct for the ethernet-only
369 board but not the combo ethernet/modem board).
370 The real interrupt is 28 on the second controller -> 28+32 = 60.
371 */
372 ether = find_devices("pci1011,14");
373 if (ether && ether->n_intrs > 0) {
374 ether->intrs[0].line = 60;
375 printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
376 ether->intrs[0].line);
377 }
378
379 /* Return the interrupt number of the cascade */
380 return irqctrler->intrs[0].line;
381}
382
#ifdef CONFIG_XMON
/* irqaction for the programmer-switch NMI that drops into xmon */
static struct irqaction xmon_action = {
	.handler	= xmon_irq,
	.flags		= 0,
	.mask		= CPU_MASK_NONE,
	.name		= "NMI - XMON"
};
#endif
391
/* irqaction hooking gatwick_action() onto the cascade interrupt line */
static struct irqaction gatwick_cascade_action = {
	.handler	= gatwick_action,
	.flags		= SA_INTERRUPT,
	.mask		= CPU_MASK_NONE,
	.name		= "cascade",
};
398#endif /* CONFIG_PPC32 */
399
/* Cascade callback: pull one pending irq from the slave U3 MPIC. */
static int pmac_u3_cascade(struct pt_regs *regs, void *data)
{
	struct mpic *slave = data;

	return mpic_get_one_irq(slave, regs);
}
404
/*
 * Probe and initialize the PowerMac interrupt controller(s).
 *
 * Two hardware families are handled:
 *  - Core99 and later ("NewWorld"): one or two OpenPIC/MPIC controllers,
 *    the slave one living under the "u3" bridge on G5s.
 *  - OldWorld (32-bit only): Grand Central / OHare / Heathrow style
 *    controllers driven through pmac_irq_hw[] MMIO registers, possibly
 *    with a second cascaded controller (Gatwick / second OHare).
 *
 * NOTE(review): the MPIC path returns early; everything after the
 * `irqctrler = NULL;` reset is the OldWorld CONFIG_PPC32 path only.
 */
void __init pmac_pic_init(void)
{
	struct device_node *irqctrler = NULL;
	struct device_node *irqctrler2 = NULL;
	struct device_node *np;
#ifdef CONFIG_PPC32
	int i;
	unsigned long addr;
	int irq_cascade = -1;	/* <0 means no secondary controller found */
#endif
	struct mpic *mpic1, *mpic2;

	/* We first try to detect Apple's new Core99 chipset, since mac-io
	 * is quite different on those machines and contains an IBM MPIC2.
	 */
	np = find_type_devices("open-pic");
	while (np) {
		/* A controller whose parent is "u3" is the slave MPIC */
		if (np->parent && !strcmp(np->parent->name, "u3"))
			irqctrler2 = np;
		else
			irqctrler = np;
		np = np->next;
	}
	if (irqctrler != NULL && irqctrler->n_addrs > 0) {
		unsigned char senses[128];

		printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
		       (unsigned int)irqctrler->addrs[0].address);
		pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0);

		/* Sense (level/edge, polarity) info comes from the firmware */
		prom_get_irq_senses(senses, 0, 128);
		mpic1 = mpic_alloc(irqctrler->addrs[0].address,
				   MPIC_PRIMARY | MPIC_WANTS_RESET,
				   0, 0, 128, 252, senses, 128, " OpenPIC ");
		BUG_ON(mpic1 == NULL);
		mpic_init(mpic1);

		if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
		    irqctrler2->n_addrs > 0) {
			printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
			       (u32)irqctrler2->addrs[0].address,
			       irqctrler2->intrs[0].line);

			pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
			/* Slave handles sources 128..251 */
			prom_get_irq_senses(senses, 128, 128 + 124);

			/* We don't need to set MPIC_BROKEN_U3 here since we don't have
			 * hypertransport interrupts routed to it
			 */
			mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
					   MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
					   0, 128, 124, 0, senses, 124,
					   " U3-MPIC ");
			BUG_ON(mpic2 == NULL);
			mpic_init(mpic2);
			mpic_setup_cascade(irqctrler2->intrs[0].line,
					   pmac_u3_cascade, mpic2);
		}
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
		{
			/* Wire the programmer (NMI) switch to xmon */
			struct device_node* pswitch;
			int nmi_irq;

			pswitch = find_devices("programmer-switch");
			if (pswitch && pswitch->n_intrs) {
				nmi_irq = pswitch->intrs[0].line;
				mpic_irq_set_priority(nmi_irq, 9);
				setup_irq(nmi_irq, &xmon_action);
			}
		}
#endif	/* CONFIG_XMON */
		return;
	}
	irqctrler = NULL;

#ifdef CONFIG_PPC32
	/* Get the level/edge settings, assume if it's not
	 * a Grand Central nor an OHare, then it's an Heathrow
	 * (or Paddington).
	 */
	ppc_md.get_irq = pmac_get_irq;
	if (find_devices("gc"))
		level_mask[0] = GC_LEVEL_MASK;
	else if (find_devices("ohare")) {
		level_mask[0] = OHARE_LEVEL_MASK;
		/* We might have a second cascaded ohare */
		level_mask[1] = OHARE_LEVEL_MASK;
	} else {
		level_mask[0] = HEATHROW_LEVEL_MASK;
		level_mask[1] = 0;
		/* We might have a second cascaded heathrow */
		level_mask[2] = HEATHROW_LEVEL_MASK;
		level_mask[3] = 0;
	}

	/*
	 * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts,
	 * 1998 G3 Series PowerBooks have 128,
	 * other powermacs have 32.
	 * The combo ethernet/modem card for the Powerstar powerbooks
	 * (2400/3400/3500, ohare based) has a second ohare chip
	 * effectively making a total of 64.
	 */
	max_irqs = max_real_irqs = 32;
	irqctrler = find_devices("mac-io");
	if (irqctrler)
	{
		max_real_irqs = 64;
		if (irqctrler->next)
			max_irqs = 128;
		else
			max_irqs = 64;
	}
	for ( i = 0; i < max_real_irqs ; i++ )
		irq_desc[i].handler = &pmac_pic;

	/* get addresses of first controller */
	if (irqctrler) {
		if (irqctrler->n_addrs > 0) {
			addr = (unsigned long)
				ioremap(irqctrler->addrs[0].address, 0x40);
			/* Register banks are laid out high-to-low in memory */
			for (i = 0; i < 2; ++i)
				pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
					(addr + (2 - i) * 0x10);
		}

		/* get addresses of second controller */
		irqctrler = irqctrler->next;
		if (irqctrler && irqctrler->n_addrs > 0) {
			addr = (unsigned long)
				ioremap(irqctrler->addrs[0].address, 0x40);
			for (i = 2; i < 4; ++i)
				pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
					(addr + (4 - i) * 0x10);
			irq_cascade = irqctrler->intrs[0].line;
			if (device_is_compatible(irqctrler, "gatwick"))
				pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
		}
	} else {
		/* older powermacs have a GC (grand central) or ohare at
		   f3000000, with interrupt control registers at f3000020. */
		addr = (unsigned long) ioremap(0xf3000000, 0x40);
		pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20);
	}

	/* PowerBooks 3400 and 3500 can have a second controller in a second
	   ohare chip, on the combo ethernet/modem card */
	if (machine_is_compatible("AAPL,3400/2400")
	     || machine_is_compatible("AAPL,3500"))
		irq_cascade = enable_second_ohare();

	/* disable all interrupts in all controllers */
	for (i = 0; i * 32 < max_irqs; ++i)
		out_le32(&pmac_irq_hw[i]->enable, 0);
	/* mark level interrupts */
	for (i = 0; i < max_irqs; i++)
		if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
			irq_desc[i].status = IRQ_LEVEL;

	/* get interrupt line of secondary interrupt controller */
	if (irq_cascade >= 0) {
		printk(KERN_INFO "irq: secondary controller on irq %d\n",
			(int)irq_cascade);
		for ( i = max_real_irqs ; i < max_irqs ; i++ )
			irq_desc[i].handler = &gatwick_pic;
		setup_irq(irq_cascade, &gatwick_cascade_action);
	}
	printk("System has %d possible interrupts\n", max_irqs);
	if (max_irqs != max_real_irqs)
		printk(KERN_DEBUG "%d interrupts on main controller\n",
			max_real_irqs);

#ifdef CONFIG_XMON
	setup_irq(20, &xmon_action);
#endif	/* CONFIG_XMON */
#endif	/* CONFIG_PPC32 */
}
582
583#ifdef CONFIG_PM
584/*
585 * These procedures are used in implementing sleep on the powerbooks.
586 * sleep_save_intrs() saves the states of all interrupt enables
587 * and disables all interrupts except for the nominated one.
588 * sleep_restore_intrs() restores the states of all interrupt enables.
589 */
/* Saved copies of ppc_cached_irq_mask[0..1] across suspend/resume */
unsigned long sleep_save_mask[2];
591
592/* This used to be passed by the PMU driver but that link got
593 * broken with the new driver model. We use this tweak for now...
594 */
/* Find the interrupt line of the via-pmu node so it can be left enabled
 * across sleep (the PMU must be able to wake us).  Returns -1 when there
 * is no PMU driver, the machine is not OHare based, or the node is
 * missing.
 *
 * Fixes vs. original: the not_found label now lives inside the #ifdef so
 * a !CONFIG_ADB_PMU build does not emit an unused-label warning, and the
 * reference taken by of_find_node_by_name() is dropped with
 * of_node_put().
 */
static int pmacpic_find_viaint(void)
{
	int viaint = -1;

#ifdef CONFIG_ADB_PMU
	struct device_node *np;

	if (pmu_get_model() != PMU_OHARE_BASED)
		goto not_found;
	np = of_find_node_by_name(NULL, "via-pmu");
	if (np == NULL)
		goto not_found;
	viaint = np->intrs[0].line;
	of_node_put(np);

not_found:
#endif /* CONFIG_ADB_PMU */
	return viaint;
}
613
/* Suspend hook: save the current interrupt enable masks and mask
 * everything except the PMU's VIA interrupt (needed for wakeup).
 * The read-back of the enable register forces the write out to the
 * controller before we return. */
static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
{
	int viaint = pmacpic_find_viaint();

	/* Preserve masks for pmacpic_resume() */
	sleep_save_mask[0] = ppc_cached_irq_mask[0];
	sleep_save_mask[1] = ppc_cached_irq_mask[1];
	ppc_cached_irq_mask[0] = 0;
	ppc_cached_irq_mask[1] = 0;
	if (viaint > 0)
		set_bit(viaint, ppc_cached_irq_mask);
	out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
	(void)in_le32(&pmac_irq_hw[0]->event);
	/* make sure mask gets to controller before we return to caller */
	mb();
	(void)in_le32(&pmac_irq_hw[0]->enable);

	return 0;
}
634
/* Resume hook: start from a fully-masked controller, then unmask every
 * interrupt that was enabled when we suspended (per sleep_save_mask). */
static int pmacpic_resume(struct sys_device *sysdev)
{
	int i;

	/* Mask everything first so we re-enable from a known state */
	out_le32(&pmac_irq_hw[0]->enable, 0);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, 0);
	mb();
	for (i = 0; i < max_real_irqs; ++i)
		if (test_bit(i, sleep_save_mask))
			pmac_unmask_irq(i);

	return 0;
}
649
650#endif /* CONFIG_PM */
651
/* sysfs plumbing: expose the PIC as a system device so the PM core can
 * call our suspend/resume hooks. */
static struct sysdev_class pmacpic_sysclass = {
	set_kset_name("pmac_pic"),
};

static struct sys_device device_pmacpic = {
	.id		= 0,
	.cls		= &pmacpic_sysclass,
};

static struct sysdev_driver driver_pmacpic = {
#ifdef CONFIG_PM
	.suspend	= &pmacpic_suspend,
	.resume		= &pmacpic_resume,
#endif /* CONFIG_PM */
};
667
/* Register the PIC sysdev class, device and driver (in that order --
 * each depends on the previous one being registered).
 * NOTE(review): the registration return values are ignored; on failure
 * the PM hooks would silently never run -- consider checking them. */
static int __init init_pmacpic_sysfs(void)
{
#ifdef CONFIG_PPC32
	/* max_irqs == 0 means pmac_pic_init() took the MPIC path */
	if (max_irqs == 0)
		return -ENODEV;
#endif
	printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
	sysdev_class_register(&pmacpic_sysclass);
	sysdev_register(&device_pmacpic);
	sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
	return 0;
}

subsys_initcall(init_pmacpic_sysfs);
682
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
new file mode 100644
index 000000000000..664103dfeef9
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pic.h
@@ -0,0 +1,11 @@
#ifndef __PPC_PLATFORMS_PMAC_PIC_H
#define __PPC_PLATFORMS_PMAC_PIC_H

#include <linux/irq.h>

/* OldWorld PowerMac interrupt controller descriptor (defined in pic.c) */
extern struct hw_interrupt_type pmac_pic;

/* Probe and set up the PowerMac interrupt controller(s) */
void pmac_pic_init(void);
/* Fetch the next pending interrupt (OldWorld path); machdep get_irq hook */
int pmac_get_irq(struct pt_regs *regs);

#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
new file mode 100644
index 000000000000..2ad25e13423e
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -0,0 +1,51 @@
1#ifndef __PMAC_H__
2#define __PMAC_H__
3
4#include <linux/pci.h>
5#include <linux/ide.h>
6#include <linux/irq.h>
7
8/*
9 * Declaration for the various functions exported by the
10 * pmac_* files. Mostly for use by pmac_setup
11 */
12
13struct rtc_time;
14
15extern long pmac_time_init(void);
16extern unsigned long pmac_get_boot_time(void);
17extern void pmac_get_rtc_time(struct rtc_time *);
18extern int pmac_set_rtc_time(struct rtc_time *);
19extern void pmac_read_rtc_time(void);
20extern void pmac_calibrate_decr(void);
21extern void pmac_pcibios_fixup(void);
22extern void pmac_pci_init(void);
23extern unsigned long pmac_ide_get_base(int index);
24extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
25 unsigned long data_port, unsigned long ctrl_port, int *irq);
26
27extern void pmac_nvram_update(void);
28extern unsigned char pmac_nvram_read_byte(int addr);
29extern void pmac_nvram_write_byte(int addr, unsigned char val);
30extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial);
31extern void pmac_pcibios_after_init(void);
32extern int of_show_percpuinfo(struct seq_file *m, int i);
33
34extern void pmac_pci_init(void);
35extern void pmac_setup_pci_dma(void);
36extern void pmac_check_ht_link(void);
37
38extern void pmac_setup_smp(void);
39
40extern unsigned long pmac_ide_get_base(int index);
41extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
42 unsigned long data_port, unsigned long ctrl_port, int *irq);
43
44extern int pmac_nvram_init(void);
45
46extern struct hw_interrupt_type pmac_pic;
47
48void pmac_pic_init(void);
49int pmac_get_irq(struct pt_regs *regs);
50
51#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
new file mode 100644
index 000000000000..50f5dd787900
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -0,0 +1,789 @@
1/*
2 * Powermac setup and early boot code plus other random bits.
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Adapted for Power Macintosh by Paul Mackerras
8 * Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
9 *
10 * Derived from "arch/alpha/kernel/setup.c"
11 * Copyright (C) 1995 Linus Torvalds
12 *
13 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22/*
23 * bootup setup stuff..
24 */
25
26#include <linux/config.h>
27#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/sched.h>
30#include <linux/kernel.h>
31#include <linux/mm.h>
32#include <linux/stddef.h>
33#include <linux/unistd.h>
34#include <linux/ptrace.h>
35#include <linux/slab.h>
36#include <linux/user.h>
37#include <linux/a.out.h>
38#include <linux/tty.h>
39#include <linux/string.h>
40#include <linux/delay.h>
41#include <linux/ioport.h>
42#include <linux/major.h>
43#include <linux/initrd.h>
44#include <linux/vt_kern.h>
45#include <linux/console.h>
46#include <linux/ide.h>
47#include <linux/pci.h>
48#include <linux/adb.h>
49#include <linux/cuda.h>
50#include <linux/pmu.h>
51#include <linux/irq.h>
52#include <linux/seq_file.h>
53#include <linux/root_dev.h>
54#include <linux/bitops.h>
55#include <linux/suspend.h>
56
57#include <asm/reg.h>
58#include <asm/sections.h>
59#include <asm/prom.h>
60#include <asm/system.h>
61#include <asm/pgtable.h>
62#include <asm/io.h>
63#include <asm/pci-bridge.h>
64#include <asm/ohare.h>
65#include <asm/mediabay.h>
66#include <asm/machdep.h>
67#include <asm/dma.h>
68#include <asm/cputable.h>
69#include <asm/btext.h>
70#include <asm/pmac_feature.h>
71#include <asm/time.h>
72#include <asm/of_device.h>
73#include <asm/mmu_context.h>
74#include <asm/iommu.h>
75#include <asm/smu.h>
76#include <asm/pmc.h>
77#include <asm/mpic.h>
78
79#include "pmac.h"
80
81#undef SHOW_GATWICK_IRQS
82
83unsigned char drive_info;
84
85int ppc_override_l2cr = 0;
86int ppc_override_l2cr_value;
87int has_l2cache = 0;
88
89int pmac_newworld = 1;
90
91static int current_root_goodness = -1;
92
93extern int pmac_newworld;
94extern struct machdep_calls pmac_md;
95
96#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
97
98#ifdef CONFIG_PPC64
99#include <asm/udbg.h>
100int sccdbg;
101#endif
102
103extern void zs_kgdb_hook(int tty_num);
104
105sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
106EXPORT_SYMBOL(sys_ctrler);
107
108#ifdef CONFIG_PMAC_SMU
109unsigned long smu_cmdbuf_abs;
110EXPORT_SYMBOL(smu_cmdbuf_abs);
111#endif
112
113#ifdef CONFIG_SMP
114extern struct smp_ops_t psurge_smp_ops;
115extern struct smp_ops_t core99_smp_ops;
116#endif /* CONFIG_SMP */
117
/* /proc/cpuinfo machine-level lines: model, motherboard compatible list,
 * detected motherboard type/flags, L2 cache geometry, and the
 * NewWorld/OldWorld generation. */
static void pmac_show_cpuinfo(struct seq_file *m)
{
	struct device_node *np;
	char *pp;
	int plen;
	int mbmodel;
	unsigned int mbflags;
	char* mbname;

	mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
				    PMAC_MB_INFO_MODEL, 0);
	mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
				    PMAC_MB_INFO_FLAGS, 0);
	if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
			      (long) &mbname) != 0)
		mbname = "Unknown";

	/* find motherboard type */
	seq_printf(m, "machine\t\t: ");
	np = of_find_node_by_path("/");
	if (np != NULL) {
		pp = (char *) get_property(np, "model", NULL);
		if (pp != NULL)
			seq_printf(m, "%s\n", pp);
		else
			seq_printf(m, "PowerMac\n");
		pp = (char *) get_property(np, "compatible", &plen);
		if (pp != NULL) {
			seq_printf(m, "motherboard\t:");
			/* "compatible" is a list of NUL-terminated strings;
			 * walk it using plen as the remaining byte count */
			while (plen > 0) {
				int l = strlen(pp) + 1;
				seq_printf(m, " %s", pp);
				plen -= l;
				pp += l;
			}
			seq_printf(m, "\n");
		}
		of_node_put(np);
	} else
		seq_printf(m, "PowerMac\n");

	/* print parsed model */
	seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
	seq_printf(m, "pmac flags\t: %08x\n", mbflags);

	/* find l2 cache info */
	np = of_find_node_by_name(NULL, "l2-cache");
	if (np == NULL)
		np = of_find_node_by_type(NULL, "cache");
	if (np != NULL) {
		unsigned int *ic = (unsigned int *)
			get_property(np, "i-cache-size", NULL);
		unsigned int *dc = (unsigned int *)
			get_property(np, "d-cache-size", NULL);
		seq_printf(m, "L2 cache\t:");
		has_l2cache = 1;
		/* unified cache: d-cache-size holds the total size */
		if (get_property(np, "cache-unified", NULL) != 0 && dc) {
			seq_printf(m, " %dK unified", *dc / 1024);
		} else {
			if (ic)
				seq_printf(m, " %dK instruction", *ic / 1024);
			if (dc)
				seq_printf(m, "%s %dK data",
					   (ic? " +": ""), *dc / 1024);
		}
		pp = get_property(np, "ram-type", NULL);
		if (pp)
			seq_printf(m, " %s", pp);
		seq_printf(m, "\n");
		of_node_put(np);
	}

	/* Indicate newworld/oldworld */
	seq_printf(m, "pmac-generation\t: %s\n",
		   pmac_newworld ? "NewWorld" : "OldWorld");
}
194
/* Per-CPU /proc/cpuinfo lines for CPU i: clock from cpufreq when
 * available, otherwise the generic OF per-cpu info (32-bit only). */
static void pmac_show_percpuinfo(struct seq_file *m, int i)
{
#ifdef CONFIG_CPU_FREQ_PMAC
	extern unsigned int pmac_get_one_cpufreq(int i);
	unsigned int freq = pmac_get_one_cpufreq(i);
	if (freq != 0) {
		/* pmac_get_one_cpufreq() reports kHz */
		seq_printf(m, "clock\t\t: %dMHz\n", freq/1000);
		return;
	}
#endif /* CONFIG_CPU_FREQ_PMAC */
#ifdef CONFIG_PPC32
	of_show_percpuinfo(m, i);
#endif
}
209
#ifndef CONFIG_ADB_CUDA
/* Stub used when the CUDA driver is not built: warn if the hardware is
 * actually CUDA based, but always report "not found". */
int find_via_cuda(void)
{
	if (!find_devices("via-cuda"))
		return 0;
	printk("WARNING ! Your machine is CUDA-based but your kernel\n");
	printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
	return 0;
}
#endif
220
#ifndef CONFIG_ADB_PMU
/* Stub used when the PMU driver is not built: warn if the hardware is
 * actually PMU based, but always report "not found".
 *
 * Fix: the original ended with a bare `return;` in a function declared
 * to return int -- a constraint violation (C99 6.8.6.4) and UB if the
 * caller uses the value.  Return 0, matching the find_via_cuda() stub.
 */
int find_via_pmu(void)
{
	if (!find_devices("via-pmu"))
		return 0;
	printk("WARNING ! Your machine is PMU-based but your kernel\n");
	printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
	return 0;
}
#endif
231
#ifndef CONFIG_PMAC_SMU
/* Stub used when the SMU driver is not built. */
int smu_init(void)
{
	/* should check and warn if SMU is present */
	return 0;
}
#endif
239
240#ifdef CONFIG_PPC32
/* PSX memory controller registers, mapped by ohare_init() */
static volatile u32 *sysctrl_regs;

static void __init ohare_init(void)
{
	/* this area has the CPU identification register
	   and some registers used by smp boards */
	sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);

	/*
	 * Turn on the L2 cache.
	 * We assume that we have a PSX memory controller iff
	 * we have an ohare I/O controller.
	 */
	if (find_devices("ohare") != NULL) {
		/* NOTE(review): register indices and bit values below are
		 * PSX-specific magic; meaning taken on trust from the
		 * original code. */
		if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
			if (sysctrl_regs[4] & 0x10)
				sysctrl_regs[4] |= 0x04000020;
			else
				sysctrl_regs[4] |= 0x04000000;
			if(has_l2cache)
				printk(KERN_INFO "Level 2 cache enabled\n");
		}
	}
}
265
266static void __init l2cr_init(void)
267{
268 /* Checks "l2cr-value" property in the registry */
269 if (cpu_has_feature(CPU_FTR_L2CR)) {
270 struct device_node *np = find_devices("cpus");
271 if (np == 0)
272 np = find_type_devices("cpu");
273 if (np != 0) {
274 unsigned int *l2cr = (unsigned int *)
275 get_property(np, "l2cr-value", NULL);
276 if (l2cr != 0) {
277 ppc_override_l2cr = 1;
278 ppc_override_l2cr_value = *l2cr;
279 _set_L2CR(0);
280 _set_L2CR(ppc_override_l2cr_value);
281 }
282 }
283 }
284
285 if (ppc_override_l2cr)
286 printk(KERN_INFO "L2CR overridden (0x%x), "
287 "backside cache is %s\n",
288 ppc_override_l2cr_value,
289 (ppc_override_l2cr_value & 0x80000000)
290 ? "enabled" : "disabled");
291}
292#endif
293
/*
 * Main PowerMac arch setup: estimate loops_per_jiffy from the CPU clock,
 * probe PCI, init 32-bit chipset quirks (ohare/L2CR), hook the system
 * controller (CUDA/PMU/SMU), pick the root device and the SMP ops.
 */
void __init pmac_setup_arch(void)
{
	struct device_node *cpu;
	int *fp;
	unsigned long pvr;

	pvr = PVR_VER(mfspr(SPRN_PVR));

	/* Set loops_per_jiffy to a half-way reasonable value,
	   for use until calibrate_delay gets called. */
	loops_per_jiffy = 50000000 / HZ;
	cpu = of_find_node_by_type(NULL, "cpu");
	if (cpu != NULL) {
		fp = (int *) get_property(cpu, "clock-frequency", NULL);
		if (fp != NULL) {
			/* divisor approximates cycles-per-loop per family */
			if (pvr >= 0x30 && pvr < 0x80)
				/* PPC970 etc. */
				loops_per_jiffy = *fp / (3 * HZ);
			else if (pvr == 4 || pvr >= 8)
				/* 604, G3, G4 etc. */
				loops_per_jiffy = *fp / HZ;
			else
				/* 601, 603, etc. */
				loops_per_jiffy = *fp / (2 * HZ);
		}
		of_node_put(cpu);
	}

	/* Lookup PCI hosts */
	pmac_pci_init();

#ifdef CONFIG_PPC32
	ohare_init();
	l2cr_init();
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
	/* Probe motherboard chipset */
	/* this is done earlier in setup_arch for 32-bit */
	pmac_feature_init();

	/* We can NAP */
	powersave_nap = 1;
	printk(KERN_INFO "Using native/NAP idle loop\n");
#endif

#ifdef CONFIG_KGDB
	zs_kgdb_hook(0);
#endif

	/* Detect which system controller this machine has */
	find_via_cuda();
	find_via_pmu();
	smu_init();

#ifdef CONFIG_NVRAM
	pmac_nvram_init();
#endif

#ifdef CONFIG_PPC32
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		ROOT_DEV = Root_RAM0;
	else
#endif
		ROOT_DEV = DEFAULT_ROOT_DEVICE;
#endif

#ifdef CONFIG_SMP
	/* Check for Core99 */
	if (find_devices("uni-n") || find_devices("u3"))
		smp_ops = &core99_smp_ops;
#ifdef CONFIG_PPC32
	else
		smp_ops = &psurge_smp_ops;
#endif
#endif /* CONFIG_SMP */
}
371
/* Boot-device bookkeeping filled in from the firmware boot path and
 * consumed by note_scsi_host()/note_bootable_part() to pick ROOT_DEV. */
char *bootpath;
char *bootdevice;
void *boot_host;
int boot_target;
int boot_part;
extern dev_t boot_dev;
378
#ifdef CONFIG_SCSI
/* Called when a SCSI host is registered for OF node `node`; if that node
 * matches the firmware boot device, remember the host and parse the SCSI
 * target/partition numbers out of bootpath. */
void __init note_scsi_host(struct device_node *node, void *host)
{
	int l;
	char *p;

	l = strlen(node->full_name);
	/* match requires bootdevice to equal node->full_name or to continue
	 * with a path separator right after it */
	if (bootpath != NULL && bootdevice != NULL
	    && strncmp(node->full_name, bootdevice, l) == 0
	    && (bootdevice[l] == '/' || bootdevice[l] == 0)) {
		boot_host = host;
		/*
		 * There's a bug in OF 1.0.5. (Why am I not surprised.)
		 * If you pass a path like scsi/sd@1:0 to canon, it returns
		 * something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0
		 * That is, the scsi target number doesn't get preserved.
		 * So we pick the target number out of bootpath and use that.
		 */
		p = strstr(bootpath, "/sd@");
		if (p != NULL) {
			p += 4;
			boot_target = simple_strtoul(p, NULL, 10);
			p = strchr(p, ':');
			if (p != NULL)
				boot_part = simple_strtoul(p + 1, NULL, 10);
		}
	}
}
EXPORT_SYMBOL(note_scsi_host);
#endif
409
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
/* Map the firmware boot device path to an IDE dev_t, or 0 when the boot
 * device is unknown or not path-like. */
static dev_t __init find_ide_boot(void)
{
	char *p;
	int n;
	/* local prototype for the pmac IDE helper defined elsewhere; its
	 * parameter name merely shadows the file-scope `bootdevice` */
	dev_t __init pmac_find_ide_boot(char *bootdevice, int n);

	if (bootdevice == NULL)
		return 0;
	p = strrchr(bootdevice, '/');
	if (p == NULL)
		return 0;
	/* n = length of the parent-path prefix of the boot device */
	n = p - bootdevice;

	return pmac_find_ide_boot(bootdevice, n);
}
#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
427
/* Resolve boot_dev from the firmware boot path (IDE only; SCSI boot
 * devices are handled via note_scsi_host()). */
static void __init find_boot_device(void)
{
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
	boot_dev = find_ide_boot();
#endif
}
434
/* TODO: Merge the suspend-to-ram with the common code !!!
 * currently, this is a stub implementation for suspend-to-disk
 * only
 */

#ifdef CONFIG_SOFTWARE_SUSPEND

/* pm_ops.prepare hook -- nothing to do, just trace the call. */
static int pmac_pm_prepare(suspend_state_t state)
{
	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);

	return 0;
}

/* pm_ops.enter hook: flush lazy FP/AltiVec state to memory so the
 * low-level suspend code does not have to save it. */
static int pmac_pm_enter(suspend_state_t state)
{
	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);

	/* Giveup the lazy FPU & vec so we don't have to back them
	 * up from the low level code
	 */
	enable_kernel_fp();

#ifdef CONFIG_ALTIVEC
	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
		enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */

	return 0;
}

/* pm_ops.finish hook: reload the current task's MMU context after
 * resume. */
static int pmac_pm_finish(suspend_state_t state)
{
	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);

	/* Restore userland MMU context */
	set_context(current->active_mm->context, current->active_mm->pgd);

	return 0;
}

static struct pm_ops pmac_pm_ops = {
	.pm_disk_mode	= PM_DISK_SHUTDOWN,
	.prepare	= pmac_pm_prepare,
	.enter		= pmac_pm_enter,
	.finish		= pmac_pm_finish,
};

#endif /* CONFIG_SOFTWARE_SUSPEND */
484
/* Non-zero while the kernel is still booting; note_bootable_part() only
 * adjusts ROOT_DEV during that window. */
static int initializing = 1;

/* Late initcall: end the "initializing" window and, when software
 * suspend is built, register our pm_ops. */
static int pmac_late_init(void)
{
	initializing = 0;
#ifdef CONFIG_SOFTWARE_SUSPEND
	pm_set_ops(&pmac_pm_ops);
#endif /* CONFIG_SOFTWARE_SUSPEND */
	return 0;
}

late_initcall(pmac_late_init);
497
/* can't be __init - can be called whenever a disk is first accessed */
/* Called when a partition is discovered during boot; if it looks like a
 * better root candidate than what we have (higher `goodness`) and the
 * user did not force root= on the command line, point ROOT_DEV at it. */
void note_bootable_part(dev_t dev, int part, int goodness)
{
	static int found_boot = 0;
	char *p;

	if (!initializing)
		return;
	if ((goodness <= current_root_goodness) &&
	    ROOT_DEV != DEFAULT_ROOT_DEVICE)
		return;
	/* honour an explicit root= on the kernel command line */
	p = strstr(saved_command_line, "root=");
	if (p != NULL && (p == saved_command_line || p[-1] == ' '))
		return;

	if (!found_boot) {
		find_boot_device();
		found_boot = 1;
	}
	/* accept any device when boot_dev is unknown (0) */
	if (!boot_dev || dev == boot_dev) {
		ROOT_DEV = dev + part;
		boot_dev = 0;
		current_root_goodness = goodness;
	}
}
523
#ifdef CONFIG_ADB_CUDA
/* Ask the CUDA controller to reset the machine; it will cut power to the
 * CPU, so we just poll until that happens. */
static void cuda_restart(void)
{
	struct adb_request req;

	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
	for (;;)
		cuda_poll();
}

/* Ask the CUDA controller to power the machine down; never returns. */
static void cuda_shutdown(void)
{
	struct adb_request req;

	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
	for (;;)
		cuda_poll();
}

#else
/* no CUDA driver: restart/shutdown become no-ops */
#define cuda_restart()
#define cuda_shutdown()
#endif

#ifndef CONFIG_ADB_PMU
/* no PMU driver: restart/shutdown become no-ops */
#define pmu_restart()
#define pmu_shutdown()
#endif

#ifndef CONFIG_PMAC_SMU
/* no SMU driver: restart/shutdown become no-ops */
#define smu_restart()
#define smu_shutdown()
#endif
557
558static void pmac_restart(char *cmd)
559{
560 switch (sys_ctrler) {
561 case SYS_CTRLER_CUDA:
562 cuda_restart();
563 break;
564 case SYS_CTRLER_PMU:
565 pmu_restart();
566 break;
567 case SYS_CTRLER_SMU:
568 smu_restart();
569 break;
570 default: ;
571 }
572}
573
574static void pmac_power_off(void)
575{
576 switch (sys_ctrler) {
577 case SYS_CTRLER_CUDA:
578 cuda_shutdown();
579 break;
580 case SYS_CTRLER_PMU:
581 pmu_shutdown();
582 break;
583 case SYS_CTRLER_SMU:
584 smu_shutdown();
585 break;
586 default: ;
587 }
588}
589
/* machdep halt hook: PowerMacs have no distinct halt state, so just
 * power off. */
static void
pmac_halt(void)
{
	pmac_power_off();
}
595
#ifdef CONFIG_PPC32
/* 32-bit early platform init: set ISA/DMA window constants, install the
 * PowerMac machdep calls and the pmac IDE hooks. */
void __init pmac_init(void)
{
	/* isa_io_base gets set in pmac_pci_init */
	isa_mem_base = PMAC_ISA_MEM_BASE;
	pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
	ISA_DMA_THRESHOLD = ~0L;
	DMA_MODE_READ = 1;
	DMA_MODE_WRITE = 2;

	/* struct copy: install all PowerMac machdep hooks at once */
	ppc_md = pmac_md;

#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
#ifdef CONFIG_BLK_DEV_IDE_PMAC
	ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports;
	ppc_ide_md.default_io_base = pmac_ide_get_base;
#endif /* CONFIG_BLK_DEV_IDE_PMAC */
#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */

	if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);

}
#endif
619
620/*
621 * Early initialization.
622 */
/* machdep init_early hook (64-bit work only): bring up the hash MMU,
 * optionally the SCC debug console, and the U3 IOMMU. */
static void __init pmac_init_early(void)
{
#ifdef CONFIG_PPC64
	/* Initialize hash table, from now on, we can take hash faults
	 * and call ioremap
	 */
	hpte_init_native();

	/* Init SCC */
	if (strstr(cmd_line, "sccdbg")) {
		sccdbg = 1;
		udbg_init_scc(NULL);
	}

	/* Setup interrupt mapping options */
	ppc64_interrupt_controller = IC_OPEN_PIC;

	iommu_init_early_u3();
#endif
}
643
/* machdep progress hook: print boot progress strings either on the SCC
 * debug console (64-bit, sccdbg) or on the btext framebuffer console.
 * `hex` is accepted for the machdep signature but unused here. */
static void __init pmac_progress(char *s, unsigned short hex)
{
#ifdef CONFIG_PPC64
	if (sccdbg) {
		udbg_puts(s);
		udbg_puts("\n");
		return;
	}
#endif
#ifdef CONFIG_BOOTX_TEXT
	if (boot_text_mapped) {
		btext_drawstring(s);
		btext_drawchar('\n');
	}
#endif /* CONFIG_BOOTX_TEXT */
}
660
661/*
662 * pmac has no legacy IO, anything calling this function has to
663 * fail or bad things will happen
664 */
665static int pmac_check_legacy_ioport(unsigned int baseport)
666{
667 return -ENODEV;
668}
669
/* Create of_platform devices for on-board hardware that has no PCI
 * presence: uni-n/u3 i2c buses, valkyrie/platinum framebuffers and the
 * SMU.  NOTE(review): the find_devices() walks use the non-refcounted
 * legacy API while the u3/smu walks use the refcounted of_* API -- the
 * mix appears intentional but verify when porting. */
static int __init pmac_declare_of_platform_devices(void)
{
	struct device_node *np, *npp;

	np = find_devices("uni-n");
	if (np) {
		/* only the first i2c child is registered */
		for (np = np->child; np != NULL; np = np->sibling)
			if (strncmp(np->name, "i2c", 3) == 0) {
				of_platform_device_create(np, "uni-n-i2c",
							  NULL);
				break;
			}
	}
	np = find_devices("valkyrie");
	if (np)
		of_platform_device_create(np, "valkyrie", NULL);
	np = find_devices("platinum");
	if (np)
		of_platform_device_create(np, "platinum", NULL);

	npp = of_find_node_by_name(NULL, "u3");
	if (npp) {
		for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
			if (strncmp(np->name, "i2c", 3) == 0) {
				of_platform_device_create(np, "u3-i2c", NULL);
				/* drop the extra ref before breaking out */
				of_node_put(np);
				break;
			}
		}
		of_node_put(npp);
	}
	np = of_find_node_by_type(NULL, "smu");
	if (np) {
		of_platform_device_create(np, "smu", NULL);
		of_node_put(np);
	}

	return 0;
}

device_initcall(pmac_declare_of_platform_devices);
711
712/*
713 * Called very early, MMU is off, device-tree isn't unflattened
714 */
/* machdep probe hook -- called very early, MMU off, flat device tree.
 * Returns 1 when this kernel should run as a PowerMac.  Also performs
 * the early physical allocations that must happen before the hash table
 * is set up. */
static int __init pmac_probe(int platform)
{
#ifdef CONFIG_PPC64
	if (platform != PLATFORM_POWERMAC)
		return 0;

	/*
	 * On U3, the DART (iommu) must be allocated now since it
	 * has an impact on htab_initialize (due to the large page it
	 * occupies having to be broken up so the DART itself is not
	 * part of the cacheable linar mapping
	 */
	alloc_u3_dart_table();
#endif

#ifdef CONFIG_PMAC_SMU
	/*
	 * SMU based G5s need some memory below 2Gb, at least the current
	 * driver needs that. We have to allocate it now. We allocate 4k
	 * (1 small page) for now.
	 */
	smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
#endif /* CONFIG_PMAC_SMU */

	return 1;
}
741
#ifdef CONFIG_PPC64
/* machdep pci_probe_mode hook: choose how to enumerate a PCI bus. */
static int pmac_probe_mode(struct pci_bus *bus)
{
	struct device_node *node = bus->sysdata;

	/* We need to use normal PCI probing for the AGP bus,
	   since the device for the AGP bridge isn't in the tree. */
	if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
		return PCI_PROBE_NORMAL;

	/* everything else is fully described by the device tree */
	return PCI_PROBE_DEVTREE;
}
#endif
755
/* PowerMac machdep call table.  __initdata: it is copied into ppc_md
 * during early setup (see pmac_init), after which this copy is freed. */
struct machdep_calls __initdata pmac_md = {
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
	.cpu_die		= generic_mach_cpu_die,
#endif
	.probe			= pmac_probe,
	.setup_arch		= pmac_setup_arch,
	.init_early		= pmac_init_early,
	.show_cpuinfo		= pmac_show_cpuinfo,
	.show_percpuinfo	= pmac_show_percpuinfo,
	.init_IRQ		= pmac_pic_init,
	.get_irq		= mpic_get_irq,	/* changed later */
	.pcibios_fixup		= pmac_pcibios_fixup,
	.restart		= pmac_restart,
	.power_off		= pmac_power_off,
	.halt			= pmac_halt,
	.time_init		= pmac_time_init,
	.get_boot_time		= pmac_get_boot_time,
	.set_rtc_time		= pmac_set_rtc_time,
	.get_rtc_time		= pmac_get_rtc_time,
	.calibrate_decr		= pmac_calibrate_decr,
	.feature_call		= pmac_do_feature_call,
	.check_legacy_ioport	= pmac_check_legacy_ioport,
	.progress		= pmac_progress,
#ifdef CONFIG_PPC64
	.pci_probe_mode		= pmac_probe_mode,
	.idle_loop		= native_idle,
	.enable_pmcs		= power4_enable_pmcs,
#endif
#ifdef CONFIG_PPC32
	.pcibios_enable_device_hook	= pmac_pci_enable_device_hook,
	.pcibios_after_init		= pmac_pcibios_after_init,
	.phys_mem_access_prot		= pci_phys_mem_access_prot,
#endif
};
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
new file mode 100644
index 000000000000..22b113d19b24
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -0,0 +1,396 @@
1/*
2 * This file contains sleep low-level functions for PowerBook G3.
3 * Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
4 * and Paul Mackerras (paulus@samba.org).
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/config.h>
14#include <asm/processor.h>
15#include <asm/page.h>
16#include <asm/ppc_asm.h>
17#include <asm/cputable.h>
18#include <asm/cache.h>
19#include <asm/thread_info.h>
20#include <asm/asm-offsets.h>
21
22#define MAGIC 0x4c617273 /* 'Lars' */
23
24/*
25 * Structure for storing CPU registers on the stack.
26 */
27#define SL_SP 0
28#define SL_PC 4
29#define SL_MSR 8
30#define SL_SDR1 0xc
31#define SL_SPRG0 0x10 /* 4 sprg's */
32#define SL_DBAT0 0x20
33#define SL_IBAT0 0x28
34#define SL_DBAT1 0x30
35#define SL_IBAT1 0x38
36#define SL_DBAT2 0x40
37#define SL_IBAT2 0x48
38#define SL_DBAT3 0x50
39#define SL_IBAT3 0x58
40#define SL_TB 0x60
41#define SL_R2 0x68
42#define SL_CR 0x6c
43#define SL_R12 0x70 /* r12 to r31 */
44#define SL_SIZE (SL_R12 + 80)
45
46 .section .text
47 .align 5
48
49#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
50
51/* This gets called by via-pmu.c late during the sleep process.
52 * The PMU was already send the sleep command and will shut us down
53 * soon. We need to save all that is needed and setup the wakeup
54 * vector that will be called by the ROM on wakeup
55 */
/* This gets called by via-pmu.c late during the sleep process.
 * The PMU was already sent the sleep command and will shut us down
 * soon. We need to save all that is needed and setup the wakeup
 * vector that will be called by the ROM on wakeup.
 * Saves all state into an SL_* frame pushed on the current stack.
 */
_GLOBAL(low_sleep_handler)
#ifndef CONFIG_6xx
	blr
#else
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-SL_SIZE(r1)
	mfcr	r0
	stw	r0,SL_CR(r1)
	stw	r2,SL_R2(r1)
	stmw	r12,SL_R12(r1)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r1)
	mfsdr1	r4
	stw	r4,SL_SDR1(r1)

	/* Get a stable timebase and save it (re-read TBU until it
	 * doesn't change across the TBL read) */
1:	mftbu	r4
	stw	r4,SL_TB(r1)
	mftb	r5
	stw	r5,SL_TB+4(r1)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r1)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r1)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r1)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r1)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r1)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r1)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r1)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r1)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r1)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r1)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r1)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r1)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r1)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r1)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r1)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r1)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r1)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r1)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r1)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r1)

	/* Backup various CPU config stuffs */
	bl	__save_cpu_setup

	/* The ROM can wake us up via 2 different vectors:
	 * - On wallstreet & lombard, we must write a magic
	 *   value 'Lars' at address 4 and a pointer to a
	 *   memory location containing the PC to resume from
	 *   at address 0.
	 * - On Core99, we must store the wakeup vector at
	 *   address 0x80 and eventually its parameters
	 *   at address 0x84. I've had some trouble with those
	 *   parameters however and I no longer use them.
	 */
	lis	r5,grackle_wake_up@ha
	addi	r5,r5,grackle_wake_up@l
	tophys(r5,r5)
	stw	r5,SL_PC(r1)
	lis	r4,KERNELBASE@h
	tophys(r5,r1)
	addi	r5,r5,SL_PC
	lis	r6,MAGIC@ha
	addi	r6,r6,MAGIC@l
	stw	r5,0(r4)
	stw	r6,4(r4)
	/* Setup stuffs at 0x80-0x84 for Core99 */
	lis	r3,core99_wake_up@ha
	addi	r3,r3,core99_wake_up@l
	tophys(r3,r3)
	stw	r3,0x80(r4)
	stw	r5,0x84(r4)
	/* Store a pointer to our backup storage into
	 * a kernel global
	 */
	lis	r3,sleep_storage@ha
	addi	r3,r3,sleep_storage@l
	stw	r5,0(r3)

	.globl	low_cpu_die
low_cpu_die:
	/* Flush & disable all caches */
	bl	flush_disable_caches

	/* Turn off data relocation. */
	mfmsr	r3			/* read current MSR */
	rlwinm	r3,r3,0,28,26		/* Turn off DR bit */
	sync
	mtmsr	r3
	isync

BEGIN_FTR_SECTION
	/* Flush any pending L2 data prefetches to work around HW bug */
	sync
	lis	r3,0xfff0
	lwz	r0,0(r3)	/* perform cache-inhibited load to ROM */
	sync		   	/* (caches are disabled at this point) */
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

/*
 * Set the HID0 and MSR for sleep.
 */
	mfspr	r2,SPRN_HID0
	rlwinm	r2,r2,0,10,7	/* clear doze, nap */
	oris	r2,r2,HID0_SLEEP@h
	sync
	isync
	mtspr	SPRN_HID0,r2
	sync

/* This loop puts us back to sleep in case we have a spurious
 * wakeup so that the host bridge properly stays asleep. The
 * CPU will be turned off, either after a known time (about 1
 * second) on wallstreet & lombard, or as soon as the CPU enters
 * SLEEP mode on core99
 */
	mfmsr	r2
	oris	r2,r2,MSR_POW@h
1:	sync
	mtmsr	r2
	isync
	b	1b
207
208/*
209 * Here is the resume code.
210 */
211
212
213/*
214 * Core99 machines resume here
215 * r4 has the physical address of SL_PC(sp) (unused)
216 */
_GLOBAL(core99_wake_up)
	/* Make sure HID0 no longer contains any sleep bit and that data cache
	 * is disabled
	 */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,11,7		/* clear SLEEP, NAP, DOZE bits */
	/* Fixed: destination register was written as bare "3" instead of
	 * "r3", which fails to assemble with symbolic register names. */
	rlwinm	r3,r3,0,18,15		/* clear DCE, ICE */
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* sanitize MSR (set then clear EE and IP so both end up 0) */
	mfmsr	r3
	ori	r3,r3,MSR_EE|MSR_IP
	xori	r3,r3,MSR_EE|MSR_IP
	sync
	isync
	mtmsr	r3
	sync
	isync

	/* Recover sleep storage: load the saved (physical) stack pointer
	 * that low_sleep_handler published in sleep_storage */
	lis	r3,sleep_storage@ha
	addi	r3,r3,sleep_storage@l
	tophys(r3,r3)
	lwz	r1,0(r3)

244 /* Pass thru to older resume code ... */
245/*
246 * Here is the resume code for older machines.
247 * r1 has the physical address of SL_PC(sp).
248 */
249
/* Resume path for older (grackle) machines; Core99 falls through into
 * this code as well. On entry r1 holds the physical address of
 * SL_PC(sp); everything saved by low_sleep_handler is restored here.
 */
grackle_wake_up:

	/* Restore the kernel's segment registers before
	 * we do any r1 memory access as we are not sure they
	 * are in a sane state above the first 256Mb region
	 */
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync

	/* r1 pointed at SL_PC(sp); rewind it to the frame base */
	subi	r1,r1,SL_PC

	/* Restore various CPU config stuffs */
	bl	__restore_cpu_setup

	/* Make sure all FPRs have been initialized */
	bl	reloc_offset
	bl	__init_fpu_registers

	/* Invalidate & enable L1 cache, we don't care about
	 * whatever the ROM may have tried to write to memory
	 */
	bl	__inval_enable_L1

	/* Restore the BATs, and SDR1.  Then we can turn on the MMU. */
	lwz	r4,SL_SDR1(r1)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r1)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r1)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r1)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r1)
	mtsprg	3,r4

	lwz	r4,SL_DBAT0(r1)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r1)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r1)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r1)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r1)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r1)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r1)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r1)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r1)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r1)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r1)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r1)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r1)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r1)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r1)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r1)
	mtibatl	3,r4

BEGIN_FTR_SECTION
	/* CPUs with the high-BAT feature: zero BAT4-7, they were not saved */
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)

	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	blt	1b
	sync

	/* restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r1)
	bl	turn_on_mmu

	/* get back the stack pointer */
	tovirt(r1,r1)

	/* Restore TB (zero TBL first so TBU can't carry mid-restore) */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r1)
	lwz	r4,SL_TB+4(r1)
	mttbu	r3
	mttbl	r4

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r1)
	mtcr	r0
	lwz	r2,SL_R2(r1)
	lmw	r12,SL_R12(r1)
	addi	r1,r1,SL_SIZE
	lwz	r0,4(r1)
	mtlr	r0
	blr
377
/* Helper: re-enable the MMU by rfi-ing to our (virtualized) return
 * address with the MSR value passed in r3. */
turn_on_mmu:
	mflr	r4
	tovirt(r4,r4)
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
386
#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) */

	/* Cache-line-aligned slot holding the physical address of the
	 * saved-state frame, written by low_sleep_handler and read by
	 * the wakeup code. */
	.section .data
	.balign	L1_CACHE_BYTES
sleep_storage:
	.long 0
	.balign	L1_CACHE_BYTES, 0

#endif /* CONFIG_6xx */
	.section .text
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
new file mode 100644
index 000000000000..e1f9443cc872
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -0,0 +1,865 @@
1/*
2 * SMP support for power macintosh.
3 *
4 * We support both the old "powersurge" SMP architecture
5 * and the current Core99 (G4 PowerMac) machines.
6 *
7 * Note that we don't support the very first rev. of
8 * Apple/DayStar 2 CPUs board, the one with the funky
9 * watchdog. Hopefully, none of these should be there except
10 * maybe internally to Apple. I should probably still add some
11 * code to detect this card though and disable SMP. --BenH.
12 *
13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15 *
16 * Support for DayStar quad CPU cards
17 * Copyright (C) XLR8, Inc. 1994-2000
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24#include <linux/config.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/smp.h>
28#include <linux/smp_lock.h>
29#include <linux/interrupt.h>
30#include <linux/kernel_stat.h>
31#include <linux/delay.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/errno.h>
35#include <linux/hardirq.h>
36#include <linux/cpu.h>
37
38#include <asm/ptrace.h>
39#include <asm/atomic.h>
40#include <asm/irq.h>
41#include <asm/page.h>
42#include <asm/pgtable.h>
43#include <asm/sections.h>
44#include <asm/io.h>
45#include <asm/prom.h>
46#include <asm/smp.h>
47#include <asm/machdep.h>
48#include <asm/pmac_feature.h>
49#include <asm/time.h>
50#include <asm/mpic.h>
51#include <asm/cacheflush.h>
52#include <asm/keylargo.h>
53#include <asm/pmac_low_i2c.h>
54
55#undef DEBUG
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63extern void __secondary_start_pmac_0(void);
64
65#ifdef CONFIG_PPC32
66
67/* Sync flag for HW tb sync */
68static volatile int sec_tb_reset = 0;
69
70/*
71 * Powersurge (old powermac SMP) support.
72 */
73
74/* Addresses for powersurge registers */
75#define HAMMERHEAD_BASE 0xf8000000
76#define HHEAD_CONFIG 0x90
77#define HHEAD_SEC_INTR 0xc0
78
79/* register for interrupting the primary processor on the powersurge */
80/* N.B. this is actually the ethernet ROM! */
81#define PSURGE_PRI_INTR 0xf3019000
82
83/* register for storing the start address for the secondary processor */
84/* N.B. this is the PCI config space address register for the 1st bridge */
85#define PSURGE_START 0xf2800000
86
87/* Daystar/XLR8 4-CPU card */
88#define PSURGE_QUAD_REG_ADDR 0xf8800000
89
90#define PSURGE_QUAD_IRQ_SET 0
91#define PSURGE_QUAD_IRQ_CLR 1
92#define PSURGE_QUAD_IRQ_PRIMARY 2
93#define PSURGE_QUAD_CKSTOP_CTL 3
94#define PSURGE_QUAD_PRIMARY_ARB 4
95#define PSURGE_QUAD_BOARD_ID 6
96#define PSURGE_QUAD_WHICH_CPU 7
97#define PSURGE_QUAD_CKSTOP_RDBK 8
98#define PSURGE_QUAD_RESET_CTL 11
99
100#define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
101#define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
102#define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
103#define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
104
105/* virtual addresses for the above */
106static volatile u8 __iomem *hhead_base;
107static volatile u8 __iomem *quad_base;
108static volatile u32 __iomem *psurge_pri_intr;
109static volatile u8 __iomem *psurge_sec_intr;
110static volatile u32 __iomem *psurge_start;
111
112/* values for psurge_type */
113#define PSURGE_NONE -1
114#define PSURGE_DUAL 0
115#define PSURGE_QUAD_OKEE 1
116#define PSURGE_QUAD_COTTON 2
117#define PSURGE_QUAD_ICEGRASS 3
118
119/* what sort of powersurge board we have */
120static int psurge_type = PSURGE_NONE;
121
122/*
123 * Set and clear IPIs for powersurge.
124 */
125static inline void psurge_set_ipi(int cpu)
126{
127 if (psurge_type == PSURGE_NONE)
128 return;
129 if (cpu == 0)
130 in_be32(psurge_pri_intr);
131 else if (psurge_type == PSURGE_DUAL)
132 out_8(psurge_sec_intr, 0);
133 else
134 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
135}
136
137static inline void psurge_clr_ipi(int cpu)
138{
139 if (cpu > 0) {
140 switch(psurge_type) {
141 case PSURGE_DUAL:
142 out_8(psurge_sec_intr, ~0);
143 case PSURGE_NONE:
144 break;
145 default:
146 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
147 }
148 }
149}
150
151/*
152 * On powersurge (old SMP powermac architecture) we don't have
153 * separate IPIs for separate messages like openpic does. Instead
154 * we have a bitmap for each processor, where a 1 bit means that
155 * the corresponding message is pending for that processor.
156 * Ideally each cpu's entry would be in a different cache line.
157 * -- paulus.
158 */
159static unsigned long psurge_smp_message[NR_CPUS];
160
/*
 * IPI receive path for powersurge boards: ack the interrupt, then
 * dispatch every message bit pending in this cpu's bitmap.
 */
void psurge_smp_message_recv(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int msg;

	/* clear interrupt */
	psurge_clr_ipi(cpu);

	/* no other cpu online means no message can be pending */
	if (num_online_cpus() < 2)
		return;

	/* make sure there is a message there */
	for (msg = 0; msg < 4; msg++)
		if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
			smp_message_recv(msg, regs);
}
177
178irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
179{
180 psurge_smp_message_recv(regs);
181 return IRQ_HANDLED;
182}
183
184static void smp_psurge_message_pass(int target, int msg)
185{
186 int i;
187
188 if (num_online_cpus() < 2)
189 return;
190
191 for (i = 0; i < NR_CPUS; i++) {
192 if (!cpu_online(i))
193 continue;
194 if (target == MSG_ALL
195 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
196 || target == i) {
197 set_bit(msg, &psurge_smp_message[i]);
198 psurge_set_ipi(i);
199 }
200 }
201}
202
203/*
204 * Determine a quad card presence. We read the board ID register, we
205 * force the data bus to change to something else, and we read it again.
206 * It it's stable, then the register probably exist (ugh !)
207 */
/*
 * Determine a quad card presence. We read the board ID register, we
 * force the data bus to change to something else, and we read it again.
 * If it's stable, then the register probably exists (ugh !).
 * Returns one of PSURGE_QUAD_* on success, or PSURGE_DUAL when no
 * stable quad board ID is found.
 */
static int __init psurge_quad_probe(void)
{
	int type;
	unsigned int i;

	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
		return PSURGE_DUAL;

	/* looks OK, try a slightly more rigorous test */
	/* bogus is not necessarily cacheline-aligned,
	   though I don't suppose that really matters.  -- paulus */
	for (i = 0; i < 100; i++) {
		/* fill a cacheline-sized buffer with rotating patterns and
		 * flush it out, to wiggle the data bus between reads */
		volatile u32 bogus[8];
		bogus[(0+i)%8] = 0x00000000;
		bogus[(1+i)%8] = 0x55555555;
		bogus[(2+i)%8] = 0xFFFFFFFF;
		bogus[(3+i)%8] = 0xAAAAAAAA;
		bogus[(4+i)%8] = 0x33333333;
		bogus[(5+i)%8] = 0xCCCCCCCC;
		bogus[(6+i)%8] = 0xCCCCCCCC;
		bogus[(7+i)%8] = 0x33333333;
		wmb();
		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
		mb();
		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
			return PSURGE_DUAL;
	}
	return type;
}
239
/*
 * Bring the secondary CPUs on a quad card out of reset/clock-stop.
 * The mdelay()s between steps appear to be required settle times —
 * NOTE(review): the 33ms values are not documented here; presumably
 * empirically determined for this hardware.
 */
static void __init psurge_quad_init(void)
{
	int procbits;

	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
	/* bitmask of the CPUs present on the card */
	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
	if (psurge_type == PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	else
		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
	mdelay(33);
	out_8(psurge_sec_intr, ~0);
	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	if (psurge_type != PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
	mdelay(33);
	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
}
263
/*
 * Probe for a powersurge SMP board (dual or quad) and map its
 * registers.  Returns the number of CPUs to bring up; 1 means no
 * usable SMP hardware was found.
 *
 * NOTE(review): the ioremap() results are not checked for NULL here —
 * presumably acceptable this early in boot, but worth confirming.
 */
static int __init smp_psurge_probe(void)
{
	int i, ncpus;

	/* We don't do SMP on the PPC601 -- paulus */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		return 1;

	/*
	 * The powersurge cpu board can be used in the generation
	 * of powermacs that have a socket for an upgradeable cpu card,
	 * including the 7500, 8500, 9500, 9600.
	 * The device tree doesn't tell you if you have 2 cpus because
	 * OF doesn't know anything about the 2nd processor.
	 * Instead we look for magic bits in magic registers,
	 * in the hammerhead memory controller in the case of the
	 * dual-cpu powersurge board.  -- paulus.
	 */
	if (find_devices("hammerhead") == NULL)
		return 1;

	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

	psurge_type = psurge_quad_probe();
	if (psurge_type != PSURGE_DUAL) {
		psurge_quad_init();
		/* All released cards using this HW design have 4 CPUs */
		ncpus = 4;
	} else {
		/* not a quad board: the quad registers aren't needed */
		iounmap(quad_base);
		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
			/* not a dual-cpu card */
			iounmap(hhead_base);
			psurge_type = PSURGE_NONE;
			return 1;
		}
		ncpus = 2;
	}

	psurge_start = ioremap(PSURGE_START, 4);
	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

	/* this is not actually strictly necessary -- paulus. */
	for (i = 1; i < ncpus; ++i)
		smp_hw_index[i] = i;

	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);

	return ncpus;
}
316
/*
 * Start secondary CPU @nr on a powersurge board: flush the low kernel
 * image out of the data cache, write the secondary's entry address to
 * the PSURGE_START register, then pulse its IPI to release it.
 */
static void __init smp_psurge_kick_cpu(int nr)
{
	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
	unsigned long a;

	/* may need to flush here if secondary bats aren't setup */
	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
	asm volatile("sync");

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

	out_be32(psurge_start, start);
	mb();

	/* short IPI pulse wakes the secondary */
	psurge_set_ipi(nr);
	udelay(10);
	psurge_clr_ipi(nr);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}
338
339/*
340 * With the dual-cpu powersurge board, the decrementers and timebases
341 * of both cpus are frozen after the secondary cpu is started up,
342 * until we give the secondary cpu another interrupt. This routine
343 * uses this to get the timebases synchronized.
344 * -- paulus.
345 */
/*
 * With the dual-cpu powersurge board, the decrementers and timebases
 * of both cpus are frozen after the secondary cpu is started up,
 * until we give the secondary cpu another interrupt. This routine
 * uses this to get the timebases synchronized.
 * -- paulus.
 */
static void __init psurge_dual_sync_tb(int cpu_nr)
{
	int t;

	/* both cpus zero their TB while it is frozen */
	set_dec(tb_ticks_per_jiffy);
	set_tb(0, 0);
	last_jiffy_stamp(cpu_nr) = 0;

	if (cpu_nr > 0) {
		/* secondary: signal the primary and return */
		mb();
		sec_tb_reset = 1;
		return;
	}

	/* wait for the secondary to have reset its TB before proceeding */
	for (t = 10000000; t > 0 && !sec_tb_reset; --t)
		;

	/* now interrupt the secondary, starting both TBs */
	psurge_set_ipi(1);

	smp_tb_synchronized = 1;
}
369
/* irqaction for the primary CPU's IPI; installed on irq 30 by
 * smp_psurge_setup_cpu(). */
static struct irqaction psurge_irqaction = {
	.handler = psurge_primary_intr,
	.flags = SA_INTERRUPT,
	.mask = CPU_MASK_NONE,
	.name = "primary IPI",
};
376
/*
 * Per-cpu setup hook: on the boot cpu, install the IPI handler and
 * neutralize the start vector; on dual boards, also run the TB sync
 * protocol on every cpu.
 */
static void __init smp_psurge_setup_cpu(int cpu_nr)
{

	if (cpu_nr == 0) {
		/* If we failed to start the second CPU, we should still
		 * send it an IPI to start the timebase & DEC or we might
		 * have them stuck.
		 */
		if (num_online_cpus() < 2) {
			if (psurge_type == PSURGE_DUAL)
				psurge_set_ipi(1);
			return;
		}
		/* reset the entry point so if we get another intr we won't
		 * try to startup again */
		out_be32(psurge_start, 0x100);
		if (setup_irq(30, &psurge_irqaction))
			printk(KERN_ERR "Couldn't get primary IPI interrupt");
	}

	if (psurge_type == PSURGE_DUAL)
		psurge_dual_sync_tb(cpu_nr);
}
400
/* No-op: powersurge TB sync is done in psurge_dual_sync_tb() instead. */
void __init smp_psurge_take_timebase(void)
{
	/* Dummy implementation */
}
405
/* No-op: powersurge TB sync is done in psurge_dual_sync_tb() instead. */
void __init smp_psurge_give_timebase(void)
{
	/* Dummy implementation */
}
410
/* SMP operations vector for PowerSurge-style Macs (dual and quad). */
struct smp_ops_t psurge_smp_ops = {
	.message_pass	= smp_psurge_message_pass,
	.probe		= smp_psurge_probe,
	.kick_cpu	= smp_psurge_kick_cpu,
	.setup_cpu	= smp_psurge_setup_cpu,
	.give_timebase	= smp_psurge_give_timebase,
	.take_timebase	= smp_psurge_take_timebase,
};
420#endif /* CONFIG_PPC32 - actually powersurge support */
421
422#ifdef CONFIG_PPC64
423/*
424 * G5s enable/disable the timebase via an i2c-connected clock chip.
425 */
426static struct device_node *pmac_tb_clock_chip_host;
427static u8 pmac_tb_pulsar_addr;
428static void (*pmac_tb_freeze)(int freeze);
429static DEFINE_SPINLOCK(timebase_lock);
430static unsigned long timebase;
431
432static void smp_core99_cypress_tb_freeze(int freeze)
433{
434 u8 data;
435 int rc;
436
437 /* Strangely, the device-tree says address is 0xd2, but darwin
438 * accesses 0xd0 ...
439 */
440 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
441 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
442 0xd0 | pmac_low_i2c_read,
443 0x81, &data, 1);
444 if (rc != 0)
445 goto bail;
446
447 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
448
449 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
450 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
451 0xd0 | pmac_low_i2c_write,
452 0x81, &data, 1);
453
454 bail:
455 if (rc != 0) {
456 printk("Cypress Timebase %s rc: %d\n",
457 freeze ? "freeze" : "unfreeze", rc);
458 panic("Timebase freeze failed !\n");
459 }
460}
461
462
/*
 * Freeze (or unfreeze) the timebase through the Pulsar clock chip:
 * read-modify-write register 0x2e at the i2c address discovered in
 * smp_core99_setup().  Panics on i2c failure.
 */
static void smp_core99_pulsar_tb_freeze(int freeze)
{
	u8 data;
	int rc;

	pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
	rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
			       pmac_tb_pulsar_addr | pmac_low_i2c_read,
			       0x2e, &data, 1);
	if (rc != 0)
		goto bail;

	data = (data & 0x88) | (freeze ? 0x11 : 0x22);

	pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
	rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
			       pmac_tb_pulsar_addr | pmac_low_i2c_write,
			       0x2e, &data, 1);
 bail:
	if (rc != 0) {
		printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
		       freeze ? "freeze" : "unfreeze", rc);
		panic("Timebase freeze failed !\n");
	}
}
488
489
/*
 * Primary side of G5 timebase sync: freeze the TB via the i2c clock
 * chip, publish its value through the 'timebase' global, spin until
 * the secondary has consumed it (it zeroes 'timebase'), then unfreeze.
 */
static void smp_core99_give_timebase(void)
{
	/* Open i2c bus for synchronous access */
	if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
		panic("Can't open i2c for TB sync !\n");

	spin_lock(&timebase_lock);
	(*pmac_tb_freeze)(1);
	mb();
	timebase = get_tb();
	spin_unlock(&timebase_lock);

	/* secondary zeroes 'timebase' once it has copied it */
	while (timebase)
		barrier();

	spin_lock(&timebase_lock);
	(*pmac_tb_freeze)(0);
	spin_unlock(&timebase_lock);

	/* Close i2c bus */
	pmac_low_i2c_close(pmac_tb_clock_chip_host);
}
512
513
/*
 * Secondary side of G5 timebase sync: wait for the primary to publish
 * the frozen TB value, copy it into our TB, then zero the global to
 * signal completion.
 */
static void __devinit smp_core99_take_timebase(void)
{
	while (!timebase)
		barrier();
	spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	spin_unlock(&timebase_lock);
}
523
/*
 * Locate the i2c clock chip used for hardware TB freeze on the G5
 * machines that support it, and pick the matching freeze routine.
 * Falls back to the generic software TB sync when no chip is found.
 */
static void __init smp_core99_setup(int ncpus)
{
	struct device_node *cc = NULL;
	struct device_node *p;
	u32 *reg;
	int ok;

	/* HW sync only on these platforms */
	if (!machine_is_compatible("PowerMac7,2") &&
	    !machine_is_compatible("PowerMac7,3") &&
	    !machine_is_compatible("RackMac3,1"))
		return;

	/* Look for the clock chip */
	while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
		/* only consider chips hanging off a uni-n i2c host */
		p = of_get_parent(cc);
		ok = p && device_is_compatible(p, "uni-n-i2c");
		of_node_put(p);
		if (!ok)
			continue;

		reg = (u32 *)get_property(cc, "reg", NULL);
		if (reg == NULL)
			continue;

		switch (*reg) {
		case 0xd2:
			if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
				pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
				pmac_tb_pulsar_addr = 0xd2;
				printk(KERN_INFO "Timebase clock is Pulsar chip\n");
			} else if (device_is_compatible(cc, "cy28508")) {
				pmac_tb_freeze = smp_core99_cypress_tb_freeze;
				printk(KERN_INFO "Timebase clock is Cypress chip\n");
			}
			break;
		case 0xd4:
			pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
			pmac_tb_pulsar_addr = 0xd4;
			printk(KERN_INFO "Timebase clock is Pulsar chip\n");
			break;
		}
		if (pmac_tb_freeze != NULL) {
			/* keep a reference on the i2c host node */
			pmac_tb_clock_chip_host = of_get_parent(cc);
			of_node_put(cc);
			break;
		}
	}
	if (pmac_tb_freeze == NULL) {
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
	}
}
577
/* nothing to do here, caches are already set up by service processor */
static inline void __devinit core99_init_caches(int cpu)
{
}
582
583#else /* CONFIG_PPC64 */
584
585/*
586 * SMP G4 powermacs use a GPIO to enable/disable the timebase.
587 */
588
589static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */
590
591static unsigned int pri_tb_hi, pri_tb_lo;
592static unsigned int pri_tb_stamp;
593
594/* not __init, called in sleep/wakeup code */
/* not __init, called in sleep/wakeup code */
/*
 * Primary side of G4 timebase sync: freeze the TB via GPIO, record
 * TBU/TBL and the jiffy stamp, hand them to the secondary through the
 * pri_tb_* globals using sec_tb_reset as a 2-phase handshake flag,
 * then restart the TB.
 */
void smp_core99_give_timebase(void)
{
	unsigned long flags;
	unsigned int t;

	/* wait for the secondary to be in take_timebase */
	for (t = 100000; t > 0 && !sec_tb_reset; --t)
		udelay(10);
	if (!sec_tb_reset) {
		printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
		return;
	}

	/* freeze the timebase and read it */
	/* disable interrupts so the timebase is disabled for the
	   shortest possible time */
	local_irq_save(flags);
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	mb();
	pri_tb_hi = get_tbu();
	pri_tb_lo = get_tbl();
	pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
	mb();

	/* tell the secondary we're ready */
	sec_tb_reset = 2;
	mb();

	/* wait for the secondary to have taken it */
	for (t = 100000; t > 0 && sec_tb_reset; --t)
		udelay(10);
	if (sec_tb_reset)
		printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
	else
		smp_tb_synchronized = 1;

	/* Now, restart the timebase by leaving the GPIO to an open collector */
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	local_irq_restore(flags);
}
637
638/* not __init, called in sleep/wakeup code */
/* not __init, called in sleep/wakeup code */
/*
 * Secondary side of G4 timebase sync: announce ourselves via
 * sec_tb_reset, wait for the primary to publish pri_tb_hi/lo, copy
 * them into our own TB/DEC, then clear the flag to release the primary.
 */
void smp_core99_take_timebase(void)
{
	unsigned long flags;

	/* tell the primary we're here */
	sec_tb_reset = 1;
	mb();

	/* wait for the primary to set pri_tb_hi/lo */
	while (sec_tb_reset < 2)
		mb();

	/* set our stuff the same as the primary */
	local_irq_save(flags);
	set_dec(1);
	set_tb(pri_tb_hi, pri_tb_lo);
	last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
	mb();

	/* tell the primary we're done */
	sec_tb_reset = 0;
	mb();
	local_irq_restore(flags);
}
663
/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus.
 * Storage-class specifier moved first ("static volatile long"): the
 * old "volatile static long int" order is deprecated in C and warns
 * on modern compilers; behavior is identical. */
static volatile long core99_l2_cache;
static volatile long core99_l3_cache;
667
/*
 * Copy CPU0's L2CR (and L3CR when present) settings to a freshly
 * started secondary: CPU0 records its values, other cpus clear then
 * apply them.
 */
static void __devinit core99_init_caches(int cpu)
{
	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		_set_L2CR(0);
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0){
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		_set_L3CR(0);
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
}
696
/*
 * 32-bit Core99 SMP setup: find the GPIO used to freeze the timebase
 * (from the cpu node's "timebase-enable" property, with a keylargo
 * default), fill in smp_hw_index and disable nap during bringup.
 */
static void __init smp_core99_setup(int ncpus)
{
	struct device_node *cpu;
	u32 *tbprop = NULL;
	int i;

	core99_tb_gpio = KL_GPIO_TB_ENABLE;	/* default value */
	cpu = of_find_node_by_type(NULL, "cpu");
	if (cpu != NULL) {
		tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL);
		if (tbprop)
			core99_tb_gpio = *tbprop;
		of_node_put(cpu);
	}

	/* XXX should get this from reg properties */
	for (i = 1; i < ncpus; ++i)
		smp_hw_index[i] = i;
	powersave_nap = 0;
}
717#endif
718
/*
 * Count "cpu" nodes in the device-tree; when more than one is found,
 * run platform setup, request the MPIC IPIs and record CPU0's cache
 * settings.  Returns the number of CPUs to bring up.
 */
static int __init smp_core99_probe(void)
{
	struct device_node *cpus;
	int ncpus = 0;

	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);

	/* Count CPUs in the device-tree */
	for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
		++ncpus;

	printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);

	/* Nothing more to do if less than 2 of them */
	if (ncpus <= 1)
		return 1;

	smp_core99_setup(ncpus);
	mpic_request_ipis();
	core99_init_caches(0);

	return ncpus;
}
742
743static void __devinit smp_core99_kick_cpu(int nr)
744{
745 unsigned int save_vector;
746 unsigned long new_vector;
747 unsigned long flags;
748 volatile unsigned int *vector
749 = ((volatile unsigned int *)(KERNELBASE+0x100));
750
751 if (nr < 0 || nr > 3)
752 return;
753 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
754
755 local_irq_save(flags);
756 local_irq_disable();
757
758 /* Save reset vector */
759 save_vector = *vector;
760
761 /* Setup fake reset vector that does
762 * b __secondary_start_pmac_0 + nr*8 - KERNELBASE
763 */
764 new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
765 *vector = 0x48000002 + new_vector - KERNELBASE;
766
767 /* flush data cache and inval instruction cache */
768 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
769
770 /* Put some life in our friend */
771 pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
772
773 /* FIXME: We wait a bit for the CPU to take the exception, I should
774 * instead wait for the entry code to set something for me. Well,
775 * ideally, all that crap will be done in prom.c and the CPU left
776 * in a RAM-based wait loop like CHRP.
777 */
778 mdelay(1);
779
780 /* Restore our exception vector */
781 *vector = save_vector;
782 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
783
784 local_irq_restore(flags);
785 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
786}
787
/*
 * Per-cpu setup hook for Core99: program caches on secondaries, set up
 * the MPIC for this cpu, and on the boot cpu take CPU1 off the bus on
 * G5s when it failed to come up.
 */
static void __devinit smp_core99_setup_cpu(int cpu_nr)
{
	/* Setup L2/L3 */
	if (cpu_nr != 0)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	mpic_setup_this_cpu();

	if (cpu_nr == 0) {
#ifdef CONFIG_POWER4
		extern void g5_phy_disable_cpu1(void);

		/* If we didn't start the second CPU, we must take
		 * it off the bus
		 */
		if (machine_is_compatible("MacRISC4") &&
		    num_online_cpus() < 2)
			g5_phy_disable_cpu1();
#endif /* CONFIG_POWER4 */
		if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
	}
}
811
812
/* SMP operations vector for Core99 Macs (dual G4s and G5s). */
struct smp_ops_t core99_smp_ops = {
	.message_pass	= smp_mpic_message_pass,
	.probe		= smp_core99_probe,
	.kick_cpu	= smp_core99_kick_cpu,
	.setup_cpu	= smp_core99_setup_cpu,
	.give_timebase	= smp_core99_give_timebase,
	.take_timebase	= smp_core99_take_timebase,
};
822
823#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
824
/*
 * Take the calling CPU out of the online map for hot-unplug: mask its
 * interrupt priority at the MPIC and rearm the decrementer far in the
 * future so it won't fire while the CPU is dying.
 */
int __cpu_disable(void)
{
	cpu_clear(smp_processor_id(), cpu_online_map);

	/* XXX reset cpu affinity here */
	mpic_cpu_set_priority(0xf);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	mb();
	udelay(20);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	return 0;
}
837
838extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
839static int cpu_dead[NR_CPUS];
840
/*
 * Final path of a dying CPU: flag ourselves dead for __cpu_die(),
 * then drop into the low-level sleep code (never returns).
 */
void cpu_die(void)
{
	local_irq_disable();
	cpu_dead[smp_processor_id()] = 1;
	mb();
	low_cpu_die();
}
848
/*
 * Wait (up to ~1s) for @cpu to mark itself dead, then clear its
 * callin/dead markers so it can be brought online again later.
 */
void __cpu_die(unsigned int cpu)
{
	int timeout;

	timeout = 1000;
	while (!cpu_dead[cpu]) {
		if (--timeout == 0) {
			printk("CPU %u refused to die!\n", cpu);
			break;
		}
		msleep(1);
	}
	cpu_callin_map[cpu] = 0;
	cpu_dead[cpu] = 0;
}
864
865#endif
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
new file mode 100644
index 000000000000..82982bf6453c
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -0,0 +1,364 @@
1/*
2 * Support for periodic interrupts (100 per second) and for getting
3 * the current time from the RTC on Power Macintoshes.
4 *
5 * We use the decrementer register for our periodic interrupts.
6 *
7 * Paul Mackerras August 1996.
8 * Copyright (C) 1996 Paul Mackerras.
9 * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
10 *
11 */
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/init.h>
20#include <linux/time.h>
21#include <linux/adb.h>
22#include <linux/cuda.h>
23#include <linux/pmu.h>
24#include <linux/interrupt.h>
25#include <linux/hardirq.h>
26#include <linux/rtc.h>
27
28#include <asm/sections.h>
29#include <asm/prom.h>
30#include <asm/system.h>
31#include <asm/io.h>
32#include <asm/pgtable.h>
33#include <asm/machdep.h>
34#include <asm/time.h>
35#include <asm/nvram.h>
36#include <asm/smu.h>
37
38#undef DEBUG
39
40#ifdef DEBUG
41#define DBG(x...) printk(x)
42#else
43#define DBG(x...)
44#endif
45
/* Apparently the RTC stores seconds since 1 Jan 1904 (the Mac epoch) */
#define RTC_OFFSET	2082844800

/*
 * Calibrate the decrementer frequency with the VIA timer 1.
 */
#define VIA_TIMER_FREQ_6	4700000	/* timer 1 frequency * 6 */

/* VIA registers (byte-wide, spaced RS bytes apart) */
#define RS		0x200		/* skip between registers */
#define T1CL		(4*RS)		/* Timer 1 ctr/latch (low 8 bits) */
#define T1CH		(5*RS)		/* Timer 1 counter (high 8 bits) */
#define T1LL		(6*RS)		/* Timer 1 latch (low 8 bits) */
#define T1LH		(7*RS)		/* Timer 1 latch (high 8 bits) */
#define ACR		(11*RS)		/* Auxiliary control register */
#define IFR		(13*RS)		/* Interrupt flag register */

/* Bits in ACR */
#define T1MODE		0xc0		/* Timer 1 mode */
#define T1MODE_CONT	0x40		/* continuous interrupts */

/* Bits in IFR and IER */
#define T1_INT		0x40		/* Timer 1 interrupt */
69
/* Early time init: return the GMT offset in seconds as stored in the
 * machine's XPRAM, or 0 when NVRAM support is not configured.
 */
long __init pmac_time_init(void)
{
	s32 delta = 0;
#ifdef CONFIG_NVRAM
	int dst;

	/* The GMT delta is a 24-bit signed big-endian value in XPRAM */
	delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
	delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
	delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
	/* Sign-extend from 24 to 32 bits */
	if (delta & 0x00800000UL)
		delta |= 0xFF000000UL;
	/* Bit 7 of this XPRAM byte carries the DST flag (log-only here) */
	dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
	printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
		dst ? "on" : "off");
#endif
	return delta;
}
87
88static void to_rtc_time(unsigned long now, struct rtc_time *tm)
89{
90 to_tm(now, tm);
91 tm->tm_year -= 1900;
92 tm->tm_mon -= 1;
93}
94
95static unsigned long from_rtc_time(struct rtc_time *tm)
96{
97 return mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday,
98 tm->tm_hour, tm->tm_min, tm->tm_sec);
99}
100
101#ifdef CONFIG_ADB_CUDA
/* Read the CUDA RTC and convert it to seconds since the Unix epoch.
 * Returns 0 on any failure: request refused, or a reading earlier
 * than 1970 (below RTC_OFFSET).
 */
static unsigned long cuda_get_time(void)
{
	struct adb_request req;
	unsigned long now;

	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
		return 0;
	/* Busy-poll for completion; this path runs at boot/resume only */
	while (!req.complete)
		cuda_poll();
	if (req.reply_len != 7)
		printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
		       req.reply_len);
	/* Reply bytes 3..6 hold a big-endian Mac-epoch timestamp */
	now = (req.reply[3] << 24) + (req.reply[4] << 16)
		+ (req.reply[5] << 8) + req.reply[6];
	if (now < RTC_OFFSET)
		return 0;
	return now - RTC_OFFSET;
}
120
121#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
122
/* Write a new time to the CUDA RTC.  Returns 0 on success, -ENXIO if
 * the request could not be queued; an unexpected reply length is
 * logged but not treated as an error.
 */
static int cuda_set_rtc_time(struct rtc_time *tm)
{
	unsigned int nowtime;
	struct adb_request req;

	/* Convert to the Mac epoch (seconds since 1 Jan 1904) and send
	 * it big-endian, one byte per argument. */
	nowtime = from_rtc_time(tm) + RTC_OFFSET;
	if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
			 nowtime >> 24, nowtime >> 16, nowtime >> 8,
			 nowtime) < 0)
		return -ENXIO;
	while (!req.complete)
		cuda_poll();
	if ((req.reply_len != 3) && (req.reply_len != 7))
		printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n",
		       req.reply_len);
	return 0;
}
140
141#else
142#define cuda_get_time() 0
143#define cuda_get_rtc_time(tm)
144#define cuda_set_rtc_time(tm) 0
145#endif
146
147#ifdef CONFIG_ADB_PMU
/* Read the PMU RTC and convert it to seconds since the Unix epoch.
 * Returns 0 on failure (request refused or pre-1970 reading).
 */
static unsigned long pmu_get_time(void)
{
	struct adb_request req;
	unsigned long now;

	if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
		return 0;
	pmu_wait_complete(&req);
	if (req.reply_len != 4)
		printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
		       req.reply_len);
	/* Reply is a 4-byte big-endian Mac-epoch timestamp */
	now = (req.reply[0] << 24) + (req.reply[1] << 16)
		+ (req.reply[2] << 8) + req.reply[3];
	if (now < RTC_OFFSET)
		return 0;
	return now - RTC_OFFSET;
}
165
166#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
167
/* Write a new time to the PMU RTC.  Returns 0 on success, -ENXIO if
 * the request could not be queued; a non-empty reply is logged but
 * not treated as an error.
 */
static int pmu_set_rtc_time(struct rtc_time *tm)
{
	unsigned int nowtime;
	struct adb_request req;

	/* Convert to the Mac epoch and send big-endian, byte by byte */
	nowtime = from_rtc_time(tm) + RTC_OFFSET;
	if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
			nowtime >> 16, nowtime >> 8, nowtime) < 0)
		return -ENXIO;
	pmu_wait_complete(&req);
	if (req.reply_len != 0)
		printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n",
		       req.reply_len);
	return 0;
}
183
184#else
185#define pmu_get_time() 0
186#define pmu_get_rtc_time(tm)
187#define pmu_set_rtc_time(tm) 0
188#endif
189
190#ifdef CONFIG_PMAC_SMU
191static unsigned long smu_get_time(void)
192{
193 struct rtc_time tm;
194
195 if (smu_get_rtc_time(&tm, 1))
196 return 0;
197 return from_rtc_time(&tm);
198}
199
200#else
201#define smu_get_time() 0
202#define smu_get_rtc_time(tm, spin)
203#define smu_set_rtc_time(tm, spin) 0
204#endif
205
206unsigned long pmac_get_boot_time(void)
207{
208 /* Get the time from the RTC, used only at boot time */
209 switch (sys_ctrler) {
210 case SYS_CTRLER_CUDA:
211 return cuda_get_time();
212 case SYS_CTRLER_PMU:
213 return pmu_get_time();
214 case SYS_CTRLER_SMU:
215 return smu_get_time();
216 default:
217 return 0;
218 }
219}
220
/* Fill *tm from the system controller's RTC.  Note the cases call
 * macros that expand to nothing when the corresponding controller
 * support is compiled out, so *tm may be left untouched.
 */
void pmac_get_rtc_time(struct rtc_time *tm)
{
	switch (sys_ctrler) {
	case SYS_CTRLER_CUDA:
		cuda_get_rtc_time(tm);
		break;
	case SYS_CTRLER_PMU:
		pmu_get_rtc_time(tm);
		break;
	case SYS_CTRLER_SMU:
		smu_get_rtc_time(tm, 1);
		break;
	default:
		;	/* unknown controller: leave *tm untouched */
	}
}
238
239int pmac_set_rtc_time(struct rtc_time *tm)
240{
241 switch (sys_ctrler) {
242 case SYS_CTRLER_CUDA:
243 return cuda_set_rtc_time(tm);
244 case SYS_CTRLER_PMU:
245 return pmu_set_rtc_time(tm);
246 case SYS_CTRLER_SMU:
247 return smu_set_rtc_time(tm, 1);
248 default:
249 return -ENODEV;
250 }
251}
252
253#ifdef CONFIG_PPC32
/*
 * Calibrate the decrementer register using VIA timer 1.
 * This is used both on powermacs and CHRP machines.
 * Returns 1 on success (ppc_tb_freq updated) or 0 if no VIA device
 * could be found, in which case the caller keeps its previous value.
 */
int __init via_calibrate_decr(void)
{
	struct device_node *vias;
	volatile unsigned char __iomem *via;
	/* One latch period = count / (VIA_TIMER_FREQ_6/6) = 6/100 s */
	int count = VIA_TIMER_FREQ_6 / 100;
	unsigned int dstart, dend;

	vias = find_devices("via-cuda");
	if (vias == 0)
		vias = find_devices("via-pmu");
	if (vias == 0)
		vias = find_devices("via");
	if (vias == 0 || vias->n_addrs == 0)
		return 0;
	via = ioremap(vias->addrs[0].address, vias->addrs[0].size);

	/* set timer 1 for continuous interrupts */
	out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
	/* set the counter to a small value */
	out_8(&via[T1CH], 2);
	/* set the latch to `count' */
	out_8(&via[T1LL], count);
	out_8(&via[T1LH], count >> 8);
	/* wait until it hits 0 */
	while ((in_8(&via[IFR]) & T1_INT) == 0)
		;
	dstart = get_dec();
	/* clear the interrupt & wait until it hits 0 again */
	in_8(&via[T1CL]);
	while ((in_8(&via[IFR]) & T1_INT) == 0)
		;
	dend = get_dec();

	/* Decrementer ticks elapsed over one 6/100 s latch period,
	 * scaled to ticks per second. */
	ppc_tb_freq = (dstart - dend) * 100 / 6;

	iounmap(via);

	return 1;
}
297#endif
298
299#ifdef CONFIG_PM
/*
 * Reset the time after a sleep.  On suspend, record the offset between
 * the kernel's xtime and the RTC; on wake, re-read the RTC and add the
 * offset back so wall-clock time stays consistent across sleep.
 */
static int
time_sleep_notify(struct pmu_sleep_notifier *self, int when)
{
	static unsigned long time_diff;	/* survives across the sleep */
	unsigned long flags;
	unsigned long seq;
	struct timespec tv;

	switch (when) {
	case PBOOK_SLEEP_NOW:
		/* seqlock retry loop: snapshot a consistent xtime */
		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			time_diff = xtime.tv_sec - pmac_get_boot_time();
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		break;
	case PBOOK_WAKE:
		tv.tv_sec = pmac_get_boot_time() + time_diff;
		tv.tv_nsec = 0;
		do_settimeofday(&tv);
		break;
	}
	return PBOOK_SLEEP_OK;
}

/* Registered with the PMU from pmac_calibrate_decr() */
static struct pmu_sleep_notifier time_sleep_notifier = {
	time_sleep_notify, SLEEP_LEVEL_MISC,
};
330#endif /* CONFIG_PM */
331
/*
 * Query the OF and get the decr frequency.  Falls back to measuring
 * against the VIA timer on machines whose device-tree calibration is
 * known to be absent or unreliable.
 */
void __init pmac_calibrate_decr(void)
{
#ifdef CONFIG_PM
	/* XXX why here? */
	pmu_register_sleep_notifier(&time_sleep_notifier);
#endif /* CONFIG_PM */

	/* Default path: take timebase-frequency from the device tree */
	generic_calibrate_decr();

#ifdef CONFIG_PPC32
	/* We assume MacRISC2 machines have correct device-tree
	 * calibration. That's better since the VIA itself seems
	 * to be slightly off. --BenH
	 */
	if (!machine_is_compatible("MacRISC2") &&
	    !machine_is_compatible("MacRISC3") &&
	    !machine_is_compatible("MacRISC4"))
		if (via_calibrate_decr())
			return;

	/* Special case: QuickSilver G4s seem to have a badly calibrated
	 * timebase-frequency in OF, VIA is much better on these. We should
	 * probably implement calibration based on the KL timer on these
	 * machines anyway... -BenH
	 */
	if (machine_is_compatible("PowerMac3,5"))
		if (via_calibrate_decr())
			return;
#endif
}
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
new file mode 100644
index 000000000000..673ac47a1626
--- /dev/null
+++ b/arch/powerpc/platforms/prep/Kconfig
@@ -0,0 +1,22 @@
1
2config PREP_RESIDUAL
3 bool "Support for PReP Residual Data"
4 depends on PPC_PREP
5 help
6 Some PReP systems have residual data passed to the kernel by the
7 firmware. This allows detection of memory size, devices present and
8 other useful pieces of information. Sometimes this information is
9 not present or incorrect, in which case it could lead to the machine
10 behaving incorrectly. If this happens, either disable PREP_RESIDUAL
11 or pass the 'noresidual' option to the kernel.
12
13 If you are running a PReP system, say Y here, otherwise say N.
14
15config PROC_PREPRESIDUAL
16 bool "Support for reading of PReP Residual Data in /proc"
17 depends on PREP_RESIDUAL && PROC_FS
18 help
19 Enabling this option will create a /proc/residual file which allows
20 you to get at the residual data on PReP systems. You will need a tool
21 (lsresidual) to parse it. If you aren't on a PReP system, you don't
22 want this.
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
new file mode 100644
index 000000000000..7a3b6fc4d976
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -0,0 +1,47 @@
1
2config PPC_SPLPAR
3 depends on PPC_PSERIES
4 bool "Support for shared-processor logical partitions"
5 default n
6 help
7 Enabling this option will make the kernel run more efficiently
8 on logically-partitioned pSeries systems which use shared
9 processors, that is, which share physical processors between
10 two or more partitions.
11
12config HMT
13 bool "Hardware multithreading"
14 depends on SMP && PPC_PSERIES && BROKEN
15 help
16 This option enables hardware multithreading on RS64 cpus.
17 pSeries systems p620 and p660 have such a cpu type.
18
19config EEH
20 bool "PCI Extended Error Handling (EEH)" if EMBEDDED
21 depends on PPC_PSERIES
22 default y if !EMBEDDED
23
24config PPC_RTAS
25 bool
26 depends on PPC_PSERIES || PPC_BPA
27 default y
28
29config RTAS_PROC
30 bool "Proc interface to RTAS"
31 depends on PPC_RTAS
32 default y
33
34config RTAS_FLASH
35 tristate "Firmware flash interface"
36 depends on PPC64 && RTAS_PROC
37
38config SCANLOG
39 tristate "Scanlog dump interface"
40 depends on RTAS_PROC && PPC_PSERIES
41
42config LPARCFG
43 tristate "LPAR Configuration Data"
44 depends on PPC_PSERIES || PPC_ISERIES
45 help
46 Provide system capacity information via human readable
47 <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
new file mode 100644
index 000000000000..9ebb34180a10
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -0,0 +1,4 @@
1obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
2 setup.o iommu.o
3obj-$(CONFIG_SMP) += smp.o
4obj-$(CONFIG_IBMVIO) += vio.o
diff --git a/arch/ppc64/kernel/pSeries_hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 176e8da76466..176e8da76466 100644
--- a/arch/ppc64/kernel/pSeries_hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index d17f0108a032..9e90d41131d8 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -46,7 +46,8 @@
46#include <asm/pSeries_reconfig.h> 46#include <asm/pSeries_reconfig.h>
47#include <asm/systemcfg.h> 47#include <asm/systemcfg.h>
48#include <asm/firmware.h> 48#include <asm/firmware.h>
49#include "pci.h" 49#include <asm/tce.h>
50#include <asm/ppc-pci.h>
50 51
51#define DBG(fmt...) 52#define DBG(fmt...)
52 53
@@ -59,6 +60,9 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
59 union tce_entry t; 60 union tce_entry t;
60 union tce_entry *tp; 61 union tce_entry *tp;
61 62
63 index <<= TCE_PAGE_FACTOR;
64 npages <<= TCE_PAGE_FACTOR;
65
62 t.te_word = 0; 66 t.te_word = 0;
63 t.te_rdwr = 1; // Read allowed 67 t.te_rdwr = 1; // Read allowed
64 68
@@ -69,11 +73,11 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
69 73
70 while (npages--) { 74 while (npages--) {
71 /* can't move this out since we might cross LMB boundary */ 75 /* can't move this out since we might cross LMB boundary */
72 t.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT; 76 t.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
73 77
74 tp->te_word = t.te_word; 78 tp->te_word = t.te_word;
75 79
76 uaddr += PAGE_SIZE; 80 uaddr += TCE_PAGE_SIZE;
77 tp++; 81 tp++;
78 } 82 }
79} 83}
@@ -84,6 +88,9 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
84 union tce_entry t; 88 union tce_entry t;
85 union tce_entry *tp; 89 union tce_entry *tp;
86 90
91 npages <<= TCE_PAGE_FACTOR;
92 index <<= TCE_PAGE_FACTOR;
93
87 t.te_word = 0; 94 t.te_word = 0;
88 tp = ((union tce_entry *)tbl->it_base) + index; 95 tp = ((union tce_entry *)tbl->it_base) + index;
89 96
@@ -103,7 +110,7 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
103 union tce_entry tce; 110 union tce_entry tce;
104 111
105 tce.te_word = 0; 112 tce.te_word = 0;
106 tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT; 113 tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
107 tce.te_rdwr = 1; 114 tce.te_rdwr = 1;
108 if (direction != DMA_TO_DEVICE) 115 if (direction != DMA_TO_DEVICE)
109 tce.te_pciwr = 1; 116 tce.te_pciwr = 1;
@@ -136,6 +143,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
136 union tce_entry tce, *tcep; 143 union tce_entry tce, *tcep;
137 long l, limit; 144 long l, limit;
138 145
146 tcenum <<= TCE_PAGE_FACTOR;
147 npages <<= TCE_PAGE_FACTOR;
148
139 if (npages == 1) 149 if (npages == 1)
140 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, 150 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
141 direction); 151 direction);
@@ -155,7 +165,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
155 } 165 }
156 166
157 tce.te_word = 0; 167 tce.te_word = 0;
158 tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT; 168 tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
159 tce.te_rdwr = 1; 169 tce.te_rdwr = 1;
160 if (direction != DMA_TO_DEVICE) 170 if (direction != DMA_TO_DEVICE)
161 tce.te_pciwr = 1; 171 tce.te_pciwr = 1;
@@ -166,7 +176,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
166 * Set up the page with TCE data, looping through and setting 176 * Set up the page with TCE data, looping through and setting
167 * the values. 177 * the values.
168 */ 178 */
169 limit = min_t(long, npages, PAGE_SIZE/sizeof(union tce_entry)); 179 limit = min_t(long, npages, 4096/sizeof(union tce_entry));
170 180
171 for (l = 0; l < limit; l++) { 181 for (l = 0; l < limit; l++) {
172 tcep[l] = tce; 182 tcep[l] = tce;
@@ -196,6 +206,9 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
196 u64 rc; 206 u64 rc;
197 union tce_entry tce; 207 union tce_entry tce;
198 208
209 tcenum <<= TCE_PAGE_FACTOR;
210 npages <<= TCE_PAGE_FACTOR;
211
199 tce.te_word = 0; 212 tce.te_word = 0;
200 213
201 while (npages--) { 214 while (npages--) {
@@ -221,6 +234,9 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
221 u64 rc; 234 u64 rc;
222 union tce_entry tce; 235 union tce_entry tce;
223 236
237 tcenum <<= TCE_PAGE_FACTOR;
238 npages <<= TCE_PAGE_FACTOR;
239
224 tce.te_word = 0; 240 tce.te_word = 0;
225 241
226 rc = plpar_tce_stuff((u64)tbl->it_index, 242 rc = plpar_tce_stuff((u64)tbl->it_index,
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index a6de83f2078f..268d8362dde7 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -486,8 +486,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
486 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie 486 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
487 * lock. 487 * lock.
488 */ 488 */
489void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number, 489void pSeries_lpar_flush_hash_range(unsigned long number, int local)
490 int local)
491{ 490{
492 int i; 491 int i;
493 unsigned long flags = 0; 492 unsigned long flags = 0;
@@ -498,7 +497,7 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
498 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); 497 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
499 498
500 for (i = 0; i < number; i++) 499 for (i = 0; i < number; i++)
501 flush_hash_page(context, batch->addr[i], batch->pte[i], local); 500 flush_hash_page(batch->vaddr[i], batch->pte[i], local);
502 501
503 if (lock_tlbie) 502 if (lock_tlbie)
504 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); 503 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
diff --git a/arch/ppc64/kernel/pSeries_nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 18abfb1f4e24..18abfb1f4e24 100644
--- a/arch/ppc64/kernel/pSeries_nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/powerpc/platforms/pseries/pci.c
index 928f8febdb3b..c198656a3bb5 100644
--- a/arch/ppc64/kernel/pSeries_pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -29,8 +29,7 @@
29 29
30#include <asm/pci-bridge.h> 30#include <asm/pci-bridge.h>
31#include <asm/prom.h> 31#include <asm/prom.h>
32 32#include <asm/ppc-pci.h>
33#include "pci.h"
34 33
35static int __devinitdata s7a_workaround = -1; 34static int __devinitdata s7a_workaround = -1;
36 35
diff --git a/arch/ppc64/kernel/pSeries_reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 58c61219d08e..58c61219d08e 100644
--- a/arch/ppc64/kernel/pSeries_reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/powerpc/platforms/pseries/setup.c
index 3009701eb90d..92d18003f152 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -62,10 +62,10 @@
62#include <asm/xics.h> 62#include <asm/xics.h>
63#include <asm/firmware.h> 63#include <asm/firmware.h>
64#include <asm/pmc.h> 64#include <asm/pmc.h>
65 65#include <asm/mpic.h>
66#include "i8259.h" 66#include <asm/ppc-pci.h>
67#include "mpic.h" 67#include <asm/i8259.h>
68#include "pci.h" 68#include <asm/udbg.h>
69 69
70#ifdef DEBUG 70#ifdef DEBUG
71#define DBG(fmt...) udbg_printf(fmt) 71#define DBG(fmt...) udbg_printf(fmt)
@@ -84,13 +84,13 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */
84extern void pSeries_system_reset_exception(struct pt_regs *regs); 84extern void pSeries_system_reset_exception(struct pt_regs *regs);
85extern int pSeries_machine_check_exception(struct pt_regs *regs); 85extern int pSeries_machine_check_exception(struct pt_regs *regs);
86 86
87static int pseries_shared_idle(void); 87static void pseries_shared_idle(void);
88static int pseries_dedicated_idle(void); 88static void pseries_dedicated_idle(void);
89 89
90static volatile void __iomem * chrp_int_ack_special; 90static volatile void __iomem * chrp_int_ack_special;
91struct mpic *pSeries_mpic; 91struct mpic *pSeries_mpic;
92 92
93void pSeries_get_cpuinfo(struct seq_file *m) 93void pSeries_show_cpuinfo(struct seq_file *m)
94{ 94{
95 struct device_node *root; 95 struct device_node *root;
96 const char *model = ""; 96 const char *model = "";
@@ -124,7 +124,7 @@ static int pSeries_irq_cascade(struct pt_regs *regs, void *data)
124 if (chrp_int_ack_special) 124 if (chrp_int_ack_special)
125 return readb(chrp_int_ack_special); 125 return readb(chrp_int_ack_special);
126 else 126 else
127 return i8259_irq(smp_processor_id()); 127 return i8259_irq(regs);
128} 128}
129 129
130static void __init pSeries_init_mpic(void) 130static void __init pSeries_init_mpic(void)
@@ -241,10 +241,6 @@ static void __init pSeries_setup_arch(void)
241 find_and_init_phbs(); 241 find_and_init_phbs();
242 eeh_init(); 242 eeh_init();
243 243
244#ifdef CONFIG_DUMMY_CONSOLE
245 conswitchp = &dummy_con;
246#endif
247
248 pSeries_nvram_init(); 244 pSeries_nvram_init();
249 245
250 /* Choose an idle loop */ 246 /* Choose an idle loop */
@@ -488,8 +484,8 @@ static inline void dedicated_idle_sleep(unsigned int cpu)
488 } 484 }
489} 485}
490 486
491static int pseries_dedicated_idle(void) 487static void pseries_dedicated_idle(void)
492{ 488{
493 long oldval; 489 long oldval;
494 struct paca_struct *lpaca = get_paca(); 490 struct paca_struct *lpaca = get_paca();
495 unsigned int cpu = smp_processor_id(); 491 unsigned int cpu = smp_processor_id();
@@ -544,7 +540,7 @@ static int pseries_dedicated_idle(void)
544 } 540 }
545} 541}
546 542
547static int pseries_shared_idle(void) 543static void pseries_shared_idle(void)
548{ 544{
549 struct paca_struct *lpaca = get_paca(); 545 struct paca_struct *lpaca = get_paca();
550 unsigned int cpu = smp_processor_id(); 546 unsigned int cpu = smp_processor_id();
@@ -586,8 +582,6 @@ static int pseries_shared_idle(void)
586 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) 582 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
587 cpu_die(); 583 cpu_die();
588 } 584 }
589
590 return 0;
591} 585}
592 586
593static int pSeries_pci_probe_mode(struct pci_bus *bus) 587static int pSeries_pci_probe_mode(struct pci_bus *bus)
@@ -601,7 +595,7 @@ struct machdep_calls __initdata pSeries_md = {
601 .probe = pSeries_probe, 595 .probe = pSeries_probe,
602 .setup_arch = pSeries_setup_arch, 596 .setup_arch = pSeries_setup_arch,
603 .init_early = pSeries_init_early, 597 .init_early = pSeries_init_early,
604 .get_cpuinfo = pSeries_get_cpuinfo, 598 .show_cpuinfo = pSeries_show_cpuinfo,
605 .log_error = pSeries_log_error, 599 .log_error = pSeries_log_error,
606 .pcibios_fixup = pSeries_final_fixup, 600 .pcibios_fixup = pSeries_final_fixup,
607 .pci_probe_mode = pSeries_pci_probe_mode, 601 .pci_probe_mode = pSeries_pci_probe_mode,
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/powerpc/platforms/pseries/smp.c
index d2c7e2c4733b..ae1bd270f308 100644
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * SMP support for pSeries and BPA machines. 2 * SMP support for pSeries machines.
3 * 3 *
4 * Dave Engebretsen, Peter Bergner, and 4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com 5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
@@ -46,9 +46,7 @@
46#include <asm/rtas.h> 46#include <asm/rtas.h>
47#include <asm/plpar_wrappers.h> 47#include <asm/plpar_wrappers.h>
48#include <asm/pSeries_reconfig.h> 48#include <asm/pSeries_reconfig.h>
49 49#include <asm/mpic.h>
50#include "mpic.h"
51#include "bpa_iic.h"
52 50
53#ifdef DEBUG 51#ifdef DEBUG
54#define DBG(fmt...) udbg_printf(fmt) 52#define DBG(fmt...) udbg_printf(fmt)
@@ -343,36 +341,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
343 341
344} 342}
345#endif /* CONFIG_XICS */ 343#endif /* CONFIG_XICS */
346#ifdef CONFIG_BPA_IIC
347static void smp_iic_message_pass(int target, int msg)
348{
349 unsigned int i;
350
351 if (target < NR_CPUS) {
352 iic_cause_IPI(target, msg);
353 } else {
354 for_each_online_cpu(i) {
355 if (target == MSG_ALL_BUT_SELF
356 && i == smp_processor_id())
357 continue;
358 iic_cause_IPI(i, msg);
359 }
360 }
361}
362
363static int __init smp_iic_probe(void)
364{
365 iic_request_IPIs();
366
367 return cpus_weight(cpu_possible_map);
368}
369
370static void __devinit smp_iic_setup_cpu(int cpu)
371{
372 if (cpu != boot_cpuid)
373 iic_setup_cpu();
374}
375#endif /* CONFIG_BPA_IIC */
376 344
377static DEFINE_SPINLOCK(timebase_lock); 345static DEFINE_SPINLOCK(timebase_lock);
378static unsigned long timebase = 0; 346static unsigned long timebase = 0;
@@ -444,15 +412,6 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
444 .cpu_bootable = smp_pSeries_cpu_bootable, 412 .cpu_bootable = smp_pSeries_cpu_bootable,
445}; 413};
446#endif 414#endif
447#ifdef CONFIG_BPA_IIC
448static struct smp_ops_t bpa_iic_smp_ops = {
449 .message_pass = smp_iic_message_pass,
450 .probe = smp_iic_probe,
451 .kick_cpu = smp_pSeries_kick_cpu,
452 .setup_cpu = smp_iic_setup_cpu,
453 .cpu_bootable = smp_pSeries_cpu_bootable,
454};
455#endif
456 415
457/* This is called very early */ 416/* This is called very early */
458void __init smp_init_pSeries(void) 417void __init smp_init_pSeries(void)
@@ -472,11 +431,6 @@ void __init smp_init_pSeries(void)
472 smp_ops = &pSeries_xics_smp_ops; 431 smp_ops = &pSeries_xics_smp_ops;
473 break; 432 break;
474#endif 433#endif
475#ifdef CONFIG_BPA_IIC
476 case IC_BPA_IIC:
477 smp_ops = &bpa_iic_smp_ops;
478 break;
479#endif
480 default: 434 default:
481 panic("Invalid interrupt controller"); 435 panic("Invalid interrupt controller");
482 } 436 }
diff --git a/arch/ppc64/kernel/pSeries_vio.c b/arch/powerpc/platforms/pseries/vio.c
index e0ae06f58f86..866379b80c09 100644
--- a/arch/ppc64/kernel/pSeries_vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -22,6 +22,7 @@
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/vio.h> 23#include <asm/vio.h>
24#include <asm/hvcall.h> 24#include <asm/hvcall.h>
25#include <asm/tce.h>
25 26
26extern struct subsystem devices_subsys; /* needed for vio_find_name() */ 27extern struct subsystem devices_subsys; /* needed for vio_find_name() */
27 28
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
new file mode 100644
index 000000000000..c649f03acf68
--- /dev/null
+++ b/arch/powerpc/sysdev/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_MPIC) += mpic.o
2indirectpci-$(CONFIG_PPC_PMAC) = indirect_pci.o
3obj-$(CONFIG_PPC32) += $(indirectpci-y)
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
new file mode 100644
index 000000000000..e71488469704
--- /dev/null
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -0,0 +1,134 @@
1/*
2 * Support for indirect PCI bridges.
3 *
4 * Copyright (C) 1998 Gabriel Paubert.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/init.h>
17
18#include <asm/io.h>
19#include <asm/prom.h>
20#include <asm/pci-bridge.h>
21#include <asm/machdep.h>
22
23#ifdef CONFIG_PPC_INDIRECT_PCI_BE
24#define PCI_CFG_OUT out_be32
25#else
26#define PCI_CFG_OUT out_le32
27#endif
28
/* Read PCI config space through an indirect (address/data port)
 * bridge: latch the config address into cfg_addr, then read the value
 * from the matching byte lane of cfg_data.  Returns PCIBIOS_* codes.
 */
static int
indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		     int len, u32 *val)
{
	struct pci_controller *hose = bus->sysdata;
	volatile void __iomem *cfg_data;
	u8 cfg_type = 0;

	/* Let the platform hide specific devices from config access */
	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;

	/* Use type-1 cycles for buses behind the root bus */
	if (hose->set_cfg_type)
		if (bus->number != hose->first_busno)
			cfg_type = 1;

	PCI_CFG_OUT(hose->cfg_addr,
		    (0x80000000 | ((bus->number - hose->bus_offset) << 16)
		     | (devfn << 8) | ((offset & 0xfc) | cfg_type)));

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	cfg_data = hose->cfg_data + (offset & 3);
	switch (len) {
	case 1:
		*val = in_8(cfg_data);
		break;
	case 2:
		*val = in_le16(cfg_data);
		break;
	default:
		*val = in_le32(cfg_data);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
67
68static int
69indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
70 int len, u32 val)
71{
72 struct pci_controller *hose = bus->sysdata;
73 volatile void __iomem *cfg_data;
74 u8 cfg_type = 0;
75
76 if (ppc_md.pci_exclude_device)
77 if (ppc_md.pci_exclude_device(bus->number, devfn))
78 return PCIBIOS_DEVICE_NOT_FOUND;
79
80 if (hose->set_cfg_type)
81 if (bus->number != hose->first_busno)
82 cfg_type = 1;
83
84 PCI_CFG_OUT(hose->cfg_addr,
85 (0x80000000 | ((bus->number - hose->bus_offset) << 16)
86 | (devfn << 8) | ((offset & 0xfc) | cfg_type)));
87
88 /*
89 * Note: the caller has already checked that offset is
90 * suitably aligned and that len is 1, 2 or 4.
91 */
92 cfg_data = hose->cfg_data + (offset & 3);
93 switch (len) {
94 case 1:
95 out_8(cfg_data, val);
96 break;
97 case 2:
98 out_le16(cfg_data, val);
99 break;
100 default:
101 out_le32(cfg_data, val);
102 break;
103 }
104 return PCIBIOS_SUCCESSFUL;
105}
106
/* pci_ops vector installed on each indirect-mode controller */
static struct pci_ops indirect_pci_ops =
{
	indirect_read_config,
	indirect_write_config
};
112
113void __init
114setup_indirect_pci_nomap(struct pci_controller* hose, void __iomem * cfg_addr,
115 void __iomem * cfg_data)
116{
117 hose->cfg_addr = cfg_addr;
118 hose->cfg_data = cfg_data;
119 hose->ops = &indirect_pci_ops;
120}
121
122void __init
123setup_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data)
124{
125 unsigned long base = cfg_addr & PAGE_MASK;
126 void __iomem *mbase, *addr, *data;
127
128 mbase = ioremap(base, PAGE_SIZE);
129 addr = mbase + (cfg_addr & ~PAGE_MASK);
130 if ((cfg_data & PAGE_MASK) != base)
131 mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
132 data = mbase + (cfg_data & ~PAGE_MASK);
133 setup_indirect_pci_nomap(hose, addr, data);
134}
diff --git a/arch/ppc64/kernel/mpic.c b/arch/powerpc/sysdev/mpic.c
index cc262a05ddb4..3948e759d41a 100644
--- a/arch/ppc64/kernel/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/ppc64/kernel/mpic.c 2 * arch/powerpc/kernel/mpic.c
3 * 3 *
4 * Driver for interrupt controllers following the OpenPIC standard, the 4 * Driver for interrupt controllers following the OpenPIC standard, the
5 * common implementation beeing IBM's MPIC. This driver also can deal 5 * common implementation beeing IBM's MPIC. This driver also can deal
@@ -31,8 +31,8 @@
31#include <asm/pgtable.h> 31#include <asm/pgtable.h>
32#include <asm/irq.h> 32#include <asm/irq.h>
33#include <asm/machdep.h> 33#include <asm/machdep.h>
34 34#include <asm/mpic.h>
35#include "mpic.h" 35#include <asm/smp.h>
36 36
37#ifdef DEBUG 37#ifdef DEBUG
38#define DBG(fmt...) printk(fmt) 38#define DBG(fmt...) printk(fmt)
@@ -44,6 +44,9 @@ static struct mpic *mpics;
44static struct mpic *mpic_primary; 44static struct mpic *mpic_primary;
45static DEFINE_SPINLOCK(mpic_lock); 45static DEFINE_SPINLOCK(mpic_lock);
46 46
47#ifdef CONFIG_PPC32 /* XXX for now */
48#define distribute_irqs CONFIG_IRQ_ALL_CPUS
49#endif
47 50
48/* 51/*
49 * Register accessor functions 52 * Register accessor functions
@@ -480,6 +483,7 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
480 if (mpic == NULL) 483 if (mpic == NULL)
481 return NULL; 484 return NULL;
482 485
486
483 memset(mpic, 0, sizeof(struct mpic)); 487 memset(mpic, 0, sizeof(struct mpic));
484 mpic->name = name; 488 mpic->name = name;
485 489
@@ -700,7 +704,7 @@ void __init mpic_init(struct mpic *mpic)
700 /* init hw */ 704 /* init hw */
701 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); 705 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
702 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 706 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
703 1 << get_hard_smp_processor_id(boot_cpuid)); 707 1 << hard_smp_processor_id());
704 708
705 /* init linux descriptors */ 709 /* init linux descriptors */
706 if (i < mpic->irq_count) { 710 if (i < mpic->irq_count) {
@@ -792,6 +796,21 @@ void mpic_setup_this_cpu(void)
792#endif /* CONFIG_SMP */ 796#endif /* CONFIG_SMP */
793} 797}
794 798
799int mpic_cpu_get_priority(void)
800{
801 struct mpic *mpic = mpic_primary;
802
803 return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
804}
805
806void mpic_cpu_set_priority(int prio)
807{
808 struct mpic *mpic = mpic_primary;
809
810 prio &= MPIC_CPU_TASKPRI_MASK;
811 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
812}
813
795/* 814/*
796 * XXX: someone who knows mpic should check this. 815 * XXX: someone who knows mpic should check this.
797 * do we need to eoi the ipi including for kexec cpu here (see xics comments)? 816 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
@@ -885,4 +904,25 @@ void mpic_request_ipis(void)
885 904
886 printk("IPIs requested... \n"); 905 printk("IPIs requested... \n");
887} 906}
907
908void smp_mpic_message_pass(int target, int msg)
909{
910 /* make sure we're sending something that translates to an IPI */
911 if ((unsigned int)msg > 3) {
912 printk("SMP %d: smp_message_pass: unknown msg %d\n",
913 smp_processor_id(), msg);
914 return;
915 }
916 switch (target) {
917 case MSG_ALL:
918 mpic_send_ipi(msg, 0xffffffff);
919 break;
920 case MSG_ALL_BUT_SELF:
921 mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
922 break;
923 default:
924 mpic_send_ipi(msg, 1 << target);
925 break;
926 }
927}
888#endif /* CONFIG_SMP */ 928#endif /* CONFIG_SMP */
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 776941c75672..ed9c9727d75f 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -747,12 +747,12 @@ config CPM2
747 on it (826x, 827x, 8560). 747 on it (826x, 827x, 8560).
748 748
749config PPC_CHRP 749config PPC_CHRP
750 bool 750 bool " Common Hardware Reference Platform (CHRP) based machines"
751 depends on PPC_MULTIPLATFORM 751 depends on PPC_MULTIPLATFORM
752 default y 752 default y
753 753
754config PPC_PMAC 754config PPC_PMAC
755 bool 755 bool " Apple PowerMac based machines"
756 depends on PPC_MULTIPLATFORM 756 depends on PPC_MULTIPLATFORM
757 default y 757 default y
758 758
@@ -762,7 +762,7 @@ config PPC_PMAC64
762 default y 762 default y
763 763
764config PPC_PREP 764config PPC_PREP
765 bool 765 bool " PowerPC Reference Platform (PReP) based machines"
766 depends on PPC_MULTIPLATFORM 766 depends on PPC_MULTIPLATFORM
767 default y 767 default y
768 768
@@ -1368,7 +1368,7 @@ endmenu
1368 1368
1369source "lib/Kconfig" 1369source "lib/Kconfig"
1370 1370
1371source "arch/ppc/oprofile/Kconfig" 1371source "arch/powerpc/oprofile/Kconfig"
1372 1372
1373source "arch/ppc/Kconfig.debug" 1373source "arch/ppc/Kconfig.debug"
1374 1374
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 16e2675f3270..aedc9ae13b2a 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -57,9 +57,10 @@ head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o
57 57
58head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o 58head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o
59head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o 59head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
60head-$(CONFIG_PPC_FPU) += arch/ppc/kernel/fpu.o 60head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
61 61
62core-y += arch/ppc/kernel/ arch/ppc/platforms/ \ 62core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \
63 arch/ppc/platforms/ \
63 arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/ 64 arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/
64core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/ 65core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/
65core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/ 66core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/
@@ -71,7 +72,7 @@ drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/
71drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/ 72drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/
72drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/ 73drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
73 74
74drivers-$(CONFIG_OPROFILE) += arch/ppc/oprofile/ 75drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
75 76
76BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm 77BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
77 78
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index b1457a8a9c0f..b35346df1e37 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -1,6 +1,7 @@
1# 1#
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4ifneq ($(CONFIG_PPC_MERGE),y)
4 5
5extra-$(CONFIG_PPC_STD_MMU) := head.o 6extra-$(CONFIG_PPC_STD_MMU) := head.o
6extra-$(CONFIG_40x) := head_4xx.o 7extra-$(CONFIG_40x) := head_4xx.o
@@ -9,13 +10,12 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
9extra-$(CONFIG_8xx) := head_8xx.o 10extra-$(CONFIG_8xx) := head_8xx.o
10extra-$(CONFIG_6xx) += idle_6xx.o 11extra-$(CONFIG_6xx) += idle_6xx.o
11extra-$(CONFIG_POWER4) += idle_power4.o 12extra-$(CONFIG_POWER4) += idle_power4.o
12extra-$(CONFIG_PPC_FPU) += fpu.o
13extra-y += vmlinux.lds 13extra-y += vmlinux.lds
14 14
15obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ 15obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
16 process.o signal.o ptrace.o align.o \ 16 process.o align.o \
17 semaphore.o syscalls.o setup.o \ 17 setup.o \
18 cputable.o ppc_htab.o perfmon.o 18 ppc_htab.o
19obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o 19obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
20obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o 20obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
21obj-$(CONFIG_POWER4) += cpu_setup_power4.o 21obj-$(CONFIG_POWER4) += cpu_setup_power4.o
@@ -25,7 +25,6 @@ obj-$(CONFIG_PCI) += pci.o
25obj-$(CONFIG_KGDB) += ppc-stub.o 25obj-$(CONFIG_KGDB) += ppc-stub.o
26obj-$(CONFIG_SMP) += smp.o smp-tbsync.o 26obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
27obj-$(CONFIG_TAU) += temp.o 27obj-$(CONFIG_TAU) += temp.o
28obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
29ifndef CONFIG_E200 28ifndef CONFIG_E200
30obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o 29obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
31endif 30endif
@@ -35,3 +34,21 @@ ifndef CONFIG_MATH_EMULATION
35obj-$(CONFIG_8xx) += softemu8xx.o 34obj-$(CONFIG_8xx) += softemu8xx.o
36endif 35endif
37 36
37# These are here while we do the architecture merge
38
39else
40obj-y := irq.o idle.o \
41 align.o
42obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
43obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
44obj-$(CONFIG_MODULES) += module.o
45obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
46obj-$(CONFIG_PCI) += pci.o
47obj-$(CONFIG_KGDB) += ppc-stub.o
48obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
49obj-$(CONFIG_TAU) += temp.o
50ifndef CONFIG_E200
51obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
52endif
53obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
54endif
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index d9ad1d776d0e..968261d69572 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -130,10 +130,10 @@ main(void)
130 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); 130 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
131 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); 131 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
132 132
133 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
133 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 134 DEFINE(TI_TASK, offsetof(struct thread_info, task));
134 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); 135 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
135 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 136 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
136 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
137 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 137 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
138 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 138 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
139 139
@@ -141,6 +141,7 @@ main(void)
141 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); 141 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
142 DEFINE(pbe_next, offsetof(struct pbe, next)); 142 DEFINE(pbe_next, offsetof(struct pbe, next));
143 143
144 DEFINE(TASK_SIZE, TASK_SIZE);
144 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); 145 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
145 return 0; 146 return 0;
146} 147}
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
index ba396438ede3..55ed7716636f 100644
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ b/arch/ppc/kernel/cpu_setup_6xx.S
@@ -17,8 +17,6 @@
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18#include <asm/cache.h> 18#include <asm/cache.h>
19 19
20_GLOBAL(__setup_cpu_601)
21 blr
22_GLOBAL(__setup_cpu_603) 20_GLOBAL(__setup_cpu_603)
23 b setup_common_caches 21 b setup_common_caches
24_GLOBAL(__setup_cpu_604) 22_GLOBAL(__setup_cpu_604)
@@ -292,10 +290,10 @@ _GLOBAL(__init_fpu_registers)
292#define CS_SIZE 32 290#define CS_SIZE 32
293 291
294 .data 292 .data
295 .balign L1_CACHE_LINE_SIZE 293 .balign L1_CACHE_BYTES
296cpu_state_storage: 294cpu_state_storage:
297 .space CS_SIZE 295 .space CS_SIZE
298 .balign L1_CACHE_LINE_SIZE,0 296 .balign L1_CACHE_BYTES,0
299 .text 297 .text
300 298
301/* Called in normal context to backup CPU 0 state. This 299/* Called in normal context to backup CPU 0 state. This
diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S
index 7e4fbb653724..d7bfd60e21fc 100644
--- a/arch/ppc/kernel/cpu_setup_power4.S
+++ b/arch/ppc/kernel/cpu_setup_power4.S
@@ -63,8 +63,6 @@ _GLOBAL(__970_cpu_preinit)
63 isync 63 isync
64 blr 64 blr
65 65
66_GLOBAL(__setup_cpu_power4)
67 blr
68_GLOBAL(__setup_cpu_ppc970) 66_GLOBAL(__setup_cpu_ppc970)
69 mfspr r0,SPRN_HID0 67 mfspr r0,SPRN_HID0
70 li r11,5 /* clear DOZE and SLEEP */ 68 li r11,5 /* clear DOZE and SLEEP */
@@ -88,10 +86,10 @@ _GLOBAL(__setup_cpu_ppc970)
88#define CS_SIZE 32 86#define CS_SIZE 32
89 87
90 .data 88 .data
91 .balign L1_CACHE_LINE_SIZE 89 .balign L1_CACHE_BYTES
92cpu_state_storage: 90cpu_state_storage:
93 .space CS_SIZE 91 .space CS_SIZE
94 .balign L1_CACHE_LINE_SIZE,0 92 .balign L1_CACHE_BYTES,0
95 .text 93 .text
96 94
97/* Called in normal context to backup CPU 0 state. This 95/* Called in normal context to backup CPU 0 state. This
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 03d4886869f3..f044edbb454f 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -200,9 +200,8 @@ _GLOBAL(DoSyscall)
200 bl do_show_syscall 200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */ 201#endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,18 /* current_thread_info() */ 202 rlwinm r10,r1,0,0,18 /* current_thread_info() */
203 lwz r11,TI_LOCAL_FLAGS(r10) 203 li r11,0
204 rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR 204 stb r11,TI_SC_NOERR(r10)
205 stw r11,TI_LOCAL_FLAGS(r10)
206 lwz r11,TI_FLAGS(r10) 205 lwz r11,TI_FLAGS(r10)
207 andi. r11,r11,_TIF_SYSCALL_T_OR_A 206 andi. r11,r11,_TIF_SYSCALL_T_OR_A
208 bne- syscall_dotrace 207 bne- syscall_dotrace
@@ -227,8 +226,8 @@ ret_from_syscall:
227 cmplw 0,r3,r11 226 cmplw 0,r3,r11
228 rlwinm r12,r1,0,0,18 /* current_thread_info() */ 227 rlwinm r12,r1,0,0,18 /* current_thread_info() */
229 blt+ 30f 228 blt+ 30f
230 lwz r11,TI_LOCAL_FLAGS(r12) 229 lbz r11,TI_SC_NOERR(r12)
231 andi. r11,r11,_TIFL_FORCE_NOERROR 230 cmpwi r11,0
232 bne 30f 231 bne 30f
233 neg r3,r3 232 neg r3,r3
234 lwz r10,_CCR(r1) /* Set SO bit in CR */ 233 lwz r10,_CCR(r1) /* Set SO bit in CR */
@@ -633,7 +632,8 @@ sigreturn_exit:
633 rlwinm r12,r1,0,0,18 /* current_thread_info() */ 632 rlwinm r12,r1,0,0,18 /* current_thread_info() */
634 lwz r9,TI_FLAGS(r12) 633 lwz r9,TI_FLAGS(r12)
635 andi. r0,r9,_TIF_SYSCALL_T_OR_A 634 andi. r0,r9,_TIF_SYSCALL_T_OR_A
636 bnel- do_syscall_trace_leave 635 beq+ ret_from_except_full
636 bl do_syscall_trace_leave
637 /* fall through */ 637 /* fall through */
638 638
639 .globl ret_from_except_full 639 .globl ret_from_except_full
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1960fb8c259c..c5a890dca9cf 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -349,12 +349,12 @@ i##n: \
349 349
350/* System reset */ 350/* System reset */
351/* core99 pmac starts the seconary here by changing the vector, and 351/* core99 pmac starts the seconary here by changing the vector, and
352 putting it back to what it was (UnknownException) when done. */ 352 putting it back to what it was (unknown_exception) when done. */
353#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP) 353#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
354 . = 0x100 354 . = 0x100
355 b __secondary_start_gemini 355 b __secondary_start_gemini
356#else 356#else
357 EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) 357 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
358#endif 358#endif
359 359
360/* Machine check */ 360/* Machine check */
@@ -389,7 +389,7 @@ i##n: \
389 cmpwi cr1,r4,0 389 cmpwi cr1,r4,0
390 bne cr1,1f 390 bne cr1,1f
391#endif 391#endif
392 EXC_XFER_STD(0x200, MachineCheckException) 392 EXC_XFER_STD(0x200, machine_check_exception)
393#ifdef CONFIG_PPC_CHRP 393#ifdef CONFIG_PPC_CHRP
3941: b machine_check_in_rtas 3941: b machine_check_in_rtas
395#endif 395#endif
@@ -456,10 +456,10 @@ Alignment:
456 mfspr r5,SPRN_DSISR 456 mfspr r5,SPRN_DSISR
457 stw r5,_DSISR(r11) 457 stw r5,_DSISR(r11)
458 addi r3,r1,STACK_FRAME_OVERHEAD 458 addi r3,r1,STACK_FRAME_OVERHEAD
459 EXC_XFER_EE(0x600, AlignmentException) 459 EXC_XFER_EE(0x600, alignment_exception)
460 460
461/* Program check exception */ 461/* Program check exception */
462 EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) 462 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
463 463
464/* Floating-point unavailable */ 464/* Floating-point unavailable */
465 . = 0x800 465 . = 0x800
@@ -467,13 +467,13 @@ FPUnavailable:
467 EXCEPTION_PROLOG 467 EXCEPTION_PROLOG
468 bne load_up_fpu /* if from user, just load it up */ 468 bne load_up_fpu /* if from user, just load it up */
469 addi r3,r1,STACK_FRAME_OVERHEAD 469 addi r3,r1,STACK_FRAME_OVERHEAD
470 EXC_XFER_EE_LITE(0x800, KernelFP) 470 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
471 471
472/* Decrementer */ 472/* Decrementer */
473 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) 473 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
474 474
475 EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) 475 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
476 EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) 476 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
477 477
478/* System call */ 478/* System call */
479 . = 0xc00 479 . = 0xc00
@@ -482,8 +482,8 @@ SystemCall:
482 EXC_XFER_EE_LITE(0xc00, DoSyscall) 482 EXC_XFER_EE_LITE(0xc00, DoSyscall)
483 483
484/* Single step - not used on 601 */ 484/* Single step - not used on 601 */
485 EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) 485 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
486 EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) 486 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
487 487
488/* 488/*
489 * The Altivec unavailable trap is at 0x0f20. Foo. 489 * The Altivec unavailable trap is at 0x0f20. Foo.
@@ -502,7 +502,7 @@ SystemCall:
502Trap_0f: 502Trap_0f:
503 EXCEPTION_PROLOG 503 EXCEPTION_PROLOG
504 addi r3,r1,STACK_FRAME_OVERHEAD 504 addi r3,r1,STACK_FRAME_OVERHEAD
505 EXC_XFER_EE(0xf00, UnknownException) 505 EXC_XFER_EE(0xf00, unknown_exception)
506 506
507/* 507/*
508 * Handle TLB miss for instruction on 603/603e. 508 * Handle TLB miss for instruction on 603/603e.
@@ -702,44 +702,44 @@ DataStoreTLBMiss:
702 rfi 702 rfi
703 703
704#ifndef CONFIG_ALTIVEC 704#ifndef CONFIG_ALTIVEC
705#define AltivecAssistException UnknownException 705#define altivec_assist_exception unknown_exception
706#endif 706#endif
707 707
708 EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE) 708 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
709 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) 709 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
710 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 710 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
711#ifdef CONFIG_POWER4 711#ifdef CONFIG_POWER4
712 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 712 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
713 EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE) 713 EXCEPTION(0x1700, Trap_17, altivec_assist_exception, EXC_XFER_EE)
714 EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD) 714 EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
715#else /* !CONFIG_POWER4 */ 715#else /* !CONFIG_POWER4 */
716 EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE) 716 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
717 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) 717 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
718 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 718 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
719#endif /* CONFIG_POWER4 */ 719#endif /* CONFIG_POWER4 */
720 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 720 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
721 EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) 721 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
722 EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) 722 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
723 EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) 723 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
724 EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) 724 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
725 EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) 725 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
726 EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) 726 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
727 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) 727 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
728 EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE) 728 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
729 EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE) 729 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
730 EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE) 730 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
731 EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE) 731 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
732 EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE) 732 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
733 EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE) 733 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
734 EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE) 734 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
735 EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE) 735 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
736 EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE) 736 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
737 EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE) 737 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
738 EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE) 738 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
739 EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE) 739 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
740 EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE) 740 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
741 EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE) 741 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
742 EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE) 742 EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
743 743
744 .globl mol_trampoline 744 .globl mol_trampoline
745 .set mol_trampoline, i0x2f00 745 .set mol_trampoline, i0x2f00
@@ -751,7 +751,7 @@ AltiVecUnavailable:
751#ifdef CONFIG_ALTIVEC 751#ifdef CONFIG_ALTIVEC
752 bne load_up_altivec /* if from user, just load it up */ 752 bne load_up_altivec /* if from user, just load it up */
753#endif /* CONFIG_ALTIVEC */ 753#endif /* CONFIG_ALTIVEC */
754 EXC_XFER_EE_LITE(0xf20, AltivecUnavailException) 754 EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
755 755
756#ifdef CONFIG_PPC64BRIDGE 756#ifdef CONFIG_PPC64BRIDGE
757DataAccess: 757DataAccess:
@@ -767,12 +767,12 @@ DataSegment:
767 addi r3,r1,STACK_FRAME_OVERHEAD 767 addi r3,r1,STACK_FRAME_OVERHEAD
768 mfspr r4,SPRN_DAR 768 mfspr r4,SPRN_DAR
769 stw r4,_DAR(r11) 769 stw r4,_DAR(r11)
770 EXC_XFER_STD(0x380, UnknownException) 770 EXC_XFER_STD(0x380, unknown_exception)
771 771
772InstructionSegment: 772InstructionSegment:
773 EXCEPTION_PROLOG 773 EXCEPTION_PROLOG
774 addi r3,r1,STACK_FRAME_OVERHEAD 774 addi r3,r1,STACK_FRAME_OVERHEAD
775 EXC_XFER_STD(0x480, UnknownException) 775 EXC_XFER_STD(0x480, unknown_exception)
776#endif /* CONFIG_PPC64BRIDGE */ 776#endif /* CONFIG_PPC64BRIDGE */
777 777
778#ifdef CONFIG_ALTIVEC 778#ifdef CONFIG_ALTIVEC
@@ -804,7 +804,7 @@ load_up_altivec:
804 beq 1f 804 beq 1f
805 add r4,r4,r6 805 add r4,r4,r6
806 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ 806 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
807 SAVE_32VR(0,r10,r4) 807 SAVE_32VRS(0,r10,r4)
808 mfvscr vr0 808 mfvscr vr0
809 li r10,THREAD_VSCR 809 li r10,THREAD_VSCR
810 stvx vr0,r10,r4 810 stvx vr0,r10,r4
@@ -824,7 +824,7 @@ load_up_altivec:
824 stw r4,THREAD_USED_VR(r5) 824 stw r4,THREAD_USED_VR(r5)
825 lvx vr0,r10,r5 825 lvx vr0,r10,r5
826 mtvscr vr0 826 mtvscr vr0
827 REST_32VR(0,r10,r5) 827 REST_32VRS(0,r10,r5)
828#ifndef CONFIG_SMP 828#ifndef CONFIG_SMP
829 subi r4,r5,THREAD 829 subi r4,r5,THREAD
830 sub r4,r4,r6 830 sub r4,r4,r6
@@ -870,7 +870,7 @@ giveup_altivec:
870 addi r3,r3,THREAD /* want THREAD of task */ 870 addi r3,r3,THREAD /* want THREAD of task */
871 lwz r5,PT_REGS(r3) 871 lwz r5,PT_REGS(r3)
872 cmpwi 0,r5,0 872 cmpwi 0,r5,0
873 SAVE_32VR(0, r4, r3) 873 SAVE_32VRS(0, r4, r3)
874 mfvscr vr0 874 mfvscr vr0
875 li r4,THREAD_VSCR 875 li r4,THREAD_VSCR
876 stvx vr0,r4,r3 876 stvx vr0,r4,r3
@@ -916,7 +916,7 @@ relocate_kernel:
916copy_and_flush: 916copy_and_flush:
917 addi r5,r5,-4 917 addi r5,r5,-4
918 addi r6,r6,-4 918 addi r6,r6,-4
9194: li r0,L1_CACHE_LINE_SIZE/4 9194: li r0,L1_CACHE_BYTES/4
920 mtctr r0 920 mtctr r0
9213: addi r6,r6,4 /* copy a cache line */ 9213: addi r6,r6,4 /* copy a cache line */
922 lwzx r0,r6,r4 922 lwzx r0,r6,r4
@@ -1059,7 +1059,6 @@ __secondary_start:
1059 1059
1060 lis r3,-KERNELBASE@h 1060 lis r3,-KERNELBASE@h
1061 mr r4,r24 1061 mr r4,r24
1062 bl identify_cpu
1063 bl call_setup_cpu /* Call setup_cpu for this CPU */ 1062 bl call_setup_cpu /* Call setup_cpu for this CPU */
1064#ifdef CONFIG_6xx 1063#ifdef CONFIG_6xx
1065 lis r3,-KERNELBASE@h 1064 lis r3,-KERNELBASE@h
@@ -1109,11 +1108,6 @@ __secondary_start:
1109 * Those generic dummy functions are kept for CPUs not 1108 * Those generic dummy functions are kept for CPUs not
1110 * included in CONFIG_6xx 1109 * included in CONFIG_6xx
1111 */ 1110 */
1112_GLOBAL(__setup_cpu_power3)
1113 blr
1114_GLOBAL(__setup_cpu_generic)
1115 blr
1116
1117#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) 1111#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
1118_GLOBAL(__save_cpu_setup) 1112_GLOBAL(__save_cpu_setup)
1119 blr 1113 blr
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 599245b0407e..8b49679fad54 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -309,13 +309,13 @@ skpinv: addi r4,r4,1 /* Increment */
309 309
310interrupt_base: 310interrupt_base:
311 /* Critical Input Interrupt */ 311 /* Critical Input Interrupt */
312 CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) 312 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
313 313
314 /* Machine Check Interrupt */ 314 /* Machine Check Interrupt */
315#ifdef CONFIG_440A 315#ifdef CONFIG_440A
316 MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 316 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
317#else 317#else
318 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 318 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
319#endif 319#endif
320 320
321 /* Data Storage Interrupt */ 321 /* Data Storage Interrupt */
@@ -442,7 +442,7 @@ interrupt_base:
442#ifdef CONFIG_PPC_FPU 442#ifdef CONFIG_PPC_FPU
443 FP_UNAVAILABLE_EXCEPTION 443 FP_UNAVAILABLE_EXCEPTION
444#else 444#else
445 EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) 445 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
446#endif 446#endif
447 447
448 /* System Call Interrupt */ 448 /* System Call Interrupt */
@@ -451,21 +451,21 @@ interrupt_base:
451 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 451 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
452 452
453 /* Auxillary Processor Unavailable Interrupt */ 453 /* Auxillary Processor Unavailable Interrupt */
454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) 454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
455 455
456 /* Decrementer Interrupt */ 456 /* Decrementer Interrupt */
457 DECREMENTER_EXCEPTION 457 DECREMENTER_EXCEPTION
458 458
459 /* Fixed Internal Timer Interrupt */ 459 /* Fixed Internal Timer Interrupt */
460 /* TODO: Add FIT support */ 460 /* TODO: Add FIT support */
461 EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE) 461 EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
462 462
463 /* Watchdog Timer Interrupt */ 463 /* Watchdog Timer Interrupt */
464 /* TODO: Add watchdog support */ 464 /* TODO: Add watchdog support */
465#ifdef CONFIG_BOOKE_WDT 465#ifdef CONFIG_BOOKE_WDT
466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) 466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
467#else 467#else
468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException) 468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
469#endif 469#endif
470 470
471 /* Data TLB Error Interrupt */ 471 /* Data TLB Error Interrupt */
@@ -743,14 +743,18 @@ _GLOBAL(set_context)
743 * goes at the beginning of the data segment, which is page-aligned. 743 * goes at the beginning of the data segment, which is page-aligned.
744 */ 744 */
745 .data 745 .data
746_GLOBAL(sdata) 746 .align 12
747_GLOBAL(empty_zero_page) 747 .globl sdata
748sdata:
749 .globl empty_zero_page
750empty_zero_page:
748 .space 4096 751 .space 4096
749 752
750/* 753/*
751 * To support >32-bit physical addresses, we use an 8KB pgdir. 754 * To support >32-bit physical addresses, we use an 8KB pgdir.
752 */ 755 */
753_GLOBAL(swapper_pg_dir) 756 .globl swapper_pg_dir
757swapper_pg_dir:
754 .space 8192 758 .space 8192
755 759
756/* Reserved 4k for the critical exception stack & 4k for the machine 760/* Reserved 4k for the critical exception stack & 4k for the machine
@@ -759,13 +763,15 @@ _GLOBAL(swapper_pg_dir)
759 .align 12 763 .align 12
760exception_stack_bottom: 764exception_stack_bottom:
761 .space BOOKE_EXCEPTION_STACK_SIZE 765 .space BOOKE_EXCEPTION_STACK_SIZE
762_GLOBAL(exception_stack_top) 766 .globl exception_stack_top
767exception_stack_top:
763 768
764/* 769/*
765 * This space gets a copy of optional info passed to us by the bootstrap 770 * This space gets a copy of optional info passed to us by the bootstrap
766 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 771 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
767 */ 772 */
768_GLOBAL(cmd_line) 773 .globl cmd_line
774cmd_line:
769 .space 512 775 .space 512
770 776
771/* 777/*
@@ -774,5 +780,3 @@ _GLOBAL(cmd_line)
774 */ 780 */
775abatron_pteptrs: 781abatron_pteptrs:
776 .space 8 782 .space 8
777
778
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S
index 8562b807b37c..10c261c67021 100644
--- a/arch/ppc/kernel/head_4xx.S
+++ b/arch/ppc/kernel/head_4xx.S
@@ -245,12 +245,12 @@ label:
245/* 245/*
246 * 0x0100 - Critical Interrupt Exception 246 * 0x0100 - Critical Interrupt Exception
247 */ 247 */
248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException) 248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
249 249
250/* 250/*
251 * 0x0200 - Machine Check Exception 251 * 0x0200 - Machine Check Exception
252 */ 252 */
253 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 253 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
254 254
255/* 255/*
256 * 0x0300 - Data Storage Exception 256 * 0x0300 - Data Storage Exception
@@ -405,7 +405,7 @@ label:
405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ 405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
406 stw r4,_DEAR(r11) 406 stw r4,_DEAR(r11)
407 addi r3,r1,STACK_FRAME_OVERHEAD 407 addi r3,r1,STACK_FRAME_OVERHEAD
408 EXC_XFER_EE(0x600, AlignmentException) 408 EXC_XFER_EE(0x600, alignment_exception)
409 409
410/* 0x0700 - Program Exception */ 410/* 0x0700 - Program Exception */
411 START_EXCEPTION(0x0700, ProgramCheck) 411 START_EXCEPTION(0x0700, ProgramCheck)
@@ -413,21 +413,21 @@ label:
413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */ 413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
414 stw r4,_ESR(r11) 414 stw r4,_ESR(r11)
415 addi r3,r1,STACK_FRAME_OVERHEAD 415 addi r3,r1,STACK_FRAME_OVERHEAD
416 EXC_XFER_STD(0x700, ProgramCheckException) 416 EXC_XFER_STD(0x700, program_check_exception)
417 417
418 EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE) 418 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE) 419 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE) 420 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
421 EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE) 421 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
422 422
423/* 0x0C00 - System Call Exception */ 423/* 0x0C00 - System Call Exception */
424 START_EXCEPTION(0x0C00, SystemCall) 424 START_EXCEPTION(0x0C00, SystemCall)
425 NORMAL_EXCEPTION_PROLOG 425 NORMAL_EXCEPTION_PROLOG
426 EXC_XFER_EE_LITE(0xc00, DoSyscall) 426 EXC_XFER_EE_LITE(0xc00, DoSyscall)
427 427
428 EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE) 428 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE) 429 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
430 EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE) 430 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
431 431
432/* 0x1000 - Programmable Interval Timer (PIT) Exception */ 432/* 0x1000 - Programmable Interval Timer (PIT) Exception */
433 START_EXCEPTION(0x1000, Decrementer) 433 START_EXCEPTION(0x1000, Decrementer)
@@ -444,14 +444,14 @@ label:
444 444
445/* 0x1010 - Fixed Interval Timer (FIT) Exception 445/* 0x1010 - Fixed Interval Timer (FIT) Exception
446*/ 446*/
447 STND_EXCEPTION(0x1010, FITException, UnknownException) 447 STND_EXCEPTION(0x1010, FITException, unknown_exception)
448 448
449/* 0x1020 - Watchdog Timer (WDT) Exception 449/* 0x1020 - Watchdog Timer (WDT) Exception
450*/ 450*/
451#ifdef CONFIG_BOOKE_WDT 451#ifdef CONFIG_BOOKE_WDT
452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) 452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
453#else 453#else
454 CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException) 454 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
455#endif 455#endif
456#endif 456#endif
457 457
@@ -656,25 +656,25 @@ label:
656 mfspr r10, SPRN_SPRG0 656 mfspr r10, SPRN_SPRG0
657 b InstructionAccess 657 b InstructionAccess
658 658
659 EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE) 659 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE) 660 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 661 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
662 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 662 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
663#ifdef CONFIG_IBM405_ERR51 663#ifdef CONFIG_IBM405_ERR51
664 /* 405GP errata 51 */ 664 /* 405GP errata 51 */
665 START_EXCEPTION(0x1700, Trap_17) 665 START_EXCEPTION(0x1700, Trap_17)
666 b DTLBMiss 666 b DTLBMiss
667#else 667#else
668 EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) 668 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
669#endif 669#endif
670 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 670 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 671 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE) 672 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE) 673 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE) 674 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE) 675 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE) 676 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE) 677 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
678 678
679/* Check for a single step debug exception while in an exception 679/* Check for a single step debug exception while in an exception
680 * handler before state has been saved. This is to catch the case 680 * handler before state has been saved. This is to catch the case
@@ -988,10 +988,14 @@ _GLOBAL(set_context)
988 * goes at the beginning of the data segment, which is page-aligned. 988 * goes at the beginning of the data segment, which is page-aligned.
989 */ 989 */
990 .data 990 .data
991_GLOBAL(sdata) 991 .align 12
992_GLOBAL(empty_zero_page) 992 .globl sdata
993sdata:
994 .globl empty_zero_page
995empty_zero_page:
993 .space 4096 996 .space 4096
994_GLOBAL(swapper_pg_dir) 997 .globl swapper_pg_dir
998swapper_pg_dir:
995 .space 4096 999 .space 4096
996 1000
997 1001
@@ -1001,12 +1005,14 @@ _GLOBAL(swapper_pg_dir)
1001exception_stack_bottom: 1005exception_stack_bottom:
1002 .space 4096 1006 .space 4096
1003critical_stack_top: 1007critical_stack_top:
1004_GLOBAL(exception_stack_top) 1008 .globl exception_stack_top
1009exception_stack_top:
1005 1010
1006/* This space gets a copy of optional info passed to us by the bootstrap 1011/* This space gets a copy of optional info passed to us by the bootstrap
1007 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 1012 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1008 */ 1013 */
1009_GLOBAL(cmd_line) 1014 .globl cmd_line
1015cmd_line:
1010 .space 512 1016 .space 512
1011 1017
1012/* Room for two PTE pointers, usually the kernel and current user pointers 1018/* Room for two PTE pointers, usually the kernel and current user pointers
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index cb1a3a54a026..de0978742221 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -203,7 +203,7 @@ i##n: \
203 ret_from_except) 203 ret_from_except)
204 204
205/* System reset */ 205/* System reset */
206 EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) 206 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
207 207
208/* Machine check */ 208/* Machine check */
209 . = 0x200 209 . = 0x200
@@ -214,7 +214,7 @@ MachineCheck:
214 mfspr r5,SPRN_DSISR 214 mfspr r5,SPRN_DSISR
215 stw r5,_DSISR(r11) 215 stw r5,_DSISR(r11)
216 addi r3,r1,STACK_FRAME_OVERHEAD 216 addi r3,r1,STACK_FRAME_OVERHEAD
217 EXC_XFER_STD(0x200, MachineCheckException) 217 EXC_XFER_STD(0x200, machine_check_exception)
218 218
219/* Data access exception. 219/* Data access exception.
220 * This is "never generated" by the MPC8xx. We jump to it for other 220 * This is "never generated" by the MPC8xx. We jump to it for other
@@ -252,20 +252,20 @@ Alignment:
252 mfspr r5,SPRN_DSISR 252 mfspr r5,SPRN_DSISR
253 stw r5,_DSISR(r11) 253 stw r5,_DSISR(r11)
254 addi r3,r1,STACK_FRAME_OVERHEAD 254 addi r3,r1,STACK_FRAME_OVERHEAD
255 EXC_XFER_EE(0x600, AlignmentException) 255 EXC_XFER_EE(0x600, alignment_exception)
256 256
257/* Program check exception */ 257/* Program check exception */
258 EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) 258 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
259 259
260/* No FPU on MPC8xx. This exception is not supposed to happen. 260/* No FPU on MPC8xx. This exception is not supposed to happen.
261*/ 261*/
262 EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD) 262 EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
263 263
264/* Decrementer */ 264/* Decrementer */
265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) 265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
266 266
267 EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) 267 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
268 EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) 268 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
269 269
270/* System call */ 270/* System call */
271 . = 0xc00 271 . = 0xc00
@@ -274,9 +274,9 @@ SystemCall:
274 EXC_XFER_EE_LITE(0xc00, DoSyscall) 274 EXC_XFER_EE_LITE(0xc00, DoSyscall)
275 275
276/* Single step - not used on 601 */ 276/* Single step - not used on 601 */
277 EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) 277 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
278 EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) 278 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
279 EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE) 279 EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
280 280
281/* On the MPC8xx, this is a software emulation interrupt. It occurs 281/* On the MPC8xx, this is a software emulation interrupt. It occurs
282 * for all unimplemented and illegal instructions. 282 * for all unimplemented and illegal instructions.
@@ -540,22 +540,22 @@ DataTLBError:
540#endif 540#endif
541 b DataAccess 541 b DataAccess
542 542
543 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 543 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
544 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 544 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
545 EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) 545 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
546 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 546 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
547 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 547 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
548 EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) 548 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
549 EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) 549 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
550 550
551/* On the MPC8xx, these next four traps are used for development 551/* On the MPC8xx, these next four traps are used for development
552 * support of breakpoints and such. Someday I will get around to 552 * support of breakpoints and such. Someday I will get around to
553 * using them. 553 * using them.
554 */ 554 */
555 EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) 555 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
556 EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) 556 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
557 EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) 557 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
558 EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) 558 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
559 559
560 . = 0x2000 560 . = 0x2000
561 561
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index 9342acf12e72..aeb349b47af3 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -335,7 +335,7 @@ label:
335 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ 335 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
336 stw r4,_DEAR(r11); \ 336 stw r4,_DEAR(r11); \
337 addi r3,r1,STACK_FRAME_OVERHEAD; \ 337 addi r3,r1,STACK_FRAME_OVERHEAD; \
338 EXC_XFER_EE(0x0600, AlignmentException) 338 EXC_XFER_EE(0x0600, alignment_exception)
339 339
340#define PROGRAM_EXCEPTION \ 340#define PROGRAM_EXCEPTION \
341 START_EXCEPTION(Program) \ 341 START_EXCEPTION(Program) \
@@ -343,7 +343,7 @@ label:
343 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \ 343 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
344 stw r4,_ESR(r11); \ 344 stw r4,_ESR(r11); \
345 addi r3,r1,STACK_FRAME_OVERHEAD; \ 345 addi r3,r1,STACK_FRAME_OVERHEAD; \
346 EXC_XFER_STD(0x0700, ProgramCheckException) 346 EXC_XFER_STD(0x0700, program_check_exception)
347 347
348#define DECREMENTER_EXCEPTION \ 348#define DECREMENTER_EXCEPTION \
349 START_EXCEPTION(Decrementer) \ 349 START_EXCEPTION(Decrementer) \
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
index 8e52e8408316..5063c603fad4 100644
--- a/arch/ppc/kernel/head_fsl_booke.S
+++ b/arch/ppc/kernel/head_fsl_booke.S
@@ -426,14 +426,14 @@ skpinv: addi r6,r6,1 /* Increment */
426 426
427interrupt_base: 427interrupt_base:
428 /* Critical Input Interrupt */ 428 /* Critical Input Interrupt */
429 CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) 429 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
430 430
431 /* Machine Check Interrupt */ 431 /* Machine Check Interrupt */
432#ifdef CONFIG_E200 432#ifdef CONFIG_E200
433 /* no RFMCI, MCSRRs on E200 */ 433 /* no RFMCI, MCSRRs on E200 */
434 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 434 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
435#else 435#else
436 MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 436 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
437#endif 437#endif
438 438
439 /* Data Storage Interrupt */ 439 /* Data Storage Interrupt */
@@ -542,9 +542,9 @@ interrupt_base:
542#else 542#else
543#ifdef CONFIG_E200 543#ifdef CONFIG_E200
544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */ 544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */
545 EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE) 545 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
546#else 546#else
547 EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) 547 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
548#endif 548#endif
549#endif 549#endif
550 550
@@ -554,20 +554,20 @@ interrupt_base:
554 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 554 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
555 555
556 /* Auxillary Processor Unavailable Interrupt */ 556 /* Auxillary Processor Unavailable Interrupt */
557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) 557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
558 558
559 /* Decrementer Interrupt */ 559 /* Decrementer Interrupt */
560 DECREMENTER_EXCEPTION 560 DECREMENTER_EXCEPTION
561 561
562 /* Fixed Internal Timer Interrupt */ 562 /* Fixed Internal Timer Interrupt */
563 /* TODO: Add FIT support */ 563 /* TODO: Add FIT support */
564 EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) 564 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
565 565
566 /* Watchdog Timer Interrupt */ 566 /* Watchdog Timer Interrupt */
567#ifdef CONFIG_BOOKE_WDT 567#ifdef CONFIG_BOOKE_WDT
568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) 568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
569#else 569#else
570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException) 570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
571#endif 571#endif
572 572
573 /* Data TLB Error Interrupt */ 573 /* Data TLB Error Interrupt */
@@ -696,21 +696,21 @@ interrupt_base:
696 addi r3,r1,STACK_FRAME_OVERHEAD 696 addi r3,r1,STACK_FRAME_OVERHEAD
697 EXC_XFER_EE_LITE(0x2010, KernelSPE) 697 EXC_XFER_EE_LITE(0x2010, KernelSPE)
698#else 698#else
699 EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) 699 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
700#endif /* CONFIG_SPE */ 700#endif /* CONFIG_SPE */
701 701
702 /* SPE Floating Point Data */ 702 /* SPE Floating Point Data */
703#ifdef CONFIG_SPE 703#ifdef CONFIG_SPE
704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); 704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
705#else 705#else
706 EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) 706 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
707#endif /* CONFIG_SPE */ 707#endif /* CONFIG_SPE */
708 708
709 /* SPE Floating Point Round */ 709 /* SPE Floating Point Round */
710 EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) 710 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
711 711
712 /* Performance Monitor */ 712 /* Performance Monitor */
713 EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD) 713 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
714 714
715 715
716 /* Debug Interrupt */ 716 /* Debug Interrupt */
@@ -853,7 +853,7 @@ load_up_spe:
853 cmpi 0,r4,0 853 cmpi 0,r4,0
854 beq 1f 854 beq 1f
855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ 855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
856 SAVE_32EVR(0,r10,r4) 856 SAVE_32EVRS(0,r10,r4)
857 evxor evr10, evr10, evr10 /* clear out evr10 */ 857 evxor evr10, evr10, evr10 /* clear out evr10 */
858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ 858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
859 li r5,THREAD_ACC 859 li r5,THREAD_ACC
@@ -873,7 +873,7 @@ load_up_spe:
873 stw r4,THREAD_USED_SPE(r5) 873 stw r4,THREAD_USED_SPE(r5)
874 evlddx evr4,r10,r5 874 evlddx evr4,r10,r5
875 evmra evr4,evr4 875 evmra evr4,evr4
876 REST_32EVR(0,r10,r5) 876 REST_32EVRS(0,r10,r5)
877#ifndef CONFIG_SMP 877#ifndef CONFIG_SMP
878 subi r4,r5,THREAD 878 subi r4,r5,THREAD
879 stw r4,last_task_used_spe@l(r3) 879 stw r4,last_task_used_spe@l(r3)
@@ -963,7 +963,7 @@ _GLOBAL(giveup_spe)
963 addi r3,r3,THREAD /* want THREAD of task */ 963 addi r3,r3,THREAD /* want THREAD of task */
964 lwz r5,PT_REGS(r3) 964 lwz r5,PT_REGS(r3)
965 cmpi 0,r5,0 965 cmpi 0,r5,0
966 SAVE_32EVR(0, r4, r3) 966 SAVE_32EVRS(0, r4, r3)
967 evxor evr6, evr6, evr6 /* clear out evr6 */ 967 evxor evr6, evr6, evr6 /* clear out evr6 */
968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ 968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
969 li r4,THREAD_ACC 969 li r4,THREAD_ACC
@@ -1028,10 +1028,14 @@ _GLOBAL(set_context)
1028 * goes at the beginning of the data segment, which is page-aligned. 1028 * goes at the beginning of the data segment, which is page-aligned.
1029 */ 1029 */
1030 .data 1030 .data
1031_GLOBAL(sdata) 1031 .align 12
1032_GLOBAL(empty_zero_page) 1032 .globl sdata
1033sdata:
1034 .globl empty_zero_page
1035empty_zero_page:
1033 .space 4096 1036 .space 4096
1034_GLOBAL(swapper_pg_dir) 1037 .globl swapper_pg_dir
1038swapper_pg_dir:
1035 .space 4096 1039 .space 4096
1036 1040
1037/* Reserved 4k for the critical exception stack & 4k for the machine 1041/* Reserved 4k for the critical exception stack & 4k for the machine
@@ -1040,13 +1044,15 @@ _GLOBAL(swapper_pg_dir)
1040 .align 12 1044 .align 12
1041exception_stack_bottom: 1045exception_stack_bottom:
1042 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS 1046 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1043_GLOBAL(exception_stack_top) 1047 .globl exception_stack_top
1048exception_stack_top:
1044 1049
1045/* 1050/*
1046 * This space gets a copy of optional info passed to us by the bootstrap 1051 * This space gets a copy of optional info passed to us by the bootstrap
1047 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 1052 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1048 */ 1053 */
1049_GLOBAL(cmd_line) 1054 .globl cmd_line
1055cmd_line:
1050 .space 512 1056 .space 512
1051 1057
1052/* 1058/*
@@ -1055,4 +1061,3 @@ _GLOBAL(cmd_line)
1055 */ 1061 */
1056abatron_pteptrs: 1062abatron_pteptrs:
1057 .space 8 1063 .space 8
1058
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index fba29c876b62..11e5b44713f7 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -32,6 +32,7 @@
32#include <asm/cache.h> 32#include <asm/cache.h>
33#include <asm/cputable.h> 33#include <asm/cputable.h>
34#include <asm/machdep.h> 34#include <asm/machdep.h>
35#include <asm/smp.h>
35 36
36void default_idle(void) 37void default_idle(void)
37{ 38{
@@ -74,7 +75,7 @@ void cpu_idle(void)
74/* 75/*
75 * Register the sysctl to set/clear powersave_nap. 76 * Register the sysctl to set/clear powersave_nap.
76 */ 77 */
77extern unsigned long powersave_nap; 78extern int powersave_nap;
78 79
79static ctl_table powersave_nap_ctl_table[]={ 80static ctl_table powersave_nap_ctl_table[]={
80 { 81 {
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index 8843f3af230f..772e428aaa59 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -57,6 +57,7 @@
57#include <asm/cache.h> 57#include <asm/cache.h>
58#include <asm/prom.h> 58#include <asm/prom.h>
59#include <asm/ptrace.h> 59#include <asm/ptrace.h>
60#include <asm/machdep.h>
60 61
61#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 62#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
62 63
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
index 861115249b35..d7f4e982b539 100644
--- a/arch/ppc/kernel/l2cr.S
+++ b/arch/ppc/kernel/l2cr.S
@@ -203,7 +203,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
203 * L1 icache 203 * L1 icache
204 */ 204 */
205 b 20f 205 b 20f
206 .balign L1_CACHE_LINE_SIZE 206 .balign L1_CACHE_BYTES
20722: 20722:
208 sync 208 sync
209 mtspr SPRN_L2CR,r3 209 mtspr SPRN_L2CR,r3
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 90d917d2e856..2350f3e09f95 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -125,9 +125,8 @@ _GLOBAL(identify_cpu)
1251: 1251:
126 addis r6,r3,cur_cpu_spec@ha 126 addis r6,r3,cur_cpu_spec@ha
127 addi r6,r6,cur_cpu_spec@l 127 addi r6,r6,cur_cpu_spec@l
128 slwi r4,r4,2
129 sub r8,r8,r3 128 sub r8,r8,r3
130 stwx r8,r4,r6 129 stw r8,0(r6)
131 blr 130 blr
132 131
133/* 132/*
@@ -186,19 +185,18 @@ _GLOBAL(do_cpu_ftr_fixups)
186 * 185 *
187 * Setup function is called with: 186 * Setup function is called with:
188 * r3 = data offset 187 * r3 = data offset
189 * r4 = CPU number 188 * r4 = ptr to CPU spec (relocated)
190 * r5 = ptr to CPU spec (relocated)
191 */ 189 */
192_GLOBAL(call_setup_cpu) 190_GLOBAL(call_setup_cpu)
193 addis r5,r3,cur_cpu_spec@ha 191 addis r4,r3,cur_cpu_spec@ha
194 addi r5,r5,cur_cpu_spec@l 192 addi r4,r4,cur_cpu_spec@l
195 slwi r4,r24,2 193 lwz r4,0(r4)
196 lwzx r5,r4,r5 194 add r4,r4,r3
195 lwz r5,CPU_SPEC_SETUP(r4)
196 cmpi 0,r5,0
197 add r5,r5,r3 197 add r5,r5,r3
198 lwz r6,CPU_SPEC_SETUP(r5) 198 beqlr
199 add r6,r6,r3 199 mtctr r5
200 mtctr r6
201 mr r4,r24
202 bctr 200 bctr
203 201
204#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx) 202#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
@@ -273,134 +271,6 @@ _GLOBAL(low_choose_7447a_dfs)
273 271
274#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */ 272#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
275 273
276/* void local_save_flags_ptr(unsigned long *flags) */
277_GLOBAL(local_save_flags_ptr)
278 mfmsr r4
279 stw r4,0(r3)
280 blr
281 /*
282 * Need these nops here for taking over save/restore to
283 * handle lost intrs
284 * -- Cort
285 */
286 nop
287 nop
288 nop
289 nop
290 nop
291 nop
292 nop
293 nop
294 nop
295 nop
296 nop
297 nop
298 nop
299 nop
300 nop
301 nop
302 nop
303_GLOBAL(local_save_flags_ptr_end)
304
305/* void local_irq_restore(unsigned long flags) */
306_GLOBAL(local_irq_restore)
307/*
308 * Just set/clear the MSR_EE bit through restore/flags but do not
309 * change anything else. This is needed by the RT system and makes
310 * sense anyway.
311 * -- Cort
312 */
313 mfmsr r4
314 /* Copy all except the MSR_EE bit from r4 (current MSR value)
315 to r3. This is the sort of thing the rlwimi instruction is
316 designed for. -- paulus. */
317 rlwimi r3,r4,0,17,15
318 /* Check if things are setup the way we want _already_. */
319 cmpw 0,r3,r4
320 beqlr
3211: SYNC
322 mtmsr r3
323 SYNC
324 blr
325 nop
326 nop
327 nop
328 nop
329 nop
330 nop
331 nop
332 nop
333 nop
334 nop
335 nop
336 nop
337 nop
338 nop
339 nop
340 nop
341 nop
342 nop
343 nop
344_GLOBAL(local_irq_restore_end)
345
346_GLOBAL(local_irq_disable)
347 mfmsr r0 /* Get current interrupt state */
348 rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
349 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
350 SYNC /* Some chip revs have problems here... */
351 mtmsr r0 /* Update machine state */
352 blr /* Done */
353 /*
354 * Need these nops here for taking over save/restore to
355 * handle lost intrs
356 * -- Cort
357 */
358 nop
359 nop
360 nop
361 nop
362 nop
363 nop
364 nop
365 nop
366 nop
367 nop
368 nop
369 nop
370 nop
371 nop
372 nop
373_GLOBAL(local_irq_disable_end)
374
375_GLOBAL(local_irq_enable)
376 mfmsr r3 /* Get current state */
377 ori r3,r3,MSR_EE /* Turn on 'EE' bit */
378 SYNC /* Some chip revs have problems here... */
379 mtmsr r3 /* Update machine state */
380 blr
381 /*
382 * Need these nops here for taking over save/restore to
383 * handle lost intrs
384 * -- Cort
385 */
386 nop
387 nop
388 nop
389 nop
390 nop
391 nop
392 nop
393 nop
394 nop
395 nop
396 nop
397 nop
398 nop
399 nop
400 nop
401 nop
402_GLOBAL(local_irq_enable_end)
403
404/* 274/*
405 * complement mask on the msr then "or" some values on. 275 * complement mask on the msr then "or" some values on.
406 * _nmask_and_or_msr(nmask, value_to_or) 276 * _nmask_and_or_msr(nmask, value_to_or)
@@ -628,21 +498,21 @@ _GLOBAL(flush_icache_range)
628BEGIN_FTR_SECTION 498BEGIN_FTR_SECTION
629 blr /* for 601, do nothing */ 499 blr /* for 601, do nothing */
630END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 500END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
631 li r5,L1_CACHE_LINE_SIZE-1 501 li r5,L1_CACHE_BYTES-1
632 andc r3,r3,r5 502 andc r3,r3,r5
633 subf r4,r3,r4 503 subf r4,r3,r4
634 add r4,r4,r5 504 add r4,r4,r5
635 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 505 srwi. r4,r4,L1_CACHE_SHIFT
636 beqlr 506 beqlr
637 mtctr r4 507 mtctr r4
638 mr r6,r3 508 mr r6,r3
6391: dcbst 0,r3 5091: dcbst 0,r3
640 addi r3,r3,L1_CACHE_LINE_SIZE 510 addi r3,r3,L1_CACHE_BYTES
641 bdnz 1b 511 bdnz 1b
642 sync /* wait for dcbst's to get to ram */ 512 sync /* wait for dcbst's to get to ram */
643 mtctr r4 513 mtctr r4
6442: icbi 0,r6 5142: icbi 0,r6
645 addi r6,r6,L1_CACHE_LINE_SIZE 515 addi r6,r6,L1_CACHE_BYTES
646 bdnz 2b 516 bdnz 2b
647 sync /* additional sync needed on g4 */ 517 sync /* additional sync needed on g4 */
648 isync 518 isync
@@ -655,16 +525,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
655 * clean_dcache_range(unsigned long start, unsigned long stop) 525 * clean_dcache_range(unsigned long start, unsigned long stop)
656 */ 526 */
657_GLOBAL(clean_dcache_range) 527_GLOBAL(clean_dcache_range)
658 li r5,L1_CACHE_LINE_SIZE-1 528 li r5,L1_CACHE_BYTES-1
659 andc r3,r3,r5 529 andc r3,r3,r5
660 subf r4,r3,r4 530 subf r4,r3,r4
661 add r4,r4,r5 531 add r4,r4,r5
662 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 532 srwi. r4,r4,L1_CACHE_SHIFT
663 beqlr 533 beqlr
664 mtctr r4 534 mtctr r4
665 535
6661: dcbst 0,r3 5361: dcbst 0,r3
667 addi r3,r3,L1_CACHE_LINE_SIZE 537 addi r3,r3,L1_CACHE_BYTES
668 bdnz 1b 538 bdnz 1b
669 sync /* wait for dcbst's to get to ram */ 539 sync /* wait for dcbst's to get to ram */
670 blr 540 blr
@@ -676,16 +546,16 @@ _GLOBAL(clean_dcache_range)
676 * flush_dcache_range(unsigned long start, unsigned long stop) 546 * flush_dcache_range(unsigned long start, unsigned long stop)
677 */ 547 */
678_GLOBAL(flush_dcache_range) 548_GLOBAL(flush_dcache_range)
679 li r5,L1_CACHE_LINE_SIZE-1 549 li r5,L1_CACHE_BYTES-1
680 andc r3,r3,r5 550 andc r3,r3,r5
681 subf r4,r3,r4 551 subf r4,r3,r4
682 add r4,r4,r5 552 add r4,r4,r5
683 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 553 srwi. r4,r4,L1_CACHE_SHIFT
684 beqlr 554 beqlr
685 mtctr r4 555 mtctr r4
686 556
6871: dcbf 0,r3 5571: dcbf 0,r3
688 addi r3,r3,L1_CACHE_LINE_SIZE 558 addi r3,r3,L1_CACHE_BYTES
689 bdnz 1b 559 bdnz 1b
690 sync /* wait for dcbst's to get to ram */ 560 sync /* wait for dcbst's to get to ram */
691 blr 561 blr
@@ -698,16 +568,16 @@ _GLOBAL(flush_dcache_range)
698 * invalidate_dcache_range(unsigned long start, unsigned long stop) 568 * invalidate_dcache_range(unsigned long start, unsigned long stop)
699 */ 569 */
700_GLOBAL(invalidate_dcache_range) 570_GLOBAL(invalidate_dcache_range)
701 li r5,L1_CACHE_LINE_SIZE-1 571 li r5,L1_CACHE_BYTES-1
702 andc r3,r3,r5 572 andc r3,r3,r5
703 subf r4,r3,r4 573 subf r4,r3,r4
704 add r4,r4,r5 574 add r4,r4,r5
705 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 575 srwi. r4,r4,L1_CACHE_SHIFT
706 beqlr 576 beqlr
707 mtctr r4 577 mtctr r4
708 578
7091: dcbi 0,r3 5791: dcbi 0,r3
710 addi r3,r3,L1_CACHE_LINE_SIZE 580 addi r3,r3,L1_CACHE_BYTES
711 bdnz 1b 581 bdnz 1b
712 sync /* wait for dcbi's to get to ram */ 582 sync /* wait for dcbi's to get to ram */
713 blr 583 blr
@@ -728,7 +598,7 @@ _GLOBAL(flush_dcache_all)
728 mtctr r4 598 mtctr r4
729 lis r5, KERNELBASE@h 599 lis r5, KERNELBASE@h
7301: lwz r3, 0(r5) /* Load one word from every line */ 6001: lwz r3, 0(r5) /* Load one word from every line */
731 addi r5, r5, L1_CACHE_LINE_SIZE 601 addi r5, r5, L1_CACHE_BYTES
732 bdnz 1b 602 bdnz 1b
733 blr 603 blr
734#endif /* CONFIG_NOT_COHERENT_CACHE */ 604#endif /* CONFIG_NOT_COHERENT_CACHE */
@@ -746,16 +616,16 @@ BEGIN_FTR_SECTION
746 blr /* for 601, do nothing */ 616 blr /* for 601, do nothing */
747END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 617END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
748 rlwinm r3,r3,0,0,19 /* Get page base address */ 618 rlwinm r3,r3,0,0,19 /* Get page base address */
749 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ 619 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
750 mtctr r4 620 mtctr r4
751 mr r6,r3 621 mr r6,r3
7520: dcbst 0,r3 /* Write line to ram */ 6220: dcbst 0,r3 /* Write line to ram */
753 addi r3,r3,L1_CACHE_LINE_SIZE 623 addi r3,r3,L1_CACHE_BYTES
754 bdnz 0b 624 bdnz 0b
755 sync 625 sync
756 mtctr r4 626 mtctr r4
7571: icbi 0,r6 6271: icbi 0,r6
758 addi r6,r6,L1_CACHE_LINE_SIZE 628 addi r6,r6,L1_CACHE_BYTES
759 bdnz 1b 629 bdnz 1b
760 sync 630 sync
761 isync 631 isync
@@ -778,16 +648,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
778 mtmsr r0 648 mtmsr r0
779 isync 649 isync
780 rlwinm r3,r3,0,0,19 /* Get page base address */ 650 rlwinm r3,r3,0,0,19 /* Get page base address */
781 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ 651 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
782 mtctr r4 652 mtctr r4
783 mr r6,r3 653 mr r6,r3
7840: dcbst 0,r3 /* Write line to ram */ 6540: dcbst 0,r3 /* Write line to ram */
785 addi r3,r3,L1_CACHE_LINE_SIZE 655 addi r3,r3,L1_CACHE_BYTES
786 bdnz 0b 656 bdnz 0b
787 sync 657 sync
788 mtctr r4 658 mtctr r4
7891: icbi 0,r6 6591: icbi 0,r6
790 addi r6,r6,L1_CACHE_LINE_SIZE 660 addi r6,r6,L1_CACHE_BYTES
791 bdnz 1b 661 bdnz 1b
792 sync 662 sync
793 mtmsr r10 /* restore DR */ 663 mtmsr r10 /* restore DR */
@@ -802,7 +672,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
802 * void clear_pages(void *page, int order) ; 672 * void clear_pages(void *page, int order) ;
803 */ 673 */
804_GLOBAL(clear_pages) 674_GLOBAL(clear_pages)
805 li r0,4096/L1_CACHE_LINE_SIZE 675 li r0,4096/L1_CACHE_BYTES
806 slw r0,r0,r4 676 slw r0,r0,r4
807 mtctr r0 677 mtctr r0
808#ifdef CONFIG_8xx 678#ifdef CONFIG_8xx
@@ -814,7 +684,7 @@ _GLOBAL(clear_pages)
814#else 684#else
8151: dcbz 0,r3 6851: dcbz 0,r3
816#endif 686#endif
817 addi r3,r3,L1_CACHE_LINE_SIZE 687 addi r3,r3,L1_CACHE_BYTES
818 bdnz 1b 688 bdnz 1b
819 blr 689 blr
820 690
@@ -840,7 +710,7 @@ _GLOBAL(copy_page)
840 710
841#ifdef CONFIG_8xx 711#ifdef CONFIG_8xx
842 /* don't use prefetch on 8xx */ 712 /* don't use prefetch on 8xx */
843 li r0,4096/L1_CACHE_LINE_SIZE 713 li r0,4096/L1_CACHE_BYTES
844 mtctr r0 714 mtctr r0
8451: COPY_16_BYTES 7151: COPY_16_BYTES
846 bdnz 1b 716 bdnz 1b
@@ -854,13 +724,13 @@ _GLOBAL(copy_page)
854 li r11,4 724 li r11,4
855 mtctr r0 725 mtctr r0
85611: dcbt r11,r4 72611: dcbt r11,r4
857 addi r11,r11,L1_CACHE_LINE_SIZE 727 addi r11,r11,L1_CACHE_BYTES
858 bdnz 11b 728 bdnz 11b
859#else /* MAX_COPY_PREFETCH == 1 */ 729#else /* MAX_COPY_PREFETCH == 1 */
860 dcbt r5,r4 730 dcbt r5,r4
861 li r11,L1_CACHE_LINE_SIZE+4 731 li r11,L1_CACHE_BYTES+4
862#endif /* MAX_COPY_PREFETCH */ 732#endif /* MAX_COPY_PREFETCH */
863 li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH 733 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
864 crclr 4*cr0+eq 734 crclr 4*cr0+eq
8652: 7352:
866 mtctr r0 736 mtctr r0
@@ -868,12 +738,12 @@ _GLOBAL(copy_page)
868 dcbt r11,r4 738 dcbt r11,r4
869 dcbz r5,r3 739 dcbz r5,r3
870 COPY_16_BYTES 740 COPY_16_BYTES
871#if L1_CACHE_LINE_SIZE >= 32 741#if L1_CACHE_BYTES >= 32
872 COPY_16_BYTES 742 COPY_16_BYTES
873#if L1_CACHE_LINE_SIZE >= 64 743#if L1_CACHE_BYTES >= 64
874 COPY_16_BYTES 744 COPY_16_BYTES
875 COPY_16_BYTES 745 COPY_16_BYTES
876#if L1_CACHE_LINE_SIZE >= 128 746#if L1_CACHE_BYTES >= 128
877 COPY_16_BYTES 747 COPY_16_BYTES
878 COPY_16_BYTES 748 COPY_16_BYTES
879 COPY_16_BYTES 749 COPY_16_BYTES
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 854e45beb387..ad4ef2aaa6ab 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -21,6 +21,7 @@
21#include <asm/byteorder.h> 21#include <asm/byteorder.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <asm/machdep.h>
24 25
25#undef DEBUG 26#undef DEBUG
26 27
@@ -53,7 +54,7 @@ static u8* pci_to_OF_bus_map;
53/* By default, we don't re-assign bus numbers. We do this only on 54/* By default, we don't re-assign bus numbers. We do this only on
54 * some pmacs 55 * some pmacs
55 */ 56 */
56int pci_assign_all_busses; 57int pci_assign_all_buses;
57 58
58struct pci_controller* hose_head; 59struct pci_controller* hose_head;
59struct pci_controller** hose_tail = &hose_head; 60struct pci_controller** hose_tail = &hose_head;
@@ -644,7 +645,7 @@ pcibios_alloc_controller(void)
644/* 645/*
645 * Functions below are used on OpenFirmware machines. 646 * Functions below are used on OpenFirmware machines.
646 */ 647 */
647static void __openfirmware 648static void
648make_one_node_map(struct device_node* node, u8 pci_bus) 649make_one_node_map(struct device_node* node, u8 pci_bus)
649{ 650{
650 int *bus_range; 651 int *bus_range;
@@ -678,7 +679,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
678 } 679 }
679} 680}
680 681
681void __openfirmware 682void
682pcibios_make_OF_bus_map(void) 683pcibios_make_OF_bus_map(void)
683{ 684{
684 int i; 685 int i;
@@ -720,7 +721,7 @@ pcibios_make_OF_bus_map(void)
720 721
721typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data); 722typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
722 723
723static struct device_node* __openfirmware 724static struct device_node*
724scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data) 725scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
725{ 726{
726 struct device_node* sub_node; 727 struct device_node* sub_node;
@@ -761,7 +762,7 @@ scan_OF_pci_childs_iterator(struct device_node* node, void* data)
761 return 0; 762 return 0;
762} 763}
763 764
764static struct device_node* __openfirmware 765static struct device_node*
765scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn) 766scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
766{ 767{
767 u8 filter_data[2] = {bus, dev_fn}; 768 u8 filter_data[2] = {bus, dev_fn};
@@ -813,18 +814,20 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
813 /* Now, lookup childs of the hose */ 814 /* Now, lookup childs of the hose */
814 return scan_OF_childs_for_device(node->child, busnr, devfn); 815 return scan_OF_childs_for_device(node->child, busnr, devfn);
815} 816}
817EXPORT_SYMBOL(pci_busdev_to_OF_node);
816 818
817struct device_node* 819struct device_node*
818pci_device_to_OF_node(struct pci_dev *dev) 820pci_device_to_OF_node(struct pci_dev *dev)
819{ 821{
820 return pci_busdev_to_OF_node(dev->bus, dev->devfn); 822 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
821} 823}
824EXPORT_SYMBOL(pci_device_to_OF_node);
822 825
823/* This routine is meant to be used early during boot, when the 826/* This routine is meant to be used early during boot, when the
824 * PCI bus numbers have not yet been assigned, and you need to 827 * PCI bus numbers have not yet been assigned, and you need to
825 * issue PCI config cycles to an OF device. 828 * issue PCI config cycles to an OF device.
826 * It could also be used to "fix" RTAS config cycles if you want 829 * It could also be used to "fix" RTAS config cycles if you want
827 * to set pci_assign_all_busses to 1 and still use RTAS for PCI 830 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
828 * config cycles. 831 * config cycles.
829 */ 832 */
830struct pci_controller* 833struct pci_controller*
@@ -842,7 +845,7 @@ pci_find_hose_for_OF_device(struct device_node* node)
842 return NULL; 845 return NULL;
843} 846}
844 847
845static int __openfirmware 848static int
846find_OF_pci_device_filter(struct device_node* node, void* data) 849find_OF_pci_device_filter(struct device_node* node, void* data)
847{ 850{
848 return ((void *)node == data); 851 return ((void *)node == data);
@@ -890,6 +893,7 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
890 } 893 }
891 return -ENODEV; 894 return -ENODEV;
892} 895}
896EXPORT_SYMBOL(pci_device_from_OF_node);
893 897
894void __init 898void __init
895pci_process_bridge_OF_ranges(struct pci_controller *hose, 899pci_process_bridge_OF_ranges(struct pci_controller *hose,
@@ -1030,6 +1034,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
1030} 1034}
1031static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); 1035static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1032 1036
1037#else /* CONFIG_PPC_OF */
1038void pcibios_make_OF_bus_map(void)
1039{
1040}
1033#endif /* CONFIG_PPC_OF */ 1041#endif /* CONFIG_PPC_OF */
1034 1042
1035/* Add sysfs properties */ 1043/* Add sysfs properties */
@@ -1262,12 +1270,12 @@ pcibios_init(void)
1262 1270
1263 /* Scan all of the recorded PCI controllers. */ 1271 /* Scan all of the recorded PCI controllers. */
1264 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { 1272 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1265 if (pci_assign_all_busses) 1273 if (pci_assign_all_buses)
1266 hose->first_busno = next_busno; 1274 hose->first_busno = next_busno;
1267 hose->last_busno = 0xff; 1275 hose->last_busno = 0xff;
1268 bus = pci_scan_bus(hose->first_busno, hose->ops, hose); 1276 bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
1269 hose->last_busno = bus->subordinate; 1277 hose->last_busno = bus->subordinate;
1270 if (pci_assign_all_busses || next_busno <= hose->last_busno) 1278 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1271 next_busno = hose->last_busno + pcibios_assign_bus_offset; 1279 next_busno = hose->last_busno + pcibios_assign_bus_offset;
1272 } 1280 }
1273 pci_bus_count = next_busno; 1281 pci_bus_count = next_busno;
@@ -1276,7 +1284,7 @@ pcibios_init(void)
1276 * numbers vs. kernel bus numbers since we may have to 1284 * numbers vs. kernel bus numbers since we may have to
1277 * remap them. 1285 * remap them.
1278 */ 1286 */
1279 if (pci_assign_all_busses && have_of) 1287 if (pci_assign_all_buses && have_of)
1280 pcibios_make_OF_bus_map(); 1288 pcibios_make_OF_bus_map();
1281 1289
1282 /* Do machine dependent PCI interrupt routing */ 1290 /* Do machine dependent PCI interrupt routing */
diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c
deleted file mode 100644
index 22df9a596a0f..000000000000
--- a/arch/ppc/kernel/perfmon.c
+++ /dev/null
@@ -1,96 +0,0 @@
1/* kernel/perfmon.c
2 * PPC 32 Performance Monitor Infrastructure
3 *
4 * Author: Andy Fleming
5 * Copyright (c) 2004 Freescale Semiconductor, Inc
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/stddef.h>
18#include <linux/unistd.h>
19#include <linux/ptrace.h>
20#include <linux/slab.h>
21#include <linux/user.h>
22#include <linux/a.out.h>
23#include <linux/interrupt.h>
24#include <linux/config.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/prctl.h>
28
29#include <asm/pgtable.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <asm/io.h>
33#include <asm/reg.h>
34#include <asm/xmon.h>
35
36/* A lock to regulate grabbing the interrupt */
37DEFINE_SPINLOCK(perfmon_lock);
38
39#if defined (CONFIG_FSL_BOOKE) && !defined (CONFIG_E200)
40static void dummy_perf(struct pt_regs *regs)
41{
42 unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
43
44 pmgc0 &= ~PMGC0_PMIE;
45 mtpmr(PMRN_PMGC0, pmgc0);
46}
47
48#elif defined(CONFIG_6xx)
49/* Ensure exceptions are disabled */
50static void dummy_perf(struct pt_regs *regs)
51{
52 unsigned int mmcr0 = mfspr(SPRN_MMCR0);
53
54 mmcr0 &= ~MMCR0_PMXE;
55 mtspr(SPRN_MMCR0, mmcr0);
56}
57#else
58static void dummy_perf(struct pt_regs *regs)
59{
60}
61#endif
62
63void (*perf_irq)(struct pt_regs *) = dummy_perf;
64
65/* Grab the interrupt, if it's free.
66 * Returns 0 on success, -1 if the interrupt is taken already */
67int request_perfmon_irq(void (*handler)(struct pt_regs *))
68{
69 int err = 0;
70
71 spin_lock(&perfmon_lock);
72
73 if (perf_irq == dummy_perf)
74 perf_irq = handler;
75 else {
76 pr_info("perfmon irq already handled by %p\n", perf_irq);
77 err = -1;
78 }
79
80 spin_unlock(&perfmon_lock);
81
82 return err;
83}
84
85void free_perfmon_irq(void)
86{
87 spin_lock(&perfmon_lock);
88
89 perf_irq = dummy_perf;
90
91 spin_unlock(&perfmon_lock);
92}
93
94EXPORT_SYMBOL(perf_irq);
95EXPORT_SYMBOL(request_perfmon_irq);
96EXPORT_SYMBOL(free_perfmon_irq);
diff --git a/arch/ppc/kernel/perfmon_fsl_booke.c b/arch/ppc/kernel/perfmon_fsl_booke.c
index 03526bfb0840..32455dfcc36b 100644
--- a/arch/ppc/kernel/perfmon_fsl_booke.c
+++ b/arch/ppc/kernel/perfmon_fsl_booke.c
@@ -32,7 +32,7 @@
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/reg.h> 33#include <asm/reg.h>
34#include <asm/xmon.h> 34#include <asm/xmon.h>
35#include <asm/perfmon.h> 35#include <asm/pmc.h>
36 36
37static inline u32 get_pmlca(int ctr); 37static inline u32 get_pmlca(int ctr);
38static inline void set_pmlca(int ctr, u32 pmlca); 38static inline void set_pmlca(int ctr, u32 pmlca);
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 88f6bb7b6964..dcc83440f203 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -53,10 +53,10 @@
53 53
54extern void transfer_to_handler(void); 54extern void transfer_to_handler(void);
55extern void do_IRQ(struct pt_regs *regs); 55extern void do_IRQ(struct pt_regs *regs);
56extern void MachineCheckException(struct pt_regs *regs); 56extern void machine_check_exception(struct pt_regs *regs);
57extern void AlignmentException(struct pt_regs *regs); 57extern void alignment_exception(struct pt_regs *regs);
58extern void ProgramCheckException(struct pt_regs *regs); 58extern void program_check_exception(struct pt_regs *regs);
59extern void SingleStepException(struct pt_regs *regs); 59extern void single_step_exception(struct pt_regs *regs);
60extern int do_signal(sigset_t *, struct pt_regs *); 60extern int do_signal(sigset_t *, struct pt_regs *);
61extern int pmac_newworld; 61extern int pmac_newworld;
62extern int sys_sigreturn(struct pt_regs *regs); 62extern int sys_sigreturn(struct pt_regs *regs);
@@ -72,10 +72,10 @@ EXPORT_SYMBOL(clear_user_page);
72EXPORT_SYMBOL(do_signal); 72EXPORT_SYMBOL(do_signal);
73EXPORT_SYMBOL(transfer_to_handler); 73EXPORT_SYMBOL(transfer_to_handler);
74EXPORT_SYMBOL(do_IRQ); 74EXPORT_SYMBOL(do_IRQ);
75EXPORT_SYMBOL(MachineCheckException); 75EXPORT_SYMBOL(machine_check_exception);
76EXPORT_SYMBOL(AlignmentException); 76EXPORT_SYMBOL(alignment_exception);
77EXPORT_SYMBOL(ProgramCheckException); 77EXPORT_SYMBOL(program_check_exception);
78EXPORT_SYMBOL(SingleStepException); 78EXPORT_SYMBOL(single_step_exception);
79EXPORT_SYMBOL(sys_sigreturn); 79EXPORT_SYMBOL(sys_sigreturn);
80EXPORT_SYMBOL(ppc_n_lost_interrupts); 80EXPORT_SYMBOL(ppc_n_lost_interrupts);
81EXPORT_SYMBOL(ppc_lost_interrupts); 81EXPORT_SYMBOL(ppc_lost_interrupts);
@@ -230,9 +230,6 @@ EXPORT_SYMBOL(find_all_nodes);
230EXPORT_SYMBOL(get_property); 230EXPORT_SYMBOL(get_property);
231EXPORT_SYMBOL(request_OF_resource); 231EXPORT_SYMBOL(request_OF_resource);
232EXPORT_SYMBOL(release_OF_resource); 232EXPORT_SYMBOL(release_OF_resource);
233EXPORT_SYMBOL(pci_busdev_to_OF_node);
234EXPORT_SYMBOL(pci_device_to_OF_node);
235EXPORT_SYMBOL(pci_device_from_OF_node);
236EXPORT_SYMBOL(of_find_node_by_name); 233EXPORT_SYMBOL(of_find_node_by_name);
237EXPORT_SYMBOL(of_find_node_by_type); 234EXPORT_SYMBOL(of_find_node_by_type);
238EXPORT_SYMBOL(of_find_compatible_node); 235EXPORT_SYMBOL(of_find_compatible_node);
@@ -272,16 +269,6 @@ EXPORT_SYMBOL(screen_info);
272#endif 269#endif
273 270
274EXPORT_SYMBOL(__delay); 271EXPORT_SYMBOL(__delay);
275#ifndef INLINE_IRQS
276EXPORT_SYMBOL(local_irq_enable);
277EXPORT_SYMBOL(local_irq_enable_end);
278EXPORT_SYMBOL(local_irq_disable);
279EXPORT_SYMBOL(local_irq_disable_end);
280EXPORT_SYMBOL(local_save_flags_ptr);
281EXPORT_SYMBOL(local_save_flags_ptr_end);
282EXPORT_SYMBOL(local_irq_restore);
283EXPORT_SYMBOL(local_irq_restore_end);
284#endif
285EXPORT_SYMBOL(timer_interrupt); 272EXPORT_SYMBOL(timer_interrupt);
286EXPORT_SYMBOL(irq_desc); 273EXPORT_SYMBOL(irq_desc);
287EXPORT_SYMBOL(tb_ticks_per_jiffy); 274EXPORT_SYMBOL(tb_ticks_per_jiffy);
@@ -336,10 +323,6 @@ extern long *intercept_table;
336EXPORT_SYMBOL(intercept_table); 323EXPORT_SYMBOL(intercept_table);
337#endif /* CONFIG_PPC_STD_MMU */ 324#endif /* CONFIG_PPC_STD_MMU */
338EXPORT_SYMBOL(cur_cpu_spec); 325EXPORT_SYMBOL(cur_cpu_spec);
339#ifdef CONFIG_PPC_PMAC
340extern unsigned long agp_special_page;
341EXPORT_SYMBOL(agp_special_page);
342#endif
343#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 326#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
344EXPORT_SYMBOL(__mtdcr); 327EXPORT_SYMBOL(__mtdcr);
345EXPORT_SYMBOL(__mfdcr); 328EXPORT_SYMBOL(__mfdcr);
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 82de66e4db6d..6d60c40598e7 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
152} 152}
153#endif /* defined(CHECK_STACK) */ 153#endif /* defined(CHECK_STACK) */
154 154
155#ifdef CONFIG_ALTIVEC 155/*
156int 156 * Make sure the floating-point register state in the
157dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs) 157 * the thread_struct is up to date for task tsk.
158 */
159void flush_fp_to_thread(struct task_struct *tsk)
158{ 160{
159 if (regs->msr & MSR_VEC) 161 if (tsk->thread.regs) {
160 giveup_altivec(current); 162 /*
161 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs)); 163 * We need to disable preemption here because if we didn't,
164 * another process could get scheduled after the regs->msr
165 * test but before we have finished saving the FP registers
166 * to the thread_struct. That process could take over the
167 * FPU, and then when we get scheduled again we would store
168 * bogus values for the remaining FP registers.
169 */
170 preempt_disable();
171 if (tsk->thread.regs->msr & MSR_FP) {
172#ifdef CONFIG_SMP
173 /*
174 * This should only ever be called for current or
175 * for a stopped child process. Since we save away
176 * the FP register state on context switch on SMP,
177 * there is something wrong if a stopped child appears
178 * to still have its FP state in the CPU registers.
179 */
180 BUG_ON(tsk != current);
181#endif
182 giveup_fpu(current);
183 }
184 preempt_enable();
185 }
186}
187
188void enable_kernel_fp(void)
189{
190 WARN_ON(preemptible());
191
192#ifdef CONFIG_SMP
193 if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
194 giveup_fpu(current);
195 else
196 giveup_fpu(NULL); /* just enables FP for kernel */
197#else
198 giveup_fpu(last_task_used_math);
199#endif /* CONFIG_SMP */
200}
201EXPORT_SYMBOL(enable_kernel_fp);
202
203int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
204{
205 preempt_disable();
206 if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
207 giveup_fpu(tsk);
208 preempt_enable();
209 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
162 return 1; 210 return 1;
163} 211}
164 212
165void 213#ifdef CONFIG_ALTIVEC
166enable_kernel_altivec(void) 214void enable_kernel_altivec(void)
167{ 215{
168 WARN_ON(preemptible()); 216 WARN_ON(preemptible());
169 217
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
177#endif /* __SMP __ */ 225#endif /* __SMP __ */
178} 226}
179EXPORT_SYMBOL(enable_kernel_altivec); 227EXPORT_SYMBOL(enable_kernel_altivec);
180#endif /* CONFIG_ALTIVEC */
181 228
182#ifdef CONFIG_SPE 229/*
183int 230 * Make sure the VMX/Altivec register state in the
184dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs) 231 * the thread_struct is up to date for task tsk.
232 */
233void flush_altivec_to_thread(struct task_struct *tsk)
185{ 234{
186 if (regs->msr & MSR_SPE) 235 if (tsk->thread.regs) {
187 giveup_spe(current); 236 preempt_disable();
188 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */ 237 if (tsk->thread.regs->msr & MSR_VEC) {
189 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35); 238#ifdef CONFIG_SMP
239 BUG_ON(tsk != current);
240#endif
241 giveup_altivec(current);
242 }
243 preempt_enable();
244 }
245}
246
247int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
248{
249 if (regs->msr & MSR_VEC)
250 giveup_altivec(current);
251 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
190 return 1; 252 return 1;
191} 253}
254#endif /* CONFIG_ALTIVEC */
192 255
256#ifdef CONFIG_SPE
193void 257void
194enable_kernel_spe(void) 258enable_kernel_spe(void)
195{ 259{
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
205#endif /* __SMP __ */ 269#endif /* __SMP __ */
206} 270}
207EXPORT_SYMBOL(enable_kernel_spe); 271EXPORT_SYMBOL(enable_kernel_spe);
208#endif /* CONFIG_SPE */
209 272
210void 273void flush_spe_to_thread(struct task_struct *tsk)
211enable_kernel_fp(void)
212{ 274{
213 WARN_ON(preemptible()); 275 if (tsk->thread.regs) {
214 276 preempt_disable();
277 if (tsk->thread.regs->msr & MSR_SPE) {
215#ifdef CONFIG_SMP 278#ifdef CONFIG_SMP
216 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) 279 BUG_ON(tsk != current);
217 giveup_fpu(current); 280#endif
218 else 281 giveup_spe(current);
219 giveup_fpu(NULL); /* just enables FP for kernel */ 282 }
220#else 283 preempt_enable();
221 giveup_fpu(last_task_used_math); 284 }
222#endif /* CONFIG_SMP */
223} 285}
224EXPORT_SYMBOL(enable_kernel_fp);
225 286
226int 287int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
227dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
228{ 288{
229 preempt_disable(); 289 if (regs->msr & MSR_SPE)
230 if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP)) 290 giveup_spe(current);
231 giveup_fpu(tsk); 291 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
232 preempt_enable(); 292 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
233 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
234 return 1; 293 return 1;
235} 294}
295#endif /* CONFIG_SPE */
236 296
237struct task_struct *__switch_to(struct task_struct *prev, 297struct task_struct *__switch_to(struct task_struct *prev,
238 struct task_struct *new) 298 struct task_struct *new)
@@ -557,14 +617,16 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
557 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); 617 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
558} 618}
559 619
560int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6, 620int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
621 unsigned long p4, unsigned long p5, unsigned long p6,
561 struct pt_regs *regs) 622 struct pt_regs *regs)
562{ 623{
563 CHECK_FULL_REGS(regs); 624 CHECK_FULL_REGS(regs);
564 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); 625 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
565} 626}
566 627
567int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6, 628int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
629 unsigned long p4, unsigned long p5, unsigned long p6,
568 struct pt_regs *regs) 630 struct pt_regs *regs)
569{ 631{
570 CHECK_FULL_REGS(regs); 632 CHECK_FULL_REGS(regs);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 545cfd0fab59..6bcb85d2b7fd 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -71,7 +71,8 @@ struct ide_machdep_calls ppc_ide_md;
71unsigned long boot_mem_size; 71unsigned long boot_mem_size;
72 72
73unsigned long ISA_DMA_THRESHOLD; 73unsigned long ISA_DMA_THRESHOLD;
74unsigned long DMA_MODE_READ, DMA_MODE_WRITE; 74unsigned int DMA_MODE_READ;
75unsigned int DMA_MODE_WRITE;
75 76
76#ifdef CONFIG_PPC_MULTIPLATFORM 77#ifdef CONFIG_PPC_MULTIPLATFORM
77int _machine = 0; 78int _machine = 0;
@@ -82,8 +83,18 @@ extern void pmac_init(unsigned long r3, unsigned long r4,
82 unsigned long r5, unsigned long r6, unsigned long r7); 83 unsigned long r5, unsigned long r6, unsigned long r7);
83extern void chrp_init(unsigned long r3, unsigned long r4, 84extern void chrp_init(unsigned long r3, unsigned long r4,
84 unsigned long r5, unsigned long r6, unsigned long r7); 85 unsigned long r5, unsigned long r6, unsigned long r7);
86
87dev_t boot_dev;
85#endif /* CONFIG_PPC_MULTIPLATFORM */ 88#endif /* CONFIG_PPC_MULTIPLATFORM */
86 89
90int have_of;
91EXPORT_SYMBOL(have_of);
92
93#ifdef __DO_IRQ_CANON
94int ppc_do_canonicalize_irqs;
95EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
96#endif
97
87#ifdef CONFIG_MAGIC_SYSRQ 98#ifdef CONFIG_MAGIC_SYSRQ
88unsigned long SYSRQ_KEY = 0x54; 99unsigned long SYSRQ_KEY = 0x54;
89#endif /* CONFIG_MAGIC_SYSRQ */ 100#endif /* CONFIG_MAGIC_SYSRQ */
@@ -185,18 +196,18 @@ int show_cpuinfo(struct seq_file *m, void *v)
185 seq_printf(m, "processor\t: %d\n", i); 196 seq_printf(m, "processor\t: %d\n", i);
186 seq_printf(m, "cpu\t\t: "); 197 seq_printf(m, "cpu\t\t: ");
187 198
188 if (cur_cpu_spec[i]->pvr_mask) 199 if (cur_cpu_spec->pvr_mask)
189 seq_printf(m, "%s", cur_cpu_spec[i]->cpu_name); 200 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
190 else 201 else
191 seq_printf(m, "unknown (%08x)", pvr); 202 seq_printf(m, "unknown (%08x)", pvr);
192#ifdef CONFIG_ALTIVEC 203#ifdef CONFIG_ALTIVEC
193 if (cur_cpu_spec[i]->cpu_features & CPU_FTR_ALTIVEC) 204 if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
194 seq_printf(m, ", altivec supported"); 205 seq_printf(m, ", altivec supported");
195#endif 206#endif
196 seq_printf(m, "\n"); 207 seq_printf(m, "\n");
197 208
198#ifdef CONFIG_TAU 209#ifdef CONFIG_TAU
199 if (cur_cpu_spec[i]->cpu_features & CPU_FTR_TAU) { 210 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
200#ifdef CONFIG_TAU_AVERAGE 211#ifdef CONFIG_TAU_AVERAGE
201 /* more straightforward, but potentially misleading */ 212 /* more straightforward, but potentially misleading */
202 seq_printf(m, "temperature \t: %u C (uncalibrated)\n", 213 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
@@ -339,7 +350,7 @@ early_init(int r3, int r4, int r5)
339 * Assume here that all clock rates are the same in a 350 * Assume here that all clock rates are the same in a
340 * smp system. -- Cort 351 * smp system. -- Cort
341 */ 352 */
342int __openfirmware 353int
343of_show_percpuinfo(struct seq_file *m, int i) 354of_show_percpuinfo(struct seq_file *m, int i)
344{ 355{
345 struct device_node *cpu_node; 356 struct device_node *cpu_node;
@@ -404,11 +415,15 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
404 _machine = _MACH_prep; 415 _machine = _MACH_prep;
405 } 416 }
406 417
418#ifdef CONFIG_PPC_PREP
407 /* not much more to do here, if prep */ 419 /* not much more to do here, if prep */
408 if (_machine == _MACH_prep) { 420 if (_machine == _MACH_prep) {
409 prep_init(r3, r4, r5, r6, r7); 421 prep_init(r3, r4, r5, r6, r7);
410 return; 422 return;
411 } 423 }
424#endif
425
426 have_of = 1;
412 427
413 /* prom_init has already been called from __start */ 428 /* prom_init has already been called from __start */
414 if (boot_infos) 429 if (boot_infos)
@@ -479,12 +494,16 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
479#endif /* CONFIG_ADB */ 494#endif /* CONFIG_ADB */
480 495
481 switch (_machine) { 496 switch (_machine) {
497#ifdef CONFIG_PPC_PMAC
482 case _MACH_Pmac: 498 case _MACH_Pmac:
483 pmac_init(r3, r4, r5, r6, r7); 499 pmac_init(r3, r4, r5, r6, r7);
484 break; 500 break;
501#endif
502#ifdef CONFIG_PPC_CHRP
485 case _MACH_chrp: 503 case _MACH_chrp:
486 chrp_init(r3, r4, r5, r6, r7); 504 chrp_init(r3, r4, r5, r6, r7);
487 break; 505 break;
506#endif
488 } 507 }
489} 508}
490 509
@@ -721,7 +740,7 @@ void __init setup_arch(char **cmdline_p)
721#endif 740#endif
722 741
723#ifdef CONFIG_XMON 742#ifdef CONFIG_XMON
724 xmon_map_scc(); 743 xmon_init(1);
725 if (strstr(cmd_line, "xmon")) 744 if (strstr(cmd_line, "xmon"))
726 xmon(NULL); 745 xmon(NULL);
727#endif /* CONFIG_XMON */ 746#endif /* CONFIG_XMON */
@@ -745,12 +764,12 @@ void __init setup_arch(char **cmdline_p)
745 * for a possibly more accurate value. 764 * for a possibly more accurate value.
746 */ 765 */
747 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) { 766 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
748 dcache_bsize = cur_cpu_spec[0]->dcache_bsize; 767 dcache_bsize = cur_cpu_spec->dcache_bsize;
749 icache_bsize = cur_cpu_spec[0]->icache_bsize; 768 icache_bsize = cur_cpu_spec->icache_bsize;
750 ucache_bsize = 0; 769 ucache_bsize = 0;
751 } else 770 } else
752 ucache_bsize = dcache_bsize = icache_bsize 771 ucache_bsize = dcache_bsize = icache_bsize
753 = cur_cpu_spec[0]->dcache_bsize; 772 = cur_cpu_spec->dcache_bsize;
754 773
755 /* reboot on panic */ 774 /* reboot on panic */
756 panic_timeout = 180; 775 panic_timeout = 180;
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
deleted file mode 100644
index 2244bf91e593..000000000000
--- a/arch/ppc/kernel/signal.c
+++ /dev/null
@@ -1,771 +0,0 @@
1/*
2 * arch/ppc/kernel/signal.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/kernel/signal.c"
8 * Copyright (C) 1991, 1992 Linus Torvalds
9 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/sched.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/kernel.h>
22#include <linux/signal.h>
23#include <linux/errno.h>
24#include <linux/wait.h>
25#include <linux/ptrace.h>
26#include <linux/unistd.h>
27#include <linux/stddef.h>
28#include <linux/elf.h>
29#include <linux/tty.h>
30#include <linux/binfmts.h>
31#include <linux/suspend.h>
32#include <asm/ucontext.h>
33#include <asm/uaccess.h>
34#include <asm/pgtable.h>
35#include <asm/cacheflush.h>
36
37#undef DEBUG_SIG
38
39#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
40
41extern void sigreturn_exit(struct pt_regs *);
42
43#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
44
45int do_signal(sigset_t *oldset, struct pt_regs *regs);
46
47/*
48 * Atomically swap in the new signal mask, and wait for a signal.
49 */
50int
51sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
52 struct pt_regs *regs)
53{
54 sigset_t saveset;
55
56 mask &= _BLOCKABLE;
57 spin_lock_irq(&current->sighand->siglock);
58 saveset = current->blocked;
59 siginitset(&current->blocked, mask);
60 recalc_sigpending();
61 spin_unlock_irq(&current->sighand->siglock);
62
63 regs->result = -EINTR;
64 regs->gpr[3] = EINTR;
65 regs->ccr |= 0x10000000;
66 while (1) {
67 current->state = TASK_INTERRUPTIBLE;
68 schedule();
69 if (do_signal(&saveset, regs))
70 sigreturn_exit(regs);
71 }
72}
73
74int
75sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
76 int p6, int p7, struct pt_regs *regs)
77{
78 sigset_t saveset, newset;
79
80 /* XXX: Don't preclude handling different sized sigset_t's. */
81 if (sigsetsize != sizeof(sigset_t))
82 return -EINVAL;
83
84 if (copy_from_user(&newset, unewset, sizeof(newset)))
85 return -EFAULT;
86 sigdelsetmask(&newset, ~_BLOCKABLE);
87
88 spin_lock_irq(&current->sighand->siglock);
89 saveset = current->blocked;
90 current->blocked = newset;
91 recalc_sigpending();
92 spin_unlock_irq(&current->sighand->siglock);
93
94 regs->result = -EINTR;
95 regs->gpr[3] = EINTR;
96 regs->ccr |= 0x10000000;
97 while (1) {
98 current->state = TASK_INTERRUPTIBLE;
99 schedule();
100 if (do_signal(&saveset, regs))
101 sigreturn_exit(regs);
102 }
103}
104
105
106int
107sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
108 int r6, int r7, int r8, struct pt_regs *regs)
109{
110 return do_sigaltstack(uss, uoss, regs->gpr[1]);
111}
112
113int
114sys_sigaction(int sig, const struct old_sigaction __user *act,
115 struct old_sigaction __user *oact)
116{
117 struct k_sigaction new_ka, old_ka;
118 int ret;
119
120 if (act) {
121 old_sigset_t mask;
122 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
123 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
124 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
125 return -EFAULT;
126 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
127 __get_user(mask, &act->sa_mask);
128 siginitset(&new_ka.sa.sa_mask, mask);
129 }
130
131 ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));
132
133 if (!ret && oact) {
134 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
135 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
136 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
137 return -EFAULT;
138 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
139 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
140 }
141
142 return ret;
143}
144
145/*
146 * When we have signals to deliver, we set up on the
147 * user stack, going down from the original stack pointer:
148 * a sigregs struct
149 * a sigcontext struct
150 * a gap of __SIGNAL_FRAMESIZE bytes
151 *
152 * Each of these things must be a multiple of 16 bytes in size.
153 *
154 */
155struct sigregs {
156 struct mcontext mctx; /* all the register values */
157 /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
158 and 18 fp regs below sp before decrementing it. */
159 int abigap[56];
160};
161
162/* We use the mc_pad field for the signal return trampoline. */
163#define tramp mc_pad
164
165/*
166 * When we have rt signals to deliver, we set up on the
167 * user stack, going down from the original stack pointer:
168 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
169 * a gap of __SIGNAL_FRAMESIZE+16 bytes
170 * (the +16 is to get the siginfo and ucontext in the same
171 * positions as in older kernels).
172 *
173 * Each of these things must be a multiple of 16 bytes in size.
174 *
175 */
176struct rt_sigframe
177{
178 struct siginfo info;
179 struct ucontext uc;
180 /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
181 and 18 fp regs below sp before decrementing it. */
182 int abigap[56];
183};
184
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 *
 * @regs:	kernel-side register image for the interrupted user context
 * @frame:	user-space mcontext to fill in
 * @sigret:	if non-zero, the syscall number to put in the sigreturn
 *		trampoline written to frame->tramp; 0 means no trampoline
 *
 * Returns 0 on success, 1 if any user-space access faults.
 */
static int
save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
{
	/* save general and floating-point registers */
	CHECK_FULL_REGS(regs);
	/* Flush live FP/vector state into current->thread before copying;
	 * preemption is disabled so we can't lose the state to a context
	 * switch between the MSR test and the giveup call. */
	preempt_disable();
	if (regs->msr & MSR_FP)
		giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr && (regs->msr & MSR_VEC))
		giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (current->thread.used_spe && (regs->msr & MSR_SPE))
		giveup_spe(current);
#endif /* CONFIG_SPE */
	preempt_enable();

	if (__copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE)
	    || __copy_to_user(&frame->mc_fregs, current->thread.fpr,
			      ELF_NFPREG * sizeof(double)))
		return 1;

	current->thread.fpscr = 0;	/* turn off all fp exceptions */

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
			return 1;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
			return 1;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		/* the trampoline is executed from user space, so the
		 * icache must see the newly-written instructions */
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
266
/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 *
 * @regs:	kernel-side register image to restore into
 * @sr:		user-space mcontext to restore from
 * @sig:	non-zero when returning from a signal handler (r2/TLS is
 *		restored from the frame); zero for swapcontext-style calls,
 *		where the live r2 is preserved across the copy
 *
 * Returns 0 on success, 1 if any user-space access faults.
 */
static int
restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
{
	unsigned long save_r2 = 0;
#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
	unsigned long msr;
#endif

	/* backup/restore the TLS as we don't want it to be modified */
	if (!sig)
		save_r2 = regs->gpr[2];
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t)))
		return 1;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			     GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return 1;
	if (!sig)
		regs->gpr[2] = save_r2;

	/* force the process to reload the FP registers from
	   current->thread when it next does FP instructions */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
			     sizeof(sr->mc_fregs)))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* force the process to reload the altivec registers from
	   current->thread when it next does altivec instructions */
	regs->msr &= ~MSR_VEC;
	if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		/* frame has no vector state: don't leak the old contents */
		memset(&current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		/* frame has no SPE state: don't leak the old contents */
		memset(&current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
	/* On UP, lazy-switch state may still point at us; drop those
	 * references so the registers we just cleared aren't reused. */
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
	preempt_enable();
#endif
	return 0;
}
345
/*
 * Restore the user process's signal mask.
 * SIGKILL/SIGSTOP (the non-blockable signals) are stripped from *set
 * before it is installed as current->blocked.
 */
static void
restore_sigmask(sigset_t *set)
{
	sigdelsetmask(set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = *set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
358
/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 *
 * Builds an rt_sigframe plus a caller stack frame at @newsp on the user
 * stack, saves the user registers into it, and redirects the user context
 * (nip, lr, gpr1, gpr3-6) so that the handler runs next and returns
 * through the sigreturn trampoline.  On any fault the task gets SIGSEGV.
 */
static void
handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
		 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
		 unsigned long newsp)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	unsigned long origsp = newsp;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	newsp -= sizeof(*rt_sf);
	rt_sf = (struct rt_sigframe __user *) newsp;

	/* create a stack frame for the caller of the handler */
	newsp -= __SIGNAL_FRAMESIZE + 16;

	/* one access_ok() covers every write between newsp and origsp */
	if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __put_user(0, &rt_sf->uc.uc_link)
	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
	    || __put_user(sas_ss_flags(regs->gpr[1]),
			  &rt_sf->uc.uc_stack.ss_flags)
	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
	    || __put_user(&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
	    || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset)))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	if (save_user_regs(regs, frame, __NR_rt_sigreturn))
		goto badframe;

	/* back-chain pointer so the handler's frame links to the old stack */
	if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
		goto badframe;
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;			/* handler arg 1: signo */
	regs->gpr[4] = (unsigned long) &rt_sf->info;	/* arg 2: siginfo */
	regs->gpr[5] = (unsigned long) &rt_sf->uc;	/* arg 3: ucontext */
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	regs->link = (unsigned long) frame->tramp;	/* return -> sigreturn */
	regs->trap = 0;

	return;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	force_sigsegv(sig, current);
}
420
/*
 * Install the sigmask and register state from the user-space ucontext
 * @ucp into the current task.  @sig is passed through to
 * restore_user_regs() (non-zero for a real signal return).
 * Returns 0 on success, -EFAULT on any user-space access fault.
 * Note the sigmask is restored even if restoring the registers fails.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
	    || __get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	restore_sigmask(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
435
/*
 * swapcontext system call: optionally save the current context to
 * @old_ctx, then optionally switch to @new_ctx.  Returns to user space
 * via sigreturn_exit() when a new context is installed.
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;

	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 * NOTE(review): ctx_size is signed and sizeof() is unsigned, so a
	 * negative ctx_size is promoted and passes this check -- confirm
	 * whether that is acceptable here.
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;

	if (old_ctx != NULL) {
		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
		    || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
		    || __copy_to_user(&old_ctx->uc_sigmask,
				      &current->blocked, sizeof(sigset_t))
		    || __put_user(&old_ctx->uc_mcontext, &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	/* probe the first and last bytes of *new_ctx up front, so a plain
	 * unmapped region fails with -EFAULT before we touch regs */
	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);
	sigreturn_exit(regs);
	/* doesn't actually return back to here */
	return 0;
}
480
/*
 * rt_sigreturn system call: undo an rt signal frame set up by
 * handle_rt_signal().  Locates the frame above the handler's stack
 * frame, restores sigmask and registers from it, and restores the
 * sigaltstack setting.  On a bad frame the task gets SIGSEGV.
 */
int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* the frame sits above the dummy caller frame pushed by
	 * handle_rt_signal() (__SIGNAL_FRAMESIZE + 16 bytes) */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(struct rt_sigframe)))
		goto bad;
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return. But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it. -- paulus
	 */
	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);

	sigreturn_exit(regs);	/* doesn't return here */
	return 0;

 bad:
	force_sig(SIGSEGV, current);
	return 0;
}
512
/*
 * debug_setcontext system call: apply @ndbg debug operations (single
 * stepping / branch tracing) from the user array @dbg, then install the
 * context @ctx and return to user space through sigreturn_exit().
 * Debug state is staged in locals and committed only after the whole
 * array has validated, so a bad entry leaves the registers untouched.
 */
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (__copy_from_user(&op, dbg, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			/* BookE/4xx: single step via MSR_DE + DBCR0 bits */
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_msr &= ~MSR_DE;
				new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			}
#else
			/* classic PPC: single step via MSR_SE */
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			/* not supported on BookE/4xx */
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers. After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	current->thread.dbcr0 = new_dbcr0;
#endif

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 *
	 * NOTE(review): unlike sys_swapcontext(), no access_ok() or
	 * first/last-byte probe of @ctx is visible in this function, so
	 * the paragraph above appears stale here -- confirm against the
	 * full file.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return. But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it. -- paulus
	 */
	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);

	sigreturn_exit(regs);
	/* doesn't actually return back to here */

 out:
	return 0;
}
602
/*
 * OK, we're invoking a handler.
 *
 * Set up an old-style (non-siginfo) signal frame: a sigregs area plus a
 * sigcontext at @newsp on the user stack, then redirect the user
 * context so the handler runs next and returns through the sigreturn
 * trampoline.  On any fault the task gets SIGSEGV.
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
	      unsigned long newsp)
{
	struct sigcontext __user *sc;
	struct sigregs __user *frame;
	unsigned long origsp = newsp;

	/* Set up Signal Frame */
	newsp -= sizeof(struct sigregs);
	frame = (struct sigregs __user *) newsp;

	/* Put a sigcontext on the stack */
	newsp -= sizeof(*sc);
	sc = (struct sigcontext __user *) newsp;

	/* create a stack frame for the caller of the handler */
	newsp -= __SIGNAL_FRAMESIZE;

	/* one access_ok() covers every write between newsp and origsp */
	if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
		goto badframe;

	/* oldmask/_unused[3] hold the two words of the 64-bit sigmask */
#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
	    || __put_user(oldset->sig[1], &sc->_unused[3])
	    || __put_user((struct pt_regs __user *)frame, &sc->regs)
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
		goto badframe;

	/* back-chain pointer so the handler's frame links to the old stack */
	if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
		goto badframe;
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;			/* handler arg 1: signo */
	regs->gpr[4] = (unsigned long) sc;	/* handler arg 2: sigcontext */
	regs->nip = (unsigned long) ka->sa.sa_handler;
	regs->link = (unsigned long) frame->mctx.tramp;	/* return -> sigreturn */
	regs->trap = 0;

	return;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	force_sigsegv(sig, current);
}
660
/*
 * Do a signal return; undo the signal stack.
 *
 * Old-style (non-rt) sigreturn: read the sigcontext set up by
 * handle_signal(), restore the sigmask from its two saved words, then
 * restore the registers.  On a bad frame the task gets SIGSEGV.
 */
int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		  struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* the sigcontext sits above the dummy caller frame pushed by
	 * handle_signal() */
	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

	/* reassemble the sigmask from the two words saved at delivery */
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
	restore_sigmask(&set);

	sr = (struct mcontext __user *) sigctx.regs;
	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
	    || restore_user_regs(regs, sr, 1))
		goto badframe;

	sigreturn_exit(regs);	/* doesn't return */
	return 0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
695
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Main signal-delivery entry: dequeue one pending signal, fix up any
 * interrupted system call (restart or -EINTR), pick the stack to use
 * (sigaltstack or current), deliver via handle_rt_signal() or
 * handle_signal(), and update the blocked mask.
 * Returns 1 if a signal was delivered, 0 otherwise.
 */
int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	unsigned long frame, newsp;
	int signr, ret;

	/* when freezing with nothing pending, skip the dequeue but still
	 * run the syscall-restart fixup below */
	if (try_to_freeze()) {
		signr = 0;
		if (!signal_pending(current))
			goto no_signal;
	}

	if (!oldset)
		oldset = &current->blocked;

	newsp = frame = 0;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 no_signal:
	if (TRAP(regs) == 0x0C00		/* System Call! */
	    && regs->ccr & 0x10000000		/* error signalled */
	    && ((ret = regs->gpr[3]) == ERESTARTSYS
		|| ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
		|| ret == ERESTART_RESTARTBLOCK)) {

		if (signr > 0
		    && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
			|| (ret == ERESTARTSYS
			    && !(ka.sa.sa_flags & SA_RESTART)))) {
			/* make the system call return an EINTR error */
			regs->result = -EINTR;
			regs->gpr[3] = EINTR;
			/* note that the cr0.SO bit is already set */
		} else {
			regs->nip -= 4;	/* Back up & retry system call */
			regs->result = 0;
			regs->trap = 0;
			if (ret == ERESTART_RESTARTBLOCK)
				regs->gpr[0] = __NR_restart_syscall;
			else
				regs->gpr[3] = regs->orig_gpr3;
		}
	}

	if (signr == 0)
		return 0;		/* no signals delivered */

	/* use the alternate stack if requested and not already on it */
	if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
	    && !on_sig_stack(regs->gpr[1]))
		newsp = current->sas_ss_sp + current->sas_ss_size;
	else
		newsp = regs->gpr[1];
	newsp &= ~0xfUL;	/* keep the stack 16-byte aligned */

	/* Whee!  Actually deliver the signal.  */
	if (ka.sa.sa_flags & SA_SIGINFO)
		handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
	else
		handle_signal(signr, &ka, &info, oldset, regs, newsp);

	/* block the handler's sa_mask (and the signal itself unless
	 * SA_NODEFER) while the handler runs */
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
	if (!(ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, signr);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return 1;
}
771
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 726fe7ce1747..bc5bf1124836 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -34,11 +34,11 @@
34#include <asm/thread_info.h> 34#include <asm/thread_info.h>
35#include <asm/tlbflush.h> 35#include <asm/tlbflush.h>
36#include <asm/xmon.h> 36#include <asm/xmon.h>
37#include <asm/machdep.h>
37 38
38volatile int smp_commenced; 39volatile int smp_commenced;
39int smp_tb_synchronized; 40int smp_tb_synchronized;
40struct cpuinfo_PPC cpu_data[NR_CPUS]; 41struct cpuinfo_PPC cpu_data[NR_CPUS];
41struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
42atomic_t ipi_recv; 42atomic_t ipi_recv;
43atomic_t ipi_sent; 43atomic_t ipi_sent;
44cpumask_t cpu_online_map; 44cpumask_t cpu_online_map;
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(cpu_online_map);
51EXPORT_SYMBOL(cpu_possible_map); 51EXPORT_SYMBOL(cpu_possible_map);
52 52
53/* SMP operations for this machine */ 53/* SMP operations for this machine */
54static struct smp_ops_t *smp_ops; 54struct smp_ops_t *smp_ops;
55 55
56/* all cpu mappings are 1-1 -- Cort */ 56/* all cpu mappings are 1-1 -- Cort */
57volatile unsigned long cpu_callin_map[NR_CPUS]; 57volatile unsigned long cpu_callin_map[NR_CPUS];
@@ -74,11 +74,11 @@ extern void __save_cpu_setup(void);
74#define PPC_MSG_XMON_BREAK 3 74#define PPC_MSG_XMON_BREAK 3
75 75
76static inline void 76static inline void
77smp_message_pass(int target, int msg, unsigned long data, int wait) 77smp_message_pass(int target, int msg)
78{ 78{
79 if (smp_ops){ 79 if (smp_ops) {
80 atomic_inc(&ipi_sent); 80 atomic_inc(&ipi_sent);
81 smp_ops->message_pass(target,msg,data,wait); 81 smp_ops->message_pass(target, msg);
82 } 82 }
83} 83}
84 84
@@ -119,7 +119,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
119void smp_send_tlb_invalidate(int cpu) 119void smp_send_tlb_invalidate(int cpu)
120{ 120{
121 if ( PVR_VER(mfspr(SPRN_PVR)) == 8 ) 121 if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
122 smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0); 122 smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB);
123} 123}
124 124
125void smp_send_reschedule(int cpu) 125void smp_send_reschedule(int cpu)
@@ -135,13 +135,13 @@ void smp_send_reschedule(int cpu)
135 */ 135 */
136 /* This is only used if `cpu' is running an idle task, 136 /* This is only used if `cpu' is running an idle task,
137 so it will reschedule itself anyway... */ 137 so it will reschedule itself anyway... */
138 smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0); 138 smp_message_pass(cpu, PPC_MSG_RESCHEDULE);
139} 139}
140 140
141#ifdef CONFIG_XMON 141#ifdef CONFIG_XMON
142void smp_send_xmon_break(int cpu) 142void smp_send_xmon_break(int cpu)
143{ 143{
144 smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0); 144 smp_message_pass(cpu, PPC_MSG_XMON_BREAK);
145} 145}
146#endif /* CONFIG_XMON */ 146#endif /* CONFIG_XMON */
147 147
@@ -224,7 +224,7 @@ static int __smp_call_function(void (*func) (void *info), void *info,
224 spin_lock(&call_lock); 224 spin_lock(&call_lock);
225 call_data = &data; 225 call_data = &data;
226 /* Send a message to all other CPUs and wait for them to respond */ 226 /* Send a message to all other CPUs and wait for them to respond */
227 smp_message_pass(target, PPC_MSG_CALL_FUNCTION, 0, 0); 227 smp_message_pass(target, PPC_MSG_CALL_FUNCTION);
228 228
229 /* Wait for response */ 229 /* Wait for response */
230 timeout = 1000000; 230 timeout = 1000000;
@@ -294,7 +294,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
294 smp_store_cpu_info(smp_processor_id()); 294 smp_store_cpu_info(smp_processor_id());
295 cpu_callin_map[smp_processor_id()] = 1; 295 cpu_callin_map[smp_processor_id()] = 1;
296 296
297 smp_ops = ppc_md.smp_ops;
298 if (smp_ops == NULL) { 297 if (smp_ops == NULL) {
299 printk("SMP not supported on this machine.\n"); 298 printk("SMP not supported on this machine.\n");
300 return; 299 return;
@@ -308,9 +307,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
308 /* Backup CPU 0 state */ 307 /* Backup CPU 0 state */
309 __save_cpu_setup(); 308 __save_cpu_setup();
310 309
311 if (smp_ops->space_timers)
312 smp_ops->space_timers(num_cpus);
313
314 for_each_cpu(cpu) { 310 for_each_cpu(cpu) {
315 if (cpu == smp_processor_id()) 311 if (cpu == smp_processor_id())
316 continue; 312 continue;
diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c
deleted file mode 100644
index 127f040de9de..000000000000
--- a/arch/ppc/kernel/syscalls.c
+++ /dev/null
@@ -1,268 +0,0 @@
1/*
2 * arch/ppc/kernel/sys_ppc.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/kernel/sys_i386.c"
8 * Adapted from the i386 version by Gary Thomas
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@cs.anu.edu.au).
11 *
12 * This file contains various random system calls that
13 * have a non-standard calling sequence on the Linux/PPC
14 * platform.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/mm.h>
26#include <linux/smp.h>
27#include <linux/smp_lock.h>
28#include <linux/sem.h>
29#include <linux/msg.h>
30#include <linux/shm.h>
31#include <linux/stat.h>
32#include <linux/syscalls.h>
33#include <linux/mman.h>
34#include <linux/sys.h>
35#include <linux/ipc.h>
36#include <linux/utsname.h>
37#include <linux/file.h>
38#include <linux/unistd.h>
39
40#include <asm/uaccess.h>
41#include <asm/ipc.h>
42#include <asm/semaphore.h>
43
44
45/*
46 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
47 *
48 * This is really horribly ugly.
49 */
50int
51sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
52{
53 int version, ret;
54
55 version = call >> 16; /* hack for backward compatibility */
56 call &= 0xffff;
57
58 ret = -ENOSYS;
59 switch (call) {
60 case SEMOP:
61 ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
62 second, NULL);
63 break;
64 case SEMTIMEDOP:
65 ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
66 second, (const struct timespec __user *) fifth);
67 break;
68 case SEMGET:
69 ret = sys_semget (first, second, third);
70 break;
71 case SEMCTL: {
72 union semun fourth;
73
74 if (!ptr)
75 break;
76 if ((ret = access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
77 || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
78 break;
79 ret = sys_semctl (first, second, third, fourth);
80 break;
81 }
82 case MSGSND:
83 ret = sys_msgsnd (first, (struct msgbuf __user *) ptr, second, third);
84 break;
85 case MSGRCV:
86 switch (version) {
87 case 0: {
88 struct ipc_kludge tmp;
89
90 if (!ptr)
91 break;
92 if ((ret = access_ok(VERIFY_READ, ptr, sizeof(tmp)) ? 0 : -EFAULT)
93 || (ret = copy_from_user(&tmp,
94 (struct ipc_kludge __user *) ptr,
95 sizeof (tmp)) ? -EFAULT : 0))
96 break;
97 ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
98 third);
99 break;
100 }
101 default:
102 ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
103 second, fifth, third);
104 break;
105 }
106 break;
107 case MSGGET:
108 ret = sys_msgget ((key_t) first, second);
109 break;
110 case MSGCTL:
111 ret = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
112 break;
113 case SHMAT: {
114 ulong raddr;
115
116 if ((ret = access_ok(VERIFY_WRITE, (ulong __user *) third,
117 sizeof(ulong)) ? 0 : -EFAULT))
118 break;
119 ret = do_shmat (first, (char __user *) ptr, second, &raddr);
120 if (ret)
121 break;
122 ret = put_user (raddr, (ulong __user *) third);
123 break;
124 }
125 case SHMDT:
126 ret = sys_shmdt ((char __user *)ptr);
127 break;
128 case SHMGET:
129 ret = sys_shmget (first, second, third);
130 break;
131 case SHMCTL:
132 ret = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
133 break;
134 }
135
136 return ret;
137}
138
139/*
140 * sys_pipe() is the normal C calling standard for creating
141 * a pipe. It's not the way unix traditionally does this, though.
142 */
143int sys_pipe(int __user *fildes)
144{
145 int fd[2];
146 int error;
147
148 error = do_pipe(fd);
149 if (!error) {
150 if (copy_to_user(fildes, fd, 2*sizeof(int)))
151 error = -EFAULT;
152 }
153 return error;
154}
155
156static inline unsigned long
157do_mmap2(unsigned long addr, size_t len,
158 unsigned long prot, unsigned long flags,
159 unsigned long fd, unsigned long pgoff)
160{
161 struct file * file = NULL;
162 int ret = -EBADF;
163
164 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
165 if (!(flags & MAP_ANONYMOUS)) {
166 if (!(file = fget(fd)))
167 goto out;
168 }
169
170 down_write(&current->mm->mmap_sem);
171 ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
172 up_write(&current->mm->mmap_sem);
173 if (file)
174 fput(file);
175out:
176 return ret;
177}
178
179unsigned long sys_mmap2(unsigned long addr, size_t len,
180 unsigned long prot, unsigned long flags,
181 unsigned long fd, unsigned long pgoff)
182{
183 return do_mmap2(addr, len, prot, flags, fd, pgoff);
184}
185
186unsigned long sys_mmap(unsigned long addr, size_t len,
187 unsigned long prot, unsigned long flags,
188 unsigned long fd, off_t offset)
189{
190 int err = -EINVAL;
191
192 if (offset & ~PAGE_MASK)
193 goto out;
194
195 err = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
196out:
197 return err;
198}
199
200/*
201 * Due to some executables calling the wrong select we sometimes
202 * get wrong args. This determines how the args are being passed
203 * (a single ptr to them all args passed) then calls
204 * sys_select() with the appropriate args. -- Cort
205 */
206int
207ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
208{
209 if ( (unsigned long)n >= 4096 )
210 {
211 unsigned long __user *buffer = (unsigned long __user *)n;
212 if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
213 || __get_user(n, buffer)
214 || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
215 || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
216 || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
217 || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
218 return -EFAULT;
219 }
220 return sys_select(n, inp, outp, exp, tvp);
221}
222
223int sys_uname(struct old_utsname __user * name)
224{
225 int err = -EFAULT;
226
227 down_read(&uts_sem);
228 if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
229 err = 0;
230 up_read(&uts_sem);
231 return err;
232}
233
234int sys_olduname(struct oldold_utsname __user * name)
235{
236 int error;
237
238 if (!name)
239 return -EFAULT;
240 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
241 return -EFAULT;
242
243 down_read(&uts_sem);
244 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
245 error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
246 error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
247 error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
248 error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
249 error -= __put_user(0,name->release+__OLD_UTS_LEN);
250 error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
251 error -= __put_user(0,name->version+__OLD_UTS_LEN);
252 error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
253 error = __put_user(0,name->machine+__OLD_UTS_LEN);
254 up_read(&uts_sem);
255
256 error = error ? -EFAULT : 0;
257 return error;
258}
259
260/*
261 * We put the arguments in a different order so we only use 6
262 * registers for arguments, rather than 7 as sys_fadvise64_64 needs
263 * (because `offset' goes in r5/r6).
264 */
265long ppc_fadvise64_64(int fd, int advice, loff_t offset, loff_t len)
266{
267 return sys_fadvise64_64(fd, offset, len, advice);
268}
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index 22d7fd1e0aea..76f44ce4772e 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -121,6 +121,15 @@ unsigned long profile_pc(struct pt_regs *regs)
121EXPORT_SYMBOL(profile_pc); 121EXPORT_SYMBOL(profile_pc);
122#endif 122#endif
123 123
124void wakeup_decrementer(void)
125{
126 set_dec(tb_ticks_per_jiffy);
127 /* No currently-supported powerbook has a 601,
128 * so use get_tbl, not native
129 */
130 last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
131}
132
124/* 133/*
125 * timer_interrupt - gets called when the decrementer overflows, 134 * timer_interrupt - gets called when the decrementer overflows,
126 * with interrupts disabled. 135 * with interrupts disabled.
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 961ede87be72..5e4bf88a1ef5 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -41,9 +41,14 @@
41#ifdef CONFIG_PMAC_BACKLIGHT 41#ifdef CONFIG_PMAC_BACKLIGHT
42#include <asm/backlight.h> 42#include <asm/backlight.h>
43#endif 43#endif
44#include <asm/perfmon.h> 44#include <asm/pmc.h>
45 45
46#ifdef CONFIG_XMON 46#ifdef CONFIG_XMON
47extern int xmon_bpt(struct pt_regs *regs);
48extern int xmon_sstep(struct pt_regs *regs);
49extern int xmon_iabr_match(struct pt_regs *regs);
50extern int xmon_dabr_match(struct pt_regs *regs);
51
47void (*debugger)(struct pt_regs *regs) = xmon; 52void (*debugger)(struct pt_regs *regs) = xmon;
48int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt; 53int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
49int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep; 54int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
@@ -74,7 +79,7 @@ void (*debugger_fault_handler)(struct pt_regs *regs);
74 79
75DEFINE_SPINLOCK(die_lock); 80DEFINE_SPINLOCK(die_lock);
76 81
77void die(const char * str, struct pt_regs * fp, long err) 82int die(const char * str, struct pt_regs * fp, long err)
78{ 83{
79 static int die_counter; 84 static int die_counter;
80 int nl = 0; 85 int nl = 0;
@@ -232,7 +237,7 @@ platform_machine_check(struct pt_regs *regs)
232{ 237{
233} 238}
234 239
235void MachineCheckException(struct pt_regs *regs) 240void machine_check_exception(struct pt_regs *regs)
236{ 241{
237 unsigned long reason = get_mc_reason(regs); 242 unsigned long reason = get_mc_reason(regs);
238 243
@@ -393,14 +398,14 @@ void SMIException(struct pt_regs *regs)
393#endif 398#endif
394} 399}
395 400
396void UnknownException(struct pt_regs *regs) 401void unknown_exception(struct pt_regs *regs)
397{ 402{
398 printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 403 printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
399 regs->nip, regs->msr, regs->trap, print_tainted()); 404 regs->nip, regs->msr, regs->trap, print_tainted());
400 _exception(SIGTRAP, regs, 0, 0); 405 _exception(SIGTRAP, regs, 0, 0);
401} 406}
402 407
403void InstructionBreakpoint(struct pt_regs *regs) 408void instruction_breakpoint_exception(struct pt_regs *regs)
404{ 409{
405 if (debugger_iabr_match(regs)) 410 if (debugger_iabr_match(regs))
406 return; 411 return;
@@ -575,7 +580,7 @@ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
575#define module_find_bug(x) NULL 580#define module_find_bug(x) NULL
576#endif 581#endif
577 582
578static struct bug_entry *find_bug(unsigned long bugaddr) 583struct bug_entry *find_bug(unsigned long bugaddr)
579{ 584{
580 struct bug_entry *bug; 585 struct bug_entry *bug;
581 586
@@ -622,7 +627,7 @@ int check_bug_trap(struct pt_regs *regs)
622 return 0; 627 return 0;
623} 628}
624 629
625void ProgramCheckException(struct pt_regs *regs) 630void program_check_exception(struct pt_regs *regs)
626{ 631{
627 unsigned int reason = get_reason(regs); 632 unsigned int reason = get_reason(regs);
628 extern int do_mathemu(struct pt_regs *regs); 633 extern int do_mathemu(struct pt_regs *regs);
@@ -701,7 +706,7 @@ void ProgramCheckException(struct pt_regs *regs)
701 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 706 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
702} 707}
703 708
704void SingleStepException(struct pt_regs *regs) 709void single_step_exception(struct pt_regs *regs)
705{ 710{
706 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ 711 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
707 if (debugger_sstep(regs)) 712 if (debugger_sstep(regs))
@@ -709,7 +714,7 @@ void SingleStepException(struct pt_regs *regs)
709 _exception(SIGTRAP, regs, TRAP_TRACE, 0); 714 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
710} 715}
711 716
712void AlignmentException(struct pt_regs *regs) 717void alignment_exception(struct pt_regs *regs)
713{ 718{
714 int fixed; 719 int fixed;
715 720
@@ -814,7 +819,18 @@ void TAUException(struct pt_regs *regs)
814} 819}
815#endif /* CONFIG_INT_TAU */ 820#endif /* CONFIG_INT_TAU */
816 821
817void AltivecUnavailException(struct pt_regs *regs) 822/*
823 * FP unavailable trap from kernel - print a message, but let
824 * the task use FP in the kernel until it returns to user mode.
825 */
826void kernel_fp_unavailable_exception(struct pt_regs *regs)
827{
828 regs->msr |= MSR_FP;
829 printk(KERN_ERR "floating point used in kernel (task=%p, pc=%lx)\n",
830 current, regs->nip);
831}
832
833void altivec_unavailable_exception(struct pt_regs *regs)
818{ 834{
819 static int kernel_altivec_count; 835 static int kernel_altivec_count;
820 836
@@ -835,7 +851,7 @@ void AltivecUnavailException(struct pt_regs *regs)
835} 851}
836 852
837#ifdef CONFIG_ALTIVEC 853#ifdef CONFIG_ALTIVEC
838void AltivecAssistException(struct pt_regs *regs) 854void altivec_assist_exception(struct pt_regs *regs)
839{ 855{
840 int err; 856 int err;
841 857
@@ -872,7 +888,7 @@ void AltivecAssistException(struct pt_regs *regs)
872#endif /* CONFIG_ALTIVEC */ 888#endif /* CONFIG_ALTIVEC */
873 889
874#ifdef CONFIG_E500 890#ifdef CONFIG_E500
875void PerformanceMonitorException(struct pt_regs *regs) 891void performance_monitor_exception(struct pt_regs *regs)
876{ 892{
877 perf_irq(regs); 893 perf_irq(regs);
878} 894}
diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S
deleted file mode 100644
index 82a21346bf80..000000000000
--- a/arch/ppc/kernel/vector.S
+++ /dev/null
@@ -1,217 +0,0 @@
1#include <asm/ppc_asm.h>
2#include <asm/processor.h>
3
4/*
5 * The routines below are in assembler so we can closely control the
6 * usage of floating-point registers. These routines must be called
7 * with preempt disabled.
8 */
9 .data
10fpzero:
11 .long 0
12fpone:
13 .long 0x3f800000 /* 1.0 in single-precision FP */
14fphalf:
15 .long 0x3f000000 /* 0.5 in single-precision FP */
16
17 .text
18/*
19 * Internal routine to enable floating point and set FPSCR to 0.
20 * Don't call it from C; it doesn't use the normal calling convention.
21 */
22fpenable:
23 mfmsr r10
24 ori r11,r10,MSR_FP
25 mtmsr r11
26 isync
27 stfd fr0,24(r1)
28 stfd fr1,16(r1)
29 stfd fr31,8(r1)
30 lis r11,fpzero@ha
31 mffs fr31
32 lfs fr1,fpzero@l(r11)
33 mtfsf 0xff,fr1
34 blr
35
36fpdisable:
37 mtfsf 0xff,fr31
38 lfd fr31,8(r1)
39 lfd fr1,16(r1)
40 lfd fr0,24(r1)
41 mtmsr r10
42 isync
43 blr
44
45/*
46 * Vector add, floating point.
47 */
48 .globl vaddfp
49vaddfp:
50 stwu r1,-32(r1)
51 mflr r0
52 stw r0,36(r1)
53 bl fpenable
54 li r0,4
55 mtctr r0
56 li r6,0
571: lfsx fr0,r4,r6
58 lfsx fr1,r5,r6
59 fadds fr0,fr0,fr1
60 stfsx fr0,r3,r6
61 addi r6,r6,4
62 bdnz 1b
63 bl fpdisable
64 lwz r0,36(r1)
65 mtlr r0
66 addi r1,r1,32
67 blr
68
69/*
70 * Vector subtract, floating point.
71 */
72 .globl vsubfp
73vsubfp:
74 stwu r1,-32(r1)
75 mflr r0
76 stw r0,36(r1)
77 bl fpenable
78 li r0,4
79 mtctr r0
80 li r6,0
811: lfsx fr0,r4,r6
82 lfsx fr1,r5,r6
83 fsubs fr0,fr0,fr1
84 stfsx fr0,r3,r6
85 addi r6,r6,4
86 bdnz 1b
87 bl fpdisable
88 lwz r0,36(r1)
89 mtlr r0
90 addi r1,r1,32
91 blr
92
93/*
94 * Vector multiply and add, floating point.
95 */
96 .globl vmaddfp
97vmaddfp:
98 stwu r1,-48(r1)
99 mflr r0
100 stw r0,52(r1)
101 bl fpenable
102 stfd fr2,32(r1)
103 li r0,4
104 mtctr r0
105 li r7,0
1061: lfsx fr0,r4,r7
107 lfsx fr1,r5,r7
108 lfsx fr2,r6,r7
109 fmadds fr0,fr0,fr2,fr1
110 stfsx fr0,r3,r7
111 addi r7,r7,4
112 bdnz 1b
113 lfd fr2,32(r1)
114 bl fpdisable
115 lwz r0,52(r1)
116 mtlr r0
117 addi r1,r1,48
118 blr
119
120/*
121 * Vector negative multiply and subtract, floating point.
122 */
123 .globl vnmsubfp
124vnmsubfp:
125 stwu r1,-48(r1)
126 mflr r0
127 stw r0,52(r1)
128 bl fpenable
129 stfd fr2,32(r1)
130 li r0,4
131 mtctr r0
132 li r7,0
1331: lfsx fr0,r4,r7
134 lfsx fr1,r5,r7
135 lfsx fr2,r6,r7
136 fnmsubs fr0,fr0,fr2,fr1
137 stfsx fr0,r3,r7
138 addi r7,r7,4
139 bdnz 1b
140 lfd fr2,32(r1)
141 bl fpdisable
142 lwz r0,52(r1)
143 mtlr r0
144 addi r1,r1,48
145 blr
146
147/*
148 * Vector reciprocal estimate. We just compute 1.0/x.
149 * r3 -> destination, r4 -> source.
150 */
151 .globl vrefp
152vrefp:
153 stwu r1,-32(r1)
154 mflr r0
155 stw r0,36(r1)
156 bl fpenable
157 lis r9,fpone@ha
158 li r0,4
159 lfs fr1,fpone@l(r9)
160 mtctr r0
161 li r6,0
1621: lfsx fr0,r4,r6
163 fdivs fr0,fr1,fr0
164 stfsx fr0,r3,r6
165 addi r6,r6,4
166 bdnz 1b
167 bl fpdisable
168 lwz r0,36(r1)
169 mtlr r0
170 addi r1,r1,32
171 blr
172
173/*
174 * Vector reciprocal square-root estimate, floating point.
175 * We use the frsqrte instruction for the initial estimate followed
176 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
177 * r3 -> destination, r4 -> source.
178 */
179 .globl vrsqrtefp
180vrsqrtefp:
181 stwu r1,-48(r1)
182 mflr r0
183 stw r0,52(r1)
184 bl fpenable
185 stfd fr2,32(r1)
186 stfd fr3,40(r1)
187 stfd fr4,48(r1)
188 stfd fr5,56(r1)
189 lis r9,fpone@ha
190 lis r8,fphalf@ha
191 li r0,4
192 lfs fr4,fpone@l(r9)
193 lfs fr5,fphalf@l(r8)
194 mtctr r0
195 li r6,0
1961: lfsx fr0,r4,r6
197 frsqrte fr1,fr0 /* r = frsqrte(s) */
198 fmuls fr3,fr1,fr0 /* r * s */
199 fmuls fr2,fr1,fr5 /* r * 0.5 */
200 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
201 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
202 fmuls fr3,fr1,fr0 /* r * s */
203 fmuls fr2,fr1,fr5 /* r * 0.5 */
204 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
205 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
206 stfsx fr1,r3,r6
207 addi r6,r6,4
208 bdnz 1b
209 lfd fr5,56(r1)
210 lfd fr4,48(r1)
211 lfd fr3,40(r1)
212 lfd fr2,32(r1)
213 bl fpdisable
214 lwz r0,36(r1)
215 mtlr r0
216 addi r1,r1,32
217 blr
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 17d2db7e537d..09c6525cfa61 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -149,32 +149,6 @@ SECTIONS
149 149
150 . = ALIGN(4096); 150 . = ALIGN(4096);
151 _sextratext = .; 151 _sextratext = .;
152 __pmac_begin = .;
153 .pmac.text : { *(.pmac.text) }
154 .pmac.data : { *(.pmac.data) }
155 . = ALIGN(4096);
156 __pmac_end = .;
157
158 . = ALIGN(4096);
159 __prep_begin = .;
160 .prep.text : { *(.prep.text) }
161 .prep.data : { *(.prep.data) }
162 . = ALIGN(4096);
163 __prep_end = .;
164
165 . = ALIGN(4096);
166 __chrp_begin = .;
167 .chrp.text : { *(.chrp.text) }
168 .chrp.data : { *(.chrp.data) }
169 . = ALIGN(4096);
170 __chrp_end = .;
171
172 . = ALIGN(4096);
173 __openfirmware_begin = .;
174 .openfirmware.text : { *(.openfirmware.text) }
175 .openfirmware.data : { *(.openfirmware.data) }
176 . = ALIGN(4096);
177 __openfirmware_end = .;
178 _eextratext = .; 152 _eextratext = .;
179 153
180 __bss_start = .; 154 __bss_start = .;
diff --git a/arch/ppc/lib/string.S b/arch/ppc/lib/string.S
index 36c9b97fd92a..2e258c49e8be 100644
--- a/arch/ppc/lib/string.S
+++ b/arch/ppc/lib/string.S
@@ -65,9 +65,9 @@
65 .stabs "arch/ppc/lib/",N_SO,0,0,0f 65 .stabs "arch/ppc/lib/",N_SO,0,0,0f
66 .stabs "string.S",N_SO,0,0,0f 66 .stabs "string.S",N_SO,0,0,0f
67 67
68CACHELINE_BYTES = L1_CACHE_LINE_SIZE 68CACHELINE_BYTES = L1_CACHE_BYTES
69LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE 69LG_CACHELINE_BYTES = L1_CACHE_SHIFT
70CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1) 70CACHELINE_MASK = (L1_CACHE_BYTES-1)
71 71
72_GLOBAL(strcpy) 72_GLOBAL(strcpy)
73 addi r5,r3,-1 73 addi r5,r3,-1
@@ -265,12 +265,12 @@ _GLOBAL(cacheable_memcpy)
265 dcbz r11,r6 265 dcbz r11,r6
266#endif 266#endif
267 COPY_16_BYTES 267 COPY_16_BYTES
268#if L1_CACHE_LINE_SIZE >= 32 268#if L1_CACHE_BYTES >= 32
269 COPY_16_BYTES 269 COPY_16_BYTES
270#if L1_CACHE_LINE_SIZE >= 64 270#if L1_CACHE_BYTES >= 64
271 COPY_16_BYTES 271 COPY_16_BYTES
272 COPY_16_BYTES 272 COPY_16_BYTES
273#if L1_CACHE_LINE_SIZE >= 128 273#if L1_CACHE_BYTES >= 128
274 COPY_16_BYTES 274 COPY_16_BYTES
275 COPY_16_BYTES 275 COPY_16_BYTES
276 COPY_16_BYTES 276 COPY_16_BYTES
@@ -485,12 +485,12 @@ _GLOBAL(__copy_tofrom_user)
485 .text 485 .text
486/* the main body of the cacheline loop */ 486/* the main body of the cacheline loop */
487 COPY_16_BYTES_WITHEX(0) 487 COPY_16_BYTES_WITHEX(0)
488#if L1_CACHE_LINE_SIZE >= 32 488#if L1_CACHE_BYTES >= 32
489 COPY_16_BYTES_WITHEX(1) 489 COPY_16_BYTES_WITHEX(1)
490#if L1_CACHE_LINE_SIZE >= 64 490#if L1_CACHE_BYTES >= 64
491 COPY_16_BYTES_WITHEX(2) 491 COPY_16_BYTES_WITHEX(2)
492 COPY_16_BYTES_WITHEX(3) 492 COPY_16_BYTES_WITHEX(3)
493#if L1_CACHE_LINE_SIZE >= 128 493#if L1_CACHE_BYTES >= 128
494 COPY_16_BYTES_WITHEX(4) 494 COPY_16_BYTES_WITHEX(4)
495 COPY_16_BYTES_WITHEX(5) 495 COPY_16_BYTES_WITHEX(5)
496 COPY_16_BYTES_WITHEX(6) 496 COPY_16_BYTES_WITHEX(6)
@@ -544,12 +544,12 @@ _GLOBAL(__copy_tofrom_user)
544 * 104f (if in read part) or 105f (if in write part), after updating r5 544 * 104f (if in read part) or 105f (if in write part), after updating r5
545 */ 545 */
546 COPY_16_BYTES_EXCODE(0) 546 COPY_16_BYTES_EXCODE(0)
547#if L1_CACHE_LINE_SIZE >= 32 547#if L1_CACHE_BYTES >= 32
548 COPY_16_BYTES_EXCODE(1) 548 COPY_16_BYTES_EXCODE(1)
549#if L1_CACHE_LINE_SIZE >= 64 549#if L1_CACHE_BYTES >= 64
550 COPY_16_BYTES_EXCODE(2) 550 COPY_16_BYTES_EXCODE(2)
551 COPY_16_BYTES_EXCODE(3) 551 COPY_16_BYTES_EXCODE(3)
552#if L1_CACHE_LINE_SIZE >= 128 552#if L1_CACHE_BYTES >= 128
553 COPY_16_BYTES_EXCODE(4) 553 COPY_16_BYTES_EXCODE(4)
554 COPY_16_BYTES_EXCODE(5) 554 COPY_16_BYTES_EXCODE(5)
555 COPY_16_BYTES_EXCODE(6) 555 COPY_16_BYTES_EXCODE(6)
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index f421a4b337f6..db94efd25369 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -69,15 +69,12 @@ int init_bootmem_done;
69int boot_mapsize; 69int boot_mapsize;
70#ifdef CONFIG_PPC_PMAC 70#ifdef CONFIG_PPC_PMAC
71unsigned long agp_special_page; 71unsigned long agp_special_page;
72EXPORT_SYMBOL(agp_special_page);
72#endif 73#endif
73 74
74extern char _end[]; 75extern char _end[];
75extern char etext[], _stext[]; 76extern char etext[], _stext[];
76extern char __init_begin, __init_end; 77extern char __init_begin, __init_end;
77extern char __prep_begin, __prep_end;
78extern char __chrp_begin, __chrp_end;
79extern char __pmac_begin, __pmac_end;
80extern char __openfirmware_begin, __openfirmware_end;
81 78
82#ifdef CONFIG_HIGHMEM 79#ifdef CONFIG_HIGHMEM
83pte_t *kmap_pte; 80pte_t *kmap_pte;
@@ -167,14 +164,6 @@ void free_initmem(void)
167 164
168 printk ("Freeing unused kernel memory:"); 165 printk ("Freeing unused kernel memory:");
169 FREESEC(init); 166 FREESEC(init);
170 if (_machine != _MACH_Pmac)
171 FREESEC(pmac);
172 if (_machine != _MACH_chrp)
173 FREESEC(chrp);
174 if (_machine != _MACH_prep)
175 FREESEC(prep);
176 if (!have_of)
177 FREESEC(openfirmware);
178 printk("\n"); 167 printk("\n");
179 ppc_md.progress = NULL; 168 ppc_md.progress = NULL;
180#undef FREESEC 169#undef FREESEC
diff --git a/arch/ppc/oprofile/common.c b/arch/ppc/oprofile/common.c
deleted file mode 100644
index 3169c67abea7..000000000000
--- a/arch/ppc/oprofile/common.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * PPC 32 oprofile support
3 * Based on PPC64 oprofile support
4 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
5 *
6 * Copyright (C) Freescale Semiconductor, Inc 2004
7 *
8 * Author: Andy Fleming
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/oprofile.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/smp.h>
20#include <linux/errno.h>
21#include <asm/ptrace.h>
22#include <asm/system.h>
23#include <asm/perfmon.h>
24#include <asm/cputable.h>
25
26#include "op_impl.h"
27
28static struct op_ppc32_model *model;
29
30static struct op_counter_config ctr[OP_MAX_COUNTER];
31static struct op_system_config sys;
32
33static void op_handle_interrupt(struct pt_regs *regs)
34{
35 model->handle_interrupt(regs, ctr);
36}
37
38static int op_ppc32_setup(void)
39{
40 /* Install our interrupt handler into the existing hook. */
41 if(request_perfmon_irq(&op_handle_interrupt))
42 return -EBUSY;
43
44 mb();
45
46 /* Pre-compute the values to stuff in the hardware registers. */
47 model->reg_setup(ctr, &sys, model->num_counters);
48
49#if 0
50 /* FIXME: Make multi-cpu work */
51 /* Configure the registers on all cpus. */
52 on_each_cpu(model->reg_setup, NULL, 0, 1);
53#endif
54
55 return 0;
56}
57
58static void op_ppc32_shutdown(void)
59{
60 mb();
61
62 /* Remove our interrupt handler. We may be removing this module. */
63 free_perfmon_irq();
64}
65
66static void op_ppc32_cpu_start(void *dummy)
67{
68 model->start(ctr);
69}
70
71static int op_ppc32_start(void)
72{
73 on_each_cpu(op_ppc32_cpu_start, NULL, 0, 1);
74 return 0;
75}
76
77static inline void op_ppc32_cpu_stop(void *dummy)
78{
79 model->stop();
80}
81
82static void op_ppc32_stop(void)
83{
84 on_each_cpu(op_ppc32_cpu_stop, NULL, 0, 1);
85}
86
87static int op_ppc32_create_files(struct super_block *sb, struct dentry *root)
88{
89 int i;
90
91 for (i = 0; i < model->num_counters; ++i) {
92 struct dentry *dir;
93 char buf[3];
94
95 snprintf(buf, sizeof buf, "%d", i);
96 dir = oprofilefs_mkdir(sb, root, buf);
97
98 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
99 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
100 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
101 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
102 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
103
104 /* FIXME: Not sure if this is used */
105 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
106 }
107
108 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
109 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
110
111 /* Default to tracing both kernel and user */
112 sys.enable_kernel = 1;
113 sys.enable_user = 1;
114
115 return 0;
116}
117
118static struct oprofile_operations oprof_ppc32_ops = {
119 .create_files = op_ppc32_create_files,
120 .setup = op_ppc32_setup,
121 .shutdown = op_ppc32_shutdown,
122 .start = op_ppc32_start,
123 .stop = op_ppc32_stop,
124 .cpu_type = NULL /* To be filled in below. */
125};
126
127int __init oprofile_arch_init(struct oprofile_operations *ops)
128{
129 char *name;
130 int cpu_id = smp_processor_id();
131
132#ifdef CONFIG_FSL_BOOKE
133 model = &op_model_fsl_booke;
134#else
135 return -ENODEV;
136#endif
137
138 name = kmalloc(32, GFP_KERNEL);
139
140 if (NULL == name)
141 return -ENOMEM;
142
143 sprintf(name, "ppc/%s", cur_cpu_spec[cpu_id]->cpu_name);
144
145 oprof_ppc32_ops.cpu_type = name;
146
147 model->num_counters = cur_cpu_spec[cpu_id]->num_pmcs;
148
149 *ops = oprof_ppc32_ops;
150
151 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
152 oprof_ppc32_ops.cpu_type);
153
154 return 0;
155}
156
157void oprofile_arch_exit(void)
158{
159 kfree(oprof_ppc32_ops.cpu_type);
160 oprof_ppc32_ops.cpu_type = NULL;
161}
diff --git a/arch/ppc/oprofile/op_impl.h b/arch/ppc/oprofile/op_impl.h
deleted file mode 100644
index bc336dc971e3..000000000000
--- a/arch/ppc/oprofile/op_impl.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
3 *
4 * Based on alpha version.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef OP_IMPL_H
13#define OP_IMPL_H 1
14
15#define OP_MAX_COUNTER 8
16
17/* Per-counter configuration as set via oprofilefs. */
18struct op_counter_config {
19 unsigned long enabled;
20 unsigned long event;
21 unsigned long count;
22 unsigned long kernel;
23 unsigned long user;
24 unsigned long unit_mask;
25};
26
27/* System-wide configuration as set via oprofilefs. */
28struct op_system_config {
29 unsigned long enable_kernel;
30 unsigned long enable_user;
31};
32
33/* Per-arch configuration */
34struct op_ppc32_model {
35 void (*reg_setup) (struct op_counter_config *,
36 struct op_system_config *,
37 int num_counters);
38 void (*start) (struct op_counter_config *);
39 void (*stop) (void);
40 void (*handle_interrupt) (struct pt_regs *,
41 struct op_counter_config *);
42 int num_counters;
43};
44
45#endif /* OP_IMPL_H */
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index 27b778ab903b..d32ae112f639 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -90,7 +90,7 @@ ebony_calibrate_decr(void)
90 * on Rev. C silicon then errata forces us to 90 * on Rev. C silicon then errata forces us to
91 * use the internal clock. 91 * use the internal clock.
92 */ 92 */
93 if (strcmp(cur_cpu_spec[0]->cpu_name, "440GP Rev. B") == 0) 93 if (strcmp(cur_cpu_spec->cpu_name, "440GP Rev. B") == 0)
94 freq = EBONY_440GP_RB_SYSCLK; 94 freq = EBONY_440GP_RB_SYSCLK;
95 else 95 else
96 freq = EBONY_440GP_RC_SYSCLK; 96 freq = EBONY_440GP_RC_SYSCLK;
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.h b/arch/ppc/platforms/83xx/mpc834x_sys.h
index 1584cd77a9ef..58e44c042535 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.h
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.h
@@ -19,7 +19,6 @@
19 19
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/seq_file.h>
23#include <syslib/ppc83xx_setup.h> 22#include <syslib/ppc83xx_setup.h>
24#include <asm/ppcboot.h> 23#include <asm/ppcboot.h>
25 24
diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
index 3875e839cff7..84acf6e8d45e 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
+++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
@@ -19,7 +19,6 @@
19 19
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/seq_file.h>
23#include <asm/ppcboot.h> 22#include <asm/ppcboot.h>
24 23
25#define BOARD_CCSRBAR ((uint)0xe0000000) 24#define BOARD_CCSRBAR ((uint)0xe0000000)
diff --git a/arch/ppc/platforms/85xx/stx_gp3.h b/arch/ppc/platforms/85xx/stx_gp3.h
index 7bcc6c35a417..95fdf4b0680b 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.h
+++ b/arch/ppc/platforms/85xx/stx_gp3.h
@@ -21,7 +21,6 @@
21 21
22#include <linux/config.h> 22#include <linux/config.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/seq_file.h>
25#include <asm/ppcboot.h> 24#include <asm/ppcboot.h>
26 25
27#define BOARD_CCSRBAR ((uint)0xe0000000) 26#define BOARD_CCSRBAR ((uint)0xe0000000)
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c
index df6ff98c023a..48a4a510d598 100644
--- a/arch/ppc/platforms/chestnut.c
+++ b/arch/ppc/platforms/chestnut.c
@@ -541,7 +541,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
541 541
542 ppc_md.setup_arch = chestnut_setup_arch; 542 ppc_md.setup_arch = chestnut_setup_arch;
543 ppc_md.show_cpuinfo = chestnut_show_cpuinfo; 543 ppc_md.show_cpuinfo = chestnut_show_cpuinfo;
544 ppc_md.irq_canonicalize = NULL;
545 ppc_md.init_IRQ = mv64360_init_irq; 544 ppc_md.init_IRQ = mv64360_init_irq;
546 ppc_md.get_irq = mv64360_get_irq; 545 ppc_md.get_irq = mv64360_get_irq;
547 ppc_md.init = NULL; 546 ppc_md.init = NULL;
diff --git a/arch/ppc/platforms/chrp_pci.c b/arch/ppc/platforms/chrp_pci.c
index 7d3fbb5c5db2..bd047aac01b1 100644
--- a/arch/ppc/platforms/chrp_pci.c
+++ b/arch/ppc/platforms/chrp_pci.c
@@ -29,7 +29,7 @@ void __iomem *gg2_pci_config_base;
29 * limit the bus number to 3 bits 29 * limit the bus number to 3 bits
30 */ 30 */
31 31
32int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off, 32int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
33 int len, u32 *val) 33 int len, u32 *val)
34{ 34{
35 volatile void __iomem *cfg_data; 35 volatile void __iomem *cfg_data;
@@ -56,7 +56,7 @@ int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
56 return PCIBIOS_SUCCESSFUL; 56 return PCIBIOS_SUCCESSFUL;
57} 57}
58 58
59int __chrp gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off, 59int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
60 int len, u32 val) 60 int len, u32 val)
61{ 61{
62 volatile void __iomem *cfg_data; 62 volatile void __iomem *cfg_data;
@@ -92,7 +92,7 @@ static struct pci_ops gg2_pci_ops =
92/* 92/*
93 * Access functions for PCI config space using RTAS calls. 93 * Access functions for PCI config space using RTAS calls.
94 */ 94 */
95int __chrp 95int
96rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 96rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
97 int len, u32 *val) 97 int len, u32 *val)
98{ 98{
@@ -108,7 +108,7 @@ rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
108 return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; 108 return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
109} 109}
110 110
111int __chrp 111int
112rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 112rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
113 int len, u32 val) 113 int len, u32 val)
114{ 114{
@@ -203,7 +203,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
203 printk ("RTAS supporting Pegasos OF not found, please upgrade" 203 printk ("RTAS supporting Pegasos OF not found, please upgrade"
204 " your firmware\n"); 204 " your firmware\n");
205 } 205 }
206 pci_assign_all_busses = 1; 206 pci_assign_all_buses = 1;
207} 207}
208 208
209void __init 209void __init
diff --git a/arch/ppc/platforms/chrp_setup.c b/arch/ppc/platforms/chrp_setup.c
index 66346f0de7ec..56c53bb3dfd4 100644
--- a/arch/ppc/platforms/chrp_setup.c
+++ b/arch/ppc/platforms/chrp_setup.c
@@ -104,7 +104,7 @@ static const char *gg2_cachemodes[4] = {
104 "Disabled", "Write-Through", "Copy-Back", "Transparent Mode" 104 "Disabled", "Write-Through", "Copy-Back", "Transparent Mode"
105}; 105};
106 106
107int __chrp 107int
108chrp_show_cpuinfo(struct seq_file *m) 108chrp_show_cpuinfo(struct seq_file *m)
109{ 109{
110 int i, sdramen; 110 int i, sdramen;
@@ -302,7 +302,7 @@ void __init chrp_setup_arch(void)
302 pci_create_OF_bus_map(); 302 pci_create_OF_bus_map();
303} 303}
304 304
305void __chrp 305void
306chrp_event_scan(void) 306chrp_event_scan(void)
307{ 307{
308 unsigned char log[1024]; 308 unsigned char log[1024];
@@ -313,7 +313,7 @@ chrp_event_scan(void)
313 ppc_md.heartbeat_count = ppc_md.heartbeat_reset; 313 ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
314} 314}
315 315
316void __chrp 316void
317chrp_restart(char *cmd) 317chrp_restart(char *cmd)
318{ 318{
319 printk("RTAS system-reboot returned %d\n", 319 printk("RTAS system-reboot returned %d\n",
@@ -321,7 +321,7 @@ chrp_restart(char *cmd)
321 for (;;); 321 for (;;);
322} 322}
323 323
324void __chrp 324void
325chrp_power_off(void) 325chrp_power_off(void)
326{ 326{
327 /* allow power on only with power button press */ 327 /* allow power on only with power button press */
@@ -330,20 +330,12 @@ chrp_power_off(void)
330 for (;;); 330 for (;;);
331} 331}
332 332
333void __chrp 333void
334chrp_halt(void) 334chrp_halt(void)
335{ 335{
336 chrp_power_off(); 336 chrp_power_off();
337} 337}
338 338
339u_int __chrp
340chrp_irq_canonicalize(u_int irq)
341{
342 if (irq == 2)
343 return 9;
344 return irq;
345}
346
347/* 339/*
348 * Finds the open-pic node and sets OpenPIC_Addr based on its reg property. 340 * Finds the open-pic node and sets OpenPIC_Addr based on its reg property.
349 * Then checks if it has an interrupt-ranges property. If it does then 341 * Then checks if it has an interrupt-ranges property. If it does then
@@ -499,6 +491,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
499 DMA_MODE_READ = 0x44; 491 DMA_MODE_READ = 0x44;
500 DMA_MODE_WRITE = 0x48; 492 DMA_MODE_WRITE = 0x48;
501 isa_io_base = CHRP_ISA_IO_BASE; /* default value */ 493 isa_io_base = CHRP_ISA_IO_BASE; /* default value */
494 ppc_do_canonicalize_irqs = 1;
502 495
503 if (root) 496 if (root)
504 machine = get_property(root, "model", NULL); 497 machine = get_property(root, "model", NULL);
@@ -517,7 +510,6 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
517 ppc_md.show_percpuinfo = of_show_percpuinfo; 510 ppc_md.show_percpuinfo = of_show_percpuinfo;
518 ppc_md.show_cpuinfo = chrp_show_cpuinfo; 511 ppc_md.show_cpuinfo = chrp_show_cpuinfo;
519 512
520 ppc_md.irq_canonicalize = chrp_irq_canonicalize;
521 ppc_md.init_IRQ = chrp_init_IRQ; 513 ppc_md.init_IRQ = chrp_init_IRQ;
522 if (_chrp_type == _CHRP_Pegasos) 514 if (_chrp_type == _CHRP_Pegasos)
523 ppc_md.get_irq = i8259_irq; 515 ppc_md.get_irq = i8259_irq;
@@ -561,7 +553,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
561#endif 553#endif
562 554
563#ifdef CONFIG_SMP 555#ifdef CONFIG_SMP
564 ppc_md.smp_ops = &chrp_smp_ops; 556 smp_ops = &chrp_smp_ops;
565#endif /* CONFIG_SMP */ 557#endif /* CONFIG_SMP */
566 558
567 /* 559 /*
@@ -571,7 +563,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
571 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); 563 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
572} 564}
573 565
574void __chrp 566void
575rtas_display_progress(char *s, unsigned short hex) 567rtas_display_progress(char *s, unsigned short hex)
576{ 568{
577 int width; 569 int width;
@@ -598,7 +590,7 @@ rtas_display_progress(char *s, unsigned short hex)
598 call_rtas( "display-character", 1, 1, NULL, ' ' ); 590 call_rtas( "display-character", 1, 1, NULL, ' ' );
599} 591}
600 592
601void __chrp 593void
602rtas_indicator_progress(char *s, unsigned short hex) 594rtas_indicator_progress(char *s, unsigned short hex)
603{ 595{
604 call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex); 596 call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex);
diff --git a/arch/ppc/platforms/chrp_smp.c b/arch/ppc/platforms/chrp_smp.c
index 0ea1f7d9e46a..97e539557ecb 100644
--- a/arch/ppc/platforms/chrp_smp.c
+++ b/arch/ppc/platforms/chrp_smp.c
@@ -31,6 +31,7 @@
31#include <asm/residual.h> 31#include <asm/residual.h>
32#include <asm/time.h> 32#include <asm/time.h>
33#include <asm/open_pic.h> 33#include <asm/open_pic.h>
34#include <asm/machdep.h>
34 35
35extern unsigned long smp_chrp_cpu_nr; 36extern unsigned long smp_chrp_cpu_nr;
36 37
@@ -88,7 +89,7 @@ smp_chrp_take_timebase(void)
88} 89}
89 90
90/* CHRP with openpic */ 91/* CHRP with openpic */
91struct smp_ops_t chrp_smp_ops __chrpdata = { 92struct smp_ops_t chrp_smp_ops = {
92 .message_pass = smp_openpic_message_pass, 93 .message_pass = smp_openpic_message_pass,
93 .probe = smp_chrp_probe, 94 .probe = smp_chrp_probe,
94 .kick_cpu = smp_chrp_kick_cpu, 95 .kick_cpu = smp_chrp_kick_cpu,
diff --git a/arch/ppc/platforms/chrp_time.c b/arch/ppc/platforms/chrp_time.c
index 6037ce7796f5..29d074c305f0 100644
--- a/arch/ppc/platforms/chrp_time.c
+++ b/arch/ppc/platforms/chrp_time.c
@@ -52,7 +52,7 @@ long __init chrp_time_init(void)
52 return 0; 52 return 0;
53} 53}
54 54
55int __chrp chrp_cmos_clock_read(int addr) 55int chrp_cmos_clock_read(int addr)
56{ 56{
57 if (nvram_as1 != 0) 57 if (nvram_as1 != 0)
58 outb(addr>>8, nvram_as1); 58 outb(addr>>8, nvram_as1);
@@ -60,7 +60,7 @@ int __chrp chrp_cmos_clock_read(int addr)
60 return (inb(nvram_data)); 60 return (inb(nvram_data));
61} 61}
62 62
63void __chrp chrp_cmos_clock_write(unsigned long val, int addr) 63void chrp_cmos_clock_write(unsigned long val, int addr)
64{ 64{
65 if (nvram_as1 != 0) 65 if (nvram_as1 != 0)
66 outb(addr>>8, nvram_as1); 66 outb(addr>>8, nvram_as1);
@@ -72,7 +72,7 @@ void __chrp chrp_cmos_clock_write(unsigned long val, int addr)
72/* 72/*
73 * Set the hardware clock. -- Cort 73 * Set the hardware clock. -- Cort
74 */ 74 */
75int __chrp chrp_set_rtc_time(unsigned long nowtime) 75int chrp_set_rtc_time(unsigned long nowtime)
76{ 76{
77 unsigned char save_control, save_freq_select; 77 unsigned char save_control, save_freq_select;
78 struct rtc_time tm; 78 struct rtc_time tm;
@@ -118,7 +118,7 @@ int __chrp chrp_set_rtc_time(unsigned long nowtime)
118 return 0; 118 return 0;
119} 119}
120 120
121unsigned long __chrp chrp_get_rtc_time(void) 121unsigned long chrp_get_rtc_time(void)
122{ 122{
123 unsigned int year, mon, day, hour, min, sec; 123 unsigned int year, mon, day, hour, min, sec;
124 int uip, i; 124 int uip, i;
diff --git a/arch/ppc/platforms/ev64360.c b/arch/ppc/platforms/ev64360.c
index 9811a8a52c25..53388a1c334f 100644
--- a/arch/ppc/platforms/ev64360.c
+++ b/arch/ppc/platforms/ev64360.c
@@ -35,6 +35,7 @@
35#include <asm/bootinfo.h> 35#include <asm/bootinfo.h>
36#include <asm/ppcboot.h> 36#include <asm/ppcboot.h>
37#include <asm/mv64x60.h> 37#include <asm/mv64x60.h>
38#include <asm/machdep.h>
38#include <platforms/ev64360.h> 39#include <platforms/ev64360.h>
39 40
40#define BOARD_VENDOR "Marvell" 41#define BOARD_VENDOR "Marvell"
diff --git a/arch/ppc/platforms/gemini_setup.c b/arch/ppc/platforms/gemini_setup.c
index 3a5ff9fb71d6..729897c59033 100644
--- a/arch/ppc/platforms/gemini_setup.c
+++ b/arch/ppc/platforms/gemini_setup.c
@@ -35,6 +35,7 @@
35#include <asm/time.h> 35#include <asm/time.h>
36#include <asm/open_pic.h> 36#include <asm/open_pic.h>
37#include <asm/bootinfo.h> 37#include <asm/bootinfo.h>
38#include <asm/machdep.h>
38 39
39void gemini_find_bridges(void); 40void gemini_find_bridges(void);
40static int gemini_get_clock_speed(void); 41static int gemini_get_clock_speed(void);
@@ -555,7 +556,6 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
555 556
556 ppc_md.setup_arch = gemini_setup_arch; 557 ppc_md.setup_arch = gemini_setup_arch;
557 ppc_md.show_cpuinfo = gemini_show_cpuinfo; 558 ppc_md.show_cpuinfo = gemini_show_cpuinfo;
558 ppc_md.irq_canonicalize = NULL;
559 ppc_md.init_IRQ = gemini_init_IRQ; 559 ppc_md.init_IRQ = gemini_init_IRQ;
560 ppc_md.get_irq = openpic_get_irq; 560 ppc_md.get_irq = openpic_get_irq;
561 ppc_md.init = NULL; 561 ppc_md.init = NULL;
@@ -575,6 +575,6 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
575 ppc_md.pcibios_fixup_bus = gemini_pcibios_fixup; 575 ppc_md.pcibios_fixup_bus = gemini_pcibios_fixup;
576 576
577#ifdef CONFIG_SMP 577#ifdef CONFIG_SMP
578 ppc_md.smp_ops = &gemini_smp_ops; 578 smp_ops = &gemini_smp_ops;
579#endif /* CONFIG_SMP */ 579#endif /* CONFIG_SMP */
580} 580}
diff --git a/arch/ppc/platforms/hdpu.c b/arch/ppc/platforms/hdpu.c
index ff3796860123..2cc12b04584a 100644
--- a/arch/ppc/platforms/hdpu.c
+++ b/arch/ppc/platforms/hdpu.c
@@ -753,7 +753,7 @@ static int smp_hdpu_probe(void)
753} 753}
754 754
755static void 755static void
756smp_hdpu_message_pass(int target, int msg, unsigned long data, int wait) 756smp_hdpu_message_pass(int target, int msg)
757{ 757{
758 if (msg > 0x3) { 758 if (msg > 0x3) {
759 printk("SMP %d: smp_message_pass: unknown msg %d\n", 759 printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@ -949,7 +949,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
949#endif /* CONFIG_SERIAL_TEXT_DEBUG */ 949#endif /* CONFIG_SERIAL_TEXT_DEBUG */
950 950
951#ifdef CONFIG_SMP 951#ifdef CONFIG_SMP
952 ppc_md.smp_ops = &hdpu_smp_ops; 952 smp_ops = &hdpu_smp_ops;
953#endif /* CONFIG_SMP */ 953#endif /* CONFIG_SMP */
954 954
955#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH) 955#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
diff --git a/arch/ppc/platforms/katana.c b/arch/ppc/platforms/katana.c
index 2b53afae0e9c..a301c5ac58dd 100644
--- a/arch/ppc/platforms/katana.c
+++ b/arch/ppc/platforms/katana.c
@@ -42,6 +42,7 @@
42#include <asm/ppcboot.h> 42#include <asm/ppcboot.h>
43#include <asm/mv64x60.h> 43#include <asm/mv64x60.h>
44#include <platforms/katana.h> 44#include <platforms/katana.h>
45#include <asm/machdep.h>
45 46
46static struct mv64x60_handle bh; 47static struct mv64x60_handle bh;
47static katana_id_t katana_id; 48static katana_id_t katana_id;
@@ -520,7 +521,7 @@ katana_fixup_resources(struct pci_dev *dev)
520{ 521{
521 u16 v16; 522 u16 v16;
522 523
523 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_LINE_SIZE>>2); 524 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES>>2);
524 525
525 pci_read_config_word(dev, PCI_COMMAND, &v16); 526 pci_read_config_word(dev, PCI_COMMAND, &v16);
526 v16 |= PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK; 527 v16 |= PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK;
diff --git a/arch/ppc/platforms/lite5200.c b/arch/ppc/platforms/lite5200.c
index b604cf8b3cae..d44cc991179f 100644
--- a/arch/ppc/platforms/lite5200.c
+++ b/arch/ppc/platforms/lite5200.c
@@ -35,6 +35,7 @@
35#include <asm/io.h> 35#include <asm/io.h>
36#include <asm/mpc52xx.h> 36#include <asm/mpc52xx.h>
37#include <asm/ppc_sys.h> 37#include <asm/ppc_sys.h>
38#include <asm/machdep.h>
38 39
39#include <syslib/mpc52xx_pci.h> 40#include <syslib/mpc52xx_pci.h>
40 41
diff --git a/arch/ppc/platforms/lopec.c b/arch/ppc/platforms/lopec.c
index a5569525e0af..800c56a07a97 100644
--- a/arch/ppc/platforms/lopec.c
+++ b/arch/ppc/platforms/lopec.c
@@ -144,15 +144,6 @@ lopec_show_cpuinfo(struct seq_file *m)
144 return 0; 144 return 0;
145} 145}
146 146
147static u32
148lopec_irq_canonicalize(u32 irq)
149{
150 if (irq == 2)
151 return 9;
152 else
153 return irq;
154}
155
156static void 147static void
157lopec_restart(char *cmd) 148lopec_restart(char *cmd)
158{ 149{
@@ -379,10 +370,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
379 ISA_DMA_THRESHOLD = 0x00ffffff; 370 ISA_DMA_THRESHOLD = 0x00ffffff;
380 DMA_MODE_READ = 0x44; 371 DMA_MODE_READ = 0x44;
381 DMA_MODE_WRITE = 0x48; 372 DMA_MODE_WRITE = 0x48;
373 ppc_do_canonicalize_irqs = 1;
382 374
383 ppc_md.setup_arch = lopec_setup_arch; 375 ppc_md.setup_arch = lopec_setup_arch;
384 ppc_md.show_cpuinfo = lopec_show_cpuinfo; 376 ppc_md.show_cpuinfo = lopec_show_cpuinfo;
385 ppc_md.irq_canonicalize = lopec_irq_canonicalize;
386 ppc_md.init_IRQ = lopec_init_IRQ; 377 ppc_md.init_IRQ = lopec_init_IRQ;
387 ppc_md.get_irq = openpic_get_irq; 378 ppc_md.get_irq = openpic_get_irq;
388 379
diff --git a/arch/ppc/platforms/pal4_setup.c b/arch/ppc/platforms/pal4_setup.c
index 12446b93e38c..f93a3f871932 100644
--- a/arch/ppc/platforms/pal4_setup.c
+++ b/arch/ppc/platforms/pal4_setup.c
@@ -28,6 +28,7 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/todc.h> 29#include <asm/todc.h>
30#include <asm/bootinfo.h> 30#include <asm/bootinfo.h>
31#include <asm/machdep.h>
31 32
32#include <syslib/cpc700.h> 33#include <syslib/cpc700.h>
33 34
diff --git a/arch/ppc/platforms/pmac_backlight.c b/arch/ppc/platforms/pmac_backlight.c
index ed2b1cebc19a..8be2f7d071f0 100644
--- a/arch/ppc/platforms/pmac_backlight.c
+++ b/arch/ppc/platforms/pmac_backlight.c
@@ -37,7 +37,7 @@ static int backlight_req_enable = -1;
37static void backlight_callback(void *); 37static void backlight_callback(void *);
38static DECLARE_WORK(backlight_work, backlight_callback, NULL); 38static DECLARE_WORK(backlight_work, backlight_callback, NULL);
39 39
40void __pmac register_backlight_controller(struct backlight_controller *ctrler, 40void register_backlight_controller(struct backlight_controller *ctrler,
41 void *data, char *type) 41 void *data, char *type)
42{ 42{
43 struct device_node* bk_node; 43 struct device_node* bk_node;
@@ -99,7 +99,7 @@ void __pmac register_backlight_controller(struct backlight_controller *ctrler,
99} 99}
100EXPORT_SYMBOL(register_backlight_controller); 100EXPORT_SYMBOL(register_backlight_controller);
101 101
102void __pmac unregister_backlight_controller(struct backlight_controller 102void unregister_backlight_controller(struct backlight_controller
103 *ctrler, void *data) 103 *ctrler, void *data)
104{ 104{
105 /* We keep the current backlight level (for now) */ 105 /* We keep the current backlight level (for now) */
@@ -108,7 +108,7 @@ void __pmac unregister_backlight_controller(struct backlight_controller
108} 108}
109EXPORT_SYMBOL(unregister_backlight_controller); 109EXPORT_SYMBOL(unregister_backlight_controller);
110 110
111static int __pmac __set_backlight_enable(int enable) 111static int __set_backlight_enable(int enable)
112{ 112{
113 int rc; 113 int rc;
114 114
@@ -122,7 +122,7 @@ static int __pmac __set_backlight_enable(int enable)
122 release_console_sem(); 122 release_console_sem();
123 return rc; 123 return rc;
124} 124}
125int __pmac set_backlight_enable(int enable) 125int set_backlight_enable(int enable)
126{ 126{
127 if (!backlighter) 127 if (!backlighter)
128 return -ENODEV; 128 return -ENODEV;
@@ -133,7 +133,7 @@ int __pmac set_backlight_enable(int enable)
133 133
134EXPORT_SYMBOL(set_backlight_enable); 134EXPORT_SYMBOL(set_backlight_enable);
135 135
136int __pmac get_backlight_enable(void) 136int get_backlight_enable(void)
137{ 137{
138 if (!backlighter) 138 if (!backlighter)
139 return -ENODEV; 139 return -ENODEV;
@@ -141,7 +141,7 @@ int __pmac get_backlight_enable(void)
141} 141}
142EXPORT_SYMBOL(get_backlight_enable); 142EXPORT_SYMBOL(get_backlight_enable);
143 143
144static int __pmac __set_backlight_level(int level) 144static int __set_backlight_level(int level)
145{ 145{
146 int rc = 0; 146 int rc = 0;
147 147
@@ -165,7 +165,7 @@ static int __pmac __set_backlight_level(int level)
165 } 165 }
166 return rc; 166 return rc;
167} 167}
168int __pmac set_backlight_level(int level) 168int set_backlight_level(int level)
169{ 169{
170 if (!backlighter) 170 if (!backlighter)
171 return -ENODEV; 171 return -ENODEV;
@@ -176,7 +176,7 @@ int __pmac set_backlight_level(int level)
176 176
177EXPORT_SYMBOL(set_backlight_level); 177EXPORT_SYMBOL(set_backlight_level);
178 178
179int __pmac get_backlight_level(void) 179int get_backlight_level(void)
180{ 180{
181 if (!backlighter) 181 if (!backlighter)
182 return -ENODEV; 182 return -ENODEV;
diff --git a/arch/ppc/platforms/pmac_cpufreq.c b/arch/ppc/platforms/pmac_cpufreq.c
index d4bc5f67ec53..fba7e4d7c0bf 100644
--- a/arch/ppc/platforms/pmac_cpufreq.c
+++ b/arch/ppc/platforms/pmac_cpufreq.c
@@ -136,7 +136,7 @@ static inline void debug_calc_bogomips(void)
136 136
137/* Switch CPU speed under 750FX CPU control 137/* Switch CPU speed under 750FX CPU control
138 */ 138 */
139static int __pmac cpu_750fx_cpu_speed(int low_speed) 139static int cpu_750fx_cpu_speed(int low_speed)
140{ 140{
141 u32 hid2; 141 u32 hid2;
142 142
@@ -172,7 +172,7 @@ static int __pmac cpu_750fx_cpu_speed(int low_speed)
172 return 0; 172 return 0;
173} 173}
174 174
175static unsigned int __pmac cpu_750fx_get_cpu_speed(void) 175static unsigned int cpu_750fx_get_cpu_speed(void)
176{ 176{
177 if (mfspr(SPRN_HID1) & HID1_PS) 177 if (mfspr(SPRN_HID1) & HID1_PS)
178 return low_freq; 178 return low_freq;
@@ -181,7 +181,7 @@ static unsigned int __pmac cpu_750fx_get_cpu_speed(void)
181} 181}
182 182
183/* Switch CPU speed using DFS */ 183/* Switch CPU speed using DFS */
184static int __pmac dfs_set_cpu_speed(int low_speed) 184static int dfs_set_cpu_speed(int low_speed)
185{ 185{
186 if (low_speed == 0) { 186 if (low_speed == 0) {
187 /* ramping up, set voltage first */ 187 /* ramping up, set voltage first */
@@ -205,7 +205,7 @@ static int __pmac dfs_set_cpu_speed(int low_speed)
205 return 0; 205 return 0;
206} 206}
207 207
208static unsigned int __pmac dfs_get_cpu_speed(void) 208static unsigned int dfs_get_cpu_speed(void)
209{ 209{
210 if (mfspr(SPRN_HID1) & HID1_DFS) 210 if (mfspr(SPRN_HID1) & HID1_DFS)
211 return low_freq; 211 return low_freq;
@@ -216,7 +216,7 @@ static unsigned int __pmac dfs_get_cpu_speed(void)
216 216
217/* Switch CPU speed using slewing GPIOs 217/* Switch CPU speed using slewing GPIOs
218 */ 218 */
219static int __pmac gpios_set_cpu_speed(int low_speed) 219static int gpios_set_cpu_speed(int low_speed)
220{ 220{
221 int gpio, timeout = 0; 221 int gpio, timeout = 0;
222 222
@@ -258,7 +258,7 @@ static int __pmac gpios_set_cpu_speed(int low_speed)
258 258
259/* Switch CPU speed under PMU control 259/* Switch CPU speed under PMU control
260 */ 260 */
261static int __pmac pmu_set_cpu_speed(int low_speed) 261static int pmu_set_cpu_speed(int low_speed)
262{ 262{
263 struct adb_request req; 263 struct adb_request req;
264 unsigned long save_l2cr; 264 unsigned long save_l2cr;
@@ -354,7 +354,7 @@ static int __pmac pmu_set_cpu_speed(int low_speed)
354 return 0; 354 return 0;
355} 355}
356 356
357static int __pmac do_set_cpu_speed(int speed_mode, int notify) 357static int do_set_cpu_speed(int speed_mode, int notify)
358{ 358{
359 struct cpufreq_freqs freqs; 359 struct cpufreq_freqs freqs;
360 unsigned long l3cr; 360 unsigned long l3cr;
@@ -391,17 +391,17 @@ static int __pmac do_set_cpu_speed(int speed_mode, int notify)
391 return 0; 391 return 0;
392} 392}
393 393
394static unsigned int __pmac pmac_cpufreq_get_speed(unsigned int cpu) 394static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
395{ 395{
396 return cur_freq; 396 return cur_freq;
397} 397}
398 398
399static int __pmac pmac_cpufreq_verify(struct cpufreq_policy *policy) 399static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
400{ 400{
401 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); 401 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
402} 402}
403 403
404static int __pmac pmac_cpufreq_target( struct cpufreq_policy *policy, 404static int pmac_cpufreq_target( struct cpufreq_policy *policy,
405 unsigned int target_freq, 405 unsigned int target_freq,
406 unsigned int relation) 406 unsigned int relation)
407{ 407{
@@ -414,13 +414,13 @@ static int __pmac pmac_cpufreq_target( struct cpufreq_policy *policy,
414 return do_set_cpu_speed(newstate, 1); 414 return do_set_cpu_speed(newstate, 1);
415} 415}
416 416
417unsigned int __pmac pmac_get_one_cpufreq(int i) 417unsigned int pmac_get_one_cpufreq(int i)
418{ 418{
419 /* Supports only one CPU for now */ 419 /* Supports only one CPU for now */
420 return (i == 0) ? cur_freq : 0; 420 return (i == 0) ? cur_freq : 0;
421} 421}
422 422
423static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) 423static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
424{ 424{
425 if (policy->cpu != 0) 425 if (policy->cpu != 0)
426 return -ENODEV; 426 return -ENODEV;
@@ -433,7 +433,7 @@ static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
433 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); 433 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
434} 434}
435 435
436static u32 __pmac read_gpio(struct device_node *np) 436static u32 read_gpio(struct device_node *np)
437{ 437{
438 u32 *reg = (u32 *)get_property(np, "reg", NULL); 438 u32 *reg = (u32 *)get_property(np, "reg", NULL);
439 u32 offset; 439 u32 offset;
@@ -452,7 +452,7 @@ static u32 __pmac read_gpio(struct device_node *np)
452 return offset; 452 return offset;
453} 453}
454 454
455static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) 455static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
456{ 456{
457 /* Ok, this could be made a bit smarter, but let's be robust for now. We 457 /* Ok, this could be made a bit smarter, but let's be robust for now. We
458 * always force a speed change to high speed before sleep, to make sure 458 * always force a speed change to high speed before sleep, to make sure
@@ -468,7 +468,7 @@ static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message
468 return 0; 468 return 0;
469} 469}
470 470
471static int __pmac pmac_cpufreq_resume(struct cpufreq_policy *policy) 471static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
472{ 472{
473 /* If we resume, first check if we have a get() function */ 473 /* If we resume, first check if we have a get() function */
474 if (get_speed_proc) 474 if (get_speed_proc)
@@ -501,7 +501,7 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
501}; 501};
502 502
503 503
504static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) 504static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
505{ 505{
506 struct device_node *volt_gpio_np = of_find_node_by_name(NULL, 506 struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
507 "voltage-gpio"); 507 "voltage-gpio");
@@ -593,7 +593,7 @@ static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
593 return 0; 593 return 0;
594} 594}
595 595
596static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode) 596static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
597{ 597{
598 struct device_node *volt_gpio_np; 598 struct device_node *volt_gpio_np;
599 599
@@ -620,7 +620,7 @@ static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
620 return 0; 620 return 0;
621} 621}
622 622
623static int __pmac pmac_cpufreq_init_750FX(struct device_node *cpunode) 623static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
624{ 624{
625 struct device_node *volt_gpio_np; 625 struct device_node *volt_gpio_np;
626 u32 pvr, *value; 626 u32 pvr, *value;
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c
index dd6d45ae0501..58884a63ebdb 100644
--- a/arch/ppc/platforms/pmac_feature.c
+++ b/arch/ppc/platforms/pmac_feature.c
@@ -63,7 +63,7 @@ extern struct device_node *k2_skiplist[2];
63 * We use a single global lock to protect accesses. Each driver has 63 * We use a single global lock to protect accesses. Each driver has
64 * to take care of its own locking 64 * to take care of its own locking
65 */ 65 */
66static DEFINE_SPINLOCK(feature_lock __pmacdata); 66static DEFINE_SPINLOCK(feature_lock);
67 67
68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); 68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); 69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
@@ -72,9 +72,9 @@ static DEFINE_SPINLOCK(feature_lock __pmacdata);
72/* 72/*
73 * Instance of some macio stuffs 73 * Instance of some macio stuffs
74 */ 74 */
75struct macio_chip macio_chips[MAX_MACIO_CHIPS] __pmacdata; 75struct macio_chip macio_chips[MAX_MACIO_CHIPS];
76 76
77struct macio_chip* __pmac macio_find(struct device_node* child, int type) 77struct macio_chip* macio_find(struct device_node* child, int type)
78{ 78{
79 while(child) { 79 while(child) {
80 int i; 80 int i;
@@ -89,7 +89,7 @@ struct macio_chip* __pmac macio_find(struct device_node* child, int type)
89} 89}
90EXPORT_SYMBOL_GPL(macio_find); 90EXPORT_SYMBOL_GPL(macio_find);
91 91
92static const char* macio_names[] __pmacdata = 92static const char* macio_names[] =
93{ 93{
94 "Unknown", 94 "Unknown",
95 "Grand Central", 95 "Grand Central",
@@ -116,10 +116,10 @@ static const char* macio_names[] __pmacdata =
116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v))) 116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v))) 117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
118 118
119static struct device_node* uninorth_node __pmacdata; 119static struct device_node* uninorth_node;
120static u32 __iomem * uninorth_base __pmacdata; 120static u32 __iomem * uninorth_base;
121static u32 uninorth_rev __pmacdata; 121static u32 uninorth_rev;
122static int uninorth_u3 __pmacdata; 122static int uninorth_u3;
123static void __iomem *u3_ht; 123static void __iomem *u3_ht;
124 124
125/* 125/*
@@ -142,13 +142,13 @@ struct pmac_mb_def
142 struct feature_table_entry* features; 142 struct feature_table_entry* features;
143 unsigned long board_flags; 143 unsigned long board_flags;
144}; 144};
145static struct pmac_mb_def pmac_mb __pmacdata; 145static struct pmac_mb_def pmac_mb;
146 146
147/* 147/*
148 * Here are the chip specific feature functions 148 * Here are the chip specific feature functions
149 */ 149 */
150 150
151static inline int __pmac 151static inline int
152simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int value) 152simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int value)
153{ 153{
154 struct macio_chip* macio; 154 struct macio_chip* macio;
@@ -170,7 +170,7 @@ simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int
170 170
171#ifndef CONFIG_POWER4 171#ifndef CONFIG_POWER4
172 172
173static long __pmac 173static long
174ohare_htw_scc_enable(struct device_node* node, long param, long value) 174ohare_htw_scc_enable(struct device_node* node, long param, long value)
175{ 175{
176 struct macio_chip* macio; 176 struct macio_chip* macio;
@@ -263,21 +263,21 @@ ohare_htw_scc_enable(struct device_node* node, long param, long value)
263 return 0; 263 return 0;
264} 264}
265 265
266static long __pmac 266static long
267ohare_floppy_enable(struct device_node* node, long param, long value) 267ohare_floppy_enable(struct device_node* node, long param, long value)
268{ 268{
269 return simple_feature_tweak(node, macio_ohare, 269 return simple_feature_tweak(node, macio_ohare,
270 OHARE_FCR, OH_FLOPPY_ENABLE, value); 270 OHARE_FCR, OH_FLOPPY_ENABLE, value);
271} 271}
272 272
273static long __pmac 273static long
274ohare_mesh_enable(struct device_node* node, long param, long value) 274ohare_mesh_enable(struct device_node* node, long param, long value)
275{ 275{
276 return simple_feature_tweak(node, macio_ohare, 276 return simple_feature_tweak(node, macio_ohare,
277 OHARE_FCR, OH_MESH_ENABLE, value); 277 OHARE_FCR, OH_MESH_ENABLE, value);
278} 278}
279 279
280static long __pmac 280static long
281ohare_ide_enable(struct device_node* node, long param, long value) 281ohare_ide_enable(struct device_node* node, long param, long value)
282{ 282{
283 switch(param) { 283 switch(param) {
@@ -298,7 +298,7 @@ ohare_ide_enable(struct device_node* node, long param, long value)
298 } 298 }
299} 299}
300 300
301static long __pmac 301static long
302ohare_ide_reset(struct device_node* node, long param, long value) 302ohare_ide_reset(struct device_node* node, long param, long value)
303{ 303{
304 switch(param) { 304 switch(param) {
@@ -313,7 +313,7 @@ ohare_ide_reset(struct device_node* node, long param, long value)
313 } 313 }
314} 314}
315 315
316static long __pmac 316static long
317ohare_sleep_state(struct device_node* node, long param, long value) 317ohare_sleep_state(struct device_node* node, long param, long value)
318{ 318{
319 struct macio_chip* macio = &macio_chips[0]; 319 struct macio_chip* macio = &macio_chips[0];
@@ -329,7 +329,7 @@ ohare_sleep_state(struct device_node* node, long param, long value)
329 return 0; 329 return 0;
330} 330}
331 331
332static long __pmac 332static long
333heathrow_modem_enable(struct device_node* node, long param, long value) 333heathrow_modem_enable(struct device_node* node, long param, long value)
334{ 334{
335 struct macio_chip* macio; 335 struct macio_chip* macio;
@@ -373,7 +373,7 @@ heathrow_modem_enable(struct device_node* node, long param, long value)
373 return 0; 373 return 0;
374} 374}
375 375
376static long __pmac 376static long
377heathrow_floppy_enable(struct device_node* node, long param, long value) 377heathrow_floppy_enable(struct device_node* node, long param, long value)
378{ 378{
379 return simple_feature_tweak(node, macio_unknown, 379 return simple_feature_tweak(node, macio_unknown,
@@ -382,7 +382,7 @@ heathrow_floppy_enable(struct device_node* node, long param, long value)
382 value); 382 value);
383} 383}
384 384
385static long __pmac 385static long
386heathrow_mesh_enable(struct device_node* node, long param, long value) 386heathrow_mesh_enable(struct device_node* node, long param, long value)
387{ 387{
388 struct macio_chip* macio; 388 struct macio_chip* macio;
@@ -411,7 +411,7 @@ heathrow_mesh_enable(struct device_node* node, long param, long value)
411 return 0; 411 return 0;
412} 412}
413 413
414static long __pmac 414static long
415heathrow_ide_enable(struct device_node* node, long param, long value) 415heathrow_ide_enable(struct device_node* node, long param, long value)
416{ 416{
417 switch(param) { 417 switch(param) {
@@ -426,7 +426,7 @@ heathrow_ide_enable(struct device_node* node, long param, long value)
426 } 426 }
427} 427}
428 428
429static long __pmac 429static long
430heathrow_ide_reset(struct device_node* node, long param, long value) 430heathrow_ide_reset(struct device_node* node, long param, long value)
431{ 431{
432 switch(param) { 432 switch(param) {
@@ -441,7 +441,7 @@ heathrow_ide_reset(struct device_node* node, long param, long value)
441 } 441 }
442} 442}
443 443
444static long __pmac 444static long
445heathrow_bmac_enable(struct device_node* node, long param, long value) 445heathrow_bmac_enable(struct device_node* node, long param, long value)
446{ 446{
447 struct macio_chip* macio; 447 struct macio_chip* macio;
@@ -470,7 +470,7 @@ heathrow_bmac_enable(struct device_node* node, long param, long value)
470 return 0; 470 return 0;
471} 471}
472 472
473static long __pmac 473static long
474heathrow_sound_enable(struct device_node* node, long param, long value) 474heathrow_sound_enable(struct device_node* node, long param, long value)
475{ 475{
476 struct macio_chip* macio; 476 struct macio_chip* macio;
@@ -501,16 +501,16 @@ heathrow_sound_enable(struct device_node* node, long param, long value)
501 return 0; 501 return 0;
502} 502}
503 503
504static u32 save_fcr[6] __pmacdata; 504static u32 save_fcr[6];
505static u32 save_mbcr __pmacdata; 505static u32 save_mbcr;
506static u32 save_gpio_levels[2] __pmacdata; 506static u32 save_gpio_levels[2];
507static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT] __pmacdata; 507static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
508static u8 save_gpio_normal[KEYLARGO_GPIO_CNT] __pmacdata; 508static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
509static u32 save_unin_clock_ctl __pmacdata; 509static u32 save_unin_clock_ctl;
510static struct dbdma_regs save_dbdma[13] __pmacdata; 510static struct dbdma_regs save_dbdma[13];
511static struct dbdma_regs save_alt_dbdma[13] __pmacdata; 511static struct dbdma_regs save_alt_dbdma[13];
512 512
513static void __pmac 513static void
514dbdma_save(struct macio_chip* macio, struct dbdma_regs* save) 514dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
515{ 515{
516 int i; 516 int i;
@@ -527,7 +527,7 @@ dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
527 } 527 }
528} 528}
529 529
530static void __pmac 530static void
531dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save) 531dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
532{ 532{
533 int i; 533 int i;
@@ -547,7 +547,7 @@ dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
547 } 547 }
548} 548}
549 549
550static void __pmac 550static void
551heathrow_sleep(struct macio_chip* macio, int secondary) 551heathrow_sleep(struct macio_chip* macio, int secondary)
552{ 552{
553 if (secondary) { 553 if (secondary) {
@@ -580,7 +580,7 @@ heathrow_sleep(struct macio_chip* macio, int secondary)
580 (void)MACIO_IN32(HEATHROW_FCR); 580 (void)MACIO_IN32(HEATHROW_FCR);
581} 581}
582 582
583static void __pmac 583static void
584heathrow_wakeup(struct macio_chip* macio, int secondary) 584heathrow_wakeup(struct macio_chip* macio, int secondary)
585{ 585{
586 if (secondary) { 586 if (secondary) {
@@ -605,7 +605,7 @@ heathrow_wakeup(struct macio_chip* macio, int secondary)
605 } 605 }
606} 606}
607 607
608static long __pmac 608static long
609heathrow_sleep_state(struct device_node* node, long param, long value) 609heathrow_sleep_state(struct device_node* node, long param, long value)
610{ 610{
611 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) 611 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
@@ -622,7 +622,7 @@ heathrow_sleep_state(struct device_node* node, long param, long value)
622 return 0; 622 return 0;
623} 623}
624 624
625static long __pmac 625static long
626core99_scc_enable(struct device_node* node, long param, long value) 626core99_scc_enable(struct device_node* node, long param, long value)
627{ 627{
628 struct macio_chip* macio; 628 struct macio_chip* macio;
@@ -723,7 +723,7 @@ core99_scc_enable(struct device_node* node, long param, long value)
723 return 0; 723 return 0;
724} 724}
725 725
726static long __pmac 726static long
727core99_modem_enable(struct device_node* node, long param, long value) 727core99_modem_enable(struct device_node* node, long param, long value)
728{ 728{
729 struct macio_chip* macio; 729 struct macio_chip* macio;
@@ -775,7 +775,7 @@ core99_modem_enable(struct device_node* node, long param, long value)
775 return 0; 775 return 0;
776} 776}
777 777
778static long __pmac 778static long
779pangea_modem_enable(struct device_node* node, long param, long value) 779pangea_modem_enable(struct device_node* node, long param, long value)
780{ 780{
781 struct macio_chip* macio; 781 struct macio_chip* macio;
@@ -830,7 +830,7 @@ pangea_modem_enable(struct device_node* node, long param, long value)
830 return 0; 830 return 0;
831} 831}
832 832
833static long __pmac 833static long
834core99_ata100_enable(struct device_node* node, long value) 834core99_ata100_enable(struct device_node* node, long value)
835{ 835{
836 unsigned long flags; 836 unsigned long flags;
@@ -860,7 +860,7 @@ core99_ata100_enable(struct device_node* node, long value)
860 return 0; 860 return 0;
861} 861}
862 862
863static long __pmac 863static long
864core99_ide_enable(struct device_node* node, long param, long value) 864core99_ide_enable(struct device_node* node, long param, long value)
865{ 865{
866 /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2 866 /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
@@ -883,7 +883,7 @@ core99_ide_enable(struct device_node* node, long param, long value)
883 } 883 }
884} 884}
885 885
886static long __pmac 886static long
887core99_ide_reset(struct device_node* node, long param, long value) 887core99_ide_reset(struct device_node* node, long param, long value)
888{ 888{
889 switch(param) { 889 switch(param) {
@@ -901,7 +901,7 @@ core99_ide_reset(struct device_node* node, long param, long value)
901 } 901 }
902} 902}
903 903
904static long __pmac 904static long
905core99_gmac_enable(struct device_node* node, long param, long value) 905core99_gmac_enable(struct device_node* node, long param, long value)
906{ 906{
907 unsigned long flags; 907 unsigned long flags;
@@ -918,7 +918,7 @@ core99_gmac_enable(struct device_node* node, long param, long value)
918 return 0; 918 return 0;
919} 919}
920 920
921static long __pmac 921static long
922core99_gmac_phy_reset(struct device_node* node, long param, long value) 922core99_gmac_phy_reset(struct device_node* node, long param, long value)
923{ 923{
924 unsigned long flags; 924 unsigned long flags;
@@ -943,7 +943,7 @@ core99_gmac_phy_reset(struct device_node* node, long param, long value)
943 return 0; 943 return 0;
944} 944}
945 945
946static long __pmac 946static long
947core99_sound_chip_enable(struct device_node* node, long param, long value) 947core99_sound_chip_enable(struct device_node* node, long param, long value)
948{ 948{
949 struct macio_chip* macio; 949 struct macio_chip* macio;
@@ -973,7 +973,7 @@ core99_sound_chip_enable(struct device_node* node, long param, long value)
973 return 0; 973 return 0;
974} 974}
975 975
976static long __pmac 976static long
977core99_airport_enable(struct device_node* node, long param, long value) 977core99_airport_enable(struct device_node* node, long param, long value)
978{ 978{
979 struct macio_chip* macio; 979 struct macio_chip* macio;
@@ -1060,7 +1060,7 @@ core99_airport_enable(struct device_node* node, long param, long value)
1060} 1060}
1061 1061
1062#ifdef CONFIG_SMP 1062#ifdef CONFIG_SMP
1063static long __pmac 1063static long
1064core99_reset_cpu(struct device_node* node, long param, long value) 1064core99_reset_cpu(struct device_node* node, long param, long value)
1065{ 1065{
1066 unsigned int reset_io = 0; 1066 unsigned int reset_io = 0;
@@ -1104,7 +1104,7 @@ core99_reset_cpu(struct device_node* node, long param, long value)
1104} 1104}
1105#endif /* CONFIG_SMP */ 1105#endif /* CONFIG_SMP */
1106 1106
1107static long __pmac 1107static long
1108core99_usb_enable(struct device_node* node, long param, long value) 1108core99_usb_enable(struct device_node* node, long param, long value)
1109{ 1109{
1110 struct macio_chip* macio; 1110 struct macio_chip* macio;
@@ -1257,7 +1257,7 @@ core99_usb_enable(struct device_node* node, long param, long value)
1257 return 0; 1257 return 0;
1258} 1258}
1259 1259
1260static long __pmac 1260static long
1261core99_firewire_enable(struct device_node* node, long param, long value) 1261core99_firewire_enable(struct device_node* node, long param, long value)
1262{ 1262{
1263 unsigned long flags; 1263 unsigned long flags;
@@ -1284,7 +1284,7 @@ core99_firewire_enable(struct device_node* node, long param, long value)
1284 return 0; 1284 return 0;
1285} 1285}
1286 1286
1287static long __pmac 1287static long
1288core99_firewire_cable_power(struct device_node* node, long param, long value) 1288core99_firewire_cable_power(struct device_node* node, long param, long value)
1289{ 1289{
1290 unsigned long flags; 1290 unsigned long flags;
@@ -1315,7 +1315,7 @@ core99_firewire_cable_power(struct device_node* node, long param, long value)
1315 return 0; 1315 return 0;
1316} 1316}
1317 1317
1318static long __pmac 1318static long
1319intrepid_aack_delay_enable(struct device_node* node, long param, long value) 1319intrepid_aack_delay_enable(struct device_node* node, long param, long value)
1320{ 1320{
1321 unsigned long flags; 1321 unsigned long flags;
@@ -1336,7 +1336,7 @@ intrepid_aack_delay_enable(struct device_node* node, long param, long value)
1336 1336
1337#endif /* CONFIG_POWER4 */ 1337#endif /* CONFIG_POWER4 */
1338 1338
1339static long __pmac 1339static long
1340core99_read_gpio(struct device_node* node, long param, long value) 1340core99_read_gpio(struct device_node* node, long param, long value)
1341{ 1341{
1342 struct macio_chip* macio = &macio_chips[0]; 1342 struct macio_chip* macio = &macio_chips[0];
@@ -1345,7 +1345,7 @@ core99_read_gpio(struct device_node* node, long param, long value)
1345} 1345}
1346 1346
1347 1347
1348static long __pmac 1348static long
1349core99_write_gpio(struct device_node* node, long param, long value) 1349core99_write_gpio(struct device_node* node, long param, long value)
1350{ 1350{
1351 struct macio_chip* macio = &macio_chips[0]; 1351 struct macio_chip* macio = &macio_chips[0];
@@ -1356,7 +1356,7 @@ core99_write_gpio(struct device_node* node, long param, long value)
1356 1356
1357#ifdef CONFIG_POWER4 1357#ifdef CONFIG_POWER4
1358 1358
1359static long __pmac 1359static long
1360g5_gmac_enable(struct device_node* node, long param, long value) 1360g5_gmac_enable(struct device_node* node, long param, long value)
1361{ 1361{
1362 struct macio_chip* macio = &macio_chips[0]; 1362 struct macio_chip* macio = &macio_chips[0];
@@ -1380,7 +1380,7 @@ g5_gmac_enable(struct device_node* node, long param, long value)
1380 return 0; 1380 return 0;
1381} 1381}
1382 1382
1383static long __pmac 1383static long
1384g5_fw_enable(struct device_node* node, long param, long value) 1384g5_fw_enable(struct device_node* node, long param, long value)
1385{ 1385{
1386 struct macio_chip* macio = &macio_chips[0]; 1386 struct macio_chip* macio = &macio_chips[0];
@@ -1403,7 +1403,7 @@ g5_fw_enable(struct device_node* node, long param, long value)
1403 return 0; 1403 return 0;
1404} 1404}
1405 1405
1406static long __pmac 1406static long
1407g5_mpic_enable(struct device_node* node, long param, long value) 1407g5_mpic_enable(struct device_node* node, long param, long value)
1408{ 1408{
1409 unsigned long flags; 1409 unsigned long flags;
@@ -1419,7 +1419,7 @@ g5_mpic_enable(struct device_node* node, long param, long value)
1419} 1419}
1420 1420
1421#ifdef CONFIG_SMP 1421#ifdef CONFIG_SMP
1422static long __pmac 1422static long
1423g5_reset_cpu(struct device_node* node, long param, long value) 1423g5_reset_cpu(struct device_node* node, long param, long value)
1424{ 1424{
1425 unsigned int reset_io = 0; 1425 unsigned int reset_io = 0;
@@ -1465,7 +1465,7 @@ g5_reset_cpu(struct device_node* node, long param, long value)
1465 * This takes the second CPU off the bus on dual CPU machines 1465 * This takes the second CPU off the bus on dual CPU machines
1466 * running UP 1466 * running UP
1467 */ 1467 */
1468void __pmac g5_phy_disable_cpu1(void) 1468void g5_phy_disable_cpu1(void)
1469{ 1469{
1470 UN_OUT(U3_API_PHY_CONFIG_1, 0); 1470 UN_OUT(U3_API_PHY_CONFIG_1, 0);
1471} 1471}
@@ -1474,7 +1474,7 @@ void __pmac g5_phy_disable_cpu1(void)
1474 1474
1475#ifndef CONFIG_POWER4 1475#ifndef CONFIG_POWER4
1476 1476
1477static void __pmac 1477static void
1478keylargo_shutdown(struct macio_chip* macio, int sleep_mode) 1478keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
1479{ 1479{
1480 u32 temp; 1480 u32 temp;
@@ -1528,7 +1528,7 @@ keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
1528 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); 1528 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1529} 1529}
1530 1530
1531static void __pmac 1531static void
1532pangea_shutdown(struct macio_chip* macio, int sleep_mode) 1532pangea_shutdown(struct macio_chip* macio, int sleep_mode)
1533{ 1533{
1534 u32 temp; 1534 u32 temp;
@@ -1562,7 +1562,7 @@ pangea_shutdown(struct macio_chip* macio, int sleep_mode)
1562 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); 1562 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1563} 1563}
1564 1564
1565static void __pmac 1565static void
1566intrepid_shutdown(struct macio_chip* macio, int sleep_mode) 1566intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
1567{ 1567{
1568 u32 temp; 1568 u32 temp;
@@ -1591,7 +1591,7 @@ intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
1591} 1591}
1592 1592
1593 1593
1594void __pmac pmac_tweak_clock_spreading(int enable) 1594void pmac_tweak_clock_spreading(int enable)
1595{ 1595{
1596 struct macio_chip* macio = &macio_chips[0]; 1596 struct macio_chip* macio = &macio_chips[0];
1597 1597
@@ -1698,7 +1698,7 @@ void __pmac pmac_tweak_clock_spreading(int enable)
1698} 1698}
1699 1699
1700 1700
1701static int __pmac 1701static int
1702core99_sleep(void) 1702core99_sleep(void)
1703{ 1703{
1704 struct macio_chip* macio; 1704 struct macio_chip* macio;
@@ -1791,7 +1791,7 @@ core99_sleep(void)
1791 return 0; 1791 return 0;
1792} 1792}
1793 1793
1794static int __pmac 1794static int
1795core99_wake_up(void) 1795core99_wake_up(void)
1796{ 1796{
1797 struct macio_chip* macio; 1797 struct macio_chip* macio;
@@ -1854,7 +1854,7 @@ core99_wake_up(void)
1854 return 0; 1854 return 0;
1855} 1855}
1856 1856
1857static long __pmac 1857static long
1858core99_sleep_state(struct device_node* node, long param, long value) 1858core99_sleep_state(struct device_node* node, long param, long value)
1859{ 1859{
1860 /* Param == 1 means to enter the "fake sleep" mode that is 1860 /* Param == 1 means to enter the "fake sleep" mode that is
@@ -1884,7 +1884,7 @@ core99_sleep_state(struct device_node* node, long param, long value)
1884 1884
1885#endif /* CONFIG_POWER4 */ 1885#endif /* CONFIG_POWER4 */
1886 1886
1887static long __pmac 1887static long
1888generic_dev_can_wake(struct device_node* node, long param, long value) 1888generic_dev_can_wake(struct device_node* node, long param, long value)
1889{ 1889{
1890 /* Todo: eventually check we are really dealing with on-board 1890 /* Todo: eventually check we are really dealing with on-board
@@ -1896,7 +1896,7 @@ generic_dev_can_wake(struct device_node* node, long param, long value)
1896 return 0; 1896 return 0;
1897} 1897}
1898 1898
1899static long __pmac 1899static long
1900generic_get_mb_info(struct device_node* node, long param, long value) 1900generic_get_mb_info(struct device_node* node, long param, long value)
1901{ 1901{
1902 switch(param) { 1902 switch(param) {
@@ -1919,7 +1919,7 @@ generic_get_mb_info(struct device_node* node, long param, long value)
1919 1919
1920/* Used on any machine 1920/* Used on any machine
1921 */ 1921 */
1922static struct feature_table_entry any_features[] __pmacdata = { 1922static struct feature_table_entry any_features[] = {
1923 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info }, 1923 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
1924 { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake }, 1924 { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake },
1925 { 0, NULL } 1925 { 0, NULL }
@@ -1931,7 +1931,7 @@ static struct feature_table_entry any_features[] __pmacdata = {
1931 * 2400,3400 and 3500 series powerbooks. Some older desktops seem 1931 * 2400,3400 and 3500 series powerbooks. Some older desktops seem
1932 * to have issues with turning on/off those asic cells 1932 * to have issues with turning on/off those asic cells
1933 */ 1933 */
1934static struct feature_table_entry ohare_features[] __pmacdata = { 1934static struct feature_table_entry ohare_features[] = {
1935 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1935 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1936 { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable }, 1936 { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable },
1937 { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable }, 1937 { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable },
@@ -1945,7 +1945,7 @@ static struct feature_table_entry ohare_features[] __pmacdata = {
1945 * Separated as some features couldn't be properly tested 1945 * Separated as some features couldn't be properly tested
1946 * and the serial port control bits appear to confuse it. 1946 * and the serial port control bits appear to confuse it.
1947 */ 1947 */
1948static struct feature_table_entry heathrow_desktop_features[] __pmacdata = { 1948static struct feature_table_entry heathrow_desktop_features[] = {
1949 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1949 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
1950 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable }, 1950 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
1951 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable }, 1951 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
@@ -1957,7 +1957,7 @@ static struct feature_table_entry heathrow_desktop_features[] __pmacdata = {
1957/* Heathrow based laptop, that is the Wallstreet and mainstreet 1957/* Heathrow based laptop, that is the Wallstreet and mainstreet
1958 * powerbooks. 1958 * powerbooks.
1959 */ 1959 */
1960static struct feature_table_entry heathrow_laptop_features[] __pmacdata = { 1960static struct feature_table_entry heathrow_laptop_features[] = {
1961 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1961 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1962 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, 1962 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
1963 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1963 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
@@ -1973,7 +1973,7 @@ static struct feature_table_entry heathrow_laptop_features[] __pmacdata = {
1973/* Paddington based machines 1973/* Paddington based machines
1974 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4. 1974 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
1975 */ 1975 */
1976static struct feature_table_entry paddington_features[] __pmacdata = { 1976static struct feature_table_entry paddington_features[] = {
1977 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1977 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1978 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, 1978 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
1979 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1979 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
@@ -1991,7 +1991,7 @@ static struct feature_table_entry paddington_features[] __pmacdata = {
1991 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo 1991 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
1992 * used on iBook2 & iMac "flow power". 1992 * used on iBook2 & iMac "flow power".
1993 */ 1993 */
1994static struct feature_table_entry core99_features[] __pmacdata = { 1994static struct feature_table_entry core99_features[] = {
1995 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 1995 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
1996 { PMAC_FTR_MODEM_ENABLE, core99_modem_enable }, 1996 { PMAC_FTR_MODEM_ENABLE, core99_modem_enable },
1997 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 1997 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2014,7 +2014,7 @@ static struct feature_table_entry core99_features[] __pmacdata = {
2014 2014
2015/* RackMac 2015/* RackMac
2016 */ 2016 */
2017static struct feature_table_entry rackmac_features[] __pmacdata = { 2017static struct feature_table_entry rackmac_features[] = {
2018 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2018 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2019 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2019 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
2020 { PMAC_FTR_IDE_RESET, core99_ide_reset }, 2020 { PMAC_FTR_IDE_RESET, core99_ide_reset },
@@ -2034,7 +2034,7 @@ static struct feature_table_entry rackmac_features[] __pmacdata = {
2034 2034
2035/* Pangea features 2035/* Pangea features
2036 */ 2036 */
2037static struct feature_table_entry pangea_features[] __pmacdata = { 2037static struct feature_table_entry pangea_features[] = {
2038 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2038 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2039 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, 2039 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
2040 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2040 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2054,7 +2054,7 @@ static struct feature_table_entry pangea_features[] __pmacdata = {
2054 2054
2055/* Intrepid features 2055/* Intrepid features
2056 */ 2056 */
2057static struct feature_table_entry intrepid_features[] __pmacdata = { 2057static struct feature_table_entry intrepid_features[] = {
2058 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2058 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2059 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, 2059 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
2060 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2060 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2077,7 +2077,7 @@ static struct feature_table_entry intrepid_features[] __pmacdata = {
2077 2077
2078/* G5 features 2078/* G5 features
2079 */ 2079 */
2080static struct feature_table_entry g5_features[] __pmacdata = { 2080static struct feature_table_entry g5_features[] = {
2081 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable }, 2081 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
2082 { PMAC_FTR_1394_ENABLE, g5_fw_enable }, 2082 { PMAC_FTR_1394_ENABLE, g5_fw_enable },
2083 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable }, 2083 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
@@ -2091,7 +2091,7 @@ static struct feature_table_entry g5_features[] __pmacdata = {
2091 2091
2092#endif /* CONFIG_POWER4 */ 2092#endif /* CONFIG_POWER4 */
2093 2093
2094static struct pmac_mb_def pmac_mb_defs[] __pmacdata = { 2094static struct pmac_mb_def pmac_mb_defs[] = {
2095#ifndef CONFIG_POWER4 2095#ifndef CONFIG_POWER4
2096 /* 2096 /*
2097 * Desktops 2097 * Desktops
@@ -2356,7 +2356,7 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
2356/* 2356/*
2357 * The toplevel feature_call callback 2357 * The toplevel feature_call callback
2358 */ 2358 */
2359long __pmac 2359long
2360pmac_do_feature_call(unsigned int selector, ...) 2360pmac_do_feature_call(unsigned int selector, ...)
2361{ 2361{
2362 struct device_node* node; 2362 struct device_node* node;
@@ -2939,8 +2939,8 @@ void __init pmac_check_ht_link(void)
2939 * Early video resume hook 2939 * Early video resume hook
2940 */ 2940 */
2941 2941
2942static void (*pmac_early_vresume_proc)(void *data) __pmacdata; 2942static void (*pmac_early_vresume_proc)(void *data);
2943static void *pmac_early_vresume_data __pmacdata; 2943static void *pmac_early_vresume_data;
2944 2944
2945void pmac_set_early_video_resume(void (*proc)(void *data), void *data) 2945void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
2946{ 2946{
@@ -2953,7 +2953,7 @@ void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
2953} 2953}
2954EXPORT_SYMBOL(pmac_set_early_video_resume); 2954EXPORT_SYMBOL(pmac_set_early_video_resume);
2955 2955
2956void __pmac pmac_call_early_video_resume(void) 2956void pmac_call_early_video_resume(void)
2957{ 2957{
2958 if (pmac_early_vresume_proc) 2958 if (pmac_early_vresume_proc)
2959 pmac_early_vresume_proc(pmac_early_vresume_data); 2959 pmac_early_vresume_proc(pmac_early_vresume_data);
@@ -2963,11 +2963,11 @@ void __pmac pmac_call_early_video_resume(void)
2963 * AGP related suspend/resume code 2963 * AGP related suspend/resume code
2964 */ 2964 */
2965 2965
2966static struct pci_dev *pmac_agp_bridge __pmacdata; 2966static struct pci_dev *pmac_agp_bridge;
2967static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata; 2967static int (*pmac_agp_suspend)(struct pci_dev *bridge);
2968static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata; 2968static int (*pmac_agp_resume)(struct pci_dev *bridge);
2969 2969
2970void __pmac pmac_register_agp_pm(struct pci_dev *bridge, 2970void pmac_register_agp_pm(struct pci_dev *bridge,
2971 int (*suspend)(struct pci_dev *bridge), 2971 int (*suspend)(struct pci_dev *bridge),
2972 int (*resume)(struct pci_dev *bridge)) 2972 int (*resume)(struct pci_dev *bridge))
2973{ 2973{
@@ -2984,7 +2984,7 @@ void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
2984} 2984}
2985EXPORT_SYMBOL(pmac_register_agp_pm); 2985EXPORT_SYMBOL(pmac_register_agp_pm);
2986 2986
2987void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev) 2987void pmac_suspend_agp_for_card(struct pci_dev *dev)
2988{ 2988{
2989 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL) 2989 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
2990 return; 2990 return;
@@ -2994,7 +2994,7 @@ void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
2994} 2994}
2995EXPORT_SYMBOL(pmac_suspend_agp_for_card); 2995EXPORT_SYMBOL(pmac_suspend_agp_for_card);
2996 2996
2997void __pmac pmac_resume_agp_for_card(struct pci_dev *dev) 2997void pmac_resume_agp_for_card(struct pci_dev *dev)
2998{ 2998{
2999 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL) 2999 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
3000 return; 3000 return;
diff --git a/arch/ppc/platforms/pmac_nvram.c b/arch/ppc/platforms/pmac_nvram.c
index c9de64205996..8c9b008c7226 100644
--- a/arch/ppc/platforms/pmac_nvram.c
+++ b/arch/ppc/platforms/pmac_nvram.c
@@ -88,17 +88,17 @@ extern int system_running;
88static int (*core99_write_bank)(int bank, u8* datas); 88static int (*core99_write_bank)(int bank, u8* datas);
89static int (*core99_erase_bank)(int bank); 89static int (*core99_erase_bank)(int bank);
90 90
91static char *nvram_image __pmacdata; 91static char *nvram_image;
92 92
93 93
94static unsigned char __pmac core99_nvram_read_byte(int addr) 94static unsigned char core99_nvram_read_byte(int addr)
95{ 95{
96 if (nvram_image == NULL) 96 if (nvram_image == NULL)
97 return 0xff; 97 return 0xff;
98 return nvram_image[addr]; 98 return nvram_image[addr];
99} 99}
100 100
101static void __pmac core99_nvram_write_byte(int addr, unsigned char val) 101static void core99_nvram_write_byte(int addr, unsigned char val)
102{ 102{
103 if (nvram_image == NULL) 103 if (nvram_image == NULL)
104 return; 104 return;
@@ -106,18 +106,18 @@ static void __pmac core99_nvram_write_byte(int addr, unsigned char val)
106} 106}
107 107
108 108
109static unsigned char __openfirmware direct_nvram_read_byte(int addr) 109static unsigned char direct_nvram_read_byte(int addr)
110{ 110{
111 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]); 111 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
112} 112}
113 113
114static void __openfirmware direct_nvram_write_byte(int addr, unsigned char val) 114static void direct_nvram_write_byte(int addr, unsigned char val)
115{ 115{
116 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val); 116 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
117} 117}
118 118
119 119
120static unsigned char __pmac indirect_nvram_read_byte(int addr) 120static unsigned char indirect_nvram_read_byte(int addr)
121{ 121{
122 unsigned char val; 122 unsigned char val;
123 unsigned long flags; 123 unsigned long flags;
@@ -130,7 +130,7 @@ static unsigned char __pmac indirect_nvram_read_byte(int addr)
130 return val; 130 return val;
131} 131}
132 132
133static void __pmac indirect_nvram_write_byte(int addr, unsigned char val) 133static void indirect_nvram_write_byte(int addr, unsigned char val)
134{ 134{
135 unsigned long flags; 135 unsigned long flags;
136 136
@@ -143,13 +143,13 @@ static void __pmac indirect_nvram_write_byte(int addr, unsigned char val)
143 143
144#ifdef CONFIG_ADB_PMU 144#ifdef CONFIG_ADB_PMU
145 145
146static void __pmac pmu_nvram_complete(struct adb_request *req) 146static void pmu_nvram_complete(struct adb_request *req)
147{ 147{
148 if (req->arg) 148 if (req->arg)
149 complete((struct completion *)req->arg); 149 complete((struct completion *)req->arg);
150} 150}
151 151
152static unsigned char __pmac pmu_nvram_read_byte(int addr) 152static unsigned char pmu_nvram_read_byte(int addr)
153{ 153{
154 struct adb_request req; 154 struct adb_request req;
155 DECLARE_COMPLETION(req_complete); 155 DECLARE_COMPLETION(req_complete);
@@ -165,7 +165,7 @@ static unsigned char __pmac pmu_nvram_read_byte(int addr)
165 return req.reply[0]; 165 return req.reply[0];
166} 166}
167 167
168static void __pmac pmu_nvram_write_byte(int addr, unsigned char val) 168static void pmu_nvram_write_byte(int addr, unsigned char val)
169{ 169{
170 struct adb_request req; 170 struct adb_request req;
171 DECLARE_COMPLETION(req_complete); 171 DECLARE_COMPLETION(req_complete);
@@ -183,7 +183,7 @@ static void __pmac pmu_nvram_write_byte(int addr, unsigned char val)
183#endif /* CONFIG_ADB_PMU */ 183#endif /* CONFIG_ADB_PMU */
184 184
185 185
186static u8 __pmac chrp_checksum(struct chrp_header* hdr) 186static u8 chrp_checksum(struct chrp_header* hdr)
187{ 187{
188 u8 *ptr; 188 u8 *ptr;
189 u16 sum = hdr->signature; 189 u16 sum = hdr->signature;
@@ -194,7 +194,7 @@ static u8 __pmac chrp_checksum(struct chrp_header* hdr)
194 return sum; 194 return sum;
195} 195}
196 196
197static u32 __pmac core99_calc_adler(u8 *buffer) 197static u32 core99_calc_adler(u8 *buffer)
198{ 198{
199 int cnt; 199 int cnt;
200 u32 low, high; 200 u32 low, high;
@@ -216,7 +216,7 @@ static u32 __pmac core99_calc_adler(u8 *buffer)
216 return (high << 16) | low; 216 return (high << 16) | low;
217} 217}
218 218
219static u32 __pmac core99_check(u8* datas) 219static u32 core99_check(u8* datas)
220{ 220{
221 struct core99_header* hdr99 = (struct core99_header*)datas; 221 struct core99_header* hdr99 = (struct core99_header*)datas;
222 222
@@ -235,7 +235,7 @@ static u32 __pmac core99_check(u8* datas)
235 return hdr99->generation; 235 return hdr99->generation;
236} 236}
237 237
238static int __pmac sm_erase_bank(int bank) 238static int sm_erase_bank(int bank)
239{ 239{
240 int stat, i; 240 int stat, i;
241 unsigned long timeout; 241 unsigned long timeout;
@@ -267,7 +267,7 @@ static int __pmac sm_erase_bank(int bank)
267 return 0; 267 return 0;
268} 268}
269 269
270static int __pmac sm_write_bank(int bank, u8* datas) 270static int sm_write_bank(int bank, u8* datas)
271{ 271{
272 int i, stat = 0; 272 int i, stat = 0;
273 unsigned long timeout; 273 unsigned long timeout;
@@ -302,7 +302,7 @@ static int __pmac sm_write_bank(int bank, u8* datas)
302 return 0; 302 return 0;
303} 303}
304 304
305static int __pmac amd_erase_bank(int bank) 305static int amd_erase_bank(int bank)
306{ 306{
307 int i, stat = 0; 307 int i, stat = 0;
308 unsigned long timeout; 308 unsigned long timeout;
@@ -349,7 +349,7 @@ static int __pmac amd_erase_bank(int bank)
349 return 0; 349 return 0;
350} 350}
351 351
352static int __pmac amd_write_bank(int bank, u8* datas) 352static int amd_write_bank(int bank, u8* datas)
353{ 353{
354 int i, stat = 0; 354 int i, stat = 0;
355 unsigned long timeout; 355 unsigned long timeout;
@@ -430,7 +430,7 @@ static void __init lookup_partitions(void)
430 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]); 430 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
431} 431}
432 432
433static void __pmac core99_nvram_sync(void) 433static void core99_nvram_sync(void)
434{ 434{
435 struct core99_header* hdr99; 435 struct core99_header* hdr99;
436 unsigned long flags; 436 unsigned long flags;
@@ -554,12 +554,12 @@ void __init pmac_nvram_init(void)
554 lookup_partitions(); 554 lookup_partitions();
555} 555}
556 556
557int __pmac pmac_get_partition(int partition) 557int pmac_get_partition(int partition)
558{ 558{
559 return nvram_partitions[partition]; 559 return nvram_partitions[partition];
560} 560}
561 561
562u8 __pmac pmac_xpram_read(int xpaddr) 562u8 pmac_xpram_read(int xpaddr)
563{ 563{
564 int offset = nvram_partitions[pmac_nvram_XPRAM]; 564 int offset = nvram_partitions[pmac_nvram_XPRAM];
565 565
@@ -569,7 +569,7 @@ u8 __pmac pmac_xpram_read(int xpaddr)
569 return ppc_md.nvram_read_val(xpaddr + offset); 569 return ppc_md.nvram_read_val(xpaddr + offset);
570} 570}
571 571
572void __pmac pmac_xpram_write(int xpaddr, u8 data) 572void pmac_xpram_write(int xpaddr, u8 data)
573{ 573{
574 int offset = nvram_partitions[pmac_nvram_XPRAM]; 574 int offset = nvram_partitions[pmac_nvram_XPRAM];
575 575
diff --git a/arch/ppc/platforms/pmac_pci.c b/arch/ppc/platforms/pmac_pci.c
index 719fb49fe2bc..786295b6ddd0 100644
--- a/arch/ppc/platforms/pmac_pci.c
+++ b/arch/ppc/platforms/pmac_pci.c
@@ -141,7 +141,7 @@ fixup_bus_range(struct device_node *bridge)
141 |(((unsigned long)(off)) & 0xFCUL) \ 141 |(((unsigned long)(off)) & 0xFCUL) \
142 |1UL) 142 |1UL)
143 143
144static void volatile __iomem * __pmac 144static void volatile __iomem *
145macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset) 145macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
146{ 146{
147 unsigned int caddr; 147 unsigned int caddr;
@@ -162,7 +162,7 @@ macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
162 return hose->cfg_data + offset; 162 return hose->cfg_data + offset;
163} 163}
164 164
165static int __pmac 165static int
166macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 166macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
167 int len, u32 *val) 167 int len, u32 *val)
168{ 168{
@@ -190,7 +190,7 @@ macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
190 return PCIBIOS_SUCCESSFUL; 190 return PCIBIOS_SUCCESSFUL;
191} 191}
192 192
193static int __pmac 193static int
194macrisc_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 194macrisc_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
195 int len, u32 val) 195 int len, u32 val)
196{ 196{
@@ -230,7 +230,7 @@ static struct pci_ops macrisc_pci_ops =
230/* 230/*
231 * Verifiy that a specific (bus, dev_fn) exists on chaos 231 * Verifiy that a specific (bus, dev_fn) exists on chaos
232 */ 232 */
233static int __pmac 233static int
234chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) 234chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
235{ 235{
236 struct device_node *np; 236 struct device_node *np;
@@ -252,7 +252,7 @@ chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
252 return PCIBIOS_SUCCESSFUL; 252 return PCIBIOS_SUCCESSFUL;
253} 253}
254 254
255static int __pmac 255static int
256chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 256chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
257 int len, u32 *val) 257 int len, u32 *val)
258{ 258{
@@ -264,7 +264,7 @@ chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
264 return macrisc_read_config(bus, devfn, offset, len, val); 264 return macrisc_read_config(bus, devfn, offset, len, val);
265} 265}
266 266
267static int __pmac 267static int
268chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 268chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
269 int len, u32 val) 269 int len, u32 val)
270{ 270{
@@ -294,7 +294,7 @@ static struct pci_ops chaos_pci_ops =
294 + (((unsigned long)bus) << 16) \ 294 + (((unsigned long)bus) << 16) \
295 + 0x01000000UL) 295 + 0x01000000UL)
296 296
297static void volatile __iomem * __pmac 297static void volatile __iomem *
298u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset) 298u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
299{ 299{
300 if (bus == hose->first_busno) { 300 if (bus == hose->first_busno) {
@@ -307,7 +307,7 @@ u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
307 return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset); 307 return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
308} 308}
309 309
310static int __pmac 310static int
311u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 311u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
312 int len, u32 *val) 312 int len, u32 *val)
313{ 313{
@@ -357,7 +357,7 @@ u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
357 return PCIBIOS_SUCCESSFUL; 357 return PCIBIOS_SUCCESSFUL;
358} 358}
359 359
360static int __pmac 360static int
361u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 361u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
362 int len, u32 val) 362 int len, u32 val)
363{ 363{
@@ -575,7 +575,7 @@ pmac_find_bridges(void)
575 * some offset between bus number and domains for now when we 575 * some offset between bus number and domains for now when we
576 * assign all busses should help for now 576 * assign all busses should help for now
577 */ 577 */
578 if (pci_assign_all_busses) 578 if (pci_assign_all_buses)
579 pcibios_assign_bus_offset = 0x10; 579 pcibios_assign_bus_offset = 0x10;
580 580
581#ifdef CONFIG_POWER4 581#ifdef CONFIG_POWER4
@@ -643,7 +643,7 @@ static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
643static int __init 643static int __init
644setup_uninorth(struct pci_controller* hose, struct reg_property* addr) 644setup_uninorth(struct pci_controller* hose, struct reg_property* addr)
645{ 645{
646 pci_assign_all_busses = 1; 646 pci_assign_all_buses = 1;
647 has_uninorth = 1; 647 has_uninorth = 1;
648 hose->ops = &macrisc_pci_ops; 648 hose->ops = &macrisc_pci_ops;
649 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); 649 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
@@ -677,7 +677,7 @@ setup_u3_agp(struct pci_controller* hose, struct reg_property* addr)
677{ 677{
678 /* On G5, we move AGP up to high bus number so we don't need 678 /* On G5, we move AGP up to high bus number so we don't need
679 * to reassign bus numbers for HT. If we ever have P2P bridges 679 * to reassign bus numbers for HT. If we ever have P2P bridges
680 * on AGP, we'll have to move pci_assign_all_busses to the 680 * on AGP, we'll have to move pci_assign_all_buses to the
681 * pci_controller structure so we enable it for AGP and not for 681 * pci_controller structure so we enable it for AGP and not for
682 * HT childs. 682 * HT childs.
683 * We hard code the address because of the different size of 683 * We hard code the address because of the different size of
@@ -899,7 +899,7 @@ pmac_pcibios_fixup(void)
899 pcibios_fixup_OF_interrupts(); 899 pcibios_fixup_OF_interrupts();
900} 900}
901 901
902int __pmac 902int
903pmac_pci_enable_device_hook(struct pci_dev *dev, int initial) 903pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
904{ 904{
905 struct device_node* node; 905 struct device_node* node;
@@ -1096,7 +1096,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
1096 * Disable second function on K2-SATA, it's broken 1096 * Disable second function on K2-SATA, it's broken
1097 * and disable IO BARs on first one 1097 * and disable IO BARs on first one
1098 */ 1098 */
1099void __pmac pmac_pci_fixup_k2_sata(struct pci_dev* dev) 1099void pmac_pci_fixup_k2_sata(struct pci_dev* dev)
1100{ 1100{
1101 int i; 1101 int i;
1102 u16 cmd; 1102 u16 cmd;
diff --git a/arch/ppc/platforms/pmac_pic.c b/arch/ppc/platforms/pmac_pic.c
index 2ce058895e03..9f2d95ea8564 100644
--- a/arch/ppc/platforms/pmac_pic.c
+++ b/arch/ppc/platforms/pmac_pic.c
@@ -35,6 +35,7 @@
35#include <asm/open_pic.h> 35#include <asm/open_pic.h>
36#include <asm/xmon.h> 36#include <asm/xmon.h>
37#include <asm/pmac_feature.h> 37#include <asm/pmac_feature.h>
38#include <asm/machdep.h>
38 39
39#include "pmac_pic.h" 40#include "pmac_pic.h"
40 41
@@ -53,7 +54,7 @@ struct pmac_irq_hw {
53}; 54};
54 55
55/* Default addresses */ 56/* Default addresses */
56static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = { 57static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
57 (struct pmac_irq_hw *) 0xf3000020, 58 (struct pmac_irq_hw *) 0xf3000020,
58 (struct pmac_irq_hw *) 0xf3000010, 59 (struct pmac_irq_hw *) 0xf3000010,
59 (struct pmac_irq_hw *) 0xf4000020, 60 (struct pmac_irq_hw *) 0xf4000020,
@@ -64,22 +65,22 @@ static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = {
64#define OHARE_LEVEL_MASK 0x1ff00000 65#define OHARE_LEVEL_MASK 0x1ff00000
65#define HEATHROW_LEVEL_MASK 0x1ff00000 66#define HEATHROW_LEVEL_MASK 0x1ff00000
66 67
67static int max_irqs __pmacdata; 68static int max_irqs;
68static int max_real_irqs __pmacdata; 69static int max_real_irqs;
69static u32 level_mask[4] __pmacdata; 70static u32 level_mask[4];
70 71
71static DEFINE_SPINLOCK(pmac_pic_lock __pmacdata); 72static DEFINE_SPINLOCK(pmac_pic_lock);
72 73
73 74
74#define GATWICK_IRQ_POOL_SIZE 10 75#define GATWICK_IRQ_POOL_SIZE 10
75static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE] __pmacdata; 76static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
76 77
77/* 78/*
78 * Mark an irq as "lost". This is only used on the pmac 79 * Mark an irq as "lost". This is only used on the pmac
79 * since it can lose interrupts (see pmac_set_irq_mask). 80 * since it can lose interrupts (see pmac_set_irq_mask).
80 * -- Cort 81 * -- Cort
81 */ 82 */
82void __pmac 83void
83__set_lost(unsigned long irq_nr, int nokick) 84__set_lost(unsigned long irq_nr, int nokick)
84{ 85{
85 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) { 86 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
@@ -89,7 +90,7 @@ __set_lost(unsigned long irq_nr, int nokick)
89 } 90 }
90} 91}
91 92
92static void __pmac 93static void
93pmac_mask_and_ack_irq(unsigned int irq_nr) 94pmac_mask_and_ack_irq(unsigned int irq_nr)
94{ 95{
95 unsigned long bit = 1UL << (irq_nr & 0x1f); 96 unsigned long bit = 1UL << (irq_nr & 0x1f);
@@ -114,7 +115,7 @@ pmac_mask_and_ack_irq(unsigned int irq_nr)
114 spin_unlock_irqrestore(&pmac_pic_lock, flags); 115 spin_unlock_irqrestore(&pmac_pic_lock, flags);
115} 116}
116 117
117static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) 118static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
118{ 119{
119 unsigned long bit = 1UL << (irq_nr & 0x1f); 120 unsigned long bit = 1UL << (irq_nr & 0x1f);
120 int i = irq_nr >> 5; 121 int i = irq_nr >> 5;
@@ -147,7 +148,7 @@ static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
147/* When an irq gets requested for the first client, if it's an 148/* When an irq gets requested for the first client, if it's an
148 * edge interrupt, we clear any previous one on the controller 149 * edge interrupt, we clear any previous one on the controller
149 */ 150 */
150static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr) 151static unsigned int pmac_startup_irq(unsigned int irq_nr)
151{ 152{
152 unsigned long bit = 1UL << (irq_nr & 0x1f); 153 unsigned long bit = 1UL << (irq_nr & 0x1f);
153 int i = irq_nr >> 5; 154 int i = irq_nr >> 5;
@@ -160,20 +161,20 @@ static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
160 return 0; 161 return 0;
161} 162}
162 163
163static void __pmac pmac_mask_irq(unsigned int irq_nr) 164static void pmac_mask_irq(unsigned int irq_nr)
164{ 165{
165 clear_bit(irq_nr, ppc_cached_irq_mask); 166 clear_bit(irq_nr, ppc_cached_irq_mask);
166 pmac_set_irq_mask(irq_nr, 0); 167 pmac_set_irq_mask(irq_nr, 0);
167 mb(); 168 mb();
168} 169}
169 170
170static void __pmac pmac_unmask_irq(unsigned int irq_nr) 171static void pmac_unmask_irq(unsigned int irq_nr)
171{ 172{
172 set_bit(irq_nr, ppc_cached_irq_mask); 173 set_bit(irq_nr, ppc_cached_irq_mask);
173 pmac_set_irq_mask(irq_nr, 0); 174 pmac_set_irq_mask(irq_nr, 0);
174} 175}
175 176
176static void __pmac pmac_end_irq(unsigned int irq_nr) 177static void pmac_end_irq(unsigned int irq_nr)
177{ 178{
178 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)) 179 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
179 && irq_desc[irq_nr].action) { 180 && irq_desc[irq_nr].action) {
diff --git a/arch/ppc/platforms/pmac_setup.c b/arch/ppc/platforms/pmac_setup.c
index d6356f480d90..55d2beffe560 100644
--- a/arch/ppc/platforms/pmac_setup.c
+++ b/arch/ppc/platforms/pmac_setup.c
@@ -122,7 +122,7 @@ extern struct smp_ops_t psurge_smp_ops;
122extern struct smp_ops_t core99_smp_ops; 122extern struct smp_ops_t core99_smp_ops;
123#endif /* CONFIG_SMP */ 123#endif /* CONFIG_SMP */
124 124
125static int __pmac 125static int
126pmac_show_cpuinfo(struct seq_file *m) 126pmac_show_cpuinfo(struct seq_file *m)
127{ 127{
128 struct device_node *np; 128 struct device_node *np;
@@ -226,7 +226,7 @@ pmac_show_cpuinfo(struct seq_file *m)
226 return 0; 226 return 0;
227} 227}
228 228
229static int __openfirmware 229static int
230pmac_show_percpuinfo(struct seq_file *m, int i) 230pmac_show_percpuinfo(struct seq_file *m, int i)
231{ 231{
232#ifdef CONFIG_CPU_FREQ_PMAC 232#ifdef CONFIG_CPU_FREQ_PMAC
@@ -330,9 +330,9 @@ pmac_setup_arch(void)
330#ifdef CONFIG_SMP 330#ifdef CONFIG_SMP
331 /* Check for Core99 */ 331 /* Check for Core99 */
332 if (find_devices("uni-n") || find_devices("u3")) 332 if (find_devices("uni-n") || find_devices("u3"))
333 ppc_md.smp_ops = &core99_smp_ops; 333 smp_ops = &core99_smp_ops;
334 else 334 else
335 ppc_md.smp_ops = &psurge_smp_ops; 335 smp_ops = &psurge_smp_ops;
336#endif /* CONFIG_SMP */ 336#endif /* CONFIG_SMP */
337 337
338 pci_create_OF_bus_map(); 338 pci_create_OF_bus_map();
@@ -447,7 +447,7 @@ static int pmac_pm_enter(suspend_state_t state)
447 enable_kernel_fp(); 447 enable_kernel_fp();
448 448
449#ifdef CONFIG_ALTIVEC 449#ifdef CONFIG_ALTIVEC
450 if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC) 450 if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
451 enable_kernel_altivec(); 451 enable_kernel_altivec();
452#endif /* CONFIG_ALTIVEC */ 452#endif /* CONFIG_ALTIVEC */
453 453
@@ -485,7 +485,7 @@ static int pmac_late_init(void)
485late_initcall(pmac_late_init); 485late_initcall(pmac_late_init);
486 486
487/* can't be __init - can be called whenever a disk is first accessed */ 487/* can't be __init - can be called whenever a disk is first accessed */
488void __pmac 488void
489note_bootable_part(dev_t dev, int part, int goodness) 489note_bootable_part(dev_t dev, int part, int goodness)
490{ 490{
491 static int found_boot = 0; 491 static int found_boot = 0;
@@ -511,7 +511,7 @@ note_bootable_part(dev_t dev, int part, int goodness)
511 } 511 }
512} 512}
513 513
514static void __pmac 514static void
515pmac_restart(char *cmd) 515pmac_restart(char *cmd)
516{ 516{
517#ifdef CONFIG_ADB_CUDA 517#ifdef CONFIG_ADB_CUDA
@@ -536,7 +536,7 @@ pmac_restart(char *cmd)
536 } 536 }
537} 537}
538 538
539static void __pmac 539static void
540pmac_power_off(void) 540pmac_power_off(void)
541{ 541{
542#ifdef CONFIG_ADB_CUDA 542#ifdef CONFIG_ADB_CUDA
@@ -561,7 +561,7 @@ pmac_power_off(void)
561 } 561 }
562} 562}
563 563
564static void __pmac 564static void
565pmac_halt(void) 565pmac_halt(void)
566{ 566{
567 pmac_power_off(); 567 pmac_power_off();
@@ -661,7 +661,6 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
661 ppc_md.setup_arch = pmac_setup_arch; 661 ppc_md.setup_arch = pmac_setup_arch;
662 ppc_md.show_cpuinfo = pmac_show_cpuinfo; 662 ppc_md.show_cpuinfo = pmac_show_cpuinfo;
663 ppc_md.show_percpuinfo = pmac_show_percpuinfo; 663 ppc_md.show_percpuinfo = pmac_show_percpuinfo;
664 ppc_md.irq_canonicalize = NULL;
665 ppc_md.init_IRQ = pmac_pic_init; 664 ppc_md.init_IRQ = pmac_pic_init;
666 ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */ 665 ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */
667 666
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S
index 88419c77ac43..22b113d19b24 100644
--- a/arch/ppc/platforms/pmac_sleep.S
+++ b/arch/ppc/platforms/pmac_sleep.S
@@ -387,10 +387,10 @@ turn_on_mmu:
387#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */ 387#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
388 388
389 .section .data 389 .section .data
390 .balign L1_CACHE_LINE_SIZE 390 .balign L1_CACHE_BYTES
391sleep_storage: 391sleep_storage:
392 .long 0 392 .long 0
393 .balign L1_CACHE_LINE_SIZE, 0 393 .balign L1_CACHE_BYTES, 0
394 394
395#endif /* CONFIG_6xx */ 395#endif /* CONFIG_6xx */
396 .section .text 396 .section .text
diff --git a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c
index 794a23994b82..26ff26238f03 100644
--- a/arch/ppc/platforms/pmac_smp.c
+++ b/arch/ppc/platforms/pmac_smp.c
@@ -186,7 +186,7 @@ static inline void psurge_clr_ipi(int cpu)
186 */ 186 */
187static unsigned long psurge_smp_message[NR_CPUS]; 187static unsigned long psurge_smp_message[NR_CPUS];
188 188
189void __pmac psurge_smp_message_recv(struct pt_regs *regs) 189void psurge_smp_message_recv(struct pt_regs *regs)
190{ 190{
191 int cpu = smp_processor_id(); 191 int cpu = smp_processor_id();
192 int msg; 192 int msg;
@@ -203,14 +203,13 @@ void __pmac psurge_smp_message_recv(struct pt_regs *regs)
203 smp_message_recv(msg, regs); 203 smp_message_recv(msg, regs);
204} 204}
205 205
206irqreturn_t __pmac psurge_primary_intr(int irq, void *d, struct pt_regs *regs) 206irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
207{ 207{
208 psurge_smp_message_recv(regs); 208 psurge_smp_message_recv(regs);
209 return IRQ_HANDLED; 209 return IRQ_HANDLED;
210} 210}
211 211
212static void __pmac smp_psurge_message_pass(int target, int msg, unsigned long data, 212static void smp_psurge_message_pass(int target, int msg)
213 int wait)
214{ 213{
215 int i; 214 int i;
216 215
@@ -629,7 +628,7 @@ void smp_core99_give_timebase(void)
629 628
630 629
631/* PowerSurge-style Macs */ 630/* PowerSurge-style Macs */
632struct smp_ops_t psurge_smp_ops __pmacdata = { 631struct smp_ops_t psurge_smp_ops = {
633 .message_pass = smp_psurge_message_pass, 632 .message_pass = smp_psurge_message_pass,
634 .probe = smp_psurge_probe, 633 .probe = smp_psurge_probe,
635 .kick_cpu = smp_psurge_kick_cpu, 634 .kick_cpu = smp_psurge_kick_cpu,
@@ -639,7 +638,7 @@ struct smp_ops_t psurge_smp_ops __pmacdata = {
639}; 638};
640 639
641/* Core99 Macs (dual G4s) */ 640/* Core99 Macs (dual G4s) */
642struct smp_ops_t core99_smp_ops __pmacdata = { 641struct smp_ops_t core99_smp_ops = {
643 .message_pass = smp_openpic_message_pass, 642 .message_pass = smp_openpic_message_pass,
644 .probe = smp_core99_probe, 643 .probe = smp_core99_probe,
645 .kick_cpu = smp_core99_kick_cpu, 644 .kick_cpu = smp_core99_kick_cpu,
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c
index efb819f9490d..edb9fcc64790 100644
--- a/arch/ppc/platforms/pmac_time.c
+++ b/arch/ppc/platforms/pmac_time.c
@@ -77,7 +77,7 @@ pmac_time_init(void)
77#endif 77#endif
78} 78}
79 79
80unsigned long __pmac 80unsigned long
81pmac_get_rtc_time(void) 81pmac_get_rtc_time(void)
82{ 82{
83#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) 83#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -118,7 +118,7 @@ pmac_get_rtc_time(void)
118 return 0; 118 return 0;
119} 119}
120 120
121int __pmac 121int
122pmac_set_rtc_time(unsigned long nowtime) 122pmac_set_rtc_time(unsigned long nowtime)
123{ 123{
124#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) 124#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -210,7 +210,7 @@ via_calibrate_decr(void)
210/* 210/*
211 * Reset the time after a sleep. 211 * Reset the time after a sleep.
212 */ 212 */
213static int __pmac 213static int
214time_sleep_notify(struct pmu_sleep_notifier *self, int when) 214time_sleep_notify(struct pmu_sleep_notifier *self, int when)
215{ 215{
216 static unsigned long time_diff; 216 static unsigned long time_diff;
@@ -235,7 +235,7 @@ time_sleep_notify(struct pmu_sleep_notifier *self, int when)
235 return PBOOK_SLEEP_OK; 235 return PBOOK_SLEEP_OK;
236} 236}
237 237
238static struct pmu_sleep_notifier time_sleep_notifier __pmacdata = { 238static struct pmu_sleep_notifier time_sleep_notifier = {
239 time_sleep_notify, SLEEP_LEVEL_MISC, 239 time_sleep_notify, SLEEP_LEVEL_MISC,
240}; 240};
241#endif /* CONFIG_PM */ 241#endif /* CONFIG_PM */
diff --git a/arch/ppc/platforms/pplus.c b/arch/ppc/platforms/pplus.c
index e70aae20d6f9..59eb330b2090 100644
--- a/arch/ppc/platforms/pplus.c
+++ b/arch/ppc/platforms/pplus.c
@@ -646,14 +646,6 @@ static void pplus_power_off(void)
646 pplus_halt(); 646 pplus_halt();
647} 647}
648 648
649static unsigned int pplus_irq_canonicalize(u_int irq)
650{
651 if (irq == 2)
652 return 9;
653 else
654 return irq;
655}
656
657static void __init pplus_init_IRQ(void) 649static void __init pplus_init_IRQ(void)
658{ 650{
659 int i; 651 int i;
@@ -872,10 +864,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
872 ISA_DMA_THRESHOLD = 0x00ffffff; 864 ISA_DMA_THRESHOLD = 0x00ffffff;
873 DMA_MODE_READ = 0x44; 865 DMA_MODE_READ = 0x44;
874 DMA_MODE_WRITE = 0x48; 866 DMA_MODE_WRITE = 0x48;
867 ppc_do_canonicalize_irqs = 1;
875 868
876 ppc_md.setup_arch = pplus_setup_arch; 869 ppc_md.setup_arch = pplus_setup_arch;
877 ppc_md.show_cpuinfo = pplus_show_cpuinfo; 870 ppc_md.show_cpuinfo = pplus_show_cpuinfo;
878 ppc_md.irq_canonicalize = pplus_irq_canonicalize;
879 ppc_md.init_IRQ = pplus_init_IRQ; 871 ppc_md.init_IRQ = pplus_init_IRQ;
880 /* this gets changed later on if we have an OpenPIC -- Cort */ 872 /* this gets changed later on if we have an OpenPIC -- Cort */
881 ppc_md.get_irq = i8259_irq; 873 ppc_md.get_irq = i8259_irq;
@@ -911,6 +903,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
911 ppc_md.kgdb_map_scc = gen550_kgdb_map_scc; 903 ppc_md.kgdb_map_scc = gen550_kgdb_map_scc;
912#endif 904#endif
913#ifdef CONFIG_SMP 905#ifdef CONFIG_SMP
914 ppc_md.smp_ops = &pplus_smp_ops; 906 smp_ops = &pplus_smp_ops;
915#endif /* CONFIG_SMP */ 907#endif /* CONFIG_SMP */
916} 908}
diff --git a/arch/ppc/platforms/prep_pci.c b/arch/ppc/platforms/prep_pci.c
index 4760cb64251d..e50b9996848c 100644
--- a/arch/ppc/platforms/prep_pci.c
+++ b/arch/ppc/platforms/prep_pci.c
@@ -43,7 +43,7 @@ static unsigned long *ProcInfo;
43/* Tables for known hardware */ 43/* Tables for known hardware */
44 44
45/* Motorola PowerStackII - Utah */ 45/* Motorola PowerStackII - Utah */
46static char Utah_pci_IRQ_map[23] __prepdata = 46static char Utah_pci_IRQ_map[23] =
47{ 47{
48 0, /* Slot 0 - unused */ 48 0, /* Slot 0 - unused */
49 0, /* Slot 1 - unused */ 49 0, /* Slot 1 - unused */
@@ -72,7 +72,7 @@ static char Utah_pci_IRQ_map[23] __prepdata =
72 0, /* Slot 22 - unused */ 72 0, /* Slot 22 - unused */
73}; 73};
74 74
75static char Utah_pci_IRQ_routes[] __prepdata = 75static char Utah_pci_IRQ_routes[] =
76{ 76{
77 0, /* Line 0 - Unused */ 77 0, /* Line 0 - Unused */
78 9, /* Line 1 */ 78 9, /* Line 1 */
@@ -84,7 +84,7 @@ static char Utah_pci_IRQ_routes[] __prepdata =
84 84
85/* Motorola PowerStackII - Omaha */ 85/* Motorola PowerStackII - Omaha */
86/* no integrated SCSI or ethernet */ 86/* no integrated SCSI or ethernet */
87static char Omaha_pci_IRQ_map[23] __prepdata = 87static char Omaha_pci_IRQ_map[23] =
88{ 88{
89 0, /* Slot 0 - unused */ 89 0, /* Slot 0 - unused */
90 0, /* Slot 1 - unused */ 90 0, /* Slot 1 - unused */
@@ -111,7 +111,7 @@ static char Omaha_pci_IRQ_map[23] __prepdata =
111 0, 111 0,
112}; 112};
113 113
114static char Omaha_pci_IRQ_routes[] __prepdata = 114static char Omaha_pci_IRQ_routes[] =
115{ 115{
116 0, /* Line 0 - Unused */ 116 0, /* Line 0 - Unused */
117 9, /* Line 1 */ 117 9, /* Line 1 */
@@ -121,7 +121,7 @@ static char Omaha_pci_IRQ_routes[] __prepdata =
121}; 121};
122 122
123/* Motorola PowerStack */ 123/* Motorola PowerStack */
124static char Blackhawk_pci_IRQ_map[19] __prepdata = 124static char Blackhawk_pci_IRQ_map[19] =
125{ 125{
126 0, /* Slot 0 - unused */ 126 0, /* Slot 0 - unused */
127 0, /* Slot 1 - unused */ 127 0, /* Slot 1 - unused */
@@ -144,7 +144,7 @@ static char Blackhawk_pci_IRQ_map[19] __prepdata =
144 3, /* Slot P5 */ 144 3, /* Slot P5 */
145}; 145};
146 146
147static char Blackhawk_pci_IRQ_routes[] __prepdata = 147static char Blackhawk_pci_IRQ_routes[] =
148{ 148{
149 0, /* Line 0 - Unused */ 149 0, /* Line 0 - Unused */
150 9, /* Line 1 */ 150 9, /* Line 1 */
@@ -154,7 +154,7 @@ static char Blackhawk_pci_IRQ_routes[] __prepdata =
154}; 154};
155 155
156/* Motorola Mesquite */ 156/* Motorola Mesquite */
157static char Mesquite_pci_IRQ_map[23] __prepdata = 157static char Mesquite_pci_IRQ_map[23] =
158{ 158{
159 0, /* Slot 0 - unused */ 159 0, /* Slot 0 - unused */
160 0, /* Slot 1 - unused */ 160 0, /* Slot 1 - unused */
@@ -182,7 +182,7 @@ static char Mesquite_pci_IRQ_map[23] __prepdata =
182}; 182};
183 183
184/* Motorola Sitka */ 184/* Motorola Sitka */
185static char Sitka_pci_IRQ_map[21] __prepdata = 185static char Sitka_pci_IRQ_map[21] =
186{ 186{
187 0, /* Slot 0 - unused */ 187 0, /* Slot 0 - unused */
188 0, /* Slot 1 - unused */ 188 0, /* Slot 1 - unused */
@@ -208,7 +208,7 @@ static char Sitka_pci_IRQ_map[21] __prepdata =
208}; 208};
209 209
210/* Motorola MTX */ 210/* Motorola MTX */
211static char MTX_pci_IRQ_map[23] __prepdata = 211static char MTX_pci_IRQ_map[23] =
212{ 212{
213 0, /* Slot 0 - unused */ 213 0, /* Slot 0 - unused */
214 0, /* Slot 1 - unused */ 214 0, /* Slot 1 - unused */
@@ -237,7 +237,7 @@ static char MTX_pci_IRQ_map[23] __prepdata =
237 237
238/* Motorola MTX Plus */ 238/* Motorola MTX Plus */
239/* Secondary bus interrupt routing is not supported yet */ 239/* Secondary bus interrupt routing is not supported yet */
240static char MTXplus_pci_IRQ_map[23] __prepdata = 240static char MTXplus_pci_IRQ_map[23] =
241{ 241{
242 0, /* Slot 0 - unused */ 242 0, /* Slot 0 - unused */
243 0, /* Slot 1 - unused */ 243 0, /* Slot 1 - unused */
@@ -264,13 +264,13 @@ static char MTXplus_pci_IRQ_map[23] __prepdata =
264 0, /* Slot 22 - unused */ 264 0, /* Slot 22 - unused */
265}; 265};
266 266
267static char Raven_pci_IRQ_routes[] __prepdata = 267static char Raven_pci_IRQ_routes[] =
268{ 268{
269 0, /* This is a dummy structure */ 269 0, /* This is a dummy structure */
270}; 270};
271 271
272/* Motorola MVME16xx */ 272/* Motorola MVME16xx */
273static char Genesis_pci_IRQ_map[16] __prepdata = 273static char Genesis_pci_IRQ_map[16] =
274{ 274{
275 0, /* Slot 0 - unused */ 275 0, /* Slot 0 - unused */
276 0, /* Slot 1 - unused */ 276 0, /* Slot 1 - unused */
@@ -290,7 +290,7 @@ static char Genesis_pci_IRQ_map[16] __prepdata =
290 0, /* Slot 15 - unused */ 290 0, /* Slot 15 - unused */
291}; 291};
292 292
293static char Genesis_pci_IRQ_routes[] __prepdata = 293static char Genesis_pci_IRQ_routes[] =
294{ 294{
295 0, /* Line 0 - Unused */ 295 0, /* Line 0 - Unused */
296 10, /* Line 1 */ 296 10, /* Line 1 */
@@ -299,7 +299,7 @@ static char Genesis_pci_IRQ_routes[] __prepdata =
299 15 /* Line 4 */ 299 15 /* Line 4 */
300}; 300};
301 301
302static char Genesis2_pci_IRQ_map[23] __prepdata = 302static char Genesis2_pci_IRQ_map[23] =
303{ 303{
304 0, /* Slot 0 - unused */ 304 0, /* Slot 0 - unused */
305 0, /* Slot 1 - unused */ 305 0, /* Slot 1 - unused */
@@ -327,7 +327,7 @@ static char Genesis2_pci_IRQ_map[23] __prepdata =
327}; 327};
328 328
329/* Motorola Series-E */ 329/* Motorola Series-E */
330static char Comet_pci_IRQ_map[23] __prepdata = 330static char Comet_pci_IRQ_map[23] =
331{ 331{
332 0, /* Slot 0 - unused */ 332 0, /* Slot 0 - unused */
333 0, /* Slot 1 - unused */ 333 0, /* Slot 1 - unused */
@@ -354,7 +354,7 @@ static char Comet_pci_IRQ_map[23] __prepdata =
354 0, 354 0,
355}; 355};
356 356
357static char Comet_pci_IRQ_routes[] __prepdata = 357static char Comet_pci_IRQ_routes[] =
358{ 358{
359 0, /* Line 0 - Unused */ 359 0, /* Line 0 - Unused */
360 10, /* Line 1 */ 360 10, /* Line 1 */
@@ -364,7 +364,7 @@ static char Comet_pci_IRQ_routes[] __prepdata =
364}; 364};
365 365
366/* Motorola Series-EX */ 366/* Motorola Series-EX */
367static char Comet2_pci_IRQ_map[23] __prepdata = 367static char Comet2_pci_IRQ_map[23] =
368{ 368{
369 0, /* Slot 0 - unused */ 369 0, /* Slot 0 - unused */
370 0, /* Slot 1 - unused */ 370 0, /* Slot 1 - unused */
@@ -391,7 +391,7 @@ static char Comet2_pci_IRQ_map[23] __prepdata =
391 0, 391 0,
392}; 392};
393 393
394static char Comet2_pci_IRQ_routes[] __prepdata = 394static char Comet2_pci_IRQ_routes[] =
395{ 395{
396 0, /* Line 0 - Unused */ 396 0, /* Line 0 - Unused */
397 10, /* Line 1 */ 397 10, /* Line 1 */
@@ -405,7 +405,7 @@ static char Comet2_pci_IRQ_routes[] __prepdata =
405 * This is actually based on the Carolina motherboard 405 * This is actually based on the Carolina motherboard
406 * -- Cort 406 * -- Cort
407 */ 407 */
408static char ibm8xx_pci_IRQ_map[23] __prepdata = { 408static char ibm8xx_pci_IRQ_map[23] = {
409 0, /* Slot 0 - unused */ 409 0, /* Slot 0 - unused */
410 0, /* Slot 1 - unused */ 410 0, /* Slot 1 - unused */
411 0, /* Slot 2 - unused */ 411 0, /* Slot 2 - unused */
@@ -431,7 +431,7 @@ static char ibm8xx_pci_IRQ_map[23] __prepdata = {
431 2, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */ 431 2, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
432}; 432};
433 433
434static char ibm8xx_pci_IRQ_routes[] __prepdata = { 434static char ibm8xx_pci_IRQ_routes[] = {
435 0, /* Line 0 - unused */ 435 0, /* Line 0 - unused */
436 15, /* Line 1 */ 436 15, /* Line 1 */
437 15, /* Line 2 */ 437 15, /* Line 2 */
@@ -443,7 +443,7 @@ static char ibm8xx_pci_IRQ_routes[] __prepdata = {
443 * a 6015 ibm board 443 * a 6015 ibm board
444 * -- Cort 444 * -- Cort
445 */ 445 */
446static char ibm6015_pci_IRQ_map[23] __prepdata = { 446static char ibm6015_pci_IRQ_map[23] = {
447 0, /* Slot 0 - unused */ 447 0, /* Slot 0 - unused */
448 0, /* Slot 1 - unused */ 448 0, /* Slot 1 - unused */
449 0, /* Slot 2 - unused */ 449 0, /* Slot 2 - unused */
@@ -469,7 +469,7 @@ static char ibm6015_pci_IRQ_map[23] __prepdata = {
469 2, /* Slot 22 - */ 469 2, /* Slot 22 - */
470}; 470};
471 471
472static char ibm6015_pci_IRQ_routes[] __prepdata = { 472static char ibm6015_pci_IRQ_routes[] = {
473 0, /* Line 0 - unused */ 473 0, /* Line 0 - unused */
474 13, /* Line 1 */ 474 13, /* Line 1 */
475 15, /* Line 2 */ 475 15, /* Line 2 */
@@ -479,7 +479,7 @@ static char ibm6015_pci_IRQ_routes[] __prepdata = {
479 479
480 480
481/* IBM Nobis and Thinkpad 850 */ 481/* IBM Nobis and Thinkpad 850 */
482static char Nobis_pci_IRQ_map[23] __prepdata ={ 482static char Nobis_pci_IRQ_map[23] ={
483 0, /* Slot 0 - unused */ 483 0, /* Slot 0 - unused */
484 0, /* Slot 1 - unused */ 484 0, /* Slot 1 - unused */
485 0, /* Slot 2 - unused */ 485 0, /* Slot 2 - unused */
@@ -498,7 +498,7 @@ static char Nobis_pci_IRQ_map[23] __prepdata ={
498 0, /* Slot 15 - unused */ 498 0, /* Slot 15 - unused */
499}; 499};
500 500
501static char Nobis_pci_IRQ_routes[] __prepdata = { 501static char Nobis_pci_IRQ_routes[] = {
502 0, /* Line 0 - Unused */ 502 0, /* Line 0 - Unused */
503 13, /* Line 1 */ 503 13, /* Line 1 */
504 13, /* Line 2 */ 504 13, /* Line 2 */
@@ -510,7 +510,7 @@ static char Nobis_pci_IRQ_routes[] __prepdata = {
510 * IBM RS/6000 43p/140 -- paulus 510 * IBM RS/6000 43p/140 -- paulus
511 * XXX we should get all this from the residual data 511 * XXX we should get all this from the residual data
512 */ 512 */
513static char ibm43p_pci_IRQ_map[23] __prepdata = { 513static char ibm43p_pci_IRQ_map[23] = {
514 0, /* Slot 0 - unused */ 514 0, /* Slot 0 - unused */
515 0, /* Slot 1 - unused */ 515 0, /* Slot 1 - unused */
516 0, /* Slot 2 - unused */ 516 0, /* Slot 2 - unused */
@@ -536,7 +536,7 @@ static char ibm43p_pci_IRQ_map[23] __prepdata = {
536 1, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */ 536 1, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
537}; 537};
538 538
539static char ibm43p_pci_IRQ_routes[] __prepdata = { 539static char ibm43p_pci_IRQ_routes[] = {
540 0, /* Line 0 - unused */ 540 0, /* Line 0 - unused */
541 15, /* Line 1 */ 541 15, /* Line 1 */
542 15, /* Line 2 */ 542 15, /* Line 2 */
@@ -559,7 +559,7 @@ struct powerplus_irq_list
559 * are routed to OpenPIC inputs 5-8. These values are offset by 559 * are routed to OpenPIC inputs 5-8. These values are offset by
560 * 16 in the table to reflect the Linux kernel interrupt value. 560 * 16 in the table to reflect the Linux kernel interrupt value.
561 */ 561 */
562struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata = 562struct powerplus_irq_list Powerplus_pci_IRQ_list =
563{ 563{
564 {25, 26, 27, 28}, 564 {25, 26, 27, 28},
565 {21, 22, 23, 24} 565 {21, 22, 23, 24}
@@ -572,7 +572,7 @@ struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata =
572 * are routed to OpenPIC inputs 12-15. These values are offset by 572 * are routed to OpenPIC inputs 12-15. These values are offset by
573 * 16 in the table to reflect the Linux kernel interrupt value. 573 * 16 in the table to reflect the Linux kernel interrupt value.
574 */ 574 */
575struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata = 575struct powerplus_irq_list Mesquite_pci_IRQ_list =
576{ 576{
577 {24, 25, 26, 27}, 577 {24, 25, 26, 27},
578 {28, 29, 30, 31} 578 {28, 29, 30, 31}
@@ -582,7 +582,7 @@ struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata =
582 * This table represents the standard PCI swizzle defined in the 582 * This table represents the standard PCI swizzle defined in the
583 * PCI bus specification. 583 * PCI bus specification.
584 */ 584 */
585static unsigned char prep_pci_intpins[4][4] __prepdata = 585static unsigned char prep_pci_intpins[4][4] =
586{ 586{
587 { 1, 2, 3, 4}, /* Buses 0, 4, 8, ... */ 587 { 1, 2, 3, 4}, /* Buses 0, 4, 8, ... */
588 { 2, 3, 4, 1}, /* Buses 1, 5, 9, ... */ 588 { 2, 3, 4, 1}, /* Buses 1, 5, 9, ... */
@@ -622,7 +622,7 @@ static unsigned char prep_pci_intpins[4][4] __prepdata =
622#define MIN_DEVNR 11 622#define MIN_DEVNR 11
623#define MAX_DEVNR 22 623#define MAX_DEVNR 22
624 624
625static int __prep 625static int
626prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 626prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
627 int len, u32 *val) 627 int len, u32 *val)
628{ 628{
@@ -652,7 +652,7 @@ prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
652 return PCIBIOS_SUCCESSFUL; 652 return PCIBIOS_SUCCESSFUL;
653} 653}
654 654
655static int __prep 655static int
656prep_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 656prep_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
657 int len, u32 val) 657 int len, u32 val)
658{ 658{
@@ -804,7 +804,7 @@ struct mot_info {
804 void (*map_non0_bus)(struct pci_dev *); /* For boards with more than bus 0 devices. */ 804 void (*map_non0_bus)(struct pci_dev *); /* For boards with more than bus 0 devices. */
805 struct powerplus_irq_list *pci_irq_list; /* List of PCI MPIC inputs */ 805 struct powerplus_irq_list *pci_irq_list; /* List of PCI MPIC inputs */
806 unsigned char secondary_bridge_devfn; /* devfn of secondary bus transparent bridge */ 806 unsigned char secondary_bridge_devfn; /* devfn of secondary bus transparent bridge */
807} mot_info[] __prepdata = { 807} mot_info[] = {
808 {0x300, 0x00, 0x00, "MVME 2400", Genesis2_pci_IRQ_map, Raven_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0xFF}, 808 {0x300, 0x00, 0x00, "MVME 2400", Genesis2_pci_IRQ_map, Raven_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0xFF},
809 {0x010, 0x00, 0x00, "Genesis", Genesis_pci_IRQ_map, Genesis_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0x00}, 809 {0x010, 0x00, 0x00, "Genesis", Genesis_pci_IRQ_map, Genesis_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0x00},
810 {0x020, 0x00, 0x00, "Powerstack (Series E)", Comet_pci_IRQ_map, Comet_pci_IRQ_routes, NULL, NULL, 0x00}, 810 {0x020, 0x00, 0x00, "Powerstack (Series E)", Comet_pci_IRQ_map, Comet_pci_IRQ_routes, NULL, NULL, 0x00},
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index bc926be95472..9e5637e5f5a9 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -89,9 +89,6 @@ extern void prep_tiger1_setup_pci(char *irq_edge_mask_lo, char *irq_edge_mask_hi
89#define cached_21 (((char *)(ppc_cached_irq_mask))[3]) 89#define cached_21 (((char *)(ppc_cached_irq_mask))[3])
90#define cached_A1 (((char *)(ppc_cached_irq_mask))[2]) 90#define cached_A1 (((char *)(ppc_cached_irq_mask))[2])
91 91
92/* for the mac fs */
93dev_t boot_dev;
94
95#ifdef CONFIG_SOUND_CS4232 92#ifdef CONFIG_SOUND_CS4232
96long ppc_cs4232_dma, ppc_cs4232_dma2; 93long ppc_cs4232_dma, ppc_cs4232_dma2;
97#endif 94#endif
@@ -173,7 +170,7 @@ prep_carolina_enable_l2(void)
173} 170}
174 171
175/* cpuinfo code common to all IBM PReP */ 172/* cpuinfo code common to all IBM PReP */
176static void __prep 173static void
177prep_ibm_cpuinfo(struct seq_file *m) 174prep_ibm_cpuinfo(struct seq_file *m)
178{ 175{
179 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 176 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -209,14 +206,14 @@ prep_ibm_cpuinfo(struct seq_file *m)
209 } 206 }
210} 207}
211 208
212static int __prep 209static int
213prep_gen_cpuinfo(struct seq_file *m) 210prep_gen_cpuinfo(struct seq_file *m)
214{ 211{
215 prep_ibm_cpuinfo(m); 212 prep_ibm_cpuinfo(m);
216 return 0; 213 return 0;
217} 214}
218 215
219static int __prep 216static int
220prep_sandalfoot_cpuinfo(struct seq_file *m) 217prep_sandalfoot_cpuinfo(struct seq_file *m)
221{ 218{
222 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 219 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -243,7 +240,7 @@ prep_sandalfoot_cpuinfo(struct seq_file *m)
243 return 0; 240 return 0;
244} 241}
245 242
246static int __prep 243static int
247prep_thinkpad_cpuinfo(struct seq_file *m) 244prep_thinkpad_cpuinfo(struct seq_file *m)
248{ 245{
249 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 246 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -314,7 +311,7 @@ prep_thinkpad_cpuinfo(struct seq_file *m)
314 return 0; 311 return 0;
315} 312}
316 313
317static int __prep 314static int
318prep_carolina_cpuinfo(struct seq_file *m) 315prep_carolina_cpuinfo(struct seq_file *m)
319{ 316{
320 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 317 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -350,7 +347,7 @@ prep_carolina_cpuinfo(struct seq_file *m)
350 return 0; 347 return 0;
351} 348}
352 349
353static int __prep 350static int
354prep_tiger1_cpuinfo(struct seq_file *m) 351prep_tiger1_cpuinfo(struct seq_file *m)
355{ 352{
356 unsigned int l2_reg = inb(PREP_IBM_L2INFO); 353 unsigned int l2_reg = inb(PREP_IBM_L2INFO);
@@ -393,7 +390,7 @@ prep_tiger1_cpuinfo(struct seq_file *m)
393 390
394 391
395/* Used by all Motorola PReP */ 392/* Used by all Motorola PReP */
396static int __prep 393static int
397prep_mot_cpuinfo(struct seq_file *m) 394prep_mot_cpuinfo(struct seq_file *m)
398{ 395{
399 unsigned int cachew = *((unsigned char *)CACHECRBA); 396 unsigned int cachew = *((unsigned char *)CACHECRBA);
@@ -454,7 +451,7 @@ no_l2:
454 return 0; 451 return 0;
455} 452}
456 453
457static void __prep 454static void
458prep_restart(char *cmd) 455prep_restart(char *cmd)
459{ 456{
460#define PREP_SP92 0x92 /* Special Port 92 */ 457#define PREP_SP92 0x92 /* Special Port 92 */
@@ -473,7 +470,7 @@ prep_restart(char *cmd)
473#undef PREP_SP92 470#undef PREP_SP92
474} 471}
475 472
476static void __prep 473static void
477prep_halt(void) 474prep_halt(void)
478{ 475{
479 local_irq_disable(); /* no interrupts */ 476 local_irq_disable(); /* no interrupts */
@@ -488,7 +485,7 @@ prep_halt(void)
488/* Carrera is the power manager in the Thinkpads. Unfortunately not much is 485/* Carrera is the power manager in the Thinkpads. Unfortunately not much is
489 * known about it, so we can't power down. 486 * known about it, so we can't power down.
490 */ 487 */
491static void __prep 488static void
492prep_carrera_poweroff(void) 489prep_carrera_poweroff(void)
493{ 490{
494 prep_halt(); 491 prep_halt();
@@ -501,7 +498,7 @@ prep_carrera_poweroff(void)
501 * somewhat in the IBM Carolina Technical Specification. 498 * somewhat in the IBM Carolina Technical Specification.
502 * -Hollis 499 * -Hollis
503 */ 500 */
504static void __prep 501static void
505utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value) 502utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
506{ 503{
507 /* 504 /*
@@ -539,7 +536,7 @@ utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
539 udelay(100); /* important: let controller recover */ 536 udelay(100); /* important: let controller recover */
540} 537}
541 538
542static void __prep 539static void
543prep_sig750_poweroff(void) 540prep_sig750_poweroff(void)
544{ 541{
545 /* tweak the power manager found in most IBM PRePs (except Thinkpads) */ 542 /* tweak the power manager found in most IBM PRePs (except Thinkpads) */
@@ -554,7 +551,7 @@ prep_sig750_poweroff(void)
554 /* not reached */ 551 /* not reached */
555} 552}
556 553
557static int __prep 554static int
558prep_show_percpuinfo(struct seq_file *m, int i) 555prep_show_percpuinfo(struct seq_file *m, int i)
559{ 556{
560 /* PREP's without residual data will give incorrect values here */ 557 /* PREP's without residual data will give incorrect values here */
@@ -700,12 +697,12 @@ prep_set_bat(void)
700/* 697/*
701 * IBM 3-digit status LED 698 * IBM 3-digit status LED
702 */ 699 */
703static unsigned int ibm_statusled_base __prepdata; 700static unsigned int ibm_statusled_base;
704 701
705static void __prep 702static void
706ibm_statusled_progress(char *s, unsigned short hex); 703ibm_statusled_progress(char *s, unsigned short hex);
707 704
708static int __prep 705static int
709ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2, 706ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
710 void * dummy3) 707 void * dummy3)
711{ 708{
@@ -713,13 +710,13 @@ ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
713 return NOTIFY_DONE; 710 return NOTIFY_DONE;
714} 711}
715 712
716static struct notifier_block ibm_statusled_block __prepdata = { 713static struct notifier_block ibm_statusled_block = {
717 ibm_statusled_panic, 714 ibm_statusled_panic,
718 NULL, 715 NULL,
719 INT_MAX /* try to do it first */ 716 INT_MAX /* try to do it first */
720}; 717};
721 718
722static void __prep 719static void
723ibm_statusled_progress(char *s, unsigned short hex) 720ibm_statusled_progress(char *s, unsigned short hex)
724{ 721{
725 static int notifier_installed; 722 static int notifier_installed;
@@ -945,19 +942,6 @@ prep_calibrate_decr(void)
945 todc_calibrate_decr(); 942 todc_calibrate_decr();
946} 943}
947 944
948static unsigned int __prep
949prep_irq_canonicalize(u_int irq)
950{
951 if (irq == 2)
952 {
953 return 9;
954 }
955 else
956 {
957 return irq;
958 }
959}
960
961static void __init 945static void __init
962prep_init_IRQ(void) 946prep_init_IRQ(void)
963{ 947{
@@ -996,7 +980,7 @@ prep_init_IRQ(void)
996/* 980/*
997 * IDE stuff. 981 * IDE stuff.
998 */ 982 */
999static int __prep 983static int
1000prep_ide_default_irq(unsigned long base) 984prep_ide_default_irq(unsigned long base)
1001{ 985{
1002 switch (base) { 986 switch (base) {
@@ -1010,7 +994,7 @@ prep_ide_default_irq(unsigned long base)
1010 } 994 }
1011} 995}
1012 996
1013static unsigned long __prep 997static unsigned long
1014prep_ide_default_io_base(int index) 998prep_ide_default_io_base(int index)
1015{ 999{
1016 switch (index) { 1000 switch (index) {
@@ -1055,7 +1039,7 @@ smp_prep_setup_cpu(int cpu_nr)
1055 do_openpic_setup_cpu(); 1039 do_openpic_setup_cpu();
1056} 1040}
1057 1041
1058static struct smp_ops_t prep_smp_ops __prepdata = { 1042static struct smp_ops_t prep_smp_ops = {
1059 smp_openpic_message_pass, 1043 smp_openpic_message_pass,
1060 smp_prep_probe, 1044 smp_prep_probe,
1061 smp_prep_kick_cpu, 1045 smp_prep_kick_cpu,
@@ -1113,6 +1097,7 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1113 ISA_DMA_THRESHOLD = 0x00ffffff; 1097 ISA_DMA_THRESHOLD = 0x00ffffff;
1114 DMA_MODE_READ = 0x44; 1098 DMA_MODE_READ = 0x44;
1115 DMA_MODE_WRITE = 0x48; 1099 DMA_MODE_WRITE = 0x48;
1100 ppc_do_canonicalize_irqs = 1;
1116 1101
1117 /* figure out what kind of prep workstation we are */ 1102 /* figure out what kind of prep workstation we are */
1118 if (have_residual_data) { 1103 if (have_residual_data) {
@@ -1139,7 +1124,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1139 ppc_md.setup_arch = prep_setup_arch; 1124 ppc_md.setup_arch = prep_setup_arch;
1140 ppc_md.show_percpuinfo = prep_show_percpuinfo; 1125 ppc_md.show_percpuinfo = prep_show_percpuinfo;
1141 ppc_md.show_cpuinfo = NULL; /* set in prep_setup_arch() */ 1126 ppc_md.show_cpuinfo = NULL; /* set in prep_setup_arch() */
1142 ppc_md.irq_canonicalize = prep_irq_canonicalize;
1143 ppc_md.init_IRQ = prep_init_IRQ; 1127 ppc_md.init_IRQ = prep_init_IRQ;
1144 /* this gets changed later on if we have an OpenPIC -- Cort */ 1128 /* this gets changed later on if we have an OpenPIC -- Cort */
1145 ppc_md.get_irq = i8259_irq; 1129 ppc_md.get_irq = i8259_irq;
@@ -1176,6 +1160,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1176#endif 1160#endif
1177 1161
1178#ifdef CONFIG_SMP 1162#ifdef CONFIG_SMP
1179 ppc_md.smp_ops = &prep_smp_ops; 1163 smp_ops = &prep_smp_ops;
1180#endif /* CONFIG_SMP */ 1164#endif /* CONFIG_SMP */
1181} 1165}
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c
index 0376c8cff5d1..5058568c13ec 100644
--- a/arch/ppc/platforms/radstone_ppc7d.c
+++ b/arch/ppc/platforms/radstone_ppc7d.c
@@ -1183,18 +1183,18 @@ static void __init ppc7d_setup_arch(void)
1183 ROOT_DEV = Root_HDA1; 1183 ROOT_DEV = Root_HDA1;
1184#endif 1184#endif
1185 1185
1186 if ((cur_cpu_spec[0]->cpu_features & CPU_FTR_SPEC7450) || 1186 if ((cur_cpu_spec->cpu_features & CPU_FTR_SPEC7450) ||
1187 (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR)) 1187 (cur_cpu_spec->cpu_features & CPU_FTR_L3CR))
1188 /* 745x is different. We only want to pass along enable. */ 1188 /* 745x is different. We only want to pass along enable. */
1189 _set_L2CR(L2CR_L2E); 1189 _set_L2CR(L2CR_L2E);
1190 else if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR) 1190 else if (cur_cpu_spec->cpu_features & CPU_FTR_L2CR)
1191 /* All modules have 1MB of L2. We also assume that an 1191 /* All modules have 1MB of L2. We also assume that an
1192 * L2 divisor of 3 will work. 1192 * L2 divisor of 3 will work.
1193 */ 1193 */
1194 _set_L2CR(L2CR_L2E | L2CR_L2SIZ_1MB | L2CR_L2CLK_DIV3 1194 _set_L2CR(L2CR_L2E | L2CR_L2SIZ_1MB | L2CR_L2CLK_DIV3
1195 | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF); 1195 | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF);
1196 1196
1197 if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR) 1197 if (cur_cpu_spec->cpu_features & CPU_FTR_L3CR)
1198 /* No L3 cache */ 1198 /* No L3 cache */
1199 _set_L3CR(0); 1199 _set_L3CR(0);
1200 1200
@@ -1424,6 +1424,7 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
1424 ppc_md.setup_arch = ppc7d_setup_arch; 1424 ppc_md.setup_arch = ppc7d_setup_arch;
1425 ppc_md.init = ppc7d_init2; 1425 ppc_md.init = ppc7d_init2;
1426 ppc_md.show_cpuinfo = ppc7d_show_cpuinfo; 1426 ppc_md.show_cpuinfo = ppc7d_show_cpuinfo;
1427 /* XXX this is broken... */
1427 ppc_md.irq_canonicalize = ppc7d_irq_canonicalize; 1428 ppc_md.irq_canonicalize = ppc7d_irq_canonicalize;
1428 ppc_md.init_IRQ = ppc7d_init_irq; 1429 ppc_md.init_IRQ = ppc7d_init_irq;
1429 ppc_md.get_irq = ppc7d_get_irq; 1430 ppc_md.get_irq = ppc7d_get_irq;
diff --git a/arch/ppc/platforms/residual.c b/arch/ppc/platforms/residual.c
index 0f84ca603612..c9911601cfdf 100644
--- a/arch/ppc/platforms/residual.c
+++ b/arch/ppc/platforms/residual.c
@@ -47,7 +47,7 @@
47#include <asm/ide.h> 47#include <asm/ide.h>
48 48
49 49
50unsigned char __res[sizeof(RESIDUAL)] __prepdata = {0,}; 50unsigned char __res[sizeof(RESIDUAL)] = {0,};
51RESIDUAL *res = (RESIDUAL *)&__res; 51RESIDUAL *res = (RESIDUAL *)&__res;
52 52
53char * PnP_BASE_TYPES[] __initdata = { 53char * PnP_BASE_TYPES[] __initdata = {
diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c
index 5232283c1974..d4c9781989fb 100644
--- a/arch/ppc/platforms/sandpoint.c
+++ b/arch/ppc/platforms/sandpoint.c
@@ -508,15 +508,6 @@ sandpoint_init_IRQ(void)
508 i8259_init(0xfef00000); 508 i8259_init(0xfef00000);
509} 509}
510 510
511static u32
512sandpoint_irq_canonicalize(u32 irq)
513{
514 if (irq == 2)
515 return 9;
516 else
517 return irq;
518}
519
520static unsigned long __init 511static unsigned long __init
521sandpoint_find_end_of_memory(void) 512sandpoint_find_end_of_memory(void)
522{ 513{
@@ -727,10 +718,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
727 ISA_DMA_THRESHOLD = 0x00ffffff; 718 ISA_DMA_THRESHOLD = 0x00ffffff;
728 DMA_MODE_READ = 0x44; 719 DMA_MODE_READ = 0x44;
729 DMA_MODE_WRITE = 0x48; 720 DMA_MODE_WRITE = 0x48;
721 ppc_do_canonicalize_irqs = 1;
730 722
731 ppc_md.setup_arch = sandpoint_setup_arch; 723 ppc_md.setup_arch = sandpoint_setup_arch;
732 ppc_md.show_cpuinfo = sandpoint_show_cpuinfo; 724 ppc_md.show_cpuinfo = sandpoint_show_cpuinfo;
733 ppc_md.irq_canonicalize = sandpoint_irq_canonicalize;
734 ppc_md.init_IRQ = sandpoint_init_IRQ; 725 ppc_md.init_IRQ = sandpoint_init_IRQ;
735 ppc_md.get_irq = openpic_get_irq; 726 ppc_md.get_irq = openpic_get_irq;
736 727
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index b8d08f33f7ee..f6a2f1938bfa 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_8xx) += m8xx_setup.o ppc8xx_pic.o $(wdt-mpc8xx-y) \
39ifeq ($(CONFIG_8xx),y) 39ifeq ($(CONFIG_8xx),y)
40obj-$(CONFIG_PCI) += qspan_pci.o i8259.o 40obj-$(CONFIG_PCI) += qspan_pci.o i8259.o
41endif 41endif
42obj-$(CONFIG_PPC_OF) += prom_init.o prom.o of_device.o 42obj-$(CONFIG_PPC_OF) += prom_init.o prom.o
43obj-$(CONFIG_PPC_PMAC) += open_pic.o indirect_pci.o 43obj-$(CONFIG_PPC_PMAC) += open_pic.o indirect_pci.o
44obj-$(CONFIG_POWER4) += open_pic2.o 44obj-$(CONFIG_POWER4) += open_pic2.o
45obj-$(CONFIG_PPC_CHRP) += open_pic.o indirect_pci.o i8259.o 45obj-$(CONFIG_PPC_CHRP) += open_pic.o indirect_pci.o i8259.o
diff --git a/arch/ppc/syslib/btext.c b/arch/ppc/syslib/btext.c
index 7734f6836174..12fa83e6774a 100644
--- a/arch/ppc/syslib/btext.c
+++ b/arch/ppc/syslib/btext.c
@@ -53,8 +53,8 @@ extern char *klimit;
53 * chrp only uses it during early boot. 53 * chrp only uses it during early boot.
54 */ 54 */
55#ifdef CONFIG_XMON 55#ifdef CONFIG_XMON
56#define BTEXT __pmac 56#define BTEXT
57#define BTDATA __pmacdata 57#define BTDATA
58#else 58#else
59#define BTEXT __init 59#define BTEXT __init
60#define BTDATA __initdata 60#define BTDATA __initdata
@@ -187,7 +187,7 @@ btext_setup_display(int width, int height, int depth, int pitch,
187 * changes. 187 * changes.
188 */ 188 */
189 189
190void __openfirmware 190void
191map_boot_text(void) 191map_boot_text(void)
192{ 192{
193 unsigned long base, offset, size; 193 unsigned long base, offset, size;
diff --git a/arch/ppc/syslib/gt64260_pic.c b/arch/ppc/syslib/gt64260_pic.c
index 44aa87385451..f97b3a9abd1e 100644
--- a/arch/ppc/syslib/gt64260_pic.c
+++ b/arch/ppc/syslib/gt64260_pic.c
@@ -45,6 +45,7 @@
45#include <asm/system.h> 45#include <asm/system.h>
46#include <asm/irq.h> 46#include <asm/irq.h>
47#include <asm/mv64x60.h> 47#include <asm/mv64x60.h>
48#include <asm/machdep.h>
48 49
49#define CPU_INTR_STR "gt64260 cpu interface error" 50#define CPU_INTR_STR "gt64260 cpu interface error"
50#define PCI0_INTR_STR "gt64260 pci 0 error" 51#define PCI0_INTR_STR "gt64260 pci 0 error"
diff --git a/arch/ppc/syslib/ibm440gx_common.c b/arch/ppc/syslib/ibm440gx_common.c
index 0bb919859b8b..c36db279b43d 100644
--- a/arch/ppc/syslib/ibm440gx_common.c
+++ b/arch/ppc/syslib/ibm440gx_common.c
@@ -236,9 +236,9 @@ void __init ibm440gx_l2c_setup(struct ibm44x_clocks* p)
236 /* Disable L2C on rev.A, rev.B and 800MHz version of rev.C, 236 /* Disable L2C on rev.A, rev.B and 800MHz version of rev.C,
237 enable it on all other revisions 237 enable it on all other revisions
238 */ 238 */
239 if (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. A") == 0 || 239 if (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. A") == 0 ||
240 strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. B") == 0 240 strcmp(cur_cpu_spec->cpu_name, "440GX Rev. B") == 0
241 || (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. C") 241 || (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. C")
242 == 0 && p->cpu > 667000000)) 242 == 0 && p->cpu > 667000000))
243 ibm440gx_l2c_disable(); 243 ibm440gx_l2c_disable();
244 else 244 else
diff --git a/arch/ppc/syslib/ibm44x_common.c b/arch/ppc/syslib/ibm44x_common.c
index 7612e0623f99..95e11f93c15d 100644
--- a/arch/ppc/syslib/ibm44x_common.c
+++ b/arch/ppc/syslib/ibm44x_common.c
@@ -178,7 +178,7 @@ void __init ibm44x_platform_init(void)
178#endif 178#endif
179} 179}
180 180
181/* Called from MachineCheckException */ 181/* Called from machine_check_exception */
182void platform_machine_check(struct pt_regs *regs) 182void platform_machine_check(struct pt_regs *regs)
183{ 183{
184 printk("PLB0: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x\n", 184 printk("PLB0: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x\n",
diff --git a/arch/ppc/syslib/m82xx_pci.c b/arch/ppc/syslib/m82xx_pci.c
index 9db58c587b46..5cce123789f1 100644
--- a/arch/ppc/syslib/m82xx_pci.c
+++ b/arch/ppc/syslib/m82xx_pci.c
@@ -302,7 +302,7 @@ pq2ads_setup_pci(struct pci_controller *hose)
302 302
303void __init pq2_find_bridges(void) 303void __init pq2_find_bridges(void)
304{ 304{
305 extern int pci_assign_all_busses; 305 extern int pci_assign_all_buses;
306 struct pci_controller * hose; 306 struct pci_controller * hose;
307 int host_bridge; 307 int host_bridge;
308 308
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c
index 4c888da89b3c..c88e2d4dceb7 100644
--- a/arch/ppc/syslib/m8xx_setup.c
+++ b/arch/ppc/syslib/m8xx_setup.c
@@ -406,7 +406,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
406 406
407 ppc_md.setup_arch = m8xx_setup_arch; 407 ppc_md.setup_arch = m8xx_setup_arch;
408 ppc_md.show_percpuinfo = m8xx_show_percpuinfo; 408 ppc_md.show_percpuinfo = m8xx_show_percpuinfo;
409 ppc_md.irq_canonicalize = NULL;
410 ppc_md.init_IRQ = m8xx_init_IRQ; 409 ppc_md.init_IRQ = m8xx_init_IRQ;
411 ppc_md.get_irq = m8xx_get_irq; 410 ppc_md.get_irq = m8xx_get_irq;
412 ppc_md.init = NULL; 411 ppc_md.init = NULL;
diff --git a/arch/ppc/syslib/mpc52xx_pci.c b/arch/ppc/syslib/mpc52xx_pci.c
index 59cf3e8bd1a0..4ac19080eb85 100644
--- a/arch/ppc/syslib/mpc52xx_pci.c
+++ b/arch/ppc/syslib/mpc52xx_pci.c
@@ -21,6 +21,7 @@
21#include "mpc52xx_pci.h" 21#include "mpc52xx_pci.h"
22 22
23#include <asm/delay.h> 23#include <asm/delay.h>
24#include <asm/machdep.h>
24 25
25 26
26static int 27static int
@@ -181,7 +182,7 @@ mpc52xx_find_bridges(void)
181 struct mpc52xx_pci __iomem *pci_regs; 182 struct mpc52xx_pci __iomem *pci_regs;
182 struct pci_controller *hose; 183 struct pci_controller *hose;
183 184
184 pci_assign_all_busses = 1; 185 pci_assign_all_buses = 1;
185 186
186 pci_regs = ioremap(MPC52xx_PA(MPC52xx_PCI_OFFSET), MPC52xx_PCI_SIZE); 187 pci_regs = ioremap(MPC52xx_PA(MPC52xx_PCI_OFFSET), MPC52xx_PCI_SIZE);
187 if (!pci_regs) 188 if (!pci_regs)
diff --git a/arch/ppc/syslib/mpc83xx_devices.c b/arch/ppc/syslib/mpc83xx_devices.c
index 95b3b8a7f0ba..dbf8acac507f 100644
--- a/arch/ppc/syslib/mpc83xx_devices.c
+++ b/arch/ppc/syslib/mpc83xx_devices.c
@@ -21,6 +21,7 @@
21#include <asm/mpc83xx.h> 21#include <asm/mpc83xx.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/ppc_sys.h> 23#include <asm/ppc_sys.h>
24#include <asm/machdep.h>
24 25
25/* We use offsets for IORESOURCE_MEM since we do not know at compile time 26/* We use offsets for IORESOURCE_MEM since we do not know at compile time
26 * what IMMRBAR is, will get fixed up by mach_mpc83xx_fixup 27 * what IMMRBAR is, will get fixed up by mach_mpc83xx_fixup
diff --git a/arch/ppc/syslib/mv64360_pic.c b/arch/ppc/syslib/mv64360_pic.c
index 8356da4678a2..58b0aa813e85 100644
--- a/arch/ppc/syslib/mv64360_pic.c
+++ b/arch/ppc/syslib/mv64360_pic.c
@@ -48,6 +48,7 @@
48#include <asm/system.h> 48#include <asm/system.h>
49#include <asm/irq.h> 49#include <asm/irq.h>
50#include <asm/mv64x60.h> 50#include <asm/mv64x60.h>
51#include <asm/machdep.h>
51 52
52#ifdef CONFIG_IRQ_ALL_CPUS 53#ifdef CONFIG_IRQ_ALL_CPUS
53#error "The mv64360 does not support distribution of IRQs on all CPUs" 54#error "The mv64360 does not support distribution of IRQs on all CPUs"
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
index 4849850a59ed..a781c50d2f4c 100644
--- a/arch/ppc/syslib/mv64x60.c
+++ b/arch/ppc/syslib/mv64x60.c
@@ -1304,7 +1304,7 @@ mv64x60_config_pci_params(struct pci_controller *hose,
1304 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val); 1304 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val);
1305 1305
1306 /* Set latency timer, cache line size, clear BIST */ 1306 /* Set latency timer, cache line size, clear BIST */
1307 u16_val = (pi->latency_timer << 8) | (L1_CACHE_LINE_SIZE >> 2); 1307 u16_val = (pi->latency_timer << 8) | (L1_CACHE_BYTES >> 2);
1308 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val); 1308 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val);
1309 1309
1310 mv64x60_pci_exclude_bridge = save_exclude; 1310 mv64x60_pci_exclude_bridge = save_exclude;
diff --git a/arch/ppc/syslib/mv64x60_dbg.c b/arch/ppc/syslib/mv64x60_dbg.c
index 2927c7adf5e5..fa5b2e45e0ca 100644
--- a/arch/ppc/syslib/mv64x60_dbg.c
+++ b/arch/ppc/syslib/mv64x60_dbg.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <asm/delay.h> 25#include <asm/delay.h>
26#include <asm/mv64x60.h> 26#include <asm/mv64x60.h>
27#include <asm/machdep.h>
27 28
28 29
29#if defined(CONFIG_SERIAL_TEXT_DEBUG) 30#if defined(CONFIG_SERIAL_TEXT_DEBUG)
diff --git a/arch/ppc/syslib/of_device.c b/arch/ppc/syslib/of_device.c
deleted file mode 100644
index 93c7231ea709..000000000000
--- a/arch/ppc/syslib/of_device.c
+++ /dev/null
@@ -1,276 +0,0 @@
1#include <linux/config.h>
2#include <linux/string.h>
3#include <linux/kernel.h>
4#include <linux/init.h>
5#include <linux/module.h>
6#include <linux/mod_devicetable.h>
7#include <asm/errno.h>
8#include <asm/of_device.h>
9
10/**
11 * of_match_device - Tell if an of_device structure has a matching
12 * of_match structure
13 * @ids: array of of device match structures to search in
14 * @dev: the of device structure to match against
15 *
16 * Used by a driver to check whether an of_device present in the
17 * system is in its list of supported devices.
18 */
19const struct of_device_id * of_match_device(const struct of_device_id *matches,
20 const struct of_device *dev)
21{
22 if (!dev->node)
23 return NULL;
24 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
25 int match = 1;
26 if (matches->name[0])
27 match &= dev->node->name
28 && !strcmp(matches->name, dev->node->name);
29 if (matches->type[0])
30 match &= dev->node->type
31 && !strcmp(matches->type, dev->node->type);
32 if (matches->compatible[0])
33 match &= device_is_compatible(dev->node,
34 matches->compatible);
35 if (match)
36 return matches;
37 matches++;
38 }
39 return NULL;
40}
41
42static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
43{
44 struct of_device * of_dev = to_of_device(dev);
45 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
46 const struct of_device_id * matches = of_drv->match_table;
47
48 if (!matches)
49 return 0;
50
51 return of_match_device(matches, of_dev) != NULL;
52}
53
54struct of_device *of_dev_get(struct of_device *dev)
55{
56 struct device *tmp;
57
58 if (!dev)
59 return NULL;
60 tmp = get_device(&dev->dev);
61 if (tmp)
62 return to_of_device(tmp);
63 else
64 return NULL;
65}
66
67void of_dev_put(struct of_device *dev)
68{
69 if (dev)
70 put_device(&dev->dev);
71}
72
73
74static int of_device_probe(struct device *dev)
75{
76 int error = -ENODEV;
77 struct of_platform_driver *drv;
78 struct of_device *of_dev;
79 const struct of_device_id *match;
80
81 drv = to_of_platform_driver(dev->driver);
82 of_dev = to_of_device(dev);
83
84 if (!drv->probe)
85 return error;
86
87 of_dev_get(of_dev);
88
89 match = of_match_device(drv->match_table, of_dev);
90 if (match)
91 error = drv->probe(of_dev, match);
92 if (error)
93 of_dev_put(of_dev);
94
95 return error;
96}
97
98static int of_device_remove(struct device *dev)
99{
100 struct of_device * of_dev = to_of_device(dev);
101 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
102
103 if (dev->driver && drv->remove)
104 drv->remove(of_dev);
105 return 0;
106}
107
108static int of_device_suspend(struct device *dev, pm_message_t state)
109{
110 struct of_device * of_dev = to_of_device(dev);
111 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
112 int error = 0;
113
114 if (dev->driver && drv->suspend)
115 error = drv->suspend(of_dev, state);
116 return error;
117}
118
119static int of_device_resume(struct device * dev)
120{
121 struct of_device * of_dev = to_of_device(dev);
122 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
123 int error = 0;
124
125 if (dev->driver && drv->resume)
126 error = drv->resume(of_dev);
127 return error;
128}
129
130struct bus_type of_platform_bus_type = {
131 .name = "of_platform",
132 .match = of_platform_bus_match,
133 .suspend = of_device_suspend,
134 .resume = of_device_resume,
135};
136
137static int __init of_bus_driver_init(void)
138{
139 return bus_register(&of_platform_bus_type);
140}
141
142postcore_initcall(of_bus_driver_init);
143
144int of_register_driver(struct of_platform_driver *drv)
145{
146 int count = 0;
147
148 /* initialize common driver fields */
149 drv->driver.name = drv->name;
150 drv->driver.bus = &of_platform_bus_type;
151 drv->driver.probe = of_device_probe;
152 drv->driver.remove = of_device_remove;
153
154 /* register with core */
155 count = driver_register(&drv->driver);
156 return count ? count : 1;
157}
158
159void of_unregister_driver(struct of_platform_driver *drv)
160{
161 driver_unregister(&drv->driver);
162}
163
164
165static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
166{
167 struct of_device *ofdev;
168
169 ofdev = to_of_device(dev);
170 return sprintf(buf, "%s", ofdev->node->full_name);
171}
172
173static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
174
175/**
176 * of_release_dev - free an of device structure when all users of it are finished.
177 * @dev: device that's been disconnected
178 *
179 * Will be called only by the device core when all users of this of device are
180 * done.
181 */
182void of_release_dev(struct device *dev)
183{
184 struct of_device *ofdev;
185
186 ofdev = to_of_device(dev);
187 of_node_put(ofdev->node);
188 kfree(ofdev);
189}
190
191int of_device_register(struct of_device *ofdev)
192{
193 int rc;
194 struct of_device **odprop;
195
196 BUG_ON(ofdev->node == NULL);
197
198 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
199 if (!odprop) {
200 struct property *new_prop;
201
202 new_prop = kmalloc(sizeof(struct property) + sizeof(struct of_device *),
203 GFP_KERNEL);
204 if (new_prop == NULL)
205 return -ENOMEM;
206 new_prop->name = "linux,device";
207 new_prop->length = sizeof(sizeof(struct of_device *));
208 new_prop->value = (unsigned char *)&new_prop[1];
209 odprop = (struct of_device **)new_prop->value;
210 *odprop = NULL;
211 prom_add_property(ofdev->node, new_prop);
212 }
213 *odprop = ofdev;
214
215 rc = device_register(&ofdev->dev);
216 if (rc)
217 return rc;
218
219 device_create_file(&ofdev->dev, &dev_attr_devspec);
220
221 return 0;
222}
223
224void of_device_unregister(struct of_device *ofdev)
225{
226 struct of_device **odprop;
227
228 device_remove_file(&ofdev->dev, &dev_attr_devspec);
229
230 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
231 if (odprop)
232 *odprop = NULL;
233
234 device_unregister(&ofdev->dev);
235}
236
237struct of_device* of_platform_device_create(struct device_node *np,
238 const char *bus_id,
239 struct device *parent)
240{
241 struct of_device *dev;
242 u32 *reg;
243
244 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
245 if (!dev)
246 return NULL;
247 memset(dev, 0, sizeof(*dev));
248
249 dev->node = of_node_get(np);
250 dev->dma_mask = 0xffffffffUL;
251 dev->dev.dma_mask = &dev->dma_mask;
252 dev->dev.parent = parent;
253 dev->dev.bus = &of_platform_bus_type;
254 dev->dev.release = of_release_dev;
255
256 reg = (u32 *)get_property(np, "reg", NULL);
257 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
258
259 if (of_device_register(dev) != 0) {
260 kfree(dev);
261 return NULL;
262 }
263
264 return dev;
265}
266
267EXPORT_SYMBOL(of_match_device);
268EXPORT_SYMBOL(of_platform_bus_type);
269EXPORT_SYMBOL(of_register_driver);
270EXPORT_SYMBOL(of_unregister_driver);
271EXPORT_SYMBOL(of_device_register);
272EXPORT_SYMBOL(of_device_unregister);
273EXPORT_SYMBOL(of_dev_get);
274EXPORT_SYMBOL(of_dev_put);
275EXPORT_SYMBOL(of_platform_device_create);
276EXPORT_SYMBOL(of_release_dev);
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index 1cf5de21a3fd..894779712b46 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -23,6 +23,7 @@
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <asm/open_pic.h> 24#include <asm/open_pic.h>
25#include <asm/i8259.h> 25#include <asm/i8259.h>
26#include <asm/machdep.h>
26 27
27#include "open_pic_defs.h" 28#include "open_pic_defs.h"
28 29
@@ -889,7 +890,7 @@ openpic_get_irq(struct pt_regs *regs)
889 890
890#ifdef CONFIG_SMP 891#ifdef CONFIG_SMP
891void 892void
892smp_openpic_message_pass(int target, int msg, unsigned long data, int wait) 893smp_openpic_message_pass(int target, int msg)
893{ 894{
894 cpumask_t mask = CPU_MASK_ALL; 895 cpumask_t mask = CPU_MASK_ALL;
895 /* make sure we're sending something that translates to an IPI */ 896 /* make sure we're sending something that translates to an IPI */
diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c
index 16cff91d9f41..1c40049b9a45 100644
--- a/arch/ppc/syslib/open_pic2.c
+++ b/arch/ppc/syslib/open_pic2.c
@@ -27,6 +27,7 @@
27#include <asm/sections.h> 27#include <asm/sections.h>
28#include <asm/open_pic.h> 28#include <asm/open_pic.h>
29#include <asm/i8259.h> 29#include <asm/i8259.h>
30#include <asm/machdep.h>
30 31
31#include "open_pic_defs.h" 32#include "open_pic_defs.h"
32 33
diff --git a/arch/ppc/syslib/ppc403_pic.c b/arch/ppc/syslib/ppc403_pic.c
index ce4d1deb86e9..c46043c47225 100644
--- a/arch/ppc/syslib/ppc403_pic.c
+++ b/arch/ppc/syslib/ppc403_pic.c
@@ -26,6 +26,7 @@
26#include <asm/system.h> 26#include <asm/system.h>
27#include <asm/irq.h> 27#include <asm/irq.h>
28#include <asm/ppc4xx_pic.h> 28#include <asm/ppc4xx_pic.h>
29#include <asm/machdep.h>
29 30
30/* Function Prototypes */ 31/* Function Prototypes */
31 32
diff --git a/arch/ppc/syslib/ppc4xx_pic.c b/arch/ppc/syslib/ppc4xx_pic.c
index 40086212b9c3..0b435633a0d1 100644
--- a/arch/ppc/syslib/ppc4xx_pic.c
+++ b/arch/ppc/syslib/ppc4xx_pic.c
@@ -25,6 +25,7 @@
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/ppc4xx_pic.h> 27#include <asm/ppc4xx_pic.h>
28#include <asm/machdep.h>
28 29
29/* See comment in include/arch-ppc/ppc4xx_pic.h 30/* See comment in include/arch-ppc/ppc4xx_pic.h
30 * for more info about these two variables 31 * for more info about these two variables
diff --git a/arch/ppc/syslib/ppc4xx_setup.c b/arch/ppc/syslib/ppc4xx_setup.c
index bf83240689dc..e83a83fd95e1 100644
--- a/arch/ppc/syslib/ppc4xx_setup.c
+++ b/arch/ppc/syslib/ppc4xx_setup.c
@@ -278,7 +278,7 @@ ppc4xx_init(unsigned long r3, unsigned long r4, unsigned long r5,
278#endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */ 278#endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */
279} 279}
280 280
281/* Called from MachineCheckException */ 281/* Called from machine_check_exception */
282void platform_machine_check(struct pt_regs *regs) 282void platform_machine_check(struct pt_regs *regs)
283{ 283{
284#if defined(DCRN_PLB0_BEAR) 284#if defined(DCRN_PLB0_BEAR)
diff --git a/arch/ppc/syslib/ppc83xx_setup.c b/arch/ppc/syslib/ppc83xx_setup.c
index 890484e576e7..4da168a6ad03 100644
--- a/arch/ppc/syslib/ppc83xx_setup.c
+++ b/arch/ppc/syslib/ppc83xx_setup.c
@@ -40,6 +40,7 @@
40#include <asm/ppc_sys.h> 40#include <asm/ppc_sys.h>
41#include <asm/kgdb.h> 41#include <asm/kgdb.h>
42#include <asm/delay.h> 42#include <asm/delay.h>
43#include <asm/machdep.h>
43 44
44#include <syslib/ppc83xx_setup.h> 45#include <syslib/ppc83xx_setup.h>
45#if defined(CONFIG_PCI) 46#if defined(CONFIG_PCI)
diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c
index 832b8bf99ae7..de2f90576577 100644
--- a/arch/ppc/syslib/ppc85xx_setup.c
+++ b/arch/ppc/syslib/ppc85xx_setup.c
@@ -29,6 +29,7 @@
29#include <asm/mmu.h> 29#include <asm/mmu.h>
30#include <asm/ppc_sys.h> 30#include <asm/ppc_sys.h>
31#include <asm/kgdb.h> 31#include <asm/kgdb.h>
32#include <asm/machdep.h>
32 33
33#include <syslib/ppc85xx_setup.h> 34#include <syslib/ppc85xx_setup.h>
34 35
diff --git a/arch/ppc/syslib/pq2_devices.c b/arch/ppc/syslib/pq2_devices.c
index 1d3869768f96..6f88ba93412b 100644
--- a/arch/ppc/syslib/pq2_devices.c
+++ b/arch/ppc/syslib/pq2_devices.c
@@ -18,6 +18,7 @@
18#include <asm/cpm2.h> 18#include <asm/cpm2.h>
19#include <asm/irq.h> 19#include <asm/irq.h>
20#include <asm/ppc_sys.h> 20#include <asm/ppc_sys.h>
21#include <asm/machdep.h>
21 22
22struct platform_device ppc_sys_platform_devices[] = { 23struct platform_device ppc_sys_platform_devices[] = {
23 [MPC82xx_CPM_FCC1] = { 24 [MPC82xx_CPM_FCC1] = {
diff --git a/arch/ppc/syslib/prep_nvram.c b/arch/ppc/syslib/prep_nvram.c
index 8599850ca772..2c6364d9641f 100644
--- a/arch/ppc/syslib/prep_nvram.c
+++ b/arch/ppc/syslib/prep_nvram.c
@@ -22,14 +22,14 @@
22static char nvramData[MAX_PREP_NVRAM]; 22static char nvramData[MAX_PREP_NVRAM];
23static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0]; 23static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0];
24 24
25unsigned char __prep prep_nvram_read_val(int addr) 25unsigned char prep_nvram_read_val(int addr)
26{ 26{
27 outb(addr, PREP_NVRAM_AS0); 27 outb(addr, PREP_NVRAM_AS0);
28 outb(addr>>8, PREP_NVRAM_AS1); 28 outb(addr>>8, PREP_NVRAM_AS1);
29 return inb(PREP_NVRAM_DATA); 29 return inb(PREP_NVRAM_DATA);
30} 30}
31 31
32void __prep prep_nvram_write_val(int addr, 32void prep_nvram_write_val(int addr,
33 unsigned char val) 33 unsigned char val)
34{ 34{
35 outb(addr, PREP_NVRAM_AS0); 35 outb(addr, PREP_NVRAM_AS0);
@@ -81,8 +81,7 @@ void __init init_prep_nvram(void)
81 } 81 }
82} 82}
83 83
84__prep 84char *prep_nvram_get_var(const char *name)
85char __prep *prep_nvram_get_var(const char *name)
86{ 85{
87 char *cp; 86 char *cp;
88 int namelen; 87 int namelen;
@@ -101,8 +100,7 @@ char __prep *prep_nvram_get_var(const char *name)
101 return NULL; 100 return NULL;
102} 101}
103 102
104__prep 103char *prep_nvram_first_var(void)
105char __prep *prep_nvram_first_var(void)
106{ 104{
107 if (nvram->Header.GELength == 0) { 105 if (nvram->Header.GELength == 0) {
108 return NULL; 106 return NULL;
@@ -112,8 +110,7 @@ char __prep *prep_nvram_first_var(void)
112 } 110 }
113} 111}
114 112
115__prep 113char *prep_nvram_next_var(char *name)
116char __prep *prep_nvram_next_var(char *name)
117{ 114{
118 char *cp; 115 char *cp;
119 116
diff --git a/arch/ppc/syslib/prom.c b/arch/ppc/syslib/prom.c
index 2c64ed627475..278da6ee62ea 100644
--- a/arch/ppc/syslib/prom.c
+++ b/arch/ppc/syslib/prom.c
@@ -89,7 +89,7 @@ extern char cmd_line[512]; /* XXX */
89extern boot_infos_t *boot_infos; 89extern boot_infos_t *boot_infos;
90unsigned long dev_tree_size; 90unsigned long dev_tree_size;
91 91
92void __openfirmware 92void
93phys_call_rtas(int service, int nargs, int nret, ...) 93phys_call_rtas(int service, int nargs, int nret, ...)
94{ 94{
95 va_list list; 95 va_list list;
@@ -862,7 +862,7 @@ find_type_devices(const char *type)
862/* 862/*
863 * Returns all nodes linked together 863 * Returns all nodes linked together
864 */ 864 */
865struct device_node * __openfirmware 865struct device_node *
866find_all_nodes(void) 866find_all_nodes(void)
867{ 867{
868 struct device_node *head, **prevp, *np; 868 struct device_node *head, **prevp, *np;
@@ -1165,7 +1165,7 @@ get_property(struct device_node *np, const char *name, int *lenp)
1165/* 1165/*
1166 * Add a property to a node 1166 * Add a property to a node
1167 */ 1167 */
1168void __openfirmware 1168void
1169prom_add_property(struct device_node* np, struct property* prop) 1169prom_add_property(struct device_node* np, struct property* prop)
1170{ 1170{
1171 struct property **next = &np->properties; 1171 struct property **next = &np->properties;
@@ -1177,7 +1177,7 @@ prom_add_property(struct device_node* np, struct property* prop)
1177} 1177}
1178 1178
1179/* I quickly hacked that one, check against spec ! */ 1179/* I quickly hacked that one, check against spec ! */
1180static inline unsigned long __openfirmware 1180static inline unsigned long
1181bus_space_to_resource_flags(unsigned int bus_space) 1181bus_space_to_resource_flags(unsigned int bus_space)
1182{ 1182{
1183 u8 space = (bus_space >> 24) & 0xf; 1183 u8 space = (bus_space >> 24) & 0xf;
@@ -1194,7 +1194,7 @@ bus_space_to_resource_flags(unsigned int bus_space)
1194 } 1194 }
1195} 1195}
1196 1196
1197static struct resource* __openfirmware 1197static struct resource*
1198find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range) 1198find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
1199{ 1199{
1200 unsigned long mask; 1200 unsigned long mask;
@@ -1224,7 +1224,7 @@ find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
1224 * or other nodes attached to the root node. Ultimately, put some 1224 * or other nodes attached to the root node. Ultimately, put some
1225 * link to resources in the OF node. 1225 * link to resources in the OF node.
1226 */ 1226 */
1227struct resource* __openfirmware 1227struct resource*
1228request_OF_resource(struct device_node* node, int index, const char* name_postfix) 1228request_OF_resource(struct device_node* node, int index, const char* name_postfix)
1229{ 1229{
1230 struct pci_dev* pcidev; 1230 struct pci_dev* pcidev;
@@ -1280,7 +1280,7 @@ fail:
1280 return NULL; 1280 return NULL;
1281} 1281}
1282 1282
1283int __openfirmware 1283int
1284release_OF_resource(struct device_node* node, int index) 1284release_OF_resource(struct device_node* node, int index)
1285{ 1285{
1286 struct pci_dev* pcidev; 1286 struct pci_dev* pcidev;
@@ -1346,7 +1346,7 @@ release_OF_resource(struct device_node* node, int index)
1346} 1346}
1347 1347
1348#if 0 1348#if 0
1349void __openfirmware 1349void
1350print_properties(struct device_node *np) 1350print_properties(struct device_node *np)
1351{ 1351{
1352 struct property *pp; 1352 struct property *pp;
@@ -1400,7 +1400,7 @@ print_properties(struct device_node *np)
1400static DEFINE_SPINLOCK(rtas_lock); 1400static DEFINE_SPINLOCK(rtas_lock);
1401 1401
1402/* this can be called after setup -- Cort */ 1402/* this can be called after setup -- Cort */
1403int __openfirmware 1403int
1404call_rtas(const char *service, int nargs, int nret, 1404call_rtas(const char *service, int nargs, int nret,
1405 unsigned long *outputs, ...) 1405 unsigned long *outputs, ...)
1406{ 1406{
diff --git a/arch/ppc/syslib/xilinx_pic.c b/arch/ppc/syslib/xilinx_pic.c
index 2cbcad278cef..47f04c71fe9c 100644
--- a/arch/ppc/syslib/xilinx_pic.c
+++ b/arch/ppc/syslib/xilinx_pic.c
@@ -17,6 +17,7 @@
17#include <asm/io.h> 17#include <asm/io.h>
18#include <asm/xparameters.h> 18#include <asm/xparameters.h>
19#include <asm/ibm4xx.h> 19#include <asm/ibm4xx.h>
20#include <asm/machdep.h>
20 21
21/* No one else should require these constants, so define them locally here. */ 22/* No one else should require these constants, so define them locally here. */
22#define ISR 0 /* Interrupt Status Register */ 23#define ISR 0 /* Interrupt Status Register */
diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c
index 507d4eeffe07..98612d420346 100644
--- a/arch/ppc/xmon/start.c
+++ b/arch/ppc/xmon/start.c
@@ -478,8 +478,9 @@ void *xmon_stdout;
478void *xmon_stderr; 478void *xmon_stderr;
479 479
480void 480void
481xmon_init(void) 481xmon_init(int arg)
482{ 482{
483 xmon_map_scc();
483} 484}
484 485
485int 486int
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index c658650af429..8cbac7f32092 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -10,6 +10,9 @@ config MMU
10 bool 10 bool
11 default y 11 default y
12 12
13config PPC_STD_MMU
14 def_bool y
15
13config UID16 16config UID16
14 bool 17 bool
15 18
@@ -186,6 +189,9 @@ config BOOTX_TEXT
186 Say Y here to see progress messages from the boot firmware in text 189 Say Y here to see progress messages from the boot firmware in text
187 mode. Requires an Open Firmware compatible video card. 190 mode. Requires an Open Firmware compatible video card.
188 191
192config POWER4
193 def_bool y
194
189config POWER4_ONLY 195config POWER4_ONLY
190 bool "Optimize for POWER4" 196 bool "Optimize for POWER4"
191 default n 197 default n
@@ -357,7 +363,6 @@ config HOTPLUG_CPU
357 363
358config PROC_DEVICETREE 364config PROC_DEVICETREE
359 bool "Support for Open Firmware device tree in /proc" 365 bool "Support for Open Firmware device tree in /proc"
360 depends on !PPC_ISERIES
361 help 366 help
362 This option adds a device-tree directory under /proc which contains 367 This option adds a device-tree directory under /proc which contains
363 an image of the device tree that the kernel copies from Open 368 an image of the device tree that the kernel copies from Open
@@ -461,7 +466,7 @@ config VIOPATH
461 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH 466 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
462 default y 467 default y
463 468
464source "arch/ppc64/oprofile/Kconfig" 469source "arch/powerpc/oprofile/Kconfig"
465 470
466source "arch/ppc64/Kconfig.debug" 471source "arch/ppc64/Kconfig.debug"
467 472
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 521c2a5a2862..4d18bdb680f0 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -82,10 +82,12 @@ CFLAGS += $(call cc-option,-funit-at-a-time)
82head-y := arch/ppc64/kernel/head.o 82head-y := arch/ppc64/kernel/head.o
83 83
84libs-y += arch/ppc64/lib/ 84libs-y += arch/ppc64/lib/
85core-y += arch/ppc64/kernel/ 85core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
86core-y += arch/ppc64/mm/ 86core-y += arch/powerpc/mm/
87core-y += arch/powerpc/sysdev/
88core-y += arch/powerpc/platforms/
87core-$(CONFIG_XMON) += arch/ppc64/xmon/ 89core-$(CONFIG_XMON) += arch/ppc64/xmon/
88drivers-$(CONFIG_OPROFILE) += arch/ppc64/oprofile/ 90drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
89 91
90boot := arch/ppc64/boot 92boot := arch/ppc64/boot
91 93
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
deleted file mode 100644
index 90032b138902..000000000000
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/stddef.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <asm/system.h>
13#include <asm/iSeries/HvLpEvent.h>
14#include <asm/iSeries/HvCallEvent.h>
15#include <asm/iSeries/ItLpNaca.h>
16
17/* Array of LpEvent handler functions */
18LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
19unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
20
21/* Register a handler for an LpEvent type */
22
23int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler handler )
24{
25 int rc = 1;
26 if ( eventType < HvLpEvent_Type_NumTypes ) {
27 lpEventHandler[eventType] = handler;
28 rc = 0;
29 }
30 return rc;
31
32}
33
34int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
35{
36 int rc = 1;
37
38 might_sleep();
39
40 if ( eventType < HvLpEvent_Type_NumTypes ) {
41 if ( !lpEventHandlerPaths[eventType] ) {
42 lpEventHandler[eventType] = NULL;
43 rc = 0;
44
45 /* We now sleep until all other CPUs have scheduled. This ensures that
46 * the deletion is seen by all other CPUs, and that the deleted handler
47 * isn't still running on another CPU when we return. */
48 synchronize_rcu();
49 }
50 }
51 return rc;
52}
53EXPORT_SYMBOL(HvLpEvent_registerHandler);
54EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
55
56/* (lpIndex is the partition index of the target partition.
57 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
58 * indicates to use our partition index - for the other types)
59 */
60int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
61{
62 int rc = 1;
63 if ( eventType < HvLpEvent_Type_NumTypes &&
64 lpEventHandler[eventType] ) {
65 if ( lpIndex == 0 )
66 lpIndex = itLpNaca.xLpIndex;
67 HvCallEvent_openLpEventPath( lpIndex, eventType );
68 ++lpEventHandlerPaths[eventType];
69 rc = 0;
70 }
71 return rc;
72}
73
74int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
75{
76 int rc = 1;
77 if ( eventType < HvLpEvent_Type_NumTypes &&
78 lpEventHandler[eventType] &&
79 lpEventHandlerPaths[eventType] ) {
80 if ( lpIndex == 0 )
81 lpIndex = itLpNaca.xLpIndex;
82 HvCallEvent_closeLpEventPath( lpIndex, eventType );
83 --lpEventHandlerPaths[eventType];
84 rc = 0;
85 }
86 return rc;
87}
88
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index ae60eb1193c6..424dd250cd87 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -2,36 +2,34 @@
2# Makefile for the linux ppc64 kernel. 2# Makefile for the linux ppc64 kernel.
3# 3#
4 4
5ifneq ($(CONFIG_PPC_MERGE),y)
6
5EXTRA_CFLAGS += -mno-minimal-toc 7EXTRA_CFLAGS += -mno-minimal-toc
6extra-y := head.o vmlinux.lds 8extra-y := head.o vmlinux.lds
7 9
8obj-y := setup.o entry.o traps.o irq.o idle.o dma.o \ 10obj-y := setup.o entry.o misc.o prom.o
9 time.o process.o signal.o syscalls.o misc.o ptrace.o \
10 align.o semaphore.o bitops.o pacaData.o \
11 udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
12 ptrace32.o signal32.o rtc.o init_task.o \
13 lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
14 iommu.o sysfs.o vdso.o pmc.o firmware.o
15obj-y += vdso32/ vdso64/
16 11
17obj-$(CONFIG_PPC_OF) += of_device.o 12endif
13
14obj-y += irq.o idle.o dma.o \
15 signal.o \
16 align.o bitops.o pacaData.o \
17 udbg.o ioctl32.o \
18 rtc.o \
19 cpu_setup_power4.o \
20 iommu.o sysfs.o vdso.o firmware.o
21obj-y += vdso32/ vdso64/
18 22
19pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_irq.o \
20 iSeries_VpdInfo.o
21pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o 23pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
22 24
23obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y) 25obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
24 26
25obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \ 27obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o
26 iSeries_setup.o ItLpQueue.o hvCall.o \ 28ifneq ($(CONFIG_PPC_MERGE),y)
27 mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \ 29obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
28 iSeries_iommu.o 30endif
29
30obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
31 31
32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ 32obj-$(CONFIG_PPC_PSERIES) += rtasd.o ras.o udbg_16550.o
33 pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
34 pSeries_setup.o pSeries_iommu.o udbg_16550.o
35 33
36obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \ 34obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
37 bpa_iic.o spider-pic.o 35 bpa_iic.o spider-pic.o
@@ -41,25 +39,24 @@ obj-$(CONFIG_EEH) += eeh.o
41obj-$(CONFIG_PROC_FS) += proc_ppc64.o 39obj-$(CONFIG_PROC_FS) += proc_ppc64.o
42obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o 40obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
43obj-$(CONFIG_SMP) += smp.o 41obj-$(CONFIG_SMP) += smp.o
44obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o 42obj-$(CONFIG_MODULES) += module.o
43ifneq ($(CONFIG_PPC_MERGE),y)
44obj-$(CONFIG_MODULES) += ppc_ksyms.o
45endif
45obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o 46obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o
46obj-$(CONFIG_RTAS_PROC) += rtas-proc.o 47obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
47obj-$(CONFIG_SCANLOG) += scanlog.o 48obj-$(CONFIG_SCANLOG) += scanlog.o
48obj-$(CONFIG_VIOPATH) += viopath.o
49obj-$(CONFIG_LPARCFG) += lparcfg.o 49obj-$(CONFIG_LPARCFG) += lparcfg.o
50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
51ifneq ($(CONFIG_PPC_MERGE),y)
51obj-$(CONFIG_BOOTX_TEXT) += btext.o 52obj-$(CONFIG_BOOTX_TEXT) += btext.o
53endif
52obj-$(CONFIG_HVCS) += hvcserver.o 54obj-$(CONFIG_HVCS) += hvcserver.o
53 55
54vio-obj-$(CONFIG_PPC_PSERIES) += pSeries_vio.o 56obj-$(CONFIG_IBMVIO) += vio.o
55vio-obj-$(CONFIG_PPC_ISERIES) += iSeries_vio.o
56obj-$(CONFIG_IBMVIO) += vio.o $(vio-obj-y)
57obj-$(CONFIG_XICS) += xics.o 57obj-$(CONFIG_XICS) += xics.o
58obj-$(CONFIG_MPIC) += mpic.o
59 58
60obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ 59obj-$(CONFIG_PPC_PMAC) += udbg_scc.o
61 pmac_time.o pmac_nvram.o pmac_low_i2c.o \
62 udbg_scc.o
63 60
64obj-$(CONFIG_PPC_MAPLE) += maple_setup.o maple_pci.o maple_time.o \ 61obj-$(CONFIG_PPC_MAPLE) += maple_setup.o maple_pci.o maple_time.o \
65 udbg_16550.o 62 udbg_16550.o
@@ -67,19 +64,17 @@ obj-$(CONFIG_PPC_MAPLE) += maple_setup.o maple_pci.o maple_time.o \
67obj-$(CONFIG_U3_DART) += u3_iommu.o 64obj-$(CONFIG_U3_DART) += u3_iommu.o
68 65
69ifdef CONFIG_SMP 66ifdef CONFIG_SMP
70obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o 67obj-$(CONFIG_PPC_PMAC) += smp-tbsync.o
71obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o
72obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o
73obj-$(CONFIG_PPC_BPA) += pSeries_smp.o
74obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o 68obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o
75endif 69endif
76 70
77obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
78obj-$(CONFIG_KPROBES) += kprobes.o 71obj-$(CONFIG_KPROBES) += kprobes.o
79 72
80CFLAGS_ioctl32.o += -Ifs/ 73CFLAGS_ioctl32.o += -Ifs/
81 74
75ifneq ($(CONFIG_PPC_MERGE),y)
82ifeq ($(CONFIG_PPC_ISERIES),y) 76ifeq ($(CONFIG_PPC_ISERIES),y)
83arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s 77arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
84AFLAGS_head.o += -Iarch/ppc64/kernel 78AFLAGS_head.o += -Iarch/powerpc/kernel
79endif
85endif 80endif
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 1ff4fa05a973..5e6046cb414e 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -46,8 +46,6 @@
46int main(void) 46int main(void)
47{ 47{
48 /* thread struct on stack */ 48 /* thread struct on stack */
49 DEFINE(THREAD_SHIFT, THREAD_SHIFT);
50 DEFINE(THREAD_SIZE, THREAD_SIZE);
51 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 49 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
52 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 50 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
53 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); 51 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
@@ -77,6 +75,7 @@ int main(void)
77 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); 75 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
78 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 76 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
79 DEFINE(PLATFORM, offsetof(struct systemcfg, platform)); 77 DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
78 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
80 79
81 /* paca */ 80 /* paca */
82 DEFINE(PACA_SIZE, sizeof(struct paca_struct)); 81 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index 5f2460090e03..da1b4b7a3269 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -39,8 +39,8 @@
39#include <asm/pmac_feature.h> 39#include <asm/pmac_feature.h>
40#include <asm/abs_addr.h> 40#include <asm/abs_addr.h>
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/ppc-pci.h>
42 43
43#include "pci.h"
44#include "bpa_iommu.h" 44#include "bpa_iommu.h"
45 45
46static inline unsigned long 46static inline unsigned long
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
index 57b3db66f458..c2dc8f282eb8 100644
--- a/arch/ppc64/kernel/bpa_setup.c
+++ b/arch/ppc64/kernel/bpa_setup.c
@@ -43,8 +43,9 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/nvram.h> 44#include <asm/nvram.h>
45#include <asm/cputable.h> 45#include <asm/cputable.h>
46#include <asm/ppc-pci.h>
47#include <asm/irq.h>
46 48
47#include "pci.h"
48#include "bpa_iic.h" 49#include "bpa_iic.h"
49#include "bpa_iommu.h" 50#include "bpa_iommu.h"
50 51
@@ -54,7 +55,7 @@
54#define DBG(fmt...) 55#define DBG(fmt...)
55#endif 56#endif
56 57
57void bpa_get_cpuinfo(struct seq_file *m) 58void bpa_show_cpuinfo(struct seq_file *m)
58{ 59{
59 struct device_node *root; 60 struct device_node *root;
60 const char *model = ""; 61 const char *model = "";
@@ -128,7 +129,7 @@ struct machdep_calls __initdata bpa_md = {
128 .probe = bpa_probe, 129 .probe = bpa_probe,
129 .setup_arch = bpa_setup_arch, 130 .setup_arch = bpa_setup_arch,
130 .init_early = bpa_init_early, 131 .init_early = bpa_init_early,
131 .get_cpuinfo = bpa_get_cpuinfo, 132 .show_cpuinfo = bpa_show_cpuinfo,
132 .restart = rtas_restart, 133 .restart = rtas_restart,
133 .power_off = rtas_power_off, 134 .power_off = rtas_power_off,
134 .halt = rtas_halt, 135 .halt = rtas_halt,
diff --git a/arch/ppc64/kernel/btext.c b/arch/ppc64/kernel/btext.c
index b6fbfbe9032d..506a37885c5c 100644
--- a/arch/ppc64/kernel/btext.c
+++ b/arch/ppc64/kernel/btext.c
@@ -18,6 +18,7 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/lmb.h> 19#include <asm/lmb.h>
20#include <asm/processor.h> 20#include <asm/processor.h>
21#include <asm/udbg.h>
21 22
22#undef NO_SCROLL 23#undef NO_SCROLL
23 24
@@ -131,6 +132,47 @@ int btext_initialize(struct device_node *np)
131 return 0; 132 return 0;
132} 133}
133 134
135static void btext_putc(unsigned char c)
136{
137 btext_drawchar(c);
138}
139
140void __init init_boot_display(void)
141{
142 char *name;
143 struct device_node *np = NULL;
144 int rc = -ENODEV;
145
146 printk("trying to initialize btext ...\n");
147
148 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
149 if (name != NULL) {
150 np = of_find_node_by_path(name);
151 if (np != NULL) {
152 if (strcmp(np->type, "display") != 0) {
153 printk("boot stdout isn't a display !\n");
154 of_node_put(np);
155 np = NULL;
156 }
157 }
158 }
159 if (np)
160 rc = btext_initialize(np);
161 if (rc) {
162 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
163 if (get_property(np, "linux,opened", NULL)) {
164 printk("trying %s ...\n", np->full_name);
165 rc = btext_initialize(np);
166 printk("result: %d\n", rc);
167 }
168 if (rc == 0)
169 break;
170 }
171 }
172 if (rc == 0 && udbg_putc == NULL)
173 udbg_putc = btext_putc;
174}
175
134 176
135/* Calc the base address of a given point (x,y) */ 177/* Calc the base address of a given point (x,y) */
136static unsigned char * calc_base(int x, int y) 178static unsigned char * calc_base(int x, int y)
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
deleted file mode 100644
index 8831a28c3c4e..000000000000
--- a/arch/ppc64/kernel/cputable.c
+++ /dev/null
@@ -1,308 +0,0 @@
1/*
2 * arch/ppc64/kernel/cputable.c
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/string.h>
17#include <linux/sched.h>
18#include <linux/threads.h>
19#include <linux/init.h>
20#include <linux/module.h>
21
22#include <asm/oprofile_impl.h>
23#include <asm/cputable.h>
24
25struct cpu_spec* cur_cpu_spec = NULL;
26EXPORT_SYMBOL(cur_cpu_spec);
27
28/* NOTE:
29 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
30 * the responsibility of the appropriate CPU save/restore functions to
31 * eventually copy these settings over. Those save/restore aren't yet
32 * part of the cputable though. That has to be fixed for both ppc32
33 * and ppc64
34 */
35extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
37extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
38extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
39
40
41/* We only set the altivec features if the kernel was compiled with altivec
42 * support
43 */
44#ifdef CONFIG_ALTIVEC
45#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
46#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
47#else
48#define CPU_FTR_ALTIVEC_COMP 0
49#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
50#endif
51
52struct cpu_spec cpu_specs[] = {
53 { /* Power3 */
54 .pvr_mask = 0xffff0000,
55 .pvr_value = 0x00400000,
56 .cpu_name = "POWER3 (630)",
57 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
58 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
59 .cpu_user_features = COMMON_USER_PPC64,
60 .icache_bsize = 128,
61 .dcache_bsize = 128,
62 .num_pmcs = 8,
63 .cpu_setup = __setup_cpu_power3,
64#ifdef CONFIG_OPROFILE
65 .oprofile_cpu_type = "ppc64/power3",
66 .oprofile_model = &op_model_rs64,
67#endif
68 },
69 { /* Power3+ */
70 .pvr_mask = 0xffff0000,
71 .pvr_value = 0x00410000,
72 .cpu_name = "POWER3 (630+)",
73 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
74 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
75 .cpu_user_features = COMMON_USER_PPC64,
76 .icache_bsize = 128,
77 .dcache_bsize = 128,
78 .num_pmcs = 8,
79 .cpu_setup = __setup_cpu_power3,
80#ifdef CONFIG_OPROFILE
81 .oprofile_cpu_type = "ppc64/power3",
82 .oprofile_model = &op_model_rs64,
83#endif
84 },
85 { /* Northstar */
86 .pvr_mask = 0xffff0000,
87 .pvr_value = 0x00330000,
88 .cpu_name = "RS64-II (northstar)",
89 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
90 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
91 CPU_FTR_MMCRA | CPU_FTR_CTRL,
92 .cpu_user_features = COMMON_USER_PPC64,
93 .icache_bsize = 128,
94 .dcache_bsize = 128,
95 .num_pmcs = 8,
96 .cpu_setup = __setup_cpu_power3,
97#ifdef CONFIG_OPROFILE
98 .oprofile_cpu_type = "ppc64/rs64",
99 .oprofile_model = &op_model_rs64,
100#endif
101 },
102 { /* Pulsar */
103 .pvr_mask = 0xffff0000,
104 .pvr_value = 0x00340000,
105 .cpu_name = "RS64-III (pulsar)",
106 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
107 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
108 CPU_FTR_MMCRA | CPU_FTR_CTRL,
109 .cpu_user_features = COMMON_USER_PPC64,
110 .icache_bsize = 128,
111 .dcache_bsize = 128,
112 .num_pmcs = 8,
113 .cpu_setup = __setup_cpu_power3,
114#ifdef CONFIG_OPROFILE
115 .oprofile_cpu_type = "ppc64/rs64",
116 .oprofile_model = &op_model_rs64,
117#endif
118 },
119 { /* I-star */
120 .pvr_mask = 0xffff0000,
121 .pvr_value = 0x00360000,
122 .cpu_name = "RS64-III (icestar)",
123 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
124 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
125 CPU_FTR_MMCRA | CPU_FTR_CTRL,
126 .cpu_user_features = COMMON_USER_PPC64,
127 .icache_bsize = 128,
128 .dcache_bsize = 128,
129 .num_pmcs = 8,
130 .cpu_setup = __setup_cpu_power3,
131#ifdef CONFIG_OPROFILE
132 .oprofile_cpu_type = "ppc64/rs64",
133 .oprofile_model = &op_model_rs64,
134#endif
135 },
136 { /* S-star */
137 .pvr_mask = 0xffff0000,
138 .pvr_value = 0x00370000,
139 .cpu_name = "RS64-IV (sstar)",
140 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
141 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
142 CPU_FTR_MMCRA | CPU_FTR_CTRL,
143 .cpu_user_features = COMMON_USER_PPC64,
144 .icache_bsize = 128,
145 .dcache_bsize = 128,
146 .num_pmcs = 8,
147 .cpu_setup = __setup_cpu_power3,
148#ifdef CONFIG_OPROFILE
149 .oprofile_cpu_type = "ppc64/rs64",
150 .oprofile_model = &op_model_rs64,
151#endif
152 },
153 { /* Power4 */
154 .pvr_mask = 0xffff0000,
155 .pvr_value = 0x00350000,
156 .cpu_name = "POWER4 (gp)",
157 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
158 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
159 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
160 .cpu_user_features = COMMON_USER_PPC64,
161 .icache_bsize = 128,
162 .dcache_bsize = 128,
163 .num_pmcs = 8,
164 .cpu_setup = __setup_cpu_power4,
165#ifdef CONFIG_OPROFILE
166 .oprofile_cpu_type = "ppc64/power4",
167 .oprofile_model = &op_model_rs64,
168#endif
169 },
170 { /* Power4+ */
171 .pvr_mask = 0xffff0000,
172 .pvr_value = 0x00380000,
173 .cpu_name = "POWER4+ (gq)",
174 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
175 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
176 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
177 .cpu_user_features = COMMON_USER_PPC64,
178 .icache_bsize = 128,
179 .dcache_bsize = 128,
180 .num_pmcs = 8,
181 .cpu_setup = __setup_cpu_power4,
182#ifdef CONFIG_OPROFILE
183 .oprofile_cpu_type = "ppc64/power4",
184 .oprofile_model = &op_model_power4,
185#endif
186 },
187 { /* PPC970 */
188 .pvr_mask = 0xffff0000,
189 .pvr_value = 0x00390000,
190 .cpu_name = "PPC970",
191 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
192 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
193 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
194 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
195 .cpu_user_features = COMMON_USER_PPC64 |
196 PPC_FEATURE_HAS_ALTIVEC_COMP,
197 .icache_bsize = 128,
198 .dcache_bsize = 128,
199 .num_pmcs = 8,
200 .cpu_setup = __setup_cpu_ppc970,
201#ifdef CONFIG_OPROFILE
202 .oprofile_cpu_type = "ppc64/970",
203 .oprofile_model = &op_model_power4,
204#endif
205 },
206 { /* PPC970FX */
207 .pvr_mask = 0xffff0000,
208 .pvr_value = 0x003c0000,
209 .cpu_name = "PPC970FX",
210 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
211 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
212 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
213 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
214 .cpu_user_features = COMMON_USER_PPC64 |
215 PPC_FEATURE_HAS_ALTIVEC_COMP,
216 .icache_bsize = 128,
217 .dcache_bsize = 128,
218 .num_pmcs = 8,
219 .cpu_setup = __setup_cpu_ppc970,
220#ifdef CONFIG_OPROFILE
221 .oprofile_cpu_type = "ppc64/970",
222 .oprofile_model = &op_model_power4,
223#endif
224 },
225 { /* PPC970MP */
226 .pvr_mask = 0xffff0000,
227 .pvr_value = 0x00440000,
228 .cpu_name = "PPC970MP",
229 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
230 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
231 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
232 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
233 .cpu_user_features = COMMON_USER_PPC64 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .icache_bsize = 128,
236 .dcache_bsize = 128,
237 .cpu_setup = __setup_cpu_ppc970,
238#ifdef CONFIG_OPROFILE
239 .oprofile_cpu_type = "ppc64/970",
240 .oprofile_model = &op_model_power4,
241#endif
242 },
243 { /* Power5 */
244 .pvr_mask = 0xffff0000,
245 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)",
247 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
248 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
249 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
250 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
251 CPU_FTR_MMCRA_SIHV,
252 .cpu_user_features = COMMON_USER_PPC64,
253 .icache_bsize = 128,
254 .dcache_bsize = 128,
255 .num_pmcs = 6,
256 .cpu_setup = __setup_cpu_power4,
257#ifdef CONFIG_OPROFILE
258 .oprofile_cpu_type = "ppc64/power5",
259 .oprofile_model = &op_model_power4,
260#endif
261 },
262 { /* Power5 */
263 .pvr_mask = 0xffff0000,
264 .pvr_value = 0x003b0000,
265 .cpu_name = "POWER5 (gs)",
266 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
267 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
268 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
269 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
270 CPU_FTR_MMCRA_SIHV,
271 .cpu_user_features = COMMON_USER_PPC64,
272 .icache_bsize = 128,
273 .dcache_bsize = 128,
274 .num_pmcs = 6,
275 .cpu_setup = __setup_cpu_power4,
276#ifdef CONFIG_OPROFILE
277 .oprofile_cpu_type = "ppc64/power5",
278 .oprofile_model = &op_model_power4,
279#endif
280 },
281 { /* BE DD1.x */
282 .pvr_mask = 0xffff0000,
283 .pvr_value = 0x00700000,
284 .cpu_name = "Broadband Engine",
285 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
286 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
287 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
288 CPU_FTR_SMT,
289 .cpu_user_features = COMMON_USER_PPC64 |
290 PPC_FEATURE_HAS_ALTIVEC_COMP,
291 .icache_bsize = 128,
292 .dcache_bsize = 128,
293 .cpu_setup = __setup_cpu_be,
294 },
295 { /* default match */
296 .pvr_mask = 0x00000000,
297 .pvr_value = 0x00000000,
298 .cpu_name = "POWER4 (compatible)",
299 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
300 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
301 CPU_FTR_PPCAS_ARCH_V2,
302 .cpu_user_features = COMMON_USER_PPC64,
303 .icache_bsize = 128,
304 .dcache_bsize = 128,
305 .num_pmcs = 6,
306 .cpu_setup = __setup_cpu_power4,
307 }
308};
diff --git a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c
index ba93fd731222..035d1b14a207 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/ppc64/kernel/eeh.c
@@ -33,7 +33,7 @@
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include <asm/atomic.h> 34#include <asm/atomic.h>
35#include <asm/systemcfg.h> 35#include <asm/systemcfg.h>
36#include "pci.h" 36#include <asm/ppc-pci.h>
37 37
38#undef DEBUG 38#undef DEBUG
39 39
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
index e8c0bbf4d000..5d2fcbe384c1 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/ppc64/kernel/entry.S
@@ -191,8 +191,8 @@ syscall_exit_trace_cont:
191 ld r1,GPR1(r1) 191 ld r1,GPR1(r1)
192 mtlr r4 192 mtlr r4
193 mtcr r5 193 mtcr r5
194 mtspr SRR0,r7 194 mtspr SPRN_SRR0,r7
195 mtspr SRR1,r8 195 mtspr SPRN_SRR1,r8
196 rfid 196 rfid
197 b . /* prevent speculative execution */ 197 b . /* prevent speculative execution */
198 198
@@ -265,7 +265,7 @@ _GLOBAL(save_nvgprs)
265 */ 265 */
266_GLOBAL(ppc32_sigsuspend) 266_GLOBAL(ppc32_sigsuspend)
267 bl .save_nvgprs 267 bl .save_nvgprs
268 bl .sys32_sigsuspend 268 bl .compat_sys_sigsuspend
269 b 70f 269 b 70f
270 270
271_GLOBAL(ppc64_rt_sigsuspend) 271_GLOBAL(ppc64_rt_sigsuspend)
@@ -275,7 +275,7 @@ _GLOBAL(ppc64_rt_sigsuspend)
275 275
276_GLOBAL(ppc32_rt_sigsuspend) 276_GLOBAL(ppc32_rt_sigsuspend)
277 bl .save_nvgprs 277 bl .save_nvgprs
278 bl .sys32_rt_sigsuspend 278 bl .compat_sys_rt_sigsuspend
27970: cmpdi 0,r3,0 27970: cmpdi 0,r3,0
280 /* If it returned an error, we need to return via syscall_exit to set 280 /* If it returned an error, we need to return via syscall_exit to set
281 the SO bit in cr0 and potentially stop for ptrace. */ 281 the SO bit in cr0 and potentially stop for ptrace. */
@@ -310,7 +310,7 @@ _GLOBAL(ppc_clone)
310 310
311_GLOBAL(ppc32_swapcontext) 311_GLOBAL(ppc32_swapcontext)
312 bl .save_nvgprs 312 bl .save_nvgprs
313 bl .sys32_swapcontext 313 bl .compat_sys_swapcontext
314 b 80f 314 b 80f
315 315
316_GLOBAL(ppc64_swapcontext) 316_GLOBAL(ppc64_swapcontext)
@@ -319,11 +319,11 @@ _GLOBAL(ppc64_swapcontext)
319 b 80f 319 b 80f
320 320
321_GLOBAL(ppc32_sigreturn) 321_GLOBAL(ppc32_sigreturn)
322 bl .sys32_sigreturn 322 bl .compat_sys_sigreturn
323 b 80f 323 b 80f
324 324
325_GLOBAL(ppc32_rt_sigreturn) 325_GLOBAL(ppc32_rt_sigreturn)
326 bl .sys32_rt_sigreturn 326 bl .compat_sys_rt_sigreturn
327 b 80f 327 b 80f
328 328
329_GLOBAL(ppc64_rt_sigreturn) 329_GLOBAL(ppc64_rt_sigreturn)
@@ -531,7 +531,7 @@ restore:
531 mtctr r3 531 mtctr r3
532 mtlr r0 532 mtlr r0
533 ld r3,_XER(r1) 533 ld r3,_XER(r1)
534 mtspr XER,r3 534 mtspr SPRN_XER,r3
535 535
536 REST_8GPRS(5, r1) 536 REST_8GPRS(5, r1)
537 537
@@ -543,12 +543,12 @@ restore:
543 mtmsrd r0,1 543 mtmsrd r0,1
544 544
545 ld r0,_MSR(r1) 545 ld r0,_MSR(r1)
546 mtspr SRR1,r0 546 mtspr SPRN_SRR1,r0
547 547
548 ld r2,_CCR(r1) 548 ld r2,_CCR(r1)
549 mtcrf 0xFF,r2 549 mtcrf 0xFF,r2
550 ld r2,_NIP(r1) 550 ld r2,_NIP(r1)
551 mtspr SRR0,r2 551 mtspr SPRN_SRR0,r2
552 552
553 ld r0,GPR0(r1) 553 ld r0,GPR0(r1)
554 ld r2,GPR2(r1) 554 ld r2,GPR2(r1)
@@ -643,7 +643,7 @@ _GLOBAL(enter_rtas)
643 std r4,_CCR(r1) 643 std r4,_CCR(r1)
644 mfctr r5 644 mfctr r5
645 std r5,_CTR(r1) 645 std r5,_CTR(r1)
646 mfspr r6,XER 646 mfspr r6,SPRN_XER
647 std r6,_XER(r1) 647 std r6,_XER(r1)
648 mfdar r7 648 mfdar r7
649 std r7,_DAR(r1) 649 std r7,_DAR(r1)
@@ -697,14 +697,14 @@ _GLOBAL(enter_rtas)
697 ld r5,RTASENTRY(r4) /* get the rtas->entry value */ 697 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
698 ld r4,RTASBASE(r4) /* get the rtas->base value */ 698 ld r4,RTASBASE(r4) /* get the rtas->base value */
699 699
700 mtspr SRR0,r5 700 mtspr SPRN_SRR0,r5
701 mtspr SRR1,r6 701 mtspr SPRN_SRR1,r6
702 rfid 702 rfid
703 b . /* prevent speculative execution */ 703 b . /* prevent speculative execution */
704 704
705_STATIC(rtas_return_loc) 705_STATIC(rtas_return_loc)
706 /* relocation is off at this point */ 706 /* relocation is off at this point */
707 mfspr r4,SPRG3 /* Get PACA */ 707 mfspr r4,SPRN_SPRG3 /* Get PACA */
708 SET_REG_TO_CONST(r5, KERNELBASE) 708 SET_REG_TO_CONST(r5, KERNELBASE)
709 sub r4,r4,r5 /* RELOC the PACA base pointer */ 709 sub r4,r4,r5 /* RELOC the PACA base pointer */
710 710
@@ -718,8 +718,8 @@ _STATIC(rtas_return_loc)
718 LOADADDR(r3,.rtas_restore_regs) 718 LOADADDR(r3,.rtas_restore_regs)
719 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ 719 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
720 720
721 mtspr SRR0,r3 721 mtspr SPRN_SRR0,r3
722 mtspr SRR1,r4 722 mtspr SPRN_SRR1,r4
723 rfid 723 rfid
724 b . /* prevent speculative execution */ 724 b . /* prevent speculative execution */
725 725
@@ -730,14 +730,14 @@ _STATIC(rtas_restore_regs)
730 REST_8GPRS(14, r1) /* Restore the non-volatiles */ 730 REST_8GPRS(14, r1) /* Restore the non-volatiles */
731 REST_10GPRS(22, r1) /* ditto */ 731 REST_10GPRS(22, r1) /* ditto */
732 732
733 mfspr r13,SPRG3 733 mfspr r13,SPRN_SPRG3
734 734
735 ld r4,_CCR(r1) 735 ld r4,_CCR(r1)
736 mtcr r4 736 mtcr r4
737 ld r5,_CTR(r1) 737 ld r5,_CTR(r1)
738 mtctr r5 738 mtctr r5
739 ld r6,_XER(r1) 739 ld r6,_XER(r1)
740 mtspr XER,r6 740 mtspr SPRN_XER,r6
741 ld r7,_DAR(r1) 741 ld r7,_DAR(r1)
742 mtdar r7 742 mtdar r7
743 ld r8,_DSISR(r1) 743 ld r8,_DSISR(r1)
@@ -774,7 +774,7 @@ _GLOBAL(enter_prom)
774 std r4,_CCR(r1) 774 std r4,_CCR(r1)
775 mfctr r5 775 mfctr r5
776 std r5,_CTR(r1) 776 std r5,_CTR(r1)
777 mfspr r6,XER 777 mfspr r6,SPRN_XER
778 std r6,_XER(r1) 778 std r6,_XER(r1)
779 mfdar r7 779 mfdar r7
780 std r7,_DAR(r1) 780 std r7,_DAR(r1)
@@ -827,7 +827,7 @@ _GLOBAL(enter_prom)
827 ld r5,_CTR(r1) 827 ld r5,_CTR(r1)
828 mtctr r5 828 mtctr r5
829 ld r6,_XER(r1) 829 ld r6,_XER(r1)
830 mtspr XER,r6 830 mtspr SPRN_XER,r6
831 ld r7,_DAR(r1) 831 ld r7,_DAR(r1)
832 mtdar r7 832 mtdar r7
833 ld r8,_DSISR(r1) 833 ld r8,_DSISR(r1)
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 72c61041151a..f58af9c246cb 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -36,6 +36,7 @@
36#include <asm/setup.h> 36#include <asm/setup.h>
37#include <asm/hvcall.h> 37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h> 38#include <asm/iSeries/LparMap.h>
39#include <asm/thread_info.h>
39 40
40#ifdef CONFIG_PPC_ISERIES 41#ifdef CONFIG_PPC_ISERIES
41#define DO_SOFT_DISABLE 42#define DO_SOFT_DISABLE
@@ -201,22 +202,22 @@ exception_marker:
201#define EX_CCR 60 202#define EX_CCR 60
202 203
203#define EXCEPTION_PROLOG_PSERIES(area, label) \ 204#define EXCEPTION_PROLOG_PSERIES(area, label) \
204 mfspr r13,SPRG3; /* get paca address into r13 */ \ 205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
205 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 206 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
206 std r10,area+EX_R10(r13); \ 207 std r10,area+EX_R10(r13); \
207 std r11,area+EX_R11(r13); \ 208 std r11,area+EX_R11(r13); \
208 std r12,area+EX_R12(r13); \ 209 std r12,area+EX_R12(r13); \
209 mfspr r9,SPRG1; \ 210 mfspr r9,SPRN_SPRG1; \
210 std r9,area+EX_R13(r13); \ 211 std r9,area+EX_R13(r13); \
211 mfcr r9; \ 212 mfcr r9; \
212 clrrdi r12,r13,32; /* get high part of &label */ \ 213 clrrdi r12,r13,32; /* get high part of &label */ \
213 mfmsr r10; \ 214 mfmsr r10; \
214 mfspr r11,SRR0; /* save SRR0 */ \ 215 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
215 ori r12,r12,(label)@l; /* virt addr of handler */ \ 216 ori r12,r12,(label)@l; /* virt addr of handler */ \
216 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ 217 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
217 mtspr SRR0,r12; \ 218 mtspr SPRN_SRR0,r12; \
218 mfspr r12,SRR1; /* and SRR1 */ \ 219 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
219 mtspr SRR1,r10; \ 220 mtspr SPRN_SRR1,r10; \
220 rfid; \ 221 rfid; \
221 b . /* prevent speculative execution */ 222 b . /* prevent speculative execution */
222 223
@@ -225,12 +226,12 @@ exception_marker:
225 * This code runs with relocation on. 226 * This code runs with relocation on.
226 */ 227 */
227#define EXCEPTION_PROLOG_ISERIES_1(area) \ 228#define EXCEPTION_PROLOG_ISERIES_1(area) \
228 mfspr r13,SPRG3; /* get paca address into r13 */ \ 229 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
229 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 230 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
230 std r10,area+EX_R10(r13); \ 231 std r10,area+EX_R10(r13); \
231 std r11,area+EX_R11(r13); \ 232 std r11,area+EX_R11(r13); \
232 std r12,area+EX_R12(r13); \ 233 std r12,area+EX_R12(r13); \
233 mfspr r9,SPRG1; \ 234 mfspr r9,SPRN_SPRG1; \
234 std r9,area+EX_R13(r13); \ 235 std r9,area+EX_R13(r13); \
235 mfcr r9 236 mfcr r9
236 237
@@ -283,7 +284,7 @@ exception_marker:
283 std r9,_LINK(r1); \ 284 std r9,_LINK(r1); \
284 mfctr r10; /* save CTR in stackframe */ \ 285 mfctr r10; /* save CTR in stackframe */ \
285 std r10,_CTR(r1); \ 286 std r10,_CTR(r1); \
286 mfspr r11,XER; /* save XER in stackframe */ \ 287 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
287 std r11,_XER(r1); \ 288 std r11,_XER(r1); \
288 li r9,(n)+1; \ 289 li r9,(n)+1; \
289 std r9,_TRAP(r1); /* set trap number */ \ 290 std r9,_TRAP(r1); /* set trap number */ \
@@ -300,7 +301,7 @@ exception_marker:
300 .globl label##_pSeries; \ 301 .globl label##_pSeries; \
301label##_pSeries: \ 302label##_pSeries: \
302 HMT_MEDIUM; \ 303 HMT_MEDIUM; \
303 mtspr SPRG1,r13; /* save r13 */ \ 304 mtspr SPRN_SPRG1,r13; /* save r13 */ \
304 RUNLATCH_ON(r13); \ 305 RUNLATCH_ON(r13); \
305 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 306 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
306 307
@@ -308,7 +309,7 @@ label##_pSeries: \
308 .globl label##_iSeries; \ 309 .globl label##_iSeries; \
309label##_iSeries: \ 310label##_iSeries: \
310 HMT_MEDIUM; \ 311 HMT_MEDIUM; \
311 mtspr SPRG1,r13; /* save r13 */ \ 312 mtspr SPRN_SPRG1,r13; /* save r13 */ \
312 RUNLATCH_ON(r13); \ 313 RUNLATCH_ON(r13); \
313 EXCEPTION_PROLOG_ISERIES_1(area); \ 314 EXCEPTION_PROLOG_ISERIES_1(area); \
314 EXCEPTION_PROLOG_ISERIES_2; \ 315 EXCEPTION_PROLOG_ISERIES_2; \
@@ -318,7 +319,7 @@ label##_iSeries: \
318 .globl label##_iSeries; \ 319 .globl label##_iSeries; \
319label##_iSeries: \ 320label##_iSeries: \
320 HMT_MEDIUM; \ 321 HMT_MEDIUM; \
321 mtspr SPRG1,r13; /* save r13 */ \ 322 mtspr SPRN_SPRG1,r13; /* save r13 */ \
322 RUNLATCH_ON(r13); \ 323 RUNLATCH_ON(r13); \
323 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ 324 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
324 lbz r10,PACAPROCENABLED(r13); \ 325 lbz r10,PACAPROCENABLED(r13); \
@@ -388,7 +389,7 @@ __start_interrupts:
388 . = 0x200 389 . = 0x200
389_machine_check_pSeries: 390_machine_check_pSeries:
390 HMT_MEDIUM 391 HMT_MEDIUM
391 mtspr SPRG1,r13 /* save r13 */ 392 mtspr SPRN_SPRG1,r13 /* save r13 */
392 RUNLATCH_ON(r13) 393 RUNLATCH_ON(r13)
393 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 394 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
394 395
@@ -396,18 +397,18 @@ _machine_check_pSeries:
396 .globl data_access_pSeries 397 .globl data_access_pSeries
397data_access_pSeries: 398data_access_pSeries:
398 HMT_MEDIUM 399 HMT_MEDIUM
399 mtspr SPRG1,r13 400 mtspr SPRN_SPRG1,r13
400BEGIN_FTR_SECTION 401BEGIN_FTR_SECTION
401 mtspr SPRG2,r12 402 mtspr SPRN_SPRG2,r12
402 mfspr r13,DAR 403 mfspr r13,SPRN_DAR
403 mfspr r12,DSISR 404 mfspr r12,SPRN_DSISR
404 srdi r13,r13,60 405 srdi r13,r13,60
405 rlwimi r13,r12,16,0x20 406 rlwimi r13,r12,16,0x20
406 mfcr r12 407 mfcr r12
407 cmpwi r13,0x2c 408 cmpwi r13,0x2c
408 beq .do_stab_bolted_pSeries 409 beq .do_stab_bolted_pSeries
409 mtcrf 0x80,r12 410 mtcrf 0x80,r12
410 mfspr r12,SPRG2 411 mfspr r12,SPRN_SPRG2
411END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 412END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
412 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) 413 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
413 414
@@ -415,19 +416,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
415 .globl data_access_slb_pSeries 416 .globl data_access_slb_pSeries
416data_access_slb_pSeries: 417data_access_slb_pSeries:
417 HMT_MEDIUM 418 HMT_MEDIUM
418 mtspr SPRG1,r13 419 mtspr SPRN_SPRG1,r13
419 RUNLATCH_ON(r13) 420 RUNLATCH_ON(r13)
420 mfspr r13,SPRG3 /* get paca address into r13 */ 421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
421 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 422 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
422 std r10,PACA_EXSLB+EX_R10(r13) 423 std r10,PACA_EXSLB+EX_R10(r13)
423 std r11,PACA_EXSLB+EX_R11(r13) 424 std r11,PACA_EXSLB+EX_R11(r13)
424 std r12,PACA_EXSLB+EX_R12(r13) 425 std r12,PACA_EXSLB+EX_R12(r13)
425 std r3,PACA_EXSLB+EX_R3(r13) 426 std r3,PACA_EXSLB+EX_R3(r13)
426 mfspr r9,SPRG1 427 mfspr r9,SPRN_SPRG1
427 std r9,PACA_EXSLB+EX_R13(r13) 428 std r9,PACA_EXSLB+EX_R13(r13)
428 mfcr r9 429 mfcr r9
429 mfspr r12,SRR1 /* and SRR1 */ 430 mfspr r12,SPRN_SRR1 /* and SRR1 */
430 mfspr r3,DAR 431 mfspr r3,SPRN_DAR
431 b .do_slb_miss /* Rel. branch works in real mode */ 432 b .do_slb_miss /* Rel. branch works in real mode */
432 433
433 STD_EXCEPTION_PSERIES(0x400, instruction_access) 434 STD_EXCEPTION_PSERIES(0x400, instruction_access)
@@ -436,19 +437,19 @@ data_access_slb_pSeries:
436 .globl instruction_access_slb_pSeries 437 .globl instruction_access_slb_pSeries
437instruction_access_slb_pSeries: 438instruction_access_slb_pSeries:
438 HMT_MEDIUM 439 HMT_MEDIUM
439 mtspr SPRG1,r13 440 mtspr SPRN_SPRG1,r13
440 RUNLATCH_ON(r13) 441 RUNLATCH_ON(r13)
441 mfspr r13,SPRG3 /* get paca address into r13 */ 442 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
442 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
443 std r10,PACA_EXSLB+EX_R10(r13) 444 std r10,PACA_EXSLB+EX_R10(r13)
444 std r11,PACA_EXSLB+EX_R11(r13) 445 std r11,PACA_EXSLB+EX_R11(r13)
445 std r12,PACA_EXSLB+EX_R12(r13) 446 std r12,PACA_EXSLB+EX_R12(r13)
446 std r3,PACA_EXSLB+EX_R3(r13) 447 std r3,PACA_EXSLB+EX_R3(r13)
447 mfspr r9,SPRG1 448 mfspr r9,SPRN_SPRG1
448 std r9,PACA_EXSLB+EX_R13(r13) 449 std r9,PACA_EXSLB+EX_R13(r13)
449 mfcr r9 450 mfcr r9
450 mfspr r12,SRR1 /* and SRR1 */ 451 mfspr r12,SPRN_SRR1 /* and SRR1 */
451 mfspr r3,SRR0 /* SRR0 is faulting address */ 452 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
452 b .do_slb_miss /* Rel. branch works in real mode */ 453 b .do_slb_miss /* Rel. branch works in real mode */
453 454
454 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) 455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
@@ -466,15 +467,15 @@ system_call_pSeries:
466 RUNLATCH_ON(r9) 467 RUNLATCH_ON(r9)
467 mr r9,r13 468 mr r9,r13
468 mfmsr r10 469 mfmsr r10
469 mfspr r13,SPRG3 470 mfspr r13,SPRN_SPRG3
470 mfspr r11,SRR0 471 mfspr r11,SPRN_SRR0
471 clrrdi r12,r13,32 472 clrrdi r12,r13,32
472 oris r12,r12,system_call_common@h 473 oris r12,r12,system_call_common@h
473 ori r12,r12,system_call_common@l 474 ori r12,r12,system_call_common@l
474 mtspr SRR0,r12 475 mtspr SPRN_SRR0,r12
475 ori r10,r10,MSR_IR|MSR_DR|MSR_RI 476 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
476 mfspr r12,SRR1 477 mfspr r12,SPRN_SRR1
477 mtspr SRR1,r10 478 mtspr SPRN_SRR1,r10
478 rfid 479 rfid
479 b . /* prevent speculative execution */ 480 b . /* prevent speculative execution */
480 481
@@ -504,25 +505,25 @@ system_call_pSeries:
504 .align 7 505 .align 7
505_GLOBAL(do_stab_bolted_pSeries) 506_GLOBAL(do_stab_bolted_pSeries)
506 mtcrf 0x80,r12 507 mtcrf 0x80,r12
507 mfspr r12,SPRG2 508 mfspr r12,SPRN_SPRG2
508 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 509 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
509 510
510/* 511/*
511 * Vectors for the FWNMI option. Share common code. 512 * Vectors for the FWNMI option. Share common code.
512 */ 513 */
513 .globl system_reset_fwnmi 514 .globl system_reset_fwnmi
514system_reset_fwnmi: 515system_reset_fwnmi:
515 HMT_MEDIUM 516 HMT_MEDIUM
516 mtspr SPRG1,r13 /* save r13 */ 517 mtspr SPRN_SPRG1,r13 /* save r13 */
517 RUNLATCH_ON(r13) 518 RUNLATCH_ON(r13)
518 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) 519 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
519 520
520 .globl machine_check_fwnmi 521 .globl machine_check_fwnmi
521machine_check_fwnmi: 522machine_check_fwnmi:
522 HMT_MEDIUM 523 HMT_MEDIUM
523 mtspr SPRG1,r13 /* save r13 */ 524 mtspr SPRN_SPRG1,r13 /* save r13 */
524 RUNLATCH_ON(r13) 525 RUNLATCH_ON(r13)
525 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 526 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
526 527
527#ifdef CONFIG_PPC_ISERIES 528#ifdef CONFIG_PPC_ISERIES
528/*** ISeries-LPAR interrupt handlers ***/ 529/*** ISeries-LPAR interrupt handlers ***/
@@ -531,18 +532,18 @@ machine_check_fwnmi:
531 532
532 .globl data_access_iSeries 533 .globl data_access_iSeries
533data_access_iSeries: 534data_access_iSeries:
534 mtspr SPRG1,r13 535 mtspr SPRN_SPRG1,r13
535BEGIN_FTR_SECTION 536BEGIN_FTR_SECTION
536 mtspr SPRG2,r12 537 mtspr SPRN_SPRG2,r12
537 mfspr r13,DAR 538 mfspr r13,SPRN_DAR
538 mfspr r12,DSISR 539 mfspr r12,SPRN_DSISR
539 srdi r13,r13,60 540 srdi r13,r13,60
540 rlwimi r13,r12,16,0x20 541 rlwimi r13,r12,16,0x20
541 mfcr r12 542 mfcr r12
542 cmpwi r13,0x2c 543 cmpwi r13,0x2c
543 beq .do_stab_bolted_iSeries 544 beq .do_stab_bolted_iSeries
544 mtcrf 0x80,r12 545 mtcrf 0x80,r12
545 mfspr r12,SPRG2 546 mfspr r12,SPRN_SPRG2
546END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 547END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
547 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) 548 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
548 EXCEPTION_PROLOG_ISERIES_2 549 EXCEPTION_PROLOG_ISERIES_2
@@ -550,25 +551,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
550 551
551.do_stab_bolted_iSeries: 552.do_stab_bolted_iSeries:
552 mtcrf 0x80,r12 553 mtcrf 0x80,r12
553 mfspr r12,SPRG2 554 mfspr r12,SPRN_SPRG2
554 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 555 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
555 EXCEPTION_PROLOG_ISERIES_2 556 EXCEPTION_PROLOG_ISERIES_2
556 b .do_stab_bolted 557 b .do_stab_bolted
557 558
558 .globl data_access_slb_iSeries 559 .globl data_access_slb_iSeries
559data_access_slb_iSeries: 560data_access_slb_iSeries:
560 mtspr SPRG1,r13 /* save r13 */ 561 mtspr SPRN_SPRG1,r13 /* save r13 */
561 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 562 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
562 std r3,PACA_EXSLB+EX_R3(r13) 563 std r3,PACA_EXSLB+EX_R3(r13)
563 ld r12,PACALPPACA+LPPACASRR1(r13) 564 ld r12,PACALPPACA+LPPACASRR1(r13)
564 mfspr r3,DAR 565 mfspr r3,SPRN_DAR
565 b .do_slb_miss 566 b .do_slb_miss
566 567
567 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) 568 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
568 569
569 .globl instruction_access_slb_iSeries 570 .globl instruction_access_slb_iSeries
570instruction_access_slb_iSeries: 571instruction_access_slb_iSeries:
571 mtspr SPRG1,r13 /* save r13 */ 572 mtspr SPRN_SPRG1,r13 /* save r13 */
572 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 573 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
573 std r3,PACA_EXSLB+EX_R3(r13) 574 std r3,PACA_EXSLB+EX_R3(r13)
574 ld r12,PACALPPACA+LPPACASRR1(r13) 575 ld r12,PACALPPACA+LPPACASRR1(r13)
@@ -586,7 +587,7 @@ instruction_access_slb_iSeries:
586 .globl system_call_iSeries 587 .globl system_call_iSeries
587system_call_iSeries: 588system_call_iSeries:
588 mr r9,r13 589 mr r9,r13
589 mfspr r13,SPRG3 590 mfspr r13,SPRN_SPRG3
590 EXCEPTION_PROLOG_ISERIES_2 591 EXCEPTION_PROLOG_ISERIES_2
591 b system_call_common 592 b system_call_common
592 593
@@ -596,7 +597,7 @@ system_call_iSeries:
596 597
597 .globl system_reset_iSeries 598 .globl system_reset_iSeries
598system_reset_iSeries: 599system_reset_iSeries:
599 mfspr r13,SPRG3 /* Get paca address */ 600 mfspr r13,SPRN_SPRG3 /* Get paca address */
600 mfmsr r24 601 mfmsr r24
601 ori r24,r24,MSR_RI 602 ori r24,r24,MSR_RI
602 mtmsrd r24 /* RI on */ 603 mtmsrd r24 /* RI on */
@@ -639,7 +640,7 @@ iSeries_secondary_smp_loop:
639#endif /* CONFIG_SMP */ 640#endif /* CONFIG_SMP */
640 li r0,-1 /* r0=-1 indicates a Hypervisor call */ 641 li r0,-1 /* r0=-1 indicates a Hypervisor call */
641 sc /* Invoke the hypervisor via a system call */ 642 sc /* Invoke the hypervisor via a system call */
642 mfspr r13,SPRG3 /* Put r13 back ???? */ 643 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
643 b 1b /* If SMP not configured, secondaries 644 b 1b /* If SMP not configured, secondaries
644 * loop forever */ 645 * loop forever */
645 646
@@ -656,8 +657,8 @@ hardware_interrupt_iSeries_masked:
656 mtcrf 0x80,r9 /* Restore regs */ 657 mtcrf 0x80,r9 /* Restore regs */
657 ld r11,PACALPPACA+LPPACASRR0(r13) 658 ld r11,PACALPPACA+LPPACASRR0(r13)
658 ld r12,PACALPPACA+LPPACASRR1(r13) 659 ld r12,PACALPPACA+LPPACASRR1(r13)
659 mtspr SRR0,r11 660 mtspr SPRN_SRR0,r11
660 mtspr SRR1,r12 661 mtspr SPRN_SRR1,r12
661 ld r9,PACA_EXGEN+EX_R9(r13) 662 ld r9,PACA_EXGEN+EX_R9(r13)
662 ld r10,PACA_EXGEN+EX_R10(r13) 663 ld r10,PACA_EXGEN+EX_R10(r13)
663 ld r11,PACA_EXGEN+EX_R11(r13) 664 ld r11,PACA_EXGEN+EX_R11(r13)
@@ -713,8 +714,8 @@ bad_stack:
713 std r10,GPR1(r1) 714 std r10,GPR1(r1)
714 std r11,_NIP(r1) 715 std r11,_NIP(r1)
715 std r12,_MSR(r1) 716 std r12,_MSR(r1)
716 mfspr r11,DAR 717 mfspr r11,SPRN_DAR
717 mfspr r12,DSISR 718 mfspr r12,SPRN_DSISR
718 std r11,_DAR(r1) 719 std r11,_DAR(r1)
719 std r12,_DSISR(r1) 720 std r12,_DSISR(r1)
720 mflr r10 721 mflr r10
@@ -766,8 +767,8 @@ fast_exception_return:
766 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ 767 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
767 mtmsrd r10,1 768 mtmsrd r10,1
768 769
769 mtspr SRR1,r12 770 mtspr SPRN_SRR1,r12
770 mtspr SRR0,r11 771 mtspr SPRN_SRR0,r11
771 REST_4GPRS(10, r1) 772 REST_4GPRS(10, r1)
772 ld r1,GPR1(r1) 773 ld r1,GPR1(r1)
773 rfid 774 rfid
@@ -788,9 +789,9 @@ unrecov_fer:
788 .globl data_access_common 789 .globl data_access_common
789data_access_common: 790data_access_common:
790 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ 791 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
791 mfspr r10,DAR 792 mfspr r10,SPRN_DAR
792 std r10,PACA_EXGEN+EX_DAR(r13) 793 std r10,PACA_EXGEN+EX_DAR(r13)
793 mfspr r10,DSISR 794 mfspr r10,SPRN_DSISR
794 stw r10,PACA_EXGEN+EX_DSISR(r13) 795 stw r10,PACA_EXGEN+EX_DSISR(r13)
795 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) 796 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
796 ld r3,PACA_EXGEN+EX_DAR(r13) 797 ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -821,9 +822,9 @@ hardware_interrupt_entry:
821 .align 7 822 .align 7
822 .globl alignment_common 823 .globl alignment_common
823alignment_common: 824alignment_common:
824 mfspr r10,DAR 825 mfspr r10,SPRN_DAR
825 std r10,PACA_EXGEN+EX_DAR(r13) 826 std r10,PACA_EXGEN+EX_DAR(r13)
826 mfspr r10,DSISR 827 mfspr r10,SPRN_DSISR
827 stw r10,PACA_EXGEN+EX_DSISR(r13) 828 stw r10,PACA_EXGEN+EX_DSISR(r13)
828 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) 829 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
829 ld r3,PACA_EXGEN+EX_DAR(r13) 830 ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -1120,7 +1121,7 @@ _GLOBAL(do_stab_bolted)
1120 1121
1121 /* Hash to the primary group */ 1122 /* Hash to the primary group */
1122 ld r10,PACASTABVIRT(r13) 1123 ld r10,PACASTABVIRT(r13)
1123 mfspr r11,DAR 1124 mfspr r11,SPRN_DAR
1124 srdi r11,r11,28 1125 srdi r11,r11,28
1125 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1126 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1126 1127
@@ -1162,7 +1163,7 @@ _GLOBAL(do_stab_bolted)
11622: std r9,8(r10) /* Store the vsid part of the ste */ 11632: std r9,8(r10) /* Store the vsid part of the ste */
1163 eieio 1164 eieio
1164 1165
1165 mfspr r11,DAR /* Get the new esid */ 1166 mfspr r11,SPRN_DAR /* Get the new esid */
1166 clrrdi r11,r11,28 /* Permits a full 32b of ESID */ 1167 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1167 ori r11,r11,0x90 /* Turn on valid and kp */ 1168 ori r11,r11,0x90 /* Turn on valid and kp */
1168 std r11,0(r10) /* Put new entry back into the stab */ 1169 std r11,0(r10) /* Put new entry back into the stab */
@@ -1182,8 +1183,8 @@ _GLOBAL(do_stab_bolted)
1182 clrrdi r10,r10,2 1183 clrrdi r10,r10,2
1183 mtmsrd r10,1 1184 mtmsrd r10,1
1184 1185
1185 mtspr SRR0,r11 1186 mtspr SPRN_SRR0,r11
1186 mtspr SRR1,r12 1187 mtspr SPRN_SRR1,r12
1187 ld r9,PACA_EXSLB+EX_R9(r13) 1188 ld r9,PACA_EXSLB+EX_R9(r13)
1188 ld r10,PACA_EXSLB+EX_R10(r13) 1189 ld r10,PACA_EXSLB+EX_R10(r13)
1189 ld r11,PACA_EXSLB+EX_R11(r13) 1190 ld r11,PACA_EXSLB+EX_R11(r13)
@@ -1229,8 +1230,8 @@ _GLOBAL(do_slb_miss)
1229.machine pop 1230.machine pop
1230 1231
1231#ifdef CONFIG_PPC_ISERIES 1232#ifdef CONFIG_PPC_ISERIES
1232 mtspr SRR0,r11 1233 mtspr SPRN_SRR0,r11
1233 mtspr SRR1,r12 1234 mtspr SPRN_SRR1,r12
1234#endif /* CONFIG_PPC_ISERIES */ 1235#endif /* CONFIG_PPC_ISERIES */
1235 ld r9,PACA_EXSLB+EX_R9(r13) 1236 ld r9,PACA_EXSLB+EX_R9(r13)
1236 ld r10,PACA_EXSLB+EX_R10(r13) 1237 ld r10,PACA_EXSLB+EX_R10(r13)
@@ -1253,7 +1254,7 @@ unrecov_slb:
1253 * 1254 *
1254 * On iSeries, the hypervisor must fill in at least one entry before 1255 * On iSeries, the hypervisor must fill in at least one entry before
1255 * we get control (with relocate on). The address is give to the hv 1256 * we get control (with relocate on). The address is give to the hv
1256 * as a page number (see xLparMap in LparData.c), so this must be at a 1257 * as a page number (see xLparMap in lpardata.c), so this must be at a
1257 * fixed address (the linker can't compute (u64)&initial_stab >> 1258 * fixed address (the linker can't compute (u64)&initial_stab >>
1258 * PAGE_SHIFT). 1259 * PAGE_SHIFT).
1259 */ 1260 */
@@ -1316,7 +1317,7 @@ _GLOBAL(pSeries_secondary_smp_init)
1316 mr r3,r24 /* not found, copy phys to r3 */ 1317 mr r3,r24 /* not found, copy phys to r3 */
1317 b .kexec_wait /* next kernel might do better */ 1318 b .kexec_wait /* next kernel might do better */
1318 1319
13192: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 13202: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1320 /* From now on, r24 is expected to be logical cpuid */ 1321 /* From now on, r24 is expected to be logical cpuid */
1321 mr r24,r5 1322 mr r24,r5
13223: HMT_LOW 13233: HMT_LOW
@@ -1364,6 +1365,7 @@ _STATIC(__start_initialization_iSeries)
1364 addi r2,r2,0x4000 1365 addi r2,r2,0x4000
1365 1366
1366 bl .iSeries_early_setup 1367 bl .iSeries_early_setup
1368 bl .early_setup
1367 1369
1368 /* relocation is on at this point */ 1370 /* relocation is on at this point */
1369 1371
@@ -1554,20 +1556,17 @@ copy_to_here:
1554 .section ".text"; 1556 .section ".text";
1555 .align 2 ; 1557 .align 2 ;
1556 1558
1557 .globl pmac_secondary_start_1 1559 .globl __secondary_start_pmac_0
1558pmac_secondary_start_1: 1560__secondary_start_pmac_0:
1559 li r24, 1 1561 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1560 b .pmac_secondary_start 1562 li r24,0
1561 1563 b 1f
1562 .globl pmac_secondary_start_2 1564 li r24,1
1563pmac_secondary_start_2: 1565 b 1f
1564 li r24, 2 1566 li r24,2
1565 b .pmac_secondary_start 1567 b 1f
1566 1568 li r24,3
1567 .globl pmac_secondary_start_3 15691:
1568pmac_secondary_start_3:
1569 li r24, 3
1570 b .pmac_secondary_start
1571 1570
1572_GLOBAL(pmac_secondary_start) 1571_GLOBAL(pmac_secondary_start)
1573 /* turn on 64-bit mode */ 1572 /* turn on 64-bit mode */
@@ -1586,7 +1585,7 @@ _GLOBAL(pmac_secondary_start)
1586 LOADADDR(r4, paca) /* Get base vaddr of paca array */ 1585 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1587 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 1586 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1588 add r13,r13,r4 /* for this processor. */ 1587 add r13,r13,r4 /* for this processor. */
1589 mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1588 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1590 1589
1591 /* Create a temp kernel stack for use before relocation is on. */ 1590 /* Create a temp kernel stack for use before relocation is on. */
1592 ld r1,PACAEMERGSP(r13) 1591 ld r1,PACAEMERGSP(r13)
@@ -1621,7 +1620,7 @@ _GLOBAL(__secondary_start)
1621 /* Initialize the page table pointer register. */ 1620 /* Initialize the page table pointer register. */
1622 LOADADDR(r6,_SDR1) 1621 LOADADDR(r6,_SDR1)
1623 ld r6,0(r6) /* get the value of _SDR1 */ 1622 ld r6,0(r6) /* get the value of _SDR1 */
1624 mtspr SDR1,r6 /* set the htab location */ 1623 mtspr SPRN_SDR1,r6 /* set the htab location */
1625#endif 1624#endif
1626 /* Initialize the first segment table (or SLB) entry */ 1625 /* Initialize the first segment table (or SLB) entry */
1627 ld r3,PACASTABVIRT(r13) /* get addr of segment table */ 1626 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
@@ -1650,7 +1649,7 @@ _GLOBAL(__secondary_start)
1650 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1649 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1651 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ 1650 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1652 beq 98f /* branch if result is 0 */ 1651 beq 98f /* branch if result is 0 */
1653 mfspr r3,PVR 1652 mfspr r3,SPRN_PVR
1654 srwi r3,r3,16 1653 srwi r3,r3,16
1655 cmpwi r3,0x37 /* SStar */ 1654 cmpwi r3,0x37 /* SStar */
1656 beq 97f 1655 beq 97f
@@ -1674,8 +1673,8 @@ _GLOBAL(__secondary_start)
1674#ifdef DO_SOFT_DISABLE 1673#ifdef DO_SOFT_DISABLE
1675 ori r4,r4,MSR_EE 1674 ori r4,r4,MSR_EE
1676#endif 1675#endif
1677 mtspr SRR0,r3 1676 mtspr SPRN_SRR0,r3
1678 mtspr SRR1,r4 1677 mtspr SPRN_SRR1,r4
1679 rfid 1678 rfid
1680 b . /* prevent speculative execution */ 1679 b . /* prevent speculative execution */
1681 1680
@@ -1737,7 +1736,7 @@ _STATIC(start_here_multiplatform)
1737 1736
1738#ifdef CONFIG_HMT 1737#ifdef CONFIG_HMT
1739 /* Start up the second thread on cpu 0 */ 1738 /* Start up the second thread on cpu 0 */
1740 mfspr r3,PVR 1739 mfspr r3,SPRN_PVR
1741 srwi r3,r3,16 1740 srwi r3,r3,16
1742 cmpwi r3,0x34 /* Pulsar */ 1741 cmpwi r3,0x34 /* Pulsar */
1743 beq 90f 1742 beq 90f
@@ -1797,7 +1796,7 @@ _STATIC(start_here_multiplatform)
1797 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ 1796 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1798 add r13,r13,r24 /* for this processor. */ 1797 add r13,r13,r24 /* for this processor. */
1799 sub r13,r13,r26 /* convert to physical addr */ 1798 sub r13,r13,r26 /* convert to physical addr */
1800 mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */ 1799 mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
1801 1800
1802 /* Do very early kernel initializations, including initial hash table, 1801 /* Do very early kernel initializations, including initial hash table,
1803 * stab and slb setup before we turn on relocation. */ 1802 * stab and slb setup before we turn on relocation. */
@@ -1814,7 +1813,7 @@ _STATIC(start_here_multiplatform)
1814 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1813 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1815 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ 1814 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1816 beq 98f /* branch if result is 0 */ 1815 beq 98f /* branch if result is 0 */
1817 mfspr r3,PVR 1816 mfspr r3,SPRN_PVR
1818 srwi r3,r3,16 1817 srwi r3,r3,16
1819 cmpwi r3,0x37 /* SStar */ 1818 cmpwi r3,0x37 /* SStar */
1820 beq 97f 1819 beq 97f
@@ -1838,12 +1837,12 @@ _STATIC(start_here_multiplatform)
1838 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ 1837 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1839 sub r6,r6,r26 1838 sub r6,r6,r26
1840 ld r6,0(r6) /* get the value of _SDR1 */ 1839 ld r6,0(r6) /* get the value of _SDR1 */
1841 mtspr SDR1,r6 /* set the htab location */ 1840 mtspr SPRN_SDR1,r6 /* set the htab location */
184298: 184198:
1843 LOADADDR(r3,.start_here_common) 1842 LOADADDR(r3,.start_here_common)
1844 SET_REG_TO_CONST(r4, MSR_KERNEL) 1843 SET_REG_TO_CONST(r4, MSR_KERNEL)
1845 mtspr SRR0,r3 1844 mtspr SPRN_SRR0,r3
1846 mtspr SRR1,r4 1845 mtspr SPRN_SRR1,r4
1847 rfid 1846 rfid
1848 b . /* prevent speculative execution */ 1847 b . /* prevent speculative execution */
1849#endif /* CONFIG_PPC_MULTIPLATFORM */ 1848#endif /* CONFIG_PPC_MULTIPLATFORM */
@@ -1874,7 +1873,7 @@ _STATIC(start_here_common)
1874 LOADADDR(r24, paca) /* Get base vaddr of paca array */ 1873 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1875 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ 1874 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1876 add r13,r13,r24 /* for this processor. */ 1875 add r13,r13,r24 /* for this processor. */
1877 mtspr SPRG3,r13 1876 mtspr SPRN_SPRG3,r13
1878 1877
1879 /* ptr to current */ 1878 /* ptr to current */
1880 LOADADDR(r4,init_task) 1879 LOADADDR(r4,init_task)
@@ -1901,7 +1900,7 @@ _STATIC(start_here_common)
1901_GLOBAL(hmt_init) 1900_GLOBAL(hmt_init)
1902#ifdef CONFIG_HMT 1901#ifdef CONFIG_HMT
1903 LOADADDR(r5, hmt_thread_data) 1902 LOADADDR(r5, hmt_thread_data)
1904 mfspr r7,PVR 1903 mfspr r7,SPRN_PVR
1905 srwi r7,r7,16 1904 srwi r7,r7,16
1906 cmpwi r7,0x34 /* Pulsar */ 1905 cmpwi r7,0x34 /* Pulsar */
1907 beq 90f 1906 beq 90f
@@ -1910,10 +1909,10 @@ _GLOBAL(hmt_init)
1910 cmpwi r7,0x37 /* SStar */ 1909 cmpwi r7,0x37 /* SStar */
1911 beq 91f 1910 beq 91f
1912 b 101f 1911 b 101f
191390: mfspr r6,PIR 191290: mfspr r6,SPRN_PIR
1914 andi. r6,r6,0x1f 1913 andi. r6,r6,0x1f
1915 b 92f 1914 b 92f
191691: mfspr r6,PIR 191591: mfspr r6,SPRN_PIR
1917 andi. r6,r6,0x3ff 1916 andi. r6,r6,0x3ff
191892: sldi r4,r24,3 191792: sldi r4,r24,3
1919 stwx r6,r5,r4 1918 stwx r6,r5,r4
@@ -1924,8 +1923,8 @@ __hmt_secondary_hold:
1924 LOADADDR(r5, hmt_thread_data) 1923 LOADADDR(r5, hmt_thread_data)
1925 clrldi r5,r5,4 1924 clrldi r5,r5,4
1926 li r7,0 1925 li r7,0
1927 mfspr r6,PIR 1926 mfspr r6,SPRN_PIR
1928 mfspr r8,PVR 1927 mfspr r8,SPRN_PVR
1929 srwi r8,r8,16 1928 srwi r8,r8,16
1930 cmpwi r8,0x34 1929 cmpwi r8,0x34
1931 bne 93f 1930 bne 93f
@@ -1951,39 +1950,41 @@ __hmt_secondary_hold:
1951_GLOBAL(hmt_start_secondary) 1950_GLOBAL(hmt_start_secondary)
1952 LOADADDR(r4,__hmt_secondary_hold) 1951 LOADADDR(r4,__hmt_secondary_hold)
1953 clrldi r4,r4,4 1952 clrldi r4,r4,4
1954 mtspr NIADORM, r4 1953 mtspr SPRN_NIADORM, r4
1955 mfspr r4, MSRDORM 1954 mfspr r4, SPRN_MSRDORM
1956 li r5, -65 1955 li r5, -65
1957 and r4, r4, r5 1956 and r4, r4, r5
1958 mtspr MSRDORM, r4 1957 mtspr SPRN_MSRDORM, r4
1959 lis r4,0xffef 1958 lis r4,0xffef
1960 ori r4,r4,0x7403 1959 ori r4,r4,0x7403
1961 mtspr TSC, r4 1960 mtspr SPRN_TSC, r4
1962 li r4,0x1f4 1961 li r4,0x1f4
1963 mtspr TST, r4 1962 mtspr SPRN_TST, r4
1964 mfspr r4, HID0 1963 mfspr r4, SPRN_HID0
1965 ori r4, r4, 0x1 1964 ori r4, r4, 0x1
1966 mtspr HID0, r4 1965 mtspr SPRN_HID0, r4
1967 mfspr r4, SPRN_CTRLF 1966 mfspr r4, SPRN_CTRLF
1968 oris r4, r4, 0x40 1967 oris r4, r4, 0x40
1969 mtspr SPRN_CTRLT, r4 1968 mtspr SPRN_CTRLT, r4
1970 blr 1969 blr
1971#endif 1970#endif
1972 1971
1973#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)) 1972#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
1974_GLOBAL(smp_release_cpus) 1973_GLOBAL(smp_release_cpus)
1975 /* All secondary cpus are spinning on a common 1974 /* All secondary cpus are spinning on a common
1976 * spinloop, release them all now so they can start 1975 * spinloop, release them all now so they can start
1977 * to spin on their individual paca spinloops. 1976 * to spin on their individual paca spinloops.
1978 * For non SMP kernels, the secondary cpus never 1977 * For non SMP kernels, the secondary cpus never
1979 * get out of the common spinloop. 1978 * get out of the common spinloop.
1979 * XXX This does nothing useful on iSeries, secondaries are
1980 * already waiting on their paca.
1980 */ 1981 */
1981 li r3,1 1982 li r3,1
1982 LOADADDR(r5,__secondary_hold_spinloop) 1983 LOADADDR(r5,__secondary_hold_spinloop)
1983 std r3,0(r5) 1984 std r3,0(r5)
1984 sync 1985 sync
1985 blr 1986 blr
1986#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */ 1987#endif /* CONFIG_SMP */
1987 1988
1988 1989
1989/* 1990/*
@@ -1992,7 +1993,7 @@ _GLOBAL(smp_release_cpus)
1992 */ 1993 */
1993 .section ".bss" 1994 .section ".bss"
1994 1995
1995 .align 12 1996 .align PAGE_SHIFT
1996 1997
1997 .globl empty_zero_page 1998 .globl empty_zero_page
1998empty_zero_page: 1999empty_zero_page:
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 954395d42636..8abd2ad92832 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -31,7 +31,7 @@
31 31
32extern void power4_idle(void); 32extern void power4_idle(void);
33 33
34int default_idle(void) 34void default_idle(void)
35{ 35{
36 long oldval; 36 long oldval;
37 unsigned int cpu = smp_processor_id(); 37 unsigned int cpu = smp_processor_id();
@@ -64,11 +64,9 @@ int default_idle(void)
64 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) 64 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
65 cpu_die(); 65 cpu_die();
66 } 66 }
67
68 return 0;
69} 67}
70 68
71int native_idle(void) 69void native_idle(void)
72{ 70{
73 while (1) { 71 while (1) {
74 ppc64_runlatch_off(); 72 ppc64_runlatch_off();
@@ -85,8 +83,6 @@ int native_idle(void)
85 system_state == SYSTEM_RUNNING) 83 system_state == SYSTEM_RUNNING)
86 cpu_die(); 84 cpu_die();
87 } 85 }
88
89 return 0;
90} 86}
91 87
92void cpu_idle(void) 88void cpu_idle(void)
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 9c6facc24f70..ed876a5178ae 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -395,7 +395,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
395 if (post_kprobe_handler(args->regs)) 395 if (post_kprobe_handler(args->regs))
396 ret = NOTIFY_STOP; 396 ret = NOTIFY_STOP;
397 break; 397 break;
398 case DIE_GPF:
399 case DIE_PAGE_FAULT: 398 case DIE_PAGE_FAULT:
400 if (kprobe_running() && 399 if (kprobe_running() &&
401 kprobe_fault_handler(args->regs, args->trapnr)) 400 kprobe_fault_handler(args->regs, args->trapnr))
diff --git a/arch/ppc64/kernel/maple_pci.c b/arch/ppc64/kernel/maple_pci.c
index 1d297e0edfc0..633324b5e61b 100644
--- a/arch/ppc64/kernel/maple_pci.c
+++ b/arch/ppc64/kernel/maple_pci.c
@@ -23,8 +23,7 @@
23#include <asm/pci-bridge.h> 23#include <asm/pci-bridge.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include <asm/iommu.h> 25#include <asm/iommu.h>
26 26#include <asm/ppc-pci.h>
27#include "pci.h"
28 27
29#ifdef DEBUG 28#ifdef DEBUG
30#define DBG(x...) printk(x) 29#define DBG(x...) printk(x)
@@ -276,7 +275,7 @@ static void __init setup_u3_agp(struct pci_controller* hose)
276{ 275{
277 /* On G5, we move AGP up to high bus number so we don't need 276 /* On G5, we move AGP up to high bus number so we don't need
278 * to reassign bus numbers for HT. If we ever have P2P bridges 277 * to reassign bus numbers for HT. If we ever have P2P bridges
279 * on AGP, we'll have to move pci_assign_all_busses to the 278 * on AGP, we'll have to move pci_assign_all_buses to the
280 * pci_controller structure so we enable it for AGP and not for 279 * pci_controller structure so we enable it for AGP and not for
281 * HT childs. 280 * HT childs.
282 * We hard code the address because of the different size of 281 * We hard code the address because of the different size of
@@ -360,7 +359,7 @@ static int __init add_bridge(struct device_node *dev)
360 359
361 /* Interpret the "ranges" property */ 360 /* Interpret the "ranges" property */
362 /* This also maps the I/O region and sets isa_io/mem_base */ 361 /* This also maps the I/O region and sets isa_io/mem_base */
363 pci_process_bridge_OF_ranges(hose, dev); 362 pci_process_bridge_OF_ranges(hose, dev, primary);
364 pci_setup_phb_io(hose, primary); 363 pci_setup_phb_io(hose, primary);
365 364
366 /* Fixup "bus-range" OF property */ 365 /* Fixup "bus-range" OF property */
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
index fc0567498a3a..a107ed69a355 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/ppc64/kernel/maple_setup.c
@@ -59,8 +59,8 @@
59#include <asm/time.h> 59#include <asm/time.h>
60#include <asm/of_device.h> 60#include <asm/of_device.h>
61#include <asm/lmb.h> 61#include <asm/lmb.h>
62 62#include <asm/mpic.h>
63#include "mpic.h" 63#include <asm/udbg.h>
64 64
65#ifdef DEBUG 65#ifdef DEBUG
66#define DBG(fmt...) udbg_printf(fmt) 66#define DBG(fmt...) udbg_printf(fmt)
@@ -70,7 +70,7 @@
70 70
71extern int maple_set_rtc_time(struct rtc_time *tm); 71extern int maple_set_rtc_time(struct rtc_time *tm);
72extern void maple_get_rtc_time(struct rtc_time *tm); 72extern void maple_get_rtc_time(struct rtc_time *tm);
73extern void maple_get_boot_time(struct rtc_time *tm); 73extern unsigned long maple_get_boot_time(void);
74extern void maple_calibrate_decr(void); 74extern void maple_calibrate_decr(void);
75extern void maple_pci_init(void); 75extern void maple_pci_init(void);
76extern void maple_pcibios_fixup(void); 76extern void maple_pcibios_fixup(void);
diff --git a/arch/ppc64/kernel/maple_time.c b/arch/ppc64/kernel/maple_time.c
index d65210abcd03..445cb7470bf5 100644
--- a/arch/ppc64/kernel/maple_time.c
+++ b/arch/ppc64/kernel/maple_time.c
@@ -156,8 +156,9 @@ int maple_set_rtc_time(struct rtc_time *tm)
156 return 0; 156 return 0;
157} 157}
158 158
159void __init maple_get_boot_time(struct rtc_time *tm) 159unsigned long __init maple_get_boot_time(void)
160{ 160{
161 struct rtc_time tm;
161 struct device_node *rtcs; 162 struct device_node *rtcs;
162 163
163 rtcs = find_compatible_devices("rtc", "pnpPNP,b00"); 164 rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
@@ -170,6 +171,8 @@ void __init maple_get_boot_time(struct rtc_time *tm)
170 "legacy address (0x%x)\n", maple_rtc_addr); 171 "legacy address (0x%x)\n", maple_rtc_addr);
171 } 172 }
172 173
173 maple_get_rtc_time(tm); 174 maple_get_rtc_time(&tm);
175 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
176 tm.tm_hour, tm.tm_min, tm.tm_sec);
174} 177}
175 178
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index e7241ad80a08..a33448c2bd91 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -28,6 +28,7 @@
28#include <asm/ppc_asm.h> 28#include <asm/ppc_asm.h>
29#include <asm/asm-offsets.h> 29#include <asm/asm-offsets.h>
30#include <asm/cputable.h> 30#include <asm/cputable.h>
31#include <asm/thread_info.h>
31 32
32 .text 33 .text
33 34
@@ -64,44 +65,6 @@ _GLOBAL(get_srr1)
64_GLOBAL(get_sp) 65_GLOBAL(get_sp)
65 mr r3,r1 66 mr r3,r1
66 blr 67 blr
67
68#ifdef CONFIG_PPC_ISERIES
69/* unsigned long local_save_flags(void) */
70_GLOBAL(local_get_flags)
71 lbz r3,PACAPROCENABLED(r13)
72 blr
73
74/* unsigned long local_irq_disable(void) */
75_GLOBAL(local_irq_disable)
76 lbz r3,PACAPROCENABLED(r13)
77 li r4,0
78 stb r4,PACAPROCENABLED(r13)
79 blr /* Done */
80
81/* void local_irq_restore(unsigned long flags) */
82_GLOBAL(local_irq_restore)
83 lbz r5,PACAPROCENABLED(r13)
84 /* Check if things are setup the way we want _already_. */
85 cmpw 0,r3,r5
86 beqlr
87 /* are we enabling interrupts? */
88 cmpdi 0,r3,0
89 stb r3,PACAPROCENABLED(r13)
90 beqlr
91 /* Check pending interrupts */
92 /* A decrementer, IPI or PMC interrupt may have occurred
93 * while we were in the hypervisor (which enables) */
94 ld r4,PACALPPACA+LPPACAANYINT(r13)
95 cmpdi r4,0
96 beqlr
97
98 /*
99 * Handle pending interrupts in interrupt context
100 */
101 li r0,0x5555
102 sc
103 blr
104#endif /* CONFIG_PPC_ISERIES */
105 68
106#ifdef CONFIG_IRQSTACKS 69#ifdef CONFIG_IRQSTACKS
107_GLOBAL(call_do_softirq) 70_GLOBAL(call_do_softirq)
@@ -329,7 +292,7 @@ _GLOBAL(__flush_dcache_icache)
329 292
330/* Flush the dcache */ 293/* Flush the dcache */
331 ld r7,PPC64_CACHES@toc(r2) 294 ld r7,PPC64_CACHES@toc(r2)
332 clrrdi r3,r3,12 /* Page align */ 295 clrrdi r3,r3,PAGE_SHIFT /* Page align */
333 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ 296 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
334 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ 297 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
335 mr r6,r3 298 mr r6,r3
@@ -778,6 +741,13 @@ _GLOBAL(giveup_altivec)
778_GLOBAL(__setup_cpu_power3) 741_GLOBAL(__setup_cpu_power3)
779 blr 742 blr
780 743
744_GLOBAL(execve)
745 li r0,__NR_execve
746 sc
747 bnslr
748 neg r3,r3
749 blr
750
781/* kexec_wait(phys_cpu) 751/* kexec_wait(phys_cpu)
782 * 752 *
783 * wait for the flag to change, indicating this kernel is going away but 753 * wait for the flag to change, indicating this kernel is going away but
@@ -959,11 +929,11 @@ _GLOBAL(sys_call_table32)
959 .llong .sys_write 929 .llong .sys_write
960 .llong .compat_sys_open /* 5 */ 930 .llong .compat_sys_open /* 5 */
961 .llong .sys_close 931 .llong .sys_close
962 .llong .sys32_waitpid 932 .llong .compat_sys_waitpid
963 .llong .sys32_creat 933 .llong .compat_sys_creat
964 .llong .sys_link 934 .llong .sys_link
965 .llong .sys_unlink /* 10 */ 935 .llong .sys_unlink /* 10 */
966 .llong .sys32_execve 936 .llong .compat_sys_execve
967 .llong .sys_chdir 937 .llong .sys_chdir
968 .llong .compat_sys_time 938 .llong .compat_sys_time
969 .llong .sys_mknod 939 .llong .sys_mknod
@@ -978,20 +948,20 @@ _GLOBAL(sys_call_table32)
978 .llong .sys_setuid 948 .llong .sys_setuid
979 .llong .sys_getuid 949 .llong .sys_getuid
980 .llong .compat_sys_stime /* 25 */ 950 .llong .compat_sys_stime /* 25 */
981 .llong .sys32_ptrace 951 .llong .compat_sys_ptrace
982 .llong .sys_alarm 952 .llong .sys_alarm
983 .llong .sys_ni_syscall /* old fstat syscall */ 953 .llong .sys_ni_syscall /* old fstat syscall */
984 .llong .sys32_pause 954 .llong .compat_sys_pause
985 .llong .compat_sys_utime /* 30 */ 955 .llong .compat_sys_utime /* 30 */
986 .llong .sys_ni_syscall /* old stty syscall */ 956 .llong .sys_ni_syscall /* old stty syscall */
987 .llong .sys_ni_syscall /* old gtty syscall */ 957 .llong .sys_ni_syscall /* old gtty syscall */
988 .llong .sys32_access 958 .llong .compat_sys_access
989 .llong .sys32_nice 959 .llong .compat_sys_nice
990 .llong .sys_ni_syscall /* 35 - old ftime syscall */ 960 .llong .sys_ni_syscall /* 35 - old ftime syscall */
991 .llong .sys_sync 961 .llong .sys_sync
992 .llong .sys32_kill 962 .llong .compat_sys_kill
993 .llong .sys_rename 963 .llong .sys_rename
994 .llong .sys32_mkdir 964 .llong .compat_sys_mkdir
995 .llong .sys_rmdir /* 40 */ 965 .llong .sys_rmdir /* 40 */
996 .llong .sys_dup 966 .llong .sys_dup
997 .llong .sys_pipe 967 .llong .sys_pipe
@@ -1009,35 +979,35 @@ _GLOBAL(sys_call_table32)
1009 .llong .compat_sys_ioctl 979 .llong .compat_sys_ioctl
1010 .llong .compat_sys_fcntl /* 55 */ 980 .llong .compat_sys_fcntl /* 55 */
1011 .llong .sys_ni_syscall /* old mpx syscall */ 981 .llong .sys_ni_syscall /* old mpx syscall */
1012 .llong .sys32_setpgid 982 .llong .compat_sys_setpgid
1013 .llong .sys_ni_syscall /* old ulimit syscall */ 983 .llong .sys_ni_syscall /* old ulimit syscall */
1014 .llong .sys32_olduname 984 .llong .sys_olduname
1015 .llong .sys32_umask /* 60 */ 985 .llong .compat_sys_umask /* 60 */
1016 .llong .sys_chroot 986 .llong .sys_chroot
1017 .llong .sys_ustat 987 .llong .sys_ustat
1018 .llong .sys_dup2 988 .llong .sys_dup2
1019 .llong .sys_getppid 989 .llong .sys_getppid
1020 .llong .sys_getpgrp /* 65 */ 990 .llong .sys_getpgrp /* 65 */
1021 .llong .sys_setsid 991 .llong .sys_setsid
1022 .llong .sys32_sigaction 992 .llong .compat_sys_sigaction
1023 .llong .sys_sgetmask 993 .llong .sys_sgetmask
1024 .llong .sys32_ssetmask 994 .llong .compat_sys_ssetmask
1025 .llong .sys_setreuid /* 70 */ 995 .llong .sys_setreuid /* 70 */
1026 .llong .sys_setregid 996 .llong .sys_setregid
1027 .llong .ppc32_sigsuspend 997 .llong .ppc32_sigsuspend
1028 .llong .compat_sys_sigpending 998 .llong .compat_sys_sigpending
1029 .llong .sys32_sethostname 999 .llong .compat_sys_sethostname
1030 .llong .compat_sys_setrlimit /* 75 */ 1000 .llong .compat_sys_setrlimit /* 75 */
1031 .llong .compat_sys_old_getrlimit 1001 .llong .compat_sys_old_getrlimit
1032 .llong .compat_sys_getrusage 1002 .llong .compat_sys_getrusage
1033 .llong .sys32_gettimeofday 1003 .llong .compat_sys_gettimeofday
1034 .llong .sys32_settimeofday 1004 .llong .compat_sys_settimeofday
1035 .llong .sys32_getgroups /* 80 */ 1005 .llong .compat_sys_getgroups /* 80 */
1036 .llong .sys32_setgroups 1006 .llong .compat_sys_setgroups
1037 .llong .sys_ni_syscall /* old select syscall */ 1007 .llong .sys_ni_syscall /* old select syscall */
1038 .llong .sys_symlink 1008 .llong .sys_symlink
1039 .llong .sys_ni_syscall /* old lstat syscall */ 1009 .llong .sys_ni_syscall /* old lstat syscall */
1040 .llong .sys32_readlink /* 85 */ 1010 .llong .compat_sys_readlink /* 85 */
1041 .llong .sys_uselib 1011 .llong .sys_uselib
1042 .llong .sys_swapon 1012 .llong .sys_swapon
1043 .llong .sys_reboot 1013 .llong .sys_reboot
@@ -1048,35 +1018,35 @@ _GLOBAL(sys_call_table32)
1048 .llong .sys_ftruncate 1018 .llong .sys_ftruncate
1049 .llong .sys_fchmod 1019 .llong .sys_fchmod
1050 .llong .sys_fchown /* 95 */ 1020 .llong .sys_fchown /* 95 */
1051 .llong .sys32_getpriority 1021 .llong .compat_sys_getpriority
1052 .llong .sys32_setpriority 1022 .llong .compat_sys_setpriority
1053 .llong .sys_ni_syscall /* old profil syscall */ 1023 .llong .sys_ni_syscall /* old profil syscall */
1054 .llong .compat_sys_statfs 1024 .llong .compat_sys_statfs
1055 .llong .compat_sys_fstatfs /* 100 */ 1025 .llong .compat_sys_fstatfs /* 100 */
1056 .llong .sys_ni_syscall /* old ioperm syscall */ 1026 .llong .sys_ni_syscall /* old ioperm syscall */
1057 .llong .compat_sys_socketcall 1027 .llong .compat_sys_socketcall
1058 .llong .sys32_syslog 1028 .llong .compat_sys_syslog
1059 .llong .compat_sys_setitimer 1029 .llong .compat_sys_setitimer
1060 .llong .compat_sys_getitimer /* 105 */ 1030 .llong .compat_sys_getitimer /* 105 */
1061 .llong .compat_sys_newstat 1031 .llong .compat_sys_newstat
1062 .llong .compat_sys_newlstat 1032 .llong .compat_sys_newlstat
1063 .llong .compat_sys_newfstat 1033 .llong .compat_sys_newfstat
1064 .llong .sys32_uname 1034 .llong .sys_uname
1065 .llong .sys_ni_syscall /* 110 old iopl syscall */ 1035 .llong .sys_ni_syscall /* 110 old iopl syscall */
1066 .llong .sys_vhangup 1036 .llong .sys_vhangup
1067 .llong .sys_ni_syscall /* old idle syscall */ 1037 .llong .sys_ni_syscall /* old idle syscall */
1068 .llong .sys_ni_syscall /* old vm86 syscall */ 1038 .llong .sys_ni_syscall /* old vm86 syscall */
1069 .llong .compat_sys_wait4 1039 .llong .compat_sys_wait4
1070 .llong .sys_swapoff /* 115 */ 1040 .llong .sys_swapoff /* 115 */
1071 .llong .sys32_sysinfo 1041 .llong .compat_sys_sysinfo
1072 .llong .sys32_ipc 1042 .llong .sys32_ipc
1073 .llong .sys_fsync 1043 .llong .sys_fsync
1074 .llong .ppc32_sigreturn 1044 .llong .ppc32_sigreturn
1075 .llong .ppc_clone /* 120 */ 1045 .llong .ppc_clone /* 120 */
1076 .llong .sys32_setdomainname 1046 .llong .compat_sys_setdomainname
1077 .llong .ppc64_newuname 1047 .llong .ppc_newuname
1078 .llong .sys_ni_syscall /* old modify_ldt syscall */ 1048 .llong .sys_ni_syscall /* old modify_ldt syscall */
1079 .llong .sys32_adjtimex 1049 .llong .compat_sys_adjtimex
1080 .llong .sys_mprotect /* 125 */ 1050 .llong .sys_mprotect /* 125 */
1081 .llong .compat_sys_sigprocmask 1051 .llong .compat_sys_sigprocmask
1082 .llong .sys_ni_syscall /* old create_module syscall */ 1052 .llong .sys_ni_syscall /* old create_module syscall */
@@ -1084,36 +1054,36 @@ _GLOBAL(sys_call_table32)
1084 .llong .sys_delete_module 1054 .llong .sys_delete_module
1085 .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */ 1055 .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
1086 .llong .sys_quotactl 1056 .llong .sys_quotactl
1087 .llong .sys32_getpgid 1057 .llong .compat_sys_getpgid
1088 .llong .sys_fchdir 1058 .llong .sys_fchdir
1089 .llong .sys_bdflush 1059 .llong .sys_bdflush
1090 .llong .sys32_sysfs /* 135 */ 1060 .llong .compat_sys_sysfs /* 135 */
1091 .llong .ppc64_personality 1061 .llong .ppc64_personality
1092 .llong .sys_ni_syscall /* for afs_syscall */ 1062 .llong .sys_ni_syscall /* for afs_syscall */
1093 .llong .sys_setfsuid 1063 .llong .sys_setfsuid
1094 .llong .sys_setfsgid 1064 .llong .sys_setfsgid
1095 .llong .sys_llseek /* 140 */ 1065 .llong .sys_llseek /* 140 */
1096 .llong .sys32_getdents 1066 .llong .compat_sys_getdents
1097 .llong .ppc32_select 1067 .llong .ppc32_select
1098 .llong .sys_flock 1068 .llong .sys_flock
1099 .llong .sys_msync 1069 .llong .sys_msync
1100 .llong .compat_sys_readv /* 145 */ 1070 .llong .compat_sys_readv /* 145 */
1101 .llong .compat_sys_writev 1071 .llong .compat_sys_writev
1102 .llong .sys32_getsid 1072 .llong .compat_sys_getsid
1103 .llong .sys_fdatasync 1073 .llong .sys_fdatasync
1104 .llong .sys32_sysctl 1074 .llong .compat_sys_sysctl
1105 .llong .sys_mlock /* 150 */ 1075 .llong .sys_mlock /* 150 */
1106 .llong .sys_munlock 1076 .llong .sys_munlock
1107 .llong .sys_mlockall 1077 .llong .sys_mlockall
1108 .llong .sys_munlockall 1078 .llong .sys_munlockall
1109 .llong .sys32_sched_setparam 1079 .llong .compat_sys_sched_setparam
1110 .llong .sys32_sched_getparam /* 155 */ 1080 .llong .compat_sys_sched_getparam /* 155 */
1111 .llong .sys32_sched_setscheduler 1081 .llong .compat_sys_sched_setscheduler
1112 .llong .sys32_sched_getscheduler 1082 .llong .compat_sys_sched_getscheduler
1113 .llong .sys_sched_yield 1083 .llong .sys_sched_yield
1114 .llong .sys32_sched_get_priority_max 1084 .llong .compat_sys_sched_get_priority_max
1115 .llong .sys32_sched_get_priority_min /* 160 */ 1085 .llong .compat_sys_sched_get_priority_min /* 160 */
1116 .llong .sys32_sched_rr_get_interval 1086 .llong .compat_sys_sched_rr_get_interval
1117 .llong .compat_sys_nanosleep 1087 .llong .compat_sys_nanosleep
1118 .llong .sys_mremap 1088 .llong .sys_mremap
1119 .llong .sys_setresuid 1089 .llong .sys_setresuid
@@ -1123,36 +1093,36 @@ _GLOBAL(sys_call_table32)
1123 .llong .compat_sys_nfsservctl 1093 .llong .compat_sys_nfsservctl
1124 .llong .sys_setresgid 1094 .llong .sys_setresgid
1125 .llong .sys_getresgid /* 170 */ 1095 .llong .sys_getresgid /* 170 */
1126 .llong .sys32_prctl 1096 .llong .compat_sys_prctl
1127 .llong .ppc32_rt_sigreturn 1097 .llong .ppc32_rt_sigreturn
1128 .llong .sys32_rt_sigaction 1098 .llong .compat_sys_rt_sigaction
1129 .llong .sys32_rt_sigprocmask 1099 .llong .compat_sys_rt_sigprocmask
1130 .llong .sys32_rt_sigpending /* 175 */ 1100 .llong .compat_sys_rt_sigpending /* 175 */
1131 .llong .compat_sys_rt_sigtimedwait 1101 .llong .compat_sys_rt_sigtimedwait
1132 .llong .sys32_rt_sigqueueinfo 1102 .llong .compat_sys_rt_sigqueueinfo
1133 .llong .ppc32_rt_sigsuspend 1103 .llong .ppc32_rt_sigsuspend
1134 .llong .sys32_pread64 1104 .llong .compat_sys_pread64
1135 .llong .sys32_pwrite64 /* 180 */ 1105 .llong .compat_sys_pwrite64 /* 180 */
1136 .llong .sys_chown 1106 .llong .sys_chown
1137 .llong .sys_getcwd 1107 .llong .sys_getcwd
1138 .llong .sys_capget 1108 .llong .sys_capget
1139 .llong .sys_capset 1109 .llong .sys_capset
1140 .llong .sys32_sigaltstack /* 185 */ 1110 .llong .compat_sys_sigaltstack /* 185 */
1141 .llong .sys32_sendfile 1111 .llong .compat_sys_sendfile
1142 .llong .sys_ni_syscall /* reserved for streams1 */ 1112 .llong .sys_ni_syscall /* reserved for streams1 */
1143 .llong .sys_ni_syscall /* reserved for streams2 */ 1113 .llong .sys_ni_syscall /* reserved for streams2 */
1144 .llong .ppc_vfork 1114 .llong .ppc_vfork
1145 .llong .compat_sys_getrlimit /* 190 */ 1115 .llong .compat_sys_getrlimit /* 190 */
1146 .llong .sys32_readahead 1116 .llong .compat_sys_readahead
1147 .llong .sys32_mmap2 1117 .llong .compat_sys_mmap2
1148 .llong .sys32_truncate64 1118 .llong .compat_sys_truncate64
1149 .llong .sys32_ftruncate64 1119 .llong .compat_sys_ftruncate64
1150 .llong .sys_stat64 /* 195 */ 1120 .llong .sys_stat64 /* 195 */
1151 .llong .sys_lstat64 1121 .llong .sys_lstat64
1152 .llong .sys_fstat64 1122 .llong .sys_fstat64
1153 .llong .sys32_pciconfig_read 1123 .llong .compat_sys_pciconfig_read
1154 .llong .sys32_pciconfig_write 1124 .llong .compat_sys_pciconfig_write
1155 .llong .sys32_pciconfig_iobase /* 200 - pciconfig_iobase */ 1125 .llong .compat_sys_pciconfig_iobase /* 200 - pciconfig_iobase */
1156 .llong .sys_ni_syscall /* reserved for MacOnLinux */ 1126 .llong .sys_ni_syscall /* reserved for MacOnLinux */
1157 .llong .sys_getdents64 1127 .llong .sys_getdents64
1158 .llong .sys_pivot_root 1128 .llong .sys_pivot_root
@@ -1178,7 +1148,7 @@ _GLOBAL(sys_call_table32)
1178 .llong .compat_sys_sched_getaffinity 1148 .llong .compat_sys_sched_getaffinity
1179 .llong .sys_ni_syscall 1149 .llong .sys_ni_syscall
1180 .llong .sys_ni_syscall /* 225 - reserved for tux */ 1150 .llong .sys_ni_syscall /* 225 - reserved for tux */
1181 .llong .sys32_sendfile64 1151 .llong .compat_sys_sendfile64
1182 .llong .compat_sys_io_setup 1152 .llong .compat_sys_io_setup
1183 .llong .sys_io_destroy 1153 .llong .sys_io_destroy
1184 .llong .compat_sys_io_getevents 1154 .llong .compat_sys_io_getevents
@@ -1197,16 +1167,16 @@ _GLOBAL(sys_call_table32)
1197 .llong .compat_sys_timer_gettime 1167 .llong .compat_sys_timer_gettime
1198 .llong .sys_timer_getoverrun 1168 .llong .sys_timer_getoverrun
1199 .llong .sys_timer_delete 1169 .llong .sys_timer_delete
1200 .llong .compat_sys_clock_settime /* 245 */ 1170 .llong .compat_sys_clock_settime/* 245 */
1201 .llong .compat_sys_clock_gettime 1171 .llong .compat_sys_clock_gettime
1202 .llong .compat_sys_clock_getres 1172 .llong .compat_sys_clock_getres
1203 .llong .compat_sys_clock_nanosleep 1173 .llong .compat_sys_clock_nanosleep
1204 .llong .ppc32_swapcontext 1174 .llong .ppc32_swapcontext
1205 .llong .sys32_tgkill /* 250 */ 1175 .llong .compat_sys_tgkill /* 250 */
1206 .llong .sys32_utimes 1176 .llong .compat_sys_utimes
1207 .llong .compat_sys_statfs64 1177 .llong .compat_sys_statfs64
1208 .llong .compat_sys_fstatfs64 1178 .llong .compat_sys_fstatfs64
1209 .llong .ppc32_fadvise64_64 /* 32bit only fadvise64_64 */ 1179 .llong .ppc_fadvise64_64 /* 32bit only fadvise64_64 */
1210 .llong .ppc_rtas /* 255 */ 1180 .llong .ppc_rtas /* 255 */
1211 .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */ 1181 .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
1212 .llong .sys_ni_syscall /* 257 reserved for vserver */ 1182 .llong .sys_ni_syscall /* 257 reserved for vserver */
@@ -1221,12 +1191,12 @@ _GLOBAL(sys_call_table32)
1221 .llong .compat_sys_mq_notify 1191 .llong .compat_sys_mq_notify
1222 .llong .compat_sys_mq_getsetattr 1192 .llong .compat_sys_mq_getsetattr
1223 .llong .compat_sys_kexec_load 1193 .llong .compat_sys_kexec_load
1224 .llong .sys32_add_key 1194 .llong .compat_sys_add_key
1225 .llong .sys32_request_key /* 270 */ 1195 .llong .compat_sys_request_key /* 270 */
1226 .llong .compat_sys_keyctl 1196 .llong .compat_sys_keyctl
1227 .llong .compat_sys_waitid 1197 .llong .compat_sys_waitid
1228 .llong .sys32_ioprio_set 1198 .llong .compat_sys_ioprio_set
1229 .llong .sys32_ioprio_get 1199 .llong .compat_sys_ioprio_get
1230 .llong .sys_inotify_init /* 275 */ 1200 .llong .sys_inotify_init /* 275 */
1231 .llong .sys_inotify_add_watch 1201 .llong .sys_inotify_add_watch
1232 .llong .sys_inotify_rm_watch 1202 .llong .sys_inotify_rm_watch
@@ -1355,7 +1325,7 @@ _GLOBAL(sys_call_table)
1355 .llong .sys_ni_syscall 1325 .llong .sys_ni_syscall
1356 .llong .ppc_clone /* 120 */ 1326 .llong .ppc_clone /* 120 */
1357 .llong .sys_setdomainname 1327 .llong .sys_setdomainname
1358 .llong .ppc64_newuname 1328 .llong .ppc_newuname
1359 .llong .sys_ni_syscall /* old modify_ldt syscall */ 1329 .llong .sys_ni_syscall /* old modify_ldt syscall */
1360 .llong .sys_adjtimex 1330 .llong .sys_adjtimex
1361 .llong .sys_mprotect /* 125 */ 1331 .llong .sys_mprotect /* 125 */
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index ff4be1da69d5..b2fb6746f00b 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -31,8 +31,7 @@
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/udbg.h> 33#include <asm/udbg.h>
34 34#include <asm/ppc-pci.h>
35#include "pci.h"
36 35
37#ifdef DEBUG 36#ifdef DEBUG
38#define DBG(fmt...) udbg_printf(fmt) 37#define DBG(fmt...) udbg_printf(fmt)
@@ -881,9 +880,9 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
881} 880}
882 881
883void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 882void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
884 struct device_node *dev) 883 struct device_node *dev, int prim)
885{ 884{
886 unsigned int *ranges; 885 unsigned int *ranges, pci_space;
887 unsigned long size; 886 unsigned long size;
888 int rlen = 0; 887 int rlen = 0;
889 int memno = 0; 888 int memno = 0;
@@ -906,16 +905,39 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
906 ranges = (unsigned int *) get_property(dev, "ranges", &rlen); 905 ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
907 while ((rlen -= np * sizeof(unsigned int)) >= 0) { 906 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
908 res = NULL; 907 res = NULL;
909 pci_addr = (unsigned long)ranges[1] << 32 | ranges[2]; 908 pci_space = ranges[0];
909 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
910 910
911 cpu_phys_addr = ranges[3]; 911 cpu_phys_addr = ranges[3];
912 if (na == 2) 912 if (na >= 2)
913 cpu_phys_addr = cpu_phys_addr << 32 | ranges[4]; 913 cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
914 914
915 size = (unsigned long)ranges[na+3] << 32 | ranges[na+4]; 915 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
916 ranges += np;
916 if (size == 0) 917 if (size == 0)
917 continue; 918 continue;
918 switch ((ranges[0] >> 24) & 0x3) { 919
920 /* Now consume following elements while they are contiguous */
921 while (rlen >= np * sizeof(unsigned int)) {
922 unsigned long addr, phys;
923
924 if (ranges[0] != pci_space)
925 break;
926 addr = ((unsigned long)ranges[1] << 32) | ranges[2];
927 phys = ranges[3];
928 if (na >= 2)
929 phys = (phys << 32) | ranges[4];
930 if (addr != pci_addr + size ||
931 phys != cpu_phys_addr + size)
932 break;
933
934 size += ((unsigned long)ranges[na+3] << 32)
935 | ranges[na+4];
936 ranges += np;
937 rlen -= np * sizeof(unsigned int);
938 }
939
940 switch ((pci_space >> 24) & 0x3) {
919 case 1: /* I/O space */ 941 case 1: /* I/O space */
920 hose->io_base_phys = cpu_phys_addr; 942 hose->io_base_phys = cpu_phys_addr;
921 hose->pci_io_size = size; 943 hose->pci_io_size = size;
@@ -949,7 +971,6 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
949 res->sibling = NULL; 971 res->sibling = NULL;
950 res->child = NULL; 972 res->child = NULL;
951 } 973 }
952 ranges += np;
953 } 974 }
954} 975}
955 976
diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
index 54055c81017a..e1a32f802c0b 100644
--- a/arch/ppc64/kernel/pci_direct_iommu.c
+++ b/arch/ppc64/kernel/pci_direct_iommu.c
@@ -27,8 +27,7 @@
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/pmac_feature.h> 28#include <asm/pmac_feature.h>
29#include <asm/abs_addr.h> 29#include <asm/abs_addr.h>
30 30#include <asm/ppc-pci.h>
31#include "pci.h"
32 31
33static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 32static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
34 dma_addr_t *dma_handle, gfp_t flag) 33 dma_addr_t *dma_handle, gfp_t flag)
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index a86389d07d57..493bbe43f5b4 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -30,8 +30,7 @@
30#include <asm/prom.h> 30#include <asm/prom.h>
31#include <asm/pci-bridge.h> 31#include <asm/pci-bridge.h>
32#include <asm/pSeries_reconfig.h> 32#include <asm/pSeries_reconfig.h>
33 33#include <asm/ppc-pci.h>
34#include "pci.h"
35 34
36/* 35/*
37 * Traverse_func that inits the PCI fields of the device node. 36 * Traverse_func that inits the PCI fields of the device node.
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
index d9e33b7d4203..bdf15dbbf4f0 100644
--- a/arch/ppc64/kernel/pci_iommu.c
+++ b/arch/ppc64/kernel/pci_iommu.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * arch/ppc64/kernel/pci_iommu.c 2 * arch/ppc64/kernel/pci_iommu.c
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 * 4 *
5 * Rewrite, cleanup, new allocation schemes: 5 * Rewrite, cleanup, new allocation schemes:
6 * Copyright (C) 2004 Olof Johansson, IBM Corporation 6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
7 * 7 *
8 * Dynamic DMA mapping support, platform-independent parts. 8 * Dynamic DMA mapping support, platform-independent parts.
@@ -11,19 +11,18 @@
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25 25
26#include <linux/config.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/slab.h> 28#include <linux/slab.h>
@@ -37,11 +36,7 @@
37#include <asm/iommu.h> 36#include <asm/iommu.h>
38#include <asm/pci-bridge.h> 37#include <asm/pci-bridge.h>
39#include <asm/machdep.h> 38#include <asm/machdep.h>
40#include "pci.h" 39#include <asm/ppc-pci.h>
41
42#ifdef CONFIG_PPC_ISERIES
43#include <asm/iSeries/iSeries_pci.h>
44#endif /* CONFIG_PPC_ISERIES */
45 40
46/* 41/*
47 * We can use ->sysdata directly and avoid the extra work in 42 * We can use ->sysdata directly and avoid the extra work in
@@ -61,13 +56,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
61 } else 56 } else
62 pdev = to_pci_dev(dev); 57 pdev = to_pci_dev(dev);
63 58
64#ifdef CONFIG_PPC_ISERIES
65 return ISERIES_DEVNODE(pdev)->iommu_table;
66#endif /* CONFIG_PPC_ISERIES */
67
68#ifdef CONFIG_PPC_MULTIPLATFORM
69 return PCI_DN(PCI_GET_DN(pdev))->iommu_table; 59 return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
70#endif /* CONFIG_PPC_MULTIPLATFORM */
71} 60}
72 61
73 62
diff --git a/arch/ppc64/kernel/pmac.h b/arch/ppc64/kernel/pmac.h
deleted file mode 100644
index 40e1c5030f74..000000000000
--- a/arch/ppc64/kernel/pmac.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef __PMAC_H__
2#define __PMAC_H__
3
4#include <linux/pci.h>
5#include <linux/ide.h>
6
7/*
8 * Declaration for the various functions exported by the
9 * pmac_* files. Mostly for use by pmac_setup
10 */
11
12extern void pmac_get_boot_time(struct rtc_time *tm);
13extern void pmac_get_rtc_time(struct rtc_time *tm);
14extern int pmac_set_rtc_time(struct rtc_time *tm);
15extern void pmac_read_rtc_time(void);
16extern void pmac_calibrate_decr(void);
17
18extern void pmac_pcibios_fixup(void);
19extern void pmac_pci_init(void);
20extern void pmac_setup_pci_dma(void);
21extern void pmac_check_ht_link(void);
22
23extern void pmac_setup_smp(void);
24
25extern unsigned long pmac_ide_get_base(int index);
26extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
27 unsigned long data_port, unsigned long ctrl_port, int *irq);
28
29extern void pmac_nvram_init(void);
30
31#endif /* __PMAC_H__ */
diff --git a/arch/ppc64/kernel/pmac_feature.c b/arch/ppc64/kernel/pmac_feature.c
deleted file mode 100644
index eb4e6c3f694d..000000000000
--- a/arch/ppc64/kernel/pmac_feature.c
+++ /dev/null
@@ -1,767 +0,0 @@
1/*
2 * arch/ppc/platforms/pmac_feature.c
3 *
4 * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
5 * Ben. Herrenschmidt (benh@kernel.crashing.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * TODO:
13 *
14 * - Replace mdelay with some schedule loop if possible
15 * - Shorten some obfuscated delays on some routines (like modem
16 * power)
17 * - Refcount some clocks (see darwin)
18 * - Split split split...
19 *
20 */
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28#include <linux/adb.h>
29#include <linux/pmu.h>
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <asm/sections.h>
33#include <asm/errno.h>
34#include <asm/keylargo.h>
35#include <asm/uninorth.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/machdep.h>
39#include <asm/pmac_feature.h>
40#include <asm/dbdma.h>
41#include <asm/pci-bridge.h>
42#include <asm/pmac_low_i2c.h>
43
44#undef DEBUG_FEATURE
45
46#ifdef DEBUG_FEATURE
47#define DBG(fmt...) printk(KERN_DEBUG fmt)
48#else
49#define DBG(fmt...)
50#endif
51
52/*
53 * We use a single global lock to protect accesses. Each driver has
54 * to take care of its own locking
55 */
56static DEFINE_SPINLOCK(feature_lock __pmacdata);
57
58#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
59#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
60
61
62/*
63 * Instance of some macio stuffs
64 */
65struct macio_chip macio_chips[MAX_MACIO_CHIPS] __pmacdata;
66
67struct macio_chip* __pmac macio_find(struct device_node* child, int type)
68{
69 while(child) {
70 int i;
71
72 for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
73 if (child == macio_chips[i].of_node &&
74 (!type || macio_chips[i].type == type))
75 return &macio_chips[i];
76 child = child->parent;
77 }
78 return NULL;
79}
80EXPORT_SYMBOL_GPL(macio_find);
81
82static const char* macio_names[] __pmacdata =
83{
84 "Unknown",
85 "Grand Central",
86 "OHare",
87 "OHareII",
88 "Heathrow",
89 "Gatwick",
90 "Paddington",
91 "Keylargo",
92 "Pangea",
93 "Intrepid",
94 "K2"
95};
96
97
98
99/*
100 * Uninorth reg. access. Note that Uni-N regs are big endian
101 */
102
103#define UN_REG(r) (uninorth_base + ((r) >> 2))
104#define UN_IN(r) (in_be32(UN_REG(r)))
105#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
106#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
107#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
108
109static struct device_node* uninorth_node __pmacdata;
110static u32* uninorth_base __pmacdata;
111static u32 uninorth_rev __pmacdata;
112static void *u3_ht;
113
114extern struct device_node *k2_skiplist[2];
115
116/*
117 * For each motherboard family, we have a table of functions pointers
118 * that handle the various features.
119 */
120
121typedef long (*feature_call)(struct device_node* node, long param, long value);
122
123struct feature_table_entry {
124 unsigned int selector;
125 feature_call function;
126};
127
128struct pmac_mb_def
129{
130 const char* model_string;
131 const char* model_name;
132 int model_id;
133 struct feature_table_entry* features;
134 unsigned long board_flags;
135};
136static struct pmac_mb_def pmac_mb __pmacdata;
137
138/*
139 * Here are the chip specific feature functions
140 */
141
142
143static long __pmac g5_read_gpio(struct device_node* node, long param, long value)
144{
145 struct macio_chip* macio = &macio_chips[0];
146
147 return MACIO_IN8(param);
148}
149
150
151static long __pmac g5_write_gpio(struct device_node* node, long param, long value)
152{
153 struct macio_chip* macio = &macio_chips[0];
154
155 MACIO_OUT8(param, (u8)(value & 0xff));
156 return 0;
157}
158
159static long __pmac g5_gmac_enable(struct device_node* node, long param, long value)
160{
161 struct macio_chip* macio = &macio_chips[0];
162 unsigned long flags;
163
164 if (node == NULL)
165 return -ENODEV;
166
167 LOCK(flags);
168 if (value) {
169 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
170 mb();
171 k2_skiplist[0] = NULL;
172 } else {
173 k2_skiplist[0] = node;
174 mb();
175 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
176 }
177
178 UNLOCK(flags);
179 mdelay(1);
180
181 return 0;
182}
183
184static long __pmac g5_fw_enable(struct device_node* node, long param, long value)
185{
186 struct macio_chip* macio = &macio_chips[0];
187 unsigned long flags;
188
189 if (node == NULL)
190 return -ENODEV;
191
192 LOCK(flags);
193 if (value) {
194 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
195 mb();
196 k2_skiplist[1] = NULL;
197 } else {
198 k2_skiplist[1] = node;
199 mb();
200 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
201 }
202
203 UNLOCK(flags);
204 mdelay(1);
205
206 return 0;
207}
208
209static long __pmac g5_mpic_enable(struct device_node* node, long param, long value)
210{
211 unsigned long flags;
212
213 if (node->parent == NULL || strcmp(node->parent->name, "u3"))
214 return 0;
215
216 LOCK(flags);
217 UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
218 UNLOCK(flags);
219
220 return 0;
221}
222
223static long __pmac g5_eth_phy_reset(struct device_node* node, long param, long value)
224{
225 struct macio_chip* macio = &macio_chips[0];
226 struct device_node *phy;
227 int need_reset;
228
229 /*
230 * We must not reset the combo PHYs, only the BCM5221 found in
231 * the iMac G5.
232 */
233 phy = of_get_next_child(node, NULL);
234 if (!phy)
235 return -ENODEV;
236 need_reset = device_is_compatible(phy, "B5221");
237 of_node_put(phy);
238 if (!need_reset)
239 return 0;
240
241 /* PHY reset is GPIO 29, not in device-tree unfortunately */
242 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
243 KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
244 /* Thankfully, this is now always called at a time when we can
245 * schedule by sungem.
246 */
247 msleep(10);
248 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
249
250 return 0;
251}
252
253static long __pmac g5_i2s_enable(struct device_node *node, long param, long value)
254{
255 /* Very crude implementation for now */
256 struct macio_chip* macio = &macio_chips[0];
257 unsigned long flags;
258
259 if (value == 0)
260 return 0; /* don't disable yet */
261
262 LOCK(flags);
263 MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
264 KL3_I2S0_CLK18_ENABLE);
265 udelay(10);
266 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
267 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
268 udelay(10);
269 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
270 UNLOCK(flags);
271 udelay(10);
272
273 return 0;
274}
275
276
277#ifdef CONFIG_SMP
278static long __pmac g5_reset_cpu(struct device_node* node, long param, long value)
279{
280 unsigned int reset_io = 0;
281 unsigned long flags;
282 struct macio_chip* macio;
283 struct device_node* np;
284
285 macio = &macio_chips[0];
286 if (macio->type != macio_keylargo2)
287 return -ENODEV;
288
289 np = find_path_device("/cpus");
290 if (np == NULL)
291 return -ENODEV;
292 for (np = np->child; np != NULL; np = np->sibling) {
293 u32* num = (u32 *)get_property(np, "reg", NULL);
294 u32* rst = (u32 *)get_property(np, "soft-reset", NULL);
295 if (num == NULL || rst == NULL)
296 continue;
297 if (param == *num) {
298 reset_io = *rst;
299 break;
300 }
301 }
302 if (np == NULL || reset_io == 0)
303 return -ENODEV;
304
305 LOCK(flags);
306 MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
307 (void)MACIO_IN8(reset_io);
308 udelay(1);
309 MACIO_OUT8(reset_io, 0);
310 (void)MACIO_IN8(reset_io);
311 UNLOCK(flags);
312
313 return 0;
314}
315#endif /* CONFIG_SMP */
316
317/*
318 * This can be called from pmac_smp so isn't static
319 *
320 * This takes the second CPU off the bus on dual CPU machines
321 * running UP
322 */
323void __pmac g5_phy_disable_cpu1(void)
324{
325 UN_OUT(U3_API_PHY_CONFIG_1, 0);
326}
327
328static long __pmac generic_get_mb_info(struct device_node* node, long param, long value)
329{
330 switch(param) {
331 case PMAC_MB_INFO_MODEL:
332 return pmac_mb.model_id;
333 case PMAC_MB_INFO_FLAGS:
334 return pmac_mb.board_flags;
335 case PMAC_MB_INFO_NAME:
336 /* hack hack hack... but should work */
337 *((const char **)value) = pmac_mb.model_name;
338 return 0;
339 }
340 return -EINVAL;
341}
342
343
344/*
345 * Table definitions
346 */
347
348/* Used on any machine
349 */
350static struct feature_table_entry any_features[] __pmacdata = {
351 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
352 { 0, NULL }
353};
354
355/* G5 features
356 */
357static struct feature_table_entry g5_features[] __pmacdata = {
358 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
359 { PMAC_FTR_1394_ENABLE, g5_fw_enable },
360 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
361 { PMAC_FTR_READ_GPIO, g5_read_gpio },
362 { PMAC_FTR_WRITE_GPIO, g5_write_gpio },
363 { PMAC_FTR_GMAC_PHY_RESET, g5_eth_phy_reset },
364 { PMAC_FTR_SOUND_CHIP_ENABLE, g5_i2s_enable },
365#ifdef CONFIG_SMP
366 { PMAC_FTR_RESET_CPU, g5_reset_cpu },
367#endif /* CONFIG_SMP */
368 { 0, NULL }
369};
370
371static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
372 { "PowerMac7,2", "PowerMac G5",
373 PMAC_TYPE_POWERMAC_G5, g5_features,
374 0,
375 },
376 { "PowerMac7,3", "PowerMac G5",
377 PMAC_TYPE_POWERMAC_G5, g5_features,
378 0,
379 },
380 { "PowerMac8,1", "iMac G5",
381 PMAC_TYPE_IMAC_G5, g5_features,
382 0,
383 },
384 { "PowerMac9,1", "PowerMac G5",
385 PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
386 0,
387 },
388 { "RackMac3,1", "XServe G5",
389 PMAC_TYPE_XSERVE_G5, g5_features,
390 0,
391 },
392};
393
394/*
395 * The toplevel feature_call callback
396 */
397long __pmac pmac_do_feature_call(unsigned int selector, ...)
398{
399 struct device_node* node;
400 long param, value;
401 int i;
402 feature_call func = NULL;
403 va_list args;
404
405 if (pmac_mb.features)
406 for (i=0; pmac_mb.features[i].function; i++)
407 if (pmac_mb.features[i].selector == selector) {
408 func = pmac_mb.features[i].function;
409 break;
410 }
411 if (!func)
412 for (i=0; any_features[i].function; i++)
413 if (any_features[i].selector == selector) {
414 func = any_features[i].function;
415 break;
416 }
417 if (!func)
418 return -ENODEV;
419
420 va_start(args, selector);
421 node = (struct device_node*)va_arg(args, void*);
422 param = va_arg(args, long);
423 value = va_arg(args, long);
424 va_end(args);
425
426 return func(node, param, value);
427}
428
429static int __init probe_motherboard(void)
430{
431 int i;
432 struct macio_chip* macio = &macio_chips[0];
433 const char* model = NULL;
434 struct device_node *dt;
435
436 /* Lookup known motherboard type in device-tree. First try an
437 * exact match on the "model" property, then try a "compatible"
438 * match is none is found.
439 */
440 dt = find_devices("device-tree");
441 if (dt != NULL)
442 model = (const char *) get_property(dt, "model", NULL);
443 for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
444 if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
445 pmac_mb = pmac_mb_defs[i];
446 goto found;
447 }
448 }
449 for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
450 if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
451 pmac_mb = pmac_mb_defs[i];
452 goto found;
453 }
454 }
455
456 /* Fallback to selection depending on mac-io chip type */
457 switch(macio->type) {
458 case macio_keylargo2:
459 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
460 pmac_mb.model_name = "Unknown K2-based";
461 pmac_mb.features = g5_features;
462
463 default:
464 return -ENODEV;
465 }
466found:
467 /* Check for "mobile" machine */
468 if (model && (strncmp(model, "PowerBook", 9) == 0
469 || strncmp(model, "iBook", 5) == 0))
470 pmac_mb.board_flags |= PMAC_MB_MOBILE;
471
472
473 printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
474 return 0;
475}
476
477/* Initialize the Core99 UniNorth host bridge and memory controller
478 */
479static void __init probe_uninorth(void)
480{
481 uninorth_node = of_find_node_by_name(NULL, "u3");
482 if (uninorth_node && uninorth_node->n_addrs > 0) {
483 /* Small hack until I figure out if parsing in prom.c is correct. I should
484 * get rid of those pre-parsed junk anyway
485 */
486 unsigned long address = uninorth_node->addrs[0].address;
487 uninorth_base = ioremap(address, 0x40000);
488 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
489 u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
490 } else
491 uninorth_node = NULL;
492
493 if (!uninorth_node)
494 return;
495
496 printk(KERN_INFO "Found U3 memory controller & host bridge, revision: %d\n",
497 uninorth_rev);
498 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
499
500}
501
502static void __init probe_one_macio(const char* name, const char* compat, int type)
503{
504 struct device_node* node;
505 int i;
506 volatile u32* base;
507 u32* revp;
508
509 node = find_devices(name);
510 if (!node || !node->n_addrs)
511 return;
512 if (compat)
513 do {
514 if (device_is_compatible(node, compat))
515 break;
516 node = node->next;
517 } while (node);
518 if (!node)
519 return;
520 for(i=0; i<MAX_MACIO_CHIPS; i++) {
521 if (!macio_chips[i].of_node)
522 break;
523 if (macio_chips[i].of_node == node)
524 return;
525 }
526 if (i >= MAX_MACIO_CHIPS) {
527 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
528 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
529 return;
530 }
531 base = (volatile u32*)ioremap(node->addrs[0].address, node->addrs[0].size);
532 if (!base) {
533 printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
534 return;
535 }
536 if (type == macio_keylargo) {
537 u32* did = (u32 *)get_property(node, "device-id", NULL);
538 if (*did == 0x00000025)
539 type = macio_pangea;
540 if (*did == 0x0000003e)
541 type = macio_intrepid;
542 }
543 macio_chips[i].of_node = node;
544 macio_chips[i].type = type;
545 macio_chips[i].base = base;
546 macio_chips[i].flags = MACIO_FLAG_SCCB_ON | MACIO_FLAG_SCCB_ON;
547 macio_chips[i].name = macio_names[type];
548 revp = (u32 *)get_property(node, "revision-id", NULL);
549 if (revp)
550 macio_chips[i].rev = *revp;
551 printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
552 macio_names[type], macio_chips[i].rev, macio_chips[i].base);
553}
554
555static int __init
556probe_macios(void)
557{
558 probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
559
560 macio_chips[0].lbus.index = 0;
561 macio_chips[1].lbus.index = 1;
562
563 return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
564}
565
566static void __init
567set_initial_features(void)
568{
569 struct device_node *np;
570
571 if (macio_chips[0].type == macio_keylargo2) {
572#ifndef CONFIG_SMP
573 /* On SMP machines running UP, we have the second CPU eating
574 * bus cycles. We need to take it off the bus. This is done
575 * from pmac_smp for SMP kernels running on one CPU
576 */
577 np = of_find_node_by_type(NULL, "cpu");
578 if (np != NULL)
579 np = of_find_node_by_type(np, "cpu");
580 if (np != NULL) {
581 g5_phy_disable_cpu1();
582 of_node_put(np);
583 }
584#endif /* CONFIG_SMP */
585 /* Enable GMAC for now for PCI probing. It will be disabled
586 * later on after PCI probe
587 */
588 np = of_find_node_by_name(NULL, "ethernet");
589 while(np) {
590 if (device_is_compatible(np, "K2-GMAC"))
591 g5_gmac_enable(np, 0, 1);
592 np = of_find_node_by_name(np, "ethernet");
593 }
594
595 /* Enable FW before PCI probe. Will be disabled later on
596 * Note: We should have a batter way to check that we are
597 * dealing with uninorth internal cell and not a PCI cell
598 * on the external PCI. The code below works though.
599 */
600 np = of_find_node_by_name(NULL, "firewire");
601 while(np) {
602 if (device_is_compatible(np, "pci106b,5811")) {
603 macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
604 g5_fw_enable(np, 0, 1);
605 }
606 np = of_find_node_by_name(np, "firewire");
607 }
608 }
609}
610
611void __init
612pmac_feature_init(void)
613{
614 /* Detect the UniNorth memory controller */
615 probe_uninorth();
616
617 /* Probe mac-io controllers */
618 if (probe_macios()) {
619 printk(KERN_WARNING "No mac-io chip found\n");
620 return;
621 }
622
623 /* Setup low-level i2c stuffs */
624 pmac_init_low_i2c();
625
626 /* Probe machine type */
627 if (probe_motherboard())
628 printk(KERN_WARNING "Unknown PowerMac !\n");
629
630 /* Set some initial features (turn off some chips that will
631 * be later turned on)
632 */
633 set_initial_features();
634}
635
636int __init pmac_feature_late_init(void)
637{
638#if 0
639 struct device_node* np;
640
641 /* Request some resources late */
642 if (uninorth_node)
643 request_OF_resource(uninorth_node, 0, NULL);
644 np = find_devices("hammerhead");
645 if (np)
646 request_OF_resource(np, 0, NULL);
647 np = find_devices("interrupt-controller");
648 if (np)
649 request_OF_resource(np, 0, NULL);
650#endif
651 return 0;
652}
653
654device_initcall(pmac_feature_late_init);
655
656#if 0
657static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
658{
659 int freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
660 int bits[8] = { 8,16,0,32,2,4,0,0 };
661 int freq = (frq >> 8) & 0xf;
662
663 if (freqs[freq] == 0)
664 printk("%s: Unknown HT link frequency %x\n", name, freq);
665 else
666 printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
667 name, freqs[freq],
668 bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
669}
670#endif
671
672void __init pmac_check_ht_link(void)
673{
674#if 0 /* Disabled for now */
675 u32 ufreq, freq, ucfg, cfg;
676 struct device_node *pcix_node;
677 struct pci_dn *pdn;
678 u8 px_bus, px_devfn;
679 struct pci_controller *px_hose;
680
681 (void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
682 ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
683 ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
684 dump_HT_speeds("U3 HyperTransport", cfg, freq);
685
686 pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
687 if (pcix_node == NULL) {
688 printk("No PCI-X bridge found\n");
689 return;
690 }
691 pdn = pcix_node->data;
692 px_hose = pdn->phb;
693 px_bus = pdn->busno;
694 px_devfn = pdn->devfn;
695
696 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
697 early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
698 dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
699 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
700 early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
701 dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
702#endif
703}
704
705/*
706 * Early video resume hook
707 */
708
709static void (*pmac_early_vresume_proc)(void *data) __pmacdata;
710static void *pmac_early_vresume_data __pmacdata;
711
712void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
713{
714 if (_machine != _MACH_Pmac)
715 return;
716 preempt_disable();
717 pmac_early_vresume_proc = proc;
718 pmac_early_vresume_data = data;
719 preempt_enable();
720}
721EXPORT_SYMBOL(pmac_set_early_video_resume);
722
723
724/*
725 * AGP related suspend/resume code
726 */
727
728static struct pci_dev *pmac_agp_bridge __pmacdata;
729static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata;
730static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata;
731
732void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
733 int (*suspend)(struct pci_dev *bridge),
734 int (*resume)(struct pci_dev *bridge))
735{
736 if (suspend || resume) {
737 pmac_agp_bridge = bridge;
738 pmac_agp_suspend = suspend;
739 pmac_agp_resume = resume;
740 return;
741 }
742 if (bridge != pmac_agp_bridge)
743 return;
744 pmac_agp_suspend = pmac_agp_resume = NULL;
745 return;
746}
747EXPORT_SYMBOL(pmac_register_agp_pm);
748
749void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
750{
751 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
752 return;
753 if (pmac_agp_bridge->bus != dev->bus)
754 return;
755 pmac_agp_suspend(pmac_agp_bridge);
756}
757EXPORT_SYMBOL(pmac_suspend_agp_for_card);
758
759void __pmac pmac_resume_agp_for_card(struct pci_dev *dev)
760{
761 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
762 return;
763 if (pmac_agp_bridge->bus != dev->bus)
764 return;
765 pmac_agp_resume(pmac_agp_bridge);
766}
767EXPORT_SYMBOL(pmac_resume_agp_for_card);
diff --git a/arch/ppc64/kernel/pmac_pci.c b/arch/ppc64/kernel/pmac_pci.c
deleted file mode 100644
index dc40a0cad0b4..000000000000
--- a/arch/ppc64/kernel/pmac_pci.c
+++ /dev/null
@@ -1,793 +0,0 @@
1/*
2 * Support for PCI bridges found on Power Macintoshes.
3 * At present the "bandit" and "chaos" bridges are supported.
4 * Fortunately you access configuration space in the same
5 * way with either bridge.
6 *
7 * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
8 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/kernel.h>
17#include <linux/pci.h>
18#include <linux/delay.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/bootmem.h>
22
23#include <asm/sections.h>
24#include <asm/io.h>
25#include <asm/prom.h>
26#include <asm/pci-bridge.h>
27#include <asm/machdep.h>
28#include <asm/pmac_feature.h>
29#include <asm/iommu.h>
30
31#include "pci.h"
32#include "pmac.h"
33
34#define DEBUG
35
36#ifdef DEBUG
37#define DBG(x...) printk(x)
38#else
39#define DBG(x...)
40#endif
41
42/* XXX Could be per-controller, but I don't think we risk anything by
43 * assuming we won't have both UniNorth and Bandit */
44static int has_uninorth;
45static struct pci_controller *u3_agp;
46struct device_node *k2_skiplist[2];
47
48static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
49{
50 for (; node != 0;node = node->sibling) {
51 int * bus_range;
52 unsigned int *class_code;
53 int len;
54
55 /* For PCI<->PCI bridges or CardBus bridges, we go down */
56 class_code = (unsigned int *) get_property(node, "class-code", NULL);
57 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
58 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
59 continue;
60 bus_range = (int *) get_property(node, "bus-range", &len);
61 if (bus_range != NULL && len > 2 * sizeof(int)) {
62 if (bus_range[1] > higher)
63 higher = bus_range[1];
64 }
65 higher = fixup_one_level_bus_range(node->child, higher);
66 }
67 return higher;
68}
69
70/* This routine fixes the "bus-range" property of all bridges in the
71 * system since they tend to have their "last" member wrong on macs
72 *
73 * Note that the bus numbers manipulated here are OF bus numbers, they
74 * are not Linux bus numbers.
75 */
76static void __init fixup_bus_range(struct device_node *bridge)
77{
78 int * bus_range;
79 int len;
80
81 /* Lookup the "bus-range" property for the hose */
82 bus_range = (int *) get_property(bridge, "bus-range", &len);
83 if (bus_range == NULL || len < 2 * sizeof(int)) {
84 printk(KERN_WARNING "Can't get bus-range for %s\n",
85 bridge->full_name);
86 return;
87 }
88 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
89}
90
91/*
92 * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
93 *
94 * The "Bandit" version is present in all early PCI PowerMacs,
95 * and up to the first ones using Grackle. Some machines may
96 * have 2 bandit controllers (2 PCI busses).
97 *
98 * "Chaos" is used in some "Bandit"-type machines as a bridge
99 * for the separate display bus. It is accessed the same
100 * way as bandit, but cannot be probed for devices. It therefore
101 * has its own config access functions.
102 *
103 * The "UniNorth" version is present in all Core99 machines
104 * (iBook, G4, new IMacs, and all the recent Apple machines).
105 * It contains 3 controllers in one ASIC.
106 *
107 * The U3 is the bridge used on G5 machines. It contains on
108 * AGP bus which is dealt with the old UniNorth access routines
109 * and an HyperTransport bus which uses its own set of access
110 * functions.
111 */
112
113#define MACRISC_CFA0(devfn, off) \
114 ((1 << (unsigned long)PCI_SLOT(dev_fn)) \
115 | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
116 | (((unsigned long)(off)) & 0xFCUL))
117
118#define MACRISC_CFA1(bus, devfn, off) \
119 ((((unsigned long)(bus)) << 16) \
120 |(((unsigned long)(devfn)) << 8) \
121 |(((unsigned long)(off)) & 0xFCUL) \
122 |1UL)
123
124static unsigned long __pmac macrisc_cfg_access(struct pci_controller* hose,
125 u8 bus, u8 dev_fn, u8 offset)
126{
127 unsigned int caddr;
128
129 if (bus == hose->first_busno) {
130 if (dev_fn < (11 << 3))
131 return 0;
132 caddr = MACRISC_CFA0(dev_fn, offset);
133 } else
134 caddr = MACRISC_CFA1(bus, dev_fn, offset);
135
136 /* Uninorth will return garbage if we don't read back the value ! */
137 do {
138 out_le32(hose->cfg_addr, caddr);
139 } while (in_le32(hose->cfg_addr) != caddr);
140
141 offset &= has_uninorth ? 0x07 : 0x03;
142 return ((unsigned long)hose->cfg_data) + offset;
143}
144
145static int __pmac macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
146 int offset, int len, u32 *val)
147{
148 struct pci_controller *hose;
149 unsigned long addr;
150
151 hose = pci_bus_to_host(bus);
152 if (hose == NULL)
153 return PCIBIOS_DEVICE_NOT_FOUND;
154
155 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
156 if (!addr)
157 return PCIBIOS_DEVICE_NOT_FOUND;
158 /*
159 * Note: the caller has already checked that offset is
160 * suitably aligned and that len is 1, 2 or 4.
161 */
162 switch (len) {
163 case 1:
164 *val = in_8((u8 *)addr);
165 break;
166 case 2:
167 *val = in_le16((u16 *)addr);
168 break;
169 default:
170 *val = in_le32((u32 *)addr);
171 break;
172 }
173 return PCIBIOS_SUCCESSFUL;
174}
175
176static int __pmac macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
177 int offset, int len, u32 val)
178{
179 struct pci_controller *hose;
180 unsigned long addr;
181
182 hose = pci_bus_to_host(bus);
183 if (hose == NULL)
184 return PCIBIOS_DEVICE_NOT_FOUND;
185
186 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
187 if (!addr)
188 return PCIBIOS_DEVICE_NOT_FOUND;
189 /*
190 * Note: the caller has already checked that offset is
191 * suitably aligned and that len is 1, 2 or 4.
192 */
193 switch (len) {
194 case 1:
195 out_8((u8 *)addr, val);
196 (void) in_8((u8 *)addr);
197 break;
198 case 2:
199 out_le16((u16 *)addr, val);
200 (void) in_le16((u16 *)addr);
201 break;
202 default:
203 out_le32((u32 *)addr, val);
204 (void) in_le32((u32 *)addr);
205 break;
206 }
207 return PCIBIOS_SUCCESSFUL;
208}
209
210static struct pci_ops macrisc_pci_ops =
211{
212 macrisc_read_config,
213 macrisc_write_config
214};
215
216/*
217 * These versions of U3 HyperTransport config space access ops do not
218 * implement self-view of the HT host yet
219 */
220
221/*
222 * This function deals with some "special cases" devices.
223 *
224 * 0 -> No special case
225 * 1 -> Skip the device but act as if the access was successfull
226 * (return 0xff's on reads, eventually, cache config space
227 * accesses in a later version)
228 * -1 -> Hide the device (unsuccessful acess)
229 */
230static int u3_ht_skip_device(struct pci_controller *hose,
231 struct pci_bus *bus, unsigned int devfn)
232{
233 struct device_node *busdn, *dn;
234 int i;
235
236 /* We only allow config cycles to devices that are in OF device-tree
237 * as we are apparently having some weird things going on with some
238 * revs of K2 on recent G5s
239 */
240 if (bus->self)
241 busdn = pci_device_to_OF_node(bus->self);
242 else
243 busdn = hose->arch_data;
244 for (dn = busdn->child; dn; dn = dn->sibling)
245 if (dn->data && PCI_DN(dn)->devfn == devfn)
246 break;
247 if (dn == NULL)
248 return -1;
249
250 /*
251 * When a device in K2 is powered down, we die on config
252 * cycle accesses. Fix that here.
253 */
254 for (i=0; i<2; i++)
255 if (k2_skiplist[i] == dn)
256 return 1;
257
258 return 0;
259}
260
261#define U3_HT_CFA0(devfn, off) \
262 ((((unsigned long)devfn) << 8) | offset)
263#define U3_HT_CFA1(bus, devfn, off) \
264 (U3_HT_CFA0(devfn, off) \
265 + (((unsigned long)bus) << 16) \
266 + 0x01000000UL)
267
268static unsigned long __pmac u3_ht_cfg_access(struct pci_controller* hose,
269 u8 bus, u8 devfn, u8 offset)
270{
271 if (bus == hose->first_busno) {
272 /* For now, we don't self probe U3 HT bridge */
273 if (PCI_SLOT(devfn) == 0)
274 return 0;
275 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
276 } else
277 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
278}
279
280static int __pmac u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
281 int offset, int len, u32 *val)
282{
283 struct pci_controller *hose;
284 unsigned long addr;
285
286
287 hose = pci_bus_to_host(bus);
288 if (hose == NULL)
289 return PCIBIOS_DEVICE_NOT_FOUND;
290
291 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
292 if (!addr)
293 return PCIBIOS_DEVICE_NOT_FOUND;
294
295 switch (u3_ht_skip_device(hose, bus, devfn)) {
296 case 0:
297 break;
298 case 1:
299 switch (len) {
300 case 1:
301 *val = 0xff; break;
302 case 2:
303 *val = 0xffff; break;
304 default:
305 *val = 0xfffffffful; break;
306 }
307 return PCIBIOS_SUCCESSFUL;
308 default:
309 return PCIBIOS_DEVICE_NOT_FOUND;
310 }
311
312 /*
313 * Note: the caller has already checked that offset is
314 * suitably aligned and that len is 1, 2 or 4.
315 */
316 switch (len) {
317 case 1:
318 *val = in_8((u8 *)addr);
319 break;
320 case 2:
321 *val = in_le16((u16 *)addr);
322 break;
323 default:
324 *val = in_le32((u32 *)addr);
325 break;
326 }
327 return PCIBIOS_SUCCESSFUL;
328}
329
330static int __pmac u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
331 int offset, int len, u32 val)
332{
333 struct pci_controller *hose;
334 unsigned long addr;
335
336 hose = pci_bus_to_host(bus);
337 if (hose == NULL)
338 return PCIBIOS_DEVICE_NOT_FOUND;
339
340 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
341 if (!addr)
342 return PCIBIOS_DEVICE_NOT_FOUND;
343
344 switch (u3_ht_skip_device(hose, bus, devfn)) {
345 case 0:
346 break;
347 case 1:
348 return PCIBIOS_SUCCESSFUL;
349 default:
350 return PCIBIOS_DEVICE_NOT_FOUND;
351 }
352
353 /*
354 * Note: the caller has already checked that offset is
355 * suitably aligned and that len is 1, 2 or 4.
356 */
357 switch (len) {
358 case 1:
359 out_8((u8 *)addr, val);
360 (void) in_8((u8 *)addr);
361 break;
362 case 2:
363 out_le16((u16 *)addr, val);
364 (void) in_le16((u16 *)addr);
365 break;
366 default:
367 out_le32((u32 *)addr, val);
368 (void) in_le32((u32 *)addr);
369 break;
370 }
371 return PCIBIOS_SUCCESSFUL;
372}
373
374static struct pci_ops u3_ht_pci_ops =
375{
376 u3_ht_read_config,
377 u3_ht_write_config
378};
379
380static void __init setup_u3_agp(struct pci_controller* hose)
381{
382 /* On G5, we move AGP up to high bus number so we don't need
383 * to reassign bus numbers for HT. If we ever have P2P bridges
384 * on AGP, we'll have to move pci_assign_all_busses to the
385 * pci_controller structure so we enable it for AGP and not for
386 * HT childs.
387 * We hard code the address because of the different size of
388 * the reg address cell, we shall fix that by killing struct
389 * reg_property and using some accessor functions instead
390 */
391 hose->first_busno = 0xf0;
392 hose->last_busno = 0xff;
393 has_uninorth = 1;
394 hose->ops = &macrisc_pci_ops;
395 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
396 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
397
398 u3_agp = hose;
399}
400
/*
 * Set up the pci_controller for the U3 HyperTransport host bridge.
 *
 * Because the /ht node exposes no usable "ranges" property, the MMIO
 * window 0x80000000-0xefffffff is claimed wholesale and then "holes"
 * are punched in it for every memory resource already assigned to the
 * AGP bridge (which must have been set up first -- see pmac_pci_init).
 * The hole-carving assumes the AGP resources are sorted by address.
 */
401static void __init setup_u3_ht(struct pci_controller* hose)
402{
403 struct device_node *np = (struct device_node *)hose->arch_data;
404 int i, cur;
405
406 hose->ops = &u3_ht_pci_ops;
407
408 /* We hard code the address because of the different size of
409 * the reg address cell, we shall fix that by killing struct
410 * reg_property and using some accessor functions instead
411 */
412 hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
413
414 /*
415 * /ht node doesn't expose a "ranges" property, so we "remove" regions that
416 * have been allocated to AGP. So far, this version of the code doesn't assign
417 * any of the 0xfxxxxxxx "fine" memory regions to /ht.
418 * We need to fix that sooner or later by either parsing all child "ranges"
419 * properties or figuring out the U3 address space decoding logic and
420 * then read it's configuration register (if any).
421 */
422 hose->io_base_phys = 0xf4000000;
423 hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
424 isa_io_base = pci_io_base = (unsigned long) hose->io_base_virt;
425 hose->io_resource.name = np->full_name;
426 hose->io_resource.start = 0;
427 hose->io_resource.end = 0x003fffff;
428 hose->io_resource.flags = IORESOURCE_IO;
429 hose->pci_mem_offset = 0;
430 hose->first_busno = 0;
431 hose->last_busno = 0xef;
432 hose->mem_resources[0].name = np->full_name;
433 hose->mem_resources[0].start = 0x80000000;
434 hose->mem_resources[0].end = 0xefffffff;
435 hose->mem_resources[0].flags = IORESOURCE_MEM;
436
 /* Without an AGP bridge there is nothing to carve out */
437 if (u3_agp == NULL) {
438 DBG("U3 has no AGP, using full resource range\n");
439 return;
440 }
441
442 /* We "remove" the AGP resources from the resources allocated to HT, that
443 * is we create "holes". However, that code does assumptions that so far
444 * happen to be true (cross fingers...), typically that resources in the
445 * AGP node are properly ordered
446 */
447 cur = 0;
448 for (i=0; i<3; i++) {
449 struct resource *res = &u3_agp->mem_resources[i];
450 if (res->flags != IORESOURCE_MEM)
451 continue;
452 /* We don't care about "fine" resources */
453 if (res->start >= 0xf0000000)
454 continue;
455 /* Check if it's just a matter of "shrinking" us in one direction */
456 if (hose->mem_resources[cur].start == res->start) {
457 DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
458 cur, hose->mem_resources[cur].start, res->end + 1);
459 hose->mem_resources[cur].start = res->end + 1;
460 continue;
461 }
462 if (hose->mem_resources[cur].end == res->end) {
463 DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
464 cur, hose->mem_resources[cur].end, res->start - 1);
465 hose->mem_resources[cur].end = res->start - 1;
466 continue;
467 }
468 /* No, it's not the case, we need a hole */
469 if (cur == 2) {
470 /* not enough resources for a hole, we drop part of the range */
471 printk(KERN_WARNING "Running out of resources for /ht host !\n");
472 hose->mem_resources[cur].end = res->start - 1;
473 continue;
474 }
 /* Split resource 'cur' into [old_start, res->start-1] and
 * [res->end+1, old_end], consuming one more resource slot */
475 cur++;
476 DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
477 cur-1, res->start - 1, cur, res->end + 1);
478 hose->mem_resources[cur].name = np->full_name;
479 hose->mem_resources[cur].flags = IORESOURCE_MEM;
480 hose->mem_resources[cur].start = res->end + 1;
481 hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
482 hose->mem_resources[cur-1].end = res->start - 1;
483 }
484}
485
486static void __init pmac_process_bridge_OF_ranges(struct pci_controller *hose,
487 struct device_node *dev, int primary)
488{
489 static unsigned int static_lc_ranges[2024];
490 unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
491 unsigned int size;
492 int rlen = 0, orig_rlen;
493 int memno = 0;
494 struct resource *res;
495 int np, na = prom_n_addr_cells(dev);
496
497 np = na + 5;
498
499 /* First we try to merge ranges to fix a problem with some pmacs
500 * that can have more than 3 ranges, fortunately using contiguous
501 * addresses -- BenH
502 */
503 dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
504 if (!dt_ranges)
505 return;
506 /* lc_ranges = alloc_bootmem(rlen);*/
507 lc_ranges = static_lc_ranges;
508 if (!lc_ranges)
509 return; /* what can we do here ? */
510 memcpy(lc_ranges, dt_ranges, rlen);
511 orig_rlen = rlen;
512
513 /* Let's work on a copy of the "ranges" property instead of damaging
514 * the device-tree image in memory
515 */
516 ranges = lc_ranges;
517 prev = NULL;
518 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
519 if (prev) {
520 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
521 (prev[2] + prev[na+4]) == ranges[2] &&
522 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
523 prev[na+4] += ranges[na+4];
524 ranges[0] = 0;
525 ranges += np;
526 continue;
527 }
528 }
529 prev = ranges;
530 ranges += np;
531 }
532
533 /*
534 * The ranges property is laid out as an array of elements,
535 * each of which comprises:
536 * cells 0 - 2: a PCI address
537 * cells 3 or 3+4: a CPU physical address
538 * (size depending on dev->n_addr_cells)
539 * cells 4+5 or 5+6: the size of the range
540 */
541 ranges = lc_ranges;
542 rlen = orig_rlen;
543 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
544 res = NULL;
545 size = ranges[na+4];
546 switch (ranges[0] >> 24) {
547 case 1: /* I/O space */
548 if (ranges[2] != 0)
549 break;
550 hose->io_base_phys = ranges[na+2];
551 /* limit I/O space to 16MB */
552 if (size > 0x01000000)
553 size = 0x01000000;
554 hose->io_base_virt = ioremap(ranges[na+2], size);
555 if (primary)
556 isa_io_base = (unsigned long) hose->io_base_virt;
557 res = &hose->io_resource;
558 res->flags = IORESOURCE_IO;
559 res->start = ranges[2];
560 break;
561 case 2: /* memory space */
562 memno = 0;
563 if (ranges[1] == 0 && ranges[2] == 0
564 && ranges[na+4] <= (16 << 20)) {
565 /* 1st 16MB, i.e. ISA memory area */
566#if 0
567 if (primary)
568 isa_mem_base = ranges[na+2];
569#endif
570 memno = 1;
571 }
572 while (memno < 3 && hose->mem_resources[memno].flags)
573 ++memno;
574 if (memno == 0)
575 hose->pci_mem_offset = ranges[na+2] - ranges[2];
576 if (memno < 3) {
577 res = &hose->mem_resources[memno];
578 res->flags = IORESOURCE_MEM;
579 res->start = ranges[na+2];
580 }
581 break;
582 }
583 if (res != NULL) {
584 res->name = dev->full_name;
585 res->end = res->start + size - 1;
586 res->parent = NULL;
587 res->sibling = NULL;
588 res->child = NULL;
589 }
590 ranges += np;
591 }
592}
593
594/*
595 * We assume that if we have a G3 powermac, we have one bridge called
596 * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
597 * if we have one or more bandit or chaos bridges, we don't have a MPC106.
598 */
599static int __init add_bridge(struct device_node *dev)
600{
601 int len;
602 struct pci_controller *hose;
603 char* disp_name;
604 int *bus_range;
605 int primary = 1;
606 struct property *of_prop;
607
608 DBG("Adding PCI host bridge %s\n", dev->full_name);
609
610 bus_range = (int *) get_property(dev, "bus-range", &len);
611 if (bus_range == NULL || len < 2 * sizeof(int)) {
612 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
613 dev->full_name);
614 }
615
616 hose = alloc_bootmem(sizeof(struct pci_controller));
617 if (hose == NULL)
618 return -ENOMEM;
619 pci_setup_pci_controller(hose);
620
621 hose->arch_data = dev;
622 hose->first_busno = bus_range ? bus_range[0] : 0;
623 hose->last_busno = bus_range ? bus_range[1] : 0xff;
624
625 of_prop = alloc_bootmem(sizeof(struct property) +
626 sizeof(hose->global_number));
627 if (of_prop) {
628 memset(of_prop, 0, sizeof(struct property));
629 of_prop->name = "linux,pci-domain";
630 of_prop->length = sizeof(hose->global_number);
631 of_prop->value = (unsigned char *)&of_prop[1];
632 memcpy(of_prop->value, &hose->global_number, sizeof(hose->global_number));
633 prom_add_property(dev, of_prop);
634 }
635
636 disp_name = NULL;
637 if (device_is_compatible(dev, "u3-agp")) {
638 setup_u3_agp(hose);
639 disp_name = "U3-AGP";
640 primary = 0;
641 } else if (device_is_compatible(dev, "u3-ht")) {
642 setup_u3_ht(hose);
643 disp_name = "U3-HT";
644 primary = 1;
645 }
646 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
647 disp_name, hose->first_busno, hose->last_busno);
648
649 /* Interpret the "ranges" property */
650 /* This also maps the I/O region and sets isa_io/mem_base */
651 pmac_process_bridge_OF_ranges(hose, dev, primary);
652
653 /* Fixup "bus-range" OF property */
654 fixup_bus_range(dev);
655
656 return 0;
657}
658
659/*
660 * We use our own read_irq_line here because PCI_INTERRUPT_PIN is
661 * crap on some of Apple ASICs. We unconditionally use the Open Firmware
662 * interrupt number as this is always right.
663 */
664static int pmac_pci_read_irq_line(struct pci_dev *pci_dev)
665{
666 struct device_node *node;
667
668 node = pci_device_to_OF_node(pci_dev);
669 if (node == NULL)
670 return -1;
671 if (node->n_intrs == 0)
672 return -1;
673 pci_dev->irq = node->intrs[0].line;
674 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
675
676 return 0;
677}
678
/*
 * Post-probe PCI fixup: walk every discovered PCI device and replace
 * its irq with the Open Firmware interrupt number (see
 * pmac_pci_read_irq_line above).
 */
679void __init pmac_pcibios_fixup(void)
680{
681 struct pci_dev *dev = NULL;
682
683 for_each_pci_dev(dev)
684 pmac_pci_read_irq_line(dev);
685}
686
/*
 * Rebase each host bridge's I/O resource from bus-relative offsets to
 * virtual addresses by adding (io_base_virt - pci_io_base), and log the
 * resulting window.  Called once from pmac_pci_init() because the
 * common code only does this for children of the host bridges.
 */
687static void __init pmac_fixup_phb_resources(void)
688{
689 struct pci_controller *hose, *tmp;
690
691 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
692 unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
693 hose->io_resource.start += offset;
694 hose->io_resource.end += offset;
695 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
696 hose->global_number,
697 hose->io_resource.start, hose->io_resource.end);
698 }
699}
700
/*
 * Top-level PowerMac PCI initialization: probe the U3 host bridges
 * (AGP first, then HyperTransport -- HT's resource carving depends on
 * the AGP resources already being set up), fix up PHB I/O resources,
 * link OF nodes to PHBs, and renumber the AGP bus nodes to 0xf0.
 */
701void __init pmac_pci_init(void)
702{
703 struct device_node *np, *root;
704 struct device_node *ht = NULL;
705
706 /* Probe root PCI hosts, that is on U3 the AGP host and the
707 * HyperTransport host. That one is actually "kept" around
708 * and actually added last as it's resource management relies
709 * on the AGP resources to have been setup first
710 */
711 root = of_find_node_by_path("/");
712 if (root == NULL) {
713 printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
714 return;
715 }
716 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
717 if (np->name == NULL)
718 continue;
719 if (strcmp(np->name, "pci") == 0) {
 /* Keep a reference on successfully added bridges */
720 if (add_bridge(np) == 0)
721 of_node_get(np);
722 }
723 if (strcmp(np->name, "ht") == 0) {
724 of_node_get(np);
725 ht = np;
726 }
727 }
728 of_node_put(root);
729
730 /* Now setup the HyperTransport host if we found any
731 */
 /* On failure the extra reference taken above is dropped; on
 * success it is deliberately kept for the lifetime of the hose */
732 if (ht && add_bridge(ht) != 0)
733 of_node_put(ht);
734
735 /* Fixup the IO resources on our host bridges as the common code
736 * does it only for childs of the host bridges
737 */
738 pmac_fixup_phb_resources();
739
740 /* Setup the linkage between OF nodes and PHBs */
741 pci_devs_phb_init();
742
743 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
744 * assume there is no P2P bridge on the AGP bus, which should be a
745 * safe assumptions hopefully.
746 */
747 if (u3_agp) {
748 struct device_node *np = u3_agp->arch_data;
749 PCI_DN(np)->busno = 0xf0;
750 for (np = np->child; np; np = np->sibling)
751 PCI_DN(np)->busno = 0xf0;
752 }
753
754 pmac_check_ht_link();
755
756 /* Tell pci.c to not use the common resource allocation mecanism */
757 pci_probe_only = 1;
758
759 /* Allow all IO */
760 io_page_mask = -1;
761}
762
763/*
764 * Disable second function on K2-SATA, it's broken
765 * and disable IO BARs on first one
766 */
767static void fixup_k2_sata(struct pci_dev* dev)
768{
769 int i;
770 u16 cmd;
771
772 if (PCI_FUNC(dev->devfn) > 0) {
773 pci_read_config_word(dev, PCI_COMMAND, &cmd);
774 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
775 pci_write_config_word(dev, PCI_COMMAND, cmd);
776 for (i = 0; i < 6; i++) {
777 dev->resource[i].start = dev->resource[i].end = 0;
778 dev->resource[i].flags = 0;
779 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
780 }
781 } else {
782 pci_read_config_word(dev, PCI_COMMAND, &cmd);
783 cmd &= ~PCI_COMMAND_IO;
784 pci_write_config_word(dev, PCI_COMMAND, cmd);
785 for (i = 0; i < 5; i++) {
786 dev->resource[i].start = dev->resource[i].end = 0;
787 dev->resource[i].flags = 0;
788 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
789 }
790 }
791}
792DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
793
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
deleted file mode 100644
index fa8121d53b89..000000000000
--- a/arch/ppc64/kernel/pmac_setup.c
+++ /dev/null
@@ -1,525 +0,0 @@
1/*
2 * arch/ppc/platforms/setup.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Adapted for Power Macintosh by Paul Mackerras
8 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
9 *
10 * Derived from "arch/alpha/kernel/setup.c"
11 * Copyright (C) 1995 Linus Torvalds
12 *
13 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22/*
23 * bootup setup stuff..
24 */
25
26#undef DEBUG
27
28#include <linux/config.h>
29#include <linux/init.h>
30#include <linux/errno.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/mm.h>
34#include <linux/stddef.h>
35#include <linux/unistd.h>
36#include <linux/ptrace.h>
37#include <linux/slab.h>
38#include <linux/user.h>
39#include <linux/a.out.h>
40#include <linux/tty.h>
41#include <linux/string.h>
42#include <linux/delay.h>
43#include <linux/ioport.h>
44#include <linux/major.h>
45#include <linux/initrd.h>
46#include <linux/vt_kern.h>
47#include <linux/console.h>
48#include <linux/ide.h>
49#include <linux/pci.h>
50#include <linux/adb.h>
51#include <linux/cuda.h>
52#include <linux/pmu.h>
53#include <linux/irq.h>
54#include <linux/seq_file.h>
55#include <linux/root_dev.h>
56#include <linux/bitops.h>
57
58#include <asm/processor.h>
59#include <asm/sections.h>
60#include <asm/prom.h>
61#include <asm/system.h>
62#include <asm/io.h>
63#include <asm/pci-bridge.h>
64#include <asm/iommu.h>
65#include <asm/machdep.h>
66#include <asm/dma.h>
67#include <asm/btext.h>
68#include <asm/cputable.h>
69#include <asm/pmac_feature.h>
70#include <asm/time.h>
71#include <asm/of_device.h>
72#include <asm/lmb.h>
73#include <asm/smu.h>
74#include <asm/pmc.h>
75
76#include "pmac.h"
77#include "mpic.h"
78
79#ifdef DEBUG
80#define DBG(fmt...) udbg_printf(fmt)
81#else
82#define DBG(fmt...)
83#endif
84
85static int current_root_goodness = -1;
86#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
87
88extern int powersave_nap;
89int sccdbg;
90
91sys_ctrler_t sys_ctrler;
92EXPORT_SYMBOL(sys_ctrler);
93
94#ifdef CONFIG_PMAC_SMU
95unsigned long smu_cmdbuf_abs;
96EXPORT_SYMBOL(smu_cmdbuf_abs);
97#endif
98
99extern void udbg_init_scc(struct device_node *np);
100
101static void __pmac pmac_show_cpuinfo(struct seq_file *m)
102{
103 struct device_node *np;
104 char *pp;
105 int plen;
106 char* mbname;
107 int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
108 PMAC_MB_INFO_MODEL, 0);
109 unsigned int mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
110 PMAC_MB_INFO_FLAGS, 0);
111
112 if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
113 (long)&mbname) != 0)
114 mbname = "Unknown";
115
116 /* find motherboard type */
117 seq_printf(m, "machine\t\t: ");
118 np = of_find_node_by_path("/");
119 if (np != NULL) {
120 pp = (char *) get_property(np, "model", NULL);
121 if (pp != NULL)
122 seq_printf(m, "%s\n", pp);
123 else
124 seq_printf(m, "PowerMac\n");
125 pp = (char *) get_property(np, "compatible", &plen);
126 if (pp != NULL) {
127 seq_printf(m, "motherboard\t:");
128 while (plen > 0) {
129 int l = strlen(pp) + 1;
130 seq_printf(m, " %s", pp);
131 plen -= l;
132 pp += l;
133 }
134 seq_printf(m, "\n");
135 }
136 of_node_put(np);
137 } else
138 seq_printf(m, "PowerMac\n");
139
140 /* print parsed model */
141 seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
142 seq_printf(m, "pmac flags\t: %08x\n", mbflags);
143
144 /* Indicate newworld */
145 seq_printf(m, "pmac-generation\t: NewWorld\n");
146}
147
148
149static void __init pmac_setup_arch(void)
150{
151 /* init to some ~sane value until calibrate_delay() runs */
152 loops_per_jiffy = 50000000;
153
154 /* Probe motherboard chipset */
155 pmac_feature_init();
156#if 0
157 /* Lock-enable the SCC channel used for debug */
158 if (sccdbg) {
159 np = of_find_node_by_name(NULL, "escc");
160 if (np)
161 pmac_call_feature(PMAC_FTR_SCC_ENABLE, np,
162 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
163 }
164#endif
165 /* We can NAP */
166 powersave_nap = 1;
167
168#ifdef CONFIG_ADB_PMU
169 /* Initialize the PMU if any */
170 find_via_pmu();
171#endif
172#ifdef CONFIG_PMAC_SMU
173 /* Initialize the SMU if any */
174 smu_init();
175#endif
176
177 /* Init NVRAM access */
178 pmac_nvram_init();
179
180 /* Setup SMP callback */
181#ifdef CONFIG_SMP
182 pmac_setup_smp();
183#endif
184
185 /* Lookup PCI hosts */
186 pmac_pci_init();
187
188#ifdef CONFIG_DUMMY_CONSOLE
189 conswitchp = &dummy_con;
190#endif
191
192 printk(KERN_INFO "Using native/NAP idle loop\n");
193}
194
195#ifdef CONFIG_SCSI
196void note_scsi_host(struct device_node *node, void *host)
197{
198 /* Obsolete */
199}
200#endif
201
202
203static int initializing = 1;
204
205static int pmac_late_init(void)
206{
207 initializing = 0;
208 return 0;
209}
210
211late_initcall(pmac_late_init);
212
213/* can't be __init - can be called whenever a disk is first accessed */
214void __pmac note_bootable_part(dev_t dev, int part, int goodness)
215{
216 extern dev_t boot_dev;
217 char *p;
218
219 if (!initializing)
220 return;
221 if ((goodness <= current_root_goodness) &&
222 ROOT_DEV != DEFAULT_ROOT_DEVICE)
223 return;
224 p = strstr(saved_command_line, "root=");
225 if (p != NULL && (p == saved_command_line || p[-1] == ' '))
226 return;
227
228 if (!boot_dev || dev == boot_dev) {
229 ROOT_DEV = dev + part;
230 boot_dev = 0;
231 current_root_goodness = goodness;
232 }
233}
234
235static void __pmac pmac_restart(char *cmd)
236{
237 switch(sys_ctrler) {
238#ifdef CONFIG_ADB_PMU
239 case SYS_CTRLER_PMU:
240 pmu_restart();
241 break;
242#endif
243
244#ifdef CONFIG_PMAC_SMU
245 case SYS_CTRLER_SMU:
246 smu_restart();
247 break;
248#endif
249 default:
250 ;
251 }
252}
253
254static void __pmac pmac_power_off(void)
255{
256 switch(sys_ctrler) {
257#ifdef CONFIG_ADB_PMU
258 case SYS_CTRLER_PMU:
259 pmu_shutdown();
260 break;
261#endif
262#ifdef CONFIG_PMAC_SMU
263 case SYS_CTRLER_SMU:
264 smu_shutdown();
265 break;
266#endif
267 default:
268 ;
269 }
270}
271
272static void __pmac pmac_halt(void)
273{
274 pmac_power_off();
275}
276
277#ifdef CONFIG_BOOTX_TEXT
278static void btext_putc(unsigned char c)
279{
280 btext_drawchar(c);
281}
282
283static void __init init_boot_display(void)
284{
285 char *name;
286 struct device_node *np = NULL;
287 int rc = -ENODEV;
288
289 printk("trying to initialize btext ...\n");
290
291 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
292 if (name != NULL) {
293 np = of_find_node_by_path(name);
294 if (np != NULL) {
295 if (strcmp(np->type, "display") != 0) {
296 printk("boot stdout isn't a display !\n");
297 of_node_put(np);
298 np = NULL;
299 }
300 }
301 }
302 if (np)
303 rc = btext_initialize(np);
304 if (rc == 0)
305 return;
306
307 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
308 if (get_property(np, "linux,opened", NULL)) {
309 printk("trying %s ...\n", np->full_name);
310 rc = btext_initialize(np);
311 printk("result: %d\n", rc);
312 }
313 if (rc == 0)
314 return;
315 }
316}
317#endif /* CONFIG_BOOTX_TEXT */
318
319/*
320 * Early initialization.
321 */
322static void __init pmac_init_early(void)
323{
324 DBG(" -> pmac_init_early\n");
325
326 /* Initialize hash table, from now on, we can take hash faults
327 * and call ioremap
328 */
329 hpte_init_native();
330
331 /* Init SCC */
332 if (strstr(cmd_line, "sccdbg")) {
333 sccdbg = 1;
334 udbg_init_scc(NULL);
335 }
336#ifdef CONFIG_BOOTX_TEXT
337 else {
338 init_boot_display();
339
340 udbg_putc = btext_putc;
341 }
342#endif /* CONFIG_BOOTX_TEXT */
343
344 /* Setup interrupt mapping options */
345 ppc64_interrupt_controller = IC_OPEN_PIC;
346
347 iommu_init_early_u3();
348
349 DBG(" <- pmac_init_early\n");
350}
351
352static int pmac_u3_cascade(struct pt_regs *regs, void *data)
353{
354 return mpic_get_one_irq((struct mpic *)data, regs);
355}
356
357static __init void pmac_init_IRQ(void)
358{
359 struct device_node *irqctrler = NULL;
360 struct device_node *irqctrler2 = NULL;
361 struct device_node *np = NULL;
362 struct mpic *mpic1, *mpic2;
363
364 /* We first try to detect Apple's new Core99 chipset, since mac-io
365 * is quite different on those machines and contains an IBM MPIC2.
366 */
367 while ((np = of_find_node_by_type(np, "open-pic")) != NULL) {
368 struct device_node *parent = of_get_parent(np);
369 if (parent && !strcmp(parent->name, "u3"))
370 irqctrler2 = of_node_get(np);
371 else
372 irqctrler = of_node_get(np);
373 of_node_put(parent);
374 }
375 if (irqctrler != NULL && irqctrler->n_addrs > 0) {
376 unsigned char senses[128];
377
378 printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
379 (unsigned int)irqctrler->addrs[0].address);
380
381 prom_get_irq_senses(senses, 0, 128);
382 mpic1 = mpic_alloc(irqctrler->addrs[0].address,
383 MPIC_PRIMARY | MPIC_WANTS_RESET,
384 0, 0, 128, 256, senses, 128, " K2-MPIC ");
385 BUG_ON(mpic1 == NULL);
386 mpic_init(mpic1);
387
388 if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
389 irqctrler2->n_addrs > 0) {
390 printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
391 (u32)irqctrler2->addrs[0].address,
392 irqctrler2->intrs[0].line);
393
394 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
395 prom_get_irq_senses(senses, 128, 128 + 128);
396
397 /* We don't need to set MPIC_BROKEN_U3 here since we don't have
398 * hypertransport interrupts routed to it
399 */
400 mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
401 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
402 0, 128, 128, 0, senses, 128, " U3-MPIC ");
403 BUG_ON(mpic2 == NULL);
404 mpic_init(mpic2);
405 mpic_setup_cascade(irqctrler2->intrs[0].line,
406 pmac_u3_cascade, mpic2);
407 }
408 }
409 of_node_put(irqctrler);
410 of_node_put(irqctrler2);
411}
412
413static void __init pmac_progress(char *s, unsigned short hex)
414{
415 if (sccdbg) {
416 udbg_puts(s);
417 udbg_puts("\n");
418 }
419#ifdef CONFIG_BOOTX_TEXT
420 else if (boot_text_mapped) {
421 btext_drawstring(s);
422 btext_drawstring("\n");
423 }
424#endif /* CONFIG_BOOTX_TEXT */
425}
426
427/*
428 * pmac has no legacy IO, anything calling this function has to
429 * fail or bad things will happen
430 */
431static int pmac_check_legacy_ioport(unsigned int baseport)
432{
433 return -ENODEV;
434}
435
436static int __init pmac_declare_of_platform_devices(void)
437{
438 struct device_node *np, *npp;
439
440 npp = of_find_node_by_name(NULL, "u3");
441 if (npp) {
442 for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
443 if (strncmp(np->name, "i2c", 3) == 0) {
444 of_platform_device_create(np, "u3-i2c", NULL);
445 of_node_put(np);
446 break;
447 }
448 }
449 of_node_put(npp);
450 }
451 npp = of_find_node_by_type(NULL, "smu");
452 if (npp) {
453 of_platform_device_create(npp, "smu", NULL);
454 of_node_put(npp);
455 }
456
457 return 0;
458}
459
460device_initcall(pmac_declare_of_platform_devices);
461
462/*
463 * Called very early, MMU is off, device-tree isn't unflattened
464 */
465static int __init pmac_probe(int platform)
466{
467 if (platform != PLATFORM_POWERMAC)
468 return 0;
469 /*
470 * On U3, the DART (iommu) must be allocated now since it
471 * has an impact on htab_initialize (due to the large page it
472 * occupies having to be broken up so the DART itself is not
473 * part of the cacheable linar mapping
474 */
475 alloc_u3_dart_table();
476
477#ifdef CONFIG_PMAC_SMU
478 /*
479 * SMU based G5s need some memory below 2Gb, at least the current
480 * driver needs that. We have to allocate it now. We allocate 4k
481 * (1 small page) for now.
482 */
483 smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
484#endif /* CONFIG_PMAC_SMU */
485
486 return 1;
487}
488
489static int pmac_probe_mode(struct pci_bus *bus)
490{
491 struct device_node *node = bus->sysdata;
492
493 /* We need to use normal PCI probing for the AGP bus,
494 since the device for the AGP bridge isn't in the tree. */
495 if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
496 return PCI_PROBE_NORMAL;
497
498 return PCI_PROBE_DEVTREE;
499}
500
501struct machdep_calls __initdata pmac_md = {
502#ifdef CONFIG_HOTPLUG_CPU
503 .cpu_die = generic_mach_cpu_die,
504#endif
505 .probe = pmac_probe,
506 .setup_arch = pmac_setup_arch,
507 .init_early = pmac_init_early,
508 .get_cpuinfo = pmac_show_cpuinfo,
509 .init_IRQ = pmac_init_IRQ,
510 .get_irq = mpic_get_irq,
511 .pcibios_fixup = pmac_pcibios_fixup,
512 .pci_probe_mode = pmac_probe_mode,
513 .restart = pmac_restart,
514 .power_off = pmac_power_off,
515 .halt = pmac_halt,
516 .get_boot_time = pmac_get_boot_time,
517 .set_rtc_time = pmac_set_rtc_time,
518 .get_rtc_time = pmac_get_rtc_time,
519 .calibrate_decr = pmac_calibrate_decr,
520 .feature_call = pmac_do_feature_call,
521 .progress = pmac_progress,
522 .check_legacy_ioport = pmac_check_legacy_ioport,
523 .idle_loop = native_idle,
524 .enable_pmcs = power4_enable_pmcs,
525};
diff --git a/arch/ppc64/kernel/pmac_smp.c b/arch/ppc64/kernel/pmac_smp.c
deleted file mode 100644
index a23de37227bf..000000000000
--- a/arch/ppc64/kernel/pmac_smp.c
+++ /dev/null
@@ -1,330 +0,0 @@
1/*
2 * SMP support for power macintosh.
3 *
4 * We support both the old "powersurge" SMP architecture
5 * and the current Core99 (G4 PowerMac) machines.
6 *
7 * Note that we don't support the very first rev. of
8 * Apple/DayStar 2 CPUs board, the one with the funky
9 * watchdog. Hopefully, none of these should be there except
10 * maybe internally to Apple. I should probably still add some
11 * code to detect this card though and disable SMP. --BenH.
12 *
13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15 *
16 * Support for DayStar quad CPU cards
17 * Copyright (C) XLR8, Inc. 1994-2000
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24
25#undef DEBUG
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/sched.h>
30#include <linux/smp.h>
31#include <linux/smp_lock.h>
32#include <linux/interrupt.h>
33#include <linux/kernel_stat.h>
34#include <linux/init.h>
35#include <linux/spinlock.h>
36#include <linux/errno.h>
37#include <linux/irq.h>
38
39#include <asm/ptrace.h>
40#include <asm/atomic.h>
41#include <asm/irq.h>
42#include <asm/page.h>
43#include <asm/pgtable.h>
44#include <asm/sections.h>
45#include <asm/io.h>
46#include <asm/prom.h>
47#include <asm/smp.h>
48#include <asm/machdep.h>
49#include <asm/pmac_feature.h>
50#include <asm/time.h>
51#include <asm/cacheflush.h>
52#include <asm/keylargo.h>
53#include <asm/pmac_low_i2c.h>
54
55#include "mpic.h"
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63extern void pmac_secondary_start_1(void);
64extern void pmac_secondary_start_2(void);
65extern void pmac_secondary_start_3(void);
66
67extern struct smp_ops_t *smp_ops;
68
69static void (*pmac_tb_freeze)(int freeze);
70static struct device_node *pmac_tb_clock_chip_host;
71static u8 pmac_tb_pulsar_addr;
72static DEFINE_SPINLOCK(timebase_lock);
73static unsigned long timebase;
74
75static void smp_core99_cypress_tb_freeze(int freeze)
76{
77 u8 data;
78 int rc;
79
80 /* Strangely, the device-tree says address is 0xd2, but darwin
81 * accesses 0xd0 ...
82 */
83 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
84 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
85 0xd0 | pmac_low_i2c_read,
86 0x81, &data, 1);
87 if (rc != 0)
88 goto bail;
89
90 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
91
92 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
93 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
94 0xd0 | pmac_low_i2c_write,
95 0x81, &data, 1);
96
97 bail:
98 if (rc != 0) {
99 printk("Cypress Timebase %s rc: %d\n",
100 freeze ? "freeze" : "unfreeze", rc);
101 panic("Timebase freeze failed !\n");
102 }
103}
104
105static void smp_core99_pulsar_tb_freeze(int freeze)
106{
107 u8 data;
108 int rc;
109
110 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
111 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
112 pmac_tb_pulsar_addr | pmac_low_i2c_read,
113 0x2e, &data, 1);
114 if (rc != 0)
115 goto bail;
116
117 data = (data & 0x88) | (freeze ? 0x11 : 0x22);
118
119 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
120 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
121 pmac_tb_pulsar_addr | pmac_low_i2c_write,
122 0x2e, &data, 1);
123 bail:
124 if (rc != 0) {
125 printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
126 freeze ? "freeze" : "unfreeze", rc);
127 panic("Timebase freeze failed !\n");
128 }
129}
130
131
132static void smp_core99_give_timebase(void)
133{
134 /* Open i2c bus for synchronous access */
135 if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
136 panic("Can't open i2c for TB sync !\n");
137
138 spin_lock(&timebase_lock);
139 (*pmac_tb_freeze)(1);
140 mb();
141 timebase = get_tb();
142 spin_unlock(&timebase_lock);
143
144 while (timebase)
145 barrier();
146
147 spin_lock(&timebase_lock);
148 (*pmac_tb_freeze)(0);
149 spin_unlock(&timebase_lock);
150
151 /* Close i2c bus */
152 pmac_low_i2c_close(pmac_tb_clock_chip_host);
153}
154
155
156static void __devinit smp_core99_take_timebase(void)
157{
158 while (!timebase)
159 barrier();
160 spin_lock(&timebase_lock);
161 set_tb(timebase >> 32, timebase & 0xffffffff);
162 timebase = 0;
163 spin_unlock(&timebase_lock);
164}
165
166
167static int __init smp_core99_probe(void)
168{
169 struct device_node *cpus;
170 struct device_node *cc;
171 int ncpus = 0;
172
173 /* Maybe use systemconfiguration here ? */
174 if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
175
176 /* Count CPUs in the device-tree */
177 for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
178 ++ncpus;
179
180 printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
181
182 /* Nothing more to do if less than 2 of them */
183 if (ncpus <= 1)
184 return 1;
185
186 /* HW sync only on these platforms */
187 if (!machine_is_compatible("PowerMac7,2") &&
188 !machine_is_compatible("PowerMac7,3") &&
189 !machine_is_compatible("RackMac3,1"))
190 goto nohwsync;
191
192 /* Look for the clock chip */
193 for (cc = NULL; (cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL;) {
194 struct device_node *p = of_get_parent(cc);
195 u32 *reg;
196 int ok;
197 ok = p && device_is_compatible(p, "uni-n-i2c");
198 if (!ok)
199 goto next;
200 reg = (u32 *)get_property(cc, "reg", NULL);
201 if (reg == NULL)
202 goto next;
203 switch (*reg) {
204 case 0xd2:
205 if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
206 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
207 pmac_tb_pulsar_addr = 0xd2;
208 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
209 } else if (device_is_compatible(cc, "cy28508")) {
210 pmac_tb_freeze = smp_core99_cypress_tb_freeze;
211 printk(KERN_INFO "Timebase clock is Cypress chip\n");
212 }
213 break;
214 case 0xd4:
215 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
216 pmac_tb_pulsar_addr = 0xd4;
217 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
218 break;
219 }
220 if (pmac_tb_freeze != NULL) {
221 pmac_tb_clock_chip_host = p;
222 smp_ops->give_timebase = smp_core99_give_timebase;
223 smp_ops->take_timebase = smp_core99_take_timebase;
224 of_node_put(cc);
225 of_node_put(p);
226 break;
227 }
228 next:
229 of_node_put(p);
230 }
231
232 nohwsync:
233 mpic_request_ipis();
234
235 return ncpus;
236}
237
238static void __init smp_core99_kick_cpu(int nr)
239{
240 int save_vector, j;
241 unsigned long new_vector;
242 unsigned long flags;
243 volatile unsigned int *vector
244 = ((volatile unsigned int *)(KERNELBASE+0x100));
245
246 if (nr < 1 || nr > 3)
247 return;
248 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
249
250 local_irq_save(flags);
251 local_irq_disable();
252
253 /* Save reset vector */
254 save_vector = *vector;
255
256 /* Setup fake reset vector that does
257 * b .pmac_secondary_start - KERNELBASE
258 */
259 switch(nr) {
260 case 1:
261 new_vector = (unsigned long)pmac_secondary_start_1;
262 break;
263 case 2:
264 new_vector = (unsigned long)pmac_secondary_start_2;
265 break;
266 case 3:
267 default:
268 new_vector = (unsigned long)pmac_secondary_start_3;
269 break;
270 }
271 *vector = 0x48000002 + (new_vector - KERNELBASE);
272
273 /* flush data cache and inval instruction cache */
274 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
275
276 /* Put some life in our friend */
277 pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
278 paca[nr].cpu_start = 1;
279
280 /* FIXME: We wait a bit for the CPU to take the exception, I should
281 * instead wait for the entry code to set something for me. Well,
282 * ideally, all that crap will be done in prom.c and the CPU left
283 * in a RAM-based wait loop like CHRP.
284 */
285 for (j = 1; j < 1000000; j++)
286 mb();
287
288 /* Restore our exception vector */
289 *vector = save_vector;
290 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
291
292 local_irq_restore(flags);
293 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
294}
295
296static void __init smp_core99_setup_cpu(int cpu_nr)
297{
298 /* Setup MPIC */
299 mpic_setup_this_cpu();
300
301 if (cpu_nr == 0) {
302 extern void g5_phy_disable_cpu1(void);
303
304 /* If we didn't start the second CPU, we must take
305 * it off the bus
306 */
307 if (num_online_cpus() < 2)
308 g5_phy_disable_cpu1();
309 if (ppc_md.progress) ppc_md.progress("smp_core99_setup_cpu 0 done", 0x349);
310 }
311}
312
313struct smp_ops_t core99_smp_ops __pmacdata = {
314 .message_pass = smp_mpic_message_pass,
315 .probe = smp_core99_probe,
316 .kick_cpu = smp_core99_kick_cpu,
317 .setup_cpu = smp_core99_setup_cpu,
318 .give_timebase = smp_generic_give_timebase,
319 .take_timebase = smp_generic_take_timebase,
320};
321
322void __init pmac_setup_smp(void)
323{
324 smp_ops = &core99_smp_ops;
325#ifdef CONFIG_HOTPLUG_CPU
326 smp_ops->cpu_enable = generic_cpu_enable;
327 smp_ops->cpu_disable = generic_cpu_disable;
328 smp_ops->cpu_die = generic_cpu_die;
329#endif
330}
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
deleted file mode 100644
index 41bbb8c59697..000000000000
--- a/arch/ppc64/kernel/pmac_time.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * Support for periodic interrupts (100 per second) and for getting
3 * the current time from the RTC on Power Macintoshes.
4 *
5 * We use the decrementer register for our periodic interrupts.
6 *
7 * Paul Mackerras August 1996.
8 * Copyright (C) 1996 Paul Mackerras.
9 * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
10 *
11 */
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/init.h>
20#include <linux/time.h>
21#include <linux/adb.h>
22#include <linux/pmu.h>
23#include <linux/interrupt.h>
24
25#include <asm/sections.h>
26#include <asm/prom.h>
27#include <asm/system.h>
28#include <asm/io.h>
29#include <asm/pgtable.h>
30#include <asm/machdep.h>
31#include <asm/time.h>
32#include <asm/nvram.h>
33#include <asm/smu.h>
34
35#undef DEBUG
36
37#ifdef DEBUG
38#define DBG(x...) printk(x)
39#else
40#define DBG(x...)
41#endif
42
43/* Apparently the RTC stores seconds since 1 Jan 1904 */
44#define RTC_OFFSET 2082844800
45
46/*
47 * Calibrate the decrementer frequency with the VIA timer 1.
48 */
49#define VIA_TIMER_FREQ_6 4700000 /* time 1 frequency * 6 */
50
51extern struct timezone sys_tz;
52extern void to_tm(int tim, struct rtc_time * tm);
53
54void __pmac pmac_get_rtc_time(struct rtc_time *tm)
55{
56 switch(sys_ctrler) {
57#ifdef CONFIG_ADB_PMU
58 case SYS_CTRLER_PMU: {
59 /* TODO: Move that to a function in the PMU driver */
60 struct adb_request req;
61 unsigned int now;
62
63 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
64 return;
65 pmu_wait_complete(&req);
66 if (req.reply_len != 4)
67 printk(KERN_ERR "pmac_get_rtc_time: PMU returned a %d"
68 " bytes reply\n", req.reply_len);
69 now = (req.reply[0] << 24) + (req.reply[1] << 16)
70 + (req.reply[2] << 8) + req.reply[3];
71 DBG("get: %u -> %u\n", (int)now, (int)(now - RTC_OFFSET));
72 now -= RTC_OFFSET;
73
74 to_tm(now, tm);
75 tm->tm_year -= 1900;
76 tm->tm_mon -= 1;
77
78 DBG("-> tm_mday: %d, tm_mon: %d, tm_year: %d, %d:%02d:%02d\n",
79 tm->tm_mday, tm->tm_mon, tm->tm_year,
80 tm->tm_hour, tm->tm_min, tm->tm_sec);
81 break;
82 }
83#endif /* CONFIG_ADB_PMU */
84
85#ifdef CONFIG_PMAC_SMU
86 case SYS_CTRLER_SMU:
87 smu_get_rtc_time(tm, 1);
88 break;
89#endif /* CONFIG_PMAC_SMU */
90 default:
91 ;
92 }
93}
94
95int __pmac pmac_set_rtc_time(struct rtc_time *tm)
96{
97 switch(sys_ctrler) {
98#ifdef CONFIG_ADB_PMU
99 case SYS_CTRLER_PMU: {
100 /* TODO: Move that to a function in the PMU driver */
101 struct adb_request req;
102 unsigned int nowtime;
103
104 DBG("set: tm_mday: %d, tm_mon: %d, tm_year: %d,"
105 " %d:%02d:%02d\n",
106 tm->tm_mday, tm->tm_mon, tm->tm_year,
107 tm->tm_hour, tm->tm_min, tm->tm_sec);
108
109 nowtime = mktime(tm->tm_year + 1900, tm->tm_mon + 1,
110 tm->tm_mday, tm->tm_hour, tm->tm_min,
111 tm->tm_sec);
112
113 DBG("-> %u -> %u\n", (int)nowtime,
114 (int)(nowtime + RTC_OFFSET));
115 nowtime += RTC_OFFSET;
116
117 if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
118 nowtime >> 24, nowtime >> 16,
119 nowtime >> 8, nowtime) < 0)
120 return -ENXIO;
121 pmu_wait_complete(&req);
122 if (req.reply_len != 0)
123 printk(KERN_ERR "pmac_set_rtc_time: PMU returned a %d"
124 " bytes reply\n", req.reply_len);
125 return 0;
126 }
127#endif /* CONFIG_ADB_PMU */
128
129#ifdef CONFIG_PMAC_SMU
130 case SYS_CTRLER_SMU:
131 return smu_set_rtc_time(tm, 1);
132#endif /* CONFIG_PMAC_SMU */
133 default:
134 return -ENODEV;
135 }
136}
137
138void __init pmac_get_boot_time(struct rtc_time *tm)
139{
140 pmac_get_rtc_time(tm);
141
142#ifdef disabled__CONFIG_NVRAM
143 s32 delta = 0;
144 int dst;
145
146 delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
147 delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
148 delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
149 if (delta & 0x00800000UL)
150 delta |= 0xFF000000UL;
151 dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
152 printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
153 dst ? "on" : "off");
154#endif
155}
156
157/*
158 * Query the OF and get the decr frequency.
159 * FIXME: merge this with generic_calibrate_decr
160 */
161void __init pmac_calibrate_decr(void)
162{
163 struct device_node *cpu;
164 unsigned int freq, *fp;
165 struct div_result divres;
166
167 /*
168 * The cpu node should have a timebase-frequency property
169 * to tell us the rate at which the decrementer counts.
170 */
171 cpu = find_type_devices("cpu");
172 if (cpu == 0)
173 panic("can't find cpu node in time_init");
174 fp = (unsigned int *) get_property(cpu, "timebase-frequency", NULL);
175 if (fp == 0)
176 panic("can't get cpu timebase frequency");
177 freq = *fp;
178 printk("time_init: decrementer frequency = %u.%.6u MHz\n",
179 freq/1000000, freq%1000000);
180 tb_ticks_per_jiffy = freq / HZ;
181 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
182 tb_ticks_per_usec = freq / 1000000;
183 tb_to_us = mulhwu_scale_factor(freq, 1000000);
184 div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
185 tb_to_xs = divres.result_low;
186 ppc_tb_freq = freq;
187
188 fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL);
189 if (fp == 0)
190 panic("can't get cpu processor frequency");
191 ppc_proc_freq = *fp;
192
193 setup_default_decr();
194}
195
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
index 705742f4eec6..84006e26342c 100644
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ b/arch/ppc64/kernel/ppc_ksyms.c
@@ -19,7 +19,6 @@
19#include <asm/hw_irq.h> 19#include <asm/hw_irq.h>
20#include <asm/abs_addr.h> 20#include <asm/abs_addr.h>
21#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22#include <asm/iSeries/HvCallSc.h>
23 22
24EXPORT_SYMBOL(strcpy); 23EXPORT_SYMBOL(strcpy);
25EXPORT_SYMBOL(strncpy); 24EXPORT_SYMBOL(strncpy);
@@ -46,17 +45,6 @@ EXPORT_SYMBOL(__strnlen_user);
46 45
47EXPORT_SYMBOL(reloc_offset); 46EXPORT_SYMBOL(reloc_offset);
48 47
49#ifdef CONFIG_PPC_ISERIES
50EXPORT_SYMBOL(HvCall0);
51EXPORT_SYMBOL(HvCall1);
52EXPORT_SYMBOL(HvCall2);
53EXPORT_SYMBOL(HvCall3);
54EXPORT_SYMBOL(HvCall4);
55EXPORT_SYMBOL(HvCall5);
56EXPORT_SYMBOL(HvCall6);
57EXPORT_SYMBOL(HvCall7);
58#endif
59
60EXPORT_SYMBOL(_insb); 48EXPORT_SYMBOL(_insb);
61EXPORT_SYMBOL(_outsb); 49EXPORT_SYMBOL(_outsb);
62EXPORT_SYMBOL(_insw); 50EXPORT_SYMBOL(_insw);
@@ -77,14 +65,6 @@ EXPORT_SYMBOL(giveup_altivec);
77EXPORT_SYMBOL(__flush_icache_range); 65EXPORT_SYMBOL(__flush_icache_range);
78EXPORT_SYMBOL(flush_dcache_range); 66EXPORT_SYMBOL(flush_dcache_range);
79 67
80#ifdef CONFIG_SMP
81#ifdef CONFIG_PPC_ISERIES
82EXPORT_SYMBOL(local_get_flags);
83EXPORT_SYMBOL(local_irq_disable);
84EXPORT_SYMBOL(local_irq_restore);
85#endif
86#endif
87
88EXPORT_SYMBOL(memcpy); 68EXPORT_SYMBOL(memcpy);
89EXPORT_SYMBOL(memset); 69EXPORT_SYMBOL(memset);
90EXPORT_SYMBOL(memmove); 70EXPORT_SYMBOL(memmove);
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 7035deb6de92..a0866f12647f 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -78,6 +78,7 @@ typedef int interpret_func(struct device_node *, unsigned long *,
78extern struct rtas_t rtas; 78extern struct rtas_t rtas;
79extern struct lmb lmb; 79extern struct lmb lmb;
80extern unsigned long klimit; 80extern unsigned long klimit;
81extern unsigned long memory_limit;
81 82
82static int __initdata dt_root_addr_cells; 83static int __initdata dt_root_addr_cells;
83static int __initdata dt_root_size_cells; 84static int __initdata dt_root_size_cells;
@@ -1063,7 +1064,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1063{ 1064{
1064 u32 *prop; 1065 u32 *prop;
1065 u64 *prop64; 1066 u64 *prop64;
1066 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end; 1067 extern unsigned long tce_alloc_start, tce_alloc_end;
1067 1068
1068 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 1069 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1069 1070
@@ -1237,7 +1238,7 @@ void __init early_init_devtree(void *params)
1237 lmb_init(); 1238 lmb_init();
1238 scan_flat_dt(early_init_dt_scan_root, NULL); 1239 scan_flat_dt(early_init_dt_scan_root, NULL);
1239 scan_flat_dt(early_init_dt_scan_memory, NULL); 1240 scan_flat_dt(early_init_dt_scan_memory, NULL);
1240 lmb_enforce_memory_limit(); 1241 lmb_enforce_memory_limit(memory_limit);
1241 lmb_analyze(); 1242 lmb_analyze();
1242 systemcfg->physicalMemorySize = lmb_phys_mem_size(); 1243 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1243 lmb_reserve(0, __pa(klimit)); 1244 lmb_reserve(0, __pa(klimit));
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
deleted file mode 100644
index b1c044ca5756..000000000000
--- a/arch/ppc64/kernel/ptrace.c
+++ /dev/null
@@ -1,363 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/ptrace.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/m68k/kernel/ptrace.c"
8 * Copyright (C) 1994 by Hamish Macdonald
9 * Taken from linux/kernel/ptrace.c and modified for M680x0.
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au).
14 *
15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of
17 * this archive for more details.
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/errno.h>
27#include <linux/ptrace.h>
28#include <linux/user.h>
29#include <linux/security.h>
30#include <linux/audit.h>
31#include <linux/seccomp.h>
32#include <linux/signal.h>
33
34#include <asm/uaccess.h>
35#include <asm/page.h>
36#include <asm/pgtable.h>
37#include <asm/system.h>
38#include <asm/ptrace-common.h>
39
40/*
41 * does not yet catch signals sent when the child dies.
42 * in exit.c or in signal.c.
43 */
44
45/*
46 * Called by kernel/ptrace.c when detaching..
47 *
48 * Make sure single step bits etc are not set.
49 */
50void ptrace_disable(struct task_struct *child)
51{
52 /* make sure the single step bit is not set. */
53 clear_single_step(child);
54}
55
56int sys_ptrace(long request, long pid, long addr, long data)
57{
58 struct task_struct *child;
59 int ret = -EPERM;
60
61 lock_kernel();
62 if (request == PTRACE_TRACEME) {
63 /* are we already being traced? */
64 if (current->ptrace & PT_PTRACED)
65 goto out;
66 ret = security_ptrace(current->parent, current);
67 if (ret)
68 goto out;
69 /* set the ptrace bit in the process flags. */
70 current->ptrace |= PT_PTRACED;
71 ret = 0;
72 goto out;
73 }
74 ret = -ESRCH;
75 read_lock(&tasklist_lock);
76 child = find_task_by_pid(pid);
77 if (child)
78 get_task_struct(child);
79 read_unlock(&tasklist_lock);
80 if (!child)
81 goto out;
82
83 ret = -EPERM;
84 if (pid == 1) /* you may not mess with init */
85 goto out_tsk;
86
87 if (request == PTRACE_ATTACH) {
88 ret = ptrace_attach(child);
89 goto out_tsk;
90 }
91
92 ret = ptrace_check_attach(child, request == PTRACE_KILL);
93 if (ret < 0)
94 goto out_tsk;
95
96 switch (request) {
97 /* when I and D space are separate, these will need to be fixed. */
98 case PTRACE_PEEKTEXT: /* read word at location addr. */
99 case PTRACE_PEEKDATA: {
100 unsigned long tmp;
101 int copied;
102
103 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
104 ret = -EIO;
105 if (copied != sizeof(tmp))
106 break;
107 ret = put_user(tmp,(unsigned long __user *) data);
108 break;
109 }
110
111 /* read the word at location addr in the USER area. */
112 case PTRACE_PEEKUSR: {
113 unsigned long index;
114 unsigned long tmp;
115
116 ret = -EIO;
117 /* convert to index and check */
118 index = (unsigned long) addr >> 3;
119 if ((addr & 7) || (index > PT_FPSCR))
120 break;
121
122 if (index < PT_FPR0) {
123 tmp = get_reg(child, (int)index);
124 } else {
125 flush_fp_to_thread(child);
126 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
127 }
128 ret = put_user(tmp,(unsigned long __user *) data);
129 break;
130 }
131
132 /* If I and D space are separate, this will have to be fixed. */
133 case PTRACE_POKETEXT: /* write the word at location addr. */
134 case PTRACE_POKEDATA:
135 ret = 0;
136 if (access_process_vm(child, addr, &data, sizeof(data), 1)
137 == sizeof(data))
138 break;
139 ret = -EIO;
140 break;
141
142 /* write the word at location addr in the USER area */
143 case PTRACE_POKEUSR: {
144 unsigned long index;
145
146 ret = -EIO;
147 /* convert to index and check */
148 index = (unsigned long) addr >> 3;
149 if ((addr & 7) || (index > PT_FPSCR))
150 break;
151
152 if (index == PT_ORIG_R3)
153 break;
154 if (index < PT_FPR0) {
155 ret = put_reg(child, index, data);
156 } else {
157 flush_fp_to_thread(child);
158 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
159 ret = 0;
160 }
161 break;
162 }
163
164 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
165 case PTRACE_CONT: { /* restart after signal. */
166 ret = -EIO;
167 if (!valid_signal(data))
168 break;
169 if (request == PTRACE_SYSCALL)
170 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
171 else
172 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
173 child->exit_code = data;
174 /* make sure the single step bit is not set. */
175 clear_single_step(child);
176 wake_up_process(child);
177 ret = 0;
178 break;
179 }
180
181 /*
182 * make the child exit. Best I can do is send it a sigkill.
183 * perhaps it should be put in the status that it wants to
184 * exit.
185 */
186 case PTRACE_KILL: {
187 ret = 0;
188 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
189 break;
190 child->exit_code = SIGKILL;
191 /* make sure the single step bit is not set. */
192 clear_single_step(child);
193 wake_up_process(child);
194 break;
195 }
196
197 case PTRACE_SINGLESTEP: { /* set the trap flag. */
198 ret = -EIO;
199 if (!valid_signal(data))
200 break;
201 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
202 set_single_step(child);
203 child->exit_code = data;
204 /* give it a chance to run. */
205 wake_up_process(child);
206 ret = 0;
207 break;
208 }
209
210 case PTRACE_GET_DEBUGREG: {
211 ret = -EINVAL;
212 /* We only support one DABR and no IABRS at the moment */
213 if (addr > 0)
214 break;
215 ret = put_user(child->thread.dabr,
216 (unsigned long __user *)data);
217 break;
218 }
219
220 case PTRACE_SET_DEBUGREG:
221 ret = ptrace_set_debugreg(child, addr, data);
222 break;
223
224 case PTRACE_DETACH:
225 ret = ptrace_detach(child, data);
226 break;
227
228 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
229 int i;
230 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
231 unsigned long __user *tmp = (unsigned long __user *)addr;
232
233 for (i = 0; i < 32; i++) {
234 ret = put_user(*reg, tmp);
235 if (ret)
236 break;
237 reg++;
238 tmp++;
239 }
240 break;
241 }
242
243 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
244 int i;
245 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
246 unsigned long __user *tmp = (unsigned long __user *)addr;
247
248 for (i = 0; i < 32; i++) {
249 ret = get_user(*reg, tmp);
250 if (ret)
251 break;
252 reg++;
253 tmp++;
254 }
255 break;
256 }
257
258 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
259 int i;
260 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
261 unsigned long __user *tmp = (unsigned long __user *)addr;
262
263 flush_fp_to_thread(child);
264
265 for (i = 0; i < 32; i++) {
266 ret = put_user(*reg, tmp);
267 if (ret)
268 break;
269 reg++;
270 tmp++;
271 }
272 break;
273 }
274
275 case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
276 int i;
277 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
278 unsigned long __user *tmp = (unsigned long __user *)addr;
279
280 flush_fp_to_thread(child);
281
282 for (i = 0; i < 32; i++) {
283 ret = get_user(*reg, tmp);
284 if (ret)
285 break;
286 reg++;
287 tmp++;
288 }
289 break;
290 }
291
292#ifdef CONFIG_ALTIVEC
293 case PTRACE_GETVRREGS:
294 /* Get the child altivec register state. */
295 flush_altivec_to_thread(child);
296 ret = get_vrregs((unsigned long __user *)data, child);
297 break;
298
299 case PTRACE_SETVRREGS:
300 /* Set the child altivec register state. */
301 flush_altivec_to_thread(child);
302 ret = set_vrregs(child, (unsigned long __user *)data);
303 break;
304#endif
305
306 default:
307 ret = ptrace_request(child, request, addr, data);
308 break;
309 }
310out_tsk:
311 put_task_struct(child);
312out:
313 unlock_kernel();
314 return ret;
315}
316
317static void do_syscall_trace(void)
318{
319 /* the 0x80 provides a way for the tracing parent to distinguish
320 between a syscall stop and SIGTRAP delivery */
321 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
322 ? 0x80 : 0));
323
324 /*
325 * this isn't the same as continuing with a signal, but it will do
326 * for normal use. strace only continues with a signal if the
327 * stopping signal is not SIGTRAP. -brl
328 */
329 if (current->exit_code) {
330 send_sig(current->exit_code, current, 1);
331 current->exit_code = 0;
332 }
333}
334
335void do_syscall_trace_enter(struct pt_regs *regs)
336{
337 secure_computing(regs->gpr[0]);
338
339 if (test_thread_flag(TIF_SYSCALL_TRACE)
340 && (current->ptrace & PT_PTRACED))
341 do_syscall_trace();
342
343 if (unlikely(current->audit_context))
344 audit_syscall_entry(current,
345 test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
346 regs->gpr[0],
347 regs->gpr[3], regs->gpr[4],
348 regs->gpr[5], regs->gpr[6]);
349
350}
351
352void do_syscall_trace_leave(struct pt_regs *regs)
353{
354 if (unlikely(current->audit_context))
355 audit_syscall_exit(current,
356 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
357 regs->result);
358
359 if ((test_thread_flag(TIF_SYSCALL_TRACE)
360 || test_thread_flag(TIF_SINGLESTEP))
361 && (current->ptrace & PT_PTRACED))
362 do_syscall_trace();
363}
diff --git a/arch/ppc64/kernel/rtas-proc.c b/arch/ppc64/kernel/rtas-proc.c
index 1f3ff860fdf0..5bdd5b079d96 100644
--- a/arch/ppc64/kernel/rtas-proc.c
+++ b/arch/ppc64/kernel/rtas-proc.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/rtc.h>
26 27
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
diff --git a/arch/ppc64/kernel/rtas.c b/arch/ppc64/kernel/rtas.c
index 5e8eb33b8e54..36adab591bd3 100644
--- a/arch/ppc64/kernel/rtas.c
+++ b/arch/ppc64/kernel/rtas.c
@@ -30,6 +30,7 @@
30#include <asm/delay.h> 30#include <asm/delay.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <asm/systemcfg.h> 32#include <asm/systemcfg.h>
33#include <asm/ppcdebug.h>
33 34
34struct flash_block_list_header rtas_firmware_flash_list = {0, NULL}; 35struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
35 36
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 4a9719b48abe..3ad15c90fbbd 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -38,9 +38,8 @@
38#include <asm/pci-bridge.h> 38#include <asm/pci-bridge.h>
39#include <asm/iommu.h> 39#include <asm/iommu.h>
40#include <asm/rtas.h> 40#include <asm/rtas.h>
41 41#include <asm/mpic.h>
42#include "mpic.h" 42#include <asm/ppc-pci.h>
43#include "pci.h"
44 43
45/* RTAS tokens */ 44/* RTAS tokens */
46static int read_pci_config; 45static int read_pci_config;
@@ -401,7 +400,7 @@ unsigned long __init find_and_init_phbs(void)
401 if (!phb) 400 if (!phb)
402 continue; 401 continue;
403 402
404 pci_process_bridge_OF_ranges(phb, node); 403 pci_process_bridge_OF_ranges(phb, node, 0);
405 pci_setup_phb_io(phb, index == 0); 404 pci_setup_phb_io(phb, index == 0);
406#ifdef CONFIG_PPC_PSERIES 405#ifdef CONFIG_PPC_PSERIES
407 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { 406 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
@@ -451,7 +450,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
451 if (!phb) 450 if (!phb)
452 return NULL; 451 return NULL;
453 452
454 pci_process_bridge_OF_ranges(phb, dn); 453 pci_process_bridge_OF_ranges(phb, dn, primary);
455 454
456 pci_setup_phb_io_dynamic(phb, primary); 455 pci_setup_phb_io_dynamic(phb, primary);
457 of_node_put(root); 456 of_node_put(root);
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index 6ff52bc61325..79e7ed2858dd 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -43,11 +43,8 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/rtas.h> 44#include <asm/rtas.h>
45 45
46#include <asm/iSeries/mf.h>
47#include <asm/machdep.h> 46#include <asm/machdep.h>
48 47
49extern int piranha_simulator;
50
51/* 48/*
52 * We sponge a minor off of the misc major. No need slurping 49 * We sponge a minor off of the misc major. No need slurping
53 * up another valuable major dev number for this. If you add 50 * up another valuable major dev number for this. If you add
@@ -265,44 +262,10 @@ static int rtc_read_proc(char *page, char **start, off_t off,
265 return len; 262 return len;
266} 263}
267 264
268#ifdef CONFIG_PPC_ISERIES
269/*
270 * Get the RTC from the virtual service processor
271 * This requires flowing LpEvents to the primary partition
272 */
273void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
274{
275 if (piranha_simulator)
276 return;
277
278 mf_get_rtc(rtc_tm);
279 rtc_tm->tm_mon--;
280}
281
282/*
283 * Set the RTC in the virtual service processor
284 * This requires flowing LpEvents to the primary partition
285 */
286int iSeries_set_rtc_time(struct rtc_time *tm)
287{
288 mf_set_rtc(tm);
289 return 0;
290}
291
292void iSeries_get_boot_time(struct rtc_time *tm)
293{
294 if ( piranha_simulator )
295 return;
296
297 mf_get_boot_rtc(tm);
298 tm->tm_mon -= 1;
299}
300#endif
301
302#ifdef CONFIG_PPC_RTAS 265#ifdef CONFIG_PPC_RTAS
303#define MAX_RTC_WAIT 5000 /* 5 sec */ 266#define MAX_RTC_WAIT 5000 /* 5 sec */
304#define RTAS_CLOCK_BUSY (-2) 267#define RTAS_CLOCK_BUSY (-2)
305void rtas_get_boot_time(struct rtc_time *rtc_tm) 268unsigned long rtas_get_boot_time(void)
306{ 269{
307 int ret[8]; 270 int ret[8];
308 int error, wait_time; 271 int error, wait_time;
@@ -322,15 +285,10 @@ void rtas_get_boot_time(struct rtc_time *rtc_tm)
322 if (error != 0 && printk_ratelimit()) { 285 if (error != 0 && printk_ratelimit()) {
323 printk(KERN_WARNING "error: reading the clock failed (%d)\n", 286 printk(KERN_WARNING "error: reading the clock failed (%d)\n",
324 error); 287 error);
325 return; 288 return 0;
326 } 289 }
327 290
328 rtc_tm->tm_sec = ret[5]; 291 return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
329 rtc_tm->tm_min = ret[4];
330 rtc_tm->tm_hour = ret[3];
331 rtc_tm->tm_mday = ret[2];
332 rtc_tm->tm_mon = ret[1] - 1;
333 rtc_tm->tm_year = ret[0] - 1900;
334} 292}
335 293
336/* NOTE: get_rtc_time will get an error if executed in interrupt context 294/* NOTE: get_rtc_time will get an error if executed in interrupt context
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index 5ac48bd64891..44ee6ebe9a60 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -58,6 +58,7 @@
58#include <asm/mmu.h> 58#include <asm/mmu.h>
59#include <asm/lmb.h> 59#include <asm/lmb.h>
60#include <asm/iSeries/ItLpNaca.h> 60#include <asm/iSeries/ItLpNaca.h>
61#include <asm/firmware.h>
61 62
62#ifdef DEBUG 63#ifdef DEBUG
63#define DBG(fmt...) udbg_printf(fmt) 64#define DBG(fmt...) udbg_printf(fmt)
@@ -153,7 +154,7 @@ struct screen_info screen_info = {
153 .orig_video_points = 16 154 .orig_video_points = 16
154}; 155};
155 156
156#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) 157#ifdef CONFIG_SMP
157 158
158static int smt_enabled_cmdline; 159static int smt_enabled_cmdline;
159 160
@@ -306,15 +307,13 @@ static void __init setup_cpu_maps(void)
306 307
307 systemcfg->processorCount = num_present_cpus(); 308 systemcfg->processorCount = num_present_cpus();
308} 309}
309#endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */ 310#endif /* CONFIG_SMP */
310
311
312#ifdef CONFIG_PPC_MULTIPLATFORM
313 311
314extern struct machdep_calls pSeries_md; 312extern struct machdep_calls pSeries_md;
315extern struct machdep_calls pmac_md; 313extern struct machdep_calls pmac_md;
316extern struct machdep_calls maple_md; 314extern struct machdep_calls maple_md;
317extern struct machdep_calls bpa_md; 315extern struct machdep_calls bpa_md;
316extern struct machdep_calls iseries_md;
318 317
319/* Ultimately, stuff them in an elf section like initcalls... */ 318/* Ultimately, stuff them in an elf section like initcalls... */
320static struct machdep_calls __initdata *machines[] = { 319static struct machdep_calls __initdata *machines[] = {
@@ -330,6 +329,9 @@ static struct machdep_calls __initdata *machines[] = {
330#ifdef CONFIG_PPC_BPA 329#ifdef CONFIG_PPC_BPA
331 &bpa_md, 330 &bpa_md,
332#endif 331#endif
332#ifdef CONFIG_PPC_ISERIES
333 &iseries_md,
334#endif
333 NULL 335 NULL
334}; 336};
335 337
@@ -401,7 +403,8 @@ void __init early_setup(unsigned long dt_ptr)
401 /* 403 /*
402 * Initialize stab / SLB management 404 * Initialize stab / SLB management
403 */ 405 */
404 stab_initialize(lpaca->stab_real); 406 if (!firmware_has_feature(FW_FEATURE_ISERIES))
407 stab_initialize(lpaca->stab_real);
405 408
406 /* 409 /*
407 * Initialize the MMU Hash table and create the linear mapping 410 * Initialize the MMU Hash table and create the linear mapping
@@ -532,8 +535,6 @@ static void __init check_for_initrd(void)
532#endif /* CONFIG_BLK_DEV_INITRD */ 535#endif /* CONFIG_BLK_DEV_INITRD */
533} 536}
534 537
535#endif /* CONFIG_PPC_MULTIPLATFORM */
536
537/* 538/*
538 * Do some initial setup of the system. The parameters are those which 539 * Do some initial setup of the system. The parameters are those which
539 * were passed in from the bootloader. 540 * were passed in from the bootloader.
@@ -542,14 +543,6 @@ void __init setup_system(void)
542{ 543{
543 DBG(" -> setup_system()\n"); 544 DBG(" -> setup_system()\n");
544 545
545#ifdef CONFIG_PPC_ISERIES
546 /* pSeries systems are identified in prom.c via OF. */
547 if (itLpNaca.xLparInstalled == 1)
548 systemcfg->platform = PLATFORM_ISERIES_LPAR;
549
550 ppc_md.init_early();
551#else /* CONFIG_PPC_ISERIES */
552
553 /* 546 /*
554 * Unflatten the device-tree passed by prom_init or kexec 547 * Unflatten the device-tree passed by prom_init or kexec
555 */ 548 */
@@ -592,6 +585,10 @@ void __init setup_system(void)
592 */ 585 */
593 finish_device_tree(); 586 finish_device_tree();
594 587
588#ifdef CONFIG_BOOTX_TEXT
589 init_boot_display();
590#endif
591
595 /* 592 /*
596 * Initialize xmon 593 * Initialize xmon
597 */ 594 */
@@ -607,9 +604,8 @@ void __init setup_system(void)
607 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); 604 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
608 605
609 parse_early_param(); 606 parse_early_param();
610#endif /* !CONFIG_PPC_ISERIES */
611 607
612#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) 608#ifdef CONFIG_SMP
613 /* 609 /*
614 * iSeries has already initialized the cpu maps at this point. 610 * iSeries has already initialized the cpu maps at this point.
615 */ 611 */
@@ -619,7 +615,7 @@ void __init setup_system(void)
619 * we can map physical -> logical CPU ids 615 * we can map physical -> logical CPU ids
620 */ 616 */
621 smp_release_cpus(); 617 smp_release_cpus();
622#endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */ 618#endif
623 619
624 printk("Starting Linux PPC64 %s\n", system_utsname.version); 620 printk("Starting Linux PPC64 %s\n", system_utsname.version);
625 621
@@ -711,8 +707,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
711 if (cpu_id == NR_CPUS) { 707 if (cpu_id == NR_CPUS) {
712 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); 708 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
713 709
714 if (ppc_md.get_cpuinfo != NULL) 710 if (ppc_md.show_cpuinfo != NULL)
715 ppc_md.get_cpuinfo(m); 711 ppc_md.show_cpuinfo(m);
716 712
717 return 0; 713 return 0;
718 } 714 }
@@ -1047,6 +1043,10 @@ void __init setup_arch(char **cmdline_p)
1047 /* initialize the syscall map in systemcfg */ 1043 /* initialize the syscall map in systemcfg */
1048 setup_syscall_map(); 1044 setup_syscall_map();
1049 1045
1046#ifdef CONFIG_DUMMY_CONSOLE
1047 conswitchp = &dummy_con;
1048#endif
1049
1050 ppc_md.setup_arch(); 1050 ppc_md.setup_arch();
1051 1051
1052 /* Use the default idle loop if the platform hasn't provided one. */ 1052 /* Use the default idle loop if the platform hasn't provided one. */
@@ -1091,15 +1091,6 @@ void ppc64_terminate_msg(unsigned int src, const char *msg)
1091 printk("[terminate]%04x %s\n", src, msg); 1091 printk("[terminate]%04x %s\n", src, msg);
1092} 1092}
1093 1093
1094/* This should only be called on processor 0 during calibrate decr */
1095void __init setup_default_decr(void)
1096{
1097 struct paca_struct *lpaca = get_paca();
1098
1099 lpaca->default_decr = tb_ticks_per_jiffy;
1100 lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
1101}
1102
1103#ifndef CONFIG_PPC_ISERIES 1094#ifndef CONFIG_PPC_ISERIES
1104/* 1095/*
1105 * This function can be used by platforms to "find" legacy serial ports. 1096 * This function can be used by platforms to "find" legacy serial ports.
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 793b562da653..017c12919832 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -45,8 +45,7 @@
45#include <asm/cputable.h> 45#include <asm/cputable.h>
46#include <asm/system.h> 46#include <asm/system.h>
47#include <asm/abs_addr.h> 47#include <asm/abs_addr.h>
48 48#include <asm/mpic.h>
49#include "mpic.h"
50 49
51#ifdef DEBUG 50#ifdef DEBUG
52#define DBG(fmt...) udbg_printf(fmt) 51#define DBG(fmt...) udbg_printf(fmt)
@@ -70,28 +69,6 @@ void smp_call_function_interrupt(void);
70int smt_enabled_at_boot = 1; 69int smt_enabled_at_boot = 1;
71 70
72#ifdef CONFIG_MPIC 71#ifdef CONFIG_MPIC
73void smp_mpic_message_pass(int target, int msg)
74{
75 /* make sure we're sending something that translates to an IPI */
76 if ( msg > 0x3 ){
77 printk("SMP %d: smp_message_pass: unknown msg %d\n",
78 smp_processor_id(), msg);
79 return;
80 }
81 switch ( target )
82 {
83 case MSG_ALL:
84 mpic_send_ipi(msg, 0xffffffff);
85 break;
86 case MSG_ALL_BUT_SELF:
87 mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
88 break;
89 default:
90 mpic_send_ipi(msg, 1 << target);
91 break;
92 }
93}
94
95int __init smp_mpic_probe(void) 72int __init smp_mpic_probe(void)
96{ 73{
97 int nr_cpus; 74 int nr_cpus;
@@ -128,21 +105,6 @@ void __devinit smp_generic_kick_cpu(int nr)
128 105
129#endif /* CONFIG_MPIC */ 106#endif /* CONFIG_MPIC */
130 107
131static void __init smp_space_timers(unsigned int max_cpus)
132{
133 int i;
134 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
135 unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;
136
137 for_each_cpu(i) {
138 if (i != boot_cpuid) {
139 paca[i].next_jiffy_update_tb =
140 previous_tb + offset;
141 previous_tb = paca[i].next_jiffy_update_tb;
142 }
143 }
144}
145
146void smp_message_recv(int msg, struct pt_regs *regs) 108void smp_message_recv(int msg, struct pt_regs *regs)
147{ 109{
148 switch(msg) { 110 switch(msg) {
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
deleted file mode 100644
index 7467ae508e6e..000000000000
--- a/arch/ppc64/kernel/traps.c
+++ /dev/null
@@ -1,568 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/traps.c
3 *
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Modified by Cort Dougan (cort@cs.nmt.edu)
12 * and Paul Mackerras (paulus@cs.anu.edu.au)
13 */
14
15/*
16 * This file handles the architecture-dependent parts of hardware exceptions
17 */
18
19#include <linux/config.h>
20#include <linux/errno.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/stddef.h>
25#include <linux/unistd.h>
26#include <linux/slab.h>
27#include <linux/user.h>
28#include <linux/a.out.h>
29#include <linux/interrupt.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include <linux/delay.h>
33#include <linux/kprobes.h>
34#include <asm/kdebug.h>
35
36#include <asm/pgtable.h>
37#include <asm/uaccess.h>
38#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/processor.h>
41#include <asm/ppcdebug.h>
42#include <asm/rtas.h>
43#include <asm/systemcfg.h>
44#include <asm/machdep.h>
45#include <asm/pmc.h>
46
47#ifdef CONFIG_DEBUGGER
48int (*__debugger)(struct pt_regs *regs);
49int (*__debugger_ipi)(struct pt_regs *regs);
50int (*__debugger_bpt)(struct pt_regs *regs);
51int (*__debugger_sstep)(struct pt_regs *regs);
52int (*__debugger_iabr_match)(struct pt_regs *regs);
53int (*__debugger_dabr_match)(struct pt_regs *regs);
54int (*__debugger_fault_handler)(struct pt_regs *regs);
55
56EXPORT_SYMBOL(__debugger);
57EXPORT_SYMBOL(__debugger_ipi);
58EXPORT_SYMBOL(__debugger_bpt);
59EXPORT_SYMBOL(__debugger_sstep);
60EXPORT_SYMBOL(__debugger_iabr_match);
61EXPORT_SYMBOL(__debugger_dabr_match);
62EXPORT_SYMBOL(__debugger_fault_handler);
63#endif
64
65struct notifier_block *ppc64_die_chain;
66static DEFINE_SPINLOCK(die_notifier_lock);
67
68int register_die_notifier(struct notifier_block *nb)
69{
70 int err = 0;
71 unsigned long flags;
72
73 spin_lock_irqsave(&die_notifier_lock, flags);
74 err = notifier_chain_register(&ppc64_die_chain, nb);
75 spin_unlock_irqrestore(&die_notifier_lock, flags);
76 return err;
77}
78
79/*
80 * Trap & Exception support
81 */
82
83static DEFINE_SPINLOCK(die_lock);
84
85int die(const char *str, struct pt_regs *regs, long err)
86{
87 static int die_counter;
88 int nl = 0;
89
90 if (debugger(regs))
91 return 1;
92
93 console_verbose();
94 spin_lock_irq(&die_lock);
95 bust_spinlocks(1);
96 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
97#ifdef CONFIG_PREEMPT
98 printk("PREEMPT ");
99 nl = 1;
100#endif
101#ifdef CONFIG_SMP
102 printk("SMP NR_CPUS=%d ", NR_CPUS);
103 nl = 1;
104#endif
105#ifdef CONFIG_DEBUG_PAGEALLOC
106 printk("DEBUG_PAGEALLOC ");
107 nl = 1;
108#endif
109#ifdef CONFIG_NUMA
110 printk("NUMA ");
111 nl = 1;
112#endif
113 switch(systemcfg->platform) {
114 case PLATFORM_PSERIES:
115 printk("PSERIES ");
116 nl = 1;
117 break;
118 case PLATFORM_PSERIES_LPAR:
119 printk("PSERIES LPAR ");
120 nl = 1;
121 break;
122 case PLATFORM_ISERIES_LPAR:
123 printk("ISERIES LPAR ");
124 nl = 1;
125 break;
126 case PLATFORM_POWERMAC:
127 printk("POWERMAC ");
128 nl = 1;
129 break;
130 case PLATFORM_BPA:
131 printk("BPA ");
132 nl = 1;
133 break;
134 }
135 if (nl)
136 printk("\n");
137 print_modules();
138 show_regs(regs);
139 bust_spinlocks(0);
140 spin_unlock_irq(&die_lock);
141
142 if (in_interrupt())
143 panic("Fatal exception in interrupt");
144
145 if (panic_on_oops) {
146 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
147 ssleep(5);
148 panic("Fatal exception");
149 }
150 do_exit(SIGSEGV);
151
152 return 0;
153}
154
155void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
156{
157 siginfo_t info;
158
159 if (!user_mode(regs)) {
160 if (die("Exception in kernel mode", regs, signr))
161 return;
162 }
163
164 memset(&info, 0, sizeof(info));
165 info.si_signo = signr;
166 info.si_code = code;
167 info.si_addr = (void __user *) addr;
168 force_sig_info(signr, &info, current);
169}
170
171void system_reset_exception(struct pt_regs *regs)
172{
173 /* See if any machine dependent calls */
174 if (ppc_md.system_reset_exception)
175 ppc_md.system_reset_exception(regs);
176
177 die("System Reset", regs, 0);
178
179 /* Must die if the interrupt is not recoverable */
180 if (!(regs->msr & MSR_RI))
181 panic("Unrecoverable System Reset");
182
183 /* What should we do here? We could issue a shutdown or hard reset. */
184}
185
186void machine_check_exception(struct pt_regs *regs)
187{
188 int recover = 0;
189
190 /* See if any machine dependent calls */
191 if (ppc_md.machine_check_exception)
192 recover = ppc_md.machine_check_exception(regs);
193
194 if (recover)
195 return;
196
197 if (debugger_fault_handler(regs))
198 return;
199 die("Machine check", regs, 0);
200
201 /* Must die if the interrupt is not recoverable */
202 if (!(regs->msr & MSR_RI))
203 panic("Unrecoverable Machine check");
204}
205
206void unknown_exception(struct pt_regs *regs)
207{
208 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
209 regs->nip, regs->msr, regs->trap);
210
211 _exception(SIGTRAP, regs, 0, 0);
212}
213
214void instruction_breakpoint_exception(struct pt_regs *regs)
215{
216 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
217 5, SIGTRAP) == NOTIFY_STOP)
218 return;
219 if (debugger_iabr_match(regs))
220 return;
221 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
222}
223
224void __kprobes single_step_exception(struct pt_regs *regs)
225{
226 regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
227
228 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
229 5, SIGTRAP) == NOTIFY_STOP)
230 return;
231 if (debugger_sstep(regs))
232 return;
233
234 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
235}
236
237/*
238 * After we have successfully emulated an instruction, we have to
239 * check if the instruction was being single-stepped, and if so,
240 * pretend we got a single-step exception. This was pointed out
241 * by Kumar Gala. -- paulus
242 */
243static inline void emulate_single_step(struct pt_regs *regs)
244{
245 if (regs->msr & MSR_SE)
246 single_step_exception(regs);
247}
248
249static void parse_fpe(struct pt_regs *regs)
250{
251 int code = 0;
252 unsigned long fpscr;
253
254 flush_fp_to_thread(current);
255
256 fpscr = current->thread.fpscr;
257
258 /* Invalid operation */
259 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
260 code = FPE_FLTINV;
261
262 /* Overflow */
263 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
264 code = FPE_FLTOVF;
265
266 /* Underflow */
267 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
268 code = FPE_FLTUND;
269
270 /* Divide by zero */
271 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
272 code = FPE_FLTDIV;
273
274 /* Inexact result */
275 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
276 code = FPE_FLTRES;
277
278 _exception(SIGFPE, regs, code, regs->nip);
279}
280
281/*
282 * Illegal instruction emulation support. Return non-zero if we can't
283 * emulate, or -EFAULT if the associated memory access caused an access
284 * fault. Return zero on success.
285 */
286
287#define INST_MFSPR_PVR 0x7c1f42a6
288#define INST_MFSPR_PVR_MASK 0xfc1fffff
289
290#define INST_DCBA 0x7c0005ec
291#define INST_DCBA_MASK 0x7c0007fe
292
293#define INST_MCRXR 0x7c000400
294#define INST_MCRXR_MASK 0x7c0007fe
295
296static int emulate_instruction(struct pt_regs *regs)
297{
298 unsigned int instword;
299
300 if (!user_mode(regs))
301 return -EINVAL;
302
303 CHECK_FULL_REGS(regs);
304
305 if (get_user(instword, (unsigned int __user *)(regs->nip)))
306 return -EFAULT;
307
308 /* Emulate the mfspr rD, PVR. */
309 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
310 unsigned int rd;
311
312 rd = (instword >> 21) & 0x1f;
313 regs->gpr[rd] = mfspr(SPRN_PVR);
314 return 0;
315 }
316
317 /* Emulating the dcba insn is just a no-op. */
318 if ((instword & INST_DCBA_MASK) == INST_DCBA) {
319 static int warned;
320
321 if (!warned) {
322 printk(KERN_WARNING
323 "process %d (%s) uses obsolete 'dcba' insn\n",
324 current->pid, current->comm);
325 warned = 1;
326 }
327 return 0;
328 }
329
330 /* Emulate the mcrxr insn. */
331 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
332 static int warned;
333 unsigned int shift;
334
335 if (!warned) {
336 printk(KERN_WARNING
337 "process %d (%s) uses obsolete 'mcrxr' insn\n",
338 current->pid, current->comm);
339 warned = 1;
340 }
341
342 shift = (instword >> 21) & 0x1c;
343 regs->ccr &= ~(0xf0000000 >> shift);
344 regs->ccr |= (regs->xer & 0xf0000000) >> shift;
345 regs->xer &= ~0xf0000000;
346 return 0;
347 }
348
349 return -EINVAL;
350}
351
352/*
353 * Look through the list of trap instructions that are used for BUG(),
354 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
355 * that the exception was caused by a trap instruction of some kind.
356 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
357 * otherwise.
358 */
359extern struct bug_entry __start___bug_table[], __stop___bug_table[];
360
361#ifndef CONFIG_MODULES
362#define module_find_bug(x) NULL
363#endif
364
365struct bug_entry *find_bug(unsigned long bugaddr)
366{
367 struct bug_entry *bug;
368
369 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
370 if (bugaddr == bug->bug_addr)
371 return bug;
372 return module_find_bug(bugaddr);
373}
374
375static int
376check_bug_trap(struct pt_regs *regs)
377{
378 struct bug_entry *bug;
379 unsigned long addr;
380
381 if (regs->msr & MSR_PR)
382 return 0; /* not in kernel */
383 addr = regs->nip; /* address of trap instruction */
384 if (addr < PAGE_OFFSET)
385 return 0;
386 bug = find_bug(regs->nip);
387 if (bug == NULL)
388 return 0;
389 if (bug->line & BUG_WARNING_TRAP) {
390 /* this is a WARN_ON rather than BUG/BUG_ON */
391 printk(KERN_ERR "Badness in %s at %s:%d\n",
392 bug->function, bug->file,
393 (unsigned int)bug->line & ~BUG_WARNING_TRAP);
394 show_stack(current, (void *)regs->gpr[1]);
395 return 1;
396 }
397 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
398 bug->function, bug->file, (unsigned int)bug->line);
399 return 0;
400}
401
402void __kprobes program_check_exception(struct pt_regs *regs)
403{
404 if (debugger_fault_handler(regs))
405 return;
406
407 if (regs->msr & 0x100000) {
408 /* IEEE FP exception */
409 parse_fpe(regs);
410 } else if (regs->msr & 0x20000) {
411 /* trap exception */
412
413 if (notify_die(DIE_BPT, "breakpoint", regs, 5,
414 5, SIGTRAP) == NOTIFY_STOP)
415 return;
416 if (debugger_bpt(regs))
417 return;
418
419 if (check_bug_trap(regs)) {
420 regs->nip += 4;
421 return;
422 }
423 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
424
425 } else {
426 /* Privileged or illegal instruction; try to emulate it. */
427 switch (emulate_instruction(regs)) {
428 case 0:
429 regs->nip += 4;
430 emulate_single_step(regs);
431 break;
432
433 case -EFAULT:
434 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
435 break;
436
437 default:
438 if (regs->msr & 0x40000)
439 /* priveleged */
440 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
441 else
442 /* illegal */
443 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
444 break;
445 }
446 }
447}
448
449void kernel_fp_unavailable_exception(struct pt_regs *regs)
450{
451 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
452 "%lx at %lx\n", regs->trap, regs->nip);
453 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
454}
455
456void altivec_unavailable_exception(struct pt_regs *regs)
457{
458 if (user_mode(regs)) {
459 /* A user program has executed an altivec instruction,
460 but this kernel doesn't support altivec. */
461 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
462 return;
463 }
464 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
465 "%lx at %lx\n", regs->trap, regs->nip);
466 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
467}
468
469extern perf_irq_t perf_irq;
470
471void performance_monitor_exception(struct pt_regs *regs)
472{
473 perf_irq(regs);
474}
475
476void alignment_exception(struct pt_regs *regs)
477{
478 int fixed;
479
480 fixed = fix_alignment(regs);
481
482 if (fixed == 1) {
483 regs->nip += 4; /* skip over emulated instruction */
484 emulate_single_step(regs);
485 return;
486 }
487
488 /* Operand address was bad */
489 if (fixed == -EFAULT) {
490 if (user_mode(regs)) {
491 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->dar);
492 } else {
493 /* Search exception table */
494 bad_page_fault(regs, regs->dar, SIGSEGV);
495 }
496
497 return;
498 }
499
500 _exception(SIGBUS, regs, BUS_ADRALN, regs->nip);
501}
502
503#ifdef CONFIG_ALTIVEC
504void altivec_assist_exception(struct pt_regs *regs)
505{
506 int err;
507 siginfo_t info;
508
509 if (!user_mode(regs)) {
510 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
511 " at %lx\n", regs->nip);
512 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
513 }
514
515 flush_altivec_to_thread(current);
516
517 err = emulate_altivec(regs);
518 if (err == 0) {
519 regs->nip += 4; /* skip emulated instruction */
520 emulate_single_step(regs);
521 return;
522 }
523
524 if (err == -EFAULT) {
525 /* got an error reading the instruction */
526 info.si_signo = SIGSEGV;
527 info.si_errno = 0;
528 info.si_code = SEGV_MAPERR;
529 info.si_addr = (void __user *) regs->nip;
530 force_sig_info(SIGSEGV, &info, current);
531 } else {
532 /* didn't recognize the instruction */
533 /* XXX quick hack for now: set the non-Java bit in the VSCR */
534 if (printk_ratelimit())
535 printk(KERN_ERR "Unrecognized altivec instruction "
536 "in %s at %lx\n", current->comm, regs->nip);
537 current->thread.vscr.u[3] |= 0x10000;
538 }
539}
540#endif /* CONFIG_ALTIVEC */
541
542/*
543 * We enter here if we get an unrecoverable exception, that is, one
544 * that happened at a point where the RI (recoverable interrupt) bit
545 * in the MSR is 0. This indicates that SRR0/1 are live, and that
546 * we therefore lost state by taking this exception.
547 */
548void unrecoverable_exception(struct pt_regs *regs)
549{
550 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
551 regs->trap, regs->nip);
552 die("Unrecoverable exception", regs, SIGABRT);
553}
554
555/*
556 * We enter here if we discover during exception entry that we are
557 * running in supervisor mode with a userspace value in the stack pointer.
558 */
559void kernel_bad_stack(struct pt_regs *regs)
560{
561 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
562 regs->gpr[1], regs->nip);
563 die("Bad kernel stack pointer", regs, SIGABRT);
564}
565
566void __init trap_init(void)
567{
568}
diff --git a/arch/ppc64/kernel/u3_iommu.c b/arch/ppc64/kernel/u3_iommu.c
index 41ea09cb9ac7..fba871a1bda5 100644
--- a/arch/ppc64/kernel/u3_iommu.c
+++ b/arch/ppc64/kernel/u3_iommu.c
@@ -44,39 +44,11 @@
44#include <asm/abs_addr.h> 44#include <asm/abs_addr.h>
45#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
46#include <asm/lmb.h> 46#include <asm/lmb.h>
47 47#include <asm/dart.h>
48#include "pci.h" 48#include <asm/ppc-pci.h>
49 49
50extern int iommu_force_on; 50extern int iommu_force_on;
51 51
52/* physical base of DART registers */
53#define DART_BASE 0xf8033000UL
54
55/* Offset from base to control register */
56#define DARTCNTL 0
57/* Offset from base to exception register */
58#define DARTEXCP 0x10
59/* Offset from base to TLB tag registers */
60#define DARTTAG 0x1000
61
62
63/* Control Register fields */
64
65/* base address of table (pfn) */
66#define DARTCNTL_BASE_MASK 0xfffff
67#define DARTCNTL_BASE_SHIFT 12
68
69#define DARTCNTL_FLUSHTLB 0x400
70#define DARTCNTL_ENABLE 0x200
71
72/* size of table in pages */
73#define DARTCNTL_SIZE_MASK 0x1ff
74#define DARTCNTL_SIZE_SHIFT 0
75
76/* DART table fields */
77#define DARTMAP_VALID 0x80000000
78#define DARTMAP_RPNMASK 0x00ffffff
79
80/* Physical base address and size of the DART table */ 52/* Physical base address and size of the DART table */
81unsigned long dart_tablebase; /* exported to htab_initialize */ 53unsigned long dart_tablebase; /* exported to htab_initialize */
82static unsigned long dart_tablesize; 54static unsigned long dart_tablesize;
@@ -152,18 +124,21 @@ static void dart_build(struct iommu_table *tbl, long index,
152 124
153 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); 125 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
154 126
127 index <<= DART_PAGE_FACTOR;
128 npages <<= DART_PAGE_FACTOR;
129
155 dp = ((unsigned int*)tbl->it_base) + index; 130 dp = ((unsigned int*)tbl->it_base) + index;
156 131
157 /* On U3, all memory is contigous, so we can move this 132 /* On U3, all memory is contigous, so we can move this
158 * out of the loop. 133 * out of the loop.
159 */ 134 */
160 while (npages--) { 135 while (npages--) {
161 rpn = virt_to_abs(uaddr) >> PAGE_SHIFT; 136 rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT;
162 137
163 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); 138 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
164 139
165 rpn++; 140 rpn++;
166 uaddr += PAGE_SIZE; 141 uaddr += DART_PAGE_SIZE;
167 } 142 }
168 143
169 dart_dirty = 1; 144 dart_dirty = 1;
@@ -181,6 +156,9 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
181 156
182 DBG("dart: free at: %lx, %lx\n", index, npages); 157 DBG("dart: free at: %lx, %lx\n", index, npages);
183 158
159 index <<= DART_PAGE_FACTOR;
160 npages <<= DART_PAGE_FACTOR;
161
184 dp = ((unsigned int *)tbl->it_base) + index; 162 dp = ((unsigned int *)tbl->it_base) + index;
185 163
186 while (npages--) 164 while (npages--)
@@ -209,10 +187,10 @@ static int dart_init(struct device_node *dart_node)
209 * that to work around what looks like a problem with the HT bridge 187 * that to work around what looks like a problem with the HT bridge
210 * prefetching into invalid pages and corrupting data 188 * prefetching into invalid pages and corrupting data
211 */ 189 */
212 tmp = lmb_alloc(PAGE_SIZE, PAGE_SIZE); 190 tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
213 if (!tmp) 191 if (!tmp)
214 panic("U3-DART: Cannot allocate spare page!"); 192 panic("U3-DART: Cannot allocate spare page!");
215 dart_emptyval = DARTMAP_VALID | ((tmp >> PAGE_SHIFT) & DARTMAP_RPNMASK); 193 dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK);
216 194
217 /* Map in DART registers. FIXME: Use device node to get base address */ 195 /* Map in DART registers. FIXME: Use device node to get base address */
218 dart = ioremap(DART_BASE, 0x7000); 196 dart = ioremap(DART_BASE, 0x7000);
@@ -223,8 +201,8 @@ static int dart_init(struct device_node *dart_node)
223 * table size and enable bit 201 * table size and enable bit
224 */ 202 */
225 regword = DARTCNTL_ENABLE | 203 regword = DARTCNTL_ENABLE |
226 ((dart_tablebase >> PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) | 204 ((dart_tablebase >> DART_PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
227 (((dart_tablesize >> PAGE_SHIFT) & DARTCNTL_SIZE_MASK) 205 (((dart_tablesize >> DART_PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
228 << DARTCNTL_SIZE_SHIFT); 206 << DARTCNTL_SIZE_SHIFT);
229 dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); 207 dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
230 208
diff --git a/arch/ppc64/kernel/vdso64/sigtramp.S b/arch/ppc64/kernel/vdso64/sigtramp.S
index 8ae8f205e470..31b604ab56de 100644
--- a/arch/ppc64/kernel/vdso64/sigtramp.S
+++ b/arch/ppc64/kernel/vdso64/sigtramp.S
@@ -15,6 +15,7 @@
15#include <asm/ppc_asm.h> 15#include <asm/ppc_asm.h>
16#include <asm/unistd.h> 16#include <asm/unistd.h>
17#include <asm/vdso.h> 17#include <asm/vdso.h>
18#include <asm/ptrace.h> /* XXX for __SIGNAL_FRAMESIZE */
18 19
19 .text 20 .text
20 21
diff --git a/arch/ppc64/kernel/vecemu.c b/arch/ppc64/kernel/vecemu.c
deleted file mode 100644
index cb207629f21f..000000000000
--- a/arch/ppc64/kernel/vecemu.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Routines to emulate some Altivec/VMX instructions, specifically
3 * those that can trap when given denormalized operands in Java mode.
4 */
5#include <linux/kernel.h>
6#include <linux/errno.h>
7#include <linux/sched.h>
8#include <asm/ptrace.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11
12/* Functions in vector.S */
13extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
14extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
15extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
16extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
17extern void vrefp(vector128 *dst, vector128 *src);
18extern void vrsqrtefp(vector128 *dst, vector128 *src);
19extern void vexptep(vector128 *dst, vector128 *src);
20
21static unsigned int exp2s[8] = {
22 0x800000,
23 0x8b95c2,
24 0x9837f0,
25 0xa5fed7,
26 0xb504f3,
27 0xc5672a,
28 0xd744fd,
29 0xeac0c7
30};
31
32/*
33 * Computes an estimate of 2^x. The `s' argument is the 32-bit
34 * single-precision floating-point representation of x.
35 */
36static unsigned int eexp2(unsigned int s)
37{
38 int exp, pwr;
39 unsigned int mant, frac;
40
41 /* extract exponent field from input */
42 exp = ((s >> 23) & 0xff) - 127;
43 if (exp > 7) {
44 /* check for NaN input */
45 if (exp == 128 && (s & 0x7fffff) != 0)
46 return s | 0x400000; /* return QNaN */
47 /* 2^-big = 0, 2^+big = +Inf */
48 return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
49 }
50 if (exp < -23)
51 return 0x3f800000; /* 1.0 */
52
53 /* convert to fixed point integer in 9.23 representation */
54 pwr = (s & 0x7fffff) | 0x800000;
55 if (exp > 0)
56 pwr <<= exp;
57 else
58 pwr >>= -exp;
59 if (s & 0x80000000)
60 pwr = -pwr;
61
62 /* extract integer part, which becomes exponent part of result */
63 exp = (pwr >> 23) + 126;
64 if (exp >= 254)
65 return 0x7f800000;
66 if (exp < -23)
67 return 0;
68
69 /* table lookup on top 3 bits of fraction to get mantissa */
70 mant = exp2s[(pwr >> 20) & 7];
71
72 /* linear interpolation using remaining 20 bits of fraction */
73 asm("mulhwu %0,%1,%2" : "=r" (frac)
74 : "r" (pwr << 12), "r" (0x172b83ff));
75 asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
76 mant += frac;
77
78 if (exp >= 0)
79 return mant + (exp << 23);
80
81 /* denormalized result */
82 exp = -exp;
83 mant += 1 << (exp - 1);
84 return mant >> exp;
85}
86
87/*
88 * Computes an estimate of log_2(x). The `s' argument is the 32-bit
89 * single-precision floating-point representation of x.
90 */
91static unsigned int elog2(unsigned int s)
92{
93 int exp, mant, lz, frac;
94
95 exp = s & 0x7f800000;
96 mant = s & 0x7fffff;
97 if (exp == 0x7f800000) { /* Inf or NaN */
98 if (mant != 0)
99 s |= 0x400000; /* turn NaN into QNaN */
100 return s;
101 }
102 if ((exp | mant) == 0) /* +0 or -0 */
103 return 0xff800000; /* return -Inf */
104
105 if (exp == 0) {
106 /* denormalized */
107 asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
108 mant <<= lz - 8;
109 exp = (-118 - lz) << 23;
110 } else {
111 mant |= 0x800000;
112 exp -= 127 << 23;
113 }
114
115 if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */
116 exp |= 0x400000; /* 0.5 * 2^23 */
117 asm("mulhwu %0,%1,%2" : "=r" (mant)
118 : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */
119 }
120 if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */
121 exp |= 0x200000; /* 0.25 * 2^23 */
122 asm("mulhwu %0,%1,%2" : "=r" (mant)
123 : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */
124 }
125 if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */
126 exp |= 0x100000; /* 0.125 * 2^23 */
127 asm("mulhwu %0,%1,%2" : "=r" (mant)
128 : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */
129 }
130 if (mant > 0x800000) { /* 1.0 * 2^23 */
131 /* calculate (mant - 1) * 1.381097463 */
132 /* 1.381097463 == 0.125 / (2^0.125 - 1) */
133 asm("mulhwu %0,%1,%2" : "=r" (frac)
134 : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
135 exp += frac;
136 }
137 s = exp & 0x80000000;
138 if (exp != 0) {
139 if (s)
140 exp = -exp;
141 asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
142 lz = 8 - lz;
143 if (lz > 0)
144 exp >>= lz;
145 else if (lz < 0)
146 exp <<= -lz;
147 s += ((lz + 126) << 23) + exp;
148 }
149 return s;
150}
151
152#define VSCR_SAT 1
153
154static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
155{
156 int exp, mant;
157
158 exp = (x >> 23) & 0xff;
159 mant = x & 0x7fffff;
160 if (exp == 255 && mant != 0)
161 return 0; /* NaN -> 0 */
162 exp = exp - 127 + scale;
163 if (exp < 0)
164 return 0; /* round towards zero */
165 if (exp >= 31) {
166 /* saturate, unless the result would be -2^31 */
167 if (x + (scale << 23) != 0xcf000000)
168 *vscrp |= VSCR_SAT;
169 return (x & 0x80000000)? 0x80000000: 0x7fffffff;
170 }
171 mant |= 0x800000;
172 mant = (mant << 7) >> (30 - exp);
173 return (x & 0x80000000)? -mant: mant;
174}
175
176static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
177{
178 int exp;
179 unsigned int mant;
180
181 exp = (x >> 23) & 0xff;
182 mant = x & 0x7fffff;
183 if (exp == 255 && mant != 0)
184 return 0; /* NaN -> 0 */
185 exp = exp - 127 + scale;
186 if (exp < 0)
187 return 0; /* round towards zero */
188 if (x & 0x80000000) {
189 /* negative => saturate to 0 */
190 *vscrp |= VSCR_SAT;
191 return 0;
192 }
193 if (exp >= 32) {
194 /* saturate */
195 *vscrp |= VSCR_SAT;
196 return 0xffffffff;
197 }
198 mant |= 0x800000;
199 mant = (mant << 8) >> (31 - exp);
200 return mant;
201}
202
203/* Round to floating integer, towards 0 */
204static unsigned int rfiz(unsigned int x)
205{
206 int exp;
207
208 exp = ((x >> 23) & 0xff) - 127;
209 if (exp == 128 && (x & 0x7fffff) != 0)
210 return x | 0x400000; /* NaN -> make it a QNaN */
211 if (exp >= 23)
212 return x; /* it's an integer already (or Inf) */
213 if (exp < 0)
214 return x & 0x80000000; /* |x| < 1.0 rounds to 0 */
215 return x & ~(0x7fffff >> exp);
216}
217
218/* Round to floating integer, towards +/- Inf */
219static unsigned int rfii(unsigned int x)
220{
221 int exp, mask;
222
223 exp = ((x >> 23) & 0xff) - 127;
224 if (exp == 128 && (x & 0x7fffff) != 0)
225 return x | 0x400000; /* NaN -> make it a QNaN */
226 if (exp >= 23)
227 return x; /* it's an integer already (or Inf) */
228 if ((x & 0x7fffffff) == 0)
229 return x; /* +/-0 -> +/-0 */
230 if (exp < 0)
231 /* 0 < |x| < 1.0 rounds to +/- 1.0 */
232 return (x & 0x80000000) | 0x3f800000;
233 mask = 0x7fffff >> exp;
234 /* mantissa overflows into exponent - that's OK,
235 it can't overflow into the sign bit */
236 return (x + mask) & ~mask;
237}
238
239/* Round to floating integer, to nearest */
240static unsigned int rfin(unsigned int x)
241{
242 int exp, half;
243
244 exp = ((x >> 23) & 0xff) - 127;
245 if (exp == 128 && (x & 0x7fffff) != 0)
246 return x | 0x400000; /* NaN -> make it a QNaN */
247 if (exp >= 23)
248 return x; /* it's an integer already (or Inf) */
249 if (exp < -1)
250 return x & 0x80000000; /* |x| < 0.5 -> +/-0 */
251 if (exp == -1)
252 /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
253 return (x & 0x80000000) | 0x3f800000;
254 half = 0x400000 >> exp;
255 /* add 0.5 to the magnitude and chop off the fraction bits */
256 return (x + half) & ~(0x7fffff >> exp);
257}
258
259int
260emulate_altivec(struct pt_regs *regs)
261{
262 unsigned int instr, i;
263 unsigned int va, vb, vc, vd;
264 vector128 *vrs;
265
266 if (get_user(instr, (unsigned int __user *) regs->nip))
267 return -EFAULT;
268 if ((instr >> 26) != 4)
269 return -EINVAL; /* not an altivec instruction */
270 vd = (instr >> 21) & 0x1f;
271 va = (instr >> 16) & 0x1f;
272 vb = (instr >> 11) & 0x1f;
273 vc = (instr >> 6) & 0x1f;
274
275 vrs = current->thread.vr;
276 switch (instr & 0x3f) {
277 case 10:
278 switch (vc) {
279 case 0: /* vaddfp */
280 vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
281 break;
282 case 1: /* vsubfp */
283 vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
284 break;
285 case 4: /* vrefp */
286 vrefp(&vrs[vd], &vrs[vb]);
287 break;
288 case 5: /* vrsqrtefp */
289 vrsqrtefp(&vrs[vd], &vrs[vb]);
290 break;
291 case 6: /* vexptefp */
292 for (i = 0; i < 4; ++i)
293 vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
294 break;
295 case 7: /* vlogefp */
296 for (i = 0; i < 4; ++i)
297 vrs[vd].u[i] = elog2(vrs[vb].u[i]);
298 break;
299 case 8: /* vrfin */
300 for (i = 0; i < 4; ++i)
301 vrs[vd].u[i] = rfin(vrs[vb].u[i]);
302 break;
303 case 9: /* vrfiz */
304 for (i = 0; i < 4; ++i)
305 vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
306 break;
307 case 10: /* vrfip */
308 for (i = 0; i < 4; ++i) {
309 u32 x = vrs[vb].u[i];
310 x = (x & 0x80000000)? rfiz(x): rfii(x);
311 vrs[vd].u[i] = x;
312 }
313 break;
314 case 11: /* vrfim */
315 for (i = 0; i < 4; ++i) {
316 u32 x = vrs[vb].u[i];
317 x = (x & 0x80000000)? rfii(x): rfiz(x);
318 vrs[vd].u[i] = x;
319 }
320 break;
321 case 14: /* vctuxs */
322 for (i = 0; i < 4; ++i)
323 vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
324 &current->thread.vscr.u[3]);
325 break;
326 case 15: /* vctsxs */
327 for (i = 0; i < 4; ++i)
328 vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
329 &current->thread.vscr.u[3]);
330 break;
331 default:
332 return -EINVAL;
333 }
334 break;
335 case 46: /* vmaddfp */
336 vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
337 break;
338 case 47: /* vnmsubfp */
339 vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
340 break;
341 default:
342 return -EINVAL;
343 }
344
345 return 0;
346}
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 0306510bc4ff..022f220e772f 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -1,3 +1,4 @@
1#include <asm/page.h>
1#include <asm-generic/vmlinux.lds.h> 2#include <asm-generic/vmlinux.lds.h>
2 3
3OUTPUT_ARCH(powerpc:common64) 4OUTPUT_ARCH(powerpc:common64)
@@ -17,7 +18,7 @@ SECTIONS
17 LOCK_TEXT 18 LOCK_TEXT
18 KPROBES_TEXT 19 KPROBES_TEXT
19 *(.fixup) 20 *(.fixup)
20 . = ALIGN(4096); 21 . = ALIGN(PAGE_SIZE);
21 _etext = .; 22 _etext = .;
22 } 23 }
23 24
@@ -43,7 +44,7 @@ SECTIONS
43 44
44 45
45 /* will be freed after init */ 46 /* will be freed after init */
46 . = ALIGN(4096); 47 . = ALIGN(PAGE_SIZE);
47 __init_begin = .; 48 __init_begin = .;
48 49
49 .init.text : { 50 .init.text : {
@@ -83,7 +84,7 @@ SECTIONS
83 84
84 SECURITY_INIT 85 SECURITY_INIT
85 86
86 . = ALIGN(4096); 87 . = ALIGN(PAGE_SIZE);
87 .init.ramfs : { 88 .init.ramfs : {
88 __initramfs_start = .; 89 __initramfs_start = .;
89 *(.init.ramfs) 90 *(.init.ramfs)
@@ -96,18 +97,22 @@ SECTIONS
96 __per_cpu_end = .; 97 __per_cpu_end = .;
97 } 98 }
98 99
100 . = ALIGN(PAGE_SIZE);
99 . = ALIGN(16384); 101 . = ALIGN(16384);
100 __init_end = .; 102 __init_end = .;
101 /* freed after init ends here */ 103 /* freed after init ends here */
102 104
103 105
104 /* Read/write sections */ 106 /* Read/write sections */
107 . = ALIGN(PAGE_SIZE);
105 . = ALIGN(16384); 108 . = ALIGN(16384);
109 _sdata = .;
106 /* The initial task and kernel stack */ 110 /* The initial task and kernel stack */
107 .data.init_task : { 111 .data.init_task : {
108 *(.data.init_task) 112 *(.data.init_task)
109 } 113 }
110 114
115 . = ALIGN(PAGE_SIZE);
111 .data.page_aligned : { 116 .data.page_aligned : {
112 *(.data.page_aligned) 117 *(.data.page_aligned)
113 } 118 }
@@ -129,18 +134,18 @@ SECTIONS
129 __toc_start = .; 134 __toc_start = .;
130 *(.got) 135 *(.got)
131 *(.toc) 136 *(.toc)
132 . = ALIGN(4096); 137 . = ALIGN(PAGE_SIZE);
133 _edata = .; 138 _edata = .;
134 } 139 }
135 140
136 141
137 . = ALIGN(4096); 142 . = ALIGN(PAGE_SIZE);
138 .bss : { 143 .bss : {
139 __bss_start = .; 144 __bss_start = .;
140 *(.bss) 145 *(.bss)
141 __bss_stop = .; 146 __bss_stop = .;
142 } 147 }
143 148
144 . = ALIGN(4096); 149 . = ALIGN(PAGE_SIZE);
145 _end = . ; 150 _end = . ;
146} 151}
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile
deleted file mode 100644
index 3695d00d347f..000000000000
--- a/arch/ppc64/mm/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Makefile for the linux ppc-specific parts of the memory manager.
3#
4
5EXTRA_CFLAGS += -mno-minimal-toc
6
7obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
8 slb_low.o slb.o stab.o mmap.o
9obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
11obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
deleted file mode 100644
index be64b157afce..000000000000
--- a/arch/ppc64/mm/init.c
+++ /dev/null
@@ -1,869 +0,0 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * Dave Engebretsen <engebret@us.ibm.com>
14 * Rework for PPC64 port.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/string.h>
29#include <linux/types.h>
30#include <linux/mman.h>
31#include <linux/mm.h>
32#include <linux/swap.h>
33#include <linux/stddef.h>
34#include <linux/vmalloc.h>
35#include <linux/init.h>
36#include <linux/delay.h>
37#include <linux/bootmem.h>
38#include <linux/highmem.h>
39#include <linux/idr.h>
40#include <linux/nodemask.h>
41#include <linux/module.h>
42
43#include <asm/pgalloc.h>
44#include <asm/page.h>
45#include <asm/prom.h>
46#include <asm/lmb.h>
47#include <asm/rtas.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/machdep.h>
55#include <asm/tlb.h>
56#include <asm/eeh.h>
57#include <asm/processor.h>
58#include <asm/mmzone.h>
59#include <asm/cputable.h>
60#include <asm/ppcdebug.h>
61#include <asm/sections.h>
62#include <asm/system.h>
63#include <asm/iommu.h>
64#include <asm/abs_addr.h>
65#include <asm/vdso.h>
66#include <asm/imalloc.h>
67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
76int mem_init_done;
77unsigned long ioremap_bot = IMALLOC_BASE;
78static unsigned long phbs_io_bot = PHBS_IO_BASE;
79
80extern pgd_t swapper_pg_dir[];
81extern struct task_struct *current_set[NR_CPUS];
82
83unsigned long klimit = (unsigned long)_end;
84
85unsigned long _SDR1=0;
86unsigned long _ASR=0;
87
88/* max amount of RAM to use */
89unsigned long __max_memory;
90
91/* info on what we think the IO hole is */
92unsigned long io_hole_start;
93unsigned long io_hole_size;
94
95void show_mem(void)
96{
97 unsigned long total = 0, reserved = 0;
98 unsigned long shared = 0, cached = 0;
99 struct page *page;
100 pg_data_t *pgdat;
101 unsigned long i;
102
103 printk("Mem-info:\n");
104 show_free_areas();
105 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
106 for_each_pgdat(pgdat) {
107 for (i = 0; i < pgdat->node_spanned_pages; i++) {
108 page = pgdat_page_nr(pgdat, i);
109 total++;
110 if (PageReserved(page))
111 reserved++;
112 else if (PageSwapCache(page))
113 cached++;
114 else if (page_count(page))
115 shared += page_count(page) - 1;
116 }
117 }
118 printk("%ld pages of RAM\n", total);
119 printk("%ld reserved pages\n", reserved);
120 printk("%ld pages shared\n", shared);
121 printk("%ld pages swap cached\n", cached);
122}
123
124#ifdef CONFIG_PPC_ISERIES
125
126void __iomem *ioremap(unsigned long addr, unsigned long size)
127{
128 return (void __iomem *)addr;
129}
130
131extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
132 unsigned long flags)
133{
134 return (void __iomem *)addr;
135}
136
137void iounmap(volatile void __iomem *addr)
138{
139 return;
140}
141
142#else
143
144/*
145 * map_io_page currently only called by __ioremap
146 * map_io_page adds an entry to the ioremap page table
147 * and adds an entry to the HPT, possibly bolting it
148 */
149static int map_io_page(unsigned long ea, unsigned long pa, int flags)
150{
151 pgd_t *pgdp;
152 pud_t *pudp;
153 pmd_t *pmdp;
154 pte_t *ptep;
155 unsigned long vsid;
156
157 if (mem_init_done) {
158 spin_lock(&init_mm.page_table_lock);
159 pgdp = pgd_offset_k(ea);
160 pudp = pud_alloc(&init_mm, pgdp, ea);
161 if (!pudp)
162 return -ENOMEM;
163 pmdp = pmd_alloc(&init_mm, pudp, ea);
164 if (!pmdp)
165 return -ENOMEM;
166 ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
167 if (!ptep)
168 return -ENOMEM;
169 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
170 __pgprot(flags)));
171 spin_unlock(&init_mm.page_table_lock);
172 } else {
173 unsigned long va, vpn, hash, hpteg;
174
175 /*
176 * If the mm subsystem is not fully up, we cannot create a
177 * linux page table entry for this mapping. Simply bolt an
178 * entry in the hardware page table.
179 */
180 vsid = get_kernel_vsid(ea);
181 va = (vsid << 28) | (ea & 0xFFFFFFF);
182 vpn = va >> PAGE_SHIFT;
183
184 hash = hpt_hash(vpn, 0);
185
186 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
187
188 /* Panic if a pte grpup is full */
189 if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
190 HPTE_V_BOLTED,
191 _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
192 == -1) {
193 panic("map_io_page: could not insert mapping");
194 }
195 }
196 return 0;
197}
198
199
200static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
201 unsigned long ea, unsigned long size,
202 unsigned long flags)
203{
204 unsigned long i;
205
206 if ((flags & _PAGE_PRESENT) == 0)
207 flags |= pgprot_val(PAGE_KERNEL);
208
209 for (i = 0; i < size; i += PAGE_SIZE)
210 if (map_io_page(ea+i, pa+i, flags))
211 return NULL;
212
213 return (void __iomem *) (ea + (addr & ~PAGE_MASK));
214}
215
216
217void __iomem *
218ioremap(unsigned long addr, unsigned long size)
219{
220 return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
221}
222
223void __iomem * __ioremap(unsigned long addr, unsigned long size,
224 unsigned long flags)
225{
226 unsigned long pa, ea;
227 void __iomem *ret;
228
229 /*
230 * Choose an address to map it to.
231 * Once the imalloc system is running, we use it.
232 * Before that, we map using addresses going
233 * up from ioremap_bot. imalloc will use
234 * the addresses from ioremap_bot through
235 * IMALLOC_END
236 *
237 */
238 pa = addr & PAGE_MASK;
239 size = PAGE_ALIGN(addr + size) - pa;
240
241 if (size == 0)
242 return NULL;
243
244 if (mem_init_done) {
245 struct vm_struct *area;
246 area = im_get_free_area(size);
247 if (area == NULL)
248 return NULL;
249 ea = (unsigned long)(area->addr);
250 ret = __ioremap_com(addr, pa, ea, size, flags);
251 if (!ret)
252 im_free(area->addr);
253 } else {
254 ea = ioremap_bot;
255 ret = __ioremap_com(addr, pa, ea, size, flags);
256 if (ret)
257 ioremap_bot += size;
258 }
259 return ret;
260}
261
262#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
263
264int __ioremap_explicit(unsigned long pa, unsigned long ea,
265 unsigned long size, unsigned long flags)
266{
267 struct vm_struct *area;
268 void __iomem *ret;
269
270 /* For now, require page-aligned values for pa, ea, and size */
271 if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
272 !IS_PAGE_ALIGNED(size)) {
273 printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
274 return 1;
275 }
276
277 if (!mem_init_done) {
278 /* Two things to consider in this case:
279 * 1) No records will be kept (imalloc, etc) that the region
280 * has been remapped
281 * 2) It won't be easy to iounmap() the region later (because
282 * of 1)
283 */
284 ;
285 } else {
286 area = im_get_area(ea, size,
287 IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
288 if (area == NULL) {
289 /* Expected when PHB-dlpar is in play */
290 return 1;
291 }
292 if (ea != (unsigned long) area->addr) {
293 printk(KERN_ERR "unexpected addr return from "
294 "im_get_area\n");
295 return 1;
296 }
297 }
298
299 ret = __ioremap_com(pa, pa, ea, size, flags);
300 if (ret == NULL) {
301 printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
302 return 1;
303 }
304 if (ret != (void *) ea) {
305 printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
306 return 1;
307 }
308
309 return 0;
310}
311
312/*
313 * Unmap an IO region and remove it from imalloc'd list.
314 * Access to IO memory should be serialized by driver.
315 * This code is modeled after vmalloc code - unmap_vm_area()
316 *
317 * XXX what about calls before mem_init_done (ie python_countermeasures())
318 */
319void iounmap(volatile void __iomem *token)
320{
321 void *addr;
322
323 if (!mem_init_done)
324 return;
325
326 addr = (void *) ((unsigned long __force) token & PAGE_MASK);
327
328 im_free(addr);
329}
330
331static int iounmap_subset_regions(unsigned long addr, unsigned long size)
332{
333 struct vm_struct *area;
334
335 /* Check whether subsets of this region exist */
336 area = im_get_area(addr, size, IM_REGION_SUPERSET);
337 if (area == NULL)
338 return 1;
339
340 while (area) {
341 iounmap((void __iomem *) area->addr);
342 area = im_get_area(addr, size,
343 IM_REGION_SUPERSET);
344 }
345
346 return 0;
347}
348
349int iounmap_explicit(volatile void __iomem *start, unsigned long size)
350{
351 struct vm_struct *area;
352 unsigned long addr;
353 int rc;
354
355 addr = (unsigned long __force) start & PAGE_MASK;
356
357 /* Verify that the region either exists or is a subset of an existing
358 * region. In the latter case, split the parent region to create
359 * the exact region
360 */
361 area = im_get_area(addr, size,
362 IM_REGION_EXISTS | IM_REGION_SUBSET);
363 if (area == NULL) {
364 /* Determine whether subset regions exist. If so, unmap */
365 rc = iounmap_subset_regions(addr, size);
366 if (rc) {
367 printk(KERN_ERR
368 "%s() cannot unmap nonexistent range 0x%lx\n",
369 __FUNCTION__, addr);
370 return 1;
371 }
372 } else {
373 iounmap((void __iomem *) area->addr);
374 }
375 /*
376 * FIXME! This can't be right:
377 iounmap(area->addr);
378 * Maybe it should be "iounmap(area);"
379 */
380 return 0;
381}
382
383#endif
384
385EXPORT_SYMBOL(ioremap);
386EXPORT_SYMBOL(__ioremap);
387EXPORT_SYMBOL(iounmap);
388
389void free_initmem(void)
390{
391 unsigned long addr;
392
393 addr = (unsigned long)__init_begin;
394 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
395 memset((void *)addr, 0xcc, PAGE_SIZE);
396 ClearPageReserved(virt_to_page(addr));
397 set_page_count(virt_to_page(addr), 1);
398 free_page(addr);
399 totalram_pages++;
400 }
401 printk ("Freeing unused kernel memory: %luk freed\n",
402 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
403}
404
405#ifdef CONFIG_BLK_DEV_INITRD
406void free_initrd_mem(unsigned long start, unsigned long end)
407{
408 if (start < end)
409 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
410 for (; start < end; start += PAGE_SIZE) {
411 ClearPageReserved(virt_to_page(start));
412 set_page_count(virt_to_page(start), 1);
413 free_page(start);
414 totalram_pages++;
415 }
416}
417#endif
418
419static DEFINE_SPINLOCK(mmu_context_lock);
420static DEFINE_IDR(mmu_context_idr);
421
422int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
423{
424 int index;
425 int err;
426
427again:
428 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
429 return -ENOMEM;
430
431 spin_lock(&mmu_context_lock);
432 err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
433 spin_unlock(&mmu_context_lock);
434
435 if (err == -EAGAIN)
436 goto again;
437 else if (err)
438 return err;
439
440 if (index > MAX_CONTEXT) {
441 idr_remove(&mmu_context_idr, index);
442 return -ENOMEM;
443 }
444
445 mm->context.id = index;
446
447 return 0;
448}
449
450void destroy_context(struct mm_struct *mm)
451{
452 spin_lock(&mmu_context_lock);
453 idr_remove(&mmu_context_idr, mm->context.id);
454 spin_unlock(&mmu_context_lock);
455
456 mm->context.id = NO_CONTEXT;
457}
458
459/*
460 * Do very early mm setup.
461 */
462void __init mm_init_ppc64(void)
463{
464#ifndef CONFIG_PPC_ISERIES
465 unsigned long i;
466#endif
467
468 ppc64_boot_msg(0x100, "MM Init");
469
470 /* This is the story of the IO hole... please, keep seated,
471 * unfortunately, we are out of oxygen masks at the moment.
472 * So we need some rough way to tell where your big IO hole
473 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
474 * that area as well, on POWER4 we don't have one, etc...
475 * We need that as a "hint" when sizing the TCE table on POWER3
476 * So far, the simplest way that seem work well enough for us it
477 * to just assume that the first discontinuity in our physical
478 * RAM layout is the IO hole. That may not be correct in the future
479 * (and isn't on iSeries but then we don't care ;)
480 */
481
482#ifndef CONFIG_PPC_ISERIES
483 for (i = 1; i < lmb.memory.cnt; i++) {
484 unsigned long base, prevbase, prevsize;
485
486 prevbase = lmb.memory.region[i-1].base;
487 prevsize = lmb.memory.region[i-1].size;
488 base = lmb.memory.region[i].base;
489 if (base > (prevbase + prevsize)) {
490 io_hole_start = prevbase + prevsize;
491 io_hole_size = base - (prevbase + prevsize);
492 break;
493 }
494 }
495#endif /* CONFIG_PPC_ISERIES */
496 if (io_hole_start)
497 printk("IO Hole assumed to be %lx -> %lx\n",
498 io_hole_start, io_hole_start + io_hole_size - 1);
499
500 ppc64_boot_msg(0x100, "MM Init Done");
501}
502
503/*
504 * This is called by /dev/mem to know if a given address has to
505 * be mapped non-cacheable or not
506 */
507int page_is_ram(unsigned long pfn)
508{
509 int i;
510 unsigned long paddr = (pfn << PAGE_SHIFT);
511
512 for (i=0; i < lmb.memory.cnt; i++) {
513 unsigned long base;
514
515 base = lmb.memory.region[i].base;
516
517 if ((paddr >= base) &&
518 (paddr < (base + lmb.memory.region[i].size))) {
519 return 1;
520 }
521 }
522
523 return 0;
524}
525EXPORT_SYMBOL(page_is_ram);
526
527/*
528 * Initialize the bootmem system and give it all the memory we
529 * have available.
530 */
531#ifndef CONFIG_NEED_MULTIPLE_NODES
532void __init do_init_bootmem(void)
533{
534 unsigned long i;
535 unsigned long start, bootmap_pages;
536 unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
537 int boot_mapsize;
538
539 /*
540 * Find an area to use for the bootmem bitmap. Calculate the size of
541 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
542 * Add 1 additional page in case the address isn't page-aligned.
543 */
544 bootmap_pages = bootmem_bootmap_pages(total_pages);
545
546 start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
547 BUG_ON(!start);
548
549 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
550
551 max_pfn = max_low_pfn;
552
553 /* Add all physical memory to the bootmem map, mark each area
554 * present.
555 */
556 for (i=0; i < lmb.memory.cnt; i++)
557 free_bootmem(lmb.memory.region[i].base,
558 lmb_size_bytes(&lmb.memory, i));
559
560 /* reserve the sections we're already using */
561 for (i=0; i < lmb.reserved.cnt; i++)
562 reserve_bootmem(lmb.reserved.region[i].base,
563 lmb_size_bytes(&lmb.reserved, i));
564
565 for (i=0; i < lmb.memory.cnt; i++)
566 memory_present(0, lmb_start_pfn(&lmb.memory, i),
567 lmb_end_pfn(&lmb.memory, i));
568}
569
570/*
571 * paging_init() sets up the page tables - in fact we've already done this.
572 */
573void __init paging_init(void)
574{
575 unsigned long zones_size[MAX_NR_ZONES];
576 unsigned long zholes_size[MAX_NR_ZONES];
577 unsigned long total_ram = lmb_phys_mem_size();
578 unsigned long top_of_ram = lmb_end_of_DRAM();
579
580 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
581 top_of_ram, total_ram);
582 printk(KERN_INFO "Memory hole size: %ldMB\n",
583 (top_of_ram - total_ram) >> 20);
584 /*
585 * All pages are DMA-able so we put them all in the DMA zone.
586 */
587 memset(zones_size, 0, sizeof(zones_size));
588 memset(zholes_size, 0, sizeof(zholes_size));
589
590 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
591 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
592
593 free_area_init_node(0, NODE_DATA(0), zones_size,
594 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
595}
596#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
597
598static struct kcore_list kcore_vmem;
599
600static int __init setup_kcore(void)
601{
602 int i;
603
604 for (i=0; i < lmb.memory.cnt; i++) {
605 unsigned long base, size;
606 struct kcore_list *kcore_mem;
607
608 base = lmb.memory.region[i].base;
609 size = lmb.memory.region[i].size;
610
611 /* GFP_ATOMIC to avoid might_sleep warnings during boot */
612 kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
613 if (!kcore_mem)
614 panic("mem_init: kmalloc failed\n");
615
616 kclist_add(kcore_mem, __va(base), size);
617 }
618
619 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
620
621 return 0;
622}
623module_init(setup_kcore);
624
625void __init mem_init(void)
626{
627#ifdef CONFIG_NEED_MULTIPLE_NODES
628 int nid;
629#endif
630 pg_data_t *pgdat;
631 unsigned long i;
632 struct page *page;
633 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
634
635 num_physpages = max_low_pfn; /* RAM is assumed contiguous */
636 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
637
638#ifdef CONFIG_NEED_MULTIPLE_NODES
639 for_each_online_node(nid) {
640 if (NODE_DATA(nid)->node_spanned_pages != 0) {
641 printk("freeing bootmem node %x\n", nid);
642 totalram_pages +=
643 free_all_bootmem_node(NODE_DATA(nid));
644 }
645 }
646#else
647 max_mapnr = num_physpages;
648 totalram_pages += free_all_bootmem();
649#endif
650
651 for_each_pgdat(pgdat) {
652 for (i = 0; i < pgdat->node_spanned_pages; i++) {
653 page = pgdat_page_nr(pgdat, i);
654 if (PageReserved(page))
655 reservedpages++;
656 }
657 }
658
659 codesize = (unsigned long)&_etext - (unsigned long)&_stext;
660 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
661 datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
662 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
663
664 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
665 "%luk reserved, %luk data, %luk bss, %luk init)\n",
666 (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
667 num_physpages << (PAGE_SHIFT-10),
668 codesize >> 10,
669 reservedpages << (PAGE_SHIFT-10),
670 datasize >> 10,
671 bsssize >> 10,
672 initsize >> 10);
673
674 mem_init_done = 1;
675
676 /* Initialize the vDSO */
677 vdso_init();
678}
679
680/*
681 * This is called when a page has been modified by the kernel.
682 * It just marks the page as not i-cache clean. We do the i-cache
683 * flush later when the page is given to a user process, if necessary.
684 */
685void flush_dcache_page(struct page *page)
686{
687 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
688 return;
689 /* avoid an atomic op if possible */
690 if (test_bit(PG_arch_1, &page->flags))
691 clear_bit(PG_arch_1, &page->flags);
692}
693EXPORT_SYMBOL(flush_dcache_page);
694
695void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
696{
697 clear_page(page);
698
699 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
700 return;
701 /*
702 * We shouldnt have to do this, but some versions of glibc
703 * require it (ld.so assumes zero filled pages are icache clean)
704 * - Anton
705 */
706
707 /* avoid an atomic op if possible */
708 if (test_bit(PG_arch_1, &pg->flags))
709 clear_bit(PG_arch_1, &pg->flags);
710}
711EXPORT_SYMBOL(clear_user_page);
712
713void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
714 struct page *pg)
715{
716 copy_page(vto, vfrom);
717
718 /*
719 * We should be able to use the following optimisation, however
720 * there are two problems.
721 * Firstly a bug in some versions of binutils meant PLT sections
722 * were not marked executable.
723 * Secondly the first word in the GOT section is blrl, used
724 * to establish the GOT address. Until recently the GOT was
725 * not marked executable.
726 * - Anton
727 */
728#if 0
729 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
730 return;
731#endif
732
733 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
734 return;
735
736 /* avoid an atomic op if possible */
737 if (test_bit(PG_arch_1, &pg->flags))
738 clear_bit(PG_arch_1, &pg->flags);
739}
740
741void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
742 unsigned long addr, int len)
743{
744 unsigned long maddr;
745
746 maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
747 flush_icache_range(maddr, maddr + len);
748}
749EXPORT_SYMBOL(flush_icache_user_range);
750
751/*
752 * This is called at the end of handling a user page fault, when the
753 * fault has been handled by updating a PTE in the linux page tables.
754 * We use it to preload an HPTE into the hash table corresponding to
755 * the updated linux PTE.
756 *
757 * This must always be called with the mm->page_table_lock held
758 */
759void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
760 pte_t pte)
761{
762 unsigned long vsid;
763 void *pgdir;
764 pte_t *ptep;
765 int local = 0;
766 cpumask_t tmp;
767 unsigned long flags;
768
769 /* handle i-cache coherency */
770 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
771 !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
772 unsigned long pfn = pte_pfn(pte);
773 if (pfn_valid(pfn)) {
774 struct page *page = pfn_to_page(pfn);
775 if (!PageReserved(page)
776 && !test_bit(PG_arch_1, &page->flags)) {
777 __flush_dcache_icache(page_address(page));
778 set_bit(PG_arch_1, &page->flags);
779 }
780 }
781 }
782
783 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
784 if (!pte_young(pte))
785 return;
786
787 pgdir = vma->vm_mm->pgd;
788 if (pgdir == NULL)
789 return;
790
791 ptep = find_linux_pte(pgdir, ea);
792 if (!ptep)
793 return;
794
795 vsid = get_vsid(vma->vm_mm->context.id, ea);
796
797 local_irq_save(flags);
798 tmp = cpumask_of_cpu(smp_processor_id());
799 if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
800 local = 1;
801
802 __hash_page(ea, 0, vsid, ptep, 0x300, local);
803 local_irq_restore(flags);
804}
805
806void __iomem * reserve_phb_iospace(unsigned long size)
807{
808 void __iomem *virt_addr;
809
810 if (phbs_io_bot >= IMALLOC_BASE)
811 panic("reserve_phb_iospace(): phb io space overflow\n");
812
813 virt_addr = (void __iomem *) phbs_io_bot;
814 phbs_io_bot += size;
815
816 return virt_addr;
817}
818
819static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
820{
821 memset(addr, 0, kmem_cache_size(cache));
822}
823
824static const int pgtable_cache_size[2] = {
825 PTE_TABLE_SIZE, PMD_TABLE_SIZE
826};
827static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
828 "pgd_pte_cache", "pud_pmd_cache",
829};
830
831kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
832
833void pgtable_cache_init(void)
834{
835 int i;
836
837 BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
838 BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
839 BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
840 BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
841
842 for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
843 int size = pgtable_cache_size[i];
844 const char *name = pgtable_cache_name[i];
845
846 pgtable_cache[i] = kmem_cache_create(name,
847 size, size,
848 SLAB_HWCACHE_ALIGN
849 | SLAB_MUST_HWCACHE_ALIGN,
850 zero_ctor,
851 NULL);
852 if (! pgtable_cache[i])
853 panic("pgtable_cache_init(): could not create %s!\n",
854 name);
855 }
856}
857
858pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
859 unsigned long size, pgprot_t vma_prot)
860{
861 if (ppc_md.phys_mem_access_prot)
862 return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
863
864 if (!page_is_ram(addr >> PAGE_SHIFT))
865 vma_prot = __pgprot(pgprot_val(vma_prot)
866 | _PAGE_GUARDED | _PAGE_NO_CACHE);
867 return vma_prot;
868}
869EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/ppc64/oprofile/Kconfig b/arch/ppc64/oprofile/Kconfig
deleted file mode 100644
index 5ade19801b97..000000000000
--- a/arch/ppc64/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/ppc64/oprofile/Makefile b/arch/ppc64/oprofile/Makefile
deleted file mode 100644
index 162dbf06c142..000000000000
--- a/arch/ppc64/oprofile/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1obj-$(CONFIG_OPROFILE) += oprofile.o
2
3DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
4 oprof.o cpu_buffer.o buffer_sync.o \
5 event_buffer.o oprofile_files.o \
6 oprofilefs.o oprofile_stats.o \
7 timer_int.o )
8
9oprofile-y := $(DRIVER_OBJS) common.o op_model_rs64.o op_model_power4.o
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 87d1f8a1f41e..d8c3d8ebad30 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -81,7 +81,7 @@ typedef struct pmac_ide_hwif {
81 81
82} pmac_ide_hwif_t; 82} pmac_ide_hwif_t;
83 83
84static pmac_ide_hwif_t pmac_ide[MAX_HWIFS] __pmacdata; 84static pmac_ide_hwif_t pmac_ide[MAX_HWIFS];
85static int pmac_ide_count; 85static int pmac_ide_count;
86 86
87enum { 87enum {
@@ -242,7 +242,7 @@ struct mdma_timings_t {
242 int cycleTime; 242 int cycleTime;
243}; 243};
244 244
245struct mdma_timings_t mdma_timings_33[] __pmacdata = 245struct mdma_timings_t mdma_timings_33[] =
246{ 246{
247 { 240, 240, 480 }, 247 { 240, 240, 480 },
248 { 180, 180, 360 }, 248 { 180, 180, 360 },
@@ -255,7 +255,7 @@ struct mdma_timings_t mdma_timings_33[] __pmacdata =
255 { 0, 0, 0 } 255 { 0, 0, 0 }
256}; 256};
257 257
258struct mdma_timings_t mdma_timings_33k[] __pmacdata = 258struct mdma_timings_t mdma_timings_33k[] =
259{ 259{
260 { 240, 240, 480 }, 260 { 240, 240, 480 },
261 { 180, 180, 360 }, 261 { 180, 180, 360 },
@@ -268,7 +268,7 @@ struct mdma_timings_t mdma_timings_33k[] __pmacdata =
268 { 0, 0, 0 } 268 { 0, 0, 0 }
269}; 269};
270 270
271struct mdma_timings_t mdma_timings_66[] __pmacdata = 271struct mdma_timings_t mdma_timings_66[] =
272{ 272{
273 { 240, 240, 480 }, 273 { 240, 240, 480 },
274 { 180, 180, 360 }, 274 { 180, 180, 360 },
@@ -286,7 +286,7 @@ struct {
286 int addrSetup; /* ??? */ 286 int addrSetup; /* ??? */
287 int rdy2pause; 287 int rdy2pause;
288 int wrDataSetup; 288 int wrDataSetup;
289} kl66_udma_timings[] __pmacdata = 289} kl66_udma_timings[] =
290{ 290{
291 { 0, 180, 120 }, /* Mode 0 */ 291 { 0, 180, 120 }, /* Mode 0 */
292 { 0, 150, 90 }, /* 1 */ 292 { 0, 150, 90 }, /* 1 */
@@ -301,7 +301,7 @@ struct kauai_timing {
301 u32 timing_reg; 301 u32 timing_reg;
302}; 302};
303 303
304static struct kauai_timing kauai_pio_timings[] __pmacdata = 304static struct kauai_timing kauai_pio_timings[] =
305{ 305{
306 { 930 , 0x08000fff }, 306 { 930 , 0x08000fff },
307 { 600 , 0x08000a92 }, 307 { 600 , 0x08000a92 },
@@ -316,7 +316,7 @@ static struct kauai_timing kauai_pio_timings[] __pmacdata =
316 { 120 , 0x04000148 } 316 { 120 , 0x04000148 }
317}; 317};
318 318
319static struct kauai_timing kauai_mdma_timings[] __pmacdata = 319static struct kauai_timing kauai_mdma_timings[] =
320{ 320{
321 { 1260 , 0x00fff000 }, 321 { 1260 , 0x00fff000 },
322 { 480 , 0x00618000 }, 322 { 480 , 0x00618000 },
@@ -330,7 +330,7 @@ static struct kauai_timing kauai_mdma_timings[] __pmacdata =
330 { 0 , 0 }, 330 { 0 , 0 },
331}; 331};
332 332
333static struct kauai_timing kauai_udma_timings[] __pmacdata = 333static struct kauai_timing kauai_udma_timings[] =
334{ 334{
335 { 120 , 0x000070c0 }, 335 { 120 , 0x000070c0 },
336 { 90 , 0x00005d80 }, 336 { 90 , 0x00005d80 },
@@ -341,7 +341,7 @@ static struct kauai_timing kauai_udma_timings[] __pmacdata =
341 { 0 , 0 }, 341 { 0 , 0 },
342}; 342};
343 343
344static struct kauai_timing shasta_pio_timings[] __pmacdata = 344static struct kauai_timing shasta_pio_timings[] =
345{ 345{
346 { 930 , 0x08000fff }, 346 { 930 , 0x08000fff },
347 { 600 , 0x0A000c97 }, 347 { 600 , 0x0A000c97 },
@@ -356,7 +356,7 @@ static struct kauai_timing shasta_pio_timings[] __pmacdata =
356 { 120 , 0x0400010a } 356 { 120 , 0x0400010a }
357}; 357};
358 358
359static struct kauai_timing shasta_mdma_timings[] __pmacdata = 359static struct kauai_timing shasta_mdma_timings[] =
360{ 360{
361 { 1260 , 0x00fff000 }, 361 { 1260 , 0x00fff000 },
362 { 480 , 0x00820800 }, 362 { 480 , 0x00820800 },
@@ -370,7 +370,7 @@ static struct kauai_timing shasta_mdma_timings[] __pmacdata =
370 { 0 , 0 }, 370 { 0 , 0 },
371}; 371};
372 372
373static struct kauai_timing shasta_udma133_timings[] __pmacdata = 373static struct kauai_timing shasta_udma133_timings[] =
374{ 374{
375 { 120 , 0x00035901, }, 375 { 120 , 0x00035901, },
376 { 90 , 0x000348b1, }, 376 { 90 , 0x000348b1, },
@@ -522,7 +522,7 @@ pmu_hd_blink_init(void)
522 * N.B. this can't be an initfunc, because the media-bay task can 522 * N.B. this can't be an initfunc, because the media-bay task can
523 * call ide_[un]register at any time. 523 * call ide_[un]register at any time.
524 */ 524 */
525void __pmac 525void
526pmac_ide_init_hwif_ports(hw_regs_t *hw, 526pmac_ide_init_hwif_ports(hw_regs_t *hw,
527 unsigned long data_port, unsigned long ctrl_port, 527 unsigned long data_port, unsigned long ctrl_port,
528 int *irq) 528 int *irq)
@@ -559,7 +559,7 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw,
559 * timing register when selecting that unit. This version is for 559 * timing register when selecting that unit. This version is for
560 * ASICs with a single timing register 560 * ASICs with a single timing register
561 */ 561 */
562static void __pmac 562static void
563pmac_ide_selectproc(ide_drive_t *drive) 563pmac_ide_selectproc(ide_drive_t *drive)
564{ 564{
565 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 565 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -579,7 +579,7 @@ pmac_ide_selectproc(ide_drive_t *drive)
579 * timing register when selecting that unit. This version is for 579 * timing register when selecting that unit. This version is for
580 * ASICs with a dual timing register (Kauai) 580 * ASICs with a dual timing register (Kauai)
581 */ 581 */
582static void __pmac 582static void
583pmac_ide_kauai_selectproc(ide_drive_t *drive) 583pmac_ide_kauai_selectproc(ide_drive_t *drive)
584{ 584{
585 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 585 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -600,7 +600,7 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive)
600/* 600/*
601 * Force an update of controller timing values for a given drive 601 * Force an update of controller timing values for a given drive
602 */ 602 */
603static void __pmac 603static void
604pmac_ide_do_update_timings(ide_drive_t *drive) 604pmac_ide_do_update_timings(ide_drive_t *drive)
605{ 605{
606 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 606 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -633,7 +633,7 @@ pmac_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
633 * to sort that out sooner or later and see if I can finally get the 633 * to sort that out sooner or later and see if I can finally get the
634 * common version to work properly in all cases 634 * common version to work properly in all cases
635 */ 635 */
636static int __pmac 636static int
637pmac_ide_do_setfeature(ide_drive_t *drive, u8 command) 637pmac_ide_do_setfeature(ide_drive_t *drive, u8 command)
638{ 638{
639 ide_hwif_t *hwif = HWIF(drive); 639 ide_hwif_t *hwif = HWIF(drive);
@@ -710,7 +710,7 @@ out:
710/* 710/*
711 * Old tuning functions (called on hdparm -p), sets up drive PIO timings 711 * Old tuning functions (called on hdparm -p), sets up drive PIO timings
712 */ 712 */
713static void __pmac 713static void
714pmac_ide_tuneproc(ide_drive_t *drive, u8 pio) 714pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
715{ 715{
716 ide_pio_data_t d; 716 ide_pio_data_t d;
@@ -801,7 +801,7 @@ pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
801/* 801/*
802 * Calculate KeyLargo ATA/66 UDMA timings 802 * Calculate KeyLargo ATA/66 UDMA timings
803 */ 803 */
804static int __pmac 804static int
805set_timings_udma_ata4(u32 *timings, u8 speed) 805set_timings_udma_ata4(u32 *timings, u8 speed)
806{ 806{
807 unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks; 807 unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
@@ -829,7 +829,7 @@ set_timings_udma_ata4(u32 *timings, u8 speed)
829/* 829/*
830 * Calculate Kauai ATA/100 UDMA timings 830 * Calculate Kauai ATA/100 UDMA timings
831 */ 831 */
832static int __pmac 832static int
833set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed) 833set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
834{ 834{
835 struct ide_timing *t = ide_timing_find_mode(speed); 835 struct ide_timing *t = ide_timing_find_mode(speed);
@@ -849,7 +849,7 @@ set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
849/* 849/*
850 * Calculate Shasta ATA/133 UDMA timings 850 * Calculate Shasta ATA/133 UDMA timings
851 */ 851 */
852static int __pmac 852static int
853set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed) 853set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
854{ 854{
855 struct ide_timing *t = ide_timing_find_mode(speed); 855 struct ide_timing *t = ide_timing_find_mode(speed);
@@ -869,7 +869,7 @@ set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
869/* 869/*
870 * Calculate MDMA timings for all cells 870 * Calculate MDMA timings for all cells
871 */ 871 */
872static int __pmac 872static int
873set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2, 873set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
874 u8 speed, int drive_cycle_time) 874 u8 speed, int drive_cycle_time)
875{ 875{
@@ -1014,7 +1014,7 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
1014 * our dedicated function is more precise as it uses the drive provided 1014 * our dedicated function is more precise as it uses the drive provided
1015 * cycle time value. We should probably fix this one to deal with that too... 1015 * cycle time value. We should probably fix this one to deal with that too...
1016 */ 1016 */
1017static int __pmac 1017static int
1018pmac_ide_tune_chipset (ide_drive_t *drive, byte speed) 1018pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
1019{ 1019{
1020 int unit = (drive->select.b.unit & 0x01); 1020 int unit = (drive->select.b.unit & 0x01);
@@ -1092,7 +1092,7 @@ pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
1092 * Blast some well known "safe" values to the timing registers at init or 1092 * Blast some well known "safe" values to the timing registers at init or
1093 * wakeup from sleep time, before we do real calculation 1093 * wakeup from sleep time, before we do real calculation
1094 */ 1094 */
1095static void __pmac 1095static void
1096sanitize_timings(pmac_ide_hwif_t *pmif) 1096sanitize_timings(pmac_ide_hwif_t *pmif)
1097{ 1097{
1098 unsigned int value, value2 = 0; 1098 unsigned int value, value2 = 0;
@@ -1123,13 +1123,13 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
1123 pmif->timings[2] = pmif->timings[3] = value2; 1123 pmif->timings[2] = pmif->timings[3] = value2;
1124} 1124}
1125 1125
1126unsigned long __pmac 1126unsigned long
1127pmac_ide_get_base(int index) 1127pmac_ide_get_base(int index)
1128{ 1128{
1129 return pmac_ide[index].regbase; 1129 return pmac_ide[index].regbase;
1130} 1130}
1131 1131
1132int __pmac 1132int
1133pmac_ide_check_base(unsigned long base) 1133pmac_ide_check_base(unsigned long base)
1134{ 1134{
1135 int ix; 1135 int ix;
@@ -1140,7 +1140,7 @@ pmac_ide_check_base(unsigned long base)
1140 return -1; 1140 return -1;
1141} 1141}
1142 1142
1143int __pmac 1143int
1144pmac_ide_get_irq(unsigned long base) 1144pmac_ide_get_irq(unsigned long base)
1145{ 1145{
1146 int ix; 1146 int ix;
@@ -1151,7 +1151,7 @@ pmac_ide_get_irq(unsigned long base)
1151 return 0; 1151 return 0;
1152} 1152}
1153 1153
1154static int ide_majors[] __pmacdata = { 3, 22, 33, 34, 56, 57 }; 1154static int ide_majors[] = { 3, 22, 33, 34, 56, 57 };
1155 1155
1156dev_t __init 1156dev_t __init
1157pmac_find_ide_boot(char *bootdevice, int n) 1157pmac_find_ide_boot(char *bootdevice, int n)
@@ -1701,7 +1701,7 @@ pmac_ide_probe(void)
1701 * pmac_ide_build_dmatable builds the DBDMA command list 1701 * pmac_ide_build_dmatable builds the DBDMA command list
1702 * for a transfer and sets the DBDMA channel to point to it. 1702 * for a transfer and sets the DBDMA channel to point to it.
1703 */ 1703 */
1704static int __pmac 1704static int
1705pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq) 1705pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1706{ 1706{
1707 struct dbdma_cmd *table; 1707 struct dbdma_cmd *table;
@@ -1785,7 +1785,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1785} 1785}
1786 1786
1787/* Teardown mappings after DMA has completed. */ 1787/* Teardown mappings after DMA has completed. */
1788static void __pmac 1788static void
1789pmac_ide_destroy_dmatable (ide_drive_t *drive) 1789pmac_ide_destroy_dmatable (ide_drive_t *drive)
1790{ 1790{
1791 ide_hwif_t *hwif = drive->hwif; 1791 ide_hwif_t *hwif = drive->hwif;
@@ -1802,7 +1802,7 @@ pmac_ide_destroy_dmatable (ide_drive_t *drive)
1802/* 1802/*
1803 * Pick up best MDMA timing for the drive and apply it 1803 * Pick up best MDMA timing for the drive and apply it
1804 */ 1804 */
1805static int __pmac 1805static int
1806pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode) 1806pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
1807{ 1807{
1808 ide_hwif_t *hwif = HWIF(drive); 1808 ide_hwif_t *hwif = HWIF(drive);
@@ -1859,7 +1859,7 @@ pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
1859/* 1859/*
1860 * Pick up best UDMA timing for the drive and apply it 1860 * Pick up best UDMA timing for the drive and apply it
1861 */ 1861 */
1862static int __pmac 1862static int
1863pmac_ide_udma_enable(ide_drive_t *drive, u16 mode) 1863pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
1864{ 1864{
1865 ide_hwif_t *hwif = HWIF(drive); 1865 ide_hwif_t *hwif = HWIF(drive);
@@ -1915,7 +1915,7 @@ pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
1915 * Check what is the best DMA timing setting for the drive and 1915 * Check what is the best DMA timing setting for the drive and
1916 * call appropriate functions to apply it. 1916 * call appropriate functions to apply it.
1917 */ 1917 */
1918static int __pmac 1918static int
1919pmac_ide_dma_check(ide_drive_t *drive) 1919pmac_ide_dma_check(ide_drive_t *drive)
1920{ 1920{
1921 struct hd_driveid *id = drive->id; 1921 struct hd_driveid *id = drive->id;
@@ -1967,7 +1967,7 @@ pmac_ide_dma_check(ide_drive_t *drive)
1967 * Prepare a DMA transfer. We build the DMA table, adjust the timings for 1967 * Prepare a DMA transfer. We build the DMA table, adjust the timings for
1968 * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion 1968 * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
1969 */ 1969 */
1970static int __pmac 1970static int
1971pmac_ide_dma_setup(ide_drive_t *drive) 1971pmac_ide_dma_setup(ide_drive_t *drive)
1972{ 1972{
1973 ide_hwif_t *hwif = HWIF(drive); 1973 ide_hwif_t *hwif = HWIF(drive);
@@ -1997,7 +1997,7 @@ pmac_ide_dma_setup(ide_drive_t *drive)
1997 return 0; 1997 return 0;
1998} 1998}
1999 1999
2000static void __pmac 2000static void
2001pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command) 2001pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
2002{ 2002{
2003 /* issue cmd to drive */ 2003 /* issue cmd to drive */
@@ -2008,7 +2008,7 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
2008 * Kick the DMA controller into life after the DMA command has been issued 2008 * Kick the DMA controller into life after the DMA command has been issued
2009 * to the drive. 2009 * to the drive.
2010 */ 2010 */
2011static void __pmac 2011static void
2012pmac_ide_dma_start(ide_drive_t *drive) 2012pmac_ide_dma_start(ide_drive_t *drive)
2013{ 2013{
2014 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 2014 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2024,7 +2024,7 @@ pmac_ide_dma_start(ide_drive_t *drive)
2024/* 2024/*
2025 * After a DMA transfer, make sure the controller is stopped 2025 * After a DMA transfer, make sure the controller is stopped
2026 */ 2026 */
2027static int __pmac 2027static int
2028pmac_ide_dma_end (ide_drive_t *drive) 2028pmac_ide_dma_end (ide_drive_t *drive)
2029{ 2029{
2030 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 2030 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2052,7 +2052,7 @@ pmac_ide_dma_end (ide_drive_t *drive)
2052 * that's not implemented yet), on the other hand, we don't have shared interrupts 2052 * that's not implemented yet), on the other hand, we don't have shared interrupts
2053 * so it's not really a problem 2053 * so it's not really a problem
2054 */ 2054 */
2055static int __pmac 2055static int
2056pmac_ide_dma_test_irq (ide_drive_t *drive) 2056pmac_ide_dma_test_irq (ide_drive_t *drive)
2057{ 2057{
2058 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 2058 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2108,19 +2108,19 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
2108 return 1; 2108 return 1;
2109} 2109}
2110 2110
2111static int __pmac 2111static int
2112pmac_ide_dma_host_off (ide_drive_t *drive) 2112pmac_ide_dma_host_off (ide_drive_t *drive)
2113{ 2113{
2114 return 0; 2114 return 0;
2115} 2115}
2116 2116
2117static int __pmac 2117static int
2118pmac_ide_dma_host_on (ide_drive_t *drive) 2118pmac_ide_dma_host_on (ide_drive_t *drive)
2119{ 2119{
2120 return 0; 2120 return 0;
2121} 2121}
2122 2122
2123static int __pmac 2123static int
2124pmac_ide_dma_lostirq (ide_drive_t *drive) 2124pmac_ide_dma_lostirq (ide_drive_t *drive)
2125{ 2125{
2126 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 2126 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 5e0811dc6536..2b8a6e821d44 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -27,7 +27,7 @@ static volatile unsigned char __iomem *anslcd_ptr;
27 27
28#undef DEBUG 28#undef DEBUG
29 29
30static void __pmac 30static void
31anslcd_write_byte_ctrl ( unsigned char c ) 31anslcd_write_byte_ctrl ( unsigned char c )
32{ 32{
33#ifdef DEBUG 33#ifdef DEBUG
@@ -43,14 +43,14 @@ anslcd_write_byte_ctrl ( unsigned char c )
43 } 43 }
44} 44}
45 45
46static void __pmac 46static void
47anslcd_write_byte_data ( unsigned char c ) 47anslcd_write_byte_data ( unsigned char c )
48{ 48{
49 out_8(anslcd_ptr + ANSLCD_DATA_IX, c); 49 out_8(anslcd_ptr + ANSLCD_DATA_IX, c);
50 udelay(anslcd_short_delay); 50 udelay(anslcd_short_delay);
51} 51}
52 52
53static ssize_t __pmac 53static ssize_t
54anslcd_write( struct file * file, const char __user * buf, 54anslcd_write( struct file * file, const char __user * buf,
55 size_t count, loff_t *ppos ) 55 size_t count, loff_t *ppos )
56{ 56{
@@ -73,7 +73,7 @@ anslcd_write( struct file * file, const char __user * buf,
73 return p - buf; 73 return p - buf;
74} 74}
75 75
76static int __pmac 76static int
77anslcd_ioctl( struct inode * inode, struct file * file, 77anslcd_ioctl( struct inode * inode, struct file * file,
78 unsigned int cmd, unsigned long arg ) 78 unsigned int cmd, unsigned long arg )
79{ 79{
@@ -115,7 +115,7 @@ anslcd_ioctl( struct inode * inode, struct file * file,
115 } 115 }
116} 116}
117 117
118static int __pmac 118static int
119anslcd_open( struct inode * inode, struct file * file ) 119anslcd_open( struct inode * inode, struct file * file )
120{ 120{
121 return 0; 121 return 0;
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index c0712a1ea5af..b856bb67169c 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -167,19 +167,19 @@ enum {
167 * Functions for polling content of media bay 167 * Functions for polling content of media bay
168 */ 168 */
169 169
170static u8 __pmac 170static u8
171ohare_mb_content(struct media_bay_info *bay) 171ohare_mb_content(struct media_bay_info *bay)
172{ 172{
173 return (MB_IN32(bay, OHARE_MBCR) >> 12) & 7; 173 return (MB_IN32(bay, OHARE_MBCR) >> 12) & 7;
174} 174}
175 175
176static u8 __pmac 176static u8
177heathrow_mb_content(struct media_bay_info *bay) 177heathrow_mb_content(struct media_bay_info *bay)
178{ 178{
179 return (MB_IN32(bay, HEATHROW_MBCR) >> 12) & 7; 179 return (MB_IN32(bay, HEATHROW_MBCR) >> 12) & 7;
180} 180}
181 181
182static u8 __pmac 182static u8
183keylargo_mb_content(struct media_bay_info *bay) 183keylargo_mb_content(struct media_bay_info *bay)
184{ 184{
185 int new_gpio; 185 int new_gpio;
@@ -205,7 +205,7 @@ keylargo_mb_content(struct media_bay_info *bay)
205 * into reset state as well 205 * into reset state as well
206 */ 206 */
207 207
208static void __pmac 208static void
209ohare_mb_power(struct media_bay_info* bay, int on_off) 209ohare_mb_power(struct media_bay_info* bay, int on_off)
210{ 210{
211 if (on_off) { 211 if (on_off) {
@@ -224,7 +224,7 @@ ohare_mb_power(struct media_bay_info* bay, int on_off)
224 MB_BIC(bay, OHARE_MBCR, 0x00000F00); 224 MB_BIC(bay, OHARE_MBCR, 0x00000F00);
225} 225}
226 226
227static void __pmac 227static void
228heathrow_mb_power(struct media_bay_info* bay, int on_off) 228heathrow_mb_power(struct media_bay_info* bay, int on_off)
229{ 229{
230 if (on_off) { 230 if (on_off) {
@@ -243,7 +243,7 @@ heathrow_mb_power(struct media_bay_info* bay, int on_off)
243 MB_BIC(bay, HEATHROW_MBCR, 0x00000F00); 243 MB_BIC(bay, HEATHROW_MBCR, 0x00000F00);
244} 244}
245 245
246static void __pmac 246static void
247keylargo_mb_power(struct media_bay_info* bay, int on_off) 247keylargo_mb_power(struct media_bay_info* bay, int on_off)
248{ 248{
249 if (on_off) { 249 if (on_off) {
@@ -267,7 +267,7 @@ keylargo_mb_power(struct media_bay_info* bay, int on_off)
267 * enable the related busses 267 * enable the related busses
268 */ 268 */
269 269
270static int __pmac 270static int
271ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id) 271ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
272{ 272{
273 switch(device_id) { 273 switch(device_id) {
@@ -287,7 +287,7 @@ ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
287 return -ENODEV; 287 return -ENODEV;
288} 288}
289 289
290static int __pmac 290static int
291heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id) 291heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
292{ 292{
293 switch(device_id) { 293 switch(device_id) {
@@ -307,7 +307,7 @@ heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
307 return -ENODEV; 307 return -ENODEV;
308} 308}
309 309
310static int __pmac 310static int
311keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id) 311keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
312{ 312{
313 switch(device_id) { 313 switch(device_id) {
@@ -330,43 +330,43 @@ keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
330 * Functions for tweaking resets 330 * Functions for tweaking resets
331 */ 331 */
332 332
333static void __pmac 333static void
334ohare_mb_un_reset(struct media_bay_info* bay) 334ohare_mb_un_reset(struct media_bay_info* bay)
335{ 335{
336 MB_BIS(bay, OHARE_FCR, OH_BAY_RESET_N); 336 MB_BIS(bay, OHARE_FCR, OH_BAY_RESET_N);
337} 337}
338 338
339static void __pmac keylargo_mb_init(struct media_bay_info *bay) 339static void keylargo_mb_init(struct media_bay_info *bay)
340{ 340{
341 MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_ENABLE); 341 MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_ENABLE);
342} 342}
343 343
344static void __pmac heathrow_mb_un_reset(struct media_bay_info* bay) 344static void heathrow_mb_un_reset(struct media_bay_info* bay)
345{ 345{
346 MB_BIS(bay, HEATHROW_FCR, HRW_BAY_RESET_N); 346 MB_BIS(bay, HEATHROW_FCR, HRW_BAY_RESET_N);
347} 347}
348 348
349static void __pmac keylargo_mb_un_reset(struct media_bay_info* bay) 349static void keylargo_mb_un_reset(struct media_bay_info* bay)
350{ 350{
351 MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_DEV_RESET); 351 MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_DEV_RESET);
352} 352}
353 353
354static void __pmac ohare_mb_un_reset_ide(struct media_bay_info* bay) 354static void ohare_mb_un_reset_ide(struct media_bay_info* bay)
355{ 355{
356 MB_BIS(bay, OHARE_FCR, OH_IDE1_RESET_N); 356 MB_BIS(bay, OHARE_FCR, OH_IDE1_RESET_N);
357} 357}
358 358
359static void __pmac heathrow_mb_un_reset_ide(struct media_bay_info* bay) 359static void heathrow_mb_un_reset_ide(struct media_bay_info* bay)
360{ 360{
361 MB_BIS(bay, HEATHROW_FCR, HRW_IDE1_RESET_N); 361 MB_BIS(bay, HEATHROW_FCR, HRW_IDE1_RESET_N);
362} 362}
363 363
364static void __pmac keylargo_mb_un_reset_ide(struct media_bay_info* bay) 364static void keylargo_mb_un_reset_ide(struct media_bay_info* bay)
365{ 365{
366 MB_BIS(bay, KEYLARGO_FCR1, KL1_EIDE0_RESET_N); 366 MB_BIS(bay, KEYLARGO_FCR1, KL1_EIDE0_RESET_N);
367} 367}
368 368
369static inline void __pmac set_mb_power(struct media_bay_info* bay, int onoff) 369static inline void set_mb_power(struct media_bay_info* bay, int onoff)
370{ 370{
371 /* Power up up and assert the bay reset line */ 371 /* Power up up and assert the bay reset line */
372 if (onoff) { 372 if (onoff) {
@@ -382,7 +382,7 @@ static inline void __pmac set_mb_power(struct media_bay_info* bay, int onoff)
382 bay->timer = msecs_to_jiffies(MB_POWER_DELAY); 382 bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
383} 383}
384 384
385static void __pmac poll_media_bay(struct media_bay_info* bay) 385static void poll_media_bay(struct media_bay_info* bay)
386{ 386{
387 int id = bay->ops->content(bay); 387 int id = bay->ops->content(bay);
388 388
@@ -415,7 +415,7 @@ static void __pmac poll_media_bay(struct media_bay_info* bay)
415 } 415 }
416} 416}
417 417
418int __pmac check_media_bay(struct device_node *which_bay, int what) 418int check_media_bay(struct device_node *which_bay, int what)
419{ 419{
420#ifdef CONFIG_BLK_DEV_IDE 420#ifdef CONFIG_BLK_DEV_IDE
421 int i; 421 int i;
@@ -432,7 +432,7 @@ int __pmac check_media_bay(struct device_node *which_bay, int what)
432} 432}
433EXPORT_SYMBOL(check_media_bay); 433EXPORT_SYMBOL(check_media_bay);
434 434
435int __pmac check_media_bay_by_base(unsigned long base, int what) 435int check_media_bay_by_base(unsigned long base, int what)
436{ 436{
437#ifdef CONFIG_BLK_DEV_IDE 437#ifdef CONFIG_BLK_DEV_IDE
438 int i; 438 int i;
@@ -449,7 +449,7 @@ int __pmac check_media_bay_by_base(unsigned long base, int what)
449 return -ENODEV; 449 return -ENODEV;
450} 450}
451 451
452int __pmac media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base, 452int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
453 int irq, int index) 453 int irq, int index)
454{ 454{
455#ifdef CONFIG_BLK_DEV_IDE 455#ifdef CONFIG_BLK_DEV_IDE
@@ -489,7 +489,7 @@ int __pmac media_bay_set_ide_infos(struct device_node* which_bay, unsigned long
489 return -ENODEV; 489 return -ENODEV;
490} 490}
491 491
492static void __pmac media_bay_step(int i) 492static void media_bay_step(int i)
493{ 493{
494 struct media_bay_info* bay = &media_bays[i]; 494 struct media_bay_info* bay = &media_bays[i];
495 495
@@ -619,7 +619,7 @@ static void __pmac media_bay_step(int i)
619 * with the IDE driver. It needs to be a thread because 619 * with the IDE driver. It needs to be a thread because
620 * ide_register can't be called from interrupt context. 620 * ide_register can't be called from interrupt context.
621 */ 621 */
622static int __pmac media_bay_task(void *x) 622static int media_bay_task(void *x)
623{ 623{
624 int i; 624 int i;
625 625
@@ -704,7 +704,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
704 704
705} 705}
706 706
707static int __pmac media_bay_suspend(struct macio_dev *mdev, pm_message_t state) 707static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
708{ 708{
709 struct media_bay_info *bay = macio_get_drvdata(mdev); 709 struct media_bay_info *bay = macio_get_drvdata(mdev);
710 710
@@ -719,7 +719,7 @@ static int __pmac media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
719 return 0; 719 return 0;
720} 720}
721 721
722static int __pmac media_bay_resume(struct macio_dev *mdev) 722static int media_bay_resume(struct macio_dev *mdev)
723{ 723{
724 struct media_bay_info *bay = macio_get_drvdata(mdev); 724 struct media_bay_info *bay = macio_get_drvdata(mdev);
725 725
@@ -760,7 +760,7 @@ static int __pmac media_bay_resume(struct macio_dev *mdev)
760 760
761/* Definitions of "ops" structures. 761/* Definitions of "ops" structures.
762 */ 762 */
763static struct mb_ops ohare_mb_ops __pmacdata = { 763static struct mb_ops ohare_mb_ops = {
764 .name = "Ohare", 764 .name = "Ohare",
765 .content = ohare_mb_content, 765 .content = ohare_mb_content,
766 .power = ohare_mb_power, 766 .power = ohare_mb_power,
@@ -769,7 +769,7 @@ static struct mb_ops ohare_mb_ops __pmacdata = {
769 .un_reset_ide = ohare_mb_un_reset_ide, 769 .un_reset_ide = ohare_mb_un_reset_ide,
770}; 770};
771 771
772static struct mb_ops heathrow_mb_ops __pmacdata = { 772static struct mb_ops heathrow_mb_ops = {
773 .name = "Heathrow", 773 .name = "Heathrow",
774 .content = heathrow_mb_content, 774 .content = heathrow_mb_content,
775 .power = heathrow_mb_power, 775 .power = heathrow_mb_power,
@@ -778,7 +778,7 @@ static struct mb_ops heathrow_mb_ops __pmacdata = {
778 .un_reset_ide = heathrow_mb_un_reset_ide, 778 .un_reset_ide = heathrow_mb_un_reset_ide,
779}; 779};
780 780
781static struct mb_ops keylargo_mb_ops __pmacdata = { 781static struct mb_ops keylargo_mb_ops = {
782 .name = "KeyLargo", 782 .name = "KeyLargo",
783 .init = keylargo_mb_init, 783 .init = keylargo_mb_init,
784 .content = keylargo_mb_content, 784 .content = keylargo_mb_content,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 9b38674fbf75..34f3c7e2d832 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1094,7 +1094,7 @@ static int smu_release(struct inode *inode, struct file *file)
1094} 1094}
1095 1095
1096 1096
1097static struct file_operations smu_device_fops __pmacdata = { 1097static struct file_operations smu_device_fops = {
1098 .llseek = no_llseek, 1098 .llseek = no_llseek,
1099 .read = smu_read, 1099 .read = smu_read,
1100 .write = smu_write, 1100 .write = smu_write,
@@ -1103,7 +1103,7 @@ static struct file_operations smu_device_fops __pmacdata = {
1103 .release = smu_release, 1103 .release = smu_release,
1104}; 1104};
1105 1105
1106static struct miscdevice pmu_device __pmacdata = { 1106static struct miscdevice pmu_device = {
1107 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops 1107 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
1108}; 1108};
1109 1109
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 417deb5de108..d843a6c9c6df 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -37,7 +37,6 @@ static DEFINE_SPINLOCK(cuda_lock);
37 37
38#ifdef CONFIG_MAC 38#ifdef CONFIG_MAC
39#define CUDA_IRQ IRQ_MAC_ADB 39#define CUDA_IRQ IRQ_MAC_ADB
40#define __openfirmware
41#define eieio() 40#define eieio()
42#else 41#else
43#define CUDA_IRQ vias->intrs[0].line 42#define CUDA_IRQ vias->intrs[0].line
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 645a2e5c70ab..91920a1140fa 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -244,7 +244,7 @@ int pmu_wink(struct adb_request *req);
244 * - the number of response bytes which the PMU will return, or 244 * - the number of response bytes which the PMU will return, or
245 * -1 if it will send a length byte. 245 * -1 if it will send a length byte.
246 */ 246 */
247static const s8 pmu_data_len[256][2] __openfirmwaredata = { 247static const s8 pmu_data_len[256][2] = {
248/* 0 1 2 3 4 5 6 7 */ 248/* 0 1 2 3 4 5 6 7 */
249/*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 249/*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
250/*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 250/*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
@@ -295,7 +295,7 @@ static struct backlight_controller pmu_backlight_controller = {
295}; 295};
296#endif /* CONFIG_PMAC_BACKLIGHT */ 296#endif /* CONFIG_PMAC_BACKLIGHT */
297 297
298int __openfirmware 298int
299find_via_pmu(void) 299find_via_pmu(void)
300{ 300{
301 if (via != 0) 301 if (via != 0)
@@ -374,7 +374,7 @@ find_via_pmu(void)
374} 374}
375 375
376#ifdef CONFIG_ADB 376#ifdef CONFIG_ADB
377static int __openfirmware 377static int
378pmu_probe(void) 378pmu_probe(void)
379{ 379{
380 return vias == NULL? -ENODEV: 0; 380 return vias == NULL? -ENODEV: 0;
@@ -405,7 +405,7 @@ static int __init via_pmu_start(void)
405 bright_req_2.complete = 1; 405 bright_req_2.complete = 1;
406 batt_req.complete = 1; 406 batt_req.complete = 1;
407 407
408#ifdef CONFIG_PPC32 408#if defined(CONFIG_PPC32) && !defined(CONFIG_PPC_MERGE)
409 if (pmu_kind == PMU_KEYLARGO_BASED) 409 if (pmu_kind == PMU_KEYLARGO_BASED)
410 openpic_set_irq_priority(vias->intrs[0].line, 410 openpic_set_irq_priority(vias->intrs[0].line,
411 OPENPIC_PRIORITY_DEFAULT + 1); 411 OPENPIC_PRIORITY_DEFAULT + 1);
@@ -520,7 +520,7 @@ static int __init via_pmu_dev_init(void)
520 520
521device_initcall(via_pmu_dev_init); 521device_initcall(via_pmu_dev_init);
522 522
523static int __openfirmware 523static int
524init_pmu(void) 524init_pmu(void)
525{ 525{
526 int timeout; 526 int timeout;
@@ -588,17 +588,6 @@ pmu_get_model(void)
588 return pmu_kind; 588 return pmu_kind;
589} 589}
590 590
591#ifndef CONFIG_PPC64
592static inline void wakeup_decrementer(void)
593{
594 set_dec(tb_ticks_per_jiffy);
595 /* No currently-supported powerbook has a 601,
596 * so use get_tbl, not native
597 */
598 last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
599}
600#endif
601
602static void pmu_set_server_mode(int server_mode) 591static void pmu_set_server_mode(int server_mode)
603{ 592{
604 struct adb_request req; 593 struct adb_request req;
@@ -625,7 +614,7 @@ static void pmu_set_server_mode(int server_mode)
625/* This new version of the code for 2400/3400/3500 powerbooks 614/* This new version of the code for 2400/3400/3500 powerbooks
626 * is inspired from the implementation in gkrellm-pmu 615 * is inspired from the implementation in gkrellm-pmu
627 */ 616 */
628static void __pmac 617static void
629done_battery_state_ohare(struct adb_request* req) 618done_battery_state_ohare(struct adb_request* req)
630{ 619{
631 /* format: 620 /* format:
@@ -713,7 +702,7 @@ done_battery_state_ohare(struct adb_request* req)
713 clear_bit(0, &async_req_locks); 702 clear_bit(0, &async_req_locks);
714} 703}
715 704
716static void __pmac 705static void
717done_battery_state_smart(struct adb_request* req) 706done_battery_state_smart(struct adb_request* req)
718{ 707{
719 /* format: 708 /* format:
@@ -791,7 +780,7 @@ done_battery_state_smart(struct adb_request* req)
791 clear_bit(0, &async_req_locks); 780 clear_bit(0, &async_req_locks);
792} 781}
793 782
794static void __pmac 783static void
795query_battery_state(void) 784query_battery_state(void)
796{ 785{
797 if (test_and_set_bit(0, &async_req_locks)) 786 if (test_and_set_bit(0, &async_req_locks))
@@ -804,7 +793,7 @@ query_battery_state(void)
804 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); 793 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
805} 794}
806 795
807static int __pmac 796static int
808proc_get_info(char *page, char **start, off_t off, 797proc_get_info(char *page, char **start, off_t off,
809 int count, int *eof, void *data) 798 int count, int *eof, void *data)
810{ 799{
@@ -819,7 +808,7 @@ proc_get_info(char *page, char **start, off_t off,
819 return p - page; 808 return p - page;
820} 809}
821 810
822static int __pmac 811static int
823proc_get_irqstats(char *page, char **start, off_t off, 812proc_get_irqstats(char *page, char **start, off_t off,
824 int count, int *eof, void *data) 813 int count, int *eof, void *data)
825{ 814{
@@ -846,7 +835,7 @@ proc_get_irqstats(char *page, char **start, off_t off,
846 return p - page; 835 return p - page;
847} 836}
848 837
849static int __pmac 838static int
850proc_get_batt(char *page, char **start, off_t off, 839proc_get_batt(char *page, char **start, off_t off,
851 int count, int *eof, void *data) 840 int count, int *eof, void *data)
852{ 841{
@@ -870,7 +859,7 @@ proc_get_batt(char *page, char **start, off_t off,
870 return p - page; 859 return p - page;
871} 860}
872 861
873static int __pmac 862static int
874proc_read_options(char *page, char **start, off_t off, 863proc_read_options(char *page, char **start, off_t off,
875 int count, int *eof, void *data) 864 int count, int *eof, void *data)
876{ 865{
@@ -887,7 +876,7 @@ proc_read_options(char *page, char **start, off_t off,
887 return p - page; 876 return p - page;
888} 877}
889 878
890static int __pmac 879static int
891proc_write_options(struct file *file, const char __user *buffer, 880proc_write_options(struct file *file, const char __user *buffer,
892 unsigned long count, void *data) 881 unsigned long count, void *data)
893{ 882{
@@ -934,7 +923,7 @@ proc_write_options(struct file *file, const char __user *buffer,
934 923
935#ifdef CONFIG_ADB 924#ifdef CONFIG_ADB
936/* Send an ADB command */ 925/* Send an ADB command */
937static int __pmac 926static int
938pmu_send_request(struct adb_request *req, int sync) 927pmu_send_request(struct adb_request *req, int sync)
939{ 928{
940 int i, ret; 929 int i, ret;
@@ -1014,7 +1003,7 @@ pmu_send_request(struct adb_request *req, int sync)
1014} 1003}
1015 1004
1016/* Enable/disable autopolling */ 1005/* Enable/disable autopolling */
1017static int __pmac 1006static int
1018pmu_adb_autopoll(int devs) 1007pmu_adb_autopoll(int devs)
1019{ 1008{
1020 struct adb_request req; 1009 struct adb_request req;
@@ -1037,7 +1026,7 @@ pmu_adb_autopoll(int devs)
1037} 1026}
1038 1027
1039/* Reset the ADB bus */ 1028/* Reset the ADB bus */
1040static int __pmac 1029static int
1041pmu_adb_reset_bus(void) 1030pmu_adb_reset_bus(void)
1042{ 1031{
1043 struct adb_request req; 1032 struct adb_request req;
@@ -1072,7 +1061,7 @@ pmu_adb_reset_bus(void)
1072#endif /* CONFIG_ADB */ 1061#endif /* CONFIG_ADB */
1073 1062
1074/* Construct and send a pmu request */ 1063/* Construct and send a pmu request */
1075int __openfirmware 1064int
1076pmu_request(struct adb_request *req, void (*done)(struct adb_request *), 1065pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
1077 int nbytes, ...) 1066 int nbytes, ...)
1078{ 1067{
@@ -1098,7 +1087,7 @@ pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
1098 return pmu_queue_request(req); 1087 return pmu_queue_request(req);
1099} 1088}
1100 1089
1101int __pmac 1090int
1102pmu_queue_request(struct adb_request *req) 1091pmu_queue_request(struct adb_request *req)
1103{ 1092{
1104 unsigned long flags; 1093 unsigned long flags;
@@ -1190,7 +1179,7 @@ pmu_done(struct adb_request *req)
1190 (*done)(req); 1179 (*done)(req);
1191} 1180}
1192 1181
1193static void __pmac 1182static void
1194pmu_start(void) 1183pmu_start(void)
1195{ 1184{
1196 struct adb_request *req; 1185 struct adb_request *req;
@@ -1214,7 +1203,7 @@ pmu_start(void)
1214 send_byte(req->data[0]); 1203 send_byte(req->data[0]);
1215} 1204}
1216 1205
1217void __openfirmware 1206void
1218pmu_poll(void) 1207pmu_poll(void)
1219{ 1208{
1220 if (!via) 1209 if (!via)
@@ -1224,7 +1213,7 @@ pmu_poll(void)
1224 via_pmu_interrupt(0, NULL, NULL); 1213 via_pmu_interrupt(0, NULL, NULL);
1225} 1214}
1226 1215
1227void __openfirmware 1216void
1228pmu_poll_adb(void) 1217pmu_poll_adb(void)
1229{ 1218{
1230 if (!via) 1219 if (!via)
@@ -1239,7 +1228,7 @@ pmu_poll_adb(void)
1239 || req_awaiting_reply)); 1228 || req_awaiting_reply));
1240} 1229}
1241 1230
1242void __openfirmware 1231void
1243pmu_wait_complete(struct adb_request *req) 1232pmu_wait_complete(struct adb_request *req)
1244{ 1233{
1245 if (!via) 1234 if (!via)
@@ -1253,7 +1242,7 @@ pmu_wait_complete(struct adb_request *req)
1253 * This is done to avoid spurrious shutdowns when we know we'll have 1242 * This is done to avoid spurrious shutdowns when we know we'll have
1254 * interrupts switched off for a long time 1243 * interrupts switched off for a long time
1255 */ 1244 */
1256void __openfirmware 1245void
1257pmu_suspend(void) 1246pmu_suspend(void)
1258{ 1247{
1259 unsigned long flags; 1248 unsigned long flags;
@@ -1293,7 +1282,7 @@ pmu_suspend(void)
1293 } while (1); 1282 } while (1);
1294} 1283}
1295 1284
1296void __openfirmware 1285void
1297pmu_resume(void) 1286pmu_resume(void)
1298{ 1287{
1299 unsigned long flags; 1288 unsigned long flags;
@@ -1323,7 +1312,7 @@ pmu_resume(void)
1323} 1312}
1324 1313
1325/* Interrupt data could be the result data from an ADB cmd */ 1314/* Interrupt data could be the result data from an ADB cmd */
1326static void __pmac 1315static void
1327pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs) 1316pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
1328{ 1317{
1329 unsigned char ints, pirq; 1318 unsigned char ints, pirq;
@@ -1435,7 +1424,7 @@ next:
1435 goto next; 1424 goto next;
1436} 1425}
1437 1426
1438static struct adb_request* __pmac 1427static struct adb_request*
1439pmu_sr_intr(struct pt_regs *regs) 1428pmu_sr_intr(struct pt_regs *regs)
1440{ 1429{
1441 struct adb_request *req; 1430 struct adb_request *req;
@@ -1541,7 +1530,7 @@ pmu_sr_intr(struct pt_regs *regs)
1541 return NULL; 1530 return NULL;
1542} 1531}
1543 1532
1544static irqreturn_t __pmac 1533static irqreturn_t
1545via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs) 1534via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
1546{ 1535{
1547 unsigned long flags; 1536 unsigned long flags;
@@ -1629,7 +1618,7 @@ no_free_slot:
1629 return IRQ_RETVAL(handled); 1618 return IRQ_RETVAL(handled);
1630} 1619}
1631 1620
1632void __pmac 1621void
1633pmu_unlock(void) 1622pmu_unlock(void)
1634{ 1623{
1635 unsigned long flags; 1624 unsigned long flags;
@@ -1642,7 +1631,7 @@ pmu_unlock(void)
1642} 1631}
1643 1632
1644 1633
1645static irqreturn_t __pmac 1634static irqreturn_t
1646gpio1_interrupt(int irq, void *arg, struct pt_regs *regs) 1635gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
1647{ 1636{
1648 unsigned long flags; 1637 unsigned long flags;
@@ -1663,12 +1652,12 @@ gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
1663} 1652}
1664 1653
1665#ifdef CONFIG_PMAC_BACKLIGHT 1654#ifdef CONFIG_PMAC_BACKLIGHT
1666static int backlight_to_bright[] __pmacdata = { 1655static int backlight_to_bright[] = {
1667 0x7f, 0x46, 0x42, 0x3e, 0x3a, 0x36, 0x32, 0x2e, 1656 0x7f, 0x46, 0x42, 0x3e, 0x3a, 0x36, 0x32, 0x2e,
1668 0x2a, 0x26, 0x22, 0x1e, 0x1a, 0x16, 0x12, 0x0e 1657 0x2a, 0x26, 0x22, 0x1e, 0x1a, 0x16, 0x12, 0x0e
1669}; 1658};
1670 1659
1671static int __openfirmware 1660static int
1672pmu_set_backlight_enable(int on, int level, void* data) 1661pmu_set_backlight_enable(int on, int level, void* data)
1673{ 1662{
1674 struct adb_request req; 1663 struct adb_request req;
@@ -1688,7 +1677,7 @@ pmu_set_backlight_enable(int on, int level, void* data)
1688 return 0; 1677 return 0;
1689} 1678}
1690 1679
1691static void __openfirmware 1680static void
1692pmu_bright_complete(struct adb_request *req) 1681pmu_bright_complete(struct adb_request *req)
1693{ 1682{
1694 if (req == &bright_req_1) 1683 if (req == &bright_req_1)
@@ -1697,7 +1686,7 @@ pmu_bright_complete(struct adb_request *req)
1697 clear_bit(2, &async_req_locks); 1686 clear_bit(2, &async_req_locks);
1698} 1687}
1699 1688
1700static int __openfirmware 1689static int
1701pmu_set_backlight_level(int level, void* data) 1690pmu_set_backlight_level(int level, void* data)
1702{ 1691{
1703 if (vias == NULL) 1692 if (vias == NULL)
@@ -1717,7 +1706,7 @@ pmu_set_backlight_level(int level, void* data)
1717} 1706}
1718#endif /* CONFIG_PMAC_BACKLIGHT */ 1707#endif /* CONFIG_PMAC_BACKLIGHT */
1719 1708
1720void __pmac 1709void
1721pmu_enable_irled(int on) 1710pmu_enable_irled(int on)
1722{ 1711{
1723 struct adb_request req; 1712 struct adb_request req;
@@ -1732,7 +1721,7 @@ pmu_enable_irled(int on)
1732 pmu_wait_complete(&req); 1721 pmu_wait_complete(&req);
1733} 1722}
1734 1723
1735void __pmac 1724void
1736pmu_restart(void) 1725pmu_restart(void)
1737{ 1726{
1738 struct adb_request req; 1727 struct adb_request req;
@@ -1757,7 +1746,7 @@ pmu_restart(void)
1757 ; 1746 ;
1758} 1747}
1759 1748
1760void __pmac 1749void
1761pmu_shutdown(void) 1750pmu_shutdown(void)
1762{ 1751{
1763 struct adb_request req; 1752 struct adb_request req;
@@ -2076,7 +2065,7 @@ pmu_unregister_sleep_notifier(struct pmu_sleep_notifier* n)
2076} 2065}
2077 2066
2078/* Sleep is broadcast last-to-first */ 2067/* Sleep is broadcast last-to-first */
2079static int __pmac 2068static int
2080broadcast_sleep(int when, int fallback) 2069broadcast_sleep(int when, int fallback)
2081{ 2070{
2082 int ret = PBOOK_SLEEP_OK; 2071 int ret = PBOOK_SLEEP_OK;
@@ -2101,7 +2090,7 @@ broadcast_sleep(int when, int fallback)
2101} 2090}
2102 2091
2103/* Wake is broadcast first-to-last */ 2092/* Wake is broadcast first-to-last */
2104static int __pmac 2093static int
2105broadcast_wake(void) 2094broadcast_wake(void)
2106{ 2095{
2107 int ret = PBOOK_SLEEP_OK; 2096 int ret = PBOOK_SLEEP_OK;
@@ -2132,7 +2121,7 @@ static struct pci_save {
2132} *pbook_pci_saves; 2121} *pbook_pci_saves;
2133static int pbook_npci_saves; 2122static int pbook_npci_saves;
2134 2123
2135static void __pmac 2124static void
2136pbook_alloc_pci_save(void) 2125pbook_alloc_pci_save(void)
2137{ 2126{
2138 int npci; 2127 int npci;
@@ -2149,7 +2138,7 @@ pbook_alloc_pci_save(void)
2149 pbook_npci_saves = npci; 2138 pbook_npci_saves = npci;
2150} 2139}
2151 2140
2152static void __pmac 2141static void
2153pbook_free_pci_save(void) 2142pbook_free_pci_save(void)
2154{ 2143{
2155 if (pbook_pci_saves == NULL) 2144 if (pbook_pci_saves == NULL)
@@ -2159,7 +2148,7 @@ pbook_free_pci_save(void)
2159 pbook_npci_saves = 0; 2148 pbook_npci_saves = 0;
2160} 2149}
2161 2150
2162static void __pmac 2151static void
2163pbook_pci_save(void) 2152pbook_pci_save(void)
2164{ 2153{
2165 struct pci_save *ps = pbook_pci_saves; 2154 struct pci_save *ps = pbook_pci_saves;
@@ -2190,7 +2179,7 @@ pbook_pci_save(void)
2190 * during boot, it will be in the pci dev list. If it's disabled at this point 2179 * during boot, it will be in the pci dev list. If it's disabled at this point
2191 * (and it will probably be), then you can't access it's config space. 2180 * (and it will probably be), then you can't access it's config space.
2192 */ 2181 */
2193static void __pmac 2182static void
2194pbook_pci_restore(void) 2183pbook_pci_restore(void)
2195{ 2184{
2196 u16 cmd; 2185 u16 cmd;
@@ -2238,7 +2227,7 @@ pbook_pci_restore(void)
2238 2227
2239#ifdef DEBUG_SLEEP 2228#ifdef DEBUG_SLEEP
2240/* N.B. This doesn't work on the 3400 */ 2229/* N.B. This doesn't work on the 3400 */
2241void __pmac 2230void
2242pmu_blink(int n) 2231pmu_blink(int n)
2243{ 2232{
2244 struct adb_request req; 2233 struct adb_request req;
@@ -2277,9 +2266,9 @@ pmu_blink(int n)
2277 * Put the powerbook to sleep. 2266 * Put the powerbook to sleep.
2278 */ 2267 */
2279 2268
2280static u32 save_via[8] __pmacdata; 2269static u32 save_via[8];
2281 2270
2282static void __pmac 2271static void
2283save_via_state(void) 2272save_via_state(void)
2284{ 2273{
2285 save_via[0] = in_8(&via[ANH]); 2274 save_via[0] = in_8(&via[ANH]);
@@ -2291,7 +2280,7 @@ save_via_state(void)
2291 save_via[6] = in_8(&via[T1CL]); 2280 save_via[6] = in_8(&via[T1CL]);
2292 save_via[7] = in_8(&via[T1CH]); 2281 save_via[7] = in_8(&via[T1CH]);
2293} 2282}
2294static void __pmac 2283static void
2295restore_via_state(void) 2284restore_via_state(void)
2296{ 2285{
2297 out_8(&via[ANH], save_via[0]); 2286 out_8(&via[ANH], save_via[0]);
@@ -2307,7 +2296,7 @@ restore_via_state(void)
2307 out_8(&via[IER], IER_SET | SR_INT | CB1_INT); 2296 out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
2308} 2297}
2309 2298
2310static int __pmac 2299static int
2311pmac_suspend_devices(void) 2300pmac_suspend_devices(void)
2312{ 2301{
2313 int ret; 2302 int ret;
@@ -2397,7 +2386,7 @@ pmac_suspend_devices(void)
2397 return 0; 2386 return 0;
2398} 2387}
2399 2388
2400static int __pmac 2389static int
2401pmac_wakeup_devices(void) 2390pmac_wakeup_devices(void)
2402{ 2391{
2403 mdelay(100); 2392 mdelay(100);
@@ -2436,7 +2425,7 @@ pmac_wakeup_devices(void)
2436#define GRACKLE_NAP (1<<4) 2425#define GRACKLE_NAP (1<<4)
2437#define GRACKLE_SLEEP (1<<3) 2426#define GRACKLE_SLEEP (1<<3)
2438 2427
2439int __pmac 2428int
2440powerbook_sleep_grackle(void) 2429powerbook_sleep_grackle(void)
2441{ 2430{
2442 unsigned long save_l2cr; 2431 unsigned long save_l2cr;
@@ -2520,7 +2509,7 @@ powerbook_sleep_grackle(void)
2520 return 0; 2509 return 0;
2521} 2510}
2522 2511
2523static int __pmac 2512static int
2524powerbook_sleep_Core99(void) 2513powerbook_sleep_Core99(void)
2525{ 2514{
2526 unsigned long save_l2cr; 2515 unsigned long save_l2cr;
@@ -2620,7 +2609,7 @@ powerbook_sleep_Core99(void)
2620#define PB3400_MEM_CTRL 0xf8000000 2609#define PB3400_MEM_CTRL 0xf8000000
2621#define PB3400_MEM_CTRL_SLEEP 0x70 2610#define PB3400_MEM_CTRL_SLEEP 0x70
2622 2611
2623static int __pmac 2612static int
2624powerbook_sleep_3400(void) 2613powerbook_sleep_3400(void)
2625{ 2614{
2626 int ret, i, x; 2615 int ret, i, x;
@@ -2720,9 +2709,9 @@ struct pmu_private {
2720}; 2709};
2721 2710
2722static LIST_HEAD(all_pmu_pvt); 2711static LIST_HEAD(all_pmu_pvt);
2723static DEFINE_SPINLOCK(all_pvt_lock __pmacdata); 2712static DEFINE_SPINLOCK(all_pvt_lock);
2724 2713
2725static void __pmac 2714static void
2726pmu_pass_intr(unsigned char *data, int len) 2715pmu_pass_intr(unsigned char *data, int len)
2727{ 2716{
2728 struct pmu_private *pp; 2717 struct pmu_private *pp;
@@ -2751,7 +2740,7 @@ pmu_pass_intr(unsigned char *data, int len)
2751 spin_unlock_irqrestore(&all_pvt_lock, flags); 2740 spin_unlock_irqrestore(&all_pvt_lock, flags);
2752} 2741}
2753 2742
2754static int __pmac 2743static int
2755pmu_open(struct inode *inode, struct file *file) 2744pmu_open(struct inode *inode, struct file *file)
2756{ 2745{
2757 struct pmu_private *pp; 2746 struct pmu_private *pp;
@@ -2773,7 +2762,7 @@ pmu_open(struct inode *inode, struct file *file)
2773 return 0; 2762 return 0;
2774} 2763}
2775 2764
2776static ssize_t __pmac 2765static ssize_t
2777pmu_read(struct file *file, char __user *buf, 2766pmu_read(struct file *file, char __user *buf,
2778 size_t count, loff_t *ppos) 2767 size_t count, loff_t *ppos)
2779{ 2768{
@@ -2825,14 +2814,14 @@ pmu_read(struct file *file, char __user *buf,
2825 return ret; 2814 return ret;
2826} 2815}
2827 2816
2828static ssize_t __pmac 2817static ssize_t
2829pmu_write(struct file *file, const char __user *buf, 2818pmu_write(struct file *file, const char __user *buf,
2830 size_t count, loff_t *ppos) 2819 size_t count, loff_t *ppos)
2831{ 2820{
2832 return 0; 2821 return 0;
2833} 2822}
2834 2823
2835static unsigned int __pmac 2824static unsigned int
2836pmu_fpoll(struct file *filp, poll_table *wait) 2825pmu_fpoll(struct file *filp, poll_table *wait)
2837{ 2826{
2838 struct pmu_private *pp = filp->private_data; 2827 struct pmu_private *pp = filp->private_data;
@@ -2849,7 +2838,7 @@ pmu_fpoll(struct file *filp, poll_table *wait)
2849 return mask; 2838 return mask;
2850} 2839}
2851 2840
2852static int __pmac 2841static int
2853pmu_release(struct inode *inode, struct file *file) 2842pmu_release(struct inode *inode, struct file *file)
2854{ 2843{
2855 struct pmu_private *pp = file->private_data; 2844 struct pmu_private *pp = file->private_data;
@@ -2874,8 +2863,7 @@ pmu_release(struct inode *inode, struct file *file)
2874 return 0; 2863 return 0;
2875} 2864}
2876 2865
2877/* Note: removed __openfirmware here since it causes link errors */ 2866static int
2878static int __pmac
2879pmu_ioctl(struct inode * inode, struct file *filp, 2867pmu_ioctl(struct inode * inode, struct file *filp,
2880 u_int cmd, u_long arg) 2868 u_int cmd, u_long arg)
2881{ 2869{
@@ -2957,7 +2945,7 @@ pmu_ioctl(struct inode * inode, struct file *filp,
2957 return error; 2945 return error;
2958} 2946}
2959 2947
2960static struct file_operations pmu_device_fops __pmacdata = { 2948static struct file_operations pmu_device_fops = {
2961 .read = pmu_read, 2949 .read = pmu_read,
2962 .write = pmu_write, 2950 .write = pmu_write,
2963 .poll = pmu_fpoll, 2951 .poll = pmu_fpoll,
@@ -2966,7 +2954,7 @@ static struct file_operations pmu_device_fops __pmacdata = {
2966 .release = pmu_release, 2954 .release = pmu_release,
2967}; 2955};
2968 2956
2969static struct miscdevice pmu_device __pmacdata = { 2957static struct miscdevice pmu_device = {
2970 PMU_MINOR, "pmu", &pmu_device_fops 2958 PMU_MINOR, "pmu", &pmu_device_fops
2971}; 2959};
2972 2960
@@ -2982,7 +2970,7 @@ device_initcall(pmu_device_init);
2982 2970
2983 2971
2984#ifdef DEBUG_SLEEP 2972#ifdef DEBUG_SLEEP
2985static inline void __pmac 2973static inline void
2986polled_handshake(volatile unsigned char __iomem *via) 2974polled_handshake(volatile unsigned char __iomem *via)
2987{ 2975{
2988 via[B] &= ~TREQ; eieio(); 2976 via[B] &= ~TREQ; eieio();
@@ -2993,7 +2981,7 @@ polled_handshake(volatile unsigned char __iomem *via)
2993 ; 2981 ;
2994} 2982}
2995 2983
2996static inline void __pmac 2984static inline void
2997polled_send_byte(volatile unsigned char __iomem *via, int x) 2985polled_send_byte(volatile unsigned char __iomem *via, int x)
2998{ 2986{
2999 via[ACR] |= SR_OUT | SR_EXT; eieio(); 2987 via[ACR] |= SR_OUT | SR_EXT; eieio();
@@ -3001,7 +2989,7 @@ polled_send_byte(volatile unsigned char __iomem *via, int x)
3001 polled_handshake(via); 2989 polled_handshake(via);
3002} 2990}
3003 2991
3004static inline int __pmac 2992static inline int
3005polled_recv_byte(volatile unsigned char __iomem *via) 2993polled_recv_byte(volatile unsigned char __iomem *via)
3006{ 2994{
3007 int x; 2995 int x;
@@ -3013,7 +3001,7 @@ polled_recv_byte(volatile unsigned char __iomem *via)
3013 return x; 3001 return x;
3014} 3002}
3015 3003
3016int __pmac 3004int
3017pmu_polled_request(struct adb_request *req) 3005pmu_polled_request(struct adb_request *req)
3018{ 3006{
3019 unsigned long flags; 3007 unsigned long flags;
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index 820dc52e30bc..6f80d76ac17c 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -835,7 +835,7 @@ static struct pci_save {
835} *pbook_pci_saves; 835} *pbook_pci_saves;
836static int n_pbook_pci_saves; 836static int n_pbook_pci_saves;
837 837
838static inline void __openfirmware 838static inline void
839pbook_pci_save(void) 839pbook_pci_save(void)
840{ 840{
841 int npci; 841 int npci;
@@ -863,7 +863,7 @@ pbook_pci_save(void)
863 } 863 }
864} 864}
865 865
866static inline void __openfirmware 866static inline void
867pbook_pci_restore(void) 867pbook_pci_restore(void)
868{ 868{
869 u16 cmd; 869 u16 cmd;
@@ -902,7 +902,7 @@ pbook_pci_restore(void)
902#define IRQ_ENABLE ((unsigned int *)0xf3000024) 902#define IRQ_ENABLE ((unsigned int *)0xf3000024)
903#define MEM_CTRL ((unsigned int *)0xf8000070) 903#define MEM_CTRL ((unsigned int *)0xf8000070)
904 904
905int __openfirmware powerbook_sleep(void) 905int powerbook_sleep(void)
906{ 906{
907 int ret, i, x; 907 int ret, i, x;
908 static int save_backlight; 908 static int save_backlight;
@@ -1001,25 +1001,24 @@ int __openfirmware powerbook_sleep(void)
1001/* 1001/*
1002 * Support for /dev/pmu device 1002 * Support for /dev/pmu device
1003 */ 1003 */
1004static int __openfirmware pmu_open(struct inode *inode, struct file *file) 1004static int pmu_open(struct inode *inode, struct file *file)
1005{ 1005{
1006 return 0; 1006 return 0;
1007} 1007}
1008 1008
1009static ssize_t __openfirmware pmu_read(struct file *file, char *buf, 1009static ssize_t pmu_read(struct file *file, char *buf,
1010 size_t count, loff_t *ppos) 1010 size_t count, loff_t *ppos)
1011{ 1011{
1012 return 0; 1012 return 0;
1013} 1013}
1014 1014
1015static ssize_t __openfirmware pmu_write(struct file *file, const char *buf, 1015static ssize_t pmu_write(struct file *file, const char *buf,
1016 size_t count, loff_t *ppos) 1016 size_t count, loff_t *ppos)
1017{ 1017{
1018 return 0; 1018 return 0;
1019} 1019}
1020 1020
1021/* Note: removed __openfirmware here since it causes link errors */ 1021static int pmu_ioctl(struct inode * inode, struct file *filp,
1022static int /*__openfirmware*/ pmu_ioctl(struct inode * inode, struct file *filp,
1023 u_int cmd, u_long arg) 1022 u_int cmd, u_long arg)
1024{ 1023{
1025 int error; 1024 int error;
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 3d56cf5a4e23..db3bc2f6f0fa 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -70,8 +70,9 @@
70#include <linux/delay.h> 70#include <linux/delay.h>
71#include <linux/mm.h> 71#include <linux/mm.h>
72#include <linux/ethtool.h> 72#include <linux/ethtool.h>
73
74#include <asm/abs_addr.h>
73#include <asm/iSeries/mf.h> 75#include <asm/iSeries/mf.h>
74#include <asm/iSeries/iSeries_pci.h>
75#include <asm/uaccess.h> 76#include <asm/uaccess.h>
76 77
77#include <asm/iSeries/HvLpConfig.h> 78#include <asm/iSeries/HvLpConfig.h>
@@ -1397,13 +1398,13 @@ static inline void veth_build_dma_list(struct dma_chunk *list,
1397 * it just at the granularity of iSeries real->absolute 1398 * it just at the granularity of iSeries real->absolute
1398 * mapping? Indeed, given the way the allocator works, can we 1399 * mapping? Indeed, given the way the allocator works, can we
1399 * count on them being absolutely contiguous? */ 1400 * count on them being absolutely contiguous? */
1400 list[0].addr = ISERIES_HV_ADDR(p); 1401 list[0].addr = iseries_hv_addr(p);
1401 list[0].size = min(length, 1402 list[0].size = min(length,
1402 PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK)); 1403 PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK));
1403 1404
1404 done = list[0].size; 1405 done = list[0].size;
1405 while (done < length) { 1406 while (done < length) {
1406 list[i].addr = ISERIES_HV_ADDR(p + done); 1407 list[i].addr = iseries_hv_addr(p + done);
1407 list[i].size = min(length-done, PAGE_SIZE); 1408 list[i].size = min(length-done, PAGE_SIZE);
1408 done += list[i].size; 1409 done += list[i].size;
1409 i++; 1410 i++;
@@ -1496,8 +1497,8 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1496 cnx->dst_inst, 1497 cnx->dst_inst,
1497 HvLpDma_AddressType_RealAddress, 1498 HvLpDma_AddressType_RealAddress,
1498 HvLpDma_AddressType_TceIndex, 1499 HvLpDma_AddressType_TceIndex,
1499 ISERIES_HV_ADDR(&local_list), 1500 iseries_hv_addr(&local_list),
1500 ISERIES_HV_ADDR(&remote_list), 1501 iseries_hv_addr(&remote_list),
1501 length); 1502 length);
1502 if (rc != HvLpDma_Rc_Good) { 1503 if (rc != HvLpDma_Rc_Good) {
1503 dev_kfree_skb_irq(skb); 1504 dev_kfree_skb_irq(skb);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index a3453555a94e..5b6b0b6038a7 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -629,12 +629,4 @@ void __init proc_misc_init(void)
629 if (entry) 629 if (entry)
630 entry->proc_fops = &proc_sysrq_trigger_operations; 630 entry->proc_fops = &proc_sysrq_trigger_operations;
631#endif 631#endif
632#ifdef CONFIG_PPC32
633 {
634 extern struct file_operations ppc_htab_operations;
635 entry = create_proc_entry("ppc_htab", S_IRUGO|S_IWUSR, NULL);
636 if (entry)
637 entry->proc_fops = &ppc_htab_operations;
638 }
639#endif
640} 632}
diff --git a/include/asm-ppc64/a.out.h b/include/asm-powerpc/a.out.h
index 3871e252a6f1..c7393a977364 100644
--- a/include/asm-ppc64/a.out.h
+++ b/include/asm-powerpc/a.out.h
@@ -1,14 +1,5 @@
1#ifndef __PPC64_A_OUT_H__ 1#ifndef _ASM_POWERPC_A_OUT_H
2#define __PPC64_A_OUT_H__ 2#define _ASM_POWERPC_A_OUT_H
3
4/*
5 * c 2001 PPC 64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12 3
13struct exec 4struct exec
14{ 5{
@@ -27,6 +18,7 @@ struct exec
27#define N_SYMSIZE(a) ((a).a_syms) 18#define N_SYMSIZE(a) ((a).a_syms)
28 19
29#ifdef __KERNEL__ 20#ifdef __KERNEL__
21#ifdef __powerpc64__
30 22
31#define STACK_TOP_USER64 TASK_SIZE_USER64 23#define STACK_TOP_USER64 TASK_SIZE_USER64
32#define STACK_TOP_USER32 TASK_SIZE_USER32 24#define STACK_TOP_USER32 TASK_SIZE_USER32
@@ -34,6 +26,11 @@ struct exec
34#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ 26#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
35 STACK_TOP_USER32 : STACK_TOP_USER64) 27 STACK_TOP_USER32 : STACK_TOP_USER64)
36 28
29#else /* __powerpc64__ */
30
31#define STACK_TOP TASK_SIZE
32
33#endif /* __powerpc64__ */
37#endif /* __KERNEL__ */ 34#endif /* __KERNEL__ */
38 35
39#endif /* __PPC64_A_OUT_H__ */ 36#endif /* _ASM_POWERPC_A_OUT_H */
diff --git a/include/asm-ppc/atomic.h b/include/asm-powerpc/atomic.h
index eeafd505836e..ed4b345ed75d 100644
--- a/include/asm-ppc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -1,29 +1,20 @@
1#ifndef _ASM_POWERPC_ATOMIC_H_
2#define _ASM_POWERPC_ATOMIC_H_
3
1/* 4/*
2 * PowerPC atomic operations 5 * PowerPC atomic operations
3 */ 6 */
4 7
5#ifndef _ASM_PPC_ATOMIC_H_
6#define _ASM_PPC_ATOMIC_H_
7
8typedef struct { volatile int counter; } atomic_t; 8typedef struct { volatile int counter; } atomic_t;
9 9
10#ifdef __KERNEL__ 10#ifdef __KERNEL__
11#include <asm/synch.h>
11 12
12#define ATOMIC_INIT(i) { (i) } 13#define ATOMIC_INIT(i) { (i) }
13 14
14#define atomic_read(v) ((v)->counter) 15#define atomic_read(v) ((v)->counter)
15#define atomic_set(v,i) (((v)->counter) = (i)) 16#define atomic_set(v,i) (((v)->counter) = (i))
16 17
17extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
18
19#ifdef CONFIG_SMP
20#define SMP_SYNC "sync"
21#define SMP_ISYNC "\n\tisync"
22#else
23#define SMP_SYNC ""
24#define SMP_ISYNC
25#endif
26
27/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx. 18/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
28 * The old ATOMIC_SYNC_FIX covered some but not all of this. 19 * The old ATOMIC_SYNC_FIX covered some but not all of this.
29 */ 20 */
@@ -53,12 +44,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
53 int t; 44 int t;
54 45
55 __asm__ __volatile__( 46 __asm__ __volatile__(
47 EIEIO_ON_SMP
56"1: lwarx %0,0,%2 # atomic_add_return\n\ 48"1: lwarx %0,0,%2 # atomic_add_return\n\
57 add %0,%1,%0\n" 49 add %0,%1,%0\n"
58 PPC405_ERR77(0,%2) 50 PPC405_ERR77(0,%2)
59" stwcx. %0,0,%2 \n\ 51" stwcx. %0,0,%2 \n\
60 bne- 1b" 52 bne- 1b"
61 SMP_ISYNC 53 ISYNC_ON_SMP
62 : "=&r" (t) 54 : "=&r" (t)
63 : "r" (a), "r" (&v->counter) 55 : "r" (a), "r" (&v->counter)
64 : "cc", "memory"); 56 : "cc", "memory");
@@ -88,12 +80,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
88 int t; 80 int t;
89 81
90 __asm__ __volatile__( 82 __asm__ __volatile__(
83 EIEIO_ON_SMP
91"1: lwarx %0,0,%2 # atomic_sub_return\n\ 84"1: lwarx %0,0,%2 # atomic_sub_return\n\
92 subf %0,%1,%0\n" 85 subf %0,%1,%0\n"
93 PPC405_ERR77(0,%2) 86 PPC405_ERR77(0,%2)
94" stwcx. %0,0,%2 \n\ 87" stwcx. %0,0,%2 \n\
95 bne- 1b" 88 bne- 1b"
96 SMP_ISYNC 89 ISYNC_ON_SMP
97 : "=&r" (t) 90 : "=&r" (t)
98 : "r" (a), "r" (&v->counter) 91 : "r" (a), "r" (&v->counter)
99 : "cc", "memory"); 92 : "cc", "memory");
@@ -121,12 +114,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
121 int t; 114 int t;
122 115
123 __asm__ __volatile__( 116 __asm__ __volatile__(
117 EIEIO_ON_SMP
124"1: lwarx %0,0,%1 # atomic_inc_return\n\ 118"1: lwarx %0,0,%1 # atomic_inc_return\n\
125 addic %0,%0,1\n" 119 addic %0,%0,1\n"
126 PPC405_ERR77(0,%1) 120 PPC405_ERR77(0,%1)
127" stwcx. %0,0,%1 \n\ 121" stwcx. %0,0,%1 \n\
128 bne- 1b" 122 bne- 1b"
129 SMP_ISYNC 123 ISYNC_ON_SMP
130 : "=&r" (t) 124 : "=&r" (t)
131 : "r" (&v->counter) 125 : "r" (&v->counter)
132 : "cc", "memory"); 126 : "cc", "memory");
@@ -164,12 +158,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
164 int t; 158 int t;
165 159
166 __asm__ __volatile__( 160 __asm__ __volatile__(
161 EIEIO_ON_SMP
167"1: lwarx %0,0,%1 # atomic_dec_return\n\ 162"1: lwarx %0,0,%1 # atomic_dec_return\n\
168 addic %0,%0,-1\n" 163 addic %0,%0,-1\n"
169 PPC405_ERR77(0,%1) 164 PPC405_ERR77(0,%1)
170" stwcx. %0,0,%1\n\ 165" stwcx. %0,0,%1\n\
171 bne- 1b" 166 bne- 1b"
172 SMP_ISYNC 167 ISYNC_ON_SMP
173 : "=&r" (t) 168 : "=&r" (t)
174 : "r" (&v->counter) 169 : "r" (&v->counter)
175 : "cc", "memory"); 170 : "cc", "memory");
@@ -189,13 +184,14 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
189 int t; 184 int t;
190 185
191 __asm__ __volatile__( 186 __asm__ __volatile__(
187 EIEIO_ON_SMP
192"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ 188"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
193 addic. %0,%0,-1\n\ 189 addic. %0,%0,-1\n\
194 blt- 2f\n" 190 blt- 2f\n"
195 PPC405_ERR77(0,%1) 191 PPC405_ERR77(0,%1)
196" stwcx. %0,0,%1\n\ 192" stwcx. %0,0,%1\n\
197 bne- 1b" 193 bne- 1b"
198 SMP_ISYNC 194 ISYNC_ON_SMP
199 "\n\ 195 "\n\
2002:" : "=&r" (t) 1962:" : "=&r" (t)
201 : "r" (&v->counter) 197 : "r" (&v->counter)
@@ -204,11 +200,10 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
204 return t; 200 return t;
205} 201}
206 202
207#define __MB __asm__ __volatile__ (SMP_SYNC : : : "memory") 203#define smp_mb__before_atomic_dec() smp_mb()
208#define smp_mb__before_atomic_dec() __MB 204#define smp_mb__after_atomic_dec() smp_mb()
209#define smp_mb__after_atomic_dec() __MB 205#define smp_mb__before_atomic_inc() smp_mb()
210#define smp_mb__before_atomic_inc() __MB 206#define smp_mb__after_atomic_inc() smp_mb()
211#define smp_mb__after_atomic_inc() __MB
212 207
213#endif /* __KERNEL__ */ 208#endif /* __KERNEL__ */
214#endif /* _ASM_PPC_ATOMIC_H_ */ 209#endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/include/asm-ppc64/auxvec.h b/include/asm-powerpc/auxvec.h
index ac6381a106e1..79d8c4732309 100644
--- a/include/asm-ppc64/auxvec.h
+++ b/include/asm-powerpc/auxvec.h
@@ -1,5 +1,5 @@
1#ifndef __PPC64_AUXVEC_H 1#ifndef _ASM_POWERPC_AUXVEC_H
2#define __PPC64_AUXVEC_H 2#define _ASM_POWERPC_AUXVEC_H
3 3
4/* 4/*
5 * We need to put in some extra aux table entries to tell glibc what 5 * We need to put in some extra aux table entries to tell glibc what
@@ -14,6 +14,8 @@
14/* The vDSO location. We have to use the same value as x86 for glibc's 14/* The vDSO location. We have to use the same value as x86 for glibc's
15 * sake :-) 15 * sake :-)
16 */ 16 */
17#ifdef __powerpc64__
17#define AT_SYSINFO_EHDR 33 18#define AT_SYSINFO_EHDR 33
19#endif
18 20
19#endif /* __PPC64_AUXVEC_H */ 21#endif
diff --git a/include/asm-ppc/backlight.h b/include/asm-powerpc/backlight.h
index 3a1c3dede2a0..1ba1f27a0b63 100644
--- a/include/asm-ppc/backlight.h
+++ b/include/asm-powerpc/backlight.h
@@ -1,12 +1,13 @@
1/* 1/*
2 * Routines for handling backlight control on PowerBooks 2 * Routines for handling backlight control on PowerBooks
3 * 3 *
4 * For now, implementation resides in arch/ppc/kernel/pmac_support.c 4 * For now, implementation resides in
5 * arch/powerpc/platforms/powermac/pmac_support.c
5 * 6 *
6 */ 7 */
8#ifndef __ASM_POWERPC_BACKLIGHT_H
9#define __ASM_POWERPC_BACKLIGHT_H
7#ifdef __KERNEL__ 10#ifdef __KERNEL__
8#ifndef __ASM_PPC_BACKLIGHT_H
9#define __ASM_PPC_BACKLIGHT_H
10 11
11/* Abstract values */ 12/* Abstract values */
12#define BACKLIGHT_OFF 0 13#define BACKLIGHT_OFF 0
@@ -26,5 +27,5 @@ extern int get_backlight_enable(void);
26extern int set_backlight_level(int level); 27extern int set_backlight_level(int level);
27extern int get_backlight_level(void); 28extern int get_backlight_level(void);
28 29
29#endif
30#endif /* __KERNEL__ */ 30#endif /* __KERNEL__ */
31#endif
diff --git a/include/asm-ppc64/bug.h b/include/asm-powerpc/bug.h
index 160178278861..e4d028e87020 100644
--- a/include/asm-ppc64/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_BUG_H 1#ifndef _ASM_POWERPC_BUG_H
2#define _PPC64_BUG_H 2#define _ASM_POWERPC_BUG_H
3 3
4/* 4/*
5 * Define an illegal instr to trap on the bug. 5 * Define an illegal instr to trap on the bug.
@@ -11,9 +11,21 @@
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13 13
14#ifdef __powerpc64__
15#define BUG_TABLE_ENTRY(label, line, file, func) \
16 ".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
17#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
18#define DATA_TYPE long long
19#else
20#define BUG_TABLE_ENTRY(label, line, file, func) \
21 ".long " #label ", " #line ", " #file ", " #func "\n"
22#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
23#define DATA_TYPE int
24#endif /* __powerpc64__ */
25
14struct bug_entry { 26struct bug_entry {
15 unsigned long bug_addr; 27 unsigned long bug_addr;
16 long line; 28 int line;
17 const char *file; 29 const char *file;
18 const char *function; 30 const char *function;
19}; 31};
@@ -32,28 +44,28 @@ struct bug_entry *find_bug(unsigned long bugaddr);
32 __asm__ __volatile__( \ 44 __asm__ __volatile__( \
33 "1: twi 31,0,0\n" \ 45 "1: twi 31,0,0\n" \
34 ".section __bug_table,\"a\"\n\t" \ 46 ".section __bug_table,\"a\"\n\t" \
35 " .llong 1b,%0,%1,%2\n" \ 47 BUG_TABLE_ENTRY(1b,%0,%1,%2) \
36 ".previous" \ 48 ".previous" \
37 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \ 49 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
38} while (0) 50} while (0)
39 51
40#define BUG_ON(x) do { \ 52#define BUG_ON(x) do { \
41 __asm__ __volatile__( \ 53 __asm__ __volatile__( \
42 "1: tdnei %0,0\n" \ 54 TRAP_OP(%0,0) \
43 ".section __bug_table,\"a\"\n\t" \ 55 ".section __bug_table,\"a\"\n\t" \
44 " .llong 1b,%1,%2,%3\n" \ 56 BUG_TABLE_ENTRY(1b,%1,%2,%3) \
45 ".previous" \ 57 ".previous" \
46 : : "r" ((long long)(x)), "i" (__LINE__), \ 58 : : "r" ((DATA_TYPE)(x)), "i" (__LINE__), \
47 "i" (__FILE__), "i" (__FUNCTION__)); \ 59 "i" (__FILE__), "i" (__FUNCTION__)); \
48} while (0) 60} while (0)
49 61
50#define WARN_ON(x) do { \ 62#define WARN_ON(x) do { \
51 __asm__ __volatile__( \ 63 __asm__ __volatile__( \
52 "1: tdnei %0,0\n" \ 64 TRAP_OP(%0,0) \
53 ".section __bug_table,\"a\"\n\t" \ 65 ".section __bug_table,\"a\"\n\t" \
54 " .llong 1b,%1,%2,%3\n" \ 66 BUG_TABLE_ENTRY(1b,%1,%2,%3) \
55 ".previous" \ 67 ".previous" \
56 : : "r" ((long long)(x)), \ 68 : : "r" ((DATA_TYPE)(x)), \
57 "i" (__LINE__ + BUG_WARNING_TRAP), \ 69 "i" (__LINE__ + BUG_WARNING_TRAP), \
58 "i" (__FILE__), "i" (__FUNCTION__)); \ 70 "i" (__FILE__), "i" (__FUNCTION__)); \
59} while (0) 71} while (0)
@@ -61,9 +73,9 @@ struct bug_entry *find_bug(unsigned long bugaddr);
61#define HAVE_ARCH_BUG 73#define HAVE_ARCH_BUG
62#define HAVE_ARCH_BUG_ON 74#define HAVE_ARCH_BUG_ON
63#define HAVE_ARCH_WARN_ON 75#define HAVE_ARCH_WARN_ON
64#endif 76#endif /* CONFIG_BUG */
65#endif 77#endif /* __ASSEMBLY __ */
66 78
67#include <asm-generic/bug.h> 79#include <asm-generic/bug.h>
68 80
69#endif 81#endif /* _ASM_POWERPC_BUG_H */
diff --git a/include/asm-ppc64/byteorder.h b/include/asm-powerpc/byteorder.h
index 8b57da62b674..b37752214a16 100644
--- a/include/asm-ppc64/byteorder.h
+++ b/include/asm-powerpc/byteorder.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_BYTEORDER_H 1#ifndef _ASM_POWERPC_BYTEORDER_H
2#define _PPC64_BYTEORDER_H 2#define _ASM_POWERPC_BYTEORDER_H
3 3
4/* 4/*
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -77,10 +77,13 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
77 77
78#ifndef __STRICT_ANSI__ 78#ifndef __STRICT_ANSI__
79#define __BYTEORDER_HAS_U64__ 79#define __BYTEORDER_HAS_U64__
80#endif 80#ifndef __powerpc64__
81#define __SWAB_64_THRU_32__
82#endif /* __powerpc64__ */
83#endif /* __STRICT_ANSI__ */
81 84
82#endif /* __GNUC__ */ 85#endif /* __GNUC__ */
83 86
84#include <linux/byteorder/big_endian.h> 87#include <linux/byteorder/big_endian.h>
85 88
86#endif /* _PPC64_BYTEORDER_H */ 89#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
new file mode 100644
index 000000000000..c019501daceb
--- /dev/null
+++ b/include/asm-powerpc/cputable.h
@@ -0,0 +1,427 @@
1#ifndef __ASM_POWERPC_CPUTABLE_H
2#define __ASM_POWERPC_CPUTABLE_H
3
4#include <linux/config.h>
5#include <asm/ppc_asm.h> /* for ASM_CONST */
6
7#define PPC_FEATURE_32 0x80000000
8#define PPC_FEATURE_64 0x40000000
9#define PPC_FEATURE_601_INSTR 0x20000000
10#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
11#define PPC_FEATURE_HAS_FPU 0x08000000
12#define PPC_FEATURE_HAS_MMU 0x04000000
13#define PPC_FEATURE_HAS_4xxMAC 0x02000000
14#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
15#define PPC_FEATURE_HAS_SPE 0x00800000
16#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
17#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
18#define PPC_FEATURE_NO_TB 0x00100000
19
20#ifdef __KERNEL__
21#ifndef __ASSEMBLY__
22
23/* This structure can grow, it's real size is used by head.S code
24 * via the mkdefs mechanism.
25 */
26struct cpu_spec;
27struct op_powerpc_model;
28
29typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
30
31struct cpu_spec {
32 /* CPU is matched via (PVR & pvr_mask) == pvr_value */
33 unsigned int pvr_mask;
34 unsigned int pvr_value;
35
36 char *cpu_name;
37 unsigned long cpu_features; /* Kernel features */
38 unsigned int cpu_user_features; /* Userland features */
39
40 /* cache line sizes */
41 unsigned int icache_bsize;
42 unsigned int dcache_bsize;
43
44 /* number of performance monitor counters */
45 unsigned int num_pmcs;
46
47 /* this is called to initialize various CPU bits like L1 cache,
48 * BHT, SPD, etc... from head.S before branching to identify_machine
49 */
50 cpu_setup_t cpu_setup;
51
52 /* Used by oprofile userspace to select the right counters */
53 char *oprofile_cpu_type;
54
55 /* Processor specific oprofile operations */
56 struct op_powerpc_model *oprofile_model;
57};
58
59extern struct cpu_spec *cur_cpu_spec;
60
61extern void identify_cpu(unsigned long offset, unsigned long cpu);
62extern void do_cpu_ftr_fixups(unsigned long offset);
63
64#endif /* __ASSEMBLY__ */
65
66/* CPU kernel features */
67
68/* Retain the 32b definitions all use bottom half of word */
69#define CPU_FTR_SPLIT_ID_CACHE ASM_CONST(0x0000000000000001)
70#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
71#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
72#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
73#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
74#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
75#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
76#define CPU_FTR_604_PERF_MON ASM_CONST(0x0000000000000080)
77#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
78#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
79#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
80#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
81#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
82#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
83#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
84#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
85#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000)
86#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
87#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
88#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
89
90#ifdef __powerpc64__
91/* Add the 64b processor unique features in the top half of the word */
92#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
93#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
94#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
95#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
96#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000001000000000)
97#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
98#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
99#define CPU_FTR_CTRL ASM_CONST(0x0000008000000000)
100#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
101#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
102#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
103#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
104#else
105/* ensure on 32b processors the flags are available for compiling but
106 * don't do anything */
107#define CPU_FTR_SLB ASM_CONST(0x0)
108#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
109#define CPU_FTR_TLBIEL ASM_CONST(0x0)
110#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
111#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0)
112#define CPU_FTR_IABR ASM_CONST(0x0)
113#define CPU_FTR_MMCRA ASM_CONST(0x0)
114#define CPU_FTR_CTRL ASM_CONST(0x0)
115#define CPU_FTR_SMT ASM_CONST(0x0)
116#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0)
117#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0)
118#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0)
119#endif
120
121#ifndef __ASSEMBLY__
122
123#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
124 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
125 CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
126
127/* iSeries doesn't support large pages */
128#ifdef CONFIG_PPC_ISERIES
129#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE)
130#else
131#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
132#endif /* CONFIG_PPC_ISERIES */
133
134/* We only set the altivec features if the kernel was compiled with altivec
135 * support
136 */
137#ifdef CONFIG_ALTIVEC
138#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
139#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
140#else
141#define CPU_FTR_ALTIVEC_COMP 0
142#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
143#endif
144
145/* We need to mark all pages as being coherent if we're SMP or we
146 * have a 74[45]x and an MPC107 host bridge.
147 */
148#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
149#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
150#else
151#define CPU_FTR_COMMON 0
152#endif
153
154/* The powersave features NAP & DOZE seems to confuse BDI when
155 debugging. So if a BDI is used, disable theses
156 */
157#ifndef CONFIG_BDI_SWITCH
158#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE
159#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP
160#else
161#define CPU_FTR_MAYBE_CAN_DOZE 0
162#define CPU_FTR_MAYBE_CAN_NAP 0
163#endif
164
165#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
166 !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
167 !defined(CONFIG_BOOKE))
168
169enum {
170 CPU_FTRS_PPC601 = CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE,
171 CPU_FTRS_603 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
172 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
173 CPU_FTR_MAYBE_CAN_NAP,
174 CPU_FTRS_604 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
175 CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
176 CPU_FTRS_740_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
177 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
178 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
179 CPU_FTRS_740 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
180 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
181 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
182 CPU_FTRS_750 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
183 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
184 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
185 CPU_FTRS_750FX1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
186 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
187 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
188 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
189 CPU_FTRS_750FX2 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
190 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
191 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
192 CPU_FTR_NO_DPM,
193 CPU_FTRS_750FX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
194 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
195 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
196 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
197 CPU_FTRS_750GX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
198 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
199 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
200 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
201 CPU_FTRS_7400_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
202 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
203 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
204 CPU_FTR_MAYBE_CAN_NAP,
205 CPU_FTRS_7400 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
206 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
207 CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
208 CPU_FTR_MAYBE_CAN_NAP,
209 CPU_FTRS_7450_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
210 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
211 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
212 CPU_FTR_NEED_COHERENT,
213 CPU_FTRS_7450_21 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
214 CPU_FTR_USE_TB |
215 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
216 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
217 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
218 CPU_FTR_NEED_COHERENT,
219 CPU_FTRS_7450_23 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
220 CPU_FTR_USE_TB |
221 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
222 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
223 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
224 CPU_FTRS_7455_1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
225 CPU_FTR_USE_TB |
226 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
227 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
228 CPU_FTR_NEED_COHERENT,
229 CPU_FTRS_7455_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
230 CPU_FTR_USE_TB |
231 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
232 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
233 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
234 CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
235 CPU_FTRS_7455 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
236 CPU_FTR_USE_TB |
237 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
238 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
239 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
240 CPU_FTR_NEED_COHERENT,
241 CPU_FTRS_7447_10 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
242 CPU_FTR_USE_TB |
243 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
244 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
245 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
246 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
247 CPU_FTRS_7447 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
248 CPU_FTR_USE_TB |
249 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
250 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
251 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
252 CPU_FTR_NEED_COHERENT,
253 CPU_FTRS_7447A = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
254 CPU_FTR_USE_TB |
255 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
256 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
257 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
258 CPU_FTR_NEED_COHERENT,
259 CPU_FTRS_82XX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
260 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB,
261 CPU_FTRS_G2_LE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
262 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
263 CPU_FTRS_E300 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
264 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
265 CPU_FTRS_CLASSIC32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
266 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
267 CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
268 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
269 CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
270 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
271 CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
272 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP |
273 CPU_FTR_MAYBE_CAN_NAP,
274 CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
275 CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
276 CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
277 CPU_FTRS_E200 = CPU_FTR_USE_TB,
278 CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
279 CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
280 CPU_FTR_BIG_PHYS,
281 CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON,
282#ifdef __powerpc64__
283 CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
284 CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
285 CPU_FTRS_RS64 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
286 CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
287 CPU_FTR_MMCRA | CPU_FTR_CTRL,
288 CPU_FTRS_POWER4 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
289 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
290 CPU_FTRS_PPC970 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
291 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
292 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
293 CPU_FTRS_POWER5 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
294 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
295 CPU_FTR_MMCRA | CPU_FTR_SMT |
296 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
297 CPU_FTR_MMCRA_SIHV,
298 CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
299 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
300 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT,
301 CPU_FTRS_COMPATIBLE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
302 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2,
303#endif
304
305 CPU_FTRS_POSSIBLE =
306#if CLASSIC_PPC
307 CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
308 CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
309 CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
310 CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
311 CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
312 CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
313 CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
314 CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 |
315#else
316 CPU_FTRS_GENERIC_32 |
317#endif
318#ifdef CONFIG_PPC64BRIDGE
319 CPU_FTRS_POWER3_32 |
320#endif
321#ifdef CONFIG_POWER4
322 CPU_FTRS_POWER4_32 | CPU_FTRS_970_32 |
323#endif
324#ifdef CONFIG_8xx
325 CPU_FTRS_8XX |
326#endif
327#ifdef CONFIG_40x
328 CPU_FTRS_40X |
329#endif
330#ifdef CONFIG_44x
331 CPU_FTRS_44X |
332#endif
333#ifdef CONFIG_E200
334 CPU_FTRS_E200 |
335#endif
336#ifdef CONFIG_E500
337 CPU_FTRS_E500 | CPU_FTRS_E500_2 |
338#endif
339#ifdef __powerpc64__
340 CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |
341 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL |
342#endif
343 0,
344
345 CPU_FTRS_ALWAYS =
346#if CLASSIC_PPC
347 CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
348 CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
349 CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
350 CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
351 CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
352 CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
353 CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
354 CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 &
355#else
356 CPU_FTRS_GENERIC_32 &
357#endif
358#ifdef CONFIG_PPC64BRIDGE
359 CPU_FTRS_POWER3_32 &
360#endif
361#ifdef CONFIG_POWER4
362 CPU_FTRS_POWER4_32 & CPU_FTRS_970_32 &
363#endif
364#ifdef CONFIG_8xx
365 CPU_FTRS_8XX &
366#endif
367#ifdef CONFIG_40x
368 CPU_FTRS_40X &
369#endif
370#ifdef CONFIG_44x
371 CPU_FTRS_44X &
372#endif
373#ifdef CONFIG_E200
374 CPU_FTRS_E200 &
375#endif
376#ifdef CONFIG_E500
377 CPU_FTRS_E500 & CPU_FTRS_E500_2 &
378#endif
379#ifdef __powerpc64__
380 CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &
381 CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL &
382#endif
383 CPU_FTRS_POSSIBLE,
384};
385
386static inline int cpu_has_feature(unsigned long feature)
387{
388 return (CPU_FTRS_ALWAYS & feature) ||
389 (CPU_FTRS_POSSIBLE
390 & cur_cpu_spec->cpu_features
391 & feature);
392}
393
394#endif /* !__ASSEMBLY__ */
395
396#ifdef __ASSEMBLY__
397
398#define BEGIN_FTR_SECTION 98:
399
400#ifndef __powerpc64__
401#define END_FTR_SECTION(msk, val) \
40299: \
403 .section __ftr_fixup,"a"; \
404 .align 2; \
405 .long msk; \
406 .long val; \
407 .long 98b; \
408 .long 99b; \
409 .previous
410#else /* __powerpc64__ */
411#define END_FTR_SECTION(msk, val) \
41299: \
413 .section __ftr_fixup,"a"; \
414 .align 3; \
415 .llong msk; \
416 .llong val; \
417 .llong 98b; \
418 .llong 99b; \
419 .previous
420#endif /* __powerpc64__ */
421
422#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
423#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
424#endif /* __ASSEMBLY__ */
425
426#endif /* __KERNEL__ */
427#endif /* __ASM_POWERPC_CPUTABLE_H */
diff --git a/include/asm-ppc/dbdma.h b/include/asm-powerpc/dbdma.h
index 8973565f95d3..8973565f95d3 100644
--- a/include/asm-ppc/dbdma.h
+++ b/include/asm-powerpc/dbdma.h
diff --git a/include/asm-ppc/dma.h b/include/asm-powerpc/dma.h
index cc8e5cd8c9d2..926378d2cd94 100644
--- a/include/asm-ppc/dma.h
+++ b/include/asm-powerpc/dma.h
@@ -1,18 +1,14 @@
1#ifndef _ASM_POWERPC_DMA_H
2#define _ASM_POWERPC_DMA_H
3
1/* 4/*
2 * include/asm-ppc/dma.h: Defines for using and allocating dma channels. 5 * Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992. 6 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen 7 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992. 8 * and John Boyd, Nov. 1992.
6 * Changes for ppc sound by Christoph Nadig 9 * Changes for ppc sound by Christoph Nadig
7 */ 10 */
8 11
9#ifdef __KERNEL__
10
11#include <linux/config.h>
12#include <asm/io.h>
13#include <linux/spinlock.h>
14#include <asm/system.h>
15
16/* 12/*
17 * Note: Adapted for PowerPC by Gary Thomas 13 * Note: Adapted for PowerPC by Gary Thomas
18 * Modified by Cort Dougan <cort@cs.nmt.edu> 14 * Modified by Cort Dougan <cort@cs.nmt.edu>
@@ -25,8 +21,10 @@
25 * with a grain of salt. 21 * with a grain of salt.
26 */ 22 */
27 23
28#ifndef _ASM_DMA_H 24#include <linux/config.h>
29#define _ASM_DMA_H 25#include <asm/io.h>
26#include <linux/spinlock.h>
27#include <asm/system.h>
30 28
31#ifndef MAX_DMA_CHANNELS 29#ifndef MAX_DMA_CHANNELS
32#define MAX_DMA_CHANNELS 8 30#define MAX_DMA_CHANNELS 8
@@ -34,11 +32,9 @@
34 32
35/* The maximum address that we can perform a DMA transfer to on this platform */ 33/* The maximum address that we can perform a DMA transfer to on this platform */
36/* Doesn't really apply... */ 34/* Doesn't really apply... */
37#define MAX_DMA_ADDRESS 0xFFFFFFFF 35#define MAX_DMA_ADDRESS (~0UL)
38 36
39/* in arch/ppc/kernel/setup.c -- Cort */ 37#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
40extern unsigned long DMA_MODE_WRITE, DMA_MODE_READ;
41extern unsigned long ISA_DMA_THRESHOLD;
42 38
43#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER 39#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
44#define dma_outb outb_p 40#define dma_outb outb_p
@@ -171,7 +167,18 @@ extern long ppc_cs4232_dma, ppc_cs4232_dma2;
171#define DMA1_EXT_REG 0x40B 167#define DMA1_EXT_REG 0x40B
172#define DMA2_EXT_REG 0x4D6 168#define DMA2_EXT_REG 0x4D6
173 169
170#ifndef __powerpc64__
171 /* in arch/ppc/kernel/setup.c -- Cort */
172 extern unsigned int DMA_MODE_WRITE;
173 extern unsigned int DMA_MODE_READ;
174 extern unsigned long ISA_DMA_THRESHOLD;
175#else
176 #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
177 #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
178#endif
179
174#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ 180#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
181
175#define DMA_AUTOINIT 0x10 182#define DMA_AUTOINIT 0x10
176 183
177extern spinlock_t dma_spin_lock; 184extern spinlock_t dma_spin_lock;
@@ -200,8 +207,9 @@ static __inline__ void enable_dma(unsigned int dmanr)
200 if (dmanr <= 3) { 207 if (dmanr <= 3) {
201 dma_outb(dmanr, DMA1_MASK_REG); 208 dma_outb(dmanr, DMA1_MASK_REG);
202 dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */ 209 dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */
203 } else 210 } else {
204 dma_outb(dmanr & 3, DMA2_MASK_REG); 211 dma_outb(dmanr & 3, DMA2_MASK_REG);
212 }
205} 213}
206 214
207static __inline__ void disable_dma(unsigned int dmanr) 215static __inline__ void disable_dma(unsigned int dmanr)
@@ -290,19 +298,26 @@ static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
290static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys) 298static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
291{ 299{
292 if (dmanr <= 3) { 300 if (dmanr <= 3) {
293 dma_outb(phys & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); 301 dma_outb(phys & 0xff,
294 dma_outb((phys >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); 302 ((dmanr & 3) << 1) + IO_DMA1_BASE);
303 dma_outb((phys >> 8) & 0xff,
304 ((dmanr & 3) << 1) + IO_DMA1_BASE);
295 } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) { 305 } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
296 dma_outb(phys & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); 306 dma_outb(phys & 0xff,
297 dma_outb((phys >> 8) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); 307 ((dmanr & 3) << 2) + IO_DMA2_BASE);
308 dma_outb((phys >> 8) & 0xff,
309 ((dmanr & 3) << 2) + IO_DMA2_BASE);
298 dma_outb((dmanr & 3), DMA2_EXT_REG); 310 dma_outb((dmanr & 3), DMA2_EXT_REG);
299 } else { 311 } else {
300 dma_outb((phys >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); 312 dma_outb((phys >> 1) & 0xff,
301 dma_outb((phys >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); 313 ((dmanr & 3) << 2) + IO_DMA2_BASE);
314 dma_outb((phys >> 9) & 0xff,
315 ((dmanr & 3) << 2) + IO_DMA2_BASE);
302 } 316 }
303 set_dma_page(dmanr, phys >> 16); 317 set_dma_page(dmanr, phys >> 16);
304} 318}
305 319
320
306/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for 321/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
307 * a specific DMA channel. 322 * a specific DMA channel.
308 * You must ensure the parameters are valid. 323 * You must ensure the parameters are valid.
@@ -315,21 +330,24 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
315{ 330{
316 count--; 331 count--;
317 if (dmanr <= 3) { 332 if (dmanr <= 3) {
318 dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); 333 dma_outb(count & 0xff,
319 dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + 334 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
320 IO_DMA1_BASE); 335 dma_outb((count >> 8) & 0xff,
336 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
321 } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) { 337 } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
322 dma_outb(count & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); 338 dma_outb(count & 0xff,
323 dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 2) + 2 + 339 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
324 IO_DMA2_BASE); 340 dma_outb((count >> 8) & 0xff,
341 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
325 } else { 342 } else {
326 dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + 343 dma_outb((count >> 1) & 0xff,
327 IO_DMA2_BASE); 344 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
328 dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + 345 dma_outb((count >> 9) & 0xff,
329 IO_DMA2_BASE); 346 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
330 } 347 }
331} 348}
332 349
350
333/* Get DMA residue count. After a DMA transfer, this 351/* Get DMA residue count. After a DMA transfer, this
334 * should return zero. Reading this while a DMA transfer is 352 * should return zero. Reading this while a DMA transfer is
335 * still in progress will return unpredictable results. 353 * still in progress will return unpredictable results.
@@ -340,8 +358,8 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
340 */ 358 */
341static __inline__ int get_dma_residue(unsigned int dmanr) 359static __inline__ int get_dma_residue(unsigned int dmanr)
342{ 360{
343 unsigned int io_port = (dmanr <= 3) ? 361 unsigned int io_port = (dmanr <= 3)
344 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE 362 ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
345 : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; 363 : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
346 364
347 /* using short to get 16-bit wrap around */ 365 /* using short to get 16-bit wrap around */
@@ -352,7 +370,6 @@ static __inline__ int get_dma_residue(unsigned int dmanr)
352 370
353 return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2) 371 return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2)
354 ? count : (count << 1); 372 ? count : (count << 1);
355
356} 373}
357 374
358/* These are in kernel/dma.c: */ 375/* These are in kernel/dma.c: */
@@ -367,5 +384,7 @@ extern int isa_dma_bridge_buggy;
367#else 384#else
368#define isa_dma_bridge_buggy (0) 385#define isa_dma_bridge_buggy (0)
369#endif 386#endif
370#endif /* _ASM_DMA_H */ 387
371#endif /* __KERNEL__ */ 388#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
389
390#endif /* _ASM_POWERPC_DMA_H */
diff --git a/include/asm-ppc64/elf.h b/include/asm-powerpc/elf.h
index c919a89343db..d22b10021b5d 100644
--- a/include/asm-ppc64/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -1,10 +1,11 @@
1#ifndef __PPC64_ELF_H 1#ifndef _ASM_POWERPC_ELF_H
2#define __PPC64_ELF_H 2#define _ASM_POWERPC_ELF_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <asm/ptrace.h> 5#include <asm/ptrace.h>
6#include <asm/cputable.h> 6#include <asm/cputable.h>
7#include <asm/auxvec.h> 7#include <asm/auxvec.h>
8#include <asm/page.h>
8 9
9/* PowerPC relocations defined by the ABIs */ 10/* PowerPC relocations defined by the ABIs */
10#define R_PPC_NONE 0 11#define R_PPC_NONE 0
@@ -75,7 +76,7 @@
75#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */ 76#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
76#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */ 77#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
77 78
78/* Keep this the last entry. */ 79/* keep this the last entry. */
79#define R_PPC_NUM 95 80#define R_PPC_NUM 95
80 81
81/* 82/*
@@ -90,8 +91,6 @@
90 91
91#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */ 92#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
92#define ELF_NFPREG 33 /* includes fpscr */ 93#define ELF_NFPREG 33 /* includes fpscr */
93#define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
94#define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
95 94
96typedef unsigned long elf_greg_t64; 95typedef unsigned long elf_greg_t64;
97typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG]; 96typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
@@ -100,8 +99,21 @@ typedef unsigned int elf_greg_t32;
100typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG]; 99typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
101 100
102/* 101/*
103 * These are used to set parameters in the core dumps. 102 * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps.
104 */ 103 */
104#ifdef __powerpc64__
105# define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
106# define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
107# define ELF_GREG_TYPE elf_greg_t64
108#else
109# define ELF_NEVRREG 34 /* includes acc (as 2) */
110# define ELF_NVRREG 33 /* includes vscr */
111# define ELF_GREG_TYPE elf_greg_t32
112# define ELF_ARCH EM_PPC
113# define ELF_CLASS ELFCLASS32
114# define ELF_DATA ELFDATA2MSB
115#endif /* __powerpc64__ */
116
105#ifndef ELF_ARCH 117#ifndef ELF_ARCH
106# define ELF_ARCH EM_PPC64 118# define ELF_ARCH EM_PPC64
107# define ELF_CLASS ELFCLASS64 119# define ELF_CLASS ELFCLASS64
@@ -114,8 +126,9 @@ typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
114 typedef elf_greg_t32 elf_greg_t; 126 typedef elf_greg_t32 elf_greg_t;
115 typedef elf_gregset_t32 elf_gregset_t; 127 typedef elf_gregset_t32 elf_gregset_t;
116# define elf_addr_t u32 128# define elf_addr_t u32
117#endif 129#endif /* ELF_ARCH */
118 130
131/* Floating point registers */
119typedef double elf_fpreg_t; 132typedef double elf_fpreg_t;
120typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; 133typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
121 134
@@ -125,7 +138,9 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
125 * The entry with index 32 contains the vscr as the last word (offset 12) 138 * The entry with index 32 contains the vscr as the last word (offset 12)
126 * within the quadword. This allows the vscr to be stored as either a 139 * within the quadword. This allows the vscr to be stored as either a
127 * quadword (since it must be copied via a vector register to/from storage) 140 * quadword (since it must be copied via a vector register to/from storage)
128 * or as a word. The entry with index 33 contains the vrsave as the first 141 * or as a word.
142 *
143 * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first
129 * word (offset 0) within the quadword. 144 * word (offset 0) within the quadword.
130 * 145 *
131 * This definition of the VMX state is compatible with the current PPC32 146 * This definition of the VMX state is compatible with the current PPC32
@@ -138,7 +153,9 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
138 */ 153 */
139typedef __vector128 elf_vrreg_t; 154typedef __vector128 elf_vrreg_t;
140typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG]; 155typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
156#ifdef __powerpc64__
141typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32]; 157typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
158#endif
142 159
143/* 160/*
144 * This is used to ensure we don't load something for the wrong architecture. 161 * This is used to ensure we don't load something for the wrong architecture.
@@ -146,7 +163,7 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
146#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH) 163#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
147 164
148#define USE_ELF_CORE_DUMP 165#define USE_ELF_CORE_DUMP
149#define ELF_EXEC_PAGESIZE 4096 166#define ELF_EXEC_PAGESIZE PAGE_SIZE
150 167
151/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 168/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
152 use of this is to invoke "./ld.so someprog" to test out a new version of 169 use of this is to invoke "./ld.so someprog" to test out a new version of
@@ -158,26 +175,30 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
158#ifdef __KERNEL__ 175#ifdef __KERNEL__
159 176
160/* Common routine for both 32-bit and 64-bit processes */ 177/* Common routine for both 32-bit and 64-bit processes */
161static inline void ppc64_elf_core_copy_regs(elf_gregset_t elf_regs, 178static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
162 struct pt_regs *regs) 179 struct pt_regs *regs)
163{ 180{
164 int i; 181 int i;
165 int gprs = sizeof(struct pt_regs)/sizeof(elf_greg_t64); 182 int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE);
166 183
167 if (gprs > ELF_NGREG) 184 if (gprs > ELF_NGREG)
168 gprs = ELF_NGREG; 185 gprs = ELF_NGREG;
169 186
170 for (i=0; i < gprs; i++) 187 for (i=0; i < gprs; i++)
171 elf_regs[i] = (elf_greg_t)((elf_greg_t64 *)regs)[i]; 188 elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
189
190 memset((char *)(elf_regs) + sizeof(struct pt_regs), 0, \
191 sizeof(elf_gregset_t) - sizeof(struct pt_regs));
192
172} 193}
173#define ELF_CORE_COPY_REGS(gregs, regs) ppc64_elf_core_copy_regs(gregs, regs); 194#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
174 195
175static inline int dump_task_regs(struct task_struct *tsk, 196static inline int dump_task_regs(struct task_struct *tsk,
176 elf_gregset_t *elf_regs) 197 elf_gregset_t *elf_regs)
177{ 198{
178 struct pt_regs *regs = tsk->thread.regs; 199 struct pt_regs *regs = tsk->thread.regs;
179 if (regs) 200 if (regs)
180 ppc64_elf_core_copy_regs(*elf_regs, regs); 201 ppc_elf_core_copy_regs(*elf_regs, regs);
181 202
182 return 1; 203 return 1;
183} 204}
@@ -186,15 +207,17 @@ static inline int dump_task_regs(struct task_struct *tsk,
186extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); 207extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
187#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 208#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
188 209
189/* XXX Should we define the XFPREGS using altivec ??? */ 210#endif /* __KERNEL__ */
190
191#endif
192 211
193/* This yields a mask that user programs can use to figure out what 212/* ELF_HWCAP yields a mask that user programs can use to figure out what
194 instruction set this cpu supports. This could be done in userspace, 213 instruction set this cpu supports. This could be done in userspace,
195 but it's not easy, and we've already done it here. */ 214 but it's not easy, and we've already done it here. */
196 215# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
197#define ELF_HWCAP (cur_cpu_spec->cpu_user_features) 216#ifdef __powerpc64__
217# define ELF_PLAT_INIT(_r, load_addr) do { \
218 _r->gpr[2] = load_addr; \
219} while (0)
220#endif /* __powerpc64__ */
198 221
199/* This yields a string that ld.so will use to load implementation 222/* This yields a string that ld.so will use to load implementation
200 specific libraries for optimization. This is more specific in 223 specific libraries for optimization. This is more specific in
@@ -205,14 +228,10 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
205 228
206#define ELF_PLATFORM (NULL) 229#define ELF_PLATFORM (NULL)
207 230
208#define ELF_PLAT_INIT(_r, load_addr) do { \
209 memset(_r->gpr, 0, sizeof(_r->gpr)); \
210 _r->ctr = _r->link = _r->xer = _r->ccr = 0; \
211 _r->gpr[2] = load_addr; \
212} while (0)
213
214#ifdef __KERNEL__ 231#ifdef __KERNEL__
215#define SET_PERSONALITY(ex, ibcs2) \ 232
233#ifdef __powerpc64__
234# define SET_PERSONALITY(ex, ibcs2) \
216do { \ 235do { \
217 unsigned long new_flags = 0; \ 236 unsigned long new_flags = 0; \
218 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 237 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
@@ -225,7 +244,6 @@ do { \
225 if (personality(current->personality) != PER_LINUX32) \ 244 if (personality(current->personality) != PER_LINUX32) \
226 set_personality(PER_LINUX); \ 245 set_personality(PER_LINUX); \
227} while (0) 246} while (0)
228
229/* 247/*
230 * An executable for which elf_read_implies_exec() returns TRUE will 248 * An executable for which elf_read_implies_exec() returns TRUE will
231 * have the READ_IMPLIES_EXEC personality flag set automatically. This 249 * have the READ_IMPLIES_EXEC personality flag set automatically. This
@@ -233,19 +251,26 @@ do { \
233 * the 64bit ABI has never had these issues dont enable the workaround 251 * the 64bit ABI has never had these issues dont enable the workaround
234 * even if we have an executable stack. 252 * even if we have an executable stack.
235 */ 253 */
236#define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \ 254# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
237 (exec_stk != EXSTACK_DISABLE_X) : 0) 255 (exec_stk != EXSTACK_DISABLE_X) : 0)
256#else
257# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
258#endif /* __powerpc64__ */
238 259
239#endif 260#endif /* __KERNEL__ */
240 261
241extern int dcache_bsize; 262extern int dcache_bsize;
242extern int icache_bsize; 263extern int icache_bsize;
243extern int ucache_bsize; 264extern int ucache_bsize;
244 265
245/* We do have an arch_setup_additional_pages for vDSO matters */ 266#ifdef __powerpc64__
246#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
247struct linux_binprm; 267struct linux_binprm;
268#define ARCH_HAS_SETUP_ADDITIONAL_PAGES /* vDSO has arch_setup_additional_pages */
248extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); 269extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack);
270#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
271#else
272#define VDSO_AUX_ENT(a,b)
273#endif /* __powerpc64__ */
249 274
250/* 275/*
251 * The requirements here are: 276 * The requirements here are:
@@ -265,9 +290,8 @@ do { \
265 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ 290 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
266 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ 291 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
267 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ 292 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
268 /* vDSO base */ \ 293 VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base) \
269 NEW_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base); \ 294} while (0)
270 } while (0)
271 295
272/* PowerPC64 relocations defined by the ABIs */ 296/* PowerPC64 relocations defined by the ABIs */
273#define R_PPC64_NONE R_PPC_NONE 297#define R_PPC64_NONE R_PPC_NONE
@@ -384,4 +408,4 @@ do { \
384/* Keep this the last entry. */ 408/* Keep this the last entry. */
385#define R_PPC64_NUM 107 409#define R_PPC64_NUM 107
386 410
387#endif /* __PPC64_ELF_H */ 411#endif /* _ASM_POWERPC_ELF_H */
diff --git a/include/asm-ppc/hardirq.h b/include/asm-powerpc/hardirq.h
index 94f1411b1a93..3b3e3b49ec12 100644
--- a/include/asm-ppc/hardirq.h
+++ b/include/asm-powerpc/hardirq.h
@@ -1,11 +1,8 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_POWERPC_HARDIRQ_H
2#ifndef __ASM_HARDIRQ_H 2#define _ASM_POWERPC_HARDIRQ_H
3#define __ASM_HARDIRQ_H
4 3
5#include <linux/config.h>
6#include <linux/cache.h>
7#include <linux/smp_lock.h>
8#include <asm/irq.h> 4#include <asm/irq.h>
5#include <asm/bug.h>
9 6
10/* The __last_jiffy_stamp field is needed to ensure that no decrementer 7/* The __last_jiffy_stamp field is needed to ensure that no decrementer
11 * interrupt is lost on SMP machines. Since on most CPUs it is in the same 8 * interrupt is lost on SMP machines. Since on most CPUs it is in the same
@@ -13,7 +10,7 @@
13 * for uniformity. 10 * for uniformity.
14 */ 11 */
15typedef struct { 12typedef struct {
16 unsigned long __softirq_pending; /* set_bit is used on this */ 13 unsigned int __softirq_pending; /* set_bit is used on this */
17 unsigned int __last_jiffy_stamp; 14 unsigned int __last_jiffy_stamp;
18} ____cacheline_aligned irq_cpustat_t; 15} ____cacheline_aligned irq_cpustat_t;
19 16
@@ -27,5 +24,4 @@ static inline void ack_bad_irq(int irq)
27 BUG(); 24 BUG();
28} 25}
29 26
30#endif /* __ASM_HARDIRQ_H */ 27#endif /* _ASM_POWERPC_HARDIRQ_H */
31#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/heathrow.h b/include/asm-powerpc/heathrow.h
index 22ac179856b9..22ac179856b9 100644
--- a/include/asm-ppc/heathrow.h
+++ b/include/asm-powerpc/heathrow.h
diff --git a/include/asm-ppc64/hw_irq.h b/include/asm-powerpc/hw_irq.h
index baea40e695ec..c37b31b96337 100644
--- a/include/asm-ppc64/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -1,22 +1,17 @@
1/* 1/*
2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> 2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3 *
4 * Use inline IRQs where possible - Anton Blanchard <anton@au.ibm.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */ 3 */
4#ifndef _ASM_POWERPC_HW_IRQ_H
5#define _ASM_POWERPC_HW_IRQ_H
6
11#ifdef __KERNEL__ 7#ifdef __KERNEL__
12#ifndef _PPC64_HW_IRQ_H
13#define _PPC64_HW_IRQ_H
14 8
15#include <linux/config.h> 9#include <linux/config.h>
16#include <linux/errno.h> 10#include <linux/errno.h>
17#include <asm/irq.h> 11#include <asm/ptrace.h>
12#include <asm/processor.h>
18 13
19int timer_interrupt(struct pt_regs *); 14extern void timer_interrupt(struct pt_regs *);
20extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq); 15extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
21 16
22#ifdef CONFIG_PPC_ISERIES 17#ifdef CONFIG_PPC_ISERIES
@@ -33,45 +28,60 @@ extern void local_irq_restore(unsigned long);
33 28
34#else 29#else
35 30
36#define local_save_flags(flags) ((flags) = mfmsr()) 31#if defined(CONFIG_BOOKE)
32#define SET_MSR_EE(x) mtmsr(x)
33#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
34#elif defined(__powerpc64__)
35#define SET_MSR_EE(x) __mtmsrd(x, 1)
37#define local_irq_restore(flags) do { \ 36#define local_irq_restore(flags) do { \
38 __asm__ __volatile__("": : :"memory"); \ 37 __asm__ __volatile__("": : :"memory"); \
39 __mtmsrd((flags), 1); \ 38 __mtmsrd((flags), 1); \
40} while(0) 39} while(0)
40#else
41#define SET_MSR_EE(x) mtmsr(x)
42#define local_irq_restore(flags) mtmsr(flags)
43#endif
41 44
42static inline void local_irq_disable(void) 45static inline void local_irq_disable(void)
43{ 46{
47#ifdef CONFIG_BOOKE
48 __asm__ __volatile__("wrteei 0": : :"memory");
49#else
44 unsigned long msr; 50 unsigned long msr;
45 msr = mfmsr();
46 __mtmsrd(msr & ~MSR_EE, 1);
47 __asm__ __volatile__("": : :"memory"); 51 __asm__ __volatile__("": : :"memory");
52 msr = mfmsr();
53 SET_MSR_EE(msr & ~MSR_EE);
54#endif
48} 55}
49 56
50static inline void local_irq_enable(void) 57static inline void local_irq_enable(void)
51{ 58{
59#ifdef CONFIG_BOOKE
60 __asm__ __volatile__("wrteei 1": : :"memory");
61#else
52 unsigned long msr; 62 unsigned long msr;
53 __asm__ __volatile__("": : :"memory"); 63 __asm__ __volatile__("": : :"memory");
54 msr = mfmsr(); 64 msr = mfmsr();
55 __mtmsrd(msr | MSR_EE, 1); 65 SET_MSR_EE(msr | MSR_EE);
66#endif
56} 67}
57 68
58static inline void __do_save_and_cli(unsigned long *flags) 69static inline void local_irq_save_ptr(unsigned long *flags)
59{ 70{
60 unsigned long msr; 71 unsigned long msr;
61 msr = mfmsr(); 72 msr = mfmsr();
62 *flags = msr; 73 *flags = msr;
63 __mtmsrd(msr & ~MSR_EE, 1); 74#ifdef CONFIG_BOOKE
75 __asm__ __volatile__("wrteei 0": : :"memory");
76#else
77 SET_MSR_EE(msr & ~MSR_EE);
78#endif
64 __asm__ __volatile__("": : :"memory"); 79 __asm__ __volatile__("": : :"memory");
65} 80}
66 81
67#define local_irq_save(flags) __do_save_and_cli(&flags) 82#define local_save_flags(flags) ((flags) = mfmsr())
68 83#define local_irq_save(flags) local_irq_save_ptr(&flags)
69#define irqs_disabled() \ 84#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
70({ \
71 unsigned long flags; \
72 local_save_flags(flags); \
73 !(flags & MSR_EE); \
74})
75 85
76#endif /* CONFIG_PPC_ISERIES */ 86#endif /* CONFIG_PPC_ISERIES */
77 87
@@ -99,6 +109,6 @@ static inline void __do_save_and_cli(unsigned long *flags)
99 */ 109 */
100struct hw_interrupt_type; 110struct hw_interrupt_type;
101static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} 111static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
102 112
103#endif /* _PPC64_HW_IRQ_H */ 113#endif /* __KERNEL__ */
104#endif /* __KERNEL__ */ 114#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/include/asm-ppc/i8259.h b/include/asm-powerpc/i8259.h
index 091b71295de4..9521ad47740f 100644
--- a/include/asm-ppc/i8259.h
+++ b/include/asm-powerpc/i8259.h
@@ -1,5 +1,5 @@
1#ifndef _PPC_KERNEL_i8259_H 1#ifndef _ASM_POWERPC_I8259_H
2#define _PPC_KERNEL_i8259_H 2#define _ASM_POWERPC_I8259_H
3 3
4#include <linux/irq.h> 4#include <linux/irq.h>
5 5
@@ -8,4 +8,4 @@ extern struct hw_interrupt_type i8259_pic;
8extern void i8259_init(long intack_addr); 8extern void i8259_init(long intack_addr);
9extern int i8259_irq(struct pt_regs *regs); 9extern int i8259_irq(struct pt_regs *regs);
10 10
11#endif /* _PPC_KERNEL_i8259_H */ 11#endif /* _ASM_POWERPC_I8259_H */
diff --git a/include/asm-ppc64/iommu.h b/include/asm-powerpc/iommu.h
index c2f3b6e8a42f..9d91bdd667ae 100644
--- a/include/asm-ppc64/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * iommu.h
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 * Rewrite, cleanup: 3 * Rewrite, cleanup:
5 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation 4 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
@@ -22,6 +21,7 @@
22#ifndef _ASM_IOMMU_H 21#ifndef _ASM_IOMMU_H
23#define _ASM_IOMMU_H 22#define _ASM_IOMMU_H
24 23
24#include <linux/config.h>
25#include <asm/types.h> 25#include <asm/types.h>
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/device.h> 27#include <linux/device.h>
@@ -29,44 +29,11 @@
29 29
30/* 30/*
31 * IOMAP_MAX_ORDER defines the largest contiguous block 31 * IOMAP_MAX_ORDER defines the largest contiguous block
32 * of dma (tce) space we can get. IOMAP_MAX_ORDER = 13 32 * of dma space we can get. IOMAP_MAX_ORDER = 13
33 * allows up to 2**12 pages (4096 * 4096) = 16 MB 33 * allows up to 2**12 pages (4096 * 4096) = 16 MB
34 */ 34 */
35#define IOMAP_MAX_ORDER 13 35#define IOMAP_MAX_ORDER 13
36 36
37/*
38 * Tces come in two formats, one for the virtual bus and a different
39 * format for PCI
40 */
41#define TCE_VB 0
42#define TCE_PCI 1
43
44/* tce_entry
45 * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
46 * abstracted so layout is irrelevant.
47 */
48union tce_entry {
49 unsigned long te_word;
50 struct {
51 unsigned int tb_cacheBits :6; /* Cache hash bits - not used */
52 unsigned int tb_rsvd :6;
53 unsigned long tb_rpn :40; /* Real page number */
54 unsigned int tb_valid :1; /* Tce is valid (vb only) */
55 unsigned int tb_allio :1; /* Tce is valid for all lps (vb only) */
56 unsigned int tb_lpindex :8; /* LpIndex for user of TCE (vb only) */
57 unsigned int tb_pciwr :1; /* Write allowed (pci only) */
58 unsigned int tb_rdwr :1; /* Read allowed (pci), Write allowed (vb) */
59 } te_bits;
60#define te_cacheBits te_bits.tb_cacheBits
61#define te_rpn te_bits.tb_rpn
62#define te_valid te_bits.tb_valid
63#define te_allio te_bits.tb_allio
64#define te_lpindex te_bits.tb_lpindex
65#define te_pciwr te_bits.tb_pciwr
66#define te_rdwr te_bits.tb_rdwr
67};
68
69
70struct iommu_table { 37struct iommu_table {
71 unsigned long it_busno; /* Bus number this table belongs to */ 38 unsigned long it_busno; /* Bus number this table belongs to */
72 unsigned long it_size; /* Size of iommu table in entries */ 39 unsigned long it_size; /* Size of iommu table in entries */
@@ -83,6 +50,7 @@ struct iommu_table {
83}; 50};
84 51
85struct scatterlist; 52struct scatterlist;
53struct device_node;
86 54
87#ifdef CONFIG_PPC_MULTIPLATFORM 55#ifdef CONFIG_PPC_MULTIPLATFORM
88 56
@@ -104,9 +72,8 @@ extern void iommu_devnode_init_pSeries(struct device_node *dn);
104 72
105#ifdef CONFIG_PPC_ISERIES 73#ifdef CONFIG_PPC_ISERIES
106 74
107struct iSeries_Device_Node;
108/* Creates table for an individual device node */ 75/* Creates table for an individual device node */
109extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn); 76extern void iommu_devnode_init_iSeries(struct device_node *dn);
110 77
111#endif /* CONFIG_PPC_ISERIES */ 78#endif /* CONFIG_PPC_ISERIES */
112 79
diff --git a/include/asm-ppc/irq.h b/include/asm-powerpc/irq.h
index bd9674807f05..07c2b3fc4c66 100644
--- a/include/asm-ppc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -1,11 +1,23 @@
1#ifdef __KERNEL__ 1#ifdef __KERNEL__
2#ifndef _ASM_IRQ_H 2#ifndef _ASM_POWERPC_IRQ_H
3#define _ASM_IRQ_H 3#define _ASM_POWERPC_IRQ_H
4
5/*
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
4 11
5#include <linux/config.h> 12#include <linux/config.h>
6#include <asm/machdep.h> /* ppc_md */ 13#include <linux/threads.h>
14
15#include <asm/types.h>
7#include <asm/atomic.h> 16#include <asm/atomic.h>
8 17
18/* this number is used when no interrupt has been assigned */
19#define NO_IRQ (-1)
20
9/* 21/*
10 * These constants are used for passing information about interrupt 22 * These constants are used for passing information about interrupt
11 * signal polarity and level/edge sensing to the low-level PIC chip 23 * signal polarity and level/edge sensing to the low-level PIC chip
@@ -24,6 +36,50 @@
24 */ 36 */
25#define ARCH_HAS_IRQ_PER_CPU 37#define ARCH_HAS_IRQ_PER_CPU
26 38
39#define get_irq_desc(irq) (&irq_desc[(irq)])
40
41/* Define a way to iterate across irqs. */
42#define for_each_irq(i) \
43 for ((i) = 0; (i) < NR_IRQS; ++(i))
44
45#ifdef CONFIG_PPC64
46
47/*
48 * Maximum number of interrupt sources that we can handle.
49 */
50#define NR_IRQS 512
51
52/* Interrupt numbers are virtual in case they are sparsely
53 * distributed by the hardware.
54 */
55extern unsigned int virt_irq_to_real_map[NR_IRQS];
56
57/* Create a mapping for a real_irq if it doesn't already exist.
58 * Return the virtual irq as a convenience.
59 */
60int virt_irq_create_mapping(unsigned int real_irq);
61void virt_irq_init(void);
62
63static inline unsigned int virt_irq_to_real(unsigned int virt_irq)
64{
65 return virt_irq_to_real_map[virt_irq];
66}
67
68extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
69
70/*
71 * List of interrupt controllers.
72 */
73#define IC_INVALID 0
74#define IC_OPEN_PIC 1
75#define IC_PPC_XIC 2
76#define IC_BPA_IIC 3
77#define IC_ISERIES 4
78
79extern u64 ppc64_interrupt_controller;
80
81#else /* 32-bit */
82
27#if defined(CONFIG_40x) 83#if defined(CONFIG_40x)
28#include <asm/ibm4xx.h> 84#include <asm/ibm4xx.h>
29 85
@@ -66,11 +122,6 @@
66#define NR_UIC_IRQS UIC_WIDTH 122#define NR_UIC_IRQS UIC_WIDTH
67#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) 123#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
68#endif 124#endif
69static __inline__ int
70irq_canonicalize(int irq)
71{
72 return (irq);
73}
74 125
75#elif defined(CONFIG_44x) 126#elif defined(CONFIG_44x)
76#include <asm/ibm44x.h> 127#include <asm/ibm44x.h>
@@ -78,12 +129,6 @@ irq_canonicalize(int irq)
78#define NR_UIC_IRQS 32 129#define NR_UIC_IRQS 32
79#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) 130#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
80 131
81static __inline__ int
82irq_canonicalize(int irq)
83{
84 return (irq);
85}
86
87#elif defined(CONFIG_8xx) 132#elif defined(CONFIG_8xx)
88 133
89/* Now include the board configuration specific associations. 134/* Now include the board configuration specific associations.
@@ -170,20 +215,9 @@ irq_canonicalize(int irq)
170 */ 215 */
171#define mk_int_int_mask(IL) (1 << (7 - (IL/2))) 216#define mk_int_int_mask(IL) (1 << (7 - (IL/2)))
172 217
173/* always the same on 8xx -- Cort */
174static __inline__ int irq_canonicalize(int irq)
175{
176 return irq;
177}
178
179#elif defined(CONFIG_83xx) 218#elif defined(CONFIG_83xx)
180#include <asm/mpc83xx.h> 219#include <asm/mpc83xx.h>
181 220
182static __inline__ int irq_canonicalize(int irq)
183{
184 return irq;
185}
186
187#define NR_IRQS (NR_IPIC_INTS) 221#define NR_IRQS (NR_IPIC_INTS)
188 222
189#elif defined(CONFIG_85xx) 223#elif defined(CONFIG_85xx)
@@ -307,17 +341,13 @@ static __inline__ int irq_canonicalize(int irq)
307#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET) 341#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET)
308#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET) 342#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET)
309 343
310static __inline__ int irq_canonicalize(int irq)
311{
312 return irq;
313}
314
315#else /* CONFIG_40x + CONFIG_8xx */ 344#else /* CONFIG_40x + CONFIG_8xx */
316/* 345/*
317 * this is the # irq's for all ppc arch's (pmac/chrp/prep) 346 * this is the # irq's for all ppc arch's (pmac/chrp/prep)
318 * so it is the max of them all 347 * so it is the max of them all
319 */ 348 */
320#define NR_IRQS 256 349#define NR_IRQS 256
350#define __DO_IRQ_CANON 1
321 351
322#ifndef CONFIG_8260 352#ifndef CONFIG_8260
323 353
@@ -394,25 +424,79 @@ static __inline__ int irq_canonicalize(int irq)
394 424
395#endif /* CONFIG_8260 */ 425#endif /* CONFIG_8260 */
396 426
427#endif
428
429#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
430/* pedantic: these are long because they are used with set_bit --RR */
431extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
432extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
433extern atomic_t ppc_n_lost_interrupts;
434
435#endif
436
397/* 437/*
398 * This gets called from serial.c, which is now used on 438 * Because many systems have two overlapping names spaces for
399 * powermacs as well as prep/chrp boxes. 439 * interrupts (ISA and XICS for example), and the ISA interrupts
400 * Prep and chrp both have cascaded 8259 PICs. 440 * have historically not been easy to renumber, we allow ISA
441 * interrupts to take values 0 - 15, and shift up the remaining
442 * interrupts by 0x10.
401 */ 443 */
444#define NUM_ISA_INTERRUPTS 0x10
445extern int __irq_offset_value;
446
447static inline int irq_offset_up(int irq)
448{
449 return(irq + __irq_offset_value);
450}
451
452static inline int irq_offset_down(int irq)
453{
454 return(irq - __irq_offset_value);
455}
456
457static inline int irq_offset_value(void)
458{
459 return __irq_offset_value;
460}
461
462#ifdef __DO_IRQ_CANON
463extern int ppc_do_canonicalize_irqs;
464#else
465#define ppc_do_canonicalize_irqs 0
466#endif
467
402static __inline__ int irq_canonicalize(int irq) 468static __inline__ int irq_canonicalize(int irq)
403{ 469{
404 if (ppc_md.irq_canonicalize) 470 if (ppc_do_canonicalize_irqs && irq == 2)
405 return ppc_md.irq_canonicalize(irq); 471 irq = 9;
406 return irq; 472 return irq;
407} 473}
408 474
409#endif 475extern int distribute_irqs;
410 476
411#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 477struct irqaction;
412/* pedantic: these are long because they are used with set_bit --RR */ 478struct pt_regs;
413extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 479
414extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; 480#ifdef CONFIG_IRQSTACKS
415extern atomic_t ppc_n_lost_interrupts; 481/*
482 * Per-cpu stacks for handling hard and soft interrupts.
483 */
484extern struct thread_info *hardirq_ctx[NR_CPUS];
485extern struct thread_info *softirq_ctx[NR_CPUS];
486
487extern void irq_ctx_init(void);
488extern void call_do_softirq(struct thread_info *tp);
489extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
490 struct irqaction *action, struct thread_info *tp);
491
492#define __ARCH_HAS_DO_SOFTIRQ
493
494#else
495#define irq_ctx_init()
496
497#endif /* CONFIG_IRQSTACKS */
498
499extern void do_IRQ(struct pt_regs *regs);
416 500
417#endif /* _ASM_IRQ_H */ 501#endif /* _ASM_IRQ_H */
418#endif /* __KERNEL__ */ 502#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/kdebug.h b/include/asm-powerpc/kdebug.h
index d383d161cf8d..9dcbac674811 100644
--- a/include/asm-ppc64/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_KDEBUG_H 1#ifndef _ASM_POWERPC_KDEBUG_H
2#define _PPC64_KDEBUG_H 1 2#define _ASM_POWERPC_KDEBUG_H
3 3
4/* nearly identical to x86_64/i386 code */ 4/* nearly identical to x86_64/i386 code */
5 5
@@ -21,7 +21,7 @@ struct die_args {
21 then free. 21 then free.
22 */ 22 */
23int register_die_notifier(struct notifier_block *nb); 23int register_die_notifier(struct notifier_block *nb);
24extern struct notifier_block *ppc64_die_chain; 24extern struct notifier_block *powerpc_die_chain;
25 25
26/* Grossly misnamed. */ 26/* Grossly misnamed. */
27enum die_val { 27enum die_val {
@@ -30,14 +30,13 @@ enum die_val {
30 DIE_DABR_MATCH, 30 DIE_DABR_MATCH,
31 DIE_BPT, 31 DIE_BPT,
32 DIE_SSTEP, 32 DIE_SSTEP,
33 DIE_GPF,
34 DIE_PAGE_FAULT, 33 DIE_PAGE_FAULT,
35}; 34};
36 35
37static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) 36static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
38{ 37{
39 struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; 38 struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
40 return notifier_call_chain(&ppc64_die_chain, val, &args); 39 return notifier_call_chain(&powerpc_die_chain, val, &args);
41} 40}
42 41
43#endif 42#endif /* _ASM_POWERPC_KDEBUG_H */
diff --git a/include/asm-ppc/keylargo.h b/include/asm-powerpc/keylargo.h
index a669a3f0f5a2..a669a3f0f5a2 100644
--- a/include/asm-ppc/keylargo.h
+++ b/include/asm-powerpc/keylargo.h
diff --git a/include/asm-powerpc/kmap_types.h b/include/asm-powerpc/kmap_types.h
new file mode 100644
index 000000000000..b6bac6f61c16
--- /dev/null
+++ b/include/asm-powerpc/kmap_types.h
@@ -0,0 +1,33 @@
1#ifndef _ASM_POWERPC_KMAP_TYPES_H
2#define _ASM_POWERPC_KMAP_TYPES_H
3
4#ifdef __KERNEL__
5
6/*
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13enum km_type {
14 KM_BOUNCE_READ,
15 KM_SKB_SUNRPC_DATA,
16 KM_SKB_DATA_SOFTIRQ,
17 KM_USER0,
18 KM_USER1,
19 KM_BIO_SRC_IRQ,
20 KM_BIO_DST_IRQ,
21 KM_PTE0,
22 KM_PTE1,
23 KM_IRQ0,
24 KM_IRQ1,
25 KM_SOFTIRQ0,
26 KM_SOFTIRQ1,
27 KM_PPC_SYNC_PAGE,
28 KM_PPC_SYNC_ICACHE,
29 KM_TYPE_NR
30};
31
32#endif /* __KERNEL__ */
33#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/include/asm-ppc64/kprobes.h b/include/asm-powerpc/kprobes.h
index d9129d2b038e..b2f09f17fbe0 100644
--- a/include/asm-ppc64/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -1,8 +1,7 @@
1#ifndef _ASM_KPROBES_H 1#ifndef _ASM_POWERPC_KPROBES_H
2#define _ASM_KPROBES_H 2#define _ASM_POWERPC_KPROBES_H
3/* 3/*
4 * Kernel Probes (KProbes) 4 * Kernel Probes (KProbes)
5 * include/asm-ppc64/kprobes.h
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -64,4 +63,4 @@ static inline int kprobe_exceptions_notify(struct notifier_block *self,
64 return 0; 63 return 0;
65} 64}
66#endif 65#endif
67#endif /* _ASM_KPROBES_H */ 66#endif /* _ASM_POWERPC_KPROBES_H */
diff --git a/include/asm-ppc64/lmb.h b/include/asm-powerpc/lmb.h
index de91e034bd98..ea0afe343545 100644
--- a/include/asm-ppc64/lmb.h
+++ b/include/asm-powerpc/lmb.h
@@ -50,7 +50,7 @@ extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
50extern unsigned long __init lmb_phys_mem_size(void); 50extern unsigned long __init lmb_phys_mem_size(void);
51extern unsigned long __init lmb_end_of_DRAM(void); 51extern unsigned long __init lmb_end_of_DRAM(void);
52extern unsigned long __init lmb_abs_to_phys(unsigned long); 52extern unsigned long __init lmb_abs_to_phys(unsigned long);
53extern void __init lmb_enforce_memory_limit(void); 53extern void __init lmb_enforce_memory_limit(unsigned long);
54 54
55extern void lmb_dump_all(void); 55extern void lmb_dump_all(void);
56 56
diff --git a/include/asm-ppc64/machdep.h b/include/asm-powerpc/machdep.h
index 8027160ec96d..b3a93b476d97 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -1,6 +1,6 @@
1#ifdef __KERNEL__
2#ifndef _PPC64_MACHDEP_H 1#ifndef _PPC64_MACHDEP_H
3#define _PPC64_MACHDEP_H 2#define _PPC64_MACHDEP_H
3#ifdef __KERNEL__
4 4
5/* 5/*
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -16,6 +16,11 @@
16 16
17#include <asm/setup.h> 17#include <asm/setup.h>
18 18
19/* We export this macro for external modules like Alsa to know if
20 * ppc_md.feature_call is implemented or not
21 */
22#define CONFIG_PPC_HAS_FEATURE_CALLS
23
19struct pt_regs; 24struct pt_regs;
20struct pci_bus; 25struct pci_bus;
21struct device_node; 26struct device_node;
@@ -39,6 +44,7 @@ struct smp_ops_t {
39#endif 44#endif
40 45
41struct machdep_calls { 46struct machdep_calls {
47#ifdef CONFIG_PPC64
42 void (*hpte_invalidate)(unsigned long slot, 48 void (*hpte_invalidate)(unsigned long slot,
43 unsigned long va, 49 unsigned long va,
44 int large, 50 int large,
@@ -56,9 +62,8 @@ struct machdep_calls {
56 unsigned long vflags, 62 unsigned long vflags,
57 unsigned long rflags); 63 unsigned long rflags);
58 long (*hpte_remove)(unsigned long hpte_group); 64 long (*hpte_remove)(unsigned long hpte_group);
59 void (*flush_hash_range)(unsigned long context, 65 void (*flush_hash_range)(unsigned long number, int local);
60 unsigned long number, 66
61 int local);
62 /* special for kexec, to be called in real mode, linar mapping is 67 /* special for kexec, to be called in real mode, linar mapping is
63 * destroyed as well */ 68 * destroyed as well */
64 void (*hpte_clear_all)(void); 69 void (*hpte_clear_all)(void);
@@ -75,18 +80,21 @@ struct machdep_calls {
75 void (*iommu_dev_setup)(struct pci_dev *dev); 80 void (*iommu_dev_setup)(struct pci_dev *dev);
76 void (*iommu_bus_setup)(struct pci_bus *bus); 81 void (*iommu_bus_setup)(struct pci_bus *bus);
77 void (*irq_bus_setup)(struct pci_bus *bus); 82 void (*irq_bus_setup)(struct pci_bus *bus);
83#endif
78 84
79 int (*probe)(int platform); 85 int (*probe)(int platform);
80 void (*setup_arch)(void); 86 void (*setup_arch)(void);
81 void (*init_early)(void); 87 void (*init_early)(void);
82 /* Optional, may be NULL. */ 88 /* Optional, may be NULL. */
83 void (*get_cpuinfo)(struct seq_file *m); 89 void (*show_cpuinfo)(struct seq_file *m);
90 void (*show_percpuinfo)(struct seq_file *m, int i);
84 91
85 void (*init_IRQ)(void); 92 void (*init_IRQ)(void);
86 int (*get_irq)(struct pt_regs *); 93 int (*get_irq)(struct pt_regs *);
87 void (*cpu_irq_down)(int secondary); 94 void (*cpu_irq_down)(int secondary);
88 95
89 /* PCI stuff */ 96 /* PCI stuff */
97 /* Called after scanning the bus, before allocating resources */
90 void (*pcibios_fixup)(void); 98 void (*pcibios_fixup)(void);
91 int (*pci_probe_mode)(struct pci_bus *); 99 int (*pci_probe_mode)(struct pci_bus *);
92 100
@@ -96,9 +104,13 @@ struct machdep_calls {
96 void (*panic)(char *str); 104 void (*panic)(char *str);
97 void (*cpu_die)(void); 105 void (*cpu_die)(void);
98 106
107 long (*time_init)(void); /* Optional, may be NULL */
108
99 int (*set_rtc_time)(struct rtc_time *); 109 int (*set_rtc_time)(struct rtc_time *);
100 void (*get_rtc_time)(struct rtc_time *); 110 void (*get_rtc_time)(struct rtc_time *);
101 void (*get_boot_time)(struct rtc_time *); 111 unsigned long (*get_boot_time)(void);
112 unsigned char (*rtc_read_val)(int addr);
113 void (*rtc_write_val)(int addr, unsigned char val);
102 114
103 void (*calibrate_decr)(void); 115 void (*calibrate_decr)(void);
104 116
@@ -107,10 +119,12 @@ struct machdep_calls {
107 /* Interface for platform error logging */ 119 /* Interface for platform error logging */
108 void (*log_error)(char *buf, unsigned int err_type, int fatal); 120 void (*log_error)(char *buf, unsigned int err_type, int fatal);
109 121
122 unsigned char (*nvram_read_val)(int addr);
123 void (*nvram_write_val)(int addr, unsigned char val);
110 ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index); 124 ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
111 ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index); 125 ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
112 ssize_t (*nvram_size)(void); 126 ssize_t (*nvram_size)(void);
113 int (*nvram_sync)(void); 127 void (*nvram_sync)(void);
114 128
115 /* Exception handlers */ 129 /* Exception handlers */
116 void (*system_reset_exception)(struct pt_regs *regs); 130 void (*system_reset_exception)(struct pt_regs *regs);
@@ -135,14 +149,92 @@ struct machdep_calls {
135 pgprot_t vma_prot); 149 pgprot_t vma_prot);
136 150
137 /* Idle loop for this platform, leave empty for default idle loop */ 151 /* Idle loop for this platform, leave empty for default idle loop */
138 int (*idle_loop)(void); 152 void (*idle_loop)(void);
139 153
140 /* Function to enable pmcs for this platform, called once per cpu. */ 154 /* Function to enable performance monitor counters for this
155 platform, called once per cpu. */
141 void (*enable_pmcs)(void); 156 void (*enable_pmcs)(void);
157
158#ifdef CONFIG_PPC32 /* XXX for now */
159 /* A general init function, called by ppc_init in init/main.c.
160 May be NULL. */
161 void (*init)(void);
162
163 void (*idle)(void);
164 void (*power_save)(void);
165
166 void (*heartbeat)(void);
167 unsigned long heartbeat_reset;
168 unsigned long heartbeat_count;
169
170 void (*setup_io_mappings)(void);
171
172 void (*early_serial_map)(void);
173 void (*kgdb_map_scc)(void);
174
175 /*
176 * optional PCI "hooks"
177 */
178
179 /* Called after PPC generic resource fixup to perform
180 machine specific fixups */
181 void (*pcibios_fixup_resources)(struct pci_dev *);
182
183 /* Called for each PCI bus in the system when it's probed */
184 void (*pcibios_fixup_bus)(struct pci_bus *);
185
186 /* Called when pci_enable_device() is called (initial=0) or
187 * when a device with no assigned resource is found (initial=1).
188 * Returns 0 to allow assignment/enabling of the device. */
189 int (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
190
191 /* For interrupt routing */
192 unsigned char (*pci_swizzle)(struct pci_dev *, unsigned char *);
193 int (*pci_map_irq)(struct pci_dev *, unsigned char, unsigned char);
194
195 /* Called in indirect_* to avoid touching devices */
196 int (*pci_exclude_device)(unsigned char, unsigned char);
197
198 /* Called at then very end of pcibios_init() */
199 void (*pcibios_after_init)(void);
200
201 /* this is for modules, since _machine can be a define -- Cort */
202 int ppc_machine;
203
204#ifdef CONFIG_KEXEC
205 /* Called to shutdown machine specific hardware not already controlled
206 * by other drivers.
207 * XXX Should we move this one out of kexec scope?
208 */
209 void (*machine_shutdown)(void);
210
211 /* Called to do the minimal shutdown needed to run a kexec'd kernel
212 * to run successfully.
213 * XXX Should we move this one out of kexec scope?
214 */
215 void (*machine_crash_shutdown)(void);
216
217 /* Called to do what every setup is needed on image and the
218 * reboot code buffer. Returns 0 on success.
219 * Provide your own (maybe dummy) implementation if your platform
220 * claims to support kexec.
221 */
222 int (*machine_kexec_prepare)(struct kimage *image);
223
224 /* Called to handle any machine specific cleanup on image */
225 void (*machine_kexec_cleanup)(struct kimage *image);
226
227 /* Called to perform the _real_ kexec.
228 * Do NOT allocate memory or fail here. We are past the point of
229 * no return.
230 */
231 void (*machine_kexec)(struct kimage *image);
232#endif /* CONFIG_KEXEC */
233#endif /* CONFIG_PPC32 */
142}; 234};
143 235
144extern int default_idle(void); 236extern void default_idle(void);
145extern int native_idle(void); 237extern void native_idle(void);
146 238
147extern struct machdep_calls ppc_md; 239extern struct machdep_calls ppc_md;
148extern char cmd_line[COMMAND_LINE_SIZE]; 240extern char cmd_line[COMMAND_LINE_SIZE];
@@ -162,6 +254,13 @@ extern sys_ctrler_t sys_ctrler;
162 254
163#endif /* CONFIG_PPC_PMAC */ 255#endif /* CONFIG_PPC_PMAC */
164 256
257extern void setup_pci_ptrs(void);
258
259#ifdef CONFIG_SMP
260/* Poor default implementations */
261extern void __devinit smp_generic_give_timebase(void);
262extern void __devinit smp_generic_take_timebase(void);
263#endif /* CONFIG_SMP */
165 264
166 265
167/* Functions to produce codes on the leds. 266/* Functions to produce codes on the leds.
@@ -181,5 +280,5 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
181 ppc_md.log_error(buf, err_type, fatal); 280 ppc_md.log_error(buf, err_type, fatal);
182} 281}
183 282
184#endif /* _PPC64_MACHDEP_H */
185#endif /* __KERNEL__ */ 283#endif /* __KERNEL__ */
284#endif /* _PPC64_MACHDEP_H */
diff --git a/include/asm-ppc/macio.h b/include/asm-powerpc/macio.h
index b553dd4b139e..b553dd4b139e 100644
--- a/include/asm-ppc/macio.h
+++ b/include/asm-powerpc/macio.h
diff --git a/include/asm-ppc/mediabay.h b/include/asm-powerpc/mediabay.h
index 9daa3252d7b6..9daa3252d7b6 100644
--- a/include/asm-ppc/mediabay.h
+++ b/include/asm-powerpc/mediabay.h
diff --git a/arch/ppc64/kernel/mpic.h b/include/asm-powerpc/mpic.h
index ca78a7f10528..7083d1f74260 100644
--- a/arch/ppc64/kernel/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -1,3 +1,6 @@
1#ifndef _ASM_POWERPC_MPIC_H
2#define _ASM_POWERPC_MPIC_H
3
1#include <linux/irq.h> 4#include <linux/irq.h>
2 5
3/* 6/*
@@ -258,12 +261,21 @@ extern void mpic_setup_this_cpu(void);
258/* Clean up for kexec (or cpu offline or ...) */ 261/* Clean up for kexec (or cpu offline or ...) */
259extern void mpic_teardown_this_cpu(int secondary); 262extern void mpic_teardown_this_cpu(int secondary);
260 263
264/* Get the current cpu priority for this cpu (0..15) */
265extern int mpic_cpu_get_priority(void);
266
267/* Set the current cpu priority for this cpu */
268extern void mpic_cpu_set_priority(int prio);
269
261/* Request IPIs on primary mpic */ 270/* Request IPIs on primary mpic */
262extern void mpic_request_ipis(void); 271extern void mpic_request_ipis(void);
263 272
264/* Send an IPI (non offseted number 0..3) */ 273/* Send an IPI (non offseted number 0..3) */
265extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask); 274extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
266 275
276/* Send a message (IPI) to a given target (cpu number or MSG_*) */
277void smp_mpic_message_pass(int target, int msg);
278
267/* Fetch interrupt from a given mpic */ 279/* Fetch interrupt from a given mpic */
268extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); 280extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
269/* This one gets to the primary mpic */ 281/* This one gets to the primary mpic */
@@ -271,3 +283,5 @@ extern int mpic_get_irq(struct pt_regs *regs);
271 283
272/* global mpic for pSeries */ 284/* global mpic for pSeries */
273extern struct mpic *pSeries_mpic; 285extern struct mpic *pSeries_mpic;
286
287#endif /* _ASM_POWERPC_MPIC_H */
diff --git a/include/asm-ppc/of_device.h b/include/asm-powerpc/of_device.h
index 575bce418f80..ddb16aae0bd6 100644
--- a/include/asm-ppc/of_device.h
+++ b/include/asm-powerpc/of_device.h
@@ -1,5 +1,5 @@
1#ifndef __OF_DEVICE_H__ 1#ifndef _ASM_POWERPC_OF_DEVICE_H
2#define __OF_DEVICE_H__ 2#define _ASM_POWERPC_OF_DEVICE_H
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/mod_devicetable.h> 5#include <linux/mod_devicetable.h>
@@ -61,5 +61,4 @@ extern struct of_device *of_platform_device_create(struct device_node *np,
61 struct device *parent); 61 struct device *parent);
62extern void of_release_dev(struct device *dev); 62extern void of_release_dev(struct device *dev);
63 63
64#endif /* __OF_DEVICE_H__ */ 64#endif /* _ASM_POWERPC_OF_DEVICE_H */
65
diff --git a/include/asm-ppc/ohare.h b/include/asm-powerpc/ohare.h
index 023b59772231..023b59772231 100644
--- a/include/asm-ppc/ohare.h
+++ b/include/asm-powerpc/ohare.h
diff --git a/include/asm-ppc64/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index b04f1dfb1421..8013cd273ced 100644
--- a/include/asm-ppc64/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -9,39 +9,49 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef OP_IMPL_H 12#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
13#define OP_IMPL_H 1 13#define _ASM_POWERPC_OPROFILE_IMPL_H
14 14
15#define OP_MAX_COUNTER 8 15#define OP_MAX_COUNTER 8
16 16
17/* Per-counter configuration as set via oprofilefs. */ 17/* Per-counter configuration as set via oprofilefs. */
18struct op_counter_config { 18struct op_counter_config {
19#ifdef __powerpc64__
19 unsigned long valid; 20 unsigned long valid;
21#endif
20 unsigned long enabled; 22 unsigned long enabled;
21 unsigned long event; 23 unsigned long event;
22 unsigned long count; 24 unsigned long count;
23 unsigned long kernel; 25 unsigned long kernel;
26#ifdef __powerpc64__
24 /* We dont support per counter user/kernel selection */ 27 /* We dont support per counter user/kernel selection */
28#endif
25 unsigned long user; 29 unsigned long user;
26 unsigned long unit_mask; 30 unsigned long unit_mask;
27}; 31};
28 32
29/* System-wide configuration as set via oprofilefs. */ 33/* System-wide configuration as set via oprofilefs. */
30struct op_system_config { 34struct op_system_config {
35#ifdef __powerpc64__
31 unsigned long mmcr0; 36 unsigned long mmcr0;
32 unsigned long mmcr1; 37 unsigned long mmcr1;
33 unsigned long mmcra; 38 unsigned long mmcra;
39#endif
34 unsigned long enable_kernel; 40 unsigned long enable_kernel;
35 unsigned long enable_user; 41 unsigned long enable_user;
42#ifdef __powerpc64__
36 unsigned long backtrace_spinlocks; 43 unsigned long backtrace_spinlocks;
44#endif
37}; 45};
38 46
39/* Per-arch configuration */ 47/* Per-arch configuration */
40struct op_ppc64_model { 48struct op_powerpc_model {
41 void (*reg_setup) (struct op_counter_config *, 49 void (*reg_setup) (struct op_counter_config *,
42 struct op_system_config *, 50 struct op_system_config *,
43 int num_counters); 51 int num_counters);
52#ifdef __powerpc64__
44 void (*cpu_setup) (void *); 53 void (*cpu_setup) (void *);
54#endif
45 void (*start) (struct op_counter_config *); 55 void (*start) (struct op_counter_config *);
46 void (*stop) (void); 56 void (*stop) (void);
47 void (*handle_interrupt) (struct pt_regs *, 57 void (*handle_interrupt) (struct pt_regs *,
@@ -49,8 +59,9 @@ struct op_ppc64_model {
49 int num_counters; 59 int num_counters;
50}; 60};
51 61
52extern struct op_ppc64_model op_model_rs64; 62#ifdef __powerpc64__
53extern struct op_ppc64_model op_model_power4; 63extern struct op_powerpc_model op_model_rs64;
64extern struct op_powerpc_model op_model_power4;
54 65
55static inline unsigned int ctr_read(unsigned int i) 66static inline unsigned int ctr_read(unsigned int i)
56{ 67{
@@ -107,5 +118,6 @@ static inline void ctr_write(unsigned int i, unsigned int val)
107 break; 118 break;
108 } 119 }
109} 120}
121#endif /* __powerpc64__ */
110 122
111#endif 123#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/include/asm-ppc64/pSeries_reconfig.h b/include/asm-powerpc/pSeries_reconfig.h
index c0db1ea7f7d1..c0db1ea7f7d1 100644
--- a/include/asm-ppc64/pSeries_reconfig.h
+++ b/include/asm-powerpc/pSeries_reconfig.h
diff --git a/include/asm-ppc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h
index e9683bcff19b..e9683bcff19b 100644
--- a/include/asm-ppc/pmac_feature.h
+++ b/include/asm-powerpc/pmac_feature.h
diff --git a/include/asm-ppc/pmac_low_i2c.h b/include/asm-powerpc/pmac_low_i2c.h
index 809a5963d5e7..809a5963d5e7 100644
--- a/include/asm-ppc/pmac_low_i2c.h
+++ b/include/asm-powerpc/pmac_low_i2c.h
diff --git a/include/asm-ppc64/pmc.h b/include/asm-powerpc/pmc.h
index d1d297dbccfe..2f3c3fc2b796 100644
--- a/include/asm-ppc64/pmc.h
+++ b/include/asm-powerpc/pmc.h
@@ -16,8 +16,8 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19#ifndef _PPC64_PMC_H 19#ifndef _POWERPC_PMC_H
20#define _PPC64_PMC_H 20#define _POWERPC_PMC_H
21 21
22#include <asm/ptrace.h> 22#include <asm/ptrace.h>
23 23
@@ -26,6 +26,21 @@ typedef void (*perf_irq_t)(struct pt_regs *);
26int reserve_pmc_hardware(perf_irq_t new_perf_irq); 26int reserve_pmc_hardware(perf_irq_t new_perf_irq);
27void release_pmc_hardware(void); 27void release_pmc_hardware(void);
28 28
29#ifdef CONFIG_PPC64
29void power4_enable_pmcs(void); 30void power4_enable_pmcs(void);
31#endif
30 32
31#endif /* _PPC64_PMC_H */ 33#ifdef CONFIG_FSL_BOOKE
34void init_pmc_stop(int ctr);
35void set_pmc_event(int ctr, int event);
36void set_pmc_user_kernel(int ctr, int user, int kernel);
37void set_pmc_marked(int ctr, int mark0, int mark1);
38void pmc_start_ctr(int ctr, int enable);
39void pmc_start_ctrs(int enable);
40void pmc_stop_ctrs(void);
41void dump_pmcs(void);
42
43extern struct op_powerpc_model op_model_fsl_booke;
44#endif
45
46#endif /* _POWERPC_PMC_H */
diff --git a/include/asm-ppc64/posix_types.h b/include/asm-powerpc/posix_types.h
index 516de7201b5d..c6391077224f 100644
--- a/include/asm-ppc64/posix_types.h
+++ b/include/asm-powerpc/posix_types.h
@@ -1,44 +1,54 @@
1#ifndef _PPC64_POSIX_TYPES_H 1#ifndef _ASM_POWERPC_POSIX_TYPES_H
2#define _PPC64_POSIX_TYPES_H 2#define _ASM_POWERPC_POSIX_TYPES_H
3 3
4/* 4/*
5 * This file is generally used by user-level software, so you need to 5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot 6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used. 7 * assume GCC is being used.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */ 8 */
14 9
15typedef unsigned long __kernel_ino_t; 10typedef unsigned long __kernel_ino_t;
16typedef unsigned long __kernel_nlink_t;
17typedef unsigned int __kernel_mode_t; 11typedef unsigned int __kernel_mode_t;
18typedef long __kernel_off_t; 12typedef long __kernel_off_t;
19typedef long long __kernel_loff_t;
20typedef int __kernel_pid_t; 13typedef int __kernel_pid_t;
21typedef int __kernel_ipc_pid_t;
22typedef unsigned int __kernel_uid_t; 14typedef unsigned int __kernel_uid_t;
23typedef unsigned int __kernel_gid_t; 15typedef unsigned int __kernel_gid_t;
24typedef unsigned long __kernel_size_t;
25typedef long __kernel_ssize_t;
26typedef long __kernel_ptrdiff_t; 16typedef long __kernel_ptrdiff_t;
27typedef long __kernel_time_t; 17typedef long __kernel_time_t;
18typedef long __kernel_clock_t;
28typedef int __kernel_timer_t; 19typedef int __kernel_timer_t;
29typedef int __kernel_clockid_t; 20typedef int __kernel_clockid_t;
30typedef long __kernel_suseconds_t; 21typedef long __kernel_suseconds_t;
31typedef long __kernel_clock_t;
32typedef int __kernel_daddr_t; 22typedef int __kernel_daddr_t;
33typedef char * __kernel_caddr_t; 23typedef char * __kernel_caddr_t;
34typedef unsigned short __kernel_uid16_t; 24typedef unsigned short __kernel_uid16_t;
35typedef unsigned short __kernel_gid16_t; 25typedef unsigned short __kernel_gid16_t;
36typedef unsigned int __kernel_uid32_t; 26typedef unsigned int __kernel_uid32_t;
37typedef unsigned int __kernel_gid32_t; 27typedef unsigned int __kernel_gid32_t;
38
39typedef unsigned int __kernel_old_uid_t; 28typedef unsigned int __kernel_old_uid_t;
40typedef unsigned int __kernel_old_gid_t; 29typedef unsigned int __kernel_old_gid_t;
30
31#ifdef __powerpc64__
32typedef unsigned long __kernel_nlink_t;
33typedef int __kernel_ipc_pid_t;
34typedef unsigned long __kernel_size_t;
35typedef long __kernel_ssize_t;
41typedef unsigned long __kernel_old_dev_t; 36typedef unsigned long __kernel_old_dev_t;
37#else
38typedef unsigned short __kernel_nlink_t;
39typedef short __kernel_ipc_pid_t;
40typedef unsigned int __kernel_size_t;
41typedef int __kernel_ssize_t;
42typedef unsigned int __kernel_old_dev_t;
43#endif
44
45#ifdef __powerpc64__
46typedef long long __kernel_loff_t;
47#else
48#ifdef __GNUC__
49typedef long long __kernel_loff_t;
50#endif
51#endif
42 52
43typedef struct { 53typedef struct {
44 int val[2]; 54 int val[2];
@@ -116,4 +126,4 @@ static __inline__ void __FD_ZERO(__kernel_fd_set *p)
116 126
117#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ 127#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
118#endif /* __GNUC__ */ 128#endif /* __GNUC__ */
119#endif /* _PPC64_POSIX_TYPES_H */ 129#endif /* _ASM_POWERPC_POSIX_TYPES_H */
diff --git a/arch/ppc64/kernel/pci.h b/include/asm-powerpc/ppc-pci.h
index 5eb2cc320566..a88728fba8f6 100644
--- a/arch/ppc64/kernel/pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -6,8 +6,8 @@
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9#ifndef __PPC_KERNEL_PCI_H__ 9#ifndef _ASM_POWERPC_PPC_PCI_H
10#define __PPC_KERNEL_PCI_H__ 10#define _ASM_POWERPC_PPC_PCI_H
11 11
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <asm/pci-bridge.h> 13#include <asm/pci-bridge.h>
@@ -51,4 +51,4 @@ extern unsigned long pci_probe_only;
51extern unsigned long pci_assign_all_buses; 51extern unsigned long pci_assign_all_buses;
52extern int pci_read_irq_line(struct pci_dev *pci_dev); 52extern int pci_read_irq_line(struct pci_dev *pci_dev);
53 53
54#endif /* __PPC_KERNEL_PCI_H__ */ 54#endif /* _ASM_POWERPC_PPC_PCI_H */
diff --git a/include/asm-ppc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index bb53e2def363..96367e04fa58 100644
--- a/include/asm-ppc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -1,38 +1,42 @@
1/* 1/*
2 * include/asm-ppc/ppc_asm.h
3 *
4 * Definitions used by various bits of low-level assembly code on PowerPC.
5 *
6 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan. 2 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */ 3 */
4#ifndef _ASM_POWERPC_PPC_ASM_H
5#define _ASM_POWERPC_PPC_ASM_H
13 6
7#include <linux/stringify.h>
14#include <linux/config.h> 8#include <linux/config.h>
15 9
10#ifdef __ASSEMBLY__
11
16/* 12/*
17 * Macros for storing registers into and loading registers from 13 * Macros for storing registers into and loading registers from
18 * exception frames. 14 * exception frames.
19 */ 15 */
16#ifdef __powerpc64__
17#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
18#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
19#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
20#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
21#else
20#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base) 22#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
23#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
24#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
25 SAVE_10GPRS(22, base)
26#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
27 REST_10GPRS(22, base)
28#endif
29
30
21#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) 31#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
22#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) 32#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
23#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base) 33#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
24#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base) 34#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
25#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
26#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base) 35#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
27#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base) 36#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
28#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) 37#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
29#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) 38#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
30 39
31#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
32 SAVE_10GPRS(22, base)
33#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
34 REST_10GPRS(22, base)
35
36#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base) 40#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
37#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) 41#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
38#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) 42#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
@@ -47,32 +51,165 @@
47#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base) 51#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
48 52
49#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base 53#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
50#define SAVE_2VR(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base) 54#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
51#define SAVE_4VR(n,b,base) SAVE_2VR(n,b,base); SAVE_2VR(n+2,b,base) 55#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
52#define SAVE_8VR(n,b,base) SAVE_4VR(n,b,base); SAVE_4VR(n+4,b,base) 56#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
53#define SAVE_16VR(n,b,base) SAVE_8VR(n,b,base); SAVE_8VR(n+8,b,base) 57#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
54#define SAVE_32VR(n,b,base) SAVE_16VR(n,b,base); SAVE_16VR(n+16,b,base) 58#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
55#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base 59#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
56#define REST_2VR(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base) 60#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
57#define REST_4VR(n,b,base) REST_2VR(n,b,base); REST_2VR(n+2,b,base) 61#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
58#define REST_8VR(n,b,base) REST_4VR(n,b,base); REST_4VR(n+4,b,base) 62#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
59#define REST_16VR(n,b,base) REST_8VR(n,b,base); REST_8VR(n+8,b,base) 63#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
60#define REST_32VR(n,b,base) REST_16VR(n,b,base); REST_16VR(n+16,b,base) 64#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
61 65
62#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base) 66#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
63#define SAVE_2EVR(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base) 67#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
64#define SAVE_4EVR(n,s,base) SAVE_2EVR(n,s,base); SAVE_2EVR(n+2,s,base) 68#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
65#define SAVE_8EVR(n,s,base) SAVE_4EVR(n,s,base); SAVE_4EVR(n+4,s,base) 69#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
66#define SAVE_16EVR(n,s,base) SAVE_8EVR(n,s,base); SAVE_8EVR(n+8,s,base) 70#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
67#define SAVE_32EVR(n,s,base) SAVE_16EVR(n,s,base); SAVE_16EVR(n+16,s,base) 71#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
68
69#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n 72#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
70#define REST_2EVR(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base) 73#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
71#define REST_4EVR(n,s,base) REST_2EVR(n,s,base); REST_2EVR(n+2,s,base) 74#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
72#define REST_8EVR(n,s,base) REST_4EVR(n,s,base); REST_4EVR(n+4,s,base) 75#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
73#define REST_16EVR(n,s,base) REST_8EVR(n,s,base); REST_8EVR(n+8,s,base) 76#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
74#define REST_32EVR(n,s,base) REST_16EVR(n,s,base); REST_16EVR(n+16,s,base) 77#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
78
79/* Macros to adjust thread priority for Iseries hardware multithreading */
80#define HMT_VERY_LOW or 31,31,31 # very low priority\n"
81#define HMT_LOW or 1,1,1
82#define HMT_MEDIUM_LOW or 6,6,6 # medium low priority\n"
83#define HMT_MEDIUM or 2,2,2
84#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority\n"
85#define HMT_HIGH or 3,3,3
86
87/* handle instructions that older assemblers may not know */
88#define RFCI .long 0x4c000066 /* rfci instruction */
89#define RFDI .long 0x4c00004e /* rfdi instruction */
90#define RFMCI .long 0x4c00004c /* rfmci instruction */
91
92#ifdef CONFIG_PPC64
93
94#define XGLUE(a,b) a##b
95#define GLUE(a,b) XGLUE(a,b)
96
97#define _GLOBAL(name) \
98 .section ".text"; \
99 .align 2 ; \
100 .globl name; \
101 .globl GLUE(.,name); \
102 .section ".opd","aw"; \
103name: \
104 .quad GLUE(.,name); \
105 .quad .TOC.@tocbase; \
106 .quad 0; \
107 .previous; \
108 .type GLUE(.,name),@function; \
109GLUE(.,name):
110
111#define _KPROBE(name) \
112 .section ".kprobes.text","a"; \
113 .align 2 ; \
114 .globl name; \
115 .globl GLUE(.,name); \
116 .section ".opd","aw"; \
117name: \
118 .quad GLUE(.,name); \
119 .quad .TOC.@tocbase; \
120 .quad 0; \
121 .previous; \
122 .type GLUE(.,name),@function; \
123GLUE(.,name):
124
125#define _STATIC(name) \
126 .section ".text"; \
127 .align 2 ; \
128 .section ".opd","aw"; \
129name: \
130 .quad GLUE(.,name); \
131 .quad .TOC.@tocbase; \
132 .quad 0; \
133 .previous; \
134 .type GLUE(.,name),@function; \
135GLUE(.,name):
136
137#else /* 32-bit */
138
139#define _GLOBAL(n) \
140 .text; \
141 .stabs __stringify(n:F-1),N_FUN,0,0,n;\
142 .globl n; \
143n:
144
145#define _KPROBE(n) \
146 .section ".kprobes.text","a"; \
147 .globl n; \
148n:
149
150#endif
151
152/*
153 * LOADADDR( rn, name )
154 * loads the address of 'name' into 'rn'
155 *
156 * LOADBASE( rn, name )
157 * loads the address (less the low 16 bits) of 'name' into 'rn'
158 * suitable for base+disp addressing
159 */
160#ifdef __powerpc64__
161#define LOADADDR(rn,name) \
162 lis rn,name##@highest; \
163 ori rn,rn,name##@higher; \
164 rldicr rn,rn,32,31; \
165 oris rn,rn,name##@h; \
166 ori rn,rn,name##@l
167
168#define LOADBASE(rn,name) \
169 .section .toc,"aw"; \
1701: .tc name[TC],name; \
171 .previous; \
172 ld rn,1b@toc(r2)
173
174#define OFF(name) 0
175
176#define SET_REG_TO_CONST(reg, value) \
177 lis reg,(((value)>>48)&0xFFFF); \
178 ori reg,reg,(((value)>>32)&0xFFFF); \
179 rldicr reg,reg,32,31; \
180 oris reg,reg,(((value)>>16)&0xFFFF); \
181 ori reg,reg,((value)&0xFFFF);
182
183#define SET_REG_TO_LABEL(reg, label) \
184 lis reg,(label)@highest; \
185 ori reg,reg,(label)@higher; \
186 rldicr reg,reg,32,31; \
187 oris reg,reg,(label)@h; \
188 ori reg,reg,(label)@l;
189
190/* operations for longs and pointers */
191#define LDL ld
192#define STL std
193#define CMPI cmpdi
194
195#else /* 32-bit */
196#define LOADADDR(rn,name) \
197 lis rn,name@ha; \
198 addi rn,rn,name@l
199
200#define LOADBASE(rn,name) \
201 lis rn,name@ha
202
203#define OFF(name) name@l
204
205/* operations for longs and pointers */
206#define LDL lwz
207#define STL stw
208#define CMPI cmpwi
75 209
210#endif
211
212/* various errata or part fixups */
76#ifdef CONFIG_PPC601_SYNC_FIX 213#ifdef CONFIG_PPC601_SYNC_FIX
77#define SYNC \ 214#define SYNC \
78BEGIN_FTR_SECTION \ 215BEGIN_FTR_SECTION \
@@ -93,6 +230,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
93#define ISYNC_601 230#define ISYNC_601
94#endif 231#endif
95 232
233
96#ifndef CONFIG_SMP 234#ifndef CONFIG_SMP
97#define TLBSYNC 235#define TLBSYNC
98#else /* CONFIG_SMP */ 236#else /* CONFIG_SMP */
@@ -104,6 +242,7 @@ BEGIN_FTR_SECTION \
104END_FTR_SECTION_IFCLR(CPU_FTR_601) 242END_FTR_SECTION_IFCLR(CPU_FTR_601)
105#endif 243#endif
106 244
245
107/* 246/*
108 * This instruction is not implemented on the PPC 603 or 601; however, on 247 * This instruction is not implemented on the PPC 603 or 601; however, on
109 * the 403GCX and 405GP tlbia IS defined and tlbie is not. 248 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
@@ -121,14 +260,44 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
121 bdnz 0b 260 bdnz 0b
122#endif 261#endif
123 262
124#ifdef CONFIG_BOOKE 263
264#ifdef CONFIG_IBM405_ERR77
265#define PPC405_ERR77(ra,rb) dcbt ra, rb;
266#define PPC405_ERR77_SYNC sync;
267#else
268#define PPC405_ERR77(ra,rb)
269#define PPC405_ERR77_SYNC
270#endif
271
272
273#ifdef CONFIG_IBM440EP_ERR42
274#define PPC440EP_ERR42 isync
275#else
276#define PPC440EP_ERR42
277#endif
278
279
280#if defined(CONFIG_BOOKE)
125#define tophys(rd,rs) \ 281#define tophys(rd,rs) \
126 addis rd,rs,0 282 addis rd,rs,0
127 283
128#define tovirt(rd,rs) \ 284#define tovirt(rd,rs) \
129 addis rd,rs,0 285 addis rd,rs,0
130 286
131#else /* CONFIG_BOOKE */ 287#elif defined(CONFIG_PPC64)
288/* PPPBBB - DRENG If KERNELBASE is always 0xC0...,
289 * Then we can easily do this with one asm insn. -Peter
290 */
291#define tophys(rd,rs) \
292 lis rd,((KERNELBASE>>48)&0xFFFF); \
293 rldicr rd,rd,32,31; \
294 sub rd,rs,rd
295
296#define tovirt(rd,rs) \
297 lis rd,((KERNELBASE>>48)&0xFFFF); \
298 rldicr rd,rd,32,31; \
299 add rd,rs,rd
300#else
132/* 301/*
133 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the 302 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
134 * physical base address of RAM at compile time. 303 * physical base address of RAM at compile time.
@@ -146,22 +315,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
146 .align 1; \ 315 .align 1; \
147 .long 0b; \ 316 .long 0b; \
148 .previous 317 .previous
149#endif /* CONFIG_BOOKE */ 318#endif
150 319
151/* 320#ifdef CONFIG_PPC64
152 * On 64-bit cpus, we use the rfid instruction instead of rfi, but 321#define RFI rfid
153 * we then have to make sure we preserve the top 32 bits except for 322#define MTMSRD(r) mtmsrd r
154 * the 64-bit mode bit, which we clear.
155 */
156#ifdef CONFIG_PPC64BRIDGE
157#define FIX_SRR1(ra, rb) \
158 mr rb,ra; \
159 mfmsr ra; \
160 clrldi ra,ra,1; /* turn off 64-bit mode */ \
161 rldimi ra,rb,0,32
162#define RFI .long 0x4c000024 /* rfid instruction */
163#define MTMSRD(r) .long (0x7c000164 + ((r) << 21)) /* mtmsrd */
164#define CLR_TOP32(r) rlwinm (r),(r),0,0,31 /* clear top 32 bits */
165 323
166#else 324#else
167#define FIX_SRR1(ra, rb) 325#define FIX_SRR1(ra, rb)
@@ -172,24 +330,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
172#endif 330#endif
173#define MTMSRD(r) mtmsr r 331#define MTMSRD(r) mtmsr r
174#define CLR_TOP32(r) 332#define CLR_TOP32(r)
175#endif /* CONFIG_PPC64BRIDGE */
176
177#define RFCI .long 0x4c000066 /* rfci instruction */
178#define RFDI .long 0x4c00004e /* rfdi instruction */
179#define RFMCI .long 0x4c00004c /* rfmci instruction */
180
181#ifdef CONFIG_IBM405_ERR77
182#define PPC405_ERR77(ra,rb) dcbt ra, rb;
183#define PPC405_ERR77_SYNC sync;
184#else
185#define PPC405_ERR77(ra,rb)
186#define PPC405_ERR77_SYNC
187#endif
188
189#ifdef CONFIG_IBM440EP_ERR42
190#define PPC440EP_ERR42 isync
191#else
192#define PPC440EP_ERR42
193#endif 333#endif
194 334
195/* The boring bits... */ 335/* The boring bits... */
@@ -277,6 +417,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
277#define fr30 30 417#define fr30 30
278#define fr31 31 418#define fr31 31
279 419
420/* AltiVec Registers (VPRs) */
421
280#define vr0 0 422#define vr0 0
281#define vr1 1 423#define vr1 1
282#define vr2 2 424#define vr2 2
@@ -310,6 +452,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
310#define vr30 30 452#define vr30 30
311#define vr31 31 453#define vr31 31
312 454
455/* SPE Registers (EVPRs) */
456
313#define evr0 0 457#define evr0 0
314#define evr1 1 458#define evr1 1
315#define evr2 2 459#define evr2 2
@@ -348,3 +492,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
348#define N_RSYM 64 492#define N_RSYM 64
349#define N_SLINE 68 493#define N_SLINE 68
350#define N_SO 100 494#define N_SO 100
495
496#define ASM_CONST(x) x
497#else
498 #define __ASM_CONST(x) x##UL
499 #define ASM_CONST(x) __ASM_CONST(x)
500#endif /* __ASSEMBLY__ */
501
502#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-ppc/processor.h b/include/asm-powerpc/processor.h
index b05b5d9cae20..9592f533e058 100644
--- a/include/asm-ppc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -1,21 +1,28 @@
1#ifdef __KERNEL__ 1#ifndef _ASM_POWERPC_PROCESSOR_H
2#ifndef __ASM_PPC_PROCESSOR_H 2#define _ASM_POWERPC_PROCESSOR_H
3#define __ASM_PPC_PROCESSOR_H
4 3
5/* 4/*
6 * Default implementation of macro that returns current 5 * Copyright (C) 2001 PPC 64 Team, IBM Corp
7 * instruction pointer ("program counter"). 6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
8 */ 11 */
9#define current_text_addr() ({ __label__ _l; _l: &&_l;})
10 12
11#include <linux/config.h> 13#include <linux/config.h>
12#include <linux/stringify.h> 14#include <asm/reg.h>
13 15
16#ifndef __ASSEMBLY__
17#include <linux/compiler.h>
14#include <asm/ptrace.h> 18#include <asm/ptrace.h>
15#include <asm/types.h> 19#include <asm/types.h>
16#include <asm/mpc8xx.h> 20#ifdef CONFIG_PPC64
17#include <asm/reg.h> 21#include <asm/systemcfg.h>
22#endif
18 23
24#ifdef CONFIG_PPC32
25/* 32-bit platform types */
19/* We only need to define a new _MACH_xxx for machines which are part of 26/* We only need to define a new _MACH_xxx for machines which are part of
20 * a configuration which supports more than one type of different machine. 27 * a configuration which supports more than one type of different machine.
21 * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac. 28 * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac.
@@ -36,20 +43,6 @@
36#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */ 43#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
37#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */ 44#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
38 45
39#define _GLOBAL(n)\
40 .stabs __stringify(n:F-1),N_FUN,0,0,n;\
41 .globl n;\
42n:
43
44/*
45 * this is the minimum allowable io space due to the location
46 * of the io areas on prep (first one at 0x80000000) but
47 * as soon as I get around to remapping the io areas with the BATs
48 * to match the mac we can raise this. -- Cort
49 */
50#define TASK_SIZE (CONFIG_TASK_SIZE)
51
52#ifndef __ASSEMBLY__
53#ifdef CONFIG_PPC_MULTIPLATFORM 46#ifdef CONFIG_PPC_MULTIPLATFORM
54extern int _machine; 47extern int _machine;
55 48
@@ -67,17 +60,49 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
67#else 60#else
68#define _machine 0 61#define _machine 0
69#endif /* CONFIG_PPC_MULTIPLATFORM */ 62#endif /* CONFIG_PPC_MULTIPLATFORM */
63#endif /* CONFIG_PPC32 */
64
65#ifdef CONFIG_PPC64
66/* Platforms supported by PPC64 */
67#define PLATFORM_PSERIES 0x0100
68#define PLATFORM_PSERIES_LPAR 0x0101
69#define PLATFORM_ISERIES_LPAR 0x0201
70#define PLATFORM_LPAR 0x0001
71#define PLATFORM_POWERMAC 0x0400
72#define PLATFORM_MAPLE 0x0500
73#define PLATFORM_BPA 0x1000
74
75/* Compatibility with drivers coming from PPC32 world */
76#define _machine (systemcfg->platform)
77#define _MACH_Pmac PLATFORM_POWERMAC
78#endif
79
80/*
81 * Default implementation of macro that returns current
82 * instruction pointer ("program counter").
83 */
84#define current_text_addr() ({ __label__ _l; _l: &&_l;})
85
86/* Macros for adjusting thread priority (hardware multi-threading) */
87#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
88#define HMT_low() asm volatile("or 1,1,1 # low priority")
89#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
90#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
91#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
92#define HMT_high() asm volatile("or 3,3,3 # high priority")
93
94#ifdef __KERNEL__
95
96extern int have_of;
70 97
71struct task_struct; 98struct task_struct;
72void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp); 99void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
73void release_thread(struct task_struct *); 100void release_thread(struct task_struct *);
74 101
75/* Prepare to copy thread state - unlazy all lazy status */ 102/* Prepare to copy thread state - unlazy all lazy status */
76extern void prepare_to_copy(struct task_struct *tsk); 103extern void prepare_to_copy(struct task_struct *tsk);
77 104
78/* 105/* Create a new kernel thread. */
79 * Create a new kernel thread.
80 */
81extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 106extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
82 107
83/* Lazy FPU handling on uni-processor */ 108/* Lazy FPU handling on uni-processor */
@@ -85,10 +110,37 @@ extern struct task_struct *last_task_used_math;
85extern struct task_struct *last_task_used_altivec; 110extern struct task_struct *last_task_used_altivec;
86extern struct task_struct *last_task_used_spe; 111extern struct task_struct *last_task_used_spe;
87 112
113#ifdef CONFIG_PPC32
114#define TASK_SIZE (CONFIG_TASK_SIZE)
115
88/* This decides where the kernel will search for a free chunk of vm 116/* This decides where the kernel will search for a free chunk of vm
89 * space during mmap's. 117 * space during mmap's.
90 */ 118 */
91#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3) 119#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
120#endif
121
122#ifdef CONFIG_PPC64
123/* 64-bit user address space is 44-bits (16TB user VM) */
124#define TASK_SIZE_USER64 (0x0000100000000000UL)
125
126/*
127 * 32-bit user address space is 4GB - 1 page
128 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
129 */
130#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
131
132#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
133 TASK_SIZE_USER32 : TASK_SIZE_USER64)
134
135/* This decides where the kernel will search for a free chunk of vm
136 * space during mmap's.
137 */
138#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
139#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
140
141#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
142 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
143#endif
92 144
93typedef struct { 145typedef struct {
94 unsigned long seg; 146 unsigned long seg;
@@ -96,18 +148,31 @@ typedef struct {
96 148
97struct thread_struct { 149struct thread_struct {
98 unsigned long ksp; /* Kernel stack pointer */ 150 unsigned long ksp; /* Kernel stack pointer */
151#ifdef CONFIG_PPC64
152 unsigned long ksp_vsid;
153#endif
99 struct pt_regs *regs; /* Pointer to saved register state */ 154 struct pt_regs *regs; /* Pointer to saved register state */
100 mm_segment_t fs; /* for get_fs() validation */ 155 mm_segment_t fs; /* for get_fs() validation */
156#ifdef CONFIG_PPC32
101 void *pgdir; /* root of page-table tree */ 157 void *pgdir; /* root of page-table tree */
102 int fpexc_mode; /* floating-point exception mode */
103 signed long last_syscall; 158 signed long last_syscall;
159#endif
104#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE) 160#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
105 unsigned long dbcr0; /* debug control register values */ 161 unsigned long dbcr0; /* debug control register values */
106 unsigned long dbcr1; 162 unsigned long dbcr1;
107#endif 163#endif
108 double fpr[32]; /* Complete floating point set */ 164 double fpr[32]; /* Complete floating point set */
165#ifdef CONFIG_PPC32
109 unsigned long fpscr_pad; /* fpr ... fpscr must be contiguous */ 166 unsigned long fpscr_pad; /* fpr ... fpscr must be contiguous */
167#endif
110 unsigned long fpscr; /* Floating point status */ 168 unsigned long fpscr; /* Floating point status */
169 int fpexc_mode; /* floating-point exception mode */
170#ifdef CONFIG_PPC64
171 unsigned long start_tb; /* Start purr when proc switched in */
172 unsigned long accum_tb; /* Total accumilated purr for process */
173 unsigned long vdso_base; /* base of the vDSO library */
174#endif
175 unsigned long dabr; /* Data address breakpoint register */
111#ifdef CONFIG_ALTIVEC 176#ifdef CONFIG_ALTIVEC
112 /* Complete AltiVec register set */ 177 /* Complete AltiVec register set */
113 vector128 vr[32] __attribute((aligned(16))); 178 vector128 vr[32] __attribute((aligned(16)));
@@ -128,51 +193,58 @@ struct thread_struct {
128 193
129#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack) 194#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
130 195
196
197#ifdef CONFIG_PPC32
131#define INIT_THREAD { \ 198#define INIT_THREAD { \
132 .ksp = INIT_SP, \ 199 .ksp = INIT_SP, \
133 .fs = KERNEL_DS, \ 200 .fs = KERNEL_DS, \
134 .pgdir = swapper_pg_dir, \ 201 .pgdir = swapper_pg_dir, \
135 .fpexc_mode = MSR_FE0 | MSR_FE1, \ 202 .fpexc_mode = MSR_FE0 | MSR_FE1, \
136} 203}
204#else
205#define INIT_THREAD { \
206 .ksp = INIT_SP, \
207 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
208 .fs = KERNEL_DS, \
209 .fpr = {0}, \
210 .fpscr = 0, \
211 .fpexc_mode = MSR_FE0|MSR_FE1, \
212}
213#endif
137 214
138/* 215/*
139 * Return saved PC of a blocked thread. For now, this is the "user" PC 216 * Return saved PC of a blocked thread. For now, this is the "user" PC
140 */ 217 */
141#define thread_saved_pc(tsk) \ 218#define thread_saved_pc(tsk) \
142 ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) 219 ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
143 220
144unsigned long get_wchan(struct task_struct *p); 221unsigned long get_wchan(struct task_struct *p);
145 222
146#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) 223#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
147#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0) 224#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
148 225
149/* Get/set floating-point exception mode */ 226/* Get/set floating-point exception mode */
150#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr)) 227#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
151#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val)) 228#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
152 229
153extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr); 230extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
154extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val); 231extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
155 232
156static inline unsigned int __unpack_fe01(unsigned int msr_bits) 233static inline unsigned int __unpack_fe01(unsigned long msr_bits)
157{ 234{
158 return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8); 235 return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
159} 236}
160 237
161static inline unsigned int __pack_fe01(unsigned int fpmode) 238static inline unsigned long __pack_fe01(unsigned int fpmode)
162{ 239{
163 return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1); 240 return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
164} 241}
165 242
166/* in process.c - for early bootup debug -- Cort */ 243#ifdef CONFIG_PPC64
167int ll_printk(const char *, ...); 244#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
168void ll_puts(const char *); 245#else
169
170/* In misc.c */
171void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
172
173#define have_of (_machine == _MACH_chrp || _machine == _MACH_Pmac)
174
175#define cpu_relax() barrier() 246#define cpu_relax() barrier()
247#endif
176 248
177/* 249/*
178 * Prefetch macros. 250 * Prefetch macros.
@@ -181,21 +253,28 @@ void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
181#define ARCH_HAS_PREFETCHW 253#define ARCH_HAS_PREFETCHW
182#define ARCH_HAS_SPINLOCK_PREFETCH 254#define ARCH_HAS_SPINLOCK_PREFETCH
183 255
184extern inline void prefetch(const void *x) 256static inline void prefetch(const void *x)
185{ 257{
186 __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x)); 258 if (unlikely(!x))
259 return;
260
261 __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
187} 262}
188 263
189extern inline void prefetchw(const void *x) 264static inline void prefetchw(const void *x)
190{ 265{
191 __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x)); 266 if (unlikely(!x))
267 return;
268
269 __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
192} 270}
193 271
194#define spin_lock_prefetch(x) prefetchw(x) 272#define spin_lock_prefetch(x) prefetchw(x)
195 273
196extern int emulate_altivec(struct pt_regs *regs); 274#ifdef CONFIG_PPC64
197 275#define HAVE_ARCH_PICK_MMAP_LAYOUT
198#endif /* !__ASSEMBLY__ */ 276#endif
199 277
200#endif /* __ASM_PPC_PROCESSOR_H */
201#endif /* __KERNEL__ */ 278#endif /* __KERNEL__ */
279#endif /* __ASSEMBLY__ */
280#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
new file mode 100644
index 000000000000..8a21791c7cae
--- /dev/null
+++ b/include/asm-powerpc/prom.h
@@ -0,0 +1,219 @@
1#ifndef _POWERPC_PROM_H
2#define _POWERPC_PROM_H
3#ifdef __KERNEL__
4
5/*
6 * Definitions for talking to the Open Firmware PROM on
7 * Power Macintosh computers.
8 *
9 * Copyright (C) 1996-2005 Paul Mackerras.
10 *
11 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#include <linux/config.h>
19#include <linux/types.h>
20#include <linux/proc_fs.h>
21#include <asm/atomic.h>
22
23/* Definitions used by the flattened device tree */
24#define OF_DT_HEADER 0xd00dfeed /* marker */
25#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
26#define OF_DT_END_NODE 0x2 /* End node */
27#define OF_DT_PROP 0x3 /* Property: name off, size,
28 * content */
29#define OF_DT_NOP 0x4 /* nop */
30#define OF_DT_END 0x9
31
32#define OF_DT_VERSION 0x10
33
34/*
35 * This is what gets passed to the kernel by prom_init or kexec
36 *
37 * The dt struct contains the device tree structure, full pathes and
38 * property contents. The dt strings contain a separate block with just
39 * the strings for the property names, and is fully page aligned and
40 * self contained in a page, so that it can be kept around by the kernel,
41 * each property name appears only once in this page (cheap compression)
42 *
43 * the mem_rsvmap contains a map of reserved ranges of physical memory,
44 * passing it here instead of in the device-tree itself greatly simplifies
45 * the job of everybody. It's just a list of u64 pairs (base/size) that
46 * ends when size is 0
47 */
48struct boot_param_header
49{
50 u32 magic; /* magic word OF_DT_HEADER */
51 u32 totalsize; /* total size of DT block */
52 u32 off_dt_struct; /* offset to structure */
53 u32 off_dt_strings; /* offset to strings */
54 u32 off_mem_rsvmap; /* offset to memory reserve map */
55 u32 version; /* format version */
56 u32 last_comp_version; /* last compatible version */
57 /* version 2 fields below */
58 u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
59 /* version 3 fields below */
60 u32 dt_strings_size; /* size of the DT strings block */
61};
62
63
64
65typedef u32 phandle;
66typedef u32 ihandle;
67
68struct address_range {
69 unsigned long space;
70 unsigned long address;
71 unsigned long size;
72};
73
74struct interrupt_info {
75 int line;
76 int sense; /* +ve/-ve logic, edge or level, etc. */
77};
78
79struct pci_address {
80 u32 a_hi;
81 u32 a_mid;
82 u32 a_lo;
83};
84
85struct isa_address {
86 u32 a_hi;
87 u32 a_lo;
88};
89
90struct isa_range {
91 struct isa_address isa_addr;
92 struct pci_address pci_addr;
93 unsigned int size;
94};
95
96struct reg_property {
97 unsigned long address;
98 unsigned long size;
99};
100
101struct reg_property32 {
102 unsigned int address;
103 unsigned int size;
104};
105
106struct reg_property64 {
107 unsigned long address;
108 unsigned long size;
109};
110
111struct property {
112 char *name;
113 int length;
114 unsigned char *value;
115 struct property *next;
116};
117
118struct device_node {
119 char *name;
120 char *type;
121 phandle node;
122 phandle linux_phandle;
123 int n_addrs;
124 struct address_range *addrs;
125 int n_intrs;
126 struct interrupt_info *intrs;
127 char *full_name;
128
129 struct property *properties;
130 struct device_node *parent;
131 struct device_node *child;
132 struct device_node *sibling;
133 struct device_node *next; /* next device of same type */
134 struct device_node *allnext; /* next in list of all nodes */
135 struct proc_dir_entry *pde; /* this node's proc directory */
136 struct kref kref;
137 unsigned long _flags;
138 void *data;
139};
140
141extern struct device_node *of_chosen;
142
143/* flag descriptions */
144#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
145
146#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
147#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
148
149#define HAVE_ARCH_DEVTREE_FIXUPS
150
151static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
152{
153 dn->pde = de;
154}
155
156
157/* OBSOLETE: Old style node lookup */
158extern struct device_node *find_devices(const char *name);
159extern struct device_node *find_type_devices(const char *type);
160extern struct device_node *find_path_device(const char *path);
161extern struct device_node *find_compatible_devices(const char *type,
162 const char *compat);
163extern struct device_node *find_all_nodes(void);
164
165/* New style node lookup */
166extern struct device_node *of_find_node_by_name(struct device_node *from,
167 const char *name);
168extern struct device_node *of_find_node_by_type(struct device_node *from,
169 const char *type);
170extern struct device_node *of_find_compatible_node(struct device_node *from,
171 const char *type, const char *compat);
172extern struct device_node *of_find_node_by_path(const char *path);
173extern struct device_node *of_find_node_by_phandle(phandle handle);
174extern struct device_node *of_find_all_nodes(struct device_node *prev);
175extern struct device_node *of_get_parent(const struct device_node *node);
176extern struct device_node *of_get_next_child(const struct device_node *node,
177 struct device_node *prev);
178extern struct device_node *of_node_get(struct device_node *node);
179extern void of_node_put(struct device_node *node);
180
181/* For updating the device tree at runtime */
182extern void of_attach_node(struct device_node *);
183extern void of_detach_node(const struct device_node *);
184
185/* Other Prototypes */
186extern void finish_device_tree(void);
187extern void unflatten_device_tree(void);
188extern void early_init_devtree(void *);
189extern int device_is_compatible(struct device_node *device, const char *);
190extern int machine_is_compatible(const char *compat);
191extern unsigned char *get_property(struct device_node *node, const char *name,
192 int *lenp);
193extern void print_properties(struct device_node *node);
194extern int prom_n_addr_cells(struct device_node* np);
195extern int prom_n_size_cells(struct device_node* np);
196extern int prom_n_intr_cells(struct device_node* np);
197extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
198extern void prom_add_property(struct device_node* np, struct property* prop);
199
200#ifdef CONFIG_PPC32
201/*
202 * PCI <-> OF matching functions
203 * (XXX should these be here?)
204 */
205struct pci_bus;
206struct pci_dev;
207extern int pci_device_from_OF_node(struct device_node *node,
208 u8* bus, u8* devfn);
209extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
210extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
211extern void pci_create_OF_bus_map(void);
212#endif
213
214extern struct resource *request_OF_resource(struct device_node* node,
215 int index, const char* name_postfix);
216extern int release_OF_resource(struct device_node* node, int index);
217
218#endif /* __KERNEL__ */
219#endif /* _POWERPC_PROM_H */
diff --git a/include/asm-ppc/reg.h b/include/asm-powerpc/reg.h
index 73c33e3ef9c6..06a1f0f2db21 100644
--- a/include/asm-ppc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -6,53 +6,99 @@
6 * Implementations of the PowerPC Architecture (a.k.a. Green Book) here. 6 * Implementations of the PowerPC Architecture (a.k.a. Green Book) here.
7 */ 7 */
8 8
9#ifndef _ASM_POWERPC_REG_H
10#define _ASM_POWERPC_REG_H
9#ifdef __KERNEL__ 11#ifdef __KERNEL__
10#ifndef __ASM_PPC_REGS_H__
11#define __ASM_PPC_REGS_H__
12 12
13#include <linux/stringify.h> 13#include <linux/stringify.h>
14#include <asm/cputable.h>
14 15
15/* Pickup Book E specific registers. */ 16/* Pickup Book E specific registers. */
16#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 17#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
17#include <asm/reg_booke.h> 18#include <asm/reg_booke.h>
18#endif 19#endif
19 20
20/* Machine State Register (MSR) Fields */ 21#define MSR_SF_LG 63 /* Enable 64 bit mode */
21#define MSR_SF (1<<63) 22#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
22#define MSR_ISF (1<<61) 23#define MSR_HV_LG 60 /* Hypervisor state */
23#define MSR_VEC (1<<25) /* Enable AltiVec */ 24#define MSR_VEC_LG 25 /* Enable AltiVec */
24#define MSR_POW (1<<18) /* Enable Power Management */ 25#define MSR_POW_LG 18 /* Enable Power Management */
25#define MSR_WE (1<<18) /* Wait State Enable */ 26#define MSR_WE_LG 18 /* Wait State Enable */
26#define MSR_TGPR (1<<17) /* TLB Update registers in use */ 27#define MSR_TGPR_LG 17 /* TLB Update registers in use */
27#define MSR_CE (1<<17) /* Critical Interrupt Enable */ 28#define MSR_CE_LG 17 /* Critical Interrupt Enable */
28#define MSR_ILE (1<<16) /* Interrupt Little Endian */ 29#define MSR_ILE_LG 16 /* Interrupt Little Endian */
29#define MSR_EE (1<<15) /* External Interrupt Enable */ 30#define MSR_EE_LG 15 /* External Interrupt Enable */
30#define MSR_PR (1<<14) /* Problem State / Privilege Level */ 31#define MSR_PR_LG 14 /* Problem State / Privilege Level */
31#define MSR_FP (1<<13) /* Floating Point enable */ 32#define MSR_FP_LG 13 /* Floating Point enable */
32#define MSR_ME (1<<12) /* Machine Check Enable */ 33#define MSR_ME_LG 12 /* Machine Check Enable */
33#define MSR_FE0 (1<<11) /* Floating Exception mode 0 */ 34#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
34#define MSR_SE (1<<10) /* Single Step */ 35#define MSR_SE_LG 10 /* Single Step */
35#define MSR_BE (1<<9) /* Branch Trace */ 36#define MSR_BE_LG 9 /* Branch Trace */
36#define MSR_DE (1<<9) /* Debug Exception Enable */ 37#define MSR_DE_LG 9 /* Debug Exception Enable */
37#define MSR_FE1 (1<<8) /* Floating Exception mode 1 */ 38#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
38#define MSR_IP (1<<6) /* Exception prefix 0x000/0xFFF */ 39#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
39#define MSR_IR (1<<5) /* Instruction Relocate */ 40#define MSR_IR_LG 5 /* Instruction Relocate */
40#define MSR_DR (1<<4) /* Data Relocate */ 41#define MSR_DR_LG 4 /* Data Relocate */
41#define MSR_PE (1<<3) /* Protection Enable */ 42#define MSR_PE_LG 3 /* Protection Enable */
42#define MSR_PX (1<<2) /* Protection Exclusive Mode */ 43#define MSR_PX_LG 2 /* Protection Exclusive Mode */
43#define MSR_RI (1<<1) /* Recoverable Exception */ 44#define MSR_PMM_LG 2 /* Performance monitor */
44#define MSR_LE (1<<0) /* Little Endian */ 45#define MSR_RI_LG 1 /* Recoverable Exception */
46#define MSR_LE_LG 0 /* Little Endian */
45 47
48#ifdef __ASSEMBLY__
49#define __MASK(X) (1<<(X))
50#else
51#define __MASK(X) (1UL<<(X))
52#endif
53
54#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
55#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
56#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
57#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
58#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
59#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
60#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
61#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
62#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
63#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
64#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
65#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
66#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
67#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
68#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
69#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
70#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
71#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
72#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
73#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
74#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
75#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
76#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
77#ifndef MSR_PMM
78#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
79#endif
80#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
81#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
82
83#ifdef CONFIG_PPC64
84#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
85#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
86
87#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
88#define MSR_USER64 MSR_USER32 | MSR_SF
89
90#else /* 32-bit */
46/* Default MSR for kernel mode. */ 91/* Default MSR for kernel mode. */
92#ifndef MSR_KERNEL /* reg_booke.h also defines this */
47#ifdef CONFIG_APUS_FAST_EXCEPT 93#ifdef CONFIG_APUS_FAST_EXCEPT
48#define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR) 94#define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR)
49#endif 95#else
50
51#ifndef MSR_KERNEL
52#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) 96#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
53#endif 97#endif
98#endif
54 99
55#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) 100#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
101#endif
56 102
57/* Floating Point Status and Control Register (FPSCR) Fields */ 103/* Floating Point Status and Control Register (FPSCR) Fields */
58#define FPSCR_FX 0x80000000 /* FPU exception summary */ 104#define FPSCR_FX 0x80000000 /* FPU exception summary */
@@ -60,7 +106,7 @@
60#define FPSCR_VX 0x20000000 /* Invalid operation summary */ 106#define FPSCR_VX 0x20000000 /* Invalid operation summary */
61#define FPSCR_OX 0x10000000 /* Overflow exception summary */ 107#define FPSCR_OX 0x10000000 /* Overflow exception summary */
62#define FPSCR_UX 0x08000000 /* Underflow exception summary */ 108#define FPSCR_UX 0x08000000 /* Underflow exception summary */
63#define FPSCR_ZX 0x04000000 /* Zero-devide exception summary */ 109#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
64#define FPSCR_XX 0x02000000 /* Inexact exception summary */ 110#define FPSCR_XX 0x02000000 /* Inexact exception summary */
65#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */ 111#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
66#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */ 112#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
@@ -85,8 +131,18 @@
85 131
86/* Special Purpose Registers (SPRNs)*/ 132/* Special Purpose Registers (SPRNs)*/
87#define SPRN_CTR 0x009 /* Count Register */ 133#define SPRN_CTR 0x009 /* Count Register */
134#define SPRN_CTRLF 0x088
135#define SPRN_CTRLT 0x098
136#define CTRL_RUNLATCH 0x1
88#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ 137#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
138#define DABR_TRANSLATION (1UL << 2)
89#define SPRN_DAR 0x013 /* Data Address Register */ 139#define SPRN_DAR 0x013 /* Data Address Register */
140#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
141#define DSISR_NOHPTE 0x40000000 /* no translation found */
142#define DSISR_PROTFAULT 0x08000000 /* protection fault */
143#define DSISR_ISSTORE 0x02000000 /* access was a store */
144#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
145#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
90#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ 146#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
91#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ 147#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
92#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ 148#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
@@ -131,7 +187,6 @@
131#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */ 187#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
132#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */ 188#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
133#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ 189#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
134#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
135#define SPRN_EAR 0x11A /* External Address Register */ 190#define SPRN_EAR 0x11A /* External Address Register */
136#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ 191#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
137#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */ 192#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */
@@ -187,6 +242,16 @@
187#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ 242#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
188#define SPRN_HID4 0x3F4 /* 970 HID4 */ 243#define SPRN_HID4 0x3F4 /* 970 HID4 */
189#define SPRN_HID5 0x3F6 /* 970 HID5 */ 244#define SPRN_HID5 0x3F6 /* 970 HID5 */
245#define SPRN_HID6 0x3F9 /* BE HID 6 */
246#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
247#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
248#define SPRN_TSCR 0x399 /* Thread switch control on BE */
249#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
250#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
251#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
252#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
253#define SPRN_TSC 0x3FD /* Thread switch control on others */
254#define SPRN_TST 0x3FC /* Thread switch timeout on others */
190#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2) 255#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
191#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */ 256#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
192#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */ 257#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
@@ -270,22 +335,18 @@
270#define L3CR_L3DO 0x00000040 /* L3 data only mode */ 335#define L3CR_L3DO 0x00000040 /* L3 data only mode */
271#define L3CR_PMEN 0x00000004 /* L3 private memory enable */ 336#define L3CR_PMEN 0x00000004 /* L3 private memory enable */
272#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */ 337#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */
338
273#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */ 339#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */
274#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 1 */ 340#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 1 */
275#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */ 341#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */
276#define SPRN_LDSTDB 0x3f4 /* */ 342#define SPRN_LDSTDB 0x3f4 /* */
277#define SPRN_LR 0x008 /* Link Register */ 343#define SPRN_LR 0x008 /* Link Register */
278#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
279#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
280#ifndef SPRN_PIR 344#ifndef SPRN_PIR
281#define SPRN_PIR 0x3FF /* Processor Identification Register */ 345#define SPRN_PIR 0x3FF /* Processor Identification Register */
282#endif 346#endif
283#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
284#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
285#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
286#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
287#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ 347#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
288#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ 348#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
349#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
289#define SPRN_PVR 0x11F /* Processor Version Register */ 350#define SPRN_PVR 0x11F /* Processor Version Register */
290#define SPRN_RPA 0x3D6 /* Required Physical Address Register */ 351#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
291#define SPRN_SDA 0x3BF /* Sampled Data Address Register */ 352#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
@@ -327,6 +388,52 @@
327#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */ 388#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
328#define SPRN_XER 0x001 /* Fixed Point Exception Register */ 389#define SPRN_XER 0x001 /* Fixed Point Exception Register */
329 390
391/* Performance monitor SPRs */
392#ifdef CONFIG_PPC64
393#define SPRN_MMCR0 795
394#define MMCR0_FC 0x80000000UL /* freeze counters */
395#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
396#define MMCR0_KERNEL_DISABLE MMCR0_FCS
397#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
398#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
399#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
400#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
401#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
402#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
403#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
404#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
405#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
406#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
407#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
408#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
409#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
410#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
411#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
412#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
413#define SPRN_MMCR1 798
414#define SPRN_MMCRA 0x312
415#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
416#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
417#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
418#define SPRN_PMC1 787
419#define SPRN_PMC2 788
420#define SPRN_PMC3 789
421#define SPRN_PMC4 790
422#define SPRN_PMC5 791
423#define SPRN_PMC6 792
424#define SPRN_PMC7 793
425#define SPRN_PMC8 794
426#define SPRN_SIAR 780
427#define SPRN_SDAR 781
428
429#else /* 32-bit */
430#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
431#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
432#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
433#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
434#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
435#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
436
330/* Bit definitions for MMCR0 and PMC1 / PMC2. */ 437/* Bit definitions for MMCR0 and PMC1 / PMC2. */
331#define MMCR0_PMC1_CYCLES (1 << 7) 438#define MMCR0_PMC1_CYCLES (1 << 7)
332#define MMCR0_PMC1_ICACHEMISS (5 << 7) 439#define MMCR0_PMC1_ICACHEMISS (5 << 7)
@@ -336,14 +443,15 @@
336#define MMCR0_PMC2_ITLB 0x7 443#define MMCR0_PMC2_ITLB 0x7
337#define MMCR0_PMC2_LOADMISSTIME 0x5 444#define MMCR0_PMC2_LOADMISSTIME 0x5
338#define MMCR0_PMXE (1 << 26) 445#define MMCR0_PMXE (1 << 26)
339 446#endif
340/* Processor Version Register */
341 447
342/* Processor Version Register (PVR) field extraction */ 448/* Processor Version Register (PVR) field extraction */
343 449
344#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ 450#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
345#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */ 451#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
346 452
453#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv))
454
347/* 455/*
348 * IBM has further subdivided the standard PowerPC 16-bit version and 456 * IBM has further subdivided the standard PowerPC 16-bit version and
349 * revision subfields of the PVR for the PowerPC 403s into the following: 457 * revision subfields of the PVR for the PowerPC 403s into the following:
@@ -399,42 +507,105 @@
399#define PVR_8245 0x80811014 507#define PVR_8245 0x80811014
400#define PVR_8260 PVR_8240 508#define PVR_8260 PVR_8240
401 509
402#if 0 510/* 64-bit processors */
403/* Segment Registers */ 511/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
404#define SR0 0 512#define PV_NORTHSTAR 0x0033
405#define SR1 1 513#define PV_PULSAR 0x0034
406#define SR2 2 514#define PV_POWER4 0x0035
407#define SR3 3 515#define PV_ICESTAR 0x0036
408#define SR4 4 516#define PV_SSTAR 0x0037
409#define SR5 5 517#define PV_POWER4p 0x0038
410#define SR6 6 518#define PV_970 0x0039
411#define SR7 7 519#define PV_POWER5 0x003A
412#define SR8 8 520#define PV_POWER5p 0x003B
413#define SR9 9 521#define PV_970FX 0x003C
414#define SR10 10 522#define PV_630 0x0040
415#define SR11 11 523#define PV_630p 0x0041
416#define SR12 12 524#define PV_970MP 0x0044
417#define SR13 13 525#define PV_BE 0x0070
418#define SR14 14 526
419#define SR15 15 527/*
420#endif 528 * Number of entries in the SLB. If this ever changes we should handle
529 * it with a use a cpu feature fixup.
530 */
531#define SLB_NUM_ENTRIES 64
421 532
422/* Macros for setting and retrieving special purpose registers */ 533/* Macros for setting and retrieving special purpose registers */
423#ifndef __ASSEMBLY__ 534#ifndef __ASSEMBLY__
424#define mfmsr() ({unsigned int rval; \ 535#define mfmsr() ({unsigned long rval; \
425 asm volatile("mfmsr %0" : "=r" (rval)); rval;}) 536 asm volatile("mfmsr %0" : "=r" (rval)); rval;})
537#ifdef CONFIG_PPC64
538#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
539 : : "r" (v))
540#define mtmsrd(v) __mtmsrd((v), 0)
541#else
426#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v)) 542#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
543#endif
427 544
428#define mfspr(rn) ({unsigned int rval; \ 545#define mfspr(rn) ({unsigned long rval; \
429 asm volatile("mfspr %0," __stringify(rn) \ 546 asm volatile("mfspr %0," __stringify(rn) \
430 : "=r" (rval)); rval;}) 547 : "=r" (rval)); rval;})
431#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)) 548#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
432 549
550#define mftb() ({unsigned long rval; \
551 asm volatile("mftb %0" : "=r" (rval)); rval;})
552#define mftbl() ({unsigned long rval; \
553 asm volatile("mftbl %0" : "=r" (rval)); rval;})
554
555#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
556#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
557
558#ifdef CONFIG_PPC32
433#define mfsrin(v) ({unsigned int rval; \ 559#define mfsrin(v) ({unsigned int rval; \
434 asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ 560 asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
435 rval;}) 561 rval;})
562#endif
436 563
437#define proc_trap() asm volatile("trap") 564#define proc_trap() asm volatile("trap")
565
566#ifdef CONFIG_PPC64
567static inline void ppc64_runlatch_on(void)
568{
569 unsigned long ctrl;
570
571 if (cpu_has_feature(CPU_FTR_CTRL)) {
572 ctrl = mfspr(SPRN_CTRLF);
573 ctrl |= CTRL_RUNLATCH;
574 mtspr(SPRN_CTRLT, ctrl);
575 }
576}
577
578static inline void ppc64_runlatch_off(void)
579{
580 unsigned long ctrl;
581
582 if (cpu_has_feature(CPU_FTR_CTRL)) {
583 ctrl = mfspr(SPRN_CTRLF);
584 ctrl &= ~CTRL_RUNLATCH;
585 mtspr(SPRN_CTRLT, ctrl);
586 }
587}
588
589static inline void set_tb(unsigned int upper, unsigned int lower)
590{
591 mttbl(0);
592 mttbu(upper);
593 mttbl(lower);
594}
595#endif
596
597#define __get_SP() ({unsigned long sp; \
598 asm volatile("mr %0,1": "=r" (sp)); sp;})
599
600#else /* __ASSEMBLY__ */
601
602#define RUNLATCH_ON(REG) \
603BEGIN_FTR_SECTION \
604 mfspr (REG),SPRN_CTRLF; \
605 ori (REG),(REG),CTRL_RUNLATCH; \
606 mtspr SPRN_CTRLT,(REG); \
607END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
608
438#endif /* __ASSEMBLY__ */ 609#endif /* __ASSEMBLY__ */
439#endif /* __ASM_PPC_REGS_H__ */
440#endif /* __KERNEL__ */ 610#endif /* __KERNEL__ */
611#endif /* _ASM_POWERPC_REG_H */
diff --git a/include/asm-ppc64/rtas.h b/include/asm-powerpc/rtas.h
index e7d1b5222802..2c050332471d 100644
--- a/include/asm-ppc64/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_RTAS_H 1#ifndef _POWERPC_RTAS_H
2#define _PPC64_RTAS_H 2#define _POWERPC_RTAS_H
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <asm/page.h> 5#include <asm/page.h>
@@ -190,7 +190,7 @@ extern void rtas_progress(char *s, unsigned short hex);
190extern void rtas_initialize(void); 190extern void rtas_initialize(void);
191 191
192struct rtc_time; 192struct rtc_time;
193extern void rtas_get_boot_time(struct rtc_time *rtc_time); 193extern unsigned long rtas_get_boot_time(void);
194extern void rtas_get_rtc_time(struct rtc_time *rtc_time); 194extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
195extern int rtas_set_rtc_time(struct rtc_time *rtc_time); 195extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
196 196
@@ -246,4 +246,4 @@ extern unsigned long rtas_rmo_buf;
246 246
247#define GLOBAL_INTERRUPT_QUEUE 9005 247#define GLOBAL_INTERRUPT_QUEUE 9005
248 248
249#endif /* _PPC64_RTAS_H */ 249#endif /* _POWERPC_RTAS_H */
diff --git a/include/asm-powerpc/rtc.h b/include/asm-powerpc/rtc.h
new file mode 100644
index 000000000000..f5802926b6c0
--- /dev/null
+++ b/include/asm-powerpc/rtc.h
@@ -0,0 +1,78 @@
1/*
2 * Real-time clock definitions and interfaces
3 *
4 * Author: Tom Rini <trini@mvista.com>
5 *
6 * 2002 (c) MontaVista, Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 *
11 * Based on:
12 * include/asm-m68k/rtc.h
13 *
14 * Copyright Richard Zidlicky
15 * implementation details for genrtc/q40rtc driver
16 *
17 * And the old drivers/macintosh/rtc.c which was heavily based on:
18 * Linux/SPARC Real Time Clock Driver
19 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
20 *
21 * With additional work by Paul Mackerras and Franz Sirl.
22 */
23
24#ifndef __ASM_POWERPC_RTC_H__
25#define __ASM_POWERPC_RTC_H__
26
27#ifdef __KERNEL__
28
29#include <linux/rtc.h>
30
31#include <asm/machdep.h>
32#include <asm/time.h>
33
34#define RTC_PIE 0x40 /* periodic interrupt enable */
35#define RTC_AIE 0x20 /* alarm interrupt enable */
36#define RTC_UIE 0x10 /* update-finished interrupt enable */
37
38/* some dummy definitions */
39#define RTC_BATT_BAD 0x100 /* battery bad */
40#define RTC_SQWE 0x08 /* enable square-wave output */
41#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
42#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
43#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
44
45static inline unsigned int get_rtc_time(struct rtc_time *time)
46{
47 if (ppc_md.get_rtc_time)
48 ppc_md.get_rtc_time(time);
49 return RTC_24H;
50}
51
52/* Set the current date and time in the real time clock. */
53static inline int set_rtc_time(struct rtc_time *time)
54{
55 if (ppc_md.set_rtc_time)
56 return ppc_md.set_rtc_time(time);
57 return -EINVAL;
58}
59
60static inline unsigned int get_rtc_ss(void)
61{
62 struct rtc_time h;
63
64 get_rtc_time(&h);
65 return h.tm_sec;
66}
67
68static inline int get_rtc_pll(struct rtc_pll_info *pll)
69{
70 return -EINVAL;
71}
72static inline int set_rtc_pll(struct rtc_pll_info *pll)
73{
74 return -EINVAL;
75}
76
77#endif /* __KERNEL__ */
78#endif /* __ASM_POWERPC_RTC_H__ */
diff --git a/include/asm-ppc64/rwsem.h b/include/asm-powerpc/rwsem.h
index bd5c2f093575..0a5b83a3c949 100644
--- a/include/asm-ppc64/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -1,18 +1,14 @@
1#ifndef _ASM_POWERPC_RWSEM_H
2#define _ASM_POWERPC_RWSEM_H
3
4#ifdef __KERNEL__
5
1/* 6/*
2 * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff 7 * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
3 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h 8 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
4 * by Paul Mackerras <paulus@samba.org>. 9 * by Paul Mackerras <paulus@samba.org>.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef _PPC64_RWSEM_H
13#define _PPC64_RWSEM_H
14
15#ifdef __KERNEL__
16#include <linux/list.h> 12#include <linux/list.h>
17#include <linux/spinlock.h> 13#include <linux/spinlock.h>
18#include <asm/atomic.h> 14#include <asm/atomic.h>
@@ -163,5 +159,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
163 return atomic_add_return(delta, (atomic_t *)(&sem->count)); 159 return atomic_add_return(delta, (atomic_t *)(&sem->count));
164} 160}
165 161
166#endif /* __KERNEL__ */ 162#endif /* __KERNEL__ */
167#endif /* _PPC_RWSEM_XADD_H */ 163#endif /* _ASM_POWERPC_RWSEM_H */
diff --git a/include/asm-ppc64/seccomp.h b/include/asm-powerpc/seccomp.h
index c130c334bda1..1e1cfe12882b 100644
--- a/include/asm-ppc64/seccomp.h
+++ b/include/asm-powerpc/seccomp.h
@@ -1,11 +1,6 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef _ASM_POWERPC_SECCOMP_H
2
3#include <linux/thread_info.h> /* already defines TIF_32BIT */
4
5#ifndef TIF_32BIT
6#error "unexpected TIF_32BIT on ppc64"
7#endif
8 2
3#include <linux/thread_info.h>
9#include <linux/unistd.h> 4#include <linux/unistd.h>
10 5
11#define __NR_seccomp_read __NR_read 6#define __NR_seccomp_read __NR_read
@@ -18,4 +13,4 @@
18#define __NR_seccomp_exit_32 __NR_exit 13#define __NR_seccomp_exit_32 __NR_exit
19#define __NR_seccomp_sigreturn_32 __NR_sigreturn 14#define __NR_seccomp_sigreturn_32 __NR_sigreturn
20 15
21#endif /* _ASM_SECCOMP_H */ 16#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/include/asm-ppc64/sections.h b/include/asm-powerpc/sections.h
index 308ca6f5ced2..47be2ac2a925 100644
--- a/include/asm-ppc64/sections.h
+++ b/include/asm-powerpc/sections.h
@@ -1,22 +1,11 @@
1#ifndef _PPC64_SECTIONS_H 1#ifndef _ASM_POWERPC_SECTIONS_H
2#define _PPC64_SECTIONS_H 2#define _ASM_POWERPC_SECTIONS_H
3
4extern char _end[];
5 3
6#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
7 5
8#define __pmac 6#ifdef __powerpc64__
9#define __pmacdata
10
11#define __prep
12#define __prepdata
13
14#define __chrp
15#define __chrpdata
16
17#define __openfirmware
18#define __openfirmwaredata
19 7
8extern char _end[];
20 9
21static inline int in_kernel_text(unsigned long addr) 10static inline int in_kernel_text(unsigned long addr)
22{ 11{
@@ -27,3 +16,5 @@ static inline int in_kernel_text(unsigned long addr)
27} 16}
28 17
29#endif 18#endif
19
20#endif /* _ASM_POWERPC_SECTIONS_H */
diff --git a/include/asm-ppc64/semaphore.h b/include/asm-powerpc/semaphore.h
index aefe7753ea41..fd42fe97158f 100644
--- a/include/asm-ppc64/semaphore.h
+++ b/include/asm-powerpc/semaphore.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_SEMAPHORE_H 1#ifndef _ASM_POWERPC_SEMAPHORE_H
2#define _PPC64_SEMAPHORE_H 2#define _ASM_POWERPC_SEMAPHORE_H
3 3
4/* 4/*
5 * Remove spinlock-based RW semaphores; RW semaphore definitions are 5 * Remove spinlock-based RW semaphores; RW semaphore definitions are
@@ -95,4 +95,4 @@ static inline void up(struct semaphore * sem)
95 95
96#endif /* __KERNEL__ */ 96#endif /* __KERNEL__ */
97 97
98#endif /* !(_PPC64_SEMAPHORE_H) */ 98#endif /* _ASM_POWERPC_SEMAPHORE_H */
diff --git a/include/asm-ppc64/smu.h b/include/asm-powerpc/smu.h
index dee8eefe47bc..dee8eefe47bc 100644
--- a/include/asm-ppc64/smu.h
+++ b/include/asm-powerpc/smu.h
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-powerpc/spinlock_types.h
index a37c8eabb9f2..74236c9f05b1 100644
--- a/include/asm-ppc64/spinlock_types.h
+++ b/include/asm-powerpc/spinlock_types.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_SPINLOCK_TYPES_H 1#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H 2#define _ASM_POWERPC_SPINLOCK_TYPES_H
3 3
4#ifndef __LINUX_SPINLOCK_TYPES_H 4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly" 5# error "please don't include this file directly"
diff --git a/include/asm-ppc64/statfs.h b/include/asm-powerpc/statfs.h
index 3c985e5246a7..67024026c10d 100644
--- a/include/asm-ppc64/statfs.h
+++ b/include/asm-powerpc/statfs.h
@@ -1,12 +1,11 @@
1#ifndef _PPC64_STATFS_H 1#ifndef _ASM_POWERPC_STATFS_H
2#define _PPC64_STATFS_H 2#define _ASM_POWERPC_STATFS_H
3 3
4/* 4/* For ppc32 we just use the generic definitions, not so simple on ppc64 */
5 * This program is free software; you can redistribute it and/or 5
6 * modify it under the terms of the GNU General Public License 6#ifndef __powerpc64__
7 * as published by the Free Software Foundation; either version 7#include <asm-generic/statfs.h>
8 * 2 of the License, or (at your option) any later version. 8#else
9 */
10 9
11#ifndef __KERNEL_STRICT_NAMES 10#ifndef __KERNEL_STRICT_NAMES
12#include <linux/types.h> 11#include <linux/types.h>
@@ -57,5 +56,5 @@ struct compat_statfs64 {
57 __u32 f_frsize; 56 __u32 f_frsize;
58 __u32 f_spare[5]; 57 __u32 f_spare[5];
59}; 58};
60 59#endif /* ! __powerpc64__ */
61#endif /* _PPC64_STATFS_H */ 60#endif
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644
index 000000000000..4660c0394a77
--- /dev/null
+++ b/include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
1#ifndef _ASM_POWERPC_SYNCH_H
2#define _ASM_POWERPC_SYNCH_H
3
4#include <linux/config.h>
5
6#ifdef __powerpc64__
7#define __SUBARCH_HAS_LWSYNC
8#endif
9
10#ifdef __SUBARCH_HAS_LWSYNC
11# define LWSYNC lwsync
12#else
13# define LWSYNC sync
14#endif
15
16
17/*
18 * Arguably the bitops and *xchg operations don't imply any memory barrier
19 * or SMP ordering, but in fact a lot of drivers expect them to imply
20 * both, since they do on x86 cpus.
21 */
22#ifdef CONFIG_SMP
23#define EIEIO_ON_SMP "eieio\n"
24#define ISYNC_ON_SMP "\n\tisync"
25#define SYNC_ON_SMP __stringify(LWSYNC) "\n"
26#else
27#define EIEIO_ON_SMP
28#define ISYNC_ON_SMP
29#define SYNC_ON_SMP
30#endif
31
32static inline void eieio(void)
33{
34 __asm__ __volatile__ ("eieio" : : : "memory");
35}
36
37static inline void isync(void)
38{
39 __asm__ __volatile__ ("isync" : : : "memory");
40}
41
42#ifdef CONFIG_SMP
43#define eieio_on_smp() eieio()
44#define isync_on_smp() isync()
45#else
46#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
47#define isync_on_smp() __asm__ __volatile__("": : :"memory")
48#endif
49
50#endif /* _ASM_POWERPC_SYNCH_H */
51
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
new file mode 100644
index 000000000000..d60c8c928922
--- /dev/null
+++ b/include/asm-powerpc/system.h
@@ -0,0 +1,362 @@
1/*
2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3 */
4#ifndef _ASM_POWERPC_SYSTEM_H
5#define _ASM_POWERPC_SYSTEM_H
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9
10#include <asm/hw_irq.h>
11#include <asm/ppc_asm.h>
12#include <asm/atomic.h>
13
14/*
15 * Memory barrier.
16 * The sync instruction guarantees that all memory accesses initiated
17 * by this processor have been performed (with respect to all other
18 * mechanisms that access memory). The eieio instruction is a barrier
19 * providing an ordering (separately) for (a) cacheable stores and (b)
20 * loads and stores to non-cacheable memory (e.g. I/O devices).
21 *
22 * mb() prevents loads and stores being reordered across this point.
23 * rmb() prevents loads being reordered across this point.
24 * wmb() prevents stores being reordered across this point.
25 * read_barrier_depends() prevents data-dependent loads being reordered
26 * across this point (nop on PPC).
27 *
28 * We have to use the sync instructions for mb(), since lwsync doesn't
29 * order loads with respect to previous stores. Lwsync is fine for
30 * rmb(), though. Note that lwsync is interpreted as sync by
31 * 32-bit and older 64-bit CPUs.
32 *
33 * For wmb(), we use sync since wmb is used in drivers to order
34 * stores to system memory with respect to writes to the device.
35 * However, smp_wmb() can be a lighter-weight eieio barrier on
36 * SMP since it is only used to order updates to system memory.
37 */
38#define mb() __asm__ __volatile__ ("sync" : : : "memory")
39#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
40#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
41#define read_barrier_depends() do { } while(0)
42
43#define set_mb(var, value) do { var = value; mb(); } while (0)
44#define set_wmb(var, value) do { var = value; wmb(); } while (0)
45
46#ifdef CONFIG_SMP
47#define smp_mb() mb()
48#define smp_rmb() rmb()
49#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
50#define smp_read_barrier_depends() read_barrier_depends()
51#else
52#define smp_mb() barrier()
53#define smp_rmb() barrier()
54#define smp_wmb() barrier()
55#define smp_read_barrier_depends() do { } while(0)
56#endif /* CONFIG_SMP */
57
58#ifdef __KERNEL__
59struct task_struct;
60struct pt_regs;
61
62#ifdef CONFIG_DEBUGGER
63
64extern int (*__debugger)(struct pt_regs *regs);
65extern int (*__debugger_ipi)(struct pt_regs *regs);
66extern int (*__debugger_bpt)(struct pt_regs *regs);
67extern int (*__debugger_sstep)(struct pt_regs *regs);
68extern int (*__debugger_iabr_match)(struct pt_regs *regs);
69extern int (*__debugger_dabr_match)(struct pt_regs *regs);
70extern int (*__debugger_fault_handler)(struct pt_regs *regs);
71
72#define DEBUGGER_BOILERPLATE(__NAME) \
73static inline int __NAME(struct pt_regs *regs) \
74{ \
75 if (unlikely(__ ## __NAME)) \
76 return __ ## __NAME(regs); \
77 return 0; \
78}
79
80DEBUGGER_BOILERPLATE(debugger)
81DEBUGGER_BOILERPLATE(debugger_ipi)
82DEBUGGER_BOILERPLATE(debugger_bpt)
83DEBUGGER_BOILERPLATE(debugger_sstep)
84DEBUGGER_BOILERPLATE(debugger_iabr_match)
85DEBUGGER_BOILERPLATE(debugger_dabr_match)
86DEBUGGER_BOILERPLATE(debugger_fault_handler)
87
88#ifdef CONFIG_XMON
89extern void xmon_init(int enable);
90#endif
91
92#else
93static inline int debugger(struct pt_regs *regs) { return 0; }
94static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
95static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
96static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
97static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
98static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
99static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
100#endif
101
102extern int set_dabr(unsigned long dabr);
103extern void print_backtrace(unsigned long *);
104extern void show_regs(struct pt_regs * regs);
105extern void flush_instruction_cache(void);
106extern void hard_reset_now(void);
107extern void poweroff_now(void);
108
109#ifdef CONFIG_6xx
110extern long _get_L2CR(void);
111extern long _get_L3CR(void);
112extern void _set_L2CR(unsigned long);
113extern void _set_L3CR(unsigned long);
114#else
115#define _get_L2CR() 0L
116#define _get_L3CR() 0L
117#define _set_L2CR(val) do { } while(0)
118#define _set_L3CR(val) do { } while(0)
119#endif
120
121extern void via_cuda_init(void);
122extern void read_rtc_time(void);
123extern void pmac_find_display(void);
124extern void giveup_fpu(struct task_struct *);
125extern void disable_kernel_fp(void);
126extern void enable_kernel_fp(void);
127extern void flush_fp_to_thread(struct task_struct *);
128extern void enable_kernel_altivec(void);
129extern void giveup_altivec(struct task_struct *);
130extern void load_up_altivec(struct task_struct *);
131extern int emulate_altivec(struct pt_regs *);
132extern void giveup_spe(struct task_struct *);
133extern void load_up_spe(struct task_struct *);
134extern int fix_alignment(struct pt_regs *);
135extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
136extern void cvt_df(double *from, float *to, unsigned long *fpscr);
137
138#ifdef CONFIG_ALTIVEC
139extern void flush_altivec_to_thread(struct task_struct *);
140#else
141static inline void flush_altivec_to_thread(struct task_struct *t)
142{
143}
144#endif
145
146#ifdef CONFIG_SPE
147extern void flush_spe_to_thread(struct task_struct *);
148#else
149static inline void flush_spe_to_thread(struct task_struct *t)
150{
151}
152#endif
153
154extern int call_rtas(const char *, int, int, unsigned long *, ...);
155extern void cacheable_memzero(void *p, unsigned int nb);
156extern void *cacheable_memcpy(void *, const void *, unsigned int);
157extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
158extern void bad_page_fault(struct pt_regs *, unsigned long, int);
159extern int die(const char *, struct pt_regs *, long);
160extern void _exception(int, struct pt_regs *, int, unsigned long);
161#ifdef CONFIG_BOOKE_WDT
162extern u32 booke_wdt_enabled;
163extern u32 booke_wdt_period;
164#endif /* CONFIG_BOOKE_WDT */
165
166/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
167extern unsigned char e2a(unsigned char);
168
169struct device_node;
170extern void note_scsi_host(struct device_node *, void *);
171
172extern struct task_struct *__switch_to(struct task_struct *,
173 struct task_struct *);
174#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
175
176struct thread_struct;
177extern struct task_struct *_switch(struct thread_struct *prev,
178 struct thread_struct *next);
179
180extern unsigned int rtas_data;
181extern int mem_init_done; /* set on boot once kmalloc can be called */
182
183extern int powersave_nap; /* set if nap mode can be used in idle loop */
184
185/*
186 * Atomic exchange
187 *
188 * Changes the memory location '*ptr' to be val and returns
189 * the previous value stored there.
190 */
191static __inline__ unsigned long
192__xchg_u32(volatile void *p, unsigned long val)
193{
194 unsigned long prev;
195
196 __asm__ __volatile__(
197 EIEIO_ON_SMP
198"1: lwarx %0,0,%2 \n"
199 PPC405_ERR77(0,%2)
200" stwcx. %3,0,%2 \n\
201 bne- 1b"
202 ISYNC_ON_SMP
203 : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
204 : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
205 : "cc", "memory");
206
207 return prev;
208}
209
210#ifdef CONFIG_PPC64
211static __inline__ unsigned long
212__xchg_u64(volatile void *p, unsigned long val)
213{
214 unsigned long prev;
215
216 __asm__ __volatile__(
217 EIEIO_ON_SMP
218"1: ldarx %0,0,%2 \n"
219 PPC405_ERR77(0,%2)
220" stdcx. %3,0,%2 \n\
221 bne- 1b"
222 ISYNC_ON_SMP
223 : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
224 : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
225 : "cc", "memory");
226
227 return prev;
228}
229#endif
230
231/*
232 * This function doesn't exist, so you'll get a linker error
233 * if something tries to do an invalid xchg().
234 */
235extern void __xchg_called_with_bad_pointer(void);
236
237static __inline__ unsigned long
238__xchg(volatile void *ptr, unsigned long x, unsigned int size)
239{
240 switch (size) {
241 case 4:
242 return __xchg_u32(ptr, x);
243#ifdef CONFIG_PPC64
244 case 8:
245 return __xchg_u64(ptr, x);
246#endif
247 }
248 __xchg_called_with_bad_pointer();
249 return x;
250}
251
252#define xchg(ptr,x) \
253 ({ \
254 __typeof__(*(ptr)) _x_ = (x); \
255 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
256 })
257
258#define tas(ptr) (xchg((ptr),1))
259
260/*
261 * Compare and exchange - if *p == old, set it to new,
262 * and return the old value of *p.
263 */
264#define __HAVE_ARCH_CMPXCHG 1
265
266static __inline__ unsigned long
267__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
268{
269 unsigned int prev;
270
271 __asm__ __volatile__ (
272 EIEIO_ON_SMP
273"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
274 cmpw 0,%0,%3\n\
275 bne- 2f\n"
276 PPC405_ERR77(0,%2)
277" stwcx. %4,0,%2\n\
278 bne- 1b"
279 ISYNC_ON_SMP
280 "\n\
2812:"
282 : "=&r" (prev), "=m" (*p)
283 : "r" (p), "r" (old), "r" (new), "m" (*p)
284 : "cc", "memory");
285
286 return prev;
287}
288
289#ifdef CONFIG_PPC64
290static __inline__ unsigned long
291__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
292{
293 unsigned long prev;
294
295 __asm__ __volatile__ (
296 EIEIO_ON_SMP
297"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
298 cmpd 0,%0,%3\n\
299 bne- 2f\n\
300 stdcx. %4,0,%2\n\
301 bne- 1b"
302 ISYNC_ON_SMP
303 "\n\
3042:"
305 : "=&r" (prev), "=m" (*p)
306 : "r" (p), "r" (old), "r" (new), "m" (*p)
307 : "cc", "memory");
308
309 return prev;
310}
311#endif
312
313/* This function doesn't exist, so you'll get a linker error
314 if something tries to do an invalid cmpxchg(). */
315extern void __cmpxchg_called_with_bad_pointer(void);
316
317static __inline__ unsigned long
318__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
319 unsigned int size)
320{
321 switch (size) {
322 case 4:
323 return __cmpxchg_u32(ptr, old, new);
324#ifdef CONFIG_PPC64
325 case 8:
326 return __cmpxchg_u64(ptr, old, new);
327#endif
328 }
329 __cmpxchg_called_with_bad_pointer();
330 return old;
331}
332
333#define cmpxchg(ptr,o,n) \
334 ({ \
335 __typeof__(*(ptr)) _o_ = (o); \
336 __typeof__(*(ptr)) _n_ = (n); \
337 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
338 (unsigned long)_n_, sizeof(*(ptr))); \
339 })
340
341#ifdef CONFIG_PPC64
342/*
343 * We handle most unaligned accesses in hardware. On the other hand
344 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
345 * powers of 2 writes until it reaches sufficient alignment).
346 *
347 * Based on this we disable the IP header alignment in network drivers.
348 */
349#define NET_IP_ALIGN 0
350#endif
351
352#define arch_align_stack(x) (x)
353
354/* Used in very early kernel initialization. */
355extern unsigned long reloc_offset(void);
356extern unsigned long add_reloc_offset(unsigned long);
357extern void reloc_got2(unsigned long);
358
359#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
360
361#endif /* __KERNEL__ */
362#endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/include/asm-ppc64/thread_info.h b/include/asm-powerpc/thread_info.h
index 0494df6fca74..0b4c24551c21 100644
--- a/include/asm-ppc64/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -1,15 +1,25 @@
1/* thread_info.h: PPC low-level thread information 1/* thread_info.h: PowerPC low-level thread information
2 * adapted from the i386 version by Paul Mackerras 2 * adapted from the i386 version by Paul Mackerras
3 * 3 *
4 * Copyright (C) 2002 David Howells (dhowells@redhat.com) 4 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
5 * - Incorporating suggestions made by Linus Torvalds and Dave Miller 5 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
6 */ 6 */
7 7
8#ifndef _ASM_THREAD_INFO_H 8#ifndef _ASM_POWERPC_THREAD_INFO_H
9#define _ASM_THREAD_INFO_H 9#define _ASM_POWERPC_THREAD_INFO_H
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13/* We have 8k stacks on ppc32 and 16k on ppc64 */
14
15#ifdef CONFIG_PPC64
16#define THREAD_SHIFT 14
17#else
18#define THREAD_SHIFT 13
19#endif
20
21#define THREAD_SIZE (1 << THREAD_SHIFT)
22
13#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
14#include <linux/config.h> 24#include <linux/config.h>
15#include <linux/cache.h> 25#include <linux/cache.h>
@@ -24,7 +34,8 @@ struct thread_info {
24 struct task_struct *task; /* main task structure */ 34 struct task_struct *task; /* main task structure */
25 struct exec_domain *exec_domain; /* execution domain */ 35 struct exec_domain *exec_domain; /* execution domain */
26 int cpu; /* cpu we're on */ 36 int cpu; /* cpu we're on */
27 int preempt_count; /* 0 => preemptable, <0 => BUG */ 37 int preempt_count; /* 0 => preemptable,
38 <0 => BUG */
28 struct restart_block restart_block; 39 struct restart_block restart_block;
29 /* set by force_successful_syscall_return */ 40 /* set by force_successful_syscall_return */
30 unsigned char syscall_noerror; 41 unsigned char syscall_noerror;
@@ -54,9 +65,6 @@ struct thread_info {
54 65
55/* thread information allocation */ 66/* thread information allocation */
56 67
57#define THREAD_ORDER 2
58#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
59#define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)
60#ifdef CONFIG_DEBUG_STACK_USAGE 68#ifdef CONFIG_DEBUG_STACK_USAGE
61#define alloc_thread_info(tsk) \ 69#define alloc_thread_info(tsk) \
62 ({ \ 70 ({ \
@@ -68,7 +76,7 @@ struct thread_info {
68 ret; \ 76 ret; \
69 }) 77 })
70#else 78#else
71#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 79#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
72#endif 80#endif
73#define free_thread_info(ti) kfree(ti) 81#define free_thread_info(ti) kfree(ti)
74#define get_thread_info(ti) get_task_struct((ti)->task) 82#define get_thread_info(ti) get_task_struct((ti)->task)
@@ -77,9 +85,11 @@ struct thread_info {
77/* how to get the thread information struct from C */ 85/* how to get the thread information struct from C */
78static inline struct thread_info *current_thread_info(void) 86static inline struct thread_info *current_thread_info(void)
79{ 87{
80 struct thread_info *ti; 88 register unsigned long sp asm("r1");
81 __asm__("clrrdi %0,1,%1" : "=r"(ti) : "i" (THREAD_SHIFT)); 89
82 return ti; 90 /* gcc4, at least, is smart enough to turn this into a single
91 * rlwinm for ppc32 and clrrdi for ppc64 */
92 return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
83} 93}
84 94
85#endif /* __ASSEMBLY__ */ 95#endif /* __ASSEMBLY__ */
@@ -122,4 +132,4 @@ static inline struct thread_info *current_thread_info(void)
122 132
123#endif /* __KERNEL__ */ 133#endif /* __KERNEL__ */
124 134
125#endif /* _ASM_THREAD_INFO_H */ 135#endif /* _ASM_POWERPC_THREAD_INFO_H */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
new file mode 100644
index 000000000000..99bfe3281768
--- /dev/null
+++ b/include/asm-powerpc/time.h
@@ -0,0 +1,214 @@
1/*
2 * Common time prototypes and such for all ppc machines.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef __POWERPC_TIME_H
14#define __POWERPC_TIME_H
15
16#ifdef __KERNEL__
17#include <linux/config.h>
18#include <linux/types.h>
19#include <linux/percpu.h>
20
21#include <asm/processor.h>
22#ifdef CONFIG_PPC64
23#include <asm/paca.h>
24#include <asm/iSeries/HvCall.h>
25#endif
26
27/* time.c */
28extern unsigned long tb_ticks_per_jiffy;
29extern unsigned long tb_ticks_per_usec;
30extern unsigned long tb_ticks_per_sec;
31extern u64 tb_to_xs;
32extern unsigned tb_to_us;
33extern u64 tb_last_stamp;
34
35DECLARE_PER_CPU(unsigned long, last_jiffy);
36
37struct rtc_time;
38extern void to_tm(int tim, struct rtc_time * tm);
39extern time_t last_rtc_update;
40
41extern void generic_calibrate_decr(void);
42extern void wakeup_decrementer(void);
43
44/* Some sane defaults: 125 MHz timebase, 1GHz processor */
45extern unsigned long ppc_proc_freq;
46#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
47extern unsigned long ppc_tb_freq;
48#define DEFAULT_TB_FREQ 125000000UL
49
50/*
51 * By putting all of this stuff into a single struct we
52 * reduce the number of cache lines touched by do_gettimeofday.
53 * Both by collecting all of the data in one cache line and
54 * by touching only one TOC entry on ppc64.
55 */
56struct gettimeofday_vars {
57 u64 tb_to_xs;
58 u64 stamp_xsec;
59 u64 tb_orig_stamp;
60};
61
62struct gettimeofday_struct {
63 unsigned long tb_ticks_per_sec;
64 struct gettimeofday_vars vars[2];
65 struct gettimeofday_vars * volatile varp;
66 unsigned var_idx;
67 unsigned tb_to_us;
68};
69
70struct div_result {
71 u64 result_high;
72 u64 result_low;
73};
74
75/* Accessor functions for the timebase (RTC on 601) registers. */
76/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
77#ifdef CONFIG_6xx
78#define __USE_RTC() (!cpu_has_feature(CPU_FTR_USE_TB))
79#else
80#define __USE_RTC() 0
81#endif
82
83/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
84static inline unsigned long get_tbl(void)
85{
86 unsigned long tbl;
87
88#if defined(CONFIG_403GCX)
89 asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
90#else
91 asm volatile("mftb %0" : "=r" (tbl));
92#endif
93 return tbl;
94}
95
96static inline unsigned int get_tbu(void)
97{
98 unsigned int tbu;
99
100#if defined(CONFIG_403GCX)
101 asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
102#else
103 asm volatile("mftbu %0" : "=r" (tbu));
104#endif
105 return tbu;
106}
107
108static inline unsigned int get_rtcl(void)
109{
110 unsigned int rtcl;
111
112 asm volatile("mfrtcl %0" : "=r" (rtcl));
113 return rtcl;
114}
115
116#ifdef CONFIG_PPC64
117static inline u64 get_tb(void)
118{
119 return mftb();
120}
121#else
122static inline u64 get_tb(void)
123{
124 unsigned int tbhi, tblo, tbhi2;
125
126 do {
127 tbhi = get_tbu();
128 tblo = get_tbl();
129 tbhi2 = get_tbu();
130 } while (tbhi != tbhi2);
131
132 return ((u64)tbhi << 32) | tblo;
133}
134#endif
135
136static inline void set_tb(unsigned int upper, unsigned int lower)
137{
138 mtspr(SPRN_TBWL, 0);
139 mtspr(SPRN_TBWU, upper);
140 mtspr(SPRN_TBWL, lower);
141}
142
143/* Accessor functions for the decrementer register.
144 * The 4xx doesn't even have a decrementer. I tried to use the
145 * generic timer interrupt code, which seems OK, with the 4xx PIT
146 * in auto-reload mode. The problem is PIT stops counting when it
147 * hits zero. If it would wrap, we could use it just like a decrementer.
148 */
149static inline unsigned int get_dec(void)
150{
151#if defined(CONFIG_40x)
152 return (mfspr(SPRN_PIT));
153#else
154 return (mfspr(SPRN_DEC));
155#endif
156}
157
158static inline void set_dec(int val)
159{
160#if defined(CONFIG_40x)
161 return; /* Have to let it auto-reload */
162#elif defined(CONFIG_8xx_CPU6)
163 set_dec_cpu6(val);
164#else
165#ifdef CONFIG_PPC_ISERIES
166 struct paca_struct *lpaca = get_paca();
167 int cur_dec;
168
169 if (lpaca->lppaca.shared_proc) {
170 lpaca->lppaca.virtual_decr = val;
171 cur_dec = get_dec();
172 if (cur_dec > val)
173 HvCall_setVirtualDecr();
174 } else
175#endif
176 mtspr(SPRN_DEC, val);
177#endif /* not 40x or 8xx_CPU6 */
178}
179
180static inline unsigned long tb_ticks_since(unsigned long tstamp)
181{
182 if (__USE_RTC()) {
183 int delta = get_rtcl() - (unsigned int) tstamp;
184 return delta < 0 ? delta + 1000000000 : delta;
185 }
186 return get_tbl() - tstamp;
187}
188
189#define mulhwu(x,y) \
190({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
191
192#ifdef CONFIG_PPC64
193#define mulhdu(x,y) \
194({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
195#else
196extern u64 mulhdu(u64, u64);
197#endif
198
199extern void smp_space_timers(unsigned int);
200
201extern unsigned mulhwu_scale_factor(unsigned, unsigned);
202extern void div128_by_32(u64 dividend_high, u64 dividend_low,
203 unsigned divisor, struct div_result *dr);
204
205/* Used to store Processor Utilization register (purr) values */
206
207struct cpu_usage {
208 u64 current_tb; /* Holds the current purr register values */
209};
210
211DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
212
213#endif /* __KERNEL__ */
214#endif /* __PPC64_TIME_H */
diff --git a/include/asm-ppc64/types.h b/include/asm-powerpc/types.h
index bf294c1761b2..7d803cb547bb 100644
--- a/include/asm-ppc64/types.h
+++ b/include/asm-powerpc/types.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_TYPES_H 1#ifndef _ASM_POWERPC_TYPES_H
2#define _PPC64_TYPES_H 2#define _ASM_POWERPC_TYPES_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
@@ -16,7 +16,11 @@
16 * 2 of the License, or (at your option) any later version. 16 * 2 of the License, or (at your option) any later version.
17 */ 17 */
18 18
19#ifdef __powerpc64__
19typedef unsigned int umode_t; 20typedef unsigned int umode_t;
21#else
22typedef unsigned short umode_t;
23#endif
20 24
21/* 25/*
22 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the 26 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
@@ -32,8 +36,15 @@ typedef unsigned short __u16;
32typedef __signed__ int __s32; 36typedef __signed__ int __s32;
33typedef unsigned int __u32; 37typedef unsigned int __u32;
34 38
39#ifdef __powerpc64__
35typedef __signed__ long __s64; 40typedef __signed__ long __s64;
36typedef unsigned long __u64; 41typedef unsigned long __u64;
42#else
43#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
44typedef __signed__ long long __s64;
45typedef unsigned long long __u64;
46#endif
47#endif /* __powerpc64__ */
37 48
38typedef struct { 49typedef struct {
39 __u32 u[4]; 50 __u32 u[4];
@@ -45,7 +56,11 @@ typedef struct {
45/* 56/*
46 * These aren't exported outside the kernel to avoid name space clashes 57 * These aren't exported outside the kernel to avoid name space clashes
47 */ 58 */
59#ifdef __powerpc64__
48#define BITS_PER_LONG 64 60#define BITS_PER_LONG 64
61#else
62#define BITS_PER_LONG 32
63#endif
49 64
50#ifndef __ASSEMBLY__ 65#ifndef __ASSEMBLY__
51 66
@@ -58,8 +73,13 @@ typedef unsigned short u16;
58typedef signed int s32; 73typedef signed int s32;
59typedef unsigned int u32; 74typedef unsigned int u32;
60 75
76#ifdef __powerpc64__
61typedef signed long s64; 77typedef signed long s64;
62typedef unsigned long u64; 78typedef unsigned long u64;
79#else
80typedef signed long long s64;
81typedef unsigned long long u64;
82#endif
63 83
64typedef __vector128 vector128; 84typedef __vector128 vector128;
65 85
@@ -72,8 +92,13 @@ typedef struct {
72 unsigned long env; 92 unsigned long env;
73} func_descr_t; 93} func_descr_t;
74 94
95#ifdef CONFIG_LBD
96typedef u64 sector_t;
97#define HAVE_SECTOR_T
98#endif
99
75#endif /* __ASSEMBLY__ */ 100#endif /* __ASSEMBLY__ */
76 101
77#endif /* __KERNEL__ */ 102#endif /* __KERNEL__ */
78 103
79#endif /* _PPC64_TYPES_H */ 104#endif /* _ASM_POWERPC_TYPES_H */
diff --git a/include/asm-ppc/uninorth.h b/include/asm-powerpc/uninorth.h
index f737732c3861..f737732c3861 100644
--- a/include/asm-ppc/uninorth.h
+++ b/include/asm-powerpc/uninorth.h
diff --git a/include/asm-ppc/unistd.h b/include/asm-powerpc/unistd.h
index 3173ab3d2eb9..c2d039e338a8 100644
--- a/include/asm-ppc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -3,7 +3,13 @@
3 3
4/* 4/*
5 * This file contains the system call numbers. 5 * This file contains the system call numbers.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
6 */ 11 */
12
7#define __NR_restart_syscall 0 13#define __NR_restart_syscall 0
8#define __NR_exit 1 14#define __NR_exit 1
9#define __NR_fork 2 15#define __NR_fork 2
@@ -196,19 +202,23 @@
196#define __NR_vfork 189 202#define __NR_vfork 189
197#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */ 203#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
198#define __NR_readahead 191 204#define __NR_readahead 191
205#ifndef __powerpc64__ /* these are 32-bit only */
199#define __NR_mmap2 192 206#define __NR_mmap2 192
200#define __NR_truncate64 193 207#define __NR_truncate64 193
201#define __NR_ftruncate64 194 208#define __NR_ftruncate64 194
202#define __NR_stat64 195 209#define __NR_stat64 195
203#define __NR_lstat64 196 210#define __NR_lstat64 196
204#define __NR_fstat64 197 211#define __NR_fstat64 197
212#endif
205#define __NR_pciconfig_read 198 213#define __NR_pciconfig_read 198
206#define __NR_pciconfig_write 199 214#define __NR_pciconfig_write 199
207#define __NR_pciconfig_iobase 200 215#define __NR_pciconfig_iobase 200
208#define __NR_multiplexer 201 216#define __NR_multiplexer 201
209#define __NR_getdents64 202 217#define __NR_getdents64 202
210#define __NR_pivot_root 203 218#define __NR_pivot_root 203
219#ifndef __powerpc64__
211#define __NR_fcntl64 204 220#define __NR_fcntl64 204
221#endif
212#define __NR_madvise 205 222#define __NR_madvise 205
213#define __NR_mincore 206 223#define __NR_mincore 206
214#define __NR_gettid 207 224#define __NR_gettid 207
@@ -230,7 +240,9 @@
230#define __NR_sched_getaffinity 223 240#define __NR_sched_getaffinity 223
231/* 224 currently unused */ 241/* 224 currently unused */
232#define __NR_tuxcall 225 242#define __NR_tuxcall 225
243#ifndef __powerpc64__
233#define __NR_sendfile64 226 244#define __NR_sendfile64 226
245#endif
234#define __NR_io_setup 227 246#define __NR_io_setup 227
235#define __NR_io_destroy 228 247#define __NR_io_destroy 228
236#define __NR_io_getevents 229 248#define __NR_io_getevents 229
@@ -258,14 +270,16 @@
258#define __NR_utimes 251 270#define __NR_utimes 251
259#define __NR_statfs64 252 271#define __NR_statfs64 252
260#define __NR_fstatfs64 253 272#define __NR_fstatfs64 253
273#ifndef __powerpc64__
261#define __NR_fadvise64_64 254 274#define __NR_fadvise64_64 254
275#endif
262#define __NR_rtas 255 276#define __NR_rtas 255
263#define __NR_sys_debug_setcontext 256 277#define __NR_sys_debug_setcontext 256
264/* Number 257 is reserved for vserver */ 278/* Number 257 is reserved for vserver */
265/* 258 currently unused */ 279/* 258 currently unused */
266/* Number 259 is reserved for new sys_mbind */ 280#define __NR_mbind 259
267/* Number 260 is reserved for new sys_get_mempolicy */ 281#define __NR_get_mempolicy 260
268/* Number 261 is reserved for new sys_set_mempolicy */ 282#define __NR_set_mempolicy 261
269#define __NR_mq_open 262 283#define __NR_mq_open 262
270#define __NR_mq_unlink 263 284#define __NR_mq_unlink 263
271#define __NR_mq_timedsend 264 285#define __NR_mq_timedsend 264
@@ -285,7 +299,12 @@
285 299
286#define __NR_syscalls 278 300#define __NR_syscalls 278
287 301
288#define __NR(n) #n 302#ifdef __KERNEL__
303#define __NR__exit __NR_exit
304#define NR_syscalls __NR_syscalls
305#endif
306
307#ifndef __ASSEMBLY__
289 308
290/* On powerpc a system call basically clobbers the same registers like a 309/* On powerpc a system call basically clobbers the same registers like a
291 * function call, with the exception of LR (which is needed for the 310 * function call, with the exception of LR (which is needed for the
@@ -389,7 +408,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
389{ \ 408{ \
390 __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \ 409 __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
391} 410}
392
393#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \ 411#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
394type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \ 412type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
395{ \ 413{ \
@@ -398,12 +416,13 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
398 416
399#ifdef __KERNEL__ 417#ifdef __KERNEL__
400 418
401#define __NR__exit __NR_exit 419#include <linux/config.h>
402#define NR_syscalls __NR_syscalls 420#include <linux/types.h>
421#include <linux/compiler.h>
422#include <linux/linkage.h>
403 423
404#define __ARCH_WANT_IPC_PARSE_VERSION 424#define __ARCH_WANT_IPC_PARSE_VERSION
405#define __ARCH_WANT_OLD_READDIR 425#define __ARCH_WANT_OLD_READDIR
406#define __ARCH_WANT_OLD_STAT
407#define __ARCH_WANT_STAT64 426#define __ARCH_WANT_STAT64
408#define __ARCH_WANT_SYS_ALARM 427#define __ARCH_WANT_SYS_ALARM
409#define __ARCH_WANT_SYS_GETHOSTNAME 428#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -423,23 +442,17 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
423#define __ARCH_WANT_SYS_SIGPENDING 442#define __ARCH_WANT_SYS_SIGPENDING
424#define __ARCH_WANT_SYS_SIGPROCMASK 443#define __ARCH_WANT_SYS_SIGPROCMASK
425#define __ARCH_WANT_SYS_RT_SIGACTION 444#define __ARCH_WANT_SYS_RT_SIGACTION
426 445#ifdef CONFIG_PPC32
427/* 446#define __ARCH_WANT_OLD_STAT
428 * Forking from kernel space will result in the child getting a new, 447#endif
429 * empty kernel stack area. Thus the child cannot access automatic 448#ifdef CONFIG_PPC64
430 * variables set in the parent unless they are in registers, and the 449#define __ARCH_WANT_COMPAT_SYS_TIME
431 * procedure where the fork was done cannot return to its caller in 450#endif
432 * the child.
433 */
434
435#ifdef __KERNEL_SYSCALLS__
436
437#include <linux/compiler.h>
438#include <linux/types.h>
439 451
440/* 452/*
441 * System call prototypes. 453 * System call prototypes.
442 */ 454 */
455#ifdef __KERNEL_SYSCALLS__
443extern pid_t setsid(void); 456extern pid_t setsid(void);
444extern int write(int fd, const char *buf, off_t count); 457extern int write(int fd, const char *buf, off_t count);
445extern int read(int fd, char *buf, off_t count); 458extern int read(int fd, char *buf, off_t count);
@@ -449,10 +462,13 @@ extern int execve(const char *file, char **argv, char **envp);
449extern int open(const char *file, int flag, int mode); 462extern int open(const char *file, int flag, int mode);
450extern int close(int fd); 463extern int close(int fd);
451extern pid_t waitpid(pid_t pid, int *wait_stat, int options); 464extern pid_t waitpid(pid_t pid, int *wait_stat, int options);
465#endif /* __KERNEL_SYSCALLS__ */
452 466
453unsigned long sys_mmap(unsigned long addr, size_t len, 467/*
454 unsigned long prot, unsigned long flags, 468 * Functions that implement syscalls.
455 unsigned long fd, off_t offset); 469 */
470unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
471 unsigned long flags, unsigned long fd, off_t offset);
456unsigned long sys_mmap2(unsigned long addr, size_t len, 472unsigned long sys_mmap2(unsigned long addr, size_t len,
457 unsigned long prot, unsigned long flags, 473 unsigned long prot, unsigned long flags,
458 unsigned long fd, unsigned long pgoff); 474 unsigned long fd, unsigned long pgoff);
@@ -461,22 +477,19 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
461 unsigned long a3, unsigned long a4, unsigned long a5, 477 unsigned long a3, unsigned long a4, unsigned long a5,
462 struct pt_regs *regs); 478 struct pt_regs *regs);
463int sys_clone(unsigned long clone_flags, unsigned long usp, 479int sys_clone(unsigned long clone_flags, unsigned long usp,
464 int __user *parent_tidp, void __user *child_threadptr, 480 int __user *parent_tidp, void __user *child_threadptr,
465 int __user *child_tidp, int p6, 481 int __user *child_tidp, int p6, struct pt_regs *regs);
466 struct pt_regs *regs); 482int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
467int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6, 483 unsigned long p4, unsigned long p5, unsigned long p6,
468 struct pt_regs *regs); 484 struct pt_regs *regs);
469int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6, 485int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
486 unsigned long p4, unsigned long p5, unsigned long p6,
470 struct pt_regs *regs); 487 struct pt_regs *regs);
471int sys_pipe(int __user *fildes); 488int sys_pipe(int __user *fildes);
472int sys_ptrace(long request, long pid, long addr, long data); 489int sys_ptrace(long request, long pid, long addr, long data);
473struct sigaction; 490struct sigaction;
474long sys_rt_sigaction(int sig, 491long sys_rt_sigaction(int sig, const struct sigaction __user *act,
475 const struct sigaction __user *act, 492 struct sigaction __user *oact, size_t sigsetsize);
476 struct sigaction __user *oact,
477 size_t sigsetsize);
478
479#endif /* __KERNEL_SYSCALLS__ */
480 493
481/* 494/*
482 * "Conditional" syscalls 495 * "Conditional" syscalls
@@ -484,10 +497,14 @@ long sys_rt_sigaction(int sig,
484 * What we want is __attribute__((weak,alias("sys_ni_syscall"))), 497 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
485 * but it doesn't work on all toolchains, so we just do it by hand 498 * but it doesn't work on all toolchains, so we just do it by hand
486 */ 499 */
487#ifndef cond_syscall 500#ifdef CONFIG_PPC32
488#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 501#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
502#else
503#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
489#endif 504#endif
490 505
491#endif /* __KERNEL__ */ 506#endif /* __KERNEL__ */
507
508#endif /* __ASSEMBLY__ */
492 509
493#endif /* _ASM_PPC_UNISTD_H_ */ 510#endif /* _ASM_PPC_UNISTD_H_ */
diff --git a/include/asm-ppc64/vga.h b/include/asm-powerpc/vga.h
index c09849743f45..f8d350aabf1a 100644
--- a/include/asm-ppc64/vga.h
+++ b/include/asm-powerpc/vga.h
@@ -1,16 +1,14 @@
1#ifndef _ASM_POWERPC_VGA_H_
2#define _ASM_POWERPC_VGA_H_
3
4#ifdef __KERNEL__
5
1/* 6/*
2 * Access to VGA videoram 7 * Access to VGA videoram
3 * 8 *
4 * (c) 1998 Martin Mares <mj@ucw.cz> 9 * (c) 1998 Martin Mares <mj@ucw.cz>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef _LINUX_ASM_VGA_H_
13#define _LINUX_ASM_VGA_H_
14 12
15#include <asm/io.h> 13#include <asm/io.h>
16 14
@@ -42,9 +40,15 @@ static inline u16 scr_readw(volatile const u16 *addr)
42#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */ 40#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
43 41
44extern unsigned long vgacon_remap_base; 42extern unsigned long vgacon_remap_base;
43
44#ifdef __powerpc64__
45#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0)) 45#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
46#else
47#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
48#endif
46 49
47#define vga_readb(x) (*(x)) 50#define vga_readb(x) (*(x))
48#define vga_writeb(x,y) (*(y) = (x)) 51#define vga_writeb(x,y) (*(y) = (x))
49 52
50#endif 53#endif /* __KERNEL__ */
54#endif /* _ASM_POWERPC_VGA_H_ */
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h
new file mode 100644
index 000000000000..ca5f33277e0c
--- /dev/null
+++ b/include/asm-powerpc/xmon.h
@@ -0,0 +1,12 @@
1#ifndef __PPC_XMON_H
2#define __PPC_XMON_H
3#ifdef __KERNEL__
4
5struct pt_regs;
6
7extern void xmon(struct pt_regs *excp);
8extern void xmon_printf(const char *fmt, ...);
9extern void xmon_init(int);
10
11#endif
12#endif
diff --git a/include/asm-ppc/a.out.h b/include/asm-ppc/a.out.h
deleted file mode 100644
index 8979a94c4a81..000000000000
--- a/include/asm-ppc/a.out.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef __PPC_A_OUT_H__
2#define __PPC_A_OUT_H__
3
4/* grabbed from the intel stuff */
5#define STACK_TOP TASK_SIZE
6
7
8struct exec
9{
10 unsigned long a_info; /* Use macros N_MAGIC, etc for access */
11 unsigned a_text; /* length of text, in bytes */
12 unsigned a_data; /* length of data, in bytes */
13 unsigned a_bss; /* length of uninitialized data area for file, in bytes */
14 unsigned a_syms; /* length of symbol table data in file, in bytes */
15 unsigned a_entry; /* start address */
16 unsigned a_trsize; /* length of relocation info for text, in bytes */
17 unsigned a_drsize; /* length of relocation info for data, in bytes */
18};
19
20
21#define N_TRSIZE(a) ((a).a_trsize)
22#define N_DRSIZE(a) ((a).a_drsize)
23#define N_SYMSIZE(a) ((a).a_syms)
24
25
26#endif
diff --git a/include/asm-ppc/auxvec.h b/include/asm-ppc/auxvec.h
deleted file mode 100644
index 172358df29c8..000000000000
--- a/include/asm-ppc/auxvec.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __PPC_AUXVEC_H
2#define __PPC_AUXVEC_H
3
4/*
5 * We need to put in some extra aux table entries to tell glibc what
6 * the cache block size is, so it can use the dcbz instruction safely.
7 */
8#define AT_DCACHEBSIZE 19
9#define AT_ICACHEBSIZE 20
10#define AT_UCACHEBSIZE 21
11/* A special ignored type value for PPC, for glibc compatibility. */
12#define AT_IGNOREPPC 22
13
14#endif
diff --git a/include/asm-ppc/bug.h b/include/asm-ppc/bug.h
deleted file mode 100644
index 8b34fd682b0d..000000000000
--- a/include/asm-ppc/bug.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _PPC_BUG_H
2#define _PPC_BUG_H
3
4struct bug_entry {
5 unsigned long bug_addr;
6 int line;
7 const char *file;
8 const char *function;
9};
10
11/*
12 * If this bit is set in the line number it means that the trap
13 * is for WARN_ON rather than BUG or BUG_ON.
14 */
15#define BUG_WARNING_TRAP 0x1000000
16
17#ifdef CONFIG_BUG
18#define BUG() do { \
19 __asm__ __volatile__( \
20 "1: twi 31,0,0\n" \
21 ".section __bug_table,\"a\"\n\t" \
22 " .long 1b,%0,%1,%2\n" \
23 ".previous" \
24 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
25} while (0)
26
27#define BUG_ON(x) do { \
28 if (!__builtin_constant_p(x) || (x)) { \
29 __asm__ __volatile__( \
30 "1: twnei %0,0\n" \
31 ".section __bug_table,\"a\"\n\t" \
32 " .long 1b,%1,%2,%3\n" \
33 ".previous" \
34 : : "r" (x), "i" (__LINE__), "i" (__FILE__), \
35 "i" (__FUNCTION__)); \
36 } \
37} while (0)
38
39#define WARN_ON(x) do { \
40 if (!__builtin_constant_p(x) || (x)) { \
41 __asm__ __volatile__( \
42 "1: twnei %0,0\n" \
43 ".section __bug_table,\"a\"\n\t" \
44 " .long 1b,%1,%2,%3\n" \
45 ".previous" \
46 : : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP), \
47 "i" (__FILE__), "i" (__FUNCTION__)); \
48 } \
49} while (0)
50
51#define HAVE_ARCH_BUG
52#define HAVE_ARCH_BUG_ON
53#define HAVE_ARCH_WARN_ON
54#endif
55
56#include <asm-generic/bug.h>
57
58#endif
diff --git a/include/asm-ppc/byteorder.h b/include/asm-ppc/byteorder.h
deleted file mode 100644
index c63c81ec7968..000000000000
--- a/include/asm-ppc/byteorder.h
+++ /dev/null
@@ -1,76 +0,0 @@
1#ifndef _PPC_BYTEORDER_H
2#define _PPC_BYTEORDER_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6
7#ifdef __GNUC__
8#ifdef __KERNEL__
9
10extern __inline__ unsigned ld_le16(const volatile unsigned short *addr)
11{
12 unsigned val;
13
14 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
15 return val;
16}
17
18extern __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
19{
20 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
21}
22
23extern __inline__ unsigned ld_le32(const volatile unsigned *addr)
24{
25 unsigned val;
26
27 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
28 return val;
29}
30
31extern __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
32{
33 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
34}
35
36static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
37{
38 __u16 result;
39
40 __asm__("rlwimi %0,%2,8,16,23" : "=&r" (result) : "0" (value >> 8), "r" (value));
41 return result;
42}
43
44static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
45{
46 __u32 result;
47
48 __asm__("rlwimi %0,%2,24,16,23" : "=&r" (result) : "0" (value>>24), "r" (value));
49 __asm__("rlwimi %0,%2,8,8,15" : "=&r" (result) : "0" (result), "r" (value));
50 __asm__("rlwimi %0,%2,24,0,7" : "=&r" (result) : "0" (result), "r" (value));
51
52 return result;
53}
54#define __arch__swab32(x) ___arch__swab32(x)
55#define __arch__swab16(x) ___arch__swab16(x)
56
57/* The same, but returns converted value from the location pointer by addr. */
58#define __arch__swab16p(addr) ld_le16(addr)
59#define __arch__swab32p(addr) ld_le32(addr)
60
61/* The same, but do the conversion in situ, ie. put the value back to addr. */
62#define __arch__swab16s(addr) st_le16(addr,*addr)
63#define __arch__swab32s(addr) st_le32(addr,*addr)
64
65#endif /* __KERNEL__ */
66
67#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
68# define __BYTEORDER_HAS_U64__
69# define __SWAB_64_THRU_32__
70#endif
71
72#endif /* __GNUC__ */
73
74#include <linux/byteorder/big_endian.h>
75
76#endif /* _PPC_BYTEORDER_H */
diff --git a/include/asm-ppc/cache.h b/include/asm-ppc/cache.h
index 38f2f1be4a87..7a157d0f4b5f 100644
--- a/include/asm-ppc/cache.h
+++ b/include/asm-ppc/cache.h
@@ -9,21 +9,18 @@
9 9
10/* bytes per L1 cache line */ 10/* bytes per L1 cache line */
11#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) 11#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
12#define L1_CACHE_LINE_SIZE 16 12#define L1_CACHE_SHIFT 4
13#define LG_L1_CACHE_LINE_SIZE 4
14#define MAX_COPY_PREFETCH 1 13#define MAX_COPY_PREFETCH 1
15#elif defined(CONFIG_PPC64BRIDGE) 14#elif defined(CONFIG_PPC64BRIDGE)
16#define L1_CACHE_LINE_SIZE 128 15#define L1_CACHE_SHIFT 7
17#define LG_L1_CACHE_LINE_SIZE 7
18#define MAX_COPY_PREFETCH 1 16#define MAX_COPY_PREFETCH 1
19#else 17#else
20#define L1_CACHE_LINE_SIZE 32 18#define L1_CACHE_SHIFT 5
21#define LG_L1_CACHE_LINE_SIZE 5
22#define MAX_COPY_PREFETCH 4 19#define MAX_COPY_PREFETCH 4
23#endif 20#endif
24 21
25#define L1_CACHE_BYTES L1_CACHE_LINE_SIZE 22#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
26#define L1_CACHE_SHIFT LG_L1_CACHE_LINE_SIZE 23
27#define SMP_CACHE_BYTES L1_CACHE_BYTES 24#define SMP_CACHE_BYTES L1_CACHE_BYTES
28#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */ 25#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
29 26
diff --git a/include/asm-ppc/cputable.h b/include/asm-ppc/cputable.h
deleted file mode 100644
index e17c492c870b..000000000000
--- a/include/asm-ppc/cputable.h
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * include/asm-ppc/cputable.h
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef __ASM_PPC_CPUTABLE_H
13#define __ASM_PPC_CPUTABLE_H
14
15/* Exposed to userland CPU features */
16#define PPC_FEATURE_32 0x80000000
17#define PPC_FEATURE_64 0x40000000
18#define PPC_FEATURE_601_INSTR 0x20000000
19#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
20#define PPC_FEATURE_HAS_FPU 0x08000000
21#define PPC_FEATURE_HAS_MMU 0x04000000
22#define PPC_FEATURE_HAS_4xxMAC 0x02000000
23#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
24#define PPC_FEATURE_HAS_SPE 0x00800000
25#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
26#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
27#define PPC_FEATURE_NO_TB 0x00100000
28
29#ifdef __KERNEL__
30
31#ifndef __ASSEMBLY__
32
33/* This structure can grow, it's real size is used by head.S code
34 * via the mkdefs mecanism.
35 */
36struct cpu_spec;
37
38typedef void (*cpu_setup_t)(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
39
40struct cpu_spec {
41 /* CPU is matched via (PVR & pvr_mask) == pvr_value */
42 unsigned int pvr_mask;
43 unsigned int pvr_value;
44
45 char *cpu_name;
46 unsigned int cpu_features; /* Kernel features */
47 unsigned int cpu_user_features; /* Userland features */
48
49 /* cache line sizes */
50 unsigned int icache_bsize;
51 unsigned int dcache_bsize;
52
53 /* number of performance monitor counters */
54 unsigned int num_pmcs;
55
56 /* this is called to initialize various CPU bits like L1 cache,
57 * BHT, SPD, etc... from head.S before branching to identify_machine
58 */
59 cpu_setup_t cpu_setup;
60};
61
62extern struct cpu_spec cpu_specs[];
63extern struct cpu_spec *cur_cpu_spec[];
64
65static inline unsigned int cpu_has_feature(unsigned int feature)
66{
67 return cur_cpu_spec[0]->cpu_features & feature;
68}
69
70#endif /* __ASSEMBLY__ */
71
72/* CPU kernel features */
73#define CPU_FTR_SPLIT_ID_CACHE 0x00000001
74#define CPU_FTR_L2CR 0x00000002
75#define CPU_FTR_SPEC7450 0x00000004
76#define CPU_FTR_ALTIVEC 0x00000008
77#define CPU_FTR_TAU 0x00000010
78#define CPU_FTR_CAN_DOZE 0x00000020
79#define CPU_FTR_USE_TB 0x00000040
80#define CPU_FTR_604_PERF_MON 0x00000080
81#define CPU_FTR_601 0x00000100
82#define CPU_FTR_HPTE_TABLE 0x00000200
83#define CPU_FTR_CAN_NAP 0x00000400
84#define CPU_FTR_L3CR 0x00000800
85#define CPU_FTR_L3_DISABLE_NAP 0x00001000
86#define CPU_FTR_NAP_DISABLE_L2_PR 0x00002000
87#define CPU_FTR_DUAL_PLL_750FX 0x00004000
88#define CPU_FTR_NO_DPM 0x00008000
89#define CPU_FTR_HAS_HIGH_BATS 0x00010000
90#define CPU_FTR_NEED_COHERENT 0x00020000
91#define CPU_FTR_NO_BTIC 0x00040000
92#define CPU_FTR_BIG_PHYS 0x00080000
93
94#ifdef __ASSEMBLY__
95
96#define BEGIN_FTR_SECTION 98:
97
98#define END_FTR_SECTION(msk, val) \
9999: \
100 .section __ftr_fixup,"a"; \
101 .align 2; \
102 .long msk; \
103 .long val; \
104 .long 98b; \
105 .long 99b; \
106 .previous
107
108#else
109
110#define BEGIN_FTR_SECTION "98:\n"
111#define END_FTR_SECTION(msk, val) \
112"99:\n" \
113" .section __ftr_fixup,\"a\";\n" \
114" .align 2;\n" \
115" .long "#msk";\n" \
116" .long "#val";\n" \
117" .long 98b;\n" \
118" .long 99b;\n" \
119" .previous\n"
120
121
122#endif /* __ASSEMBLY__ */
123
124#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
125#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
126
127#endif /* __ASM_PPC_CPUTABLE_H */
128#endif /* __KERNEL__ */
129
diff --git a/include/asm-ppc/elf.h b/include/asm-ppc/elf.h
deleted file mode 100644
index c25cc35e6ab5..000000000000
--- a/include/asm-ppc/elf.h
+++ /dev/null
@@ -1,151 +0,0 @@
1#ifndef __PPC_ELF_H
2#define __PPC_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7#include <asm/types.h>
8#include <asm/ptrace.h>
9#include <asm/cputable.h>
10#include <asm/auxvec.h>
11
12/* PowerPC relocations defined by the ABIs */
13#define R_PPC_NONE 0
14#define R_PPC_ADDR32 1 /* 32bit absolute address */
15#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
16#define R_PPC_ADDR16 3 /* 16bit absolute address */
17#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
18#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
19#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
20#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
21#define R_PPC_ADDR14_BRTAKEN 8
22#define R_PPC_ADDR14_BRNTAKEN 9
23#define R_PPC_REL24 10 /* PC relative 26 bit */
24#define R_PPC_REL14 11 /* PC relative 16 bit */
25#define R_PPC_REL14_BRTAKEN 12
26#define R_PPC_REL14_BRNTAKEN 13
27#define R_PPC_GOT16 14
28#define R_PPC_GOT16_LO 15
29#define R_PPC_GOT16_HI 16
30#define R_PPC_GOT16_HA 17
31#define R_PPC_PLTREL24 18
32#define R_PPC_COPY 19
33#define R_PPC_GLOB_DAT 20
34#define R_PPC_JMP_SLOT 21
35#define R_PPC_RELATIVE 22
36#define R_PPC_LOCAL24PC 23
37#define R_PPC_UADDR32 24
38#define R_PPC_UADDR16 25
39#define R_PPC_REL32 26
40#define R_PPC_PLT32 27
41#define R_PPC_PLTREL32 28
42#define R_PPC_PLT16_LO 29
43#define R_PPC_PLT16_HI 30
44#define R_PPC_PLT16_HA 31
45#define R_PPC_SDAREL16 32
46#define R_PPC_SECTOFF 33
47#define R_PPC_SECTOFF_LO 34
48#define R_PPC_SECTOFF_HI 35
49#define R_PPC_SECTOFF_HA 36
50/* Keep this the last entry. */
51#define R_PPC_NUM 37
52
53#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
54#define ELF_NFPREG 33 /* includes fpscr */
55#define ELF_NVRREG 33 /* includes vscr */
56#define ELF_NEVRREG 34 /* includes acc (as 2) */
57
58/*
59 * These are used to set parameters in the core dumps.
60 */
61#define ELF_ARCH EM_PPC
62#define ELF_CLASS ELFCLASS32
63#define ELF_DATA ELFDATA2MSB
64
65/* General registers */
66typedef unsigned long elf_greg_t;
67typedef elf_greg_t elf_gregset_t[ELF_NGREG];
68
69/* Floating point registers */
70typedef double elf_fpreg_t;
71typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
72
73/* Altivec registers */
74typedef __vector128 elf_vrreg_t;
75typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
76
77#ifdef __KERNEL__
78
79struct task_struct;
80
81/*
82 * This is used to ensure we don't load something for the wrong architecture.
83 */
84
85#define elf_check_arch(x) ((x)->e_machine == EM_PPC)
86
87/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
88 use of this is to invoke "./ld.so someprog" to test out a new version of
89 the loader. We need to make sure that it is out of the way of the program
90 that it will "exec", and that there is sufficient room for the brk. */
91
92#define ELF_ET_DYN_BASE (0x08000000)
93
94#define USE_ELF_CORE_DUMP
95#define ELF_EXEC_PAGESIZE 4096
96
97#define ELF_CORE_COPY_REGS(gregs, regs) \
98 memcpy((gregs), (regs), sizeof(struct pt_regs)); \
99 memset((char *)(gregs) + sizeof(struct pt_regs), 0, \
100 sizeof(elf_gregset_t) - sizeof(struct pt_regs));
101
102#define ELF_CORE_COPY_TASK_REGS(t, elfregs) \
103 ((t)->thread.regs? \
104 ({ ELF_CORE_COPY_REGS((elfregs), (t)->thread.regs); 1; }): 0)
105
106extern int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpu);
107#define ELF_CORE_COPY_FPREGS(t, fpu) dump_task_fpu((t), (fpu))
108
109/* This yields a mask that user programs can use to figure out what
110 instruction set this cpu supports. This could be done in userspace,
111 but it's not easy, and we've already done it here. */
112
113#define ELF_HWCAP (cur_cpu_spec[0]->cpu_user_features)
114
115/* This yields a string that ld.so will use to load implementation
116 specific libraries for optimization. This is more specific in
117 intent than poking at uname or /proc/cpuinfo.
118
119 For the moment, we have only optimizations for the Intel generations,
120 but that could change... */
121
122#define ELF_PLATFORM (NULL)
123
124#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
125
126extern int dcache_bsize;
127extern int icache_bsize;
128extern int ucache_bsize;
129
130/*
131 * The requirements here are:
132 * - keep the final alignment of sp (sp & 0xf)
133 * - make sure the 32-bit value at the first 16 byte aligned position of
134 * AUXV is greater than 16 for glibc compatibility.
135 * AT_IGNOREPPC is used for that.
136 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
137 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
138 */
139#define ARCH_DLINFO \
140do { \
141 /* Handle glibc compatibility. */ \
142 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
143 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
144 /* Cache size items */ \
145 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
146 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
147 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
148 } while (0)
149
150#endif /* __KERNEL__ */
151#endif
diff --git a/include/asm-ppc/hw_irq.h b/include/asm-ppc/hw_irq.h
deleted file mode 100644
index 47dc7990fb26..000000000000
--- a/include/asm-ppc/hw_irq.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3 */
4#ifdef __KERNEL__
5#ifndef _PPC_HW_IRQ_H
6#define _PPC_HW_IRQ_H
7
8#include <asm/ptrace.h>
9#include <asm/reg.h>
10
11extern void timer_interrupt(struct pt_regs *);
12
13#define INLINE_IRQS
14
15#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
16
17#ifdef INLINE_IRQS
18
19static inline void local_irq_disable(void)
20{
21 unsigned long msr;
22 msr = mfmsr();
23 mtmsr(msr & ~MSR_EE);
24 __asm__ __volatile__("": : :"memory");
25}
26
27static inline void local_irq_enable(void)
28{
29 unsigned long msr;
30 __asm__ __volatile__("": : :"memory");
31 msr = mfmsr();
32 mtmsr(msr | MSR_EE);
33}
34
35static inline void local_irq_save_ptr(unsigned long *flags)
36{
37 unsigned long msr;
38 msr = mfmsr();
39 *flags = msr;
40 mtmsr(msr & ~MSR_EE);
41 __asm__ __volatile__("": : :"memory");
42}
43
44#define local_save_flags(flags) ((flags) = mfmsr())
45#define local_irq_save(flags) local_irq_save_ptr(&flags)
46#define local_irq_restore(flags) mtmsr(flags)
47
48#else
49
50extern void local_irq_enable(void);
51extern void local_irq_disable(void);
52extern void local_irq_restore(unsigned long);
53extern void local_save_flags_ptr(unsigned long *);
54
55#define local_save_flags(flags) local_save_flags_ptr(&flags)
56#define local_irq_save(flags) ({local_save_flags(flags);local_irq_disable();})
57
58#endif
59
60extern void do_lost_interrupts(unsigned long);
61
62#define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
63#define unmask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->enable) irq_desc[irq].handler->enable(irq);})
64#define ack_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->ack) irq_desc[irq].handler->ack(irq);})
65
66/* Should we handle this via lost interrupts and IPIs or should we don't care like
67 * we do now ? --BenH.
68 */
69struct hw_interrupt_type;
70static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
71
72
73#endif /* _PPC_HW_IRQ_H */
74#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 94d83998a759..f7f614dfc648 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -8,6 +8,7 @@
8 8
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/byteorder.h> 10#include <asm/byteorder.h>
11#include <asm/synch.h>
11#include <asm/mmu.h> 12#include <asm/mmu.h>
12 13
13#define SIO_CONFIG_RA 0x398 14#define SIO_CONFIG_RA 0x398
@@ -440,16 +441,6 @@ extern inline void * phys_to_virt(unsigned long address)
440#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 441#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
441#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET) 442#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)
442 443
443/*
444 * Enforce In-order Execution of I/O:
445 * Acts as a barrier to ensure all previous I/O accesses have
446 * completed before any further ones are issued.
447 */
448extern inline void eieio(void)
449{
450 __asm__ __volatile__ ("eieio" : : : "memory");
451}
452
453/* Enforce in-order execution of data I/O. 444/* Enforce in-order execution of data I/O.
454 * No distinction between read/write on PPC; use eieio for all three. 445 * No distinction between read/write on PPC; use eieio for all three.
455 */ 446 */
diff --git a/include/asm-ppc/kmap_types.h b/include/asm-ppc/kmap_types.h
deleted file mode 100644
index 6d6fc78731e5..000000000000
--- a/include/asm-ppc/kmap_types.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _ASM_KMAP_TYPES_H
3#define _ASM_KMAP_TYPES_H
4
5enum km_type {
6 KM_BOUNCE_READ,
7 KM_SKB_SUNRPC_DATA,
8 KM_SKB_DATA_SOFTIRQ,
9 KM_USER0,
10 KM_USER1,
11 KM_BIO_SRC_IRQ,
12 KM_BIO_DST_IRQ,
13 KM_PTE0,
14 KM_PTE1,
15 KM_IRQ0,
16 KM_IRQ1,
17 KM_SOFTIRQ0,
18 KM_SOFTIRQ1,
19 KM_PPC_SYNC_PAGE,
20 KM_PPC_SYNC_ICACHE,
21 KM_TYPE_NR
22};
23
24#endif
25#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h
index 1d4ab70a56f3..6c6d23abbe91 100644
--- a/include/asm-ppc/machdep.h
+++ b/include/asm-ppc/machdep.h
@@ -167,7 +167,7 @@ extern sys_ctrler_t sys_ctrler;
167 167
168#ifdef CONFIG_SMP 168#ifdef CONFIG_SMP
169struct smp_ops_t { 169struct smp_ops_t {
170 void (*message_pass)(int target, int msg, unsigned long data, int wait); 170 void (*message_pass)(int target, int msg);
171 int (*probe)(void); 171 int (*probe)(void);
172 void (*kick_cpu)(int nr); 172 void (*kick_cpu)(int nr);
173 void (*setup_cpu)(int nr); 173 void (*setup_cpu)(int nr);
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index afe26ffc2e2d..4f152cca13c1 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -164,13 +164,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
164 struct task_struct *tsk) 164 struct task_struct *tsk)
165{ 165{
166#ifdef CONFIG_ALTIVEC 166#ifdef CONFIG_ALTIVEC
167 asm volatile ( 167 if (cpu_has_feature(CPU_FTR_ALTIVEC))
168 BEGIN_FTR_SECTION 168 asm volatile ("dssall;\n"
169 "dssall;\n"
170#ifndef CONFIG_POWER4 169#ifndef CONFIG_POWER4
171 "sync;\n" /* G4 needs a sync here, G5 apparently not */ 170 "sync;\n" /* G4 needs a sync here, G5 apparently not */
172#endif 171#endif
173 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
174 : : ); 172 : : );
175#endif /* CONFIG_ALTIVEC */ 173#endif /* CONFIG_ALTIVEC */
176 174
diff --git a/include/asm-ppc/open_pic.h b/include/asm-ppc/open_pic.h
index 7848aa610c05..ec2f46629ca2 100644
--- a/include/asm-ppc/open_pic.h
+++ b/include/asm-ppc/open_pic.h
@@ -58,8 +58,7 @@ extern int openpic_get_irq(struct pt_regs *regs);
58extern void openpic_reset_processor_phys(u_int cpumask); 58extern void openpic_reset_processor_phys(u_int cpumask);
59extern void openpic_setup_ISU(int isu_num, unsigned long addr); 59extern void openpic_setup_ISU(int isu_num, unsigned long addr);
60extern void openpic_cause_IPI(u_int ipi, cpumask_t cpumask); 60extern void openpic_cause_IPI(u_int ipi, cpumask_t cpumask);
61extern void smp_openpic_message_pass(int target, int msg, unsigned long data, 61extern void smp_openpic_message_pass(int target, int msg);
62 int wait);
63extern void openpic_set_k2_cascade(int irq); 62extern void openpic_set_k2_cascade(int irq);
64extern void openpic_set_priority(u_int pri); 63extern void openpic_set_priority(u_int pri);
65extern u_int openpic_get_priority(void); 64extern u_int openpic_get_priority(void);
diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h
index 4789dc024240..fc44f7ca62d7 100644
--- a/include/asm-ppc/page.h
+++ b/include/asm-ppc/page.h
@@ -34,6 +34,17 @@ typedef unsigned long pte_basic_t;
34#define PTE_FMT "%.8lx" 34#define PTE_FMT "%.8lx"
35#endif 35#endif
36 36
37/* align addr on a size boundary - adjust address up/down if needed */
38#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
39#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
40
41/* align addr on a size boundary - adjust address up if needed */
42#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
43
44/* to align the pointer to the (next) page boundary */
45#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
46
47
37#undef STRICT_MM_TYPECHECKS 48#undef STRICT_MM_TYPECHECKS
38 49
39#ifdef STRICT_MM_TYPECHECKS 50#ifdef STRICT_MM_TYPECHECKS
@@ -76,13 +87,6 @@ typedef unsigned long pgprot_t;
76 87
77#endif 88#endif
78 89
79
80/* align addr on a size boundary - adjust address up if needed -- Cort */
81#define _ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
82
83/* to align the pointer to the (next) page boundary */
84#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
85
86struct page; 90struct page;
87extern void clear_pages(void *page, int order); 91extern void clear_pages(void *page, int order);
88static inline void clear_page(void *page) { clear_pages(page, 0); } 92static inline void clear_page(void *page) { clear_pages(page, 0); }
diff --git a/include/asm-ppc/pci-bridge.h b/include/asm-ppc/pci-bridge.h
index ffa423456c2b..e58c78f90a5a 100644
--- a/include/asm-ppc/pci-bridge.h
+++ b/include/asm-ppc/pci-bridge.h
@@ -79,6 +79,11 @@ struct pci_controller {
79 struct resource mem_space; 79 struct resource mem_space;
80}; 80};
81 81
82static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
83{
84 return bus->sysdata;
85}
86
82/* These are used for config access before all the PCI probing 87/* These are used for config access before all the PCI probing
83 has been done. */ 88 has been done. */
84int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn, 89int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn,
diff --git a/include/asm-ppc/pci.h b/include/asm-ppc/pci.h
index 9dd06cd40096..643740dd727b 100644
--- a/include/asm-ppc/pci.h
+++ b/include/asm-ppc/pci.h
@@ -24,9 +24,9 @@ struct pci_dev;
24 * Set this to 1 if you want the kernel to re-assign all PCI 24 * Set this to 1 if you want the kernel to re-assign all PCI
25 * bus numbers 25 * bus numbers
26 */ 26 */
27extern int pci_assign_all_busses; 27extern int pci_assign_all_buses;
28 28
29#define pcibios_assign_all_busses() (pci_assign_all_busses) 29#define pcibios_assign_all_busses() (pci_assign_all_buses)
30#define pcibios_scan_all_fns(a, b) 0 30#define pcibios_scan_all_fns(a, b) 0
31 31
32#define PCIBIOS_MIN_IO 0x1000 32#define PCIBIOS_MIN_IO 0x1000
diff --git a/include/asm-ppc/perfmon.h b/include/asm-ppc/perfmon.h
deleted file mode 100644
index 5e7a89c47b5b..000000000000
--- a/include/asm-ppc/perfmon.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __PERFMON_H
2#define __PERFMON_H
3
4extern void (*perf_irq)(struct pt_regs *);
5
6int request_perfmon_irq(void (*handler)(struct pt_regs *));
7void free_perfmon_irq(void);
8
9#ifdef CONFIG_FSL_BOOKE
10void init_pmc_stop(int ctr);
11void set_pmc_event(int ctr, int event);
12void set_pmc_user_kernel(int ctr, int user, int kernel);
13void set_pmc_marked(int ctr, int mark0, int mark1);
14void pmc_start_ctr(int ctr, int enable);
15void pmc_start_ctrs(int enable);
16void pmc_stop_ctrs(void);
17void dump_pmcs(void);
18
19extern struct op_ppc32_model op_model_fsl_booke;
20#endif
21
22#endif /* __PERFMON_H */
diff --git a/include/asm-ppc/posix_types.h b/include/asm-ppc/posix_types.h
deleted file mode 100644
index a14a82abe8d2..000000000000
--- a/include/asm-ppc/posix_types.h
+++ /dev/null
@@ -1,111 +0,0 @@
1#ifndef _PPC_POSIX_TYPES_H
2#define _PPC_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned int __kernel_mode_t;
12typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef unsigned int __kernel_uid_t;
16typedef unsigned int __kernel_gid_t;
17typedef unsigned int __kernel_size_t;
18typedef int __kernel_ssize_t;
19typedef long __kernel_ptrdiff_t;
20typedef long __kernel_time_t;
21typedef long __kernel_suseconds_t;
22typedef long __kernel_clock_t;
23typedef int __kernel_timer_t;
24typedef int __kernel_clockid_t;
25typedef int __kernel_daddr_t;
26typedef char * __kernel_caddr_t;
27typedef short __kernel_ipc_pid_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32
33typedef unsigned int __kernel_old_uid_t;
34typedef unsigned int __kernel_old_gid_t;
35typedef unsigned int __kernel_old_dev_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#ifndef __GNUC__
46
47#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
48#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
49#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
50#define __FD_ZERO(set) \
51 ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
52
53#else /* __GNUC__ */
54
55#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \
56 || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
57/* With GNU C, use inline functions instead so args are evaluated only once: */
58
59#undef __FD_SET
60static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
61{
62 unsigned long _tmp = fd / __NFDBITS;
63 unsigned long _rem = fd % __NFDBITS;
64 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
65}
66
67#undef __FD_CLR
68static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
69{
70 unsigned long _tmp = fd / __NFDBITS;
71 unsigned long _rem = fd % __NFDBITS;
72 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
73}
74
75#undef __FD_ISSET
76static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
77{
78 unsigned long _tmp = fd / __NFDBITS;
79 unsigned long _rem = fd % __NFDBITS;
80 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
81}
82
83/*
84 * This will unroll the loop for the normal constant case (8 ints,
85 * for a 256-bit fd_set)
86 */
87#undef __FD_ZERO
88static __inline__ void __FD_ZERO(__kernel_fd_set *p)
89{
90 unsigned int *tmp = (unsigned int *)p->fds_bits;
91 int i;
92
93 if (__builtin_constant_p(__FDSET_LONGS)) {
94 switch (__FDSET_LONGS) {
95 case 8:
96 tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
97 tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
98 return;
99 }
100 }
101 i = __FDSET_LONGS;
102 while (i) {
103 i--;
104 *tmp = 0;
105 tmp++;
106 }
107}
108
109#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
110#endif /* __GNUC__ */
111#endif /* _PPC_POSIX_TYPES_H */
diff --git a/include/asm-ppc/ptrace.h b/include/asm-ppc/ptrace.h
index 7043c164b537..c34fb4e37a97 100644
--- a/include/asm-ppc/ptrace.h
+++ b/include/asm-ppc/ptrace.h
@@ -57,7 +57,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
57 57
58#define force_successful_syscall_return() \ 58#define force_successful_syscall_return() \
59 do { \ 59 do { \
60 current_thread_info()->local_flags |= _TIFL_FORCE_NOERROR; \ 60 current_thread_info()->syscall_noerror = 1; \
61 } while(0) 61 } while(0)
62 62
63/* 63/*
diff --git a/include/asm-ppc/rwsem.h b/include/asm-ppc/rwsem.h
deleted file mode 100644
index 3e738f483c11..000000000000
--- a/include/asm-ppc/rwsem.h
+++ /dev/null
@@ -1,172 +0,0 @@
1/*
2 * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
3 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
4 * by Paul Mackerras <paulus@samba.org>.
5 */
6
7#ifndef _PPC_RWSEM_H
8#define _PPC_RWSEM_H
9
10#ifdef __KERNEL__
11#include <linux/list.h>
12#include <linux/spinlock.h>
13#include <asm/atomic.h>
14#include <asm/system.h>
15
16/*
17 * the semaphore definition
18 */
19struct rw_semaphore {
20 /* XXX this should be able to be an atomic_t -- paulus */
21 signed long count;
22#define RWSEM_UNLOCKED_VALUE 0x00000000
23#define RWSEM_ACTIVE_BIAS 0x00000001
24#define RWSEM_ACTIVE_MASK 0x0000ffff
25#define RWSEM_WAITING_BIAS (-0x00010000)
26#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
27#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
28 spinlock_t wait_lock;
29 struct list_head wait_list;
30#if RWSEM_DEBUG
31 int debug;
32#endif
33};
34
35/*
36 * initialisation
37 */
38#if RWSEM_DEBUG
39#define __RWSEM_DEBUG_INIT , 0
40#else
41#define __RWSEM_DEBUG_INIT /* */
42#endif
43
44#define __RWSEM_INITIALIZER(name) \
45 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
46 LIST_HEAD_INIT((name).wait_list) \
47 __RWSEM_DEBUG_INIT }
48
49#define DECLARE_RWSEM(name) \
50 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
51
52extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
53extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
54extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
55extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
56
57static inline void init_rwsem(struct rw_semaphore *sem)
58{
59 sem->count = RWSEM_UNLOCKED_VALUE;
60 spin_lock_init(&sem->wait_lock);
61 INIT_LIST_HEAD(&sem->wait_list);
62#if RWSEM_DEBUG
63 sem->debug = 0;
64#endif
65}
66
67/*
68 * lock for reading
69 */
70static inline void __down_read(struct rw_semaphore *sem)
71{
72 if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
73 smp_wmb();
74 else
75 rwsem_down_read_failed(sem);
76}
77
78static inline int __down_read_trylock(struct rw_semaphore *sem)
79{
80 int tmp;
81
82 while ((tmp = sem->count) >= 0) {
83 if (tmp == cmpxchg(&sem->count, tmp,
84 tmp + RWSEM_ACTIVE_READ_BIAS)) {
85 smp_wmb();
86 return 1;
87 }
88 }
89 return 0;
90}
91
92/*
93 * lock for writing
94 */
95static inline void __down_write(struct rw_semaphore *sem)
96{
97 int tmp;
98
99 tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
100 (atomic_t *)(&sem->count));
101 if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
102 smp_wmb();
103 else
104 rwsem_down_write_failed(sem);
105}
106
107static inline int __down_write_trylock(struct rw_semaphore *sem)
108{
109 int tmp;
110
111 tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
112 RWSEM_ACTIVE_WRITE_BIAS);
113 smp_wmb();
114 return tmp == RWSEM_UNLOCKED_VALUE;
115}
116
117/*
118 * unlock after reading
119 */
120static inline void __up_read(struct rw_semaphore *sem)
121{
122 int tmp;
123
124 smp_wmb();
125 tmp = atomic_dec_return((atomic_t *)(&sem->count));
126 if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
127 rwsem_wake(sem);
128}
129
130/*
131 * unlock after writing
132 */
133static inline void __up_write(struct rw_semaphore *sem)
134{
135 smp_wmb();
136 if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
137 (atomic_t *)(&sem->count)) < 0)
138 rwsem_wake(sem);
139}
140
141/*
142 * implement atomic add functionality
143 */
144static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
145{
146 atomic_add(delta, (atomic_t *)(&sem->count));
147}
148
149/*
150 * downgrade write lock to read lock
151 */
152static inline void __downgrade_write(struct rw_semaphore *sem)
153{
154 int tmp;
155
156 smp_wmb();
157 tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
158 if (tmp < 0)
159 rwsem_downgrade_wake(sem);
160}
161
162/*
163 * implement exchange and add functionality
164 */
165static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
166{
167 smp_mb();
168 return atomic_add_return(delta, (atomic_t *)(&sem->count));
169}
170
171#endif /* __KERNEL__ */
172#endif /* _PPC_RWSEM_XADD_H */
diff --git a/include/asm-ppc/seccomp.h b/include/asm-ppc/seccomp.h
deleted file mode 100644
index 666c4da96d87..000000000000
--- a/include/asm-ppc/seccomp.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _ASM_SECCOMP_H
2
3#include <linux/unistd.h>
4
5#define __NR_seccomp_read __NR_read
6#define __NR_seccomp_write __NR_write
7#define __NR_seccomp_exit __NR_exit
8#define __NR_seccomp_sigreturn __NR_rt_sigreturn
9
10#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-ppc/sections.h b/include/asm-ppc/sections.h
deleted file mode 100644
index ba8f43ac9bf3..000000000000
--- a/include/asm-ppc/sections.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _PPC_SECTIONS_H
3#define _PPC_SECTIONS_H
4
5#include <asm-generic/sections.h>
6
7#define __pmac __attribute__ ((__section__ (".pmac.text")))
8#define __pmacdata __attribute__ ((__section__ (".pmac.data")))
9#define __pmacfunc(__argpmac) \
10 __argpmac __pmac; \
11 __argpmac
12
13#define __prep __attribute__ ((__section__ (".prep.text")))
14#define __prepdata __attribute__ ((__section__ (".prep.data")))
15#define __prepfunc(__argprep) \
16 __argprep __prep; \
17 __argprep
18
19#define __chrp __attribute__ ((__section__ (".chrp.text")))
20#define __chrpdata __attribute__ ((__section__ (".chrp.data")))
21#define __chrpfunc(__argchrp) \
22 __argchrp __chrp; \
23 __argchrp
24
25/* this is actually just common chrp/pmac code, not OF code -- Cort */
26#define __openfirmware __attribute__ ((__section__ (".openfirmware.text")))
27#define __openfirmwaredata __attribute__ ((__section__ (".openfirmware.data")))
28#define __openfirmwarefunc(__argopenfirmware) \
29 __argopenfirmware __openfirmware; \
30 __argopenfirmware
31
32#endif /* _PPC_SECTIONS_H */
33#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/semaphore.h b/include/asm-ppc/semaphore.h
deleted file mode 100644
index 89e6e73be08c..000000000000
--- a/include/asm-ppc/semaphore.h
+++ /dev/null
@@ -1,111 +0,0 @@
1#ifndef _PPC_SEMAPHORE_H
2#define _PPC_SEMAPHORE_H
3
4/*
5 * Swiped from asm-sparc/semaphore.h and modified
6 * -- Cort (cort@cs.nmt.edu)
7 *
8 * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h
9 * -- Ani Joshi (ajoshi@unixbox.com)
10 *
11 * Remove spinlock-based RW semaphores; RW semaphore definitions are
12 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
13 * Rework semaphores to use atomic_dec_if_positive.
14 * -- Paul Mackerras (paulus@samba.org)
15 */
16
17#ifdef __KERNEL__
18
19#include <asm/atomic.h>
20#include <asm/system.h>
21#include <linux/wait.h>
22#include <linux/rwsem.h>
23
24struct semaphore {
25 /*
26 * Note that any negative value of count is equivalent to 0,
27 * but additionally indicates that some process(es) might be
28 * sleeping on `wait'.
29 */
30 atomic_t count;
31 wait_queue_head_t wait;
32};
33
34#define __SEMAPHORE_INITIALIZER(name, n) \
35{ \
36 .count = ATOMIC_INIT(n), \
37 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
38}
39
40#define __MUTEX_INITIALIZER(name) \
41 __SEMAPHORE_INITIALIZER(name, 1)
42
43#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
44 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
45
46#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
47#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
48
49static inline void sema_init (struct semaphore *sem, int val)
50{
51 atomic_set(&sem->count, val);
52 init_waitqueue_head(&sem->wait);
53}
54
55static inline void init_MUTEX (struct semaphore *sem)
56{
57 sema_init(sem, 1);
58}
59
60static inline void init_MUTEX_LOCKED (struct semaphore *sem)
61{
62 sema_init(sem, 0);
63}
64
65extern void __down(struct semaphore * sem);
66extern int __down_interruptible(struct semaphore * sem);
67extern void __up(struct semaphore * sem);
68
69extern inline void down(struct semaphore * sem)
70{
71 might_sleep();
72
73 /*
74 * Try to get the semaphore, take the slow path if we fail.
75 */
76 if (atomic_dec_return(&sem->count) < 0)
77 __down(sem);
78 smp_wmb();
79}
80
81extern inline int down_interruptible(struct semaphore * sem)
82{
83 int ret = 0;
84
85 might_sleep();
86
87 if (atomic_dec_return(&sem->count) < 0)
88 ret = __down_interruptible(sem);
89 smp_wmb();
90 return ret;
91}
92
93extern inline int down_trylock(struct semaphore * sem)
94{
95 int ret;
96
97 ret = atomic_dec_if_positive(&sem->count) < 0;
98 smp_wmb();
99 return ret;
100}
101
102extern inline void up(struct semaphore * sem)
103{
104 smp_wmb();
105 if (atomic_inc_return(&sem->count) <= 0)
106 __up(sem);
107}
108
109#endif /* __KERNEL__ */
110
111#endif /* !(_PPC_SEMAPHORE_H) */
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h
index 829481c0a9dc..063d7dedc691 100644
--- a/include/asm-ppc/smp.h
+++ b/include/asm-ppc/smp.h
@@ -35,6 +35,7 @@ extern cpumask_t cpu_possible_map;
35extern unsigned long smp_proc_in_lock[]; 35extern unsigned long smp_proc_in_lock[];
36extern volatile unsigned long cpu_callin_map[]; 36extern volatile unsigned long cpu_callin_map[];
37extern int smp_tb_synchronized; 37extern int smp_tb_synchronized;
38extern struct smp_ops_t *smp_ops;
38 39
39extern void smp_send_tlb_invalidate(int); 40extern void smp_send_tlb_invalidate(int);
40extern void smp_send_xmon_break(int cpu); 41extern void smp_send_xmon_break(int cpu);
@@ -45,30 +46,21 @@ extern int __cpu_disable(void);
45extern void __cpu_die(unsigned int cpu); 46extern void __cpu_die(unsigned int cpu);
46extern void cpu_die(void) __attribute__((noreturn)); 47extern void cpu_die(void) __attribute__((noreturn));
47 48
48#define NO_PROC_ID 0xFF /* No processor magic marker */
49#define PROC_CHANGE_PENALTY 20
50
51#define raw_smp_processor_id() (current_thread_info()->cpu) 49#define raw_smp_processor_id() (current_thread_info()->cpu)
52 50
53extern int __cpu_up(unsigned int cpu); 51extern int __cpu_up(unsigned int cpu);
54 52
55extern int smp_hw_index[]; 53extern int smp_hw_index[];
56#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()]) 54#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
57 55#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
58struct klock_info_struct {
59 unsigned long kernel_flag;
60 unsigned char akp;
61};
62
63extern struct klock_info_struct klock_info;
64#define KLOCK_HELD 0xffffffff
65#define KLOCK_CLEAR 0x0
66 56
67#endif /* __ASSEMBLY__ */ 57#endif /* __ASSEMBLY__ */
68 58
69#else /* !(CONFIG_SMP) */ 59#else /* !(CONFIG_SMP) */
70 60
71static inline void cpu_die(void) { } 61static inline void cpu_die(void) { }
62#define get_hard_smp_processor_id(cpu) 0
63#define hard_smp_processor_id() 0
72 64
73#endif /* !(CONFIG_SMP) */ 65#endif /* !(CONFIG_SMP) */
74 66
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h
index 20edcf2a6e0c..5c64b75f0295 100644
--- a/include/asm-ppc/spinlock.h
+++ b/include/asm-ppc/spinlock.h
@@ -9,7 +9,7 @@
9 * (the type definitions are in asm/raw_spinlock_types.h) 9 * (the type definitions are in asm/raw_spinlock_types.h)
10 */ 10 */
11 11
12#define __raw_spin_is_locked(x) ((x)->lock != 0) 12#define __raw_spin_is_locked(x) ((x)->slock != 0)
13#define __raw_spin_unlock_wait(lock) \ 13#define __raw_spin_unlock_wait(lock) \
14 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 14 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
15#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 15#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -31,17 +31,17 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
31 bne- 2b\n\ 31 bne- 2b\n\
32 isync" 32 isync"
33 : "=&r"(tmp) 33 : "=&r"(tmp)
34 : "r"(&lock->lock), "r"(1) 34 : "r"(&lock->slock), "r"(1)
35 : "cr0", "memory"); 35 : "cr0", "memory");
36} 36}
37 37
38static inline void __raw_spin_unlock(raw_spinlock_t *lock) 38static inline void __raw_spin_unlock(raw_spinlock_t *lock)
39{ 39{
40 __asm__ __volatile__("eieio # __raw_spin_unlock": : :"memory"); 40 __asm__ __volatile__("eieio # __raw_spin_unlock": : :"memory");
41 lock->lock = 0; 41 lock->slock = 0;
42} 42}
43 43
44#define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) 44#define __raw_spin_trylock(l) (!test_and_set_bit(0,(volatile unsigned long *)(&(l)->slock)))
45 45
46/* 46/*
47 * Read-write spinlocks, allowing multiple readers 47 * Read-write spinlocks, allowing multiple readers
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h
deleted file mode 100644
index 7919ccc75b8a..000000000000
--- a/include/asm-ppc/spinlock_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef __ASM_SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H
3
4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly"
6#endif
7
8typedef struct {
9 volatile unsigned long lock;
10} raw_spinlock_t;
11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
13
14typedef struct {
15 volatile signed int lock;
16} raw_rwlock_t;
17
18#define __RAW_RW_LOCK_UNLOCKED { 0 }
19
20#endif
diff --git a/include/asm-ppc/statfs.h b/include/asm-ppc/statfs.h
deleted file mode 100644
index 807c69954a1b..000000000000
--- a/include/asm-ppc/statfs.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _PPC_STATFS_H
2#define _PPC_STATFS_H
3
4#include <asm-generic/statfs.h>
5#endif
6
7
8
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index d754ab570fe0..1f310783757e 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -74,21 +74,42 @@ extern void read_rtc_time(void);
74extern void pmac_find_display(void); 74extern void pmac_find_display(void);
75extern void giveup_fpu(struct task_struct *); 75extern void giveup_fpu(struct task_struct *);
76extern void enable_kernel_fp(void); 76extern void enable_kernel_fp(void);
77extern void flush_fp_to_thread(struct task_struct *);
77extern void enable_kernel_altivec(void); 78extern void enable_kernel_altivec(void);
78extern void giveup_altivec(struct task_struct *); 79extern void giveup_altivec(struct task_struct *);
79extern void load_up_altivec(struct task_struct *); 80extern void load_up_altivec(struct task_struct *);
81extern int emulate_altivec(struct pt_regs *);
80extern void giveup_spe(struct task_struct *); 82extern void giveup_spe(struct task_struct *);
81extern void load_up_spe(struct task_struct *); 83extern void load_up_spe(struct task_struct *);
82extern int fix_alignment(struct pt_regs *); 84extern int fix_alignment(struct pt_regs *);
83extern void cvt_fd(float *from, double *to, unsigned long *fpscr); 85extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
84extern void cvt_df(double *from, float *to, unsigned long *fpscr); 86extern void cvt_df(double *from, float *to, unsigned long *fpscr);
87
88#ifdef CONFIG_ALTIVEC
89extern void flush_altivec_to_thread(struct task_struct *);
90#else
91static inline void flush_altivec_to_thread(struct task_struct *t)
92{
93}
94#endif
95
96#ifdef CONFIG_SPE
97extern void flush_spe_to_thread(struct task_struct *);
98#else
99static inline void flush_spe_to_thread(struct task_struct *t)
100{
101}
102#endif
103
85extern int call_rtas(const char *, int, int, unsigned long *, ...); 104extern int call_rtas(const char *, int, int, unsigned long *, ...);
86extern void cacheable_memzero(void *p, unsigned int nb); 105extern void cacheable_memzero(void *p, unsigned int nb);
87extern void *cacheable_memcpy(void *, const void *, unsigned int); 106extern void *cacheable_memcpy(void *, const void *, unsigned int);
88extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); 107extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
89extern void bad_page_fault(struct pt_regs *, unsigned long, int); 108extern void bad_page_fault(struct pt_regs *, unsigned long, int);
90extern void die(const char *, struct pt_regs *, long); 109extern int die(const char *, struct pt_regs *, long);
91extern void _exception(int, struct pt_regs *, int, unsigned long); 110extern void _exception(int, struct pt_regs *, int, unsigned long);
111void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
112
92#ifdef CONFIG_BOOKE_WDT 113#ifdef CONFIG_BOOKE_WDT
93extern u32 booke_wdt_enabled; 114extern u32 booke_wdt_enabled;
94extern u32 booke_wdt_period; 115extern u32 booke_wdt_period;
diff --git a/include/asm-ppc/thread_info.h b/include/asm-ppc/thread_info.h
deleted file mode 100644
index 27903db42efc..000000000000
--- a/include/asm-ppc/thread_info.h
+++ /dev/null
@@ -1,107 +0,0 @@
1/* thread_info.h: PPC low-level thread information
2 * adapted from the i386 version by Paul Mackerras
3 *
4 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
5 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
6 */
7
8#ifndef _ASM_THREAD_INFO_H
9#define _ASM_THREAD_INFO_H
10
11#ifdef __KERNEL__
12#ifndef __ASSEMBLY__
13/*
14 * low level task data.
15 * If you change this, change the TI_* offsets below to match.
16 */
17struct thread_info {
18 struct task_struct *task; /* main task structure */
19 struct exec_domain *exec_domain; /* execution domain */
20 unsigned long flags; /* low level flags */
21 unsigned long local_flags; /* non-racy flags */
22 int cpu; /* cpu we're on */
23 int preempt_count; /* 0 => preemptable,
24 <0 => BUG */
25 struct restart_block restart_block;
26};
27
28#define INIT_THREAD_INFO(tsk) \
29{ \
30 .task = &tsk, \
31 .exec_domain = &default_exec_domain, \
32 .flags = 0, \
33 .local_flags = 0, \
34 .cpu = 0, \
35 .preempt_count = 1, \
36 .restart_block = { \
37 .fn = do_no_restart_syscall, \
38 }, \
39}
40
41#define init_thread_info (init_thread_union.thread_info)
42#define init_stack (init_thread_union.stack)
43
44/*
45 * macros/functions for gaining access to the thread information structure
46 */
47
48/* how to get the thread information struct from C */
49static inline struct thread_info *current_thread_info(void)
50{
51 struct thread_info *ti;
52 __asm__("rlwinm %0,1,0,0,18" : "=r"(ti));
53 return ti;
54}
55
56/* thread information allocation */
57#define alloc_thread_info(tsk) ((struct thread_info *) \
58 __get_free_pages(GFP_KERNEL, 1))
59#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
60#define get_thread_info(ti) get_task_struct((ti)->task)
61#define put_thread_info(ti) put_task_struct((ti)->task)
62#endif /* __ASSEMBLY__ */
63
64/*
65 * Size of kernel stack for each process.
66 */
67#define THREAD_SIZE 8192 /* 2 pages */
68
69#define PREEMPT_ACTIVE 0x10000000
70
71/*
72 * thread information flag bit numbers
73 */
74#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
75#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
76#define TIF_SIGPENDING 2 /* signal pending */
77#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
78#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling
79 TIF_NEED_RESCHED */
80#define TIF_MEMDIE 5
81#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
82#define TIF_SECCOMP 7 /* secure computing */
83
84/* as above, but as bit values */
85#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
86#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
87#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
88#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
89#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
90#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
91#define _TIF_SECCOMP (1<<TIF_SECCOMP)
92
93#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
94
95/*
96 * Non racy (local) flags bit numbers
97 */
98#define TIFL_FORCE_NOERROR 0 /* don't return error from current
99 syscall even if result < 0 */
100
101/* as above, but as bit values */
102#define _TIFL_FORCE_NOERROR (1<<TIFL_FORCE_NOERROR)
103
104
105#endif /* __KERNEL__ */
106
107#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-ppc/types.h b/include/asm-ppc/types.h
deleted file mode 100644
index 77dc24d7d2ad..000000000000
--- a/include/asm-ppc/types.h
+++ /dev/null
@@ -1,69 +0,0 @@
1#ifndef _PPC_TYPES_H
2#define _PPC_TYPES_H
3
4#ifndef __ASSEMBLY__
5
6typedef __signed__ char __s8;
7typedef unsigned char __u8;
8
9typedef __signed__ short __s16;
10typedef unsigned short __u16;
11
12typedef __signed__ int __s32;
13typedef unsigned int __u32;
14
15#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
16typedef __signed__ long long __s64;
17typedef unsigned long long __u64;
18#endif
19
20typedef struct {
21 __u32 u[4];
22} __vector128;
23
24/*
25 * XXX allowed outside of __KERNEL__ for now, until glibc gets
26 * a proper set of asm headers of its own. -- paulus
27 */
28typedef unsigned short umode_t;
29
30#endif /* __ASSEMBLY__ */
31
32#ifdef __KERNEL__
33/*
34 * These aren't exported outside the kernel to avoid name space clashes
35 */
36#define BITS_PER_LONG 32
37
38#ifndef __ASSEMBLY__
39
40#include <linux/config.h>
41
42typedef signed char s8;
43typedef unsigned char u8;
44
45typedef signed short s16;
46typedef unsigned short u16;
47
48typedef signed int s32;
49typedef unsigned int u32;
50
51typedef signed long long s64;
52typedef unsigned long long u64;
53
54typedef __vector128 vector128;
55
56/* DMA addresses are 32-bits wide */
57typedef u32 dma_addr_t;
58typedef u64 dma64_addr_t;
59
60#ifdef CONFIG_LBD
61typedef u64 sector_t;
62#define HAVE_SECTOR_T
63#endif
64
65#endif /* __ASSEMBLY__ */
66
67#endif /* __KERNEL__ */
68
69#endif
diff --git a/include/asm-ppc/vga.h b/include/asm-ppc/vga.h
deleted file mode 100644
index c5864734e3e1..000000000000
--- a/include/asm-ppc/vga.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */
6
7#ifdef __KERNEL__
8#ifndef _LINUX_ASM_VGA_H_
9#define _LINUX_ASM_VGA_H_
10
11#include <asm/io.h>
12
13#include <linux/config.h>
14
15#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
16
17#define VT_BUF_HAVE_RW
18/*
19 * These are only needed for supporting VGA or MDA text mode, which use little
20 * endian byte ordering.
21 * In other cases, we can optimize by using native byte ordering and
22 * <linux/vt_buffer.h> has already done the right job for us.
23 */
24
25extern inline void scr_writew(u16 val, volatile u16 *addr)
26{
27 st_le16(addr, val);
28}
29
30extern inline u16 scr_readw(volatile const u16 *addr)
31{
32 return ld_le16(addr);
33}
34
35#define VT_BUF_HAVE_MEMCPYW
36#define scr_memcpyw memcpy
37
38#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
39
40extern unsigned long vgacon_remap_base;
41#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
42#define vga_readb(x) (*(x))
43#define vga_writeb(x,y) (*(y) = (x))
44
45#endif
46#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/xmon.h b/include/asm-ppc/xmon.h
deleted file mode 100644
index 042b83e6680d..000000000000
--- a/include/asm-ppc/xmon.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef __PPC_XMON_H
2#define __PPC_XMON_H
3#ifdef __KERNEL__
4
5struct pt_regs;
6
7extern void xmon(struct pt_regs *excp);
8extern void xmon_printf(const char *fmt, ...);
9extern void xmon_map_scc(void);
10extern int xmon_bpt(struct pt_regs *regs);
11extern int xmon_sstep(struct pt_regs *regs);
12extern int xmon_iabr_match(struct pt_regs *regs);
13extern int xmon_dabr_match(struct pt_regs *regs);
14extern void (*xmon_fault_handler)(struct pt_regs *regs);
15
16#endif
17#endif
diff --git a/include/asm-ppc64/abs_addr.h b/include/asm-ppc64/abs_addr.h
index 84c24d4cdb71..dc3fc3fefef2 100644
--- a/include/asm-ppc64/abs_addr.h
+++ b/include/asm-ppc64/abs_addr.h
@@ -63,4 +63,11 @@ static inline unsigned long phys_to_abs(unsigned long pa)
63#define virt_to_abs(va) phys_to_abs(__pa(va)) 63#define virt_to_abs(va) phys_to_abs(__pa(va))
64#define abs_to_virt(aa) __va(aa) 64#define abs_to_virt(aa) __va(aa)
65 65
66/*
67 * Converts Virtual Address to Real Address for
68 * Legacy iSeries Hypervisor calls
69 */
70#define iseries_hv_addr(virtaddr) \
71 (0x8000000000000000 | virt_to_abs(virtaddr))
72
66#endif /* _ABS_ADDR_H */ 73#endif /* _ABS_ADDR_H */
diff --git a/include/asm-ppc64/atomic.h b/include/asm-ppc64/atomic.h
deleted file mode 100644
index 0e5f25e83bc0..000000000000
--- a/include/asm-ppc64/atomic.h
+++ /dev/null
@@ -1,197 +0,0 @@
1/*
2 * PowerPC64 atomic operations
3 *
4 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef _ASM_PPC64_ATOMIC_H_
14#define _ASM_PPC64_ATOMIC_H_
15
16#include <asm/memory.h>
17
18typedef struct { volatile int counter; } atomic_t;
19
20#define ATOMIC_INIT(i) { (i) }
21
22#define atomic_read(v) ((v)->counter)
23#define atomic_set(v,i) (((v)->counter) = (i))
24
25static __inline__ void atomic_add(int a, atomic_t *v)
26{
27 int t;
28
29 __asm__ __volatile__(
30"1: lwarx %0,0,%3 # atomic_add\n\
31 add %0,%2,%0\n\
32 stwcx. %0,0,%3\n\
33 bne- 1b"
34 : "=&r" (t), "=m" (v->counter)
35 : "r" (a), "r" (&v->counter), "m" (v->counter)
36 : "cc");
37}
38
39static __inline__ int atomic_add_return(int a, atomic_t *v)
40{
41 int t;
42
43 __asm__ __volatile__(
44 EIEIO_ON_SMP
45"1: lwarx %0,0,%2 # atomic_add_return\n\
46 add %0,%1,%0\n\
47 stwcx. %0,0,%2\n\
48 bne- 1b"
49 ISYNC_ON_SMP
50 : "=&r" (t)
51 : "r" (a), "r" (&v->counter)
52 : "cc", "memory");
53
54 return t;
55}
56
57#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
58
59static __inline__ void atomic_sub(int a, atomic_t *v)
60{
61 int t;
62
63 __asm__ __volatile__(
64"1: lwarx %0,0,%3 # atomic_sub\n\
65 subf %0,%2,%0\n\
66 stwcx. %0,0,%3\n\
67 bne- 1b"
68 : "=&r" (t), "=m" (v->counter)
69 : "r" (a), "r" (&v->counter), "m" (v->counter)
70 : "cc");
71}
72
73static __inline__ int atomic_sub_return(int a, atomic_t *v)
74{
75 int t;
76
77 __asm__ __volatile__(
78 EIEIO_ON_SMP
79"1: lwarx %0,0,%2 # atomic_sub_return\n\
80 subf %0,%1,%0\n\
81 stwcx. %0,0,%2\n\
82 bne- 1b"
83 ISYNC_ON_SMP
84 : "=&r" (t)
85 : "r" (a), "r" (&v->counter)
86 : "cc", "memory");
87
88 return t;
89}
90
91static __inline__ void atomic_inc(atomic_t *v)
92{
93 int t;
94
95 __asm__ __volatile__(
96"1: lwarx %0,0,%2 # atomic_inc\n\
97 addic %0,%0,1\n\
98 stwcx. %0,0,%2\n\
99 bne- 1b"
100 : "=&r" (t), "=m" (v->counter)
101 : "r" (&v->counter), "m" (v->counter)
102 : "cc");
103}
104
105static __inline__ int atomic_inc_return(atomic_t *v)
106{
107 int t;
108
109 __asm__ __volatile__(
110 EIEIO_ON_SMP
111"1: lwarx %0,0,%1 # atomic_inc_return\n\
112 addic %0,%0,1\n\
113 stwcx. %0,0,%1\n\
114 bne- 1b"
115 ISYNC_ON_SMP
116 : "=&r" (t)
117 : "r" (&v->counter)
118 : "cc", "memory");
119
120 return t;
121}
122
123/*
124 * atomic_inc_and_test - increment and test
125 * @v: pointer of type atomic_t
126 *
127 * Atomically increments @v by 1
128 * and returns true if the result is zero, or false for all
129 * other cases.
130 */
131#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
132
133static __inline__ void atomic_dec(atomic_t *v)
134{
135 int t;
136
137 __asm__ __volatile__(
138"1: lwarx %0,0,%2 # atomic_dec\n\
139 addic %0,%0,-1\n\
140 stwcx. %0,0,%2\n\
141 bne- 1b"
142 : "=&r" (t), "=m" (v->counter)
143 : "r" (&v->counter), "m" (v->counter)
144 : "cc");
145}
146
147static __inline__ int atomic_dec_return(atomic_t *v)
148{
149 int t;
150
151 __asm__ __volatile__(
152 EIEIO_ON_SMP
153"1: lwarx %0,0,%1 # atomic_dec_return\n\
154 addic %0,%0,-1\n\
155 stwcx. %0,0,%1\n\
156 bne- 1b"
157 ISYNC_ON_SMP
158 : "=&r" (t)
159 : "r" (&v->counter)
160 : "cc", "memory");
161
162 return t;
163}
164
165#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
166#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
167
168/*
169 * Atomically test *v and decrement if it is greater than 0.
170 * The function returns the old value of *v minus 1.
171 */
172static __inline__ int atomic_dec_if_positive(atomic_t *v)
173{
174 int t;
175
176 __asm__ __volatile__(
177 EIEIO_ON_SMP
178"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
179 addic. %0,%0,-1\n\
180 blt- 2f\n\
181 stwcx. %0,0,%1\n\
182 bne- 1b"
183 ISYNC_ON_SMP
184 "\n\
1852:" : "=&r" (t)
186 : "r" (&v->counter)
187 : "cc", "memory");
188
189 return t;
190}
191
192#define smp_mb__before_atomic_dec() smp_mb()
193#define smp_mb__after_atomic_dec() smp_mb()
194#define smp_mb__before_atomic_inc() smp_mb()
195#define smp_mb__after_atomic_inc() smp_mb()
196
197#endif /* _ASM_PPC64_ATOMIC_H_ */
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
index a0f831224f96..dbfa42ef4a99 100644
--- a/include/asm-ppc64/bitops.h
+++ b/include/asm-ppc64/bitops.h
@@ -42,7 +42,7 @@
42 42
43#ifdef __KERNEL__ 43#ifdef __KERNEL__
44 44
45#include <asm/memory.h> 45#include <asm/synch.h>
46 46
47/* 47/*
48 * clear_bit doesn't imply a memory barrier 48 * clear_bit doesn't imply a memory barrier
diff --git a/include/asm-ppc64/btext.h b/include/asm-ppc64/btext.h
index 67aef0cc72c0..71cce36bc630 100644
--- a/include/asm-ppc64/btext.h
+++ b/include/asm-ppc64/btext.h
@@ -15,6 +15,7 @@ extern int boot_text_mapped;
15extern int btext_initialize(struct device_node *np); 15extern int btext_initialize(struct device_node *np);
16 16
17extern void map_boot_text(void); 17extern void map_boot_text(void);
18extern void init_boot_display(void);
18extern void btext_update_display(unsigned long phys, int width, int height, 19extern void btext_update_display(unsigned long phys, int width, int height,
19 int depth, int pitch); 20 int depth, int pitch);
20 21
diff --git a/include/asm-ppc64/cputable.h b/include/asm-ppc64/cputable.h
deleted file mode 100644
index acc9b4d6c168..000000000000
--- a/include/asm-ppc64/cputable.h
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * include/asm-ppc64/cputable.h
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef __ASM_PPC_CPUTABLE_H
16#define __ASM_PPC_CPUTABLE_H
17
18#include <linux/config.h>
19#include <asm/page.h> /* for ASM_CONST */
20
21/* Exposed to userland CPU features - Must match ppc32 definitions */
22#define PPC_FEATURE_32 0x80000000
23#define PPC_FEATURE_64 0x40000000
24#define PPC_FEATURE_601_INSTR 0x20000000
25#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
26#define PPC_FEATURE_HAS_FPU 0x08000000
27#define PPC_FEATURE_HAS_MMU 0x04000000
28#define PPC_FEATURE_HAS_4xxMAC 0x02000000
29#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
30
31#ifdef __KERNEL__
32
33#ifndef __ASSEMBLY__
34
35/* This structure can grow, it's real size is used by head.S code
36 * via the mkdefs mechanism.
37 */
38struct cpu_spec;
39struct op_ppc64_model;
40
41typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
42
43struct cpu_spec {
44 /* CPU is matched via (PVR & pvr_mask) == pvr_value */
45 unsigned int pvr_mask;
46 unsigned int pvr_value;
47
48 char *cpu_name;
49 unsigned long cpu_features; /* Kernel features */
50 unsigned int cpu_user_features; /* Userland features */
51
52 /* cache line sizes */
53 unsigned int icache_bsize;
54 unsigned int dcache_bsize;
55
56 /* number of performance monitor counters */
57 unsigned int num_pmcs;
58
59 /* this is called to initialize various CPU bits like L1 cache,
60 * BHT, SPD, etc... from head.S before branching to identify_machine
61 */
62 cpu_setup_t cpu_setup;
63
64 /* Used by oprofile userspace to select the right counters */
65 char *oprofile_cpu_type;
66
67 /* Processor specific oprofile operations */
68 struct op_ppc64_model *oprofile_model;
69};
70
71extern struct cpu_spec cpu_specs[];
72extern struct cpu_spec *cur_cpu_spec;
73
74static inline unsigned long cpu_has_feature(unsigned long feature)
75{
76 return cur_cpu_spec->cpu_features & feature;
77}
78
79#endif /* __ASSEMBLY__ */
80
81/* CPU kernel features */
82
83/* Retain the 32b definitions for the time being - use bottom half of word */
84#define CPU_FTR_SPLIT_ID_CACHE ASM_CONST(0x0000000000000001)
85#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
86#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
87#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
88#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
89#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
90#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
91#define CPU_FTR_604_PERF_MON ASM_CONST(0x0000000000000080)
92#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
93#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
94#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
95#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
96#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
97#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
98#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
99
100/* Add the 64b processor unique features in the top half of the word */
101#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
102#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
103#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
104#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
105#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000001000000000)
106#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
107#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
108/* unused ASM_CONST(0x0000008000000000) */
109#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
110#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
111#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
112#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
113#define CPU_FTR_CTRL ASM_CONST(0x0000100000000000)
114
115#ifndef __ASSEMBLY__
116
117#define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_64 | \
118 PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)
119
120#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
121 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
122 CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
123
124/* iSeries doesn't support large pages */
125#ifdef CONFIG_PPC_ISERIES
126#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE)
127#else
128#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
129#endif /* CONFIG_PPC_ISERIES */
130
131#endif /* __ASSEMBLY */
132
133#ifdef __ASSEMBLY__
134
135#define BEGIN_FTR_SECTION 98:
136
137#define END_FTR_SECTION(msk, val) \
13899: \
139 .section __ftr_fixup,"a"; \
140 .align 3; \
141 .llong msk; \
142 .llong val; \
143 .llong 98b; \
144 .llong 99b; \
145 .previous
146
147#else
148
149#define BEGIN_FTR_SECTION "98:\n"
150#define END_FTR_SECTION(msk, val) \
151"99:\n" \
152" .section __ftr_fixup,\"a\";\n" \
153" .align 3;\n" \
154" .llong "#msk";\n" \
155" .llong "#val";\n" \
156" .llong 98b;\n" \
157" .llong 99b;\n" \
158" .previous\n"
159
160#endif /* __ASSEMBLY__ */
161
162#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
163#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
164
165#endif /* __ASM_PPC_CPUTABLE_H */
166#endif /* __KERNEL__ */
167
diff --git a/include/asm-ppc64/dart.h b/include/asm-ppc64/dart.h
new file mode 100644
index 000000000000..cdf8a2dec05f
--- /dev/null
+++ b/include/asm-ppc64/dart.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_DART_H
20#define _ASM_DART_H
21
22
23/* physical base of DART registers */
24#define DART_BASE 0xf8033000UL
25
26/* Offset from base to control register */
27#define DARTCNTL 0
28/* Offset from base to exception register */
29#define DARTEXCP 0x10
30/* Offset from base to TLB tag registers */
31#define DARTTAG 0x1000
32
33
34/* Control Register fields */
35
36/* base address of table (pfn) */
37#define DARTCNTL_BASE_MASK 0xfffff
38#define DARTCNTL_BASE_SHIFT 12
39
40#define DARTCNTL_FLUSHTLB 0x400
41#define DARTCNTL_ENABLE 0x200
42
43/* size of table in pages */
44#define DARTCNTL_SIZE_MASK 0x1ff
45#define DARTCNTL_SIZE_SHIFT 0
46
47
48/* DART table fields */
49
50#define DARTMAP_VALID 0x80000000
51#define DARTMAP_RPNMASK 0x00ffffff
52
53
54#define DART_PAGE_SHIFT 12
55#define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT)
56#define DART_PAGE_FACTOR (PAGE_SHIFT - DART_PAGE_SHIFT)
57
58
59#endif
diff --git a/include/asm-ppc64/dbdma.h b/include/asm-ppc64/dbdma.h
deleted file mode 100644
index f2d5d5dc3377..000000000000
--- a/include/asm-ppc64/dbdma.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/dbdma.h>
2
diff --git a/include/asm-ppc64/dma.h b/include/asm-ppc64/dma.h
deleted file mode 100644
index dfd1f69059ba..000000000000
--- a/include/asm-ppc64/dma.h
+++ /dev/null
@@ -1,329 +0,0 @@
1/*
2 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992.
6 * Changes for ppc sound by Christoph Nadig
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef _ASM_DMA_H
15#define _ASM_DMA_H
16
17#include <linux/config.h>
18#include <asm/io.h>
19#include <linux/spinlock.h>
20#include <asm/system.h>
21
22#ifndef MAX_DMA_CHANNELS
23#define MAX_DMA_CHANNELS 8
24#endif
25
26/* The maximum address that we can perform a DMA transfer to on this platform */
27/* Doesn't really apply... */
28#define MAX_DMA_ADDRESS (~0UL)
29
30#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
31
32#define dma_outb outb
33#define dma_inb inb
34
35/*
36 * NOTES about DMA transfers:
37 *
38 * controller 1: channels 0-3, byte operations, ports 00-1F
39 * controller 2: channels 4-7, word operations, ports C0-DF
40 *
41 * - ALL registers are 8 bits only, regardless of transfer size
42 * - channel 4 is not used - cascades 1 into 2.
43 * - channels 0-3 are byte - addresses/counts are for physical bytes
44 * - channels 5-7 are word - addresses/counts are for physical words
45 * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
46 * - transfer count loaded to registers is 1 less than actual count
47 * - controller 2 offsets are all even (2x offsets for controller 1)
48 * - page registers for 5-7 don't use data bit 0, represent 128K pages
49 * - page registers for 0-3 use bit 0, represent 64K pages
50 *
51 * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
52 * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
53 * Note that addresses loaded into registers must be _physical_ addresses,
54 * not logical addresses (which may differ if paging is active).
55 *
56 * Address mapping for channels 0-3:
57 *
58 * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
59 * | ... | | ... | | ... |
60 * | ... | | ... | | ... |
61 * | ... | | ... | | ... |
62 * P7 ... P0 A7 ... A0 A7 ... A0
63 * | Page | Addr MSB | Addr LSB | (DMA registers)
64 *
65 * Address mapping for channels 5-7:
66 *
67 * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
68 * | ... | \ \ ... \ \ \ ... \ \
69 * | ... | \ \ ... \ \ \ ... \ (not used)
70 * | ... | \ \ ... \ \ \ ... \
71 * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
72 * | Page | Addr MSB | Addr LSB | (DMA registers)
73 *
74 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
75 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
76 * the hardware level, so odd-byte transfers aren't possible).
77 *
78 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
79 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
80 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
81 *
82 */
83
84/* 8237 DMA controllers */
85#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
86#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
87
88/* DMA controller registers */
89#define DMA1_CMD_REG 0x08 /* command register (w) */
90#define DMA1_STAT_REG 0x08 /* status register (r) */
91#define DMA1_REQ_REG 0x09 /* request register (w) */
92#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
93#define DMA1_MODE_REG 0x0B /* mode register (w) */
94#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
95#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
96#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
97#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
98#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
99
100#define DMA2_CMD_REG 0xD0 /* command register (w) */
101#define DMA2_STAT_REG 0xD0 /* status register (r) */
102#define DMA2_REQ_REG 0xD2 /* request register (w) */
103#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
104#define DMA2_MODE_REG 0xD6 /* mode register (w) */
105#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
106#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
107#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
108#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
109#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
110
111#define DMA_ADDR_0 0x00 /* DMA address registers */
112#define DMA_ADDR_1 0x02
113#define DMA_ADDR_2 0x04
114#define DMA_ADDR_3 0x06
115#define DMA_ADDR_4 0xC0
116#define DMA_ADDR_5 0xC4
117#define DMA_ADDR_6 0xC8
118#define DMA_ADDR_7 0xCC
119
120#define DMA_CNT_0 0x01 /* DMA count registers */
121#define DMA_CNT_1 0x03
122#define DMA_CNT_2 0x05
123#define DMA_CNT_3 0x07
124#define DMA_CNT_4 0xC2
125#define DMA_CNT_5 0xC6
126#define DMA_CNT_6 0xCA
127#define DMA_CNT_7 0xCE
128
129#define DMA_LO_PAGE_0 0x87 /* DMA page registers */
130#define DMA_LO_PAGE_1 0x83
131#define DMA_LO_PAGE_2 0x81
132#define DMA_LO_PAGE_3 0x82
133#define DMA_LO_PAGE_5 0x8B
134#define DMA_LO_PAGE_6 0x89
135#define DMA_LO_PAGE_7 0x8A
136
137#define DMA_HI_PAGE_0 0x487 /* DMA page registers */
138#define DMA_HI_PAGE_1 0x483
139#define DMA_HI_PAGE_2 0x481
140#define DMA_HI_PAGE_3 0x482
141#define DMA_HI_PAGE_5 0x48B
142#define DMA_HI_PAGE_6 0x489
143#define DMA_HI_PAGE_7 0x48A
144
145#define DMA1_EXT_REG 0x40B
146#define DMA2_EXT_REG 0x4D6
147
148#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
149#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
150#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
151
152#define DMA_AUTOINIT 0x10
153
154extern spinlock_t dma_spin_lock;
155
156static __inline__ unsigned long claim_dma_lock(void)
157{
158 unsigned long flags;
159 spin_lock_irqsave(&dma_spin_lock, flags);
160 return flags;
161}
162
163static __inline__ void release_dma_lock(unsigned long flags)
164{
165 spin_unlock_irqrestore(&dma_spin_lock, flags);
166}
167
168/* enable/disable a specific DMA channel */
169static __inline__ void enable_dma(unsigned int dmanr)
170{
171 unsigned char ucDmaCmd=0x00;
172
173 if (dmanr != 4)
174 {
175 dma_outb(0, DMA2_MASK_REG); /* This may not be enabled */
176 dma_outb(ucDmaCmd, DMA2_CMD_REG); /* Enable group */
177 }
178 if (dmanr<=3)
179 {
180 dma_outb(dmanr, DMA1_MASK_REG);
181 dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */
182 } else
183 {
184 dma_outb(dmanr & 3, DMA2_MASK_REG);
185 }
186}
187
188static __inline__ void disable_dma(unsigned int dmanr)
189{
190 if (dmanr<=3)
191 dma_outb(dmanr | 4, DMA1_MASK_REG);
192 else
193 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
194}
195
196/* Clear the 'DMA Pointer Flip Flop'.
197 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
198 * Use this once to initialize the FF to a known state.
199 * After that, keep track of it. :-)
200 * --- In order to do that, the DMA routines below should ---
201 * --- only be used while interrupts are disabled! ---
202 */
203static __inline__ void clear_dma_ff(unsigned int dmanr)
204{
205 if (dmanr<=3)
206 dma_outb(0, DMA1_CLEAR_FF_REG);
207 else
208 dma_outb(0, DMA2_CLEAR_FF_REG);
209}
210
211/* set mode (above) for a specific DMA channel */
212static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
213{
214 if (dmanr<=3)
215 dma_outb(mode | dmanr, DMA1_MODE_REG);
216 else
217 dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
218}
219
220/* Set only the page register bits of the transfer address.
221 * This is used for successive transfers when we know the contents of
222 * the lower 16 bits of the DMA current address register, but a 64k boundary
223 * may have been crossed.
224 */
225static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
226{
227 switch(dmanr) {
228 case 0:
229 dma_outb(pagenr, DMA_LO_PAGE_0);
230 dma_outb(pagenr>>8, DMA_HI_PAGE_0);
231 break;
232 case 1:
233 dma_outb(pagenr, DMA_LO_PAGE_1);
234 dma_outb(pagenr>>8, DMA_HI_PAGE_1);
235 break;
236 case 2:
237 dma_outb(pagenr, DMA_LO_PAGE_2);
238 dma_outb(pagenr>>8, DMA_HI_PAGE_2);
239 break;
240 case 3:
241 dma_outb(pagenr, DMA_LO_PAGE_3);
242 dma_outb(pagenr>>8, DMA_HI_PAGE_3);
243 break;
244 case 5:
245 dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
246 dma_outb(pagenr>>8, DMA_HI_PAGE_5);
247 break;
248 case 6:
249 dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
250 dma_outb(pagenr>>8, DMA_HI_PAGE_6);
251 break;
252 case 7:
253 dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
254 dma_outb(pagenr>>8, DMA_HI_PAGE_7);
255 break;
256 }
257}
258
259
260/* Set transfer address & page bits for specific DMA channel.
261 * Assumes dma flipflop is clear.
262 */
263static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
264{
265 if (dmanr <= 3) {
266 dma_outb( phys & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
267 dma_outb( (phys>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
268 } else {
269 dma_outb( (phys>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
270 dma_outb( (phys>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
271 }
272 set_dma_page(dmanr, phys>>16);
273}
274
275
276/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
277 * a specific DMA channel.
278 * You must ensure the parameters are valid.
279 * NOTE: from a manual: "the number of transfers is one more
280 * than the initial word count"! This is taken into account.
281 * Assumes dma flip-flop is clear.
282 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
283 */
284static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
285{
286 count--;
287 if (dmanr <= 3) {
288 dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
289 dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
290 } else {
291 dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
292 dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
293 }
294}
295
296
297/* Get DMA residue count. After a DMA transfer, this
298 * should return zero. Reading this while a DMA transfer is
299 * still in progress will return unpredictable results.
300 * If called before the channel has been used, it may return 1.
301 * Otherwise, it returns the number of _bytes_ left to transfer.
302 *
303 * Assumes DMA flip-flop is clear.
304 */
305static __inline__ int get_dma_residue(unsigned int dmanr)
306{
307 unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
308 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
309
310 /* using short to get 16-bit wrap around */
311 unsigned short count;
312
313 count = 1 + dma_inb(io_port);
314 count += dma_inb(io_port) << 8;
315
316 return (dmanr <= 3)? count : (count<<1);
317}
318
319/* These are in kernel/dma.c: */
320extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
321extern void free_dma(unsigned int dmanr); /* release it again */
322
323#ifdef CONFIG_PCI
324extern int isa_dma_bridge_buggy;
325#else
326#define isa_dma_bridge_buggy (0)
327#endif
328#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
329#endif /* _ASM_DMA_H */
diff --git a/include/asm-ppc64/futex.h b/include/asm-ppc64/futex.h
index cb2640b3a408..266b460de44e 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-ppc64/futex.h
@@ -5,7 +5,7 @@
5 5
6#include <linux/futex.h> 6#include <linux/futex.h>
7#include <asm/errno.h> 7#include <asm/errno.h>
8#include <asm/memory.h> 8#include <asm/synch.h>
9#include <asm/uaccess.h> 9#include <asm/uaccess.h>
10 10
11#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 11#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
diff --git a/include/asm-ppc64/hardirq.h b/include/asm-ppc64/hardirq.h
deleted file mode 100644
index 4ee72bb1fd48..000000000000
--- a/include/asm-ppc64/hardirq.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef __ASM_HARDIRQ_H
2#define __ASM_HARDIRQ_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/config.h>
12#include <linux/cache.h>
13#include <linux/preempt.h>
14
15typedef struct {
16 unsigned int __softirq_pending;
17} ____cacheline_aligned irq_cpustat_t;
18
19#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20
21static inline void ack_bad_irq(int irq)
22{
23 printk(KERN_CRIT "illegal vector %d received!\n", irq);
24 BUG();
25}
26
27#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
index 59c958aea4db..bd7c9532d77b 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-ppc64/io.h
@@ -15,7 +15,7 @@
15#ifdef CONFIG_PPC_ISERIES 15#ifdef CONFIG_PPC_ISERIES
16#include <asm/iSeries/iSeries_io.h> 16#include <asm/iSeries/iSeries_io.h>
17#endif 17#endif
18#include <asm/memory.h> 18#include <asm/synch.h>
19#include <asm/delay.h> 19#include <asm/delay.h>
20 20
21#include <asm-generic/iomap.h> 21#include <asm-generic/iomap.h>
diff --git a/include/asm-ppc64/irq.h b/include/asm-ppc64/irq.h
deleted file mode 100644
index 99782afb4cde..000000000000
--- a/include/asm-ppc64/irq.h
+++ /dev/null
@@ -1,120 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _ASM_IRQ_H
3#define _ASM_IRQ_H
4
5/*
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/threads.h>
14
15/*
16 * Maximum number of interrupt sources that we can handle.
17 */
18#define NR_IRQS 512
19
20/* this number is used when no interrupt has been assigned */
21#define NO_IRQ (-1)
22
23/*
24 * These constants are used for passing information about interrupt
25 * signal polarity and level/edge sensing to the low-level PIC chip
26 * drivers.
27 */
28#define IRQ_SENSE_MASK 0x1
29#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
30#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
31
32#define IRQ_POLARITY_MASK 0x2
33#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
34#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
35
36/*
37 * IRQ line status macro IRQ_PER_CPU is used
38 */
39#define ARCH_HAS_IRQ_PER_CPU
40
41#define get_irq_desc(irq) (&irq_desc[(irq)])
42
43/* Define a way to iterate across irqs. */
44#define for_each_irq(i) \
45 for ((i) = 0; (i) < NR_IRQS; ++(i))
46
47/* Interrupt numbers are virtual in case they are sparsely
48 * distributed by the hardware.
49 */
50extern unsigned int virt_irq_to_real_map[NR_IRQS];
51
52/* Create a mapping for a real_irq if it doesn't already exist.
53 * Return the virtual irq as a convenience.
54 */
55int virt_irq_create_mapping(unsigned int real_irq);
56void virt_irq_init(void);
57
58static inline unsigned int virt_irq_to_real(unsigned int virt_irq)
59{
60 return virt_irq_to_real_map[virt_irq];
61}
62
63extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
64
65/*
66 * Because many systems have two overlapping names spaces for
67 * interrupts (ISA and XICS for example), and the ISA interrupts
68 * have historically not been easy to renumber, we allow ISA
69 * interrupts to take values 0 - 15, and shift up the remaining
70 * interrupts by 0x10.
71 */
72#define NUM_ISA_INTERRUPTS 0x10
73extern int __irq_offset_value;
74
75static inline int irq_offset_up(int irq)
76{
77 return(irq + __irq_offset_value);
78}
79
80static inline int irq_offset_down(int irq)
81{
82 return(irq - __irq_offset_value);
83}
84
85static inline int irq_offset_value(void)
86{
87 return __irq_offset_value;
88}
89
90static __inline__ int irq_canonicalize(int irq)
91{
92 return irq;
93}
94
95extern int distribute_irqs;
96
97struct irqaction;
98struct pt_regs;
99
100#ifdef CONFIG_IRQSTACKS
101/*
102 * Per-cpu stacks for handling hard and soft interrupts.
103 */
104extern struct thread_info *hardirq_ctx[NR_CPUS];
105extern struct thread_info *softirq_ctx[NR_CPUS];
106
107extern void irq_ctx_init(void);
108extern void call_do_softirq(struct thread_info *tp);
109extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
110 struct irqaction *action, struct thread_info *tp);
111
112#define __ARCH_HAS_DO_SOFTIRQ
113
114#else
115#define irq_ctx_init()
116
117#endif /* CONFIG_IRQSTACKS */
118
119#endif /* _ASM_IRQ_H */
120#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/keylargo.h b/include/asm-ppc64/keylargo.h
deleted file mode 100644
index 4d78e3d0314c..000000000000
--- a/include/asm-ppc64/keylargo.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/keylargo.h>
2
diff --git a/include/asm-ppc64/kmap_types.h b/include/asm-ppc64/kmap_types.h
deleted file mode 100644
index fd1574648223..000000000000
--- a/include/asm-ppc64/kmap_types.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _ASM_KMAP_TYPES_H
3#define _ASM_KMAP_TYPES_H
4
5enum km_type {
6 KM_BOUNCE_READ,
7 KM_SKB_SUNRPC_DATA,
8 KM_SKB_DATA_SOFTIRQ,
9 KM_USER0,
10 KM_USER1,
11 KM_BIO_SRC_IRQ,
12 KM_BIO_DST_IRQ,
13 KM_PTE0,
14 KM_PTE1,
15 KM_IRQ0,
16 KM_IRQ1,
17 KM_SOFTIRQ0,
18 KM_SOFTIRQ1,
19 KM_TYPE_NR
20};
21
22#endif
23#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/macio.h b/include/asm-ppc64/macio.h
deleted file mode 100644
index a3028b364f70..000000000000
--- a/include/asm-ppc64/macio.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/macio.h>
2
diff --git a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
deleted file mode 100644
index af53ffb55726..000000000000
--- a/include/asm-ppc64/memory.h
+++ /dev/null
@@ -1,61 +0,0 @@
1#ifndef _ASM_PPC64_MEMORY_H_
2#define _ASM_PPC64_MEMORY_H_
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/config.h>
12
13/*
14 * Arguably the bitops and *xchg operations don't imply any memory barrier
15 * or SMP ordering, but in fact a lot of drivers expect them to imply
16 * both, since they do on x86 cpus.
17 */
18#ifdef CONFIG_SMP
19#define EIEIO_ON_SMP "eieio\n"
20#define ISYNC_ON_SMP "\n\tisync"
21#define SYNC_ON_SMP "lwsync\n\t"
22#else
23#define EIEIO_ON_SMP
24#define ISYNC_ON_SMP
25#define SYNC_ON_SMP
26#endif
27
28static inline void eieio(void)
29{
30 __asm__ __volatile__ ("eieio" : : : "memory");
31}
32
33static inline void isync(void)
34{
35 __asm__ __volatile__ ("isync" : : : "memory");
36}
37
38#ifdef CONFIG_SMP
39#define eieio_on_smp() eieio()
40#define isync_on_smp() isync()
41#else
42#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
43#define isync_on_smp() __asm__ __volatile__("": : :"memory")
44#endif
45
46/* Macros for adjusting thread priority (hardware multi-threading) */
47#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
48#define HMT_low() asm volatile("or 1,1,1 # low priority")
49#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
50#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
51#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
52#define HMT_high() asm volatile("or 3,3,3 # high priority")
53
54#define HMT_VERY_LOW "\tor 31,31,31 # very low priority\n"
55#define HMT_LOW "\tor 1,1,1 # low priority\n"
56#define HMT_MEDIUM_LOW "\tor 6,6,6 # medium low priority\n"
57#define HMT_MEDIUM "\tor 2,2,2 # medium priority\n"
58#define HMT_MEDIUM_HIGH "\tor 5,5,5 # medium high priority\n"
59#define HMT_HIGH "\tor 3,3,3 # high priority\n"
60
61#endif
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 7bc42eb087ad..e0505acb77d9 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -14,6 +14,7 @@
14#define _PPC64_MMU_H_ 14#define _PPC64_MMU_H_
15 15
16#include <linux/config.h> 16#include <linux/config.h>
17#include <asm/ppc_asm.h> /* for ASM_CONST */
17#include <asm/page.h> 18#include <asm/page.h>
18 19
19/* 20/*
@@ -29,7 +30,7 @@
29 30
30/* Location of cpu0's segment table */ 31/* Location of cpu0's segment table */
31#define STAB0_PAGE 0x6 32#define STAB0_PAGE 0x6
32#define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT) 33#define STAB0_PHYS_ADDR (STAB0_PAGE<<12)
33 34
34#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
35extern char initial_stab[]; 36extern char initial_stab[];
@@ -205,6 +206,10 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
205 unsigned long prpn, 206 unsigned long prpn,
206 unsigned long vflags, unsigned long rflags); 207 unsigned long vflags, unsigned long rflags);
207 208
209extern long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
210 unsigned long va, unsigned long prpn,
211 unsigned long vflags, unsigned long rflags);
212
208extern void stabs_alloc(void); 213extern void stabs_alloc(void);
209 214
210#endif /* __ASSEMBLY__ */ 215#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/of_device.h b/include/asm-ppc64/of_device.h
deleted file mode 100644
index 7bc136e22590..000000000000
--- a/include/asm-ppc64/of_device.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/of_device.h>
2
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index a15422bcf30d..d404431f0a9a 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -11,13 +11,7 @@
11 */ 11 */
12 12
13#include <linux/config.h> 13#include <linux/config.h>
14 14#include <asm/ppc_asm.h> /* for ASM_CONST */
15#ifdef __ASSEMBLY__
16 #define ASM_CONST(x) x
17#else
18 #define __ASM_CONST(x) x##UL
19 #define ASM_CONST(x) __ASM_CONST(x)
20#endif
21 15
22/* PAGE_SHIFT determines the page size */ 16/* PAGE_SHIFT determines the page size */
23#define PAGE_SHIFT 12 17#define PAGE_SHIFT 12
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h
index d8991389ab39..60cf8c838af0 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-ppc64/pci-bridge.h
@@ -2,7 +2,9 @@
2#ifndef _ASM_PCI_BRIDGE_H 2#ifndef _ASM_PCI_BRIDGE_H
3#define _ASM_PCI_BRIDGE_H 3#define _ASM_PCI_BRIDGE_H
4 4
5#include <linux/config.h>
5#include <linux/pci.h> 6#include <linux/pci.h>
7#include <linux/list.h>
6 8
7/* 9/*
8 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -34,7 +36,7 @@ struct pci_controller {
34 36
35 struct pci_ops *ops; 37 struct pci_ops *ops;
36 volatile unsigned int __iomem *cfg_addr; 38 volatile unsigned int __iomem *cfg_addr;
37 volatile unsigned char __iomem *cfg_data; 39 volatile void __iomem *cfg_data;
38 40
39 /* Currently, we limit ourselves to 1 IO range and 3 mem 41 /* Currently, we limit ourselves to 1 IO range and 3 mem
40 * ranges since the common pci_bus structure can't handle more 42 * ranges since the common pci_bus structure can't handle more
@@ -71,6 +73,12 @@ struct pci_dn {
71 struct iommu_table *iommu_table; /* for phb's or bridges */ 73 struct iommu_table *iommu_table; /* for phb's or bridges */
72 struct pci_dev *pcidev; /* back-pointer to the pci device */ 74 struct pci_dev *pcidev; /* back-pointer to the pci device */
73 struct device_node *node; /* back-pointer to the device_node */ 75 struct device_node *node; /* back-pointer to the device_node */
76#ifdef CONFIG_PPC_ISERIES
77 struct list_head Device_List;
78 int Irq; /* Assigned IRQ */
79 int Flags; /* Possible flags(disable/bist)*/
80 u8 LogicalSlot; /* Hv Slot Index for Tces */
81#endif
74 u32 config_space[16]; /* saved PCI config space */ 82 u32 config_space[16]; /* saved PCI config space */
75}; 83};
76 84
@@ -96,6 +104,16 @@ static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
96 return fetch_dev_dn(dev); 104 return fetch_dev_dn(dev);
97} 105}
98 106
107static inline int pci_device_from_OF_node(struct device_node *np,
108 u8 *bus, u8 *devfn)
109{
110 if (!PCI_DN(np))
111 return -ENODEV;
112 *bus = PCI_DN(np)->busno;
113 *devfn = PCI_DN(np)->devfn;
114 return 0;
115}
116
99static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) 117static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
100{ 118{
101 if (bus->self) 119 if (bus->self)
@@ -105,7 +123,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
105} 123}
106 124
107extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, 125extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
108 struct device_node *dev); 126 struct device_node *dev, int primary);
109 127
110extern int pcibios_remove_root_bus(struct pci_controller *phb); 128extern int pcibios_remove_root_bus(struct pci_controller *phb);
111 129
diff --git a/include/asm-ppc64/pmac_feature.h b/include/asm-ppc64/pmac_feature.h
deleted file mode 100644
index e07e36c4cbb2..000000000000
--- a/include/asm-ppc64/pmac_feature.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/pmac_feature.h>
2
diff --git a/include/asm-ppc64/pmac_low_i2c.h b/include/asm-ppc64/pmac_low_i2c.h
deleted file mode 100644
index 7bcfc72c5c8a..000000000000
--- a/include/asm-ppc64/pmac_low_i2c.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/pmac_low_i2c.h>
2
diff --git a/include/asm-ppc64/ppc32.h b/include/asm-ppc64/ppc32.h
index 6b44a8caf395..3945a55d112a 100644
--- a/include/asm-ppc64/ppc32.h
+++ b/include/asm-ppc64/ppc32.h
@@ -70,18 +70,18 @@ typedef struct compat_siginfo {
70#define __old_sigaction32 old_sigaction32 70#define __old_sigaction32 old_sigaction32
71 71
72struct __old_sigaction32 { 72struct __old_sigaction32 {
73 unsigned sa_handler; 73 compat_uptr_t sa_handler;
74 compat_old_sigset_t sa_mask; 74 compat_old_sigset_t sa_mask;
75 unsigned int sa_flags; 75 unsigned int sa_flags;
76 unsigned sa_restorer; /* not used by Linux/SPARC yet */ 76 compat_uptr_t sa_restorer; /* not used by Linux/SPARC yet */
77}; 77};
78 78
79 79
80 80
81struct sigaction32 { 81struct sigaction32 {
82 unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */ 82 compat_uptr_t sa_handler; /* Really a pointer, but need to deal with 32 bits */
83 unsigned int sa_flags; 83 unsigned int sa_flags;
84 unsigned int sa_restorer; /* Another 32 bit pointer */ 84 compat_uptr_t sa_restorer; /* Another 32 bit pointer */
85 compat_sigset_t sa_mask; /* A 32 bit mask */ 85 compat_sigset_t sa_mask; /* A 32 bit mask */
86}; 86};
87 87
@@ -94,9 +94,9 @@ typedef struct sigaltstack_32 {
94struct sigcontext32 { 94struct sigcontext32 {
95 unsigned int _unused[4]; 95 unsigned int _unused[4];
96 int signal; 96 int signal;
97 unsigned int handler; 97 compat_uptr_t handler;
98 unsigned int oldmask; 98 unsigned int oldmask;
99 u32 regs; /* 4 byte pointer to the pt_regs32 structure. */ 99 compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */
100}; 100};
101 101
102struct mcontext32 { 102struct mcontext32 {
@@ -111,7 +111,7 @@ struct ucontext32 {
111 unsigned int uc_link; 111 unsigned int uc_link;
112 stack_32_t uc_stack; 112 stack_32_t uc_stack;
113 int uc_pad[7]; 113 int uc_pad[7];
114 u32 uc_regs; /* points to uc_mcontext field */ 114 compat_uptr_t uc_regs; /* points to uc_mcontext field */
115 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 115 compat_sigset_t uc_sigmask; /* mask last for extensibility */
116 /* glibc has 1024-bit signal masks, ours are 64-bit */ 116 /* glibc has 1024-bit signal masks, ours are 64-bit */
117 int uc_maskext[30]; 117 int uc_maskext[30];
diff --git a/include/asm-ppc64/ppc_asm.h b/include/asm-ppc64/ppc_asm.h
deleted file mode 100644
index 9031d8a29aca..000000000000
--- a/include/asm-ppc64/ppc_asm.h
+++ /dev/null
@@ -1,242 +0,0 @@
1/*
2 * arch/ppc64/kernel/ppc_asm.h
3 *
4 * Definitions used by various bits of low-level assembly code on PowerPC.
5 *
6 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef _PPC64_PPC_ASM_H
15#define _PPC64_PPC_ASM_H
16/*
17 * Macros for storing registers into and loading registers from
18 * exception frames.
19 */
20#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
21#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
22#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
23#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
24#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
25#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
26#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
27#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
28#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
29#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
30
31#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
32#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
33
34#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
35#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
36#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
37#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
38#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
39#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
40#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*(n)(base)
41#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
42#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
43#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
44#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
45#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
46
47#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
48#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
49#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
50#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
51#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
52#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
53#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
54#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
55#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
56#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
57#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
58#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
59
60/* Macros to adjust thread priority for Iseries hardware multithreading */
61#define HMT_LOW or 1,1,1
62#define HMT_MEDIUM or 2,2,2
63#define HMT_HIGH or 3,3,3
64
65/* Insert the high 32 bits of the MSR into what will be the new
66 MSR (via SRR1 and rfid) This preserves the MSR.SF and MSR.ISF
67 bits. */
68
69#define FIX_SRR1(ra, rb) \
70 mr rb,ra; \
71 mfmsr ra; \
72 rldimi ra,rb,0,32
73
74#define CLR_TOP32(r) rlwinm (r),(r),0,0,31 /* clear top 32 bits */
75
76/*
77 * LOADADDR( rn, name )
78 * loads the address of 'name' into 'rn'
79 *
80 * LOADBASE( rn, name )
81 * loads the address (less the low 16 bits) of 'name' into 'rn'
82 * suitable for base+disp addressing
83 */
84#define LOADADDR(rn,name) \
85 lis rn,name##@highest; \
86 ori rn,rn,name##@higher; \
87 rldicr rn,rn,32,31; \
88 oris rn,rn,name##@h; \
89 ori rn,rn,name##@l
90
91#define LOADBASE(rn,name) \
92 lis rn,name@highest; \
93 ori rn,rn,name@higher; \
94 rldicr rn,rn,32,31; \
95 oris rn,rn,name@ha
96
97
98#define SET_REG_TO_CONST(reg, value) \
99 lis reg,(((value)>>48)&0xFFFF); \
100 ori reg,reg,(((value)>>32)&0xFFFF); \
101 rldicr reg,reg,32,31; \
102 oris reg,reg,(((value)>>16)&0xFFFF); \
103 ori reg,reg,((value)&0xFFFF);
104
105#define SET_REG_TO_LABEL(reg, label) \
106 lis reg,(label)@highest; \
107 ori reg,reg,(label)@higher; \
108 rldicr reg,reg,32,31; \
109 oris reg,reg,(label)@h; \
110 ori reg,reg,(label)@l;
111
112
113/* PPPBBB - DRENG If KERNELBASE is always 0xC0...,
114 * Then we can easily do this with one asm insn. -Peter
115 */
116#define tophys(rd,rs) \
117 lis rd,((KERNELBASE>>48)&0xFFFF); \
118 rldicr rd,rd,32,31; \
119 sub rd,rs,rd
120
121#define tovirt(rd,rs) \
122 lis rd,((KERNELBASE>>48)&0xFFFF); \
123 rldicr rd,rd,32,31; \
124 add rd,rs,rd
125
126/* Condition Register Bit Fields */
127
128#define cr0 0
129#define cr1 1
130#define cr2 2
131#define cr3 3
132#define cr4 4
133#define cr5 5
134#define cr6 6
135#define cr7 7
136
137
138/* General Purpose Registers (GPRs) */
139
140#define r0 0
141#define r1 1
142#define r2 2
143#define r3 3
144#define r4 4
145#define r5 5
146#define r6 6
147#define r7 7
148#define r8 8
149#define r9 9
150#define r10 10
151#define r11 11
152#define r12 12
153#define r13 13
154#define r14 14
155#define r15 15
156#define r16 16
157#define r17 17
158#define r18 18
159#define r19 19
160#define r20 20
161#define r21 21
162#define r22 22
163#define r23 23
164#define r24 24
165#define r25 25
166#define r26 26
167#define r27 27
168#define r28 28
169#define r29 29
170#define r30 30
171#define r31 31
172
173
174/* Floating Point Registers (FPRs) */
175
176#define fr0 0
177#define fr1 1
178#define fr2 2
179#define fr3 3
180#define fr4 4
181#define fr5 5
182#define fr6 6
183#define fr7 7
184#define fr8 8
185#define fr9 9
186#define fr10 10
187#define fr11 11
188#define fr12 12
189#define fr13 13
190#define fr14 14
191#define fr15 15
192#define fr16 16
193#define fr17 17
194#define fr18 18
195#define fr19 19
196#define fr20 20
197#define fr21 21
198#define fr22 22
199#define fr23 23
200#define fr24 24
201#define fr25 25
202#define fr26 26
203#define fr27 27
204#define fr28 28
205#define fr29 29
206#define fr30 30
207#define fr31 31
208
209#define vr0 0
210#define vr1 1
211#define vr2 2
212#define vr3 3
213#define vr4 4
214#define vr5 5
215#define vr6 6
216#define vr7 7
217#define vr8 8
218#define vr9 9
219#define vr10 10
220#define vr11 11
221#define vr12 12
222#define vr13 13
223#define vr14 14
224#define vr15 15
225#define vr16 16
226#define vr17 17
227#define vr18 18
228#define vr19 19
229#define vr20 20
230#define vr21 21
231#define vr22 22
232#define vr23 23
233#define vr24 24
234#define vr25 25
235#define vr26 26
236#define vr27 27
237#define vr28 28
238#define vr29 29
239#define vr30 30
240#define vr31 31
241
242#endif /* _PPC64_PPC_ASM_H */
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
deleted file mode 100644
index 4146189006e3..000000000000
--- a/include/asm-ppc64/processor.h
+++ /dev/null
@@ -1,558 +0,0 @@
1#ifndef __ASM_PPC64_PROCESSOR_H
2#define __ASM_PPC64_PROCESSOR_H
3
4/*
5 * Copyright (C) 2001 PPC 64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/stringify.h>
14#ifndef __ASSEMBLY__
15#include <linux/config.h>
16#include <asm/atomic.h>
17#include <asm/ppcdebug.h>
18#include <asm/a.out.h>
19#endif
20#include <asm/ptrace.h>
21#include <asm/types.h>
22#include <asm/systemcfg.h>
23#include <asm/cputable.h>
24
25/* Machine State Register (MSR) Fields */
26#define MSR_SF_LG 63 /* Enable 64 bit mode */
27#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
28#define MSR_HV_LG 60 /* Hypervisor state */
29#define MSR_VEC_LG 25 /* Enable AltiVec */
30#define MSR_POW_LG 18 /* Enable Power Management */
31#define MSR_WE_LG 18 /* Wait State Enable */
32#define MSR_TGPR_LG 17 /* TLB Update registers in use */
33#define MSR_CE_LG 17 /* Critical Interrupt Enable */
34#define MSR_ILE_LG 16 /* Interrupt Little Endian */
35#define MSR_EE_LG 15 /* External Interrupt Enable */
36#define MSR_PR_LG 14 /* Problem State / Privilege Level */
37#define MSR_FP_LG 13 /* Floating Point enable */
38#define MSR_ME_LG 12 /* Machine Check Enable */
39#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
40#define MSR_SE_LG 10 /* Single Step */
41#define MSR_BE_LG 9 /* Branch Trace */
42#define MSR_DE_LG 9 /* Debug Exception Enable */
43#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
44#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
45#define MSR_IR_LG 5 /* Instruction Relocate */
46#define MSR_DR_LG 4 /* Data Relocate */
47#define MSR_PE_LG 3 /* Protection Enable */
48#define MSR_PX_LG 2 /* Protection Exclusive Mode */
49#define MSR_PMM_LG 2 /* Performance monitor */
50#define MSR_RI_LG 1 /* Recoverable Exception */
51#define MSR_LE_LG 0 /* Little Endian */
52
53#ifdef __ASSEMBLY__
54#define __MASK(X) (1<<(X))
55#else
56#define __MASK(X) (1UL<<(X))
57#endif
58
59#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
60#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
61#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
62#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
63#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
64#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
65#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
66#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
67#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
68#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
69#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
70#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
71#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
72#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
73#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
74#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
75#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
76#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
77#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
78#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
79#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
80#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
81#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
82#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
83#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
84#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
85
86#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
87#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
88
89#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
90#define MSR_USER64 MSR_USER32 | MSR_SF
91
92/* Floating Point Status and Control Register (FPSCR) Fields */
93
94#define FPSCR_FX 0x80000000 /* FPU exception summary */
95#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
96#define FPSCR_VX 0x20000000 /* Invalid operation summary */
97#define FPSCR_OX 0x10000000 /* Overflow exception summary */
98#define FPSCR_UX 0x08000000 /* Underflow exception summary */
99#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
100#define FPSCR_XX 0x02000000 /* Inexact exception summary */
101#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
102#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
103#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
104#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
105#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
106#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
107#define FPSCR_FR 0x00040000 /* Fraction rounded */
108#define FPSCR_FI 0x00020000 /* Fraction inexact */
109#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
110#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
111#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
112#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
113#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
114#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
115#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
116#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
117#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
118#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
119#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
120#define FPSCR_RN 0x00000003 /* FPU rounding control */
121
122/* Special Purpose Registers (SPRNs)*/
123
124#define SPRN_CTR 0x009 /* Count Register */
125#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
126#define DABR_TRANSLATION (1UL << 2)
127#define SPRN_DAR 0x013 /* Data Address Register */
128#define SPRN_DEC 0x016 /* Decrement Register */
129#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
130#define DSISR_NOHPTE 0x40000000 /* no translation found */
131#define DSISR_PROTFAULT 0x08000000 /* protection fault */
132#define DSISR_ISSTORE 0x02000000 /* access was a store */
133#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
134#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
135#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
136#define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */
137#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
138#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
139#define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */
140#define SPRN_HID4 0x3F4 /* 970 HID4 */
141#define SPRN_HID5 0x3F6 /* 970 HID5 */
142#define SPRN_HID6 0x3F9 /* BE HID 6 */
143#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
144#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
145#define SPRN_TSCR 0x399 /* Thread switch control on BE */
146#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
147#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
148#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
149#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
150#define SPRN_TSC 0x3FD /* Thread switch control on others */
151#define SPRN_TST 0x3FC /* Thread switch timeout on others */
152#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Regsiter */
153#define SPRN_LR 0x008 /* Link Register */
154#define SPRN_PIR 0x3FF /* Processor Identification Register */
155#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
156#define SPRN_PURR 0x135 /* Processor Utilization of Resources Register */
157#define SPRN_PVR 0x11F /* Processor Version Register */
158#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
159#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
160#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
161#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
162#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
163#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
164#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
165#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
166#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
167#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
168#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
169#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
170#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, W/O) */
171#define SPRN_TBWU 0x11D /* Time Base Write Upper Register (super, W/O) */
172#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
173#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
174#define SPRN_XER 0x001 /* Fixed Point Exception Register */
175#define SPRN_VRSAVE 0x100 /* Vector save */
176#define SPRN_CTRLF 0x088
177#define SPRN_CTRLT 0x098
178#define CTRL_RUNLATCH 0x1
179
180/* Performance monitor SPRs */
181#define SPRN_SIAR 780
182#define SPRN_SDAR 781
183#define SPRN_MMCRA 786
184#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
185#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
186#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
187#define SPRN_PMC1 787
188#define SPRN_PMC2 788
189#define SPRN_PMC3 789
190#define SPRN_PMC4 790
191#define SPRN_PMC5 791
192#define SPRN_PMC6 792
193#define SPRN_PMC7 793
194#define SPRN_PMC8 794
195#define SPRN_MMCR0 795
196#define MMCR0_FC 0x80000000UL /* freeze counters. set to 1 on a perfmon exception */
197#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
198#define MMCR0_KERNEL_DISABLE MMCR0_FCS
199#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
200#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
201#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
202#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
203#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
204#define MMCR0_FCECE 0x02000000UL /* freeze counters on enabled condition or event */
205/* time base exception enable */
206#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
207#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
208#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
209#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
210#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
211#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
212#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
213#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
214#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
215#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
216#define SPRN_MMCR1 798
217
218/* Short-hand versions for a number of the above SPRNs */
219
220#define CTR SPRN_CTR /* Counter Register */
221#define DAR SPRN_DAR /* Data Address Register */
222#define DABR SPRN_DABR /* Data Address Breakpoint Register */
223#define DEC SPRN_DEC /* Decrement Register */
224#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
225#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
226#define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */
227#define NIADORM SPRN_NIADORM /* NIA Dormant Register */
228#define TSC SPRN_TSC /* Thread switch control */
229#define TST SPRN_TST /* Thread switch timeout */
230#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
231#define L2CR SPRN_L2CR /* PPC 750 L2 control register */
232#define __LR SPRN_LR
233#define PVR SPRN_PVR /* Processor Version */
234#define PIR SPRN_PIR /* Processor ID */
235#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
236#define SDR1 SPRN_SDR1 /* MMU hash base register */
237#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
238#define SPR1 SPRN_SPRG1
239#define SPR2 SPRN_SPRG2
240#define SPR3 SPRN_SPRG3
241#define SPRG0 SPRN_SPRG0
242#define SPRG1 SPRN_SPRG1
243#define SPRG2 SPRN_SPRG2
244#define SPRG3 SPRN_SPRG3
245#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
246#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
247#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
248#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
249#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
250#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
251#define XER SPRN_XER
252
253/* Processor Version Register (PVR) field extraction */
254
255#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
256#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
257
258/* Processor Version Numbers */
259#define PV_NORTHSTAR 0x0033
260#define PV_PULSAR 0x0034
261#define PV_POWER4 0x0035
262#define PV_ICESTAR 0x0036
263#define PV_SSTAR 0x0037
264#define PV_POWER4p 0x0038
265#define PV_970 0x0039
266#define PV_POWER5 0x003A
267#define PV_POWER5p 0x003B
268#define PV_970FX 0x003C
269#define PV_630 0x0040
270#define PV_630p 0x0041
271#define PV_970MP 0x0044
272#define PV_BE 0x0070
273
274/* Platforms supported by PPC64 */
275#define PLATFORM_PSERIES 0x0100
276#define PLATFORM_PSERIES_LPAR 0x0101
277#define PLATFORM_ISERIES_LPAR 0x0201
278#define PLATFORM_LPAR 0x0001
279#define PLATFORM_POWERMAC 0x0400
280#define PLATFORM_MAPLE 0x0500
281#define PLATFORM_BPA 0x1000
282
283/* Compatibility with drivers coming from PPC32 world */
284#define _machine (systemcfg->platform)
285#define _MACH_Pmac PLATFORM_POWERMAC
286
287/*
288 * List of interrupt controllers.
289 */
290#define IC_INVALID 0
291#define IC_OPEN_PIC 1
292#define IC_PPC_XIC 2
293#define IC_BPA_IIC 3
294
295#define XGLUE(a,b) a##b
296#define GLUE(a,b) XGLUE(a,b)
297
298#ifdef __ASSEMBLY__
299
300#define _GLOBAL(name) \
301 .section ".text"; \
302 .align 2 ; \
303 .globl name; \
304 .globl GLUE(.,name); \
305 .section ".opd","aw"; \
306name: \
307 .quad GLUE(.,name); \
308 .quad .TOC.@tocbase; \
309 .quad 0; \
310 .previous; \
311 .type GLUE(.,name),@function; \
312GLUE(.,name):
313
314#define _KPROBE(name) \
315 .section ".kprobes.text","a"; \
316 .align 2 ; \
317 .globl name; \
318 .globl GLUE(.,name); \
319 .section ".opd","aw"; \
320name: \
321 .quad GLUE(.,name); \
322 .quad .TOC.@tocbase; \
323 .quad 0; \
324 .previous; \
325 .type GLUE(.,name),@function; \
326GLUE(.,name):
327
328#define _STATIC(name) \
329 .section ".text"; \
330 .align 2 ; \
331 .section ".opd","aw"; \
332name: \
333 .quad GLUE(.,name); \
334 .quad .TOC.@tocbase; \
335 .quad 0; \
336 .previous; \
337 .type GLUE(.,name),@function; \
338GLUE(.,name):
339
340#else /* __ASSEMBLY__ */
341
342/*
343 * Default implementation of macro that returns current
344 * instruction pointer ("program counter").
345 */
346#define current_text_addr() ({ __label__ _l; _l: &&_l;})
347
348/* Macros for setting and retrieving special purpose registers */
349
350#define mfmsr() ({unsigned long rval; \
351 asm volatile("mfmsr %0" : "=r" (rval)); rval;})
352
353#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
354 : : "r" (v))
355#define mtmsrd(v) __mtmsrd((v), 0)
356
357#define mfspr(rn) ({unsigned long rval; \
358 asm volatile("mfspr %0," __stringify(rn) \
359 : "=r" (rval)); rval;})
360#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
361
362#define mftb() ({unsigned long rval; \
363 asm volatile("mftb %0" : "=r" (rval)); rval;})
364
365#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
366#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
367
368#define mfasr() ({unsigned long rval; \
369 asm volatile("mfasr %0" : "=r" (rval)); rval;})
370
371static inline void set_tb(unsigned int upper, unsigned int lower)
372{
373 mttbl(0);
374 mttbu(upper);
375 mttbl(lower);
376}
377
378#define __get_SP() ({unsigned long sp; \
379 asm volatile("mr %0,1": "=r" (sp)); sp;})
380
381#ifdef __KERNEL__
382
383extern int have_of;
384extern u64 ppc64_interrupt_controller;
385
386struct task_struct;
387void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
388void release_thread(struct task_struct *);
389
390/* Prepare to copy thread state - unlazy all lazy status */
391extern void prepare_to_copy(struct task_struct *tsk);
392
393/* Create a new kernel thread. */
394extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
395
396/* Lazy FPU handling on uni-processor */
397extern struct task_struct *last_task_used_math;
398extern struct task_struct *last_task_used_altivec;
399
400/* 64-bit user address space is 44-bits (16TB user VM) */
401#define TASK_SIZE_USER64 (0x0000100000000000UL)
402
403/*
404 * 32-bit user address space is 4GB - 1 page
405 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
406 */
407#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
408
409#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
410 TASK_SIZE_USER32 : TASK_SIZE_USER64)
411
412/* This decides where the kernel will search for a free chunk of vm
413 * space during mmap's.
414 */
415#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
416#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
417
418#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
419 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
420
421typedef struct {
422 unsigned long seg;
423} mm_segment_t;
424
425struct thread_struct {
426 unsigned long ksp; /* Kernel stack pointer */
427 unsigned long ksp_vsid;
428 struct pt_regs *regs; /* Pointer to saved register state */
429 mm_segment_t fs; /* for get_fs() validation */
430 double fpr[32]; /* Complete floating point set */
431 unsigned long fpscr; /* Floating point status (plus pad) */
432 unsigned long fpexc_mode; /* Floating-point exception mode */
433 unsigned long start_tb; /* Start purr when proc switched in */
434 unsigned long accum_tb; /* Total accumilated purr for process */
435 unsigned long vdso_base; /* base of the vDSO library */
436 unsigned long dabr; /* Data address breakpoint register */
437#ifdef CONFIG_ALTIVEC
438 /* Complete AltiVec register set */
439 vector128 vr[32] __attribute((aligned(16)));
440 /* AltiVec status */
441 vector128 vscr __attribute((aligned(16)));
442 unsigned long vrsave;
443 int used_vr; /* set if process has used altivec */
444#endif /* CONFIG_ALTIVEC */
445};
446
447#define ARCH_MIN_TASKALIGN 16
448
449#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
450
451#define INIT_THREAD { \
452 .ksp = INIT_SP, \
453 .regs = (struct pt_regs *)INIT_SP - 1, \
454 .fs = KERNEL_DS, \
455 .fpr = {0}, \
456 .fpscr = 0, \
457 .fpexc_mode = MSR_FE0|MSR_FE1, \
458}
459
460/*
461 * Return saved PC of a blocked thread. For now, this is the "user" PC
462 */
463#define thread_saved_pc(tsk) \
464 ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
465
466unsigned long get_wchan(struct task_struct *p);
467
468#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
469#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
470
471/* Get/set floating-point exception mode */
472#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
473#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
474
475extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
476extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
477
478static inline unsigned int __unpack_fe01(unsigned long msr_bits)
479{
480 return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
481}
482
483static inline unsigned long __pack_fe01(unsigned int fpmode)
484{
485 return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
486}
487
488#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
489
490/*
491 * Prefetch macros.
492 */
493#define ARCH_HAS_PREFETCH
494#define ARCH_HAS_PREFETCHW
495#define ARCH_HAS_SPINLOCK_PREFETCH
496
497static inline void prefetch(const void *x)
498{
499 if (unlikely(!x))
500 return;
501
502 __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
503}
504
505static inline void prefetchw(const void *x)
506{
507 if (unlikely(!x))
508 return;
509
510 __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
511}
512
513#define spin_lock_prefetch(x) prefetchw(x)
514
515#define HAVE_ARCH_PICK_MMAP_LAYOUT
516
517static inline void ppc64_runlatch_on(void)
518{
519 unsigned long ctrl;
520
521 if (cpu_has_feature(CPU_FTR_CTRL)) {
522 ctrl = mfspr(SPRN_CTRLF);
523 ctrl |= CTRL_RUNLATCH;
524 mtspr(SPRN_CTRLT, ctrl);
525 }
526}
527
528static inline void ppc64_runlatch_off(void)
529{
530 unsigned long ctrl;
531
532 if (cpu_has_feature(CPU_FTR_CTRL)) {
533 ctrl = mfspr(SPRN_CTRLF);
534 ctrl &= ~CTRL_RUNLATCH;
535 mtspr(SPRN_CTRLT, ctrl);
536 }
537}
538
539#endif /* __KERNEL__ */
540
541#endif /* __ASSEMBLY__ */
542
543#ifdef __KERNEL__
544#define RUNLATCH_ON(REG) \
545BEGIN_FTR_SECTION \
546 mfspr (REG),SPRN_CTRLF; \
547 ori (REG),(REG),CTRL_RUNLATCH; \
548 mtspr SPRN_CTRLT,(REG); \
549END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
550#endif
551
552/*
553 * Number of entries in the SLB. If this ever changes we should handle
554 * it with a use a cpu feature fixup.
555 */
556#define SLB_NUM_ENTRIES 64
557
558#endif /* __ASM_PPC64_PROCESSOR_H */
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
index c02ec1d6b909..e8d0d2ab4c0f 100644
--- a/include/asm-ppc64/prom.h
+++ b/include/asm-ppc64/prom.h
@@ -14,6 +14,7 @@
14 * as published by the Free Software Foundation; either version 14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 */ 16 */
17#include <linux/config.h>
17#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
18#include <asm/atomic.h> 19#include <asm/atomic.h>
19 20
@@ -137,6 +138,9 @@ struct device_node {
137 struct kref kref; 138 struct kref kref;
138 unsigned long _flags; 139 unsigned long _flags;
139 void *data; 140 void *data;
141#ifdef CONFIG_PPC_ISERIES
142 struct list_head Device_List;
143#endif
140}; 144};
141 145
142extern struct device_node *of_chosen; 146extern struct device_node *of_chosen;
diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h
index d86f742e9a21..c5e9052e7967 100644
--- a/include/asm-ppc64/smp.h
+++ b/include/asm-ppc64/smp.h
@@ -77,7 +77,6 @@ extern int smt_enabled_at_boot;
77 77
78extern int smp_mpic_probe(void); 78extern int smp_mpic_probe(void);
79extern void smp_mpic_setup_cpu(int cpu); 79extern void smp_mpic_setup_cpu(int cpu);
80extern void smp_mpic_message_pass(int target, int msg);
81extern void smp_generic_kick_cpu(int nr); 80extern void smp_generic_kick_cpu(int nr);
82 81
83extern void smp_generic_give_timebase(void); 82extern void smp_generic_give_timebase(void);
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 375015c62f20..2e17ef7dbeb4 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -13,7 +13,7 @@
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/hw_irq.h> 15#include <asm/hw_irq.h>
16#include <asm/memory.h> 16#include <asm/synch.h>
17 17
18/* 18/*
19 * Memory barrier. 19 * Memory barrier.
@@ -48,7 +48,7 @@
48#ifdef CONFIG_SMP 48#ifdef CONFIG_SMP
49#define smp_mb() mb() 49#define smp_mb() mb()
50#define smp_rmb() rmb() 50#define smp_rmb() rmb()
51#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory") 51#define smp_wmb() eieio()
52#define smp_read_barrier_depends() read_barrier_depends() 52#define smp_read_barrier_depends() read_barrier_depends()
53#else 53#else
54#define smp_mb() __asm__ __volatile__("": : :"memory") 54#define smp_mb() __asm__ __volatile__("": : :"memory")
@@ -131,6 +131,10 @@ static inline void flush_altivec_to_thread(struct task_struct *t)
131} 131}
132#endif 132#endif
133 133
134static inline void flush_spe_to_thread(struct task_struct *t)
135{
136}
137
134extern int mem_init_done; /* set on boot once kmalloc can be called */ 138extern int mem_init_done; /* set on boot once kmalloc can be called */
135 139
136/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */ 140/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
@@ -144,12 +148,7 @@ struct thread_struct;
144extern struct task_struct * _switch(struct thread_struct *prev, 148extern struct task_struct * _switch(struct thread_struct *prev,
145 struct thread_struct *next); 149 struct thread_struct *next);
146 150
147static inline int __is_processor(unsigned long pv) 151extern int powersave_nap; /* set if nap mode can be used in idle loop */
148{
149 unsigned long pvr;
150 asm("mfspr %0, 0x11F" : "=r" (pvr));
151 return(PVR_VER(pvr) == pv);
152}
153 152
154/* 153/*
155 * Atomic exchange 154 * Atomic exchange
diff --git a/include/asm-ppc64/tce.h b/include/asm-ppc64/tce.h
new file mode 100644
index 000000000000..d40b6b42ab35
--- /dev/null
+++ b/include/asm-ppc64/tce.h
@@ -0,0 +1,64 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
3 * Rewrite, cleanup:
4 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _ASM_TCE_H
22#define _ASM_TCE_H
23
24/*
25 * Tces come in two formats, one for the virtual bus and a different
26 * format for PCI
27 */
28#define TCE_VB 0
29#define TCE_PCI 1
30
31/* TCE page size is 4096 bytes (1 << 12) */
32
33#define TCE_SHIFT 12
34#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
35#define TCE_PAGE_FACTOR (PAGE_SHIFT - TCE_SHIFT)
36
37
38/* tce_entry
39 * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
40 * abstracted so layout is irrelevant.
41 */
42union tce_entry {
43 unsigned long te_word;
44 struct {
45 unsigned int tb_cacheBits :6; /* Cache hash bits - not used */
46 unsigned int tb_rsvd :6;
47 unsigned long tb_rpn :40; /* Real page number */
48 unsigned int tb_valid :1; /* Tce is valid (vb only) */
49 unsigned int tb_allio :1; /* Tce is valid for all lps (vb only) */
50 unsigned int tb_lpindex :8; /* LpIndex for user of TCE (vb only) */
51 unsigned int tb_pciwr :1; /* Write allowed (pci only) */
52 unsigned int tb_rdwr :1; /* Read allowed (pci), Write allowed (vb) */
53 } te_bits;
54#define te_cacheBits te_bits.tb_cacheBits
55#define te_rpn te_bits.tb_rpn
56#define te_valid te_bits.tb_valid
57#define te_allio te_bits.tb_allio
58#define te_lpindex te_bits.tb_lpindex
59#define te_pciwr te_bits.tb_pciwr
60#define te_rdwr te_bits.tb_rdwr
61};
62
63
64#endif
diff --git a/include/asm-ppc64/time.h b/include/asm-ppc64/time.h
deleted file mode 100644
index c6c762cad8b0..000000000000
--- a/include/asm-ppc64/time.h
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * Common time prototypes and such for all ppc machines.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef __PPC64_TIME_H
14#define __PPC64_TIME_H
15
16#ifdef __KERNEL__
17#include <linux/config.h>
18#include <linux/types.h>
19#include <linux/mc146818rtc.h>
20
21#include <asm/processor.h>
22#include <asm/paca.h>
23#include <asm/iSeries/HvCall.h>
24
25/* time.c */
26extern unsigned long tb_ticks_per_jiffy;
27extern unsigned long tb_ticks_per_usec;
28extern unsigned long tb_ticks_per_sec;
29extern unsigned long tb_to_xs;
30extern unsigned tb_to_us;
31extern unsigned long tb_last_stamp;
32
33struct rtc_time;
34extern void to_tm(int tim, struct rtc_time * tm);
35extern time_t last_rtc_update;
36
37void generic_calibrate_decr(void);
38void setup_default_decr(void);
39
40/* Some sane defaults: 125 MHz timebase, 1GHz processor */
41extern unsigned long ppc_proc_freq;
42#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
43extern unsigned long ppc_tb_freq;
44#define DEFAULT_TB_FREQ 125000000UL
45
46/*
47 * By putting all of this stuff into a single struct we
48 * reduce the number of cache lines touched by do_gettimeofday.
49 * Both by collecting all of the data in one cache line and
50 * by touching only one TOC entry
51 */
52struct gettimeofday_vars {
53 unsigned long tb_to_xs;
54 unsigned long stamp_xsec;
55 unsigned long tb_orig_stamp;
56};
57
58struct gettimeofday_struct {
59 unsigned long tb_ticks_per_sec;
60 struct gettimeofday_vars vars[2];
61 struct gettimeofday_vars * volatile varp;
62 unsigned var_idx;
63 unsigned tb_to_us;
64};
65
66struct div_result {
67 unsigned long result_high;
68 unsigned long result_low;
69};
70
71int via_calibrate_decr(void);
72
73static __inline__ unsigned long get_tb(void)
74{
75 return mftb();
76}
77
78/* Accessor functions for the decrementer register. */
79static __inline__ unsigned int get_dec(void)
80{
81 return (mfspr(SPRN_DEC));
82}
83
84static __inline__ void set_dec(int val)
85{
86#ifdef CONFIG_PPC_ISERIES
87 struct paca_struct *lpaca = get_paca();
88 int cur_dec;
89
90 if (lpaca->lppaca.shared_proc) {
91 lpaca->lppaca.virtual_decr = val;
92 cur_dec = get_dec();
93 if (cur_dec > val)
94 HvCall_setVirtualDecr();
95 } else
96#endif
97 mtspr(SPRN_DEC, val);
98}
99
100static inline unsigned long tb_ticks_since(unsigned long tstamp)
101{
102 return get_tb() - tstamp;
103}
104
105#define mulhwu(x,y) \
106({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
107#define mulhdu(x,y) \
108({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
109
110
111unsigned mulhwu_scale_factor(unsigned, unsigned);
112void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
113 unsigned divisor, struct div_result *dr );
114
115/* Used to store Processor Utilization register (purr) values */
116
117struct cpu_usage {
118 u64 current_tb; /* Holds the current purr register values */
119};
120
121DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
122
123#endif /* __KERNEL__ */
124#endif /* __PPC64_TIME_H */
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 74271d7c1d16..626f505c6ee3 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -20,10 +20,8 @@
20struct mm_struct; 20struct mm_struct;
21struct ppc64_tlb_batch { 21struct ppc64_tlb_batch {
22 unsigned long index; 22 unsigned long index;
23 unsigned long context;
24 struct mm_struct *mm; 23 struct mm_struct *mm;
25 pte_t pte[PPC64_TLB_BATCH_NR]; 24 pte_t pte[PPC64_TLB_BATCH_NR];
26 unsigned long addr[PPC64_TLB_BATCH_NR];
27 unsigned long vaddr[PPC64_TLB_BATCH_NR]; 25 unsigned long vaddr[PPC64_TLB_BATCH_NR];
28 unsigned int large; 26 unsigned int large;
29}; 27};
@@ -48,8 +46,7 @@ static inline void flush_tlb_pending(void)
48#define flush_tlb_kernel_range(start, end) flush_tlb_pending() 46#define flush_tlb_kernel_range(start, end) flush_tlb_pending()
49#define flush_tlb_pgtables(mm, start, end) do { } while (0) 47#define flush_tlb_pgtables(mm, start, end) do { } while (0)
50 48
51extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte, 49extern void flush_hash_page(unsigned long va, pte_t pte, int local);
52 int local); 50void flush_hash_range(unsigned long number, int local);
53void flush_hash_range(unsigned long context, unsigned long number, int local);
54 51
55#endif /* _PPC64_TLBFLUSH_H */ 52#endif /* _PPC64_TLBFLUSH_H */
diff --git a/include/asm-ppc64/udbg.h b/include/asm-ppc64/udbg.h
index c786604aef02..8192fb8541cc 100644
--- a/include/asm-ppc64/udbg.h
+++ b/include/asm-ppc64/udbg.h
@@ -28,4 +28,7 @@ extern unsigned long udbg_ifdebug(unsigned long flags);
28extern void __init ppcdbg_initialize(void); 28extern void __init ppcdbg_initialize(void);
29 29
30extern void udbg_init_uart(void __iomem *comport, unsigned int speed); 30extern void udbg_init_uart(void __iomem *comport, unsigned int speed);
31
32struct device_node;
33extern void udbg_init_scc(struct device_node *np);
31#endif 34#endif
diff --git a/include/asm-ppc64/uninorth.h b/include/asm-ppc64/uninorth.h
deleted file mode 100644
index 7ad7059f2c80..000000000000
--- a/include/asm-ppc64/uninorth.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-ppc/uninorth.h>
2
diff --git a/include/asm-ppc64/unistd.h b/include/asm-ppc64/unistd.h
deleted file mode 100644
index 977bc980c1af..000000000000
--- a/include/asm-ppc64/unistd.h
+++ /dev/null
@@ -1,487 +0,0 @@
1#ifndef _ASM_PPC_UNISTD_H_
2#define _ASM_PPC_UNISTD_H_
3
4/*
5 * This file contains the system call numbers.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#define __NR_restart_syscall 0
14#define __NR_exit 1
15#define __NR_fork 2
16#define __NR_read 3
17#define __NR_write 4
18#define __NR_open 5
19#define __NR_close 6
20#define __NR_waitpid 7
21#define __NR_creat 8
22#define __NR_link 9
23#define __NR_unlink 10
24#define __NR_execve 11
25#define __NR_chdir 12
26#define __NR_time 13
27#define __NR_mknod 14
28#define __NR_chmod 15
29#define __NR_lchown 16
30#define __NR_break 17
31#define __NR_oldstat 18
32#define __NR_lseek 19
33#define __NR_getpid 20
34#define __NR_mount 21
35#define __NR_umount 22
36#define __NR_setuid 23
37#define __NR_getuid 24
38#define __NR_stime 25
39#define __NR_ptrace 26
40#define __NR_alarm 27
41#define __NR_oldfstat 28
42#define __NR_pause 29
43#define __NR_utime 30
44#define __NR_stty 31
45#define __NR_gtty 32
46#define __NR_access 33
47#define __NR_nice 34
48#define __NR_ftime 35
49#define __NR_sync 36
50#define __NR_kill 37
51#define __NR_rename 38
52#define __NR_mkdir 39
53#define __NR_rmdir 40
54#define __NR_dup 41
55#define __NR_pipe 42
56#define __NR_times 43
57#define __NR_prof 44
58#define __NR_brk 45
59#define __NR_setgid 46
60#define __NR_getgid 47
61#define __NR_signal 48
62#define __NR_geteuid 49
63#define __NR_getegid 50
64#define __NR_acct 51
65#define __NR_umount2 52
66#define __NR_lock 53
67#define __NR_ioctl 54
68#define __NR_fcntl 55
69#define __NR_mpx 56
70#define __NR_setpgid 57
71#define __NR_ulimit 58
72#define __NR_oldolduname 59
73#define __NR_umask 60
74#define __NR_chroot 61
75#define __NR_ustat 62
76#define __NR_dup2 63
77#define __NR_getppid 64
78#define __NR_getpgrp 65
79#define __NR_setsid 66
80#define __NR_sigaction 67
81#define __NR_sgetmask 68
82#define __NR_ssetmask 69
83#define __NR_setreuid 70
84#define __NR_setregid 71
85#define __NR_sigsuspend 72
86#define __NR_sigpending 73
87#define __NR_sethostname 74
88#define __NR_setrlimit 75
89#define __NR_getrlimit 76
90#define __NR_getrusage 77
91#define __NR_gettimeofday 78
92#define __NR_settimeofday 79
93#define __NR_getgroups 80
94#define __NR_setgroups 81
95#define __NR_select 82
96#define __NR_symlink 83
97#define __NR_oldlstat 84
98#define __NR_readlink 85
99#define __NR_uselib 86
100#define __NR_swapon 87
101#define __NR_reboot 88
102#define __NR_readdir 89
103#define __NR_mmap 90
104#define __NR_munmap 91
105#define __NR_truncate 92
106#define __NR_ftruncate 93
107#define __NR_fchmod 94
108#define __NR_fchown 95
109#define __NR_getpriority 96
110#define __NR_setpriority 97
111#define __NR_profil 98
112#define __NR_statfs 99
113#define __NR_fstatfs 100
114#define __NR_ioperm 101
115#define __NR_socketcall 102
116#define __NR_syslog 103
117#define __NR_setitimer 104
118#define __NR_getitimer 105
119#define __NR_stat 106
120#define __NR_lstat 107
121#define __NR_fstat 108
122#define __NR_olduname 109
123#define __NR_iopl 110
124#define __NR_vhangup 111
125#define __NR_idle 112
126#define __NR_vm86 113
127#define __NR_wait4 114
128#define __NR_swapoff 115
129#define __NR_sysinfo 116
130#define __NR_ipc 117
131#define __NR_fsync 118
132#define __NR_sigreturn 119
133#define __NR_clone 120
134#define __NR_setdomainname 121
135#define __NR_uname 122
136#define __NR_modify_ldt 123
137#define __NR_adjtimex 124
138#define __NR_mprotect 125
139#define __NR_sigprocmask 126
140#define __NR_create_module 127
141#define __NR_init_module 128
142#define __NR_delete_module 129
143#define __NR_get_kernel_syms 130
144#define __NR_quotactl 131
145#define __NR_getpgid 132
146#define __NR_fchdir 133
147#define __NR_bdflush 134
148#define __NR_sysfs 135
149#define __NR_personality 136
150#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
151#define __NR_setfsuid 138
152#define __NR_setfsgid 139
153#define __NR__llseek 140
154#define __NR_getdents 141
155#define __NR__newselect 142
156#define __NR_flock 143
157#define __NR_msync 144
158#define __NR_readv 145
159#define __NR_writev 146
160#define __NR_getsid 147
161#define __NR_fdatasync 148
162#define __NR__sysctl 149
163#define __NR_mlock 150
164#define __NR_munlock 151
165#define __NR_mlockall 152
166#define __NR_munlockall 153
167#define __NR_sched_setparam 154
168#define __NR_sched_getparam 155
169#define __NR_sched_setscheduler 156
170#define __NR_sched_getscheduler 157
171#define __NR_sched_yield 158
172#define __NR_sched_get_priority_max 159
173#define __NR_sched_get_priority_min 160
174#define __NR_sched_rr_get_interval 161
175#define __NR_nanosleep 162
176#define __NR_mremap 163
177#define __NR_setresuid 164
178#define __NR_getresuid 165
179#define __NR_query_module 166
180#define __NR_poll 167
181#define __NR_nfsservctl 168
182#define __NR_setresgid 169
183#define __NR_getresgid 170
184#define __NR_prctl 171
185#define __NR_rt_sigreturn 172
186#define __NR_rt_sigaction 173
187#define __NR_rt_sigprocmask 174
188#define __NR_rt_sigpending 175
189#define __NR_rt_sigtimedwait 176
190#define __NR_rt_sigqueueinfo 177
191#define __NR_rt_sigsuspend 178
192#define __NR_pread64 179
193#define __NR_pwrite64 180
194#define __NR_chown 181
195#define __NR_getcwd 182
196#define __NR_capget 183
197#define __NR_capset 184
198#define __NR_sigaltstack 185
199#define __NR_sendfile 186
200#define __NR_getpmsg 187 /* some people actually want streams */
201#define __NR_putpmsg 188 /* some people actually want streams */
202#define __NR_vfork 189
203#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
204#define __NR_readahead 191
205/* #define __NR_mmap2 192 32bit only */
206/* #define __NR_truncate64 193 32bit only */
207/* #define __NR_ftruncate64 194 32bit only */
208/* #define __NR_stat64 195 32bit only */
209/* #define __NR_lstat64 196 32bit only */
210/* #define __NR_fstat64 197 32bit only */
211#define __NR_pciconfig_read 198
212#define __NR_pciconfig_write 199
213#define __NR_pciconfig_iobase 200
214#define __NR_multiplexer 201
215#define __NR_getdents64 202
216#define __NR_pivot_root 203
217/* #define __NR_fcntl64 204 32bit only */
218#define __NR_madvise 205
219#define __NR_mincore 206
220#define __NR_gettid 207
221#define __NR_tkill 208
222#define __NR_setxattr 209
223#define __NR_lsetxattr 210
224#define __NR_fsetxattr 211
225#define __NR_getxattr 212
226#define __NR_lgetxattr 213
227#define __NR_fgetxattr 214
228#define __NR_listxattr 215
229#define __NR_llistxattr 216
230#define __NR_flistxattr 217
231#define __NR_removexattr 218
232#define __NR_lremovexattr 219
233#define __NR_fremovexattr 220
234#define __NR_futex 221
235#define __NR_sched_setaffinity 222
236#define __NR_sched_getaffinity 223
237/* 224 currently unused */
238#define __NR_tuxcall 225
239/* #define __NR_sendfile64 226 32bit only */
240#define __NR_io_setup 227
241#define __NR_io_destroy 228
242#define __NR_io_getevents 229
243#define __NR_io_submit 230
244#define __NR_io_cancel 231
245#define __NR_set_tid_address 232
246#define __NR_fadvise64 233
247#define __NR_exit_group 234
248#define __NR_lookup_dcookie 235
249#define __NR_epoll_create 236
250#define __NR_epoll_ctl 237
251#define __NR_epoll_wait 238
252#define __NR_remap_file_pages 239
253#define __NR_timer_create 240
254#define __NR_timer_settime 241
255#define __NR_timer_gettime 242
256#define __NR_timer_getoverrun 243
257#define __NR_timer_delete 244
258#define __NR_clock_settime 245
259#define __NR_clock_gettime 246
260#define __NR_clock_getres 247
261#define __NR_clock_nanosleep 248
262#define __NR_swapcontext 249
263#define __NR_tgkill 250
264#define __NR_utimes 251
265#define __NR_statfs64 252
266#define __NR_fstatfs64 253
267/* #define __NR_fadvise64_64 254 32bit only */
268#define __NR_rtas 255
269/* Number 256 is reserved for sys_debug_setcontext */
270/* Number 257 is reserved for vserver */
271/* 258 currently unused */
272#define __NR_mbind 259
273#define __NR_get_mempolicy 260
274#define __NR_set_mempolicy 261
275#define __NR_mq_open 262
276#define __NR_mq_unlink 263
277#define __NR_mq_timedsend 264
278#define __NR_mq_timedreceive 265
279#define __NR_mq_notify 266
280#define __NR_mq_getsetattr 267
281#define __NR_kexec_load 268
282#define __NR_add_key 269
283#define __NR_request_key 270
284#define __NR_keyctl 271
285#define __NR_waitid 272
286#define __NR_ioprio_set 273
287#define __NR_ioprio_get 274
288#define __NR_inotify_init 275
289#define __NR_inotify_add_watch 276
290#define __NR_inotify_rm_watch 277
291
292#define __NR_syscalls 278
293#ifdef __KERNEL__
294#define NR_syscalls __NR_syscalls
295#endif
296
297#ifndef __ASSEMBLY__
298
299/* On powerpc a system call basically clobbers the same registers like a
300 * function call, with the exception of LR (which is needed for the
301 * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
302 * an error return status).
303 */
304
305#define __syscall_nr(nr, type, name, args...) \
306 unsigned long __sc_ret, __sc_err; \
307 { \
308 register unsigned long __sc_0 __asm__ ("r0"); \
309 register unsigned long __sc_3 __asm__ ("r3"); \
310 register unsigned long __sc_4 __asm__ ("r4"); \
311 register unsigned long __sc_5 __asm__ ("r5"); \
312 register unsigned long __sc_6 __asm__ ("r6"); \
313 register unsigned long __sc_7 __asm__ ("r7"); \
314 register unsigned long __sc_8 __asm__ ("r8"); \
315 \
316 __sc_loadargs_##nr(name, args); \
317 __asm__ __volatile__ \
318 ("sc \n\t" \
319 "mfcr %0 " \
320 : "=&r" (__sc_0), \
321 "=&r" (__sc_3), "=&r" (__sc_4), \
322 "=&r" (__sc_5), "=&r" (__sc_6), \
323 "=&r" (__sc_7), "=&r" (__sc_8) \
324 : __sc_asm_input_##nr \
325 : "cr0", "ctr", "memory", \
326 "r9", "r10","r11", "r12"); \
327 __sc_ret = __sc_3; \
328 __sc_err = __sc_0; \
329 } \
330 if (__sc_err & 0x10000000) \
331 { \
332 errno = __sc_ret; \
333 __sc_ret = -1; \
334 } \
335 return (type) __sc_ret
336
337#define __sc_loadargs_0(name, dummy...) \
338 __sc_0 = __NR_##name
339#define __sc_loadargs_1(name, arg1) \
340 __sc_loadargs_0(name); \
341 __sc_3 = (unsigned long) (arg1)
342#define __sc_loadargs_2(name, arg1, arg2) \
343 __sc_loadargs_1(name, arg1); \
344 __sc_4 = (unsigned long) (arg2)
345#define __sc_loadargs_3(name, arg1, arg2, arg3) \
346 __sc_loadargs_2(name, arg1, arg2); \
347 __sc_5 = (unsigned long) (arg3)
348#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \
349 __sc_loadargs_3(name, arg1, arg2, arg3); \
350 __sc_6 = (unsigned long) (arg4)
351#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \
352 __sc_loadargs_4(name, arg1, arg2, arg3, arg4); \
353 __sc_7 = (unsigned long) (arg5)
354#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
355 __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5); \
356 __sc_8 = (unsigned long) (arg6)
357
358#define __sc_asm_input_0 "0" (__sc_0)
359#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
360#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
361#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
362#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
363#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
364#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
365
366#define _syscall0(type,name) \
367type name(void) \
368{ \
369 __syscall_nr(0, type, name); \
370}
371
372#define _syscall1(type,name,type1,arg1) \
373type name(type1 arg1) \
374{ \
375 __syscall_nr(1, type, name, arg1); \
376}
377
378#define _syscall2(type,name,type1,arg1,type2,arg2) \
379type name(type1 arg1, type2 arg2) \
380{ \
381 __syscall_nr(2, type, name, arg1, arg2); \
382}
383
384#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
385type name(type1 arg1, type2 arg2, type3 arg3) \
386{ \
387 __syscall_nr(3, type, name, arg1, arg2, arg3); \
388}
389
390#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
391type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
392{ \
393 __syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \
394}
395
396#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
397type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
398{ \
399 __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
400}
401#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
402type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
403{ \
404 __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
405}
406
407#ifdef __KERNEL_SYSCALLS__
408
409/*
410 * Forking from kernel space will result in the child getting a new,
411 * empty kernel stack area. Thus the child cannot access automatic
412 * variables set in the parent unless they are in registers, and the
413 * procedure where the fork was done cannot return to its caller in
414 * the child.
415 */
416
417/*
418 * System call prototypes.
419 */
420static inline _syscall3(int, execve, __const__ char *, file, char **, argv,
421 char **,envp)
422
423#endif /* __KERNEL_SYSCALLS__ */
424
425#ifdef __KERNEL__
426
427#include <linux/types.h>
428#include <linux/compiler.h>
429#include <linux/linkage.h>
430
431#define __ARCH_WANT_IPC_PARSE_VERSION
432#define __ARCH_WANT_OLD_READDIR
433#define __ARCH_WANT_STAT64
434#define __ARCH_WANT_SYS_ALARM
435#define __ARCH_WANT_SYS_GETHOSTNAME
436#define __ARCH_WANT_SYS_PAUSE
437#define __ARCH_WANT_SYS_SGETMASK
438#define __ARCH_WANT_SYS_SIGNAL
439#define __ARCH_WANT_SYS_TIME
440#define __ARCH_WANT_COMPAT_SYS_TIME
441#define __ARCH_WANT_SYS_UTIME
442#define __ARCH_WANT_SYS_WAITPID
443#define __ARCH_WANT_SYS_SOCKETCALL
444#define __ARCH_WANT_SYS_FADVISE64
445#define __ARCH_WANT_SYS_GETPGRP
446#define __ARCH_WANT_SYS_LLSEEK
447#define __ARCH_WANT_SYS_NICE
448#define __ARCH_WANT_SYS_OLD_GETRLIMIT
449#define __ARCH_WANT_SYS_OLDUMOUNT
450#define __ARCH_WANT_SYS_SIGPENDING
451#define __ARCH_WANT_SYS_SIGPROCMASK
452#define __ARCH_WANT_SYS_RT_SIGACTION
453
454unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
455 unsigned long flags, unsigned long fd, off_t offset);
456struct pt_regs;
457int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
458 unsigned long a3, unsigned long a4, unsigned long a5,
459 struct pt_regs *regs);
460int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3,
461 unsigned long p4, unsigned long p5, unsigned long p6,
462 struct pt_regs *regs);
463int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
464 unsigned long p4, unsigned long p5, unsigned long p6,
465 struct pt_regs *regs);
466int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
467 unsigned long p4, unsigned long p5, unsigned long p6,
468 struct pt_regs *regs);
469int sys_pipe(int __user *fildes);
470int sys_ptrace(long request, long pid, long addr, long data);
471struct sigaction;
472long sys_rt_sigaction(int sig, const struct sigaction __user *act,
473 struct sigaction __user *oact, size_t sigsetsize);
474
475/*
476 * "Conditional" syscalls
477 *
478 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
479 * but it doesn't work on all toolchains, so we just do it by hand
480 */
481#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
482
483#endif /* __KERNEL__ */
484
485#endif /* __ASSEMBLY__ */
486
487#endif /* _ASM_PPC_UNISTD_H_ */
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3ff7b925c387..51df337b37db 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -117,14 +117,16 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
117 /* 117 /*
118 * No locking required for CPU-local interrupts: 118 * No locking required for CPU-local interrupts:
119 */ 119 */
120 desc->handler->ack(irq); 120 if (desc->handler->ack)
121 desc->handler->ack(irq);
121 action_ret = handle_IRQ_event(irq, regs, desc->action); 122 action_ret = handle_IRQ_event(irq, regs, desc->action);
122 desc->handler->end(irq); 123 desc->handler->end(irq);
123 return 1; 124 return 1;
124 } 125 }
125 126
126 spin_lock(&desc->lock); 127 spin_lock(&desc->lock);
127 desc->handler->ack(irq); 128 if (desc->handler->ack)
129 desc->handler->ack(irq);
128 /* 130 /*
129 * REPLAY is when Linux resends an IRQ that was dropped earlier 131 * REPLAY is when Linux resends an IRQ that was dropped earlier
130 * WAITING is used by probe to mark irqs that are being tested 132 * WAITING is used by probe to mark irqs that are being tested