Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 64
-rw-r--r--  arch/powerpc/Kconfig.debug | 9
-rw-r--r--  arch/powerpc/Makefile | 1
-rw-r--r--  arch/powerpc/boot/Makefile | 1
-rw-r--r--  arch/powerpc/boot/devtree.c | 2
-rw-r--r--  arch/powerpc/boot/dts/asp834x-redboot.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/bamboo.dts | 3
-rw-r--r--  arch/powerpc/boot/dts/canyonlands.dts | 14
-rw-r--r--  arch/powerpc/boot/dts/gef_sbc610.dts | 11
-rw-r--r--  arch/powerpc/boot/dts/ksi8560.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/kuroboxHD.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/kuroboxHG.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/lite5200.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/lite5200b.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/motionpro.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8313erdb.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/mpc8315erdb.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/mpc8349emitx.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8349emitxgp.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/mpc834x_mds.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8377_mds.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8377_rdb.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/mpc8378_mds.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8378_rdb.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8379_mds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8379_rdb.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8536ds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8540ads.dts | 31
-rw-r--r--  arch/powerpc/boot/dts/mpc8541cds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8544ds.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/mpc8548cds.dts | 44
-rw-r--r--  arch/powerpc/boot/dts/mpc8555cds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8560ads.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8568mds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds.dts | 158
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts | 483
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts | 234
-rw-r--r--  arch/powerpc/boot/dts/mpc8641_hpcn.dts | 45
-rw-r--r--  arch/powerpc/boot/dts/pcm030.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/sbc8349.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/sbc8548.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/sbc8560.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/sbc8641d.dts | 44
-rw-r--r--  arch/powerpc/boot/dts/stx_gp3_8560.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/tqm5200.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/tqm8540.dts | 28
-rw-r--r--  arch/powerpc/boot/dts/tqm8541.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/tqm8548-bigflash.dts | 44
-rw-r--r--  arch/powerpc/boot/dts/tqm8548.dts | 44
-rw-r--r--  arch/powerpc/boot/dts/tqm8555.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/tqm8560.dts | 18
-rw-r--r--  arch/powerpc/boot/libfdt-wrapper.c | 2
-rw-r--r--  arch/powerpc/configs/86xx/gef_sbc610_defconfig | 8
-rw-r--r--  arch/powerpc/configs/ppc44x_defconfig | 12
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 18
-rw-r--r--  arch/powerpc/include/asm/bug.h | 11
-rw-r--r--  arch/powerpc/include/asm/byteorder.h | 38
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 113
-rw-r--r--  arch/powerpc/include/asm/dcr-native.h | 63
-rw-r--r--  arch/powerpc/include/asm/dcr.h | 4
-rw-r--r--  arch/powerpc/include/asm/device.h | 12
-rw-r--r--  arch/powerpc/include/asm/disassemble.h | 80
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 156
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 8
-rw-r--r--  arch/powerpc/include/asm/elf.h | 2
-rw-r--r--  arch/powerpc/include/asm/feature-fixups.h | 30
-rw-r--r--  arch/powerpc/include/asm/highmem.h | 23
-rw-r--r--  arch/powerpc/include/asm/io.h | 7
-rw-r--r--  arch/powerpc/include/asm/kdump.h | 13
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 15
-rw-r--r--  arch/powerpc/include/asm/kvm_44x.h | 61
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 116
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 83
-rw-r--r--  arch/powerpc/include/asm/local.h | 4
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu-40x.h | 5
-rw-r--r--  arch/powerpc/include/asm/mmu-44x.h | 23
-rw-r--r--  arch/powerpc/include/asm/mmu-8xx.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu-fsl-booke.h | 7
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 57
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 257
-rw-r--r--  arch/powerpc/include/asm/mpc52xx.h | 19
-rw-r--r--  arch/powerpc/include/asm/mpc52xx_psc.h | 11
-rw-r--r--  arch/powerpc/include/asm/mutex.h | 135
-rw-r--r--  arch/powerpc/include/asm/page.h | 13
-rw-r--r--  arch/powerpc/include/asm/page_32.h | 7
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 30
-rw-r--r--  arch/powerpc/include/asm/pci.h | 15
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 11
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 34
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 41
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h | 42
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 15
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 26
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 4
-rw-r--r--  arch/powerpc/include/asm/processor.h | 8
-rw-r--r--  arch/powerpc/include/asm/prom.h | 3
-rw-r--r--  arch/powerpc/include/asm/ps3.h | 56
-rw-r--r--  arch/powerpc/include/asm/ps3av.h | 4
-rw-r--r--  arch/powerpc/include/asm/reg.h | 4
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 1
-rw-r--r--  arch/powerpc/include/asm/sfp-machine.h | 58
-rw-r--r--  arch/powerpc/include/asm/smp.h | 7
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 2
-rw-r--r--  arch/powerpc/include/asm/synch.h | 4
-rw-r--r--  arch/powerpc/include/asm/system.h | 24
-rw-r--r--  arch/powerpc/include/asm/time.h | 20
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h | 87
-rw-r--r--  arch/powerpc/include/asm/vdso_datapage.h | 3
-rw-r--r--  arch/powerpc/kernel/Makefile | 4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 27
-rw-r--r--  arch/powerpc/kernel/cputable.c | 117
-rw-r--r--  arch/powerpc/kernel/dma.c | 26
-rw-r--r--  arch/powerpc/kernel/head_32.S | 31
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 34
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 107
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 3
-rw-r--r--  arch/powerpc/kernel/init_task.c | 1
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 91
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 78
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 238
-rw-r--r--  arch/powerpc/kernel/module.c | 6
-rw-r--r--  arch/powerpc/kernel/of_device.c | 18
-rw-r--r--  arch/powerpc/kernel/paca.c | 1
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 300
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 108
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 134
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 9
-rw-r--r--  arch/powerpc/kernel/ppc_save_regs.S (renamed from arch/powerpc/xmon/setjmp.S) | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 4
-rw-r--r--  arch/powerpc/kernel/prom.c | 47
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 5
-rw-r--r--  arch/powerpc/kernel/rtas.c | 26
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 48
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 15
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 5
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c | 12
-rw-r--r--  arch/powerpc/kernel/smp.c | 71
-rw-r--r--  arch/powerpc/kernel/swsusp.c | 2
-rw-r--r--  arch/powerpc/kernel/swsusp_32.S | 6
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 7
-rw-r--r--  arch/powerpc/kernel/time.c | 36
-rw-r--r--  arch/powerpc/kernel/traps.c | 62
-rw-r--r--  arch/powerpc/kernel/vdso.c | 13
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S | 208
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S | 3
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S | 141
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S | 3
-rw-r--r--  arch/powerpc/kernel/vio.c | 12
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/powerpc/kvm/44x.c | 228
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c | 371
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 463
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h | 26
-rw-r--r--  arch/powerpc/kvm/Kconfig | 28
-rw-r--r--  arch/powerpc/kvm/Makefile | 12
-rw-r--r--  arch/powerpc/kvm/booke.c (renamed from arch/powerpc/kvm/booke_guest.c) | 418
-rw-r--r--  arch/powerpc/kvm/booke.h | 60
-rw-r--r--  arch/powerpc/kvm/booke_host.c | 83
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 72
-rw-r--r--  arch/powerpc/kvm/emulate.c | 447
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 133
-rw-r--r--  arch/powerpc/kvm/timing.c | 239
-rw-r--r--  arch/powerpc/kvm/timing.h | 102
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 17
-rw-r--r--  arch/powerpc/lib/dma-noncoherent.c | 25
-rw-r--r--  arch/powerpc/lib/memcpy_64.S | 16
-rw-r--r--  arch/powerpc/math-emu/Makefile | 2
-rw-r--r--  arch/powerpc/math-emu/fadd.c | 1
-rw-r--r--  arch/powerpc/math-emu/fcmpo.c | 5
-rw-r--r--  arch/powerpc/math-emu/fdiv.c | 9
-rw-r--r--  arch/powerpc/math-emu/fdivs.c | 9
-rw-r--r--  arch/powerpc/math-emu/fmadd.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmadds.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmsub.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmsubs.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmul.c | 3
-rw-r--r--  arch/powerpc/math-emu/fmuls.c | 3
-rw-r--r--  arch/powerpc/math-emu/fnmadd.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmadds.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmsub.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmsubs.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsqrt.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsqrts.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsub.c | 3
-rw-r--r--  arch/powerpc/math-emu/fsubs.c | 3
-rw-r--r--  arch/powerpc/math-emu/math_efp.c | 720
-rw-r--r--  arch/powerpc/mm/Makefile | 10
-rw-r--r--  arch/powerpc/mm/fault.c | 16
-rw-r--r--  arch/powerpc/mm/hash_low_32.S | 111
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 22
-rw-r--r--  arch/powerpc/mm/init_32.c | 6
-rw-r--r--  arch/powerpc/mm/mem.c | 6
-rw-r--r--  arch/powerpc/mm/mmu_context_32.c | 84
-rw-r--r--  arch/powerpc/mm/mmu_context_hash32.c | 103
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c (renamed from arch/powerpc/mm/mmu_context_64.c) | 8
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c | 397
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 65
-rw-r--r--  arch/powerpc/mm/pgtable.c | 117
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 56
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c | 10
-rw-r--r--  arch/powerpc/mm/tlb_hash32.c (renamed from arch/powerpc/mm/tlb_32.c) | 4
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c (renamed from arch/powerpc/mm/tlb_64.c) | 86
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 209
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S | 166
-rw-r--r--  arch/powerpc/oprofile/cell/spu_task_sync.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/ep405.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/kilauea.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/ppc40x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ebony.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ppc44x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/sam440ep.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200_pm.c | 1
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pci.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 237
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.h | 53
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pm.c | 3
-rw-r--r--  arch/powerpc/platforms/82xx/pq2.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 11
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 8
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 104
-rw-r--r--  arch/powerpc/platforms/86xx/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/86xx/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/86xx/gef_gpio.c | 143
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 12
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 23
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 17
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 5
-rw-r--r--  arch/powerpc/platforms/cell/qpace_setup.c | 152
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 27
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 8
-rw-r--r--  arch/powerpc/platforms/chrp/pci.c | 3
-rw-r--r--  arch/powerpc/platforms/embedded6xx/c2k.c | 6
-rw-r--r--  arch/powerpc/platforms/embedded6xx/prpmc2800.c | 6
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 6
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 10
-rw-r--r--  arch/powerpc/platforms/powermac/sleep.S | 5
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/ps3/device-init.c | 33
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c | 2
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c | 8
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c | 38
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 29
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 44
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 163
-rw-r--r--  arch/powerpc/platforms/pseries/phyp_dump.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/rtasd.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 43
-rw-r--r--  arch/powerpc/sysdev/bestcomm/ata.c | 3
-rw-r--r--  arch/powerpc/sysdev/bestcomm/ata.h | 19
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.c | 7
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.h | 61
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm_priv.h | 20
-rw-r--r--  arch/powerpc/sysdev/dcr-low.S | 8
-rw-r--r--  arch/powerpc/sysdev/dcr.c | 5
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c | 241
-rw-r--r--  arch/powerpc/sysdev/grackle.c | 2
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 32
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 306
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c | 3
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc.c | 4
-rw-r--r--  arch/powerpc/xmon/Makefile | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 5
272 files changed, 8911 insertions(+), 3825 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index adb23ea1c1ef..79f25cef32df 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -285,6 +285,10 @@ config IOMMU_VMERGE
 config IOMMU_HELPER
 	def_bool PPC64
 
+config PPC_NEED_DMA_SYNC_OPS
+	def_bool y
+	depends on NOT_COHERENT_CACHE
+
 config HOTPLUG_CPU
 	bool "Support for enabling/disabling CPUs"
 	depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
@@ -322,7 +326,7 @@ config KEXEC
 
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
-	depends on PPC_MULTIPLATFORM && PPC64 && RELOCATABLE
+	depends on (PPC64 && RELOCATABLE) || 6xx
 	help
 	  Build a kernel suitable for use as a kdump capture kernel.
 	  The same kernel binary can be used as production kernel and dump
@@ -401,23 +405,53 @@ config PPC_HAS_HASH_64K
 	depends on PPC64
 	default n
 
-config PPC_64K_PAGES
-	bool "64k page size"
-	depends on PPC64
-	select PPC_HAS_HASH_64K
+choice
+	prompt "Page size"
+	default PPC_4K_PAGES
 	help
-	  This option changes the kernel logical page size to 64k. On machines
-	  without processor support for 64k pages, the kernel will simulate
-	  them by loading each individual 4k page on demand transparently,
-	  while on hardware with such support, it will be used to map
-	  normal application pages.
+	  Select the kernel logical page size. Increasing the page size
+	  will reduce software overhead at each page boundary, allow
+	  hardware prefetch mechanisms to be more effective, and allow
+	  larger dma transfers increasing IO efficiency and reducing
+	  overhead. However the utilization of memory will increase.
+	  For example, each cached file will using a multiple of the
+	  page size to hold its contents and the difference between the
+	  end of file and the end of page is wasted.
+
+	  Some dedicated systems, such as software raid serving with
+	  accelerated calculations, have shown significant increases.
+
+	  If you configure a 64 bit kernel for 64k pages but the
+	  processor does not support them, then the kernel will simulate
+	  them with 4k pages, loading them on demand, but with the
+	  reduced software overhead and larger internal fragmentation.
+	  For the 32 bit kernel, a large page option will not be offered
+	  unless it is supported by the configured processor.
+
+	  If unsure, choose 4K_PAGES.
+
+config PPC_4K_PAGES
+	bool "4k page size"
+
+config PPC_16K_PAGES
+	bool "16k page size" if 44x
+
+config PPC_64K_PAGES
+	bool "64k page size" if 44x || PPC_STD_MMU_64
+	select PPC_HAS_HASH_64K if PPC_STD_MMU_64
+
+endchoice
 
 config FORCE_MAX_ZONEORDER
 	int "Maximum zone order"
-	range 9 64 if PPC_64K_PAGES
-	default "9" if PPC_64K_PAGES
-	range 13 64 if PPC64 && !PPC_64K_PAGES
-	default "13" if PPC64 && !PPC_64K_PAGES
+	range 9 64 if PPC_STD_MMU_64 && PPC_64K_PAGES
+	default "9" if PPC_STD_MMU_64 && PPC_64K_PAGES
+	range 13 64 if PPC_STD_MMU_64 && !PPC_64K_PAGES
+	default "13" if PPC_STD_MMU_64 && !PPC_64K_PAGES
+	range 9 64 if PPC_STD_MMU_32 && PPC_16K_PAGES
+	default "9" if PPC_STD_MMU_32 && PPC_16K_PAGES
+	range 7 64 if PPC_STD_MMU_32 && PPC_64K_PAGES
+	default "7" if PPC_STD_MMU_32 && PPC_64K_PAGES
 	range 11 64
 	default "11"
 	help
@@ -437,7 +471,7 @@ config FORCE_MAX_ZONEORDER
 
 config PPC_SUBPAGE_PROT
 	bool "Support setting protections for 4k subpages"
-	depends on PPC_64K_PAGES
+	depends on PPC_STD_MMU_64 && PPC_64K_PAGES
 	help
 	  This option adds support for a system call to allow user programs
 	  to set access permissions (read/write, readonly, or no access)
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 15eb27861fc7..08f7cc0a1953 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -2,6 +2,15 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
+config PRINT_STACK_DEPTH
+	int "Stack depth to print" if DEBUG_KERNEL
+	default 64
+	help
+	  This option allows you to set the stack depth that the kernel
+	  prints in stack traces. This can be useful if your display is
+	  too small and stack traces cause important information to
+	  scroll off the screen.
+
 config DEBUG_STACKOVERFLOW
 	bool "Check for stack overflows"
 	depends on DEBUG_KERNEL
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1f0667069940..72d17f50e54f 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -107,7 +107,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
 # (We use all available options to help semi-broken compilers)
 KBUILD_CFLAGS += $(call cc-option,-mno-spe)
 KBUILD_CFLAGS += $(call cc-option,-mspe=no)
-KBUILD_CFLAGS += $(call cc-option,-mabi=no-spe)
 
 # Enable unit-at-a-time mode when possible. It shrinks the
 # kernel considerably.
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 3d3daa674299..f32829937aad 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -194,6 +194,7 @@ image-$(CONFIG_PPC_MAPLE) += zImage.pseries
 image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
 image-$(CONFIG_PPC_PS3) += dtbImage.ps3
 image-$(CONFIG_PPC_CELLEB) += zImage.pseries
+image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries
 image-$(CONFIG_PPC_CHRP) += zImage.chrp
 image-$(CONFIG_PPC_EFIKA) += zImage.chrp
 image-$(CONFIG_PPC_PMAC) += zImage.pmac
diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
index 5d12336dc360..a7e21a35c03a 100644
--- a/arch/powerpc/boot/devtree.c
+++ b/arch/powerpc/boot/devtree.c
@@ -213,7 +213,7 @@ static int find_range(u32 *reg, u32 *ranges, int nregaddr,
 		u32 range_addr[MAX_ADDR_CELLS];
 		u32 range_size[MAX_ADDR_CELLS];
 
-		copy_val(range_addr, ranges + i, naddr);
+		copy_val(range_addr, ranges + i, nregaddr);
 		copy_val(range_size, ranges + i + nregaddr + naddr, nsize);
 
 		if (compare_reg(reg, range_addr, range_size))
diff --git a/arch/powerpc/boot/dts/asp834x-redboot.dts b/arch/powerpc/boot/dts/asp834x-redboot.dts
index 6235fca445de..524af7ef9f26 100644
--- a/arch/powerpc/boot/dts/asp834x-redboot.dts
+++ b/arch/powerpc/boot/dts/asp834x-redboot.dts
@@ -199,8 +199,26 @@
199 reg = <0x2>; 199 reg = <0x2>;
200 device_type = "ethernet-phy"; 200 device_type = "ethernet-phy";
201 }; 201 };
202
203 tbi0: tbi-phy@11 {
204 reg = <0x11>;
205 device_type = "tbi-phy";
206 };
202 }; 207 };
203 208
209 mdio@25520 {
210 #address-cells = <1>;
211 #size-cells = <0>;
212 compatible = "fsl,gianfar-tbi";
213 reg = <0x25520 0x20>;
214
215 tbi1: tbi-phy@11 {
216 reg = <0x11>;
217 device_type = "tbi-phy";
218 };
219 };
220
221
204 enet0: ethernet@24000 { 222 enet0: ethernet@24000 {
205 cell-index = <0>; 223 cell-index = <0>;
206 device_type = "network"; 224 device_type = "network";
@@ -210,6 +228,7 @@
210 local-mac-address = [ 00 08 e5 11 32 33 ]; 228 local-mac-address = [ 00 08 e5 11 32 33 ];
211 interrupts = <32 0x8 33 0x8 34 0x8>; 229 interrupts = <32 0x8 33 0x8 34 0x8>;
212 interrupt-parent = <&ipic>; 230 interrupt-parent = <&ipic>;
231 tbi-handle = <&tbi0>;
213 phy-handle = <&phy0>; 232 phy-handle = <&phy0>;
214 linux,network-index = <0>; 233 linux,network-index = <0>;
215 }; 234 };
@@ -223,6 +242,7 @@
223 local-mac-address = [ 00 08 e5 11 32 34 ]; 242 local-mac-address = [ 00 08 e5 11 32 34 ];
224 interrupts = <35 0x8 36 0x8 37 0x8>; 243 interrupts = <35 0x8 36 0x8 37 0x8>;
225 interrupt-parent = <&ipic>; 244 interrupt-parent = <&ipic>;
245 tbi-handle = <&tbi1>;
226 phy-handle = <&phy1>; 246 phy-handle = <&phy1>;
227 linux,network-index = <1>; 247 linux,network-index = <1>;
228 }; 248 };
diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts
index 6ce0cc2c0208..aa68911f6560 100644
--- a/arch/powerpc/boot/dts/bamboo.dts
+++ b/arch/powerpc/boot/dts/bamboo.dts
@@ -269,7 +269,8 @@
269 * later cannot be changed. Chip supports a second 269 * later cannot be changed. Chip supports a second
270 * IO range but we don't use it for now 270 * IO range but we don't use it for now
271 */ 271 */
272 ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000 272 ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000
273 0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000
273 0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>; 274 0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
274 275
275 /* Inbound 2GB range starting at 0 */ 276 /* Inbound 2GB range starting at 0 */
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 79fe412c11c9..8b5ba8261a36 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -40,6 +40,7 @@
40 d-cache-size = <32768>; 40 d-cache-size = <32768>;
41 dcr-controller; 41 dcr-controller;
42 dcr-access-method = "native"; 42 dcr-access-method = "native";
43 next-level-cache = <&L2C0>;
43 }; 44 };
44 }; 45 };
45 46
@@ -104,6 +105,16 @@
104 dcr-reg = <0x00c 0x002>; 105 dcr-reg = <0x00c 0x002>;
105 }; 106 };
106 107
108 L2C0: l2c {
109 compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
110 dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */
111 0x030 0x008>; /* L2 cache DCR's */
112 cache-line-size = <32>; /* 32 bytes */
113 cache-size = <262144>; /* L2, 256K */
114 interrupt-parent = <&UIC1>;
115 interrupts = <11 1>;
116 };
117
107 plb { 118 plb {
108 compatible = "ibm,plb-460ex", "ibm,plb4"; 119 compatible = "ibm,plb-460ex", "ibm,plb4";
109 #address-cells = <2>; 120 #address-cells = <2>;
@@ -343,6 +354,7 @@
343 * later cannot be changed 354 * later cannot be changed
344 */ 355 */
345 ranges = <0x02000000 0x00000000 0x80000000 0x0000000d 0x80000000 0x00000000 0x80000000 356 ranges = <0x02000000 0x00000000 0x80000000 0x0000000d 0x80000000 0x00000000 0x80000000
357 0x02000000 0x00000000 0x00000000 0x0000000c 0x0ee00000 0x00000000 0x00100000
346 0x01000000 0x00000000 0x00000000 0x0000000c 0x08000000 0x00000000 0x00010000>; 358 0x01000000 0x00000000 0x00000000 0x0000000c 0x08000000 0x00000000 0x00010000>;
347 359
348 /* Inbound 2GB range starting at 0 */ 360 /* Inbound 2GB range starting at 0 */
@@ -373,6 +385,7 @@
373 * later cannot be changed 385 * later cannot be changed
374 */ 386 */
375 ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000 387 ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
388 0x02000000 0x00000000 0x00000000 0x0000000f 0x00000000 0x00000000 0x00100000
376 0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>; 389 0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
377 390
378 /* Inbound 2GB range starting at 0 */ 391 /* Inbound 2GB range starting at 0 */
@@ -414,6 +427,7 @@
414 * later cannot be changed 427 * later cannot be changed
415 */ 428 */
416 ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000 429 ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000
430 0x02000000 0x00000000 0x00000000 0x0000000f 0x00100000 0x00000000 0x00100000
417 0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>; 431 0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>;
418 432
419 /* Inbound 2GB range starting at 0 */ 433 /* Inbound 2GB range starting at 0 */
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index e48cfa740c8a..9708b3423bbd 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -98,6 +98,12 @@
98 interrupt-parent = <&mpic>; 98 interrupt-parent = <&mpic>;
99 99
100 }; 100 };
101 gef_gpio: gpio@7,14000 {
102 #gpio-cells = <2>;
103 compatible = "gef,sbc610-gpio";
104 reg = <0x7 0x14000 0x24>;
105 gpio-controller;
106 };
101 }; 107 };
102 108
103 soc@fef00000 { 109 soc@fef00000 {
@@ -119,6 +125,11 @@
119 interrupt-parent = <&mpic>; 125 interrupt-parent = <&mpic>;
120 dfsrr; 126 dfsrr;
121 127
128 rtc@51 {
129 compatible = "epson,rx8581";
130 reg = <0x00000051>;
131 };
132
122 eti@6b { 133 eti@6b {
123 compatible = "dallas,ds1682"; 134 compatible = "dallas,ds1682";
124 reg = <0x6b>; 135 reg = <0x6b>;
diff --git a/arch/powerpc/boot/dts/ksi8560.dts b/arch/powerpc/boot/dts/ksi8560.dts
index 49737589ffc8..3bfff47418db 100644
--- a/arch/powerpc/boot/dts/ksi8560.dts
+++ b/arch/powerpc/boot/dts/ksi8560.dts
@@ -141,8 +141,26 @@
141 reg = <0x2>; 141 reg = <0x2>;
142 device_type = "ethernet-phy"; 142 device_type = "ethernet-phy";
143 }; 143 };
144
145 tbi0: tbi-phy@11 {
146 reg = <0x11>;
147 device_type = "tbi-phy";
148 };
144 }; 149 };
145 150
151 mdio@25520 {
152 #address-cells = <1>;
153 #size-cells = <0>;
154 compatible = "fsl,gianfar-tbi";
155 reg = <0x25520 0x20>;
156
157 tbi1: tbi-phy@11 {
158 reg = <0x11>;
159 device_type = "tbi-phy";
160 };
161 };
162
163
146 enet0: ethernet@24000 { 164 enet0: ethernet@24000 {
147 device_type = "network"; 165 device_type = "network";
148 model = "TSEC"; 166 model = "TSEC";
@@ -152,6 +170,7 @@
152 local-mac-address = [ 00 00 00 00 00 00 ]; 170 local-mac-address = [ 00 00 00 00 00 00 ];
153 interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>; 171 interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
154 interrupt-parent = <&mpic>; 172 interrupt-parent = <&mpic>;
173 tbi-handle = <&tbi0>;
155 phy-handle = <&PHY1>; 174 phy-handle = <&PHY1>;
156 }; 175 };
157 176
@@ -164,6 +183,7 @@
164 local-mac-address = [ 00 00 00 00 00 00 ]; 183 local-mac-address = [ 00 00 00 00 00 00 ];
165 interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>; 184 interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
166 interrupt-parent = <&mpic>; 185 interrupt-parent = <&mpic>;
186 tbi-handle = <&tbi1>;
167 phy-handle = <&PHY2>; 187 phy-handle = <&PHY2>;
168 }; 188 };
169 189
diff --git a/arch/powerpc/boot/dts/kuroboxHD.dts b/arch/powerpc/boot/dts/kuroboxHD.dts
index 2e5a1a1812b6..8d725d10882f 100644
--- a/arch/powerpc/boot/dts/kuroboxHD.dts
+++ b/arch/powerpc/boot/dts/kuroboxHD.dts
@@ -76,7 +76,6 @@ XXXX add flash parts, rtc, ??
76 interrupt-parent = <&mpic>; 76 interrupt-parent = <&mpic>;
77 77
78 rtc@32 { 78 rtc@32 {
79 device_type = "rtc";
80 compatible = "ricoh,rs5c372a"; 79 compatible = "ricoh,rs5c372a";
81 reg = <0x32>; 80 reg = <0x32>;
82 }; 81 };
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
index e4916e69ad31..b13a11eb81b0 100644
--- a/arch/powerpc/boot/dts/kuroboxHG.dts
+++ b/arch/powerpc/boot/dts/kuroboxHG.dts
@@ -76,7 +76,6 @@ XXXX add flash parts, rtc, ??
76 interrupt-parent = <&mpic>; 76 interrupt-parent = <&mpic>;
77 77
78 rtc@32 { 78 rtc@32 {
79 device_type = "rtc";
80 compatible = "ricoh,rs5c372a"; 79 compatible = "ricoh,rs5c372a";
81 reg = <0x32>; 80 reg = <0x32>;
82 }; 81 };
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
index 2cf9a8768f44..3f7a5dce8de0 100644
--- a/arch/powerpc/boot/dts/lite5200.dts
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -130,7 +130,6 @@
130 130
131 rtc@800 { // Real time clock 131 rtc@800 { // Real time clock
132 compatible = "fsl,mpc5200-rtc"; 132 compatible = "fsl,mpc5200-rtc";
133 device_type = "rtc";
134 reg = <0x800 0x100>; 133 reg = <0x800 0x100>;
135 interrupts = <1 5 0 1 6 0>; 134 interrupts = <1 5 0 1 6 0>;
136 interrupt-parent = <&mpc5200_pic>; 135 interrupt-parent = <&mpc5200_pic>;
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index 7bd5b9c399b8..63e3bb48e843 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -130,7 +130,6 @@
130 130
131 rtc@800 { // Real time clock 131 rtc@800 { // Real time clock
132 compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; 132 compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
133 device_type = "rtc";
134 reg = <0x800 0x100>; 133 reg = <0x800 0x100>;
135 interrupts = <1 5 0 1 6 0>; 134 interrupts = <1 5 0 1 6 0>;
136 interrupt-parent = <&mpc5200_pic>; 135 interrupt-parent = <&mpc5200_pic>;
diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts
index 9e3c921be164..52ba6f98b273 100644
--- a/arch/powerpc/boot/dts/motionpro.dts
+++ b/arch/powerpc/boot/dts/motionpro.dts
@@ -248,7 +248,6 @@
248 fsl5200-clocking; 248 fsl5200-clocking;
249 249
250 rtc@68 { 250 rtc@68 {
251 device_type = "rtc";
252 compatible = "dallas,ds1339"; 251 compatible = "dallas,ds1339";
253 reg = <0x68>; 252 reg = <0x68>;
254 }; 253 };
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index 503031766825..d4df8b6857a4 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -190,6 +190,7 @@
190 local-mac-address = [ 00 00 00 00 00 00 ]; 190 local-mac-address = [ 00 00 00 00 00 00 ];
191 interrupts = <37 0x8 36 0x8 35 0x8>; 191 interrupts = <37 0x8 36 0x8 35 0x8>;
192 interrupt-parent = <&ipic>; 192 interrupt-parent = <&ipic>;
193 tbi-handle = < &tbi0 >;
193 phy-handle = < &phy1 >; 194 phy-handle = < &phy1 >;
194 fsl,magic-packet; 195 fsl,magic-packet;
195 196
@@ -210,6 +211,10 @@
210 reg = <0x4>; 211 reg = <0x4>;
211 device_type = "ethernet-phy"; 212 device_type = "ethernet-phy";
212 }; 213 };
214 tbi0: tbi-phy@11 {
215 reg = <0x11>;
216 device_type = "tbi-phy";
217 };
213 }; 218 };
214 }; 219 };
215 220
@@ -222,9 +227,24 @@
222 local-mac-address = [ 00 00 00 00 00 00 ]; 227 local-mac-address = [ 00 00 00 00 00 00 ];
223 interrupts = <34 0x8 33 0x8 32 0x8>; 228 interrupts = <34 0x8 33 0x8 32 0x8>;
224 interrupt-parent = <&ipic>; 229 interrupt-parent = <&ipic>;
230 tbi-handle = < &tbi1 >;
225 phy-handle = < &phy4 >; 231 phy-handle = < &phy4 >;
226 sleep = <&pmc 0x10000000>; 232 sleep = <&pmc 0x10000000>;
227 fsl,magic-packet; 233 fsl,magic-packet;
234
235 mdio@25520 {
236 #address-cells = <1>;
237 #size-cells = <0>;
238 compatible = "fsl,gianfar-tbi";
239 reg = <0x25520 0x20>;
240
241 tbi1: tbi-phy@11 {
242 reg = <0x11>;
243 device_type = "tbi-phy";
244 };
245 };
246
247
228 }; 248 };
229 249
230 serial0: serial@4500 { 250 serial0: serial@4500 {
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 6b850670de1d..072c9b0f8c8e 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -117,7 +117,6 @@
117 interrupt-parent = <&ipic>; 117 interrupt-parent = <&ipic>;
118 dfsrr; 118 dfsrr;
119 rtc@68 { 119 rtc@68 {
120 device_type = "rtc";
121 compatible = "dallas,ds1339"; 120 compatible = "dallas,ds1339";
122 reg = <0x68>; 121 reg = <0x68>;
123 }; 122 };
@@ -206,8 +205,25 @@
206 reg = <0x1>; 205 reg = <0x1>;
207 device_type = "ethernet-phy"; 206 device_type = "ethernet-phy";
208 }; 207 };
208 tbi0: tbi-phy@11 {
209 reg = <0x11>;
210 device_type = "tbi-phy";
211 };
212 };
213
214 mdio@25520 {
215 #address-cells = <1>;
216 #size-cells = <0>;
217 compatible = "fsl,gianfar-tbi";
218 reg = <0x25520 0x20>;
219
220 tbi1: tbi-phy@11 {
221 reg = <0x11>;
222 device_type = "tbi-phy";
223 };
209 }; 224 };
210 225
226
211 enet0: ethernet@24000 { 227 enet0: ethernet@24000 {
212 cell-index = <0>; 228 cell-index = <0>;
213 device_type = "network"; 229 device_type = "network";
@@ -217,6 +233,7 @@
217 local-mac-address = [ 00 00 00 00 00 00 ]; 233 local-mac-address = [ 00 00 00 00 00 00 ];
218 interrupts = <32 0x8 33 0x8 34 0x8>; 234 interrupts = <32 0x8 33 0x8 34 0x8>;
219 interrupt-parent = <&ipic>; 235 interrupt-parent = <&ipic>;
236 tbi-handle = <&tbi0>;
220 phy-handle = < &phy0 >; 237 phy-handle = < &phy0 >;
221 }; 238 };
222 239
@@ -229,6 +246,7 @@
229 local-mac-address = [ 00 00 00 00 00 00 ]; 246 local-mac-address = [ 00 00 00 00 00 00 ];
230 interrupts = <35 0x8 36 0x8 37 0x8>; 247 interrupts = <35 0x8 36 0x8 37 0x8>;
231 interrupt-parent = <&ipic>; 248 interrupt-parent = <&ipic>;
249 tbi-handle = <&tbi1>;
232 phy-handle = < &phy1 >; 250 phy-handle = < &phy1 >;
233 }; 251 };
234 252
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 4bdbaf4993a1..b5eda94a8e2a 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -85,7 +85,6 @@
85 dfsrr; 85 dfsrr;
86 86
87 rtc@68 { 87 rtc@68 {
88 device_type = "rtc";
89 compatible = "dallas,ds1339"; 88 compatible = "dallas,ds1339";
90 reg = <0x68>; 89 reg = <0x68>;
91 interrupts = <18 0x8>; 90 interrupts = <18 0x8>;
@@ -184,6 +183,22 @@
184 reg = <0x1c>; 183 reg = <0x1c>;
185 device_type = "ethernet-phy"; 184 device_type = "ethernet-phy";
186 }; 185 };
186 tbi0: tbi-phy@11 {
187 reg = <0x11>;
188 device_type = "tbi-phy";
189 };
190 };
191
192 mdio@25520 {
193 #address-cells = <1>;
194 #size-cells = <0>;
195 compatible = "fsl,gianfar-tbi";
196 reg = <0x25520 0x20>;
197
198 tbi1: tbi-phy@11 {
199 reg = <0x11>;
200 device_type = "tbi-phy";
201 };
187 }; 202 };
188 203
189 enet0: ethernet@24000 { 204 enet0: ethernet@24000 {
@@ -195,6 +210,7 @@
195 local-mac-address = [ 00 00 00 00 00 00 ]; 210 local-mac-address = [ 00 00 00 00 00 00 ];
196 interrupts = <32 0x8 33 0x8 34 0x8>; 211 interrupts = <32 0x8 33 0x8 34 0x8>;
197 interrupt-parent = <&ipic>; 212 interrupt-parent = <&ipic>;
213 tbi-handle = <&tbi0>;
198 phy-handle = <&phy1c>; 214 phy-handle = <&phy1c>;
199 linux,network-index = <0>; 215 linux,network-index = <0>;
200 }; 216 };
@@ -211,6 +227,7 @@
211 /* Vitesse 7385 isn't on the MDIO bus */ 227 /* Vitesse 7385 isn't on the MDIO bus */
212 fixed-link = <1 1 1000 0 0>; 228 fixed-link = <1 1 1000 0 0>;
213 linux,network-index = <1>; 229 linux,network-index = <1>;
230 tbi-handle = <&tbi1>;
214 }; 231 };
215 232
216 serial0: serial@4500 { 233 serial0: serial@4500 {
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index fa40647ee62e..c87a6015e165 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -83,7 +83,6 @@
83 dfsrr; 83 dfsrr;
84 84
85 rtc@68 { 85 rtc@68 {
86 device_type = "rtc";
87 compatible = "dallas,ds1339"; 86 compatible = "dallas,ds1339";
88 reg = <0x68>; 87 reg = <0x68>;
89 interrupts = <18 0x8>; 88 interrupts = <18 0x8>;
@@ -163,6 +162,10 @@
163 reg = <0x1c>; 162 reg = <0x1c>;
164 device_type = "ethernet-phy"; 163 device_type = "ethernet-phy";
165 }; 164 };
165 tbi0: tbi-phy@11 {
166 reg = <0x11>;
167 device_type = "tbi-phy";
168 };
166 }; 169 };
167 170
168 enet0: ethernet@24000 { 171 enet0: ethernet@24000 {
@@ -174,6 +177,7 @@
174 local-mac-address = [ 00 00 00 00 00 00 ]; 177 local-mac-address = [ 00 00 00 00 00 00 ];
175 interrupts = <32 0x8 33 0x8 34 0x8>; 178 interrupts = <32 0x8 33 0x8 34 0x8>;
176 interrupt-parent = <&ipic>; 179 interrupt-parent = <&ipic>;
180 tbi-handle = <&tbi0>;
177 phy-handle = <&phy1c>; 181 phy-handle = <&phy1c>;
178 linux,network-index = <0>; 182 linux,network-index = <0>;
179 }; 183 };
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index c986c541e9bb..d9adba01c09c 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -185,8 +185,25 @@
185 reg = <0x1>; 185 reg = <0x1>;
186 device_type = "ethernet-phy"; 186 device_type = "ethernet-phy";
187 }; 187 };
188 tbi0: tbi-phy@11 {
189 reg = <0x11>;
190 device_type = "tbi-phy";
191 };
192 };
193
194 mdio@25520 {
195 #address-cells = <1>;
196 #size-cells = <0>;
197 compatible = "fsl,gianfar-tbi";
198 reg = <0x25520 0x20>;
199
200 tbi1: tbi-phy@11 {
201 reg = <0x11>;
202 device_type = "tbi-phy";
203 };
188 }; 204 };
189 205
206
190 enet0: ethernet@24000 { 207 enet0: ethernet@24000 {
191 cell-index = <0>; 208 cell-index = <0>;
192 device_type = "network"; 209 device_type = "network";
@@ -196,6 +213,7 @@
196 local-mac-address = [ 00 00 00 00 00 00 ]; 213 local-mac-address = [ 00 00 00 00 00 00 ];
197 interrupts = <32 0x8 33 0x8 34 0x8>; 214 interrupts = <32 0x8 33 0x8 34 0x8>;
198 interrupt-parent = <&ipic>; 215 interrupt-parent = <&ipic>;
216 tbi-handle = <&tbi0>;
199 phy-handle = <&phy0>; 217 phy-handle = <&phy0>;
200 linux,network-index = <0>; 218 linux,network-index = <0>;
201 }; 219 };
@@ -209,6 +227,7 @@
209 local-mac-address = [ 00 00 00 00 00 00 ]; 227 local-mac-address = [ 00 00 00 00 00 00 ];
210 interrupts = <35 0x8 36 0x8 37 0x8>; 228 interrupts = <35 0x8 36 0x8 37 0x8>;
211 interrupt-parent = <&ipic>; 229 interrupt-parent = <&ipic>;
230 tbi-handle = <&tbi1>;
212 phy-handle = <&phy1>; 231 phy-handle = <&phy1>;
213 linux,network-index = <1>; 232 linux,network-index = <1>;
214 }; 233 };
diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts
index 0484561bd2c0..1d14d7052e6d 100644
--- a/arch/powerpc/boot/dts/mpc8377_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8377_mds.dts
@@ -193,8 +193,25 @@
193 reg = <0x3>; 193 reg = <0x3>;
194 device_type = "ethernet-phy"; 194 device_type = "ethernet-phy";
195 }; 195 };
196 tbi0: tbi-phy@11 {
197 reg = <0x11>;
198 device_type = "tbi-phy";
199 };
200 };
201
202 mdio@25520 {
203 #address-cells = <1>;
204 #size-cells = <0>;
205 compatible = "fsl,gianfar-tbi";
206 reg = <0x25520 0x20>;
207
208 tbi1: tbi-phy@11 {
209 reg = <0x11>;
210 device_type = "tbi-phy";
211 };
196 }; 212 };
197 213
214
198 enet0: ethernet@24000 { 215 enet0: ethernet@24000 {
199 cell-index = <0>; 216 cell-index = <0>;
200 device_type = "network"; 217 device_type = "network";
@@ -205,6 +222,7 @@
205 interrupts = <32 0x8 33 0x8 34 0x8>; 222 interrupts = <32 0x8 33 0x8 34 0x8>;
206 phy-connection-type = "mii"; 223 phy-connection-type = "mii";
207 interrupt-parent = <&ipic>; 224 interrupt-parent = <&ipic>;
225 tbi-handle = <&tbi0>;
208 phy-handle = <&phy2>; 226 phy-handle = <&phy2>;
209 }; 227 };
210 228
@@ -218,6 +236,7 @@
218 interrupts = <35 0x8 36 0x8 37 0x8>; 236 interrupts = <35 0x8 36 0x8 37 0x8>;
219 phy-connection-type = "mii"; 237 phy-connection-type = "mii";
220 interrupt-parent = <&ipic>; 238 interrupt-parent = <&ipic>;
239 tbi-handle = <&tbi1>;
221 phy-handle = <&phy3>; 240 phy-handle = <&phy3>;
222 }; 241 };
223 242
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 435ef3dd022d..9413af3b9925 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -117,7 +117,6 @@
117 interrupt-parent = <&ipic>; 117 interrupt-parent = <&ipic>;
118 dfsrr; 118 dfsrr;
119 rtc@68 { 119 rtc@68 {
120 device_type = "rtc";
121 compatible = "dallas,ds1339"; 120 compatible = "dallas,ds1339";
122 reg = <0x68>; 121 reg = <0x68>;
123 }; 122 };
@@ -211,8 +210,25 @@
211 reg = <0x2>; 210 reg = <0x2>;
212 device_type = "ethernet-phy"; 211 device_type = "ethernet-phy";
213 }; 212 };
213 tbi0: tbi-phy@11 {
214 reg = <0x11>;
215 device_type = "tbi-phy";
216 };
217 };
218
219 mdio@25520 {
220 #address-cells = <1>;
221 #size-cells = <0>;
222 compatible = "fsl,gianfar-tbi";
223 reg = <0x25520 0x20>;
224
225 tbi1: tbi-phy@11 {
226 reg = <0x11>;
227 device_type = "tbi-phy";
228 };
214 }; 229 };
215 230
231
216 enet0: ethernet@24000 { 232 enet0: ethernet@24000 {
217 cell-index = <0>; 233 cell-index = <0>;
218 device_type = "network"; 234 device_type = "network";
@@ -223,6 +239,7 @@
223 interrupts = <32 0x8 33 0x8 34 0x8>; 239 interrupts = <32 0x8 33 0x8 34 0x8>;
224 phy-connection-type = "mii"; 240 phy-connection-type = "mii";
225 interrupt-parent = <&ipic>; 241 interrupt-parent = <&ipic>;
242 tbi-handle = <&tbi0>;
226 phy-handle = <&phy2>; 243 phy-handle = <&phy2>;
227 }; 244 };
228 245
@@ -237,6 +254,7 @@
237 phy-connection-type = "mii"; 254 phy-connection-type = "mii";
238 interrupt-parent = <&ipic>; 255 interrupt-parent = <&ipic>;
239 fixed-link = <1 1 1000 0 0>; 256 fixed-link = <1 1 1000 0 0>;
257 tbi-handle = <&tbi1>;
240 }; 258 };
241 259
242 serial0: serial@4500 { 260 serial0: serial@4500 {
diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts
index 67a08d2e2ff2..b85fc02682d2 100644
--- a/arch/powerpc/boot/dts/mpc8378_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8378_mds.dts
@@ -232,8 +232,25 @@
232 reg = <0x3>; 232 reg = <0x3>;
233 device_type = "ethernet-phy"; 233 device_type = "ethernet-phy";
234 }; 234 };
235 tbi0: tbi-phy@11 {
236 reg = <0x11>;
237 device_type = "tbi-phy";
238 };
239 };
240
241 mdio@25520 {
242 #address-cells = <1>;
243 #size-cells = <0>;
244 compatible = "fsl,gianfar-tbi";
245 reg = <0x25520 0x20>;
246
247 tbi1: tbi-phy@11 {
248 reg = <0x11>;
249 device_type = "tbi-phy";
250 };
235 }; 251 };
236 252
253
237 enet0: ethernet@24000 { 254 enet0: ethernet@24000 {
238 cell-index = <0>; 255 cell-index = <0>;
239 device_type = "network"; 256 device_type = "network";
@@ -244,6 +261,7 @@
244 interrupts = <32 0x8 33 0x8 34 0x8>; 261 interrupts = <32 0x8 33 0x8 34 0x8>;
245 phy-connection-type = "mii"; 262 phy-connection-type = "mii";
246 interrupt-parent = <&ipic>; 263 interrupt-parent = <&ipic>;
264 tbi-handle = <&tbi0>;
247 phy-handle = <&phy2>; 265 phy-handle = <&phy2>;
248 }; 266 };
249 267
@@ -257,6 +275,7 @@
257 interrupts = <35 0x8 36 0x8 37 0x8>; 275 interrupts = <35 0x8 36 0x8 37 0x8>;
258 phy-connection-type = "mii"; 276 phy-connection-type = "mii";
259 interrupt-parent = <&ipic>; 277 interrupt-parent = <&ipic>;
278 tbi-handle = <&tbi1>;
260 phy-handle = <&phy3>; 279 phy-handle = <&phy3>;
261 }; 280 };
262 281
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index b11e68f56a06..23c10ce22c2c 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -117,7 +117,6 @@
117 interrupt-parent = <&ipic>; 117 interrupt-parent = <&ipic>;
118 dfsrr; 118 dfsrr;
119 rtc@68 { 119 rtc@68 {
120 device_type = "rtc";
121 compatible = "dallas,ds1339"; 120 compatible = "dallas,ds1339";
122 reg = <0x68>; 121 reg = <0x68>;
123 }; 122 };
@@ -211,8 +210,25 @@
211 reg = <0x2>; 210 reg = <0x2>;
212 device_type = "ethernet-phy"; 211 device_type = "ethernet-phy";
213 }; 212 };
213 tbi0: tbi-phy@11 {
214 reg = <0x11>;
215 device_type = "tbi-phy";
216 };
217 };
218
219 mdio@25520 {
220 #address-cells = <1>;
221 #size-cells = <0>;
222 compatible = "fsl,gianfar-tbi";
223 reg = <0x25520 0x20>;
224
225 tbi1: tbi-phy@11 {
226 reg = <0x11>;
227 device_type = "tbi-phy";
228 };
214 }; 229 };
215 230
231
216 enet0: ethernet@24000 { 232 enet0: ethernet@24000 {
217 cell-index = <0>; 233 cell-index = <0>;
218 device_type = "network"; 234 device_type = "network";
diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts
index 323370a2b5ff..acf06c438dbf 100644
--- a/arch/powerpc/boot/dts/mpc8379_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8379_mds.dts
@@ -232,6 +232,22 @@
232 reg = <0x3>; 232 reg = <0x3>;
233 device_type = "ethernet-phy"; 233 device_type = "ethernet-phy";
234 }; 234 };
235 tbi0: tbi-phy@11 {
236 reg = <0x11>;
237 device_type = "tbi-phy";
238 };
239 };
240
241 mdio@25520 {
242 #address-cells = <1>;
243 #size-cells = <0>;
244 compatible = "fsl,gianfar-tbi";
245 reg = <0x25520 0x20>;
246
247 tbi1: tbi-phy@11 {
248 reg = <0x11>;
249 device_type = "tbi-phy";
250 };
235 }; 251 };
236 252
237 enet0: ethernet@24000 { 253 enet0: ethernet@24000 {
@@ -244,6 +260,7 @@
244 interrupts = <32 0x8 33 0x8 34 0x8>; 260 interrupts = <32 0x8 33 0x8 34 0x8>;
245 phy-connection-type = "mii"; 261 phy-connection-type = "mii";
246 interrupt-parent = <&ipic>; 262 interrupt-parent = <&ipic>;
263 tbi-handle = <&tbi0>;
247 phy-handle = <&phy2>; 264 phy-handle = <&phy2>;
248 }; 265 };
249 266
@@ -257,6 +274,7 @@
257 interrupts = <35 0x8 36 0x8 37 0x8>; 274 interrupts = <35 0x8 36 0x8 37 0x8>;
258 phy-connection-type = "mii"; 275 phy-connection-type = "mii";
259 interrupt-parent = <&ipic>; 276 interrupt-parent = <&ipic>;
277 tbi-handle = <&tbi1>;
260 phy-handle = <&phy3>; 278 phy-handle = <&phy3>;
261 }; 279 };
262 280
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index 337af6ea26d3..72cdc3c4c7e3 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -117,7 +117,6 @@
117 interrupt-parent = <&ipic>; 117 interrupt-parent = <&ipic>;
118 dfsrr; 118 dfsrr;
119 rtc@68 { 119 rtc@68 {
120 device_type = "rtc";
121 compatible = "dallas,ds1339"; 120 compatible = "dallas,ds1339";
122 reg = <0x68>; 121 reg = <0x68>;
123 }; 122 };
@@ -211,6 +210,22 @@
211 reg = <0x2>; 210 reg = <0x2>;
212 device_type = "ethernet-phy"; 211 device_type = "ethernet-phy";
213 }; 212 };
213 tbi0: tbi-phy@11 {
214 reg = <0x11>;
215 device_type = "tbi-phy";
216 };
217 };
218
219 mdio@25520 {
220 #address-cells = <1>;
221 #size-cells = <0>;
222 compatible = "fsl,gianfar-tbi";
223 reg = <0x25520 0x20>;
224
225 tbi1: tbi-phy@11 {
226 reg = <0x11>;
227 device_type = "tbi-phy";
228 };
214 }; 229 };
215 230
216 enet0: ethernet@24000 { 231 enet0: ethernet@24000 {
@@ -223,6 +238,7 @@
223 interrupts = <32 0x8 33 0x8 34 0x8>; 238 interrupts = <32 0x8 33 0x8 34 0x8>;
224 phy-connection-type = "mii"; 239 phy-connection-type = "mii";
225 interrupt-parent = <&ipic>; 240 interrupt-parent = <&ipic>;
241 tbi-handle = <&tbi0>;
226 phy-handle = <&phy2>; 242 phy-handle = <&phy2>;
227 }; 243 };
228 244
@@ -237,6 +253,7 @@
237 phy-connection-type = "mii"; 253 phy-connection-type = "mii";
238 interrupt-parent = <&ipic>; 254 interrupt-parent = <&ipic>;
239 fixed-link = <1 1 1000 0 0>; 255 fixed-link = <1 1 1000 0 0>;
256 tbi-handle = <&tbi1>;
240 }; 257 };
241 258
242 serial0: serial@4500 { 259 serial0: serial@4500 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index 35db1e5440c7..3c905df1812c 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -155,6 +155,22 @@
155 reg = <1>; 155 reg = <1>;
156 device_type = "ethernet-phy"; 156 device_type = "ethernet-phy";
157 }; 157 };
158 tbi0: tbi-phy@11 {
159 reg = <0x11>;
160 device_type = "tbi-phy";
161 };
162 };
163
164 mdio@26520 {
165 #address-cells = <1>;
166 #size-cells = <0>;
167 compatible = "fsl,gianfar-tbi";
168 reg = <0x26520 0x20>;
169
170 tbi1: tbi-phy@11 {
171 reg = <0x11>;
172 device_type = "tbi-phy";
173 };
158 }; 174 };
159 175
160 usb@22000 { 176 usb@22000 {
@@ -186,6 +202,7 @@
186 local-mac-address = [ 00 00 00 00 00 00 ]; 202 local-mac-address = [ 00 00 00 00 00 00 ];
187 interrupts = <29 2 30 2 34 2>; 203 interrupts = <29 2 30 2 34 2>;
188 interrupt-parent = <&mpic>; 204 interrupt-parent = <&mpic>;
205 tbi-handle = <&tbi0>;
189 phy-handle = <&phy1>; 206 phy-handle = <&phy1>;
190 phy-connection-type = "rgmii-id"; 207 phy-connection-type = "rgmii-id";
191 }; 208 };
@@ -199,6 +216,7 @@
199 local-mac-address = [ 00 00 00 00 00 00 ]; 216 local-mac-address = [ 00 00 00 00 00 00 ];
200 interrupts = <31 2 32 2 33 2>; 217 interrupts = <31 2 32 2 33 2>;
201 interrupt-parent = <&mpic>; 218 interrupt-parent = <&mpic>;
219 tbi-handle = <&tbi1>;
202 phy-handle = <&phy0>; 220 phy-handle = <&phy0>;
203 phy-connection-type = "rgmii-id"; 221 phy-connection-type = "rgmii-id";
204 }; 222 };
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index 9568bfaff8f7..79570ffe41b9 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -150,6 +150,34 @@
150 reg = <0x3>; 150 reg = <0x3>;
151 device_type = "ethernet-phy"; 151 device_type = "ethernet-phy";
152 }; 152 };
153 tbi0: tbi-phy@11 {
154 reg = <0x11>;
155 device_type = "tbi-phy";
156 };
157 };
158
159 mdio@25520 {
160 #address-cells = <1>;
161 #size-cells = <0>;
162 compatible = "fsl,gianfar-tbi";
163 reg = <0x25520 0x20>;
164
165 tbi1: tbi-phy@11 {
166 reg = <0x11>;
167 device_type = "tbi-phy";
168 };
169 };
170
171 mdio@26520 {
172 #address-cells = <1>;
173 #size-cells = <0>;
174 compatible = "fsl,gianfar-tbi";
175 reg = <0x26520 0x20>;
176
177 tbi2: tbi-phy@11 {
178 reg = <0x11>;
179 device_type = "tbi-phy";
180 };
153 }; 181 };
154 182
155 enet0: ethernet@24000 { 183 enet0: ethernet@24000 {
@@ -161,6 +189,7 @@
161 local-mac-address = [ 00 00 00 00 00 00 ]; 189 local-mac-address = [ 00 00 00 00 00 00 ];
162 interrupts = <29 2 30 2 34 2>; 190 interrupts = <29 2 30 2 34 2>;
163 interrupt-parent = <&mpic>; 191 interrupt-parent = <&mpic>;
192 tbi-handle = <&tbi0>;
164 phy-handle = <&phy0>; 193 phy-handle = <&phy0>;
165 }; 194 };
166 195
@@ -173,6 +202,7 @@
173 local-mac-address = [ 00 00 00 00 00 00 ]; 202 local-mac-address = [ 00 00 00 00 00 00 ];
174 interrupts = <35 2 36 2 40 2>; 203 interrupts = <35 2 36 2 40 2>;
175 interrupt-parent = <&mpic>; 204 interrupt-parent = <&mpic>;
205 tbi-handle = <&tbi1>;
176 phy-handle = <&phy1>; 206 phy-handle = <&phy1>;
177 }; 207 };
178 208
@@ -185,6 +215,7 @@
185 local-mac-address = [ 00 00 00 00 00 00 ]; 215 local-mac-address = [ 00 00 00 00 00 00 ];
186 interrupts = <41 2>; 216 interrupts = <41 2>;
187 interrupt-parent = <&mpic>; 217 interrupt-parent = <&mpic>;
218 tbi-handle = <&tbi2>;
188 phy-handle = <&phy3>; 219 phy-handle = <&phy3>;
189 }; 220 };
190 221
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index 6480f4fd96e0..221036a8ce23 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -144,6 +144,22 @@
144 reg = <0x1>; 144 reg = <0x1>;
145 device_type = "ethernet-phy"; 145 device_type = "ethernet-phy";
146 }; 146 };
147 tbi0: tbi-phy@11 {
148 reg = <0x11>;
149 device_type = "tbi-phy";
150 };
151 };
152
153 mdio@25520 {
154 #address-cells = <1>;
155 #size-cells = <0>;
156 compatible = "fsl,gianfar-tbi";
157 reg = <0x25520 0x20>;
158
159 tbi1: tbi-phy@11 {
160 reg = <0x11>;
161 device_type = "tbi-phy";
162 };
147 }; 163 };
148 164
149 enet0: ethernet@24000 { 165 enet0: ethernet@24000 {
@@ -155,6 +171,7 @@
155 local-mac-address = [ 00 00 00 00 00 00 ]; 171 local-mac-address = [ 00 00 00 00 00 00 ];
156 interrupts = <29 2 30 2 34 2>; 172 interrupts = <29 2 30 2 34 2>;
157 interrupt-parent = <&mpic>; 173 interrupt-parent = <&mpic>;
174 tbi-handle = <&tbi0>;
158 phy-handle = <&phy0>; 175 phy-handle = <&phy0>;
159 }; 176 };
160 177
@@ -167,6 +184,7 @@
167 local-mac-address = [ 00 00 00 00 00 00 ]; 184 local-mac-address = [ 00 00 00 00 00 00 ];
168 interrupts = <35 2 36 2 40 2>; 185 interrupts = <35 2 36 2 40 2>;
169 interrupt-parent = <&mpic>; 186 interrupt-parent = <&mpic>;
187 tbi-handle = <&tbi1>;
170 phy-handle = <&phy1>; 188 phy-handle = <&phy1>;
171 }; 189 };
172 190
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index f1fb20737e3e..b9da42105066 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -116,8 +116,26 @@
116 reg = <0x1>; 116 reg = <0x1>;
117 device_type = "ethernet-phy"; 117 device_type = "ethernet-phy";
118 }; 118 };
119
120 tbi0: tbi-phy@11 {
121 reg = <0x11>;
122 device_type = "tbi-phy";
123 };
119 }; 124 };
120 125
126 mdio@26520 {
127 #address-cells = <1>;
128 #size-cells = <0>;
129 compatible = "fsl,gianfar-tbi";
130 reg = <0x26520 0x20>;
131
132 tbi1: tbi-phy@11 {
133 reg = <0x11>;
134 device_type = "tbi-phy";
135 };
136 };
137
138
121 dma@21300 { 139 dma@21300 {
122 #address-cells = <1>; 140 #address-cells = <1>;
123 #size-cells = <1>; 141 #size-cells = <1>;
@@ -169,6 +187,7 @@
169 interrupts = <29 2 30 2 34 2>; 187 interrupts = <29 2 30 2 34 2>;
170 interrupt-parent = <&mpic>; 188 interrupt-parent = <&mpic>;
171 phy-handle = <&phy0>; 189 phy-handle = <&phy0>;
190 tbi-handle = <&tbi0>;
172 phy-connection-type = "rgmii-id"; 191 phy-connection-type = "rgmii-id";
173 }; 192 };
174 193
@@ -182,6 +201,7 @@
182 interrupts = <31 2 32 2 33 2>; 201 interrupts = <31 2 32 2 33 2>;
183 interrupt-parent = <&mpic>; 202 interrupt-parent = <&mpic>;
184 phy-handle = <&phy1>; 203 phy-handle = <&phy1>;
204 tbi-handle = <&tbi1>;
185 phy-connection-type = "rgmii-id"; 205 phy-connection-type = "rgmii-id";
186 }; 206 };
187 207
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
index 431b496270dc..df774a7088ff 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -172,6 +172,46 @@
172 reg = <0x3>; 172 reg = <0x3>;
173 device_type = "ethernet-phy"; 173 device_type = "ethernet-phy";
174 }; 174 };
175 tbi0: tbi-phy@11 {
176 reg = <0x11>;
177 device_type = "tbi-phy";
178 };
179 };
180
181 mdio@25520 {
182 #address-cells = <1>;
183 #size-cells = <0>;
184 compatible = "fsl,gianfar-tbi";
185 reg = <0x25520 0x20>;
186
187 tbi1: tbi-phy@11 {
188 reg = <0x11>;
189 device_type = "tbi-phy";
190 };
191 };
192
193 mdio@26520 {
194 #address-cells = <1>;
195 #size-cells = <0>;
196 compatible = "fsl,gianfar-tbi";
197 reg = <0x26520 0x20>;
198
199 tbi2: tbi-phy@11 {
200 reg = <0x11>;
201 device_type = "tbi-phy";
202 };
203 };
204
205 mdio@27520 {
206 #address-cells = <1>;
207 #size-cells = <0>;
208 compatible = "fsl,gianfar-tbi";
209 reg = <0x27520 0x20>;
210
211 tbi3: tbi-phy@11 {
212 reg = <0x11>;
213 device_type = "tbi-phy";
214 };
175 }; 215 };
176 216
177 enet0: ethernet@24000 { 217 enet0: ethernet@24000 {
@@ -183,6 +223,7 @@
183 local-mac-address = [ 00 00 00 00 00 00 ]; 223 local-mac-address = [ 00 00 00 00 00 00 ];
184 interrupts = <29 2 30 2 34 2>; 224 interrupts = <29 2 30 2 34 2>;
185 interrupt-parent = <&mpic>; 225 interrupt-parent = <&mpic>;
226 tbi-handle = <&tbi0>;
186 phy-handle = <&phy0>; 227 phy-handle = <&phy0>;
187 }; 228 };
188 229
@@ -195,6 +236,7 @@
195 local-mac-address = [ 00 00 00 00 00 00 ]; 236 local-mac-address = [ 00 00 00 00 00 00 ];
196 interrupts = <35 2 36 2 40 2>; 237 interrupts = <35 2 36 2 40 2>;
197 interrupt-parent = <&mpic>; 238 interrupt-parent = <&mpic>;
239 tbi-handle = <&tbi1>;
198 phy-handle = <&phy1>; 240 phy-handle = <&phy1>;
199 }; 241 };
200 242
@@ -208,6 +250,7 @@
208 local-mac-address = [ 00 00 00 00 00 00 ]; 250 local-mac-address = [ 00 00 00 00 00 00 ];
209 interrupts = <31 2 32 2 33 2>; 251 interrupts = <31 2 32 2 33 2>;
210 interrupt-parent = <&mpic>; 252 interrupt-parent = <&mpic>;
253 tbi-handle = <&tbi2>;
211 phy-handle = <&phy2>; 254 phy-handle = <&phy2>;
212 }; 255 };
213 256
@@ -220,6 +263,7 @@
220 local-mac-address = [ 00 00 00 00 00 00 ]; 263 local-mac-address = [ 00 00 00 00 00 00 ];
221 interrupts = <37 2 38 2 39 2>; 264 interrupts = <37 2 38 2 39 2>;
222 interrupt-parent = <&mpic>; 265 interrupt-parent = <&mpic>;
266 tbi-handle = <&tbi3>;
223 phy-handle = <&phy3>; 267 phy-handle = <&phy3>;
224 }; 268 };
225 */ 269 */
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index d833a5c4f476..053b01e1c93b 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -144,6 +144,22 @@
144 reg = <0x1>; 144 reg = <0x1>;
145 device_type = "ethernet-phy"; 145 device_type = "ethernet-phy";
146 }; 146 };
147 tbi0: tbi-phy@11 {
148 reg = <0x11>;
149 device_type = "tbi-phy";
150 };
151 };
152
153 mdio@25520 {
154 #address-cells = <1>;
155 #size-cells = <0>;
156 compatible = "fsl,gianfar-tbi";
157 reg = <0x25520 0x20>;
158
159 tbi1: tbi-phy@11 {
160 reg = <0x11>;
161 device_type = "tbi-phy";
162 };
147 }; 163 };
148 164
149 enet0: ethernet@24000 { 165 enet0: ethernet@24000 {
@@ -155,6 +171,7 @@
155 local-mac-address = [ 00 00 00 00 00 00 ]; 171 local-mac-address = [ 00 00 00 00 00 00 ];
156 interrupts = <29 2 30 2 34 2>; 172 interrupts = <29 2 30 2 34 2>;
157 interrupt-parent = <&mpic>; 173 interrupt-parent = <&mpic>;
174 tbi-handle = <&tbi0>;
158 phy-handle = <&phy0>; 175 phy-handle = <&phy0>;
159 }; 176 };
160 177
@@ -167,6 +184,7 @@
167 local-mac-address = [ 00 00 00 00 00 00 ]; 184 local-mac-address = [ 00 00 00 00 00 00 ];
168 interrupts = <35 2 36 2 40 2>; 185 interrupts = <35 2 36 2 40 2>;
169 interrupt-parent = <&mpic>; 186 interrupt-parent = <&mpic>;
187 tbi-handle = <&tbi1>;
170 phy-handle = <&phy1>; 188 phy-handle = <&phy1>;
171 }; 189 };
172 190
diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts
index 4d1f2f284094..11b1bcbe14ce 100644
--- a/arch/powerpc/boot/dts/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/mpc8560ads.dts
@@ -145,6 +145,22 @@
145 reg = <0x3>; 145 reg = <0x3>;
146 device_type = "ethernet-phy"; 146 device_type = "ethernet-phy";
147 }; 147 };
148 tbi0: tbi-phy@11 {
149 reg = <0x11>;
150 device_type = "tbi-phy";
151 };
152 };
153
154 mdio@25520 {
155 #address-cells = <1>;
156 #size-cells = <0>;
157 compatible = "fsl,gianfar-tbi";
158 reg = <0x25520 0x20>;
159
160 tbi1: tbi-phy@11 {
161 reg = <0x11>;
162 device_type = "tbi-phy";
163 };
148 }; 164 };
149 165
150 enet0: ethernet@24000 { 166 enet0: ethernet@24000 {
@@ -156,6 +172,7 @@
156 local-mac-address = [ 00 00 00 00 00 00 ]; 172 local-mac-address = [ 00 00 00 00 00 00 ];
157 interrupts = <29 2 30 2 34 2>; 173 interrupts = <29 2 30 2 34 2>;
158 interrupt-parent = <&mpic>; 174 interrupt-parent = <&mpic>;
175 tbi-handle = <&tbi0>;
159 phy-handle = <&phy0>; 176 phy-handle = <&phy0>;
160 }; 177 };
161 178
@@ -168,6 +185,7 @@
168 local-mac-address = [ 00 00 00 00 00 00 ]; 185 local-mac-address = [ 00 00 00 00 00 00 ];
169 interrupts = <35 2 36 2 40 2>; 186 interrupts = <35 2 36 2 40 2>;
170 interrupt-parent = <&mpic>; 187 interrupt-parent = <&mpic>;
188 tbi-handle = <&tbi1>;
171 phy-handle = <&phy1>; 189 phy-handle = <&phy1>;
172 }; 190 };
173 191
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index c80158f7741d..1955bd9e113d 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -179,6 +179,22 @@
179 reg = <0x3>; 179 reg = <0x3>;
180 device_type = "ethernet-phy"; 180 device_type = "ethernet-phy";
181 }; 181 };
182 tbi0: tbi-phy@11 {
183 reg = <0x11>;
184 device_type = "tbi-phy";
185 };
186 };
187
188 mdio@25520 {
189 #address-cells = <1>;
190 #size-cells = <0>;
191 compatible = "fsl,gianfar-tbi";
192 reg = <0x25520 0x20>;
193
194 tbi1: tbi-phy@11 {
195 reg = <0x11>;
196 device_type = "tbi-phy";
197 };
182 }; 198 };
183 199
184 enet0: ethernet@24000 { 200 enet0: ethernet@24000 {
@@ -190,6 +206,7 @@
190 local-mac-address = [ 00 00 00 00 00 00 ]; 206 local-mac-address = [ 00 00 00 00 00 00 ];
191 interrupts = <29 2 30 2 34 2>; 207 interrupts = <29 2 30 2 34 2>;
192 interrupt-parent = <&mpic>; 208 interrupt-parent = <&mpic>;
209 tbi-handle = <&tbi0>;
193 phy-handle = <&phy2>; 210 phy-handle = <&phy2>;
194 }; 211 };
195 212
@@ -202,6 +219,7 @@
202 local-mac-address = [ 00 00 00 00 00 00 ]; 219 local-mac-address = [ 00 00 00 00 00 00 ];
203 interrupts = <35 2 36 2 40 2>; 220 interrupts = <35 2 36 2 40 2>;
204 interrupt-parent = <&mpic>; 221 interrupt-parent = <&mpic>;
222 tbi-handle = <&tbi1>;
205 phy-handle = <&phy3>; 223 phy-handle = <&phy3>;
206 }; 224 };
207 225
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index 5c69b2fafd32..21459e161d02 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -63,6 +63,119 @@
63 device_type = "memory"; 63 device_type = "memory";
64 }; 64 };
65 65
66 localbus@ffe05000 {
67 #address-cells = <2>;
68 #size-cells = <1>;
69 compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
70 reg = <0 0xffe05000 0 0x1000>;
71 interrupts = <19 2>;
72 interrupt-parent = <&mpic>;
73
74 ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
75 0x1 0x0 0x0 0xe0000000 0x08000000
76 0x2 0x0 0x0 0xffa00000 0x00040000
77 0x3 0x0 0x0 0xffdf0000 0x00008000
78 0x4 0x0 0x0 0xffa40000 0x00040000
79 0x5 0x0 0x0 0xffa80000 0x00040000
80 0x6 0x0 0x0 0xffac0000 0x00040000>;
81
82 nor@0,0 {
83 #address-cells = <1>;
84 #size-cells = <1>;
85 compatible = "cfi-flash";
86 reg = <0x0 0x0 0x8000000>;
87 bank-width = <2>;
88 device-width = <1>;
89
90 ramdisk@0 {
91 reg = <0x0 0x03000000>;
92					read-only;
93 };
94
95 diagnostic@3000000 {
96 reg = <0x03000000 0x00e00000>;
97 read-only;
98 };
99
100 dink@3e00000 {
101 reg = <0x03e00000 0x00200000>;
102 read-only;
103 };
104
105 kernel@4000000 {
106 reg = <0x04000000 0x00400000>;
107 read-only;
108 };
109
110 jffs2@4400000 {
111 reg = <0x04400000 0x03b00000>;
112 };
113
114 dtb@7f00000 {
115 reg = <0x07f00000 0x00080000>;
116 read-only;
117 };
118
119 u-boot@7f80000 {
120 reg = <0x07f80000 0x00080000>;
121 read-only;
122 };
123 };
124
125 nand@2,0 {
126 #address-cells = <1>;
127 #size-cells = <1>;
128 compatible = "fsl,mpc8572-fcm-nand",
129 "fsl,elbc-fcm-nand";
130 reg = <0x2 0x0 0x40000>;
131
132 u-boot@0 {
133 reg = <0x0 0x02000000>;
134 read-only;
135 };
136
137 jffs2@2000000 {
138 reg = <0x02000000 0x10000000>;
139 };
140
141 ramdisk@12000000 {
142 reg = <0x12000000 0x08000000>;
143 read-only;
144 };
145
146 kernel@1a000000 {
147 reg = <0x1a000000 0x04000000>;
148 };
149
150 dtb@1e000000 {
151 reg = <0x1e000000 0x01000000>;
152 read-only;
153 };
154
155 empty@1f000000 {
156 reg = <0x1f000000 0x21000000>;
157 };
158 };
159
160 nand@4,0 {
161 compatible = "fsl,mpc8572-fcm-nand",
162 "fsl,elbc-fcm-nand";
163 reg = <0x4 0x0 0x40000>;
164 };
165
166 nand@5,0 {
167 compatible = "fsl,mpc8572-fcm-nand",
168 "fsl,elbc-fcm-nand";
169 reg = <0x5 0x0 0x40000>;
170 };
171
172 nand@6,0 {
173 compatible = "fsl,mpc8572-fcm-nand",
174 "fsl,elbc-fcm-nand";
175 reg = <0x6 0x0 0x40000>;
176 };
177 };
178
66 soc8572@ffe00000 { 179 soc8572@ffe00000 {
67 #address-cells = <1>; 180 #address-cells = <1>;
68 #size-cells = <1>; 181 #size-cells = <1>;
@@ -225,6 +338,47 @@
225 interrupts = <10 1>; 338 interrupts = <10 1>;
226 reg = <0x3>; 339 reg = <0x3>;
227 }; 340 };
341
342 tbi0: tbi-phy@11 {
343 reg = <0x11>;
344 device_type = "tbi-phy";
345 };
346 };
347
348 mdio@25520 {
349 #address-cells = <1>;
350 #size-cells = <0>;
351 compatible = "fsl,gianfar-tbi";
352 reg = <0x25520 0x20>;
353
354 tbi1: tbi-phy@11 {
355 reg = <0x11>;
356 device_type = "tbi-phy";
357 };
358 };
359
360 mdio@26520 {
361 #address-cells = <1>;
362 #size-cells = <0>;
363 compatible = "fsl,gianfar-tbi";
364 reg = <0x26520 0x20>;
365
366 tbi2: tbi-phy@11 {
367 reg = <0x11>;
368 device_type = "tbi-phy";
369 };
370 };
371
372 mdio@27520 {
373 #address-cells = <1>;
374 #size-cells = <0>;
375 compatible = "fsl,gianfar-tbi";
376 reg = <0x27520 0x20>;
377
378 tbi3: tbi-phy@11 {
379 reg = <0x11>;
380 device_type = "tbi-phy";
381 };
228 }; 382 };
229 383
230 enet0: ethernet@24000 { 384 enet0: ethernet@24000 {
@@ -236,6 +390,7 @@
236 local-mac-address = [ 00 00 00 00 00 00 ]; 390 local-mac-address = [ 00 00 00 00 00 00 ];
237 interrupts = <29 2 30 2 34 2>; 391 interrupts = <29 2 30 2 34 2>;
238 interrupt-parent = <&mpic>; 392 interrupt-parent = <&mpic>;
393 tbi-handle = <&tbi0>;
239 phy-handle = <&phy0>; 394 phy-handle = <&phy0>;
240 phy-connection-type = "rgmii-id"; 395 phy-connection-type = "rgmii-id";
241 }; 396 };
@@ -249,6 +404,7 @@
249 local-mac-address = [ 00 00 00 00 00 00 ]; 404 local-mac-address = [ 00 00 00 00 00 00 ];
250 interrupts = <35 2 36 2 40 2>; 405 interrupts = <35 2 36 2 40 2>;
251 interrupt-parent = <&mpic>; 406 interrupt-parent = <&mpic>;
407 tbi-handle = <&tbi1>;
252 phy-handle = <&phy1>; 408 phy-handle = <&phy1>;
253 phy-connection-type = "rgmii-id"; 409 phy-connection-type = "rgmii-id";
254 }; 410 };
@@ -262,6 +418,7 @@
262 local-mac-address = [ 00 00 00 00 00 00 ]; 418 local-mac-address = [ 00 00 00 00 00 00 ];
263 interrupts = <31 2 32 2 33 2>; 419 interrupts = <31 2 32 2 33 2>;
264 interrupt-parent = <&mpic>; 420 interrupt-parent = <&mpic>;
421 tbi-handle = <&tbi2>;
265 phy-handle = <&phy2>; 422 phy-handle = <&phy2>;
266 phy-connection-type = "rgmii-id"; 423 phy-connection-type = "rgmii-id";
267 }; 424 };
@@ -275,6 +432,7 @@
275 local-mac-address = [ 00 00 00 00 00 00 ]; 432 local-mac-address = [ 00 00 00 00 00 00 ];
276 interrupts = <37 2 38 2 39 2>; 433 interrupts = <37 2 38 2 39 2>;
277 interrupt-parent = <&mpic>; 434 interrupt-parent = <&mpic>;
435 tbi-handle = <&tbi3>;
278 phy-handle = <&phy3>; 436 phy-handle = <&phy3>;
279 phy-connection-type = "rgmii-id"; 437 phy-connection-type = "rgmii-id";
280 }; 438 };
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
new file mode 100644
index 000000000000..c114c4ee9931
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
@@ -0,0 +1,483 @@
1/*
2 * MPC8572 DS Core0 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared; all the other devices must be assigned to one core only.
6 * This dts file allows core0 to have memory, l2, i2c, dma1, global-util, eth0,
7 * eth1, crypto, pci0, pci1.
8 *
9 * Copyright 2007, 2008 Freescale Semiconductor Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17/dts-v1/;
18/ {
19 model = "fsl,MPC8572DS";
20 compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
21 #address-cells = <1>;
22 #size-cells = <1>;
23
24 aliases {
25 ethernet0 = &enet0;
26 ethernet1 = &enet1;
27 serial0 = &serial0;
28 pci0 = &pci0;
29 pci1 = &pci1;
30 };
31
32 cpus {
33 #address-cells = <1>;
34 #size-cells = <0>;
35
36 PowerPC,8572@0 {
37 device_type = "cpu";
38 reg = <0x0>;
39 d-cache-line-size = <32>; // 32 bytes
40 i-cache-line-size = <32>; // 32 bytes
41 d-cache-size = <0x8000>; // L1, 32K
42 i-cache-size = <0x8000>; // L1, 32K
43 timebase-frequency = <0>;
44 bus-frequency = <0>;
45 clock-frequency = <0>;
46 next-level-cache = <&L2>;
47 };
48
49 };
50
51 memory {
52 device_type = "memory";
53 reg = <0x0 0x0>; // Filled by U-Boot
54 };
55
56 soc8572@ffe00000 {
57 #address-cells = <1>;
58 #size-cells = <1>;
59 device_type = "soc";
60 compatible = "simple-bus";
61 ranges = <0x0 0xffe00000 0x100000>;
62 reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
63 bus-frequency = <0>; // Filled out by uboot.
64
65 memory-controller@2000 {
66 compatible = "fsl,mpc8572-memory-controller";
67 reg = <0x2000 0x1000>;
68 interrupt-parent = <&mpic>;
69 interrupts = <18 2>;
70 };
71
72 memory-controller@6000 {
73 compatible = "fsl,mpc8572-memory-controller";
74 reg = <0x6000 0x1000>;
75 interrupt-parent = <&mpic>;
76 interrupts = <18 2>;
77 };
78
79 L2: l2-cache-controller@20000 {
80 compatible = "fsl,mpc8572-l2-cache-controller";
81 reg = <0x20000 0x1000>;
82 cache-line-size = <32>; // 32 bytes
83 cache-size = <0x80000>; // L2, 512K
84 interrupt-parent = <&mpic>;
85 interrupts = <16 2>;
86 };
87
88 i2c@3000 {
89 #address-cells = <1>;
90 #size-cells = <0>;
91 cell-index = <0>;
92 compatible = "fsl-i2c";
93 reg = <0x3000 0x100>;
94 interrupts = <43 2>;
95 interrupt-parent = <&mpic>;
96 dfsrr;
97 };
98
99 i2c@3100 {
100 #address-cells = <1>;
101 #size-cells = <0>;
102 cell-index = <1>;
103 compatible = "fsl-i2c";
104 reg = <0x3100 0x100>;
105 interrupts = <43 2>;
106 interrupt-parent = <&mpic>;
107 dfsrr;
108 };
109
110 dma@21300 {
111 #address-cells = <1>;
112 #size-cells = <1>;
113 compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
114 reg = <0x21300 0x4>;
115 ranges = <0x0 0x21100 0x200>;
116 cell-index = <0>;
117 dma-channel@0 {
118 compatible = "fsl,mpc8572-dma-channel",
119 "fsl,eloplus-dma-channel";
120 reg = <0x0 0x80>;
121 cell-index = <0>;
122 interrupt-parent = <&mpic>;
123 interrupts = <20 2>;
124 };
125 dma-channel@80 {
126 compatible = "fsl,mpc8572-dma-channel",
127 "fsl,eloplus-dma-channel";
128 reg = <0x80 0x80>;
129 cell-index = <1>;
130 interrupt-parent = <&mpic>;
131 interrupts = <21 2>;
132 };
133 dma-channel@100 {
134 compatible = "fsl,mpc8572-dma-channel",
135 "fsl,eloplus-dma-channel";
136 reg = <0x100 0x80>;
137 cell-index = <2>;
138 interrupt-parent = <&mpic>;
139 interrupts = <22 2>;
140 };
141 dma-channel@180 {
142 compatible = "fsl,mpc8572-dma-channel",
143 "fsl,eloplus-dma-channel";
144 reg = <0x180 0x80>;
145 cell-index = <3>;
146 interrupt-parent = <&mpic>;
147 interrupts = <23 2>;
148 };
149 };
150
151 mdio@24520 {
152 #address-cells = <1>;
153 #size-cells = <0>;
154 compatible = "fsl,gianfar-mdio";
155 reg = <0x24520 0x20>;
156
157 phy0: ethernet-phy@0 {
158 interrupt-parent = <&mpic>;
159 interrupts = <10 1>;
160 reg = <0x0>;
161 };
162 phy1: ethernet-phy@1 {
163 interrupt-parent = <&mpic>;
164 interrupts = <10 1>;
165 reg = <0x1>;
166 };
167 };
168
169 enet0: ethernet@24000 {
170 cell-index = <0>;
171 device_type = "network";
172 model = "eTSEC";
173 compatible = "gianfar";
174 reg = <0x24000 0x1000>;
175 local-mac-address = [ 00 00 00 00 00 00 ];
176 interrupts = <29 2 30 2 34 2>;
177 interrupt-parent = <&mpic>;
178 phy-handle = <&phy0>;
179 phy-connection-type = "rgmii-id";
180 };
181
182 enet1: ethernet@25000 {
183 cell-index = <1>;
184 device_type = "network";
185 model = "eTSEC";
186 compatible = "gianfar";
187 reg = <0x25000 0x1000>;
188 local-mac-address = [ 00 00 00 00 00 00 ];
189 interrupts = <35 2 36 2 40 2>;
190 interrupt-parent = <&mpic>;
191 phy-handle = <&phy1>;
192 phy-connection-type = "rgmii-id";
193 };
194
195 serial0: serial@4500 {
196 cell-index = <0>;
197 device_type = "serial";
198 compatible = "ns16550";
199 reg = <0x4500 0x100>;
200 clock-frequency = <0>;
201 };
202
203 global-utilities@e0000 { //global utilities block
204 compatible = "fsl,mpc8572-guts";
205 reg = <0xe0000 0x1000>;
206 fsl,has-rstcr;
207 };
208
209 crypto@30000 {
210 compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
211 "fsl,sec2.1", "fsl,sec2.0";
212 reg = <0x30000 0x10000>;
213 interrupts = <45 2 58 2>;
214 interrupt-parent = <&mpic>;
215 fsl,num-channels = <4>;
216 fsl,channel-fifo-len = <24>;
217 fsl,exec-units-mask = <0x9fe>;
218 fsl,descriptor-types-mask = <0x3ab0ebf>;
219 };
220
221 mpic: pic@40000 {
222 interrupt-controller;
223 #address-cells = <0>;
224 #interrupt-cells = <2>;
225 reg = <0x40000 0x40000>;
226 compatible = "chrp,open-pic";
227 device_type = "open-pic";
228 protected-sources = <
229 31 32 33 37 38 39 /* enet2 enet3 */
230				76 77 78 79 27 42 /* dma2 pci2 serial */
231 0xe0 0xe1 0xe2 0xe3 /* msi */
232 0xe4 0xe5 0xe6 0xe7
233 >;
234 };
235 };
236
237 pci0: pcie@ffe08000 {
238 cell-index = <0>;
239 compatible = "fsl,mpc8548-pcie";
240 device_type = "pci";
241 #interrupt-cells = <1>;
242 #size-cells = <2>;
243 #address-cells = <3>;
244 reg = <0xffe08000 0x1000>;
245 bus-range = <0 255>;
246 ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
247 0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>;
248 clock-frequency = <33333333>;
249 interrupt-parent = <&mpic>;
250 interrupts = <24 2>;
251 interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
252 interrupt-map = <
253 /* IDSEL 0x11 func 0 - PCI slot 1 */
254 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
255 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
256 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
257 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
258
259 /* IDSEL 0x11 func 1 - PCI slot 1 */
260 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
261 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
262 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
263 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
264
265 /* IDSEL 0x11 func 2 - PCI slot 1 */
266 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
267 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
268 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
269 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
270
271 /* IDSEL 0x11 func 3 - PCI slot 1 */
272 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
273 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
274 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
275 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
276
277 /* IDSEL 0x11 func 4 - PCI slot 1 */
278 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
279 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
280 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
281 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
282
283 /* IDSEL 0x11 func 5 - PCI slot 1 */
284 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
285 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
286 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
287 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
288
289 /* IDSEL 0x11 func 6 - PCI slot 1 */
290 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
291 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
292 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
293 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
294
295 /* IDSEL 0x11 func 7 - PCI slot 1 */
296 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
297 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
298 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
299 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
300
301 /* IDSEL 0x12 func 0 - PCI slot 2 */
302 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
303 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
304 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
305 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
306
307 /* IDSEL 0x12 func 1 - PCI slot 2 */
308 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
309 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
310 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
311 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
312
313 /* IDSEL 0x12 func 2 - PCI slot 2 */
314 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
315 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
316 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
317 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
318
319 /* IDSEL 0x12 func 3 - PCI slot 2 */
320 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
321 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
322 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
323 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
324
325 /* IDSEL 0x12 func 4 - PCI slot 2 */
326 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
327 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
328 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
329 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
330
331 /* IDSEL 0x12 func 5 - PCI slot 2 */
332 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
333 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
334 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
335 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
336
337 /* IDSEL 0x12 func 6 - PCI slot 2 */
338 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
339 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
340 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
341 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
342
343 /* IDSEL 0x12 func 7 - PCI slot 2 */
344 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
345 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
346 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
347 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
348
349 // IDSEL 0x1c USB
350 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
351 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
352 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
353 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
354
355 // IDSEL 0x1d Audio
356 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
357
358 // IDSEL 0x1e Legacy
359 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
360 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
361
362 // IDSEL 0x1f IDE/SATA
363 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
364 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
365
366 >;
367
368 pcie@0 {
369 reg = <0x0 0x0 0x0 0x0 0x0>;
370 #size-cells = <2>;
371 #address-cells = <3>;
372 device_type = "pci";
373 ranges = <0x2000000 0x0 0x80000000
374 0x2000000 0x0 0x80000000
375 0x0 0x20000000
376
377 0x1000000 0x0 0x0
378 0x1000000 0x0 0x0
379 0x0 0x100000>;
380 uli1575@0 {
381 reg = <0x0 0x0 0x0 0x0 0x0>;
382 #size-cells = <2>;
383 #address-cells = <3>;
384 ranges = <0x2000000 0x0 0x80000000
385 0x2000000 0x0 0x80000000
386 0x0 0x20000000
387
388 0x1000000 0x0 0x0
389 0x1000000 0x0 0x0
390 0x0 0x100000>;
391 isa@1e {
392 device_type = "isa";
393 #interrupt-cells = <2>;
394 #size-cells = <1>;
395 #address-cells = <2>;
396 reg = <0xf000 0x0 0x0 0x0 0x0>;
397 ranges = <0x1 0x0 0x1000000 0x0 0x0
398 0x1000>;
399 interrupt-parent = <&i8259>;
400
401 i8259: interrupt-controller@20 {
402 reg = <0x1 0x20 0x2
403 0x1 0xa0 0x2
404 0x1 0x4d0 0x2>;
405 interrupt-controller;
406 device_type = "interrupt-controller";
407 #address-cells = <0>;
408 #interrupt-cells = <2>;
409 compatible = "chrp,iic";
410 interrupts = <9 2>;
411 interrupt-parent = <&mpic>;
412 };
413
414 i8042@60 {
415 #size-cells = <0>;
416 #address-cells = <1>;
417 reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
418 interrupts = <1 3 12 3>;
419 interrupt-parent =
420 <&i8259>;
421
422 keyboard@0 {
423 reg = <0x0>;
424 compatible = "pnpPNP,303";
425 };
426
427 mouse@1 {
428 reg = <0x1>;
429 compatible = "pnpPNP,f03";
430 };
431 };
432
433 rtc@70 {
434 compatible = "pnpPNP,b00";
435 reg = <0x1 0x70 0x2>;
436 };
437
438 gpio@400 {
439 reg = <0x1 0x400 0x80>;
440 };
441 };
442 };
443 };
444
445 };
446
447 pci1: pcie@ffe09000 {
448 cell-index = <1>;
449 compatible = "fsl,mpc8548-pcie";
450 device_type = "pci";
451 #interrupt-cells = <1>;
452 #size-cells = <2>;
453 #address-cells = <3>;
454 reg = <0xffe09000 0x1000>;
455 bus-range = <0 255>;
456 ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
457 0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>;
458 clock-frequency = <33333333>;
459 interrupt-parent = <&mpic>;
460 interrupts = <26 2>;
461 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
462 interrupt-map = <
463 /* IDSEL 0x0 */
464 0000 0x0 0x0 0x1 &mpic 0x4 0x1
465 0000 0x0 0x0 0x2 &mpic 0x5 0x1
466 0000 0x0 0x0 0x3 &mpic 0x6 0x1
467 0000 0x0 0x0 0x4 &mpic 0x7 0x1
468 >;
469 pcie@0 {
470 reg = <0x0 0x0 0x0 0x0 0x0>;
471 #size-cells = <2>;
472 #address-cells = <3>;
473 device_type = "pci";
474 ranges = <0x2000000 0x0 0xa0000000
475 0x2000000 0x0 0xa0000000
476 0x0 0x20000000
477
478 0x1000000 0x0 0x0
479 0x1000000 0x0 0x0
480 0x0 0x100000>;
481 };
482 };
483};
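Both CAMP trees rely on the protected-sources list in the shared mpic node: it enumerates the raw interrupt source numbers owned by the other core, which this core's kernel must leave alone. A hedged sketch of walking such a cell list follows (illustrative only, not the mpic code's actual implementation; example_skip_protected() and the mark_protected() callback are made-up names, of_get_property() is the standard helper):

/* Sketch: walk a "protected-sources" cell list like the one in the mpic
 * node above.  Illustrative only; mark_protected() is a hypothetical
 * callback, not a real kernel API. */
#include <linux/of.h>

static void example_skip_protected(struct device_node *pic,
				   void (*mark_protected)(u32 hwirq))
{
	const u32 *src;
	int len, i;

	src = of_get_property(pic, "protected-sources", &len);
	if (!src)
		return;			/* non-CAMP trees have no such list */

	for (i = 0; i < len / 4; i++)	/* one 32-bit cell per source */
		mark_protected(src[i]);
}
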
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
new file mode 100644
index 000000000000..04ecda18d206
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
@@ -0,0 +1,234 @@
1/*
2 * MPC8572 DS Core1 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared; all the other devices must be assigned to one core only.
6 * This dts allows core1 to have l2, dma2, eth2, eth3, pci2, msi.
7 *
8 * Note: pass "-b 1" (dtc's boot-cpu option) when compiling core1's dts.
9 *
10 * Copyright 2007, 2008 Freescale Semiconductor Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18/dts-v1/;
19/ {
20 model = "fsl,MPC8572DS";
21 compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
22 #address-cells = <1>;
23 #size-cells = <1>;
24
25 aliases {
26 ethernet2 = &enet2;
27 ethernet3 = &enet3;
28 serial0 = &serial0;
29 pci2 = &pci2;
30 };
31
32 cpus {
33 #address-cells = <1>;
34 #size-cells = <0>;
35
36 PowerPC,8572@1 {
37 device_type = "cpu";
38 reg = <0x1>;
39 d-cache-line-size = <32>; // 32 bytes
40 i-cache-line-size = <32>; // 32 bytes
41 d-cache-size = <0x8000>; // L1, 32K
42 i-cache-size = <0x8000>; // L1, 32K
43 timebase-frequency = <0>;
44 bus-frequency = <0>;
45 clock-frequency = <0>;
46 next-level-cache = <&L2>;
47 };
48 };
49
50 memory {
51 device_type = "memory";
52 reg = <0x0 0x0>; // Filled by U-Boot
53 };
54
55 soc8572@ffe00000 {
56 #address-cells = <1>;
57 #size-cells = <1>;
58 device_type = "soc";
59 compatible = "simple-bus";
60 ranges = <0x0 0xffe00000 0x100000>;
61 reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
62 bus-frequency = <0>; // Filled out by uboot.
63
64 L2: l2-cache-controller@20000 {
65 compatible = "fsl,mpc8572-l2-cache-controller";
66 reg = <0x20000 0x1000>;
67 cache-line-size = <32>; // 32 bytes
68 cache-size = <0x80000>; // L2, 512K
69 interrupt-parent = <&mpic>;
70 };
71
72 dma@c300 {
73 #address-cells = <1>;
74 #size-cells = <1>;
75 compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
76 reg = <0xc300 0x4>;
77 ranges = <0x0 0xc100 0x200>;
78 cell-index = <0>;
79 dma-channel@0 {
80 compatible = "fsl,mpc8572-dma-channel",
81 "fsl,eloplus-dma-channel";
82 reg = <0x0 0x80>;
83 cell-index = <0>;
84 interrupt-parent = <&mpic>;
85 interrupts = <76 2>;
86 };
87 dma-channel@80 {
88 compatible = "fsl,mpc8572-dma-channel",
89 "fsl,eloplus-dma-channel";
90 reg = <0x80 0x80>;
91 cell-index = <1>;
92 interrupt-parent = <&mpic>;
93 interrupts = <77 2>;
94 };
95 dma-channel@100 {
96 compatible = "fsl,mpc8572-dma-channel",
97 "fsl,eloplus-dma-channel";
98 reg = <0x100 0x80>;
99 cell-index = <2>;
100 interrupt-parent = <&mpic>;
101 interrupts = <78 2>;
102 };
103 dma-channel@180 {
104 compatible = "fsl,mpc8572-dma-channel",
105 "fsl,eloplus-dma-channel";
106 reg = <0x180 0x80>;
107 cell-index = <3>;
108 interrupt-parent = <&mpic>;
109 interrupts = <79 2>;
110 };
111 };
112
113 mdio@24520 {
114 #address-cells = <1>;
115 #size-cells = <0>;
116 compatible = "fsl,gianfar-mdio";
117 reg = <0x24520 0x20>;
118
119 phy2: ethernet-phy@2 {
120 interrupt-parent = <&mpic>;
121 reg = <0x2>;
122 };
123 phy3: ethernet-phy@3 {
124 interrupt-parent = <&mpic>;
125 reg = <0x3>;
126 };
127 };
128
129 enet2: ethernet@26000 {
130 cell-index = <2>;
131 device_type = "network";
132 model = "eTSEC";
133 compatible = "gianfar";
134 reg = <0x26000 0x1000>;
135 local-mac-address = [ 00 00 00 00 00 00 ];
136 interrupts = <31 2 32 2 33 2>;
137 interrupt-parent = <&mpic>;
138 phy-handle = <&phy2>;
139 phy-connection-type = "rgmii-id";
140 };
141
142 enet3: ethernet@27000 {
143 cell-index = <3>;
144 device_type = "network";
145 model = "eTSEC";
146 compatible = "gianfar";
147 reg = <0x27000 0x1000>;
148 local-mac-address = [ 00 00 00 00 00 00 ];
149 interrupts = <37 2 38 2 39 2>;
150 interrupt-parent = <&mpic>;
151 phy-handle = <&phy3>;
152 phy-connection-type = "rgmii-id";
153 };
154
155 msi@41600 {
156 compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
157 reg = <0x41600 0x80>;
158 msi-available-ranges = <0 0x100>;
159 interrupts = <
160 0xe0 0
161 0xe1 0
162 0xe2 0
163 0xe3 0
164 0xe4 0
165 0xe5 0
166 0xe6 0
167 0xe7 0>;
168 interrupt-parent = <&mpic>;
169 };
170
171 serial0: serial@4600 {
172 cell-index = <1>;
173 device_type = "serial";
174 compatible = "ns16550";
175 reg = <0x4600 0x100>;
176 clock-frequency = <0>;
177 };
178
179 mpic: pic@40000 {
180 interrupt-controller;
181 #address-cells = <0>;
182 #interrupt-cells = <2>;
183 reg = <0x40000 0x40000>;
184 compatible = "chrp,open-pic";
185 device_type = "open-pic";
186 protected-sources = <
187 18 16 10 42 45 58 /* MEM L2 mdio serial crypto */
188 29 30 34 35 36 40 /* enet0 enet1 */
189 24 26 20 21 22 23 /* pcie0 pcie1 dma1 */
190 43 /* i2c */
191 0x1 0x2 0x3 0x4 /* pci slot */
192 0x9 0xa 0xb 0xc /* usb */
193				0x6 0x7 0xe 0x5 /* Audio legacy SATA */
194 >;
195 };
196 };
197
198 pci2: pcie@ffe0a000 {
199 cell-index = <2>;
200 compatible = "fsl,mpc8548-pcie";
201 device_type = "pci";
202 #interrupt-cells = <1>;
203 #size-cells = <2>;
204 #address-cells = <3>;
205 reg = <0xffe0a000 0x1000>;
206 bus-range = <0 255>;
207 ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
208 0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>;
209 clock-frequency = <33333333>;
210 interrupt-parent = <&mpic>;
211 interrupts = <27 2>;
212 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
213 interrupt-map = <
214 /* IDSEL 0x0 */
215 0000 0x0 0x0 0x1 &mpic 0x0 0x1
216 0000 0x0 0x0 0x2 &mpic 0x1 0x1
217 0000 0x0 0x0 0x3 &mpic 0x2 0x1
218 0000 0x0 0x0 0x4 &mpic 0x3 0x1
219 >;
220 pcie@0 {
221 reg = <0x0 0x0 0x0 0x0 0x0>;
222 #size-cells = <2>;
223 #address-cells = <3>;
224 device_type = "pci";
225 ranges = <0x2000000 0x0 0xc0000000
226 0x2000000 0x0 0xc0000000
227 0x0 0x20000000
228
229 0x1000000 0x0 0x0
230 0x1000000 0x0 0x0
231 0x0 0x100000>;
232 };
233 };
234};
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index d665e767822a..35d5e248ccd7 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -205,8 +205,49 @@
205 reg = <3>; 205 reg = <3>;
206 device_type = "ethernet-phy"; 206 device_type = "ethernet-phy";
207 }; 207 };
208 tbi0: tbi-phy@11 {
209 reg = <0x11>;
210 device_type = "tbi-phy";
211 };
212 };
213
214 mdio@25520 {
215 #address-cells = <1>;
216 #size-cells = <0>;
217 compatible = "fsl,gianfar-tbi";
218 reg = <0x25520 0x20>;
219
220 tbi1: tbi-phy@11 {
221 reg = <0x11>;
222 device_type = "tbi-phy";
223 };
224 };
225
226 mdio@26520 {
227 #address-cells = <1>;
228 #size-cells = <0>;
229 compatible = "fsl,gianfar-tbi";
230 reg = <0x26520 0x20>;
231
232 tbi2: tbi-phy@11 {
233 reg = <0x11>;
234 device_type = "tbi-phy";
235 };
236 };
237
238 mdio@27520 {
239 #address-cells = <1>;
240 #size-cells = <0>;
241 compatible = "fsl,gianfar-tbi";
242 reg = <0x27520 0x20>;
243
244 tbi3: tbi-phy@11 {
245 reg = <0x11>;
246 device_type = "tbi-phy";
247 };
208 }; 248 };
209 249
250
210 enet0: ethernet@24000 { 251 enet0: ethernet@24000 {
211 cell-index = <0>; 252 cell-index = <0>;
212 device_type = "network"; 253 device_type = "network";
@@ -216,6 +257,7 @@
216 local-mac-address = [ 00 00 00 00 00 00 ]; 257 local-mac-address = [ 00 00 00 00 00 00 ];
217 interrupts = <29 2 30 2 34 2>; 258 interrupts = <29 2 30 2 34 2>;
218 interrupt-parent = <&mpic>; 259 interrupt-parent = <&mpic>;
260 tbi-handle = <&tbi0>;
219 phy-handle = <&phy0>; 261 phy-handle = <&phy0>;
220 phy-connection-type = "rgmii-id"; 262 phy-connection-type = "rgmii-id";
221 }; 263 };
@@ -229,6 +271,7 @@
229 local-mac-address = [ 00 00 00 00 00 00 ]; 271 local-mac-address = [ 00 00 00 00 00 00 ];
230 interrupts = <35 2 36 2 40 2>; 272 interrupts = <35 2 36 2 40 2>;
231 interrupt-parent = <&mpic>; 273 interrupt-parent = <&mpic>;
274 tbi-handle = <&tbi1>;
232 phy-handle = <&phy1>; 275 phy-handle = <&phy1>;
233 phy-connection-type = "rgmii-id"; 276 phy-connection-type = "rgmii-id";
234 }; 277 };
@@ -242,6 +285,7 @@
242 local-mac-address = [ 00 00 00 00 00 00 ]; 285 local-mac-address = [ 00 00 00 00 00 00 ];
243 interrupts = <31 2 32 2 33 2>; 286 interrupts = <31 2 32 2 33 2>;
244 interrupt-parent = <&mpic>; 287 interrupt-parent = <&mpic>;
288 tbi-handle = <&tbi2>;
245 phy-handle = <&phy2>; 289 phy-handle = <&phy2>;
246 phy-connection-type = "rgmii-id"; 290 phy-connection-type = "rgmii-id";
247 }; 291 };
@@ -255,6 +299,7 @@
255 local-mac-address = [ 00 00 00 00 00 00 ]; 299 local-mac-address = [ 00 00 00 00 00 00 ];
256 interrupts = <37 2 38 2 39 2>; 300 interrupts = <37 2 38 2 39 2>;
257 interrupt-parent = <&mpic>; 301 interrupt-parent = <&mpic>;
302 tbi-handle = <&tbi3>;
258 phy-handle = <&phy3>; 303 phy-handle = <&phy3>;
259 phy-connection-type = "rgmii-id"; 304 phy-connection-type = "rgmii-id";
260 }; 305 };
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
index 7c1bb952360c..be2c11ca0594 100644
--- a/arch/powerpc/boot/dts/pcm030.dts
+++ b/arch/powerpc/boot/dts/pcm030.dts
@@ -143,7 +143,6 @@
143 143
144 rtc@800 { // Real time clock 144 rtc@800 { // Real time clock
145 compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; 145 compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
146 device_type = "rtc";
147 reg = <0x800 0x100>; 146 reg = <0x800 0x100>;
148 interrupts = <0x1 0x5 0x0 0x1 0x6 0x0>; 147 interrupts = <0x1 0x5 0x0 0x1 0x6 0x0>;
149 interrupt-parent = <&mpc5200_pic>; 148 interrupt-parent = <&mpc5200_pic>;
@@ -301,7 +300,6 @@
301 interrupt-parent = <&mpc5200_pic>; 300 interrupt-parent = <&mpc5200_pic>;
302 fsl5200-clocking; 301 fsl5200-clocking;
303 rtc@51 { 302 rtc@51 {
304 device_type = "rtc";
305 compatible = "nxp,pcf8563"; 303 compatible = "nxp,pcf8563";
306 reg = <0x51>; 304 reg = <0x51>;
307 }; 305 };
diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts
index 0f941f310e44..8d365a57ebc1 100644
--- a/arch/powerpc/boot/dts/sbc8349.dts
+++ b/arch/powerpc/boot/dts/sbc8349.dts
@@ -177,6 +177,22 @@
177 reg = <0x1a>; 177 reg = <0x1a>;
178 device_type = "ethernet-phy"; 178 device_type = "ethernet-phy";
179 }; 179 };
180 tbi0: tbi-phy@11 {
181 reg = <0x11>;
182 device_type = "tbi-phy";
183 };
184 };
185
186 mdio@25520 {
187 #address-cells = <1>;
188 #size-cells = <0>;
189 compatible = "fsl,gianfar-tbi";
190 reg = <0x25520 0x20>;
191
192 tbi1: tbi-phy@11 {
193 reg = <0x11>;
194 device_type = "tbi-phy";
195 };
180 }; 196 };
181 197
182 enet0: ethernet@24000 { 198 enet0: ethernet@24000 {
@@ -188,6 +204,7 @@
188 local-mac-address = [ 00 00 00 00 00 00 ]; 204 local-mac-address = [ 00 00 00 00 00 00 ];
189 interrupts = <32 0x8 33 0x8 34 0x8>; 205 interrupts = <32 0x8 33 0x8 34 0x8>;
190 interrupt-parent = <&ipic>; 206 interrupt-parent = <&ipic>;
207 tbi-handle = <&tbi0>;
191 phy-handle = <&phy0>; 208 phy-handle = <&phy0>;
192 linux,network-index = <0>; 209 linux,network-index = <0>;
193 }; 210 };
@@ -201,6 +218,7 @@
201 local-mac-address = [ 00 00 00 00 00 00 ]; 218 local-mac-address = [ 00 00 00 00 00 00 ];
202 interrupts = <35 0x8 36 0x8 37 0x8>; 219 interrupts = <35 0x8 36 0x8 37 0x8>;
203 interrupt-parent = <&ipic>; 220 interrupt-parent = <&ipic>;
221 tbi-handle = <&tbi1>;
204 phy-handle = <&phy1>; 222 phy-handle = <&phy1>;
205 linux,network-index = <1>; 223 linux,network-index = <1>;
206 }; 224 };
diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts
index 333552b4e90d..2baf4a51f224 100644
--- a/arch/powerpc/boot/dts/sbc8548.dts
+++ b/arch/powerpc/boot/dts/sbc8548.dts
@@ -252,6 +252,22 @@
252 reg = <0x1a>; 252 reg = <0x1a>;
253 device_type = "ethernet-phy"; 253 device_type = "ethernet-phy";
254 }; 254 };
255 tbi0: tbi-phy@11 {
256 reg = <0x11>;
257 device_type = "tbi-phy";
258 };
259 };
260
261 mdio@25520 {
262 #address-cells = <1>;
263 #size-cells = <0>;
264 compatible = "fsl,gianfar-tbi";
265 reg = <0x25520 0x20>;
266
267 tbi1: tbi-phy@11 {
268 reg = <0x11>;
269 device_type = "tbi-phy";
270 };
255 }; 271 };
256 272
257 enet0: ethernet@24000 { 273 enet0: ethernet@24000 {
@@ -263,6 +279,7 @@
263 local-mac-address = [ 00 00 00 00 00 00 ]; 279 local-mac-address = [ 00 00 00 00 00 00 ];
264 interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>; 280 interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
265 interrupt-parent = <&mpic>; 281 interrupt-parent = <&mpic>;
282 tbi-handle = <&tbi0>;
266 phy-handle = <&phy0>; 283 phy-handle = <&phy0>;
267 }; 284 };
268 285
@@ -275,6 +292,7 @@
275 local-mac-address = [ 00 00 00 00 00 00 ]; 292 local-mac-address = [ 00 00 00 00 00 00 ];
276 interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>; 293 interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
277 interrupt-parent = <&mpic>; 294 interrupt-parent = <&mpic>;
295 tbi-handle = <&tbi1>;
278 phy-handle = <&phy1>; 296 phy-handle = <&phy1>;
279 }; 297 };
280 298
diff --git a/arch/powerpc/boot/dts/sbc8560.dts b/arch/powerpc/boot/dts/sbc8560.dts
index db3632ef9888..01542f7062ab 100644
--- a/arch/powerpc/boot/dts/sbc8560.dts
+++ b/arch/powerpc/boot/dts/sbc8560.dts
@@ -168,6 +168,22 @@
168 reg = <0x1c>; 168 reg = <0x1c>;
169 device_type = "ethernet-phy"; 169 device_type = "ethernet-phy";
170 }; 170 };
171 tbi0: tbi-phy@11 {
172 reg = <0x11>;
173 device_type = "tbi-phy";
174 };
175 };
176
177 mdio@25520 {
178 #address-cells = <1>;
179 #size-cells = <0>;
180 compatible = "fsl,gianfar-tbi";
181 reg = <0x25520 0x20>;
182
183 tbi1: tbi-phy@11 {
184 reg = <0x11>;
185 device_type = "tbi-phy";
186 };
171 }; 187 };
172 188
173 enet0: ethernet@24000 { 189 enet0: ethernet@24000 {
@@ -179,6 +195,7 @@
179 local-mac-address = [ 00 00 00 00 00 00 ]; 195 local-mac-address = [ 00 00 00 00 00 00 ];
180 interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>; 196 interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
181 interrupt-parent = <&mpic>; 197 interrupt-parent = <&mpic>;
198 tbi-handle = <&tbi0>;
182 phy-handle = <&phy0>; 199 phy-handle = <&phy0>;
183 }; 200 };
184 201
@@ -191,6 +208,7 @@
191 local-mac-address = [ 00 00 00 00 00 00 ]; 208 local-mac-address = [ 00 00 00 00 00 00 ];
192 interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>; 209 interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
193 interrupt-parent = <&mpic>; 210 interrupt-parent = <&mpic>;
211 tbi-handle = <&tbi1>;
194 phy-handle = <&phy1>; 212 phy-handle = <&phy1>;
195 }; 213 };
196 214
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index 9652456158fb..36db981548e4 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -222,6 +222,46 @@
222 reg = <2>; 222 reg = <2>;
223 device_type = "ethernet-phy"; 223 device_type = "ethernet-phy";
224 }; 224 };
225 tbi0: tbi-phy@11 {
226 reg = <0x11>;
227 device_type = "tbi-phy";
228 };
229 };
230
231 mdio@25520 {
232 #address-cells = <1>;
233 #size-cells = <0>;
234 compatible = "fsl,gianfar-tbi";
235 reg = <0x25520 0x20>;
236
237 tbi1: tbi-phy@11 {
238 reg = <0x11>;
239 device_type = "tbi-phy";
240 };
241 };
242
243 mdio@26520 {
244 #address-cells = <1>;
245 #size-cells = <0>;
246 compatible = "fsl,gianfar-tbi";
247 reg = <0x26520 0x20>;
248
249 tbi2: tbi-phy@11 {
250 reg = <0x11>;
251 device_type = "tbi-phy";
252 };
253 };
254
255 mdio@27520 {
256 #address-cells = <1>;
257 #size-cells = <0>;
258 compatible = "fsl,gianfar-tbi";
259 reg = <0x27520 0x20>;
260
261 tbi3: tbi-phy@11 {
262 reg = <0x11>;
263 device_type = "tbi-phy";
264 };
225 }; 265 };
226 266
227 enet0: ethernet@24000 { 267 enet0: ethernet@24000 {
@@ -233,6 +273,7 @@
233 local-mac-address = [ 00 00 00 00 00 00 ]; 273 local-mac-address = [ 00 00 00 00 00 00 ];
234 interrupts = <29 2 30 2 34 2>; 274 interrupts = <29 2 30 2 34 2>;
235 interrupt-parent = <&mpic>; 275 interrupt-parent = <&mpic>;
276 tbi-handle = <&tbi0>;
236 phy-handle = <&phy0>; 277 phy-handle = <&phy0>;
237 phy-connection-type = "rgmii-id"; 278 phy-connection-type = "rgmii-id";
238 }; 279 };
@@ -246,6 +287,7 @@
246 local-mac-address = [ 00 00 00 00 00 00 ]; 287 local-mac-address = [ 00 00 00 00 00 00 ];
247 interrupts = <35 2 36 2 40 2>; 288 interrupts = <35 2 36 2 40 2>;
248 interrupt-parent = <&mpic>; 289 interrupt-parent = <&mpic>;
290 tbi-handle = <&tbi1>;
249 phy-handle = <&phy1>; 291 phy-handle = <&phy1>;
250 phy-connection-type = "rgmii-id"; 292 phy-connection-type = "rgmii-id";
251 }; 293 };
@@ -259,6 +301,7 @@
259 local-mac-address = [ 00 00 00 00 00 00 ]; 301 local-mac-address = [ 00 00 00 00 00 00 ];
260 interrupts = <31 2 32 2 33 2>; 302 interrupts = <31 2 32 2 33 2>;
261 interrupt-parent = <&mpic>; 303 interrupt-parent = <&mpic>;
304 tbi-handle = <&tbi2>;
262 phy-handle = <&phy2>; 305 phy-handle = <&phy2>;
263 phy-connection-type = "rgmii-id"; 306 phy-connection-type = "rgmii-id";
264 }; 307 };
@@ -272,6 +315,7 @@
272 local-mac-address = [ 00 00 00 00 00 00 ]; 315 local-mac-address = [ 00 00 00 00 00 00 ];
273 interrupts = <37 2 38 2 39 2>; 316 interrupts = <37 2 38 2 39 2>;
274 interrupt-parent = <&mpic>; 317 interrupt-parent = <&mpic>;
318 tbi-handle = <&tbi3>;
275 phy-handle = <&phy3>; 319 phy-handle = <&phy3>;
276 phy-connection-type = "rgmii-id"; 320 phy-connection-type = "rgmii-id";
277 }; 321 };
diff --git a/arch/powerpc/boot/dts/stx_gp3_8560.dts b/arch/powerpc/boot/dts/stx_gp3_8560.dts
index fcd1db6ca0a8..fff33fe6efc6 100644
--- a/arch/powerpc/boot/dts/stx_gp3_8560.dts
+++ b/arch/powerpc/boot/dts/stx_gp3_8560.dts
@@ -142,6 +142,22 @@
142 reg = <4>; 142 reg = <4>;
143 device_type = "ethernet-phy"; 143 device_type = "ethernet-phy";
144 }; 144 };
145 tbi0: tbi-phy@11 {
146 reg = <0x11>;
147 device_type = "tbi-phy";
148 };
149 };
150
151 mdio@25520 {
152 #address-cells = <1>;
153 #size-cells = <0>;
154 compatible = "fsl,gianfar-tbi";
155 reg = <0x25520 0x20>;
156
157 tbi1: tbi-phy@11 {
158 reg = <0x11>;
159 device_type = "tbi-phy";
160 };
145 }; 161 };
146 162
147 enet0: ethernet@24000 { 163 enet0: ethernet@24000 {
@@ -153,6 +169,7 @@
153 local-mac-address = [ 00 00 00 00 00 00 ]; 169 local-mac-address = [ 00 00 00 00 00 00 ];
154 interrupts = <29 2 30 2 34 2>; 170 interrupts = <29 2 30 2 34 2>;
155 interrupt-parent = <&mpic>; 171 interrupt-parent = <&mpic>;
172 tbi-handle = <&tbi0>;
156 phy-handle = <&phy2>; 173 phy-handle = <&phy2>;
157 }; 174 };
158 175
@@ -165,6 +182,7 @@
165 local-mac-address = [ 00 00 00 00 00 00 ]; 182 local-mac-address = [ 00 00 00 00 00 00 ];
166 interrupts = <35 2 36 2 40 2>; 183 interrupts = <35 2 36 2 40 2>;
167 interrupt-parent = <&mpic>; 184 interrupt-parent = <&mpic>;
185 tbi-handle = <&tbi1>;
168 phy-handle = <&phy4>; 186 phy-handle = <&phy4>;
169 }; 187 };
170 188
diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts
index 3008bf8830c1..906302e26a62 100644
--- a/arch/powerpc/boot/dts/tqm5200.dts
+++ b/arch/powerpc/boot/dts/tqm5200.dts
@@ -181,7 +181,6 @@
181 fsl5200-clocking; 181 fsl5200-clocking;
182 182
183 rtc@68 { 183 rtc@68 {
184 device_type = "rtc";
185 compatible = "dallas,ds1307"; 184 compatible = "dallas,ds1307";
186 reg = <0x68>; 185 reg = <0x68>;
187 }; 186 };
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index e1d260b9085e..a693f01c21aa 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -155,6 +155,34 @@
155 reg = <3>; 155 reg = <3>;
156 device_type = "ethernet-phy"; 156 device_type = "ethernet-phy";
157 }; 157 };
158 tbi0: tbi-phy@11 {
159 reg = <0x11>;
160 device_type = "tbi-phy";
161 };
162 };
163
164 mdio@25520 {
165 #address-cells = <1>;
166 #size-cells = <0>;
167 compatible = "fsl,gianfar-tbi";
168 reg = <0x25520 0x20>;
169
170 tbi1: tbi-phy@11 {
171 reg = <0x11>;
172 device_type = "tbi-phy";
173 };
174 };
175
176 mdio@26520 {
177 #address-cells = <1>;
178 #size-cells = <0>;
179 compatible = "fsl,gianfar-tbi";
180 reg = <0x26520 0x20>;
181
182 tbi2: tbi-phy@11 {
183 reg = <0x11>;
184 device_type = "tbi-phy";
185 };
158 }; 186 };
159 187
160 enet0: ethernet@24000 { 188 enet0: ethernet@24000 {
diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
index d76441ec5dc7..9e3f5f0dde20 100644
--- a/arch/powerpc/boot/dts/tqm8541.dts
+++ b/arch/powerpc/boot/dts/tqm8541.dts
@@ -154,6 +154,22 @@
154 reg = <3>; 154 reg = <3>;
155 device_type = "ethernet-phy"; 155 device_type = "ethernet-phy";
156 }; 156 };
157 tbi0: tbi-phy@11 {
158 reg = <0x11>;
159 device_type = "tbi-phy";
160 };
161 };
162
163 mdio@25520 {
164 #address-cells = <1>;
165 #size-cells = <0>;
166 compatible = "fsl,gianfar-tbi";
167 reg = <0x25520 0x20>;
168
169 tbi1: tbi-phy@11 {
170 reg = <0x11>;
171 device_type = "tbi-phy";
172 };
157 }; 173 };
158 174
159 enet0: ethernet@24000 { 175 enet0: ethernet@24000 {
@@ -165,6 +181,7 @@
165 local-mac-address = [ 00 00 00 00 00 00 ]; 181 local-mac-address = [ 00 00 00 00 00 00 ];
166 interrupts = <29 2 30 2 34 2>; 182 interrupts = <29 2 30 2 34 2>;
167 interrupt-parent = <&mpic>; 183 interrupt-parent = <&mpic>;
184 tbi-handle = <&tbi0>;
168 phy-handle = <&phy2>; 185 phy-handle = <&phy2>;
169 }; 186 };
170 187
@@ -177,6 +194,7 @@
177 local-mac-address = [ 00 00 00 00 00 00 ]; 194 local-mac-address = [ 00 00 00 00 00 00 ];
178 interrupts = <35 2 36 2 40 2>; 195 interrupts = <35 2 36 2 40 2>;
179 interrupt-parent = <&mpic>; 196 interrupt-parent = <&mpic>;
197 tbi-handle = <&tbi1>;
180 phy-handle = <&phy1>; 198 phy-handle = <&phy1>;
181 }; 199 };
182 200
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 4199e89b4e50..15086eb65c50 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -179,6 +179,46 @@
179 reg = <5>; 179 reg = <5>;
180 device_type = "ethernet-phy"; 180 device_type = "ethernet-phy";
181 }; 181 };
182 tbi0: tbi-phy@11 {
183 reg = <0x11>;
184 device_type = "tbi-phy";
185 };
186 };
187
188 mdio@25520 {
189 #address-cells = <1>;
190 #size-cells = <0>;
191 compatible = "fsl,gianfar-tbi";
192 reg = <0x25520 0x20>;
193
194 tbi1: tbi-phy@11 {
195 reg = <0x11>;
196 device_type = "tbi-phy";
197 };
198 };
199
200 mdio@26520 {
201 #address-cells = <1>;
202 #size-cells = <0>;
203 compatible = "fsl,gianfar-tbi";
204 reg = <0x26520 0x20>;
205
206 tbi2: tbi-phy@11 {
207 reg = <0x11>;
208 device_type = "tbi-phy";
209 };
210 };
211
212 mdio@27520 {
213 #address-cells = <1>;
214 #size-cells = <0>;
215 compatible = "fsl,gianfar-tbi";
216 reg = <0x27520 0x20>;
217
218 tbi3: tbi-phy@11 {
219 reg = <0x11>;
220 device_type = "tbi-phy";
221 };
182 }; 222 };
183 223
184 enet0: ethernet@24000 { 224 enet0: ethernet@24000 {
@@ -190,6 +230,7 @@
190 local-mac-address = [ 00 00 00 00 00 00 ]; 230 local-mac-address = [ 00 00 00 00 00 00 ];
191 interrupts = <29 2 30 2 34 2>; 231 interrupts = <29 2 30 2 34 2>;
192 interrupt-parent = <&mpic>; 232 interrupt-parent = <&mpic>;
233 tbi-handle = <&tbi0>;
193 phy-handle = <&phy2>; 234 phy-handle = <&phy2>;
194 }; 235 };
195 236
@@ -202,6 +243,7 @@
202 local-mac-address = [ 00 00 00 00 00 00 ]; 243 local-mac-address = [ 00 00 00 00 00 00 ];
203 interrupts = <35 2 36 2 40 2>; 244 interrupts = <35 2 36 2 40 2>;
204 interrupt-parent = <&mpic>; 245 interrupt-parent = <&mpic>;
246 tbi-handle = <&tbi1>;
205 phy-handle = <&phy1>; 247 phy-handle = <&phy1>;
206 }; 248 };
207 249
@@ -214,6 +256,7 @@
214 local-mac-address = [ 00 00 00 00 00 00 ]; 256 local-mac-address = [ 00 00 00 00 00 00 ];
215 interrupts = <31 2 32 2 33 2>; 257 interrupts = <31 2 32 2 33 2>;
216 interrupt-parent = <&mpic>; 258 interrupt-parent = <&mpic>;
259 tbi-handle = <&tbi2>;
217 phy-handle = <&phy3>; 260 phy-handle = <&phy3>;
218 }; 261 };
219 262
@@ -226,6 +269,7 @@
226 local-mac-address = [ 00 00 00 00 00 00 ]; 269 local-mac-address = [ 00 00 00 00 00 00 ];
227 interrupts = <37 2 38 2 39 2>; 270 interrupts = <37 2 38 2 39 2>;
228 interrupt-parent = <&mpic>; 271 interrupt-parent = <&mpic>;
272 tbi-handle = <&tbi3>;
229 phy-handle = <&phy4>; 273 phy-handle = <&phy4>;
230 }; 274 };
231 275
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index 58ee4185454b..b7b65f5e79b6 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -179,6 +179,46 @@
179 reg = <5>; 179 reg = <5>;
180 device_type = "ethernet-phy"; 180 device_type = "ethernet-phy";
181 }; 181 };
182 tbi0: tbi-phy@11 {
183 reg = <0x11>;
184 device_type = "tbi-phy";
185 };
186 };
187
188 mdio@25520 {
189 #address-cells = <1>;
190 #size-cells = <0>;
191 compatible = "fsl,gianfar-tbi";
192 reg = <0x25520 0x20>;
193
194 tbi1: tbi-phy@11 {
195 reg = <0x11>;
196 device_type = "tbi-phy";
197 };
198 };
199
200 mdio@26520 {
201 #address-cells = <1>;
202 #size-cells = <0>;
203 compatible = "fsl,gianfar-tbi";
204 reg = <0x26520 0x20>;
205
206 tbi2: tbi-phy@11 {
207 reg = <0x11>;
208 device_type = "tbi-phy";
209 };
210 };
211
212 mdio@27520 {
213 #address-cells = <1>;
214 #size-cells = <0>;
215 compatible = "fsl,gianfar-tbi";
216 reg = <0x27520 0x20>;
217
218 tbi3: tbi-phy@11 {
219 reg = <0x11>;
220 device_type = "tbi-phy";
221 };
182 }; 222 };
183 223
184 enet0: ethernet@24000 { 224 enet0: ethernet@24000 {
@@ -190,6 +230,7 @@
190 local-mac-address = [ 00 00 00 00 00 00 ]; 230 local-mac-address = [ 00 00 00 00 00 00 ];
191 interrupts = <29 2 30 2 34 2>; 231 interrupts = <29 2 30 2 34 2>;
192 interrupt-parent = <&mpic>; 232 interrupt-parent = <&mpic>;
233 tbi-handle = <&tbi0>;
193 phy-handle = <&phy2>; 234 phy-handle = <&phy2>;
194 }; 235 };
195 236
@@ -202,6 +243,7 @@
202 local-mac-address = [ 00 00 00 00 00 00 ]; 243 local-mac-address = [ 00 00 00 00 00 00 ];
203 interrupts = <35 2 36 2 40 2>; 244 interrupts = <35 2 36 2 40 2>;
204 interrupt-parent = <&mpic>; 245 interrupt-parent = <&mpic>;
246 tbi-handle = <&tbi1>;
205 phy-handle = <&phy1>; 247 phy-handle = <&phy1>;
206 }; 248 };
207 249
@@ -214,6 +256,7 @@
214 local-mac-address = [ 00 00 00 00 00 00 ]; 256 local-mac-address = [ 00 00 00 00 00 00 ];
215 interrupts = <31 2 32 2 33 2>; 257 interrupts = <31 2 32 2 33 2>;
216 interrupt-parent = <&mpic>; 258 interrupt-parent = <&mpic>;
259 tbi-handle = <&tbi2>;
217 phy-handle = <&phy3>; 260 phy-handle = <&phy3>;
218 }; 261 };
219 262
@@ -226,6 +269,7 @@
226 local-mac-address = [ 00 00 00 00 00 00 ]; 269 local-mac-address = [ 00 00 00 00 00 00 ];
227 interrupts = <37 2 38 2 39 2>; 270 interrupts = <37 2 38 2 39 2>;
228 interrupt-parent = <&mpic>; 271 interrupt-parent = <&mpic>;
272 tbi-handle = <&tbi3>;
229 phy-handle = <&phy4>; 273 phy-handle = <&phy4>;
230 }; 274 };
231 275
diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
index 6f7ea59c4846..cf92b4e7945e 100644
--- a/arch/powerpc/boot/dts/tqm8555.dts
+++ b/arch/powerpc/boot/dts/tqm8555.dts
@@ -154,6 +154,22 @@
154 reg = <3>; 154 reg = <3>;
155 device_type = "ethernet-phy"; 155 device_type = "ethernet-phy";
156 }; 156 };
157 tbi0: tbi-phy@11 {
158 reg = <0x11>;
159 device_type = "tbi-phy";
160 };
161 };
162
163 mdio@25520 {
164 #address-cells = <1>;
165 #size-cells = <0>;
166 compatible = "fsl,gianfar-tbi";
167 reg = <0x25520 0x20>;
168
169 tbi1: tbi-phy@11 {
170 reg = <0x11>;
171 device_type = "tbi-phy";
172 };
157 }; 173 };
158 174
159 enet0: ethernet@24000 { 175 enet0: ethernet@24000 {
@@ -165,6 +181,7 @@
165 local-mac-address = [ 00 00 00 00 00 00 ]; 181 local-mac-address = [ 00 00 00 00 00 00 ];
166 interrupts = <29 2 30 2 34 2>; 182 interrupts = <29 2 30 2 34 2>;
167 interrupt-parent = <&mpic>; 183 interrupt-parent = <&mpic>;
184 tbi-handle = <&tbi0>;
168 phy-handle = <&phy2>; 185 phy-handle = <&phy2>;
169 }; 186 };
170 187
@@ -177,6 +194,7 @@
177 local-mac-address = [ 00 00 00 00 00 00 ]; 194 local-mac-address = [ 00 00 00 00 00 00 ];
178 interrupts = <35 2 36 2 40 2>; 195 interrupts = <35 2 36 2 40 2>;
179 interrupt-parent = <&mpic>; 196 interrupt-parent = <&mpic>;
197 tbi-handle = <&tbi1>;
180 phy-handle = <&phy1>; 198 phy-handle = <&phy1>;
181 }; 199 };
182 200
diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
index 3fe35208907b..9e1ab2d2f669 100644
--- a/arch/powerpc/boot/dts/tqm8560.dts
+++ b/arch/powerpc/boot/dts/tqm8560.dts
@@ -156,6 +156,22 @@
156 reg = <3>; 156 reg = <3>;
157 device_type = "ethernet-phy"; 157 device_type = "ethernet-phy";
158 }; 158 };
159 tbi0: tbi-phy@11 {
160 reg = <0x11>;
161 device_type = "tbi-phy";
162 };
163 };
164
165 mdio@25520 {
166 #address-cells = <1>;
167 #size-cells = <0>;
168 compatible = "fsl,gianfar-tbi";
169 reg = <0x25520 0x20>;
170
171 tbi1: tbi-phy@11 {
172 reg = <0x11>;
173 device_type = "tbi-phy";
174 };
159 }; 175 };
160 176
161 enet0: ethernet@24000 { 177 enet0: ethernet@24000 {
@@ -167,6 +183,7 @@
167 local-mac-address = [ 00 00 00 00 00 00 ]; 183 local-mac-address = [ 00 00 00 00 00 00 ];
168 interrupts = <29 2 30 2 34 2>; 184 interrupts = <29 2 30 2 34 2>;
169 interrupt-parent = <&mpic>; 185 interrupt-parent = <&mpic>;
186 tbi-handle = <&tbi0>;
170 phy-handle = <&phy2>; 187 phy-handle = <&phy2>;
171 }; 188 };
172 189
@@ -179,6 +196,7 @@
179 local-mac-address = [ 00 00 00 00 00 00 ]; 196 local-mac-address = [ 00 00 00 00 00 00 ];
180 interrupts = <35 2 36 2 40 2>; 197 interrupts = <35 2 36 2 40 2>;
181 interrupt-parent = <&mpic>; 198 interrupt-parent = <&mpic>;
199 tbi-handle = <&tbi1>;
182 phy-handle = <&phy1>; 200 phy-handle = <&phy1>;
183 }; 201 };
184 202
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
index 9276327bc2bb..bb8b9b3505ee 100644
--- a/arch/powerpc/boot/libfdt-wrapper.c
+++ b/arch/powerpc/boot/libfdt-wrapper.c
@@ -185,7 +185,7 @@ void fdt_init(void *blob)
185 185
186 /* Make sure the dt blob is the right version and so forth */ 186 /* Make sure the dt blob is the right version and so forth */
187 fdt = blob; 187 fdt = blob;
188 bufsize = fdt_totalsize(fdt) + 4; 188 bufsize = fdt_totalsize(fdt) + EXPAND_GRANULARITY;
189 buf = malloc(bufsize); 189 buf = malloc(bufsize);
190 if(!buf) 190 if(!buf)
191 fatal("malloc failed. can't relocate the device tree\n\r"); 191 fatal("malloc failed. can't relocate the device tree\n\r");
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 07ccaf89f379..cd1ffa449327 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -1397,8 +1397,11 @@ CONFIG_USB_STORAGE=y
1397# CONFIG_ACCESSIBILITY is not set 1397# CONFIG_ACCESSIBILITY is not set
1398# CONFIG_INFINIBAND is not set 1398# CONFIG_INFINIBAND is not set
1399# CONFIG_EDAC is not set 1399# CONFIG_EDAC is not set
1400CONFIG_RTC_LIB=m 1400CONFIG_RTC_LIB=y
1401CONFIG_RTC_CLASS=m 1401CONFIG_RTC_CLASS=y
1402CONFIG_RTC_HCTOSYS=y
1403CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1404# CONFIG_RTC_DEBUG is not set
1402 1405
1403# 1406#
1404# RTC interfaces 1407# RTC interfaces
@@ -1424,6 +1427,7 @@ CONFIG_RTC_INTF_DEV=y
1424# CONFIG_RTC_DRV_M41T80 is not set 1427# CONFIG_RTC_DRV_M41T80 is not set
1425# CONFIG_RTC_DRV_S35390A is not set 1428# CONFIG_RTC_DRV_S35390A is not set
1426# CONFIG_RTC_DRV_FM3130 is not set 1429# CONFIG_RTC_DRV_FM3130 is not set
1430CONFIG_RTC_DRV_RX8581=y
1427 1431
1428# 1432#
1429# SPI RTC drivers 1433# SPI RTC drivers
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index cfc94cfcf4cb..034a1fbdc887 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -267,7 +267,7 @@ CONFIG_PCI_SYSCALL=y
267# CONFIG_PCIEPORTBUS is not set 267# CONFIG_PCIEPORTBUS is not set
268CONFIG_ARCH_SUPPORTS_MSI=y 268CONFIG_ARCH_SUPPORTS_MSI=y
269# CONFIG_PCI_MSI is not set 269# CONFIG_PCI_MSI is not set
270CONFIG_PCI_LEGACY=y 270# CONFIG_PCI_LEGACY is not set
271# CONFIG_PCI_DEBUG is not set 271# CONFIG_PCI_DEBUG is not set
272# CONFIG_PCCARD is not set 272# CONFIG_PCCARD is not set
273# CONFIG_HOTPLUG_PCI is not set 273# CONFIG_HOTPLUG_PCI is not set
@@ -354,7 +354,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
354# CONFIG_IP_SCTP is not set 354# CONFIG_IP_SCTP is not set
355# CONFIG_TIPC is not set 355# CONFIG_TIPC is not set
356# CONFIG_ATM is not set 356# CONFIG_ATM is not set
357# CONFIG_BRIDGE is not set 357CONFIG_BRIDGE=m
358# CONFIG_NET_DSA is not set 358# CONFIG_NET_DSA is not set
359# CONFIG_VLAN_8021Q is not set 359# CONFIG_VLAN_8021Q is not set
360# CONFIG_DECNET is not set 360# CONFIG_DECNET is not set
@@ -579,7 +579,7 @@ CONFIG_NETDEVICES=y
579# CONFIG_BONDING is not set 579# CONFIG_BONDING is not set
580# CONFIG_MACVLAN is not set 580# CONFIG_MACVLAN is not set
581# CONFIG_EQUALIZER is not set 581# CONFIG_EQUALIZER is not set
582# CONFIG_TUN is not set 582CONFIG_TUN=m
583# CONFIG_VETH is not set 583# CONFIG_VETH is not set
584# CONFIG_ARCNET is not set 584# CONFIG_ARCNET is not set
585# CONFIG_PHYLIB is not set 585# CONFIG_PHYLIB is not set
@@ -1001,11 +1001,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1001# CONFIG_USB_TMC is not set 1001# CONFIG_USB_TMC is not set
1002 1002
1003# 1003#
1004# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1004# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
1005# 1005#
1006 1006
1007# 1007#
1008# may also be needed; see USB_STORAGE Help for more information 1008# see USB_STORAGE Help for more information
1009# 1009#
1010CONFIG_USB_STORAGE=m 1010CONFIG_USB_STORAGE=m
1011# CONFIG_USB_STORAGE_DEBUG is not set 1011# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1418,6 +1418,6 @@ CONFIG_CRYPTO_LZO=m
1418# CONFIG_PPC_CLOCK is not set 1418# CONFIG_PPC_CLOCK is not set
1419CONFIG_VIRTUALIZATION=y 1419CONFIG_VIRTUALIZATION=y
1420CONFIG_KVM=y 1420CONFIG_KVM=y
1421CONFIG_KVM_BOOKE_HOST=y 1421CONFIG_KVM_440=y
1422# CONFIG_VIRTIO_PCI is not set 1422# CONFIG_VIRTIO_PCI is not set
1423# CONFIG_VIRTIO_BALLOON is not set 1423# CONFIG_VIRTIO_BALLOON is not set
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index f3fc733758f5..499be5bdd6fa 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -111,7 +111,7 @@ static __inline__ void atomic_inc(atomic_t *v)
111 bne- 1b" 111 bne- 1b"
112 : "=&r" (t), "+m" (v->counter) 112 : "=&r" (t), "+m" (v->counter)
113 : "r" (&v->counter) 113 : "r" (&v->counter)
114 : "cc"); 114 : "cc", "xer");
115} 115}
116 116
117static __inline__ int atomic_inc_return(atomic_t *v) 117static __inline__ int atomic_inc_return(atomic_t *v)
@@ -128,7 +128,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
128 ISYNC_ON_SMP 128 ISYNC_ON_SMP
129 : "=&r" (t) 129 : "=&r" (t)
130 : "r" (&v->counter) 130 : "r" (&v->counter)
131 : "cc", "memory"); 131 : "cc", "xer", "memory");
132 132
133 return t; 133 return t;
134} 134}
@@ -155,7 +155,7 @@ static __inline__ void atomic_dec(atomic_t *v)
155 bne- 1b" 155 bne- 1b"
156 : "=&r" (t), "+m" (v->counter) 156 : "=&r" (t), "+m" (v->counter)
157 : "r" (&v->counter) 157 : "r" (&v->counter)
158 : "cc"); 158 : "cc", "xer");
159} 159}
160 160
161static __inline__ int atomic_dec_return(atomic_t *v) 161static __inline__ int atomic_dec_return(atomic_t *v)
@@ -172,7 +172,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
172 ISYNC_ON_SMP 172 ISYNC_ON_SMP
173 : "=&r" (t) 173 : "=&r" (t)
174 : "r" (&v->counter) 174 : "r" (&v->counter)
175 : "cc", "memory"); 175 : "cc", "xer", "memory");
176 176
177 return t; 177 return t;
178} 178}
@@ -346,7 +346,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
346 bne- 1b" 346 bne- 1b"
347 : "=&r" (t), "+m" (v->counter) 347 : "=&r" (t), "+m" (v->counter)
348 : "r" (&v->counter) 348 : "r" (&v->counter)
349 : "cc"); 349 : "cc", "xer");
350} 350}
351 351
352static __inline__ long atomic64_inc_return(atomic64_t *v) 352static __inline__ long atomic64_inc_return(atomic64_t *v)
@@ -362,7 +362,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
362 ISYNC_ON_SMP 362 ISYNC_ON_SMP
363 : "=&r" (t) 363 : "=&r" (t)
364 : "r" (&v->counter) 364 : "r" (&v->counter)
365 : "cc", "memory"); 365 : "cc", "xer", "memory");
366 366
367 return t; 367 return t;
368} 368}
@@ -388,7 +388,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
388 bne- 1b" 388 bne- 1b"
389 : "=&r" (t), "+m" (v->counter) 389 : "=&r" (t), "+m" (v->counter)
390 : "r" (&v->counter) 390 : "r" (&v->counter)
391 : "cc"); 391 : "cc", "xer");
392} 392}
393 393
394static __inline__ long atomic64_dec_return(atomic64_t *v) 394static __inline__ long atomic64_dec_return(atomic64_t *v)
@@ -404,7 +404,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
404 ISYNC_ON_SMP 404 ISYNC_ON_SMP
405 : "=&r" (t) 405 : "=&r" (t)
406 : "r" (&v->counter) 406 : "r" (&v->counter)
407 : "cc", "memory"); 407 : "cc", "xer", "memory");
408 408
409 return t; 409 return t;
410} 410}
@@ -431,7 +431,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
431 "\n\ 431 "\n\
4322:" : "=&r" (t) 4322:" : "=&r" (t)
433 : "r" (&v->counter) 433 : "r" (&v->counter)
434 : "cc", "memory"); 434 : "cc", "xer", "memory");
435 435
436 return t; 436 return t;
437} 437}
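
[Editor's note] The "xer" clobbers added throughout atomic.h cover the carry bit: these lwarx/stwcx. loops use addic-style arithmetic, which writes XER[CA], and the compiler has to be told so. A minimal sketch of the pattern (illustrative helper name, not the kernel's own definition):

	/*
	 * Sketch only: addic updates the carry in XER, so "xer" must sit in
	 * the clobber list next to "cc", otherwise gcc may assume the carry
	 * survives across the asm.
	 */
	static inline void example_atomic_inc(int *counter)
	{
		int t;

		__asm__ __volatile__(
	"1:	lwarx	%0,0,%2\n"	/* load word and reserve	*/
	"	addic	%0,%0,1\n"	/* add 1, sets XER[CA]		*/
	"	stwcx.	%0,0,%2\n"	/* store conditionally		*/
	"	bne-	1b"		/* reservation lost: retry	*/
		: "=&r" (t), "+m" (*counter)
		: "r" (counter)
		: "cc", "xer");
	}
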
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index e55d1f66b86f..64e1fdca233e 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -3,6 +3,7 @@
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <asm/asm-compat.h> 5#include <asm/asm-compat.h>
6
6/* 7/*
7 * Define an illegal instr to trap on the bug. 8 * Define an illegal instr to trap on the bug.
8 * We don't use 0 because that marks the end of a function 9 * We don't use 0 because that marks the end of a function
@@ -14,6 +15,7 @@
14#ifdef CONFIG_BUG 15#ifdef CONFIG_BUG
15 16
16#ifdef __ASSEMBLY__ 17#ifdef __ASSEMBLY__
18#include <asm/asm-offsets.h>
17#ifdef CONFIG_DEBUG_BUGVERBOSE 19#ifdef CONFIG_DEBUG_BUGVERBOSE
18.macro EMIT_BUG_ENTRY addr,file,line,flags 20.macro EMIT_BUG_ENTRY addr,file,line,flags
19 .section __bug_table,"a" 21 .section __bug_table,"a"
@@ -26,7 +28,7 @@
26 .previous 28 .previous
27.endm 29.endm
28#else 30#else
29 .macro EMIT_BUG_ENTRY addr,file,line,flags 31.macro EMIT_BUG_ENTRY addr,file,line,flags
30 .section __bug_table,"a" 32 .section __bug_table,"a"
315001: PPC_LONG \addr 335001: PPC_LONG \addr
32 .short \flags 34 .short \flags
@@ -113,6 +115,13 @@
113#define HAVE_ARCH_BUG_ON 115#define HAVE_ARCH_BUG_ON
114#define HAVE_ARCH_WARN_ON 116#define HAVE_ARCH_WARN_ON
115#endif /* __ASSEMBLY __ */ 117#endif /* __ASSEMBLY __ */
118#else
119#ifdef __ASSEMBLY__
120.macro EMIT_BUG_ENTRY addr,file,line,flags
121.endm
122#else /* !__ASSEMBLY__ */
123#define _EMIT_BUG_ENTRY
124#endif
116#endif /* CONFIG_BUG */ 125#endif /* CONFIG_BUG */
117 126
118#include <asm-generic/bug.h> 127#include <asm-generic/bug.h>
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
index b37752214a16..d5de325472e9 100644
--- a/arch/powerpc/include/asm/byteorder.h
+++ b/arch/powerpc/include/asm/byteorder.h
@@ -11,6 +11,8 @@
11#include <asm/types.h> 11#include <asm/types.h>
12#include <linux/compiler.h> 12#include <linux/compiler.h>
13 13
14#define __BIG_ENDIAN
15
14#ifdef __GNUC__ 16#ifdef __GNUC__
15#ifdef __KERNEL__ 17#ifdef __KERNEL__
16 18
@@ -21,12 +23,19 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
21 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); 23 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
22 return val; 24 return val;
23} 25}
26#define __arch_swab16p ld_le16
24 27
25static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) 28static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
26{ 29{
27 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); 30 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
28} 31}
29 32
33static inline void __arch_swab16s(__u16 *addr)
34{
35 st_le16(addr, *addr);
36}
37#define __arch_swab16s __arch_swab16s
38
30static __inline__ __u32 ld_le32(const volatile __u32 *addr) 39static __inline__ __u32 ld_le32(const volatile __u32 *addr)
31{ 40{
32 __u32 val; 41 __u32 val;
@@ -34,13 +43,20 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
34 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); 43 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
35 return val; 44 return val;
36} 45}
46#define __arch_swab32p ld_le32
37 47
38static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) 48static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
39{ 49{
40 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); 50 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
41} 51}
42 52
43static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value) 53static inline void __arch_swab32s(__u32 *addr)
54{
55 st_le32(addr, *addr);
56}
57#define __arch_swab32s __arch_swab32s
58
59static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
44{ 60{
45 __u16 result; 61 __u16 result;
46 62
@@ -49,8 +65,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
49 : "r" (value), "0" (value >> 8)); 65 : "r" (value), "0" (value >> 8));
50 return result; 66 return result;
51} 67}
68#define __arch_swab16 __arch_swab16
52 69
53static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value) 70static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
54{ 71{
55 __u32 result; 72 __u32 result;
56 73
@@ -61,29 +78,16 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
61 : "r" (value), "0" (value >> 24)); 78 : "r" (value), "0" (value >> 24));
62 return result; 79 return result;
63} 80}
64 81#define __arch_swab32 __arch_swab32
65#define __arch__swab16(x) ___arch__swab16(x)
66#define __arch__swab32(x) ___arch__swab32(x)
67
68/* The same, but returns converted value from the location pointer by addr. */
69#define __arch__swab16p(addr) ld_le16(addr)
70#define __arch__swab32p(addr) ld_le32(addr)
71
72/* The same, but do the conversion in situ, ie. put the value back to addr. */
73#define __arch__swab16s(addr) st_le16(addr,*addr)
74#define __arch__swab32s(addr) st_le32(addr,*addr)
75 82
76#endif /* __KERNEL__ */ 83#endif /* __KERNEL__ */
77 84
78#ifndef __STRICT_ANSI__
79#define __BYTEORDER_HAS_U64__
80#ifndef __powerpc64__ 85#ifndef __powerpc64__
81#define __SWAB_64_THRU_32__ 86#define __SWAB_64_THRU_32__
82#endif /* __powerpc64__ */ 87#endif /* __powerpc64__ */
83#endif /* __STRICT_ANSI__ */
84 88
85#endif /* __GNUC__ */ 89#endif /* __GNUC__ */
86 90
87#include <linux/byteorder/big_endian.h> 91#include <linux/byteorder.h>
88 92
89#endif /* _ASM_POWERPC_BYTEORDER_H */ 93#endif /* _ASM_POWERPC_BYTEORDER_H */
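
[Editor's note] With the old __arch__swab* macro names removed, the lhbrx/lwbrx helpers are now exported through the generic __arch_swab* hooks and the single linux/byteorder.h include. Callers are untouched; a small usage sketch (illustrative functions, not part of the patch):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* On big-endian powerpc, cpu_to_le32()/le32_to_cpu() end up in the
	 * __arch_swab32 hook defined above. */
	static inline void store_le32_example(__le32 *p, u32 val)
	{
		*p = cpu_to_le32(val);		/* byte swap via __arch_swab32 */
	}

	static inline u32 load_le32_example(const __le32 *p)
	{
		return le32_to_cpu(*p);
	}
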
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1e94b07a020e..4911104791c3 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -82,6 +82,7 @@ struct cpu_spec {
82 char *cpu_name; 82 char *cpu_name;
83 unsigned long cpu_features; /* Kernel features */ 83 unsigned long cpu_features; /* Kernel features */
84 unsigned int cpu_user_features; /* Userland features */ 84 unsigned int cpu_user_features; /* Userland features */
85 unsigned int mmu_features; /* MMU features */
85 86
86 /* cache line sizes */ 87 /* cache line sizes */
87 unsigned int icache_bsize; 88 unsigned int icache_bsize;
@@ -144,17 +145,14 @@ extern const char *powerpc_base_platform;
144#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040) 145#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
145#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080) 146#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
146#define CPU_FTR_601 ASM_CONST(0x0000000000000100) 147#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
147#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
148#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400) 148#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
149#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800) 149#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
150#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000) 150#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
151#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000) 151#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
152#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000) 152#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
153#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000) 153#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
154#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000)
155#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) 154#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
156#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) 155#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
157#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
158#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) 156#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
159#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) 157#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
160#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) 158#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
@@ -163,6 +161,8 @@ extern const char *powerpc_base_platform;
163#define CPU_FTR_SPE ASM_CONST(0x0000000002000000) 161#define CPU_FTR_SPE ASM_CONST(0x0000000002000000)
164#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) 162#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000)
165#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) 163#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
164#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
165#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
166 166
167/* 167/*
168 * Add the 64-bit processor unique features in the top half of the word; 168 * Add the 64-bit processor unique features in the top half of the word;
@@ -177,7 +177,6 @@ extern const char *powerpc_base_platform;
177#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000) 177#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000)
178#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000) 178#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000)
179#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000) 179#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000)
180#define CPU_FTR_NOEXECUTE LONG_ASM_CONST(0x0000000800000000)
181#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) 180#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
182#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) 181#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
183#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) 182#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
@@ -194,6 +193,7 @@ extern const char *powerpc_base_platform;
194#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) 193#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
195#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) 194#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
196#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) 195#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
196#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
197 197
198#ifndef __ASSEMBLY__ 198#ifndef __ASSEMBLY__
199 199
@@ -264,164 +264,159 @@ extern const char *powerpc_base_platform;
264 !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \ 264 !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
265 !defined(CONFIG_BOOKE)) 265 !defined(CONFIG_BOOKE))
266 266
267#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \ 267#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \
268 CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE) 268 CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
269#define CPU_FTRS_603 (CPU_FTR_COMMON | \ 269#define CPU_FTRS_603 (CPU_FTR_COMMON | \
270 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 270 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
271 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) 271 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
272#define CPU_FTRS_604 (CPU_FTR_COMMON | \ 272#define CPU_FTRS_604 (CPU_FTR_COMMON | \
273 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE) 273 CPU_FTR_USE_TB | CPU_FTR_PPC_LE)
274#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \ 274#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \
275 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ 275 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
276 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) 276 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
277#define CPU_FTRS_740 (CPU_FTR_COMMON | \ 277#define CPU_FTRS_740 (CPU_FTR_COMMON | \
278 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ 278 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
279 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ 279 CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
280 CPU_FTR_PPC_LE) 280 CPU_FTR_PPC_LE)
281#define CPU_FTRS_750 (CPU_FTR_COMMON | \ 281#define CPU_FTRS_750 (CPU_FTR_COMMON | \
282 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ 282 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
283 CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ 283 CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
284 CPU_FTR_PPC_LE) 284 CPU_FTR_PPC_LE)
285#define CPU_FTRS_750CL (CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS) 285#define CPU_FTRS_750CL (CPU_FTRS_750)
286#define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM) 286#define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
287#define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM) 287#define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM)
288#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \ 288#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX)
289 CPU_FTR_HAS_HIGH_BATS)
290#define CPU_FTRS_750GX (CPU_FTRS_750FX) 289#define CPU_FTRS_750GX (CPU_FTRS_750FX)
291#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \ 290#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \
292 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ 291 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
293 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \ 292 CPU_FTR_ALTIVEC_COMP | \
294 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) 293 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
295#define CPU_FTRS_7400 (CPU_FTR_COMMON | \ 294#define CPU_FTRS_7400 (CPU_FTR_COMMON | \
296 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ 295 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
297 CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \ 296 CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \
298 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) 297 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
299#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \ 298#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \
300 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 299 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
301 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 300 CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
302 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) 301 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
303#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \ 302#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \
304 CPU_FTR_USE_TB | \ 303 CPU_FTR_USE_TB | \
305 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 304 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
306 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 305 CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
307 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ 306 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
308 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) 307 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
309#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \ 308#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \
310 CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ 309 CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
311 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 310 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
312 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 311 CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
313 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) 312 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
314#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \ 313#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \
315 CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ 314 CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
316 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \ 315 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
317 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \ 316 CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
318 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
319#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \ 317#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \
320 CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ 318 CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
321 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 319 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
322 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 320 CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
323 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ 321 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
324 CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) 322 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
325#define CPU_FTRS_7455 (CPU_FTR_COMMON | \ 323#define CPU_FTRS_7455 (CPU_FTR_COMMON | \
326 CPU_FTR_USE_TB | \ 324 CPU_FTR_USE_TB | \
327 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 325 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
328 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 326 CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
329 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
330 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) 327 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
331#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \ 328#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \
332 CPU_FTR_USE_TB | \ 329 CPU_FTR_USE_TB | \
333 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 330 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
334 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 331 CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
335 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
336 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \ 332 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
337 CPU_FTR_NEED_PAIRED_STWCX) 333 CPU_FTR_NEED_PAIRED_STWCX)
338#define CPU_FTRS_7447 (CPU_FTR_COMMON | \ 334#define CPU_FTRS_7447 (CPU_FTR_COMMON | \
339 CPU_FTR_USE_TB | \ 335 CPU_FTR_USE_TB | \
340 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 336 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
341 CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 337 CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
342 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
343 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) 338 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
344#define CPU_FTRS_7447A (CPU_FTR_COMMON | \ 339#define CPU_FTRS_7447A (CPU_FTR_COMMON | \
345 CPU_FTR_USE_TB | \ 340 CPU_FTR_USE_TB | \
346 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 341 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
347 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 342 CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
348 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
349 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) 343 CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
350#define CPU_FTRS_7448 (CPU_FTR_COMMON | \ 344#define CPU_FTRS_7448 (CPU_FTR_COMMON | \
351 CPU_FTR_USE_TB | \ 345 CPU_FTR_USE_TB | \
352 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ 346 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
353 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ 347 CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
354 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
355 CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) 348 CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
356#define CPU_FTRS_82XX (CPU_FTR_COMMON | \ 349#define CPU_FTRS_82XX (CPU_FTR_COMMON | \
357 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB) 350 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
358#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ 351#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
359 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS) 352 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP)
360#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ 353#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
361 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \ 354 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
362 CPU_FTR_COMMON) 355 CPU_FTR_COMMON)
363#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \ 356#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \
364 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \ 357 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
365 CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE) 358 CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
366#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | \ 359#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB)
367 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
368#define CPU_FTRS_8XX (CPU_FTR_USE_TB) 360#define CPU_FTRS_8XX (CPU_FTR_USE_TB)
369#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN) 361#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
370#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN) 362#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
363#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
364 CPU_FTR_INDEXED_DCR)
371#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ 365#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
372 CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ 366 CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
373 CPU_FTR_UNIFIED_ID_CACHE) 367 CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
374#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 368#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
375 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN) 369 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
370 CPU_FTR_NOEXECUTE)
376#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 371#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
377 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \ 372 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
378 CPU_FTR_NODSISRALIGN) 373 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
379#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 374#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
380 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \ 375 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
381 CPU_FTR_L2CSR | CPU_FTR_LWSYNC) 376 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE)
382#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) 377#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
383 378
384/* 64-bit CPUs */ 379/* 64-bit CPUs */
385#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 380#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
386 CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE) 381 CPU_FTR_IABR | CPU_FTR_PPC_LE)
387#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 382#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
388 CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \ 383 CPU_FTR_IABR | \
389 CPU_FTR_MMCRA | CPU_FTR_CTRL) 384 CPU_FTR_MMCRA | CPU_FTR_CTRL)
390#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 385#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
391 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 386 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
392 CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ) 387 CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ)
393#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 388#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
394 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 389 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
395 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ 390 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
396 CPU_FTR_CP_USE_DCBTZ) 391 CPU_FTR_CP_USE_DCBTZ)
397#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 392#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
398 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 393 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
399 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 394 CPU_FTR_MMCRA | CPU_FTR_SMT | \
400 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 395 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
401 CPU_FTR_PURR) 396 CPU_FTR_PURR)
402#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 397#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
403 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 398 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
404 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 399 CPU_FTR_MMCRA | CPU_FTR_SMT | \
405 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 400 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
406 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 401 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
407 CPU_FTR_DSCR) 402 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD)
408#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 403#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
409 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 404 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
410 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 405 CPU_FTR_MMCRA | CPU_FTR_SMT | \
411 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 406 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
412 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 407 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
413 CPU_FTR_DSCR | CPU_FTR_SAO) 408 CPU_FTR_DSCR | CPU_FTR_SAO)
414#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 409#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
415 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 410 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
416 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 411 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
417 CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \ 412 CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \
418 CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ) 413 CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
414 CPU_FTR_UNALIGNED_LD_STD)
419#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 415#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
420 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ 416 CPU_FTR_PPCAS_ARCH_V2 | \
421 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ 417 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
422 CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B) 418 CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
423#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | \ 419#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
424 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
425 420
426#ifdef __powerpc64__ 421#ifdef __powerpc64__
427#define CPU_FTRS_POSSIBLE \ 422#define CPU_FTRS_POSSIBLE \
@@ -452,7 +447,7 @@ enum {
452 CPU_FTRS_40X | 447 CPU_FTRS_40X |
453#endif 448#endif
454#ifdef CONFIG_44x 449#ifdef CONFIG_44x
455 CPU_FTRS_44X | 450 CPU_FTRS_44X | CPU_FTRS_440x6 |
456#endif 451#endif
457#ifdef CONFIG_E200 452#ifdef CONFIG_E200
458 CPU_FTRS_E200 | 453 CPU_FTRS_E200 |
@@ -492,7 +487,7 @@ enum {
492 CPU_FTRS_40X & 487 CPU_FTRS_40X &
493#endif 488#endif
494#ifdef CONFIG_44x 489#ifdef CONFIG_44x
495 CPU_FTRS_44X & 490 CPU_FTRS_44X & CPU_FTRS_440x6 &
496#endif 491#endif
497#ifdef CONFIG_E200 492#ifdef CONFIG_E200
498 CPU_FTRS_E200 & 493 CPU_FTRS_E200 &
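
[Editor's note] CPU_FTR_HPTE_TABLE, CPU_FTR_HAS_HIGH_BATS and CPU_FTR_BIG_PHYS drop out of cpu_features because they describe the MMU and move to the new mmu_features word in struct cpu_spec; CPU_FTR_NOEXECUTE, CPU_FTR_INDEXED_DCR and CPU_FTR_UNALIGNED_LD_STD take their place in the 32-bit half. A hedged sketch of a runtime check against one of the new bits (helper name is illustrative):

	#include <linux/kernel.h>
	#include <asm/cputable.h>

	/* cpu_has_feature() tests cur_cpu_spec->cpu_features, so the new
	 * CPU_FTR_INDEXED_DCR bit is probed like any other CPU feature. */
	static void report_dcr_mode_example(void)
	{
		if (cpu_has_feature(CPU_FTR_INDEXED_DCR))
			pr_info("440x6 core: using mfdcrx/mtdcrx for DCR access\n");
		else
			pr_info("using immediate-form mtdcr/mfdcr only\n");
	}
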
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 72d2b72c7390..7d2e6235726d 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -23,6 +23,7 @@
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24 24
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <asm/cputable.h>
26 27
27typedef struct { 28typedef struct {
28 unsigned int base; 29 unsigned int base;
@@ -39,23 +40,45 @@ static inline bool dcr_map_ok_native(dcr_host_native_t host)
39#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base) 40#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base)
40#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value) 41#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value)
41 42
42/* Device Control Registers */ 43/* Table based DCR accessors */
43void __mtdcr(int reg, unsigned int val); 44extern void __mtdcr(unsigned int reg, unsigned int val);
44unsigned int __mfdcr(int reg); 45extern unsigned int __mfdcr(unsigned int reg);
46
47/* mfdcrx/mtdcrx instruction based accessors. We hand code
48 * the opcodes in order not to depend on newer binutils
49 */
50static inline unsigned int mfdcrx(unsigned int reg)
51{
52 unsigned int ret;
53 asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
54 : "=r" (ret) : "r" (reg));
55 return ret;
56}
57
58static inline void mtdcrx(unsigned int reg, unsigned int val)
59{
60 asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
61 : : "r" (val), "r" (reg));
62}
63
45#define mfdcr(rn) \ 64#define mfdcr(rn) \
46 ({unsigned int rval; \ 65 ({unsigned int rval; \
47 if (__builtin_constant_p(rn)) \ 66 if (__builtin_constant_p(rn) && rn < 1024) \
48 asm volatile("mfdcr %0," __stringify(rn) \ 67 asm volatile("mfdcr %0," __stringify(rn) \
49 : "=r" (rval)); \ 68 : "=r" (rval)); \
69 else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
70 rval = mfdcrx(rn); \
50 else \ 71 else \
51 rval = __mfdcr(rn); \ 72 rval = __mfdcr(rn); \
52 rval;}) 73 rval;})
53 74
54#define mtdcr(rn, v) \ 75#define mtdcr(rn, v) \
55do { \ 76do { \
56 if (__builtin_constant_p(rn)) \ 77 if (__builtin_constant_p(rn) && rn < 1024) \
57 asm volatile("mtdcr " __stringify(rn) ",%0" \ 78 asm volatile("mtdcr " __stringify(rn) ",%0" \
58 : : "r" (v)); \ 79 : : "r" (v)); \
80 else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
81 mtdcrx(rn, v); \
59 else \ 82 else \
60 __mtdcr(rn, v); \ 83 __mtdcr(rn, v); \
61} while (0) 84} while (0)
@@ -69,8 +92,13 @@ static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
69 unsigned int val; 92 unsigned int val;
70 93
71 spin_lock_irqsave(&dcr_ind_lock, flags); 94 spin_lock_irqsave(&dcr_ind_lock, flags);
72 __mtdcr(base_addr, reg); 95 if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
73 val = __mfdcr(base_data); 96 mtdcrx(base_addr, reg);
97 val = mfdcrx(base_data);
98 } else {
99 __mtdcr(base_addr, reg);
100 val = __mfdcr(base_data);
101 }
74 spin_unlock_irqrestore(&dcr_ind_lock, flags); 102 spin_unlock_irqrestore(&dcr_ind_lock, flags);
75 return val; 103 return val;
76} 104}
@@ -81,8 +109,13 @@ static inline void __mtdcri(int base_addr, int base_data, int reg,
81 unsigned long flags; 109 unsigned long flags;
82 110
83 spin_lock_irqsave(&dcr_ind_lock, flags); 111 spin_lock_irqsave(&dcr_ind_lock, flags);
84 __mtdcr(base_addr, reg); 112 if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
85 __mtdcr(base_data, val); 113 mtdcrx(base_addr, reg);
114 mtdcrx(base_data, val);
115 } else {
116 __mtdcr(base_addr, reg);
117 __mtdcr(base_data, val);
118 }
86 spin_unlock_irqrestore(&dcr_ind_lock, flags); 119 spin_unlock_irqrestore(&dcr_ind_lock, flags);
87} 120}
88 121
@@ -93,9 +126,15 @@ static inline void __dcri_clrset(int base_addr, int base_data, int reg,
93 unsigned int val; 126 unsigned int val;
94 127
95 spin_lock_irqsave(&dcr_ind_lock, flags); 128 spin_lock_irqsave(&dcr_ind_lock, flags);
96 __mtdcr(base_addr, reg); 129 if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
97 val = (__mfdcr(base_data) & ~clr) | set; 130 mtdcrx(base_addr, reg);
98 __mtdcr(base_data, val); 131 val = (mfdcrx(base_data) & ~clr) | set;
132 mtdcrx(base_data, val);
133 } else {
134 __mtdcr(base_addr, reg);
135 val = (__mfdcr(base_data) & ~clr) | set;
136 __mtdcr(base_data, val);
137 }
99 spin_unlock_irqrestore(&dcr_ind_lock, flags); 138 spin_unlock_irqrestore(&dcr_ind_lock, flags);
100} 139}
101 140
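
[Editor's note] The .long encodings above spell out mfdcrx (0x7c000206) and mtdcrx (0x7c000306) by hand so the kernel still builds with binutils that lack those mnemonics; the register operands are OR'd into the RT (<<21) and RA (<<16) fields. A usage sketch with a run-time DCR number (names illustrative):

	#include <linux/types.h>
	#include <asm/dcr.h>

	/*
	 * With a non-constant DCR number the mfdcr()/mtdcr() macros cannot emit
	 * the immediate-form instruction, so on cores advertising
	 * CPU_FTR_INDEXED_DCR they now go through mfdcrx()/mtdcrx() instead of
	 * the spinlock-protected __mfdcr()/__mtdcr() fallback.
	 */
	static u32 read_runtime_dcr_example(unsigned int dcrn)
	{
		return mfdcr(dcrn);
	}

	static void write_runtime_dcr_example(unsigned int dcrn, u32 val)
	{
		mtdcr(dcrn, val);
	}
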
diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h
index d13fb68bb5c0..9d6851cfb841 100644
--- a/arch/powerpc/include/asm/dcr.h
+++ b/arch/powerpc/include/asm/dcr.h
@@ -68,9 +68,9 @@ typedef dcr_host_mmio_t dcr_host_t;
68 * additional helpers to read the DCR * base from the device-tree 68 * additional helpers to read the DCR * base from the device-tree
69 */ 69 */
70struct device_node; 70struct device_node;
71extern unsigned int dcr_resource_start(struct device_node *np, 71extern unsigned int dcr_resource_start(const struct device_node *np,
72 unsigned int index); 72 unsigned int index);
73extern unsigned int dcr_resource_len(struct device_node *np, 73extern unsigned int dcr_resource_len(const struct device_node *np,
74 unsigned int index); 74 unsigned int index);
75#endif /* CONFIG_PPC_DCR */ 75#endif /* CONFIG_PPC_DCR */
76#endif /* __ASSEMBLY__ */ 76#endif /* __ASSEMBLY__ */
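
[Editor's note] dcr_resource_start() and dcr_resource_len() now take a const struct device_node *, so callers holding only a const pointer no longer need a cast. Sketch of a typical device-tree lookup; the "zero length means no dcr-reg property" convention is an assumption here, not something the patch states:

	#include <linux/errno.h>
	#include <asm/dcr.h>

	static int get_dcr_window_example(const struct device_node *np,
					  unsigned int *base, unsigned int *len)
	{
		*base = dcr_resource_start(np, 0);	/* first dcr-reg entry */
		*len  = dcr_resource_len(np, 0);

		return *len ? 0 : -ENODEV;		/* assumed: 0 == absent */
	}
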
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index dfd504caccc1..7d2277cef09a 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -18,4 +18,16 @@ struct dev_archdata {
18 void *dma_data; 18 void *dma_data;
19}; 19};
20 20
21static inline void dev_archdata_set_node(struct dev_archdata *ad,
22 struct device_node *np)
23{
24 ad->of_node = np;
25}
26
27static inline struct device_node *
28dev_archdata_get_node(const struct dev_archdata *ad)
29{
30 return ad->of_node;
31}
32
21#endif /* _ASM_POWERPC_DEVICE_H */ 33#endif /* _ASM_POWERPC_DEVICE_H */
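
[Editor's note] The two trivial accessors wrap the archdata of_node pointer so generic code can set or read it without knowing the powerpc layout. An illustrative caller (not part of the patch):

	#include <linux/device.h>
	#include <asm/device.h>

	/* A bus driver attaching the firmware node to a freshly created device. */
	static void bind_of_node_example(struct device *dev, struct device_node *np)
	{
		dev_archdata_set_node(&dev->archdata, np);

		if (dev_archdata_get_node(&dev->archdata) != np)
			dev_warn(dev, "archdata of_node mismatch?\n");
	}
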
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
new file mode 100644
index 000000000000..9b198d1b3b2b
--- /dev/null
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -0,0 +1,80 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __ASM_PPC_DISASSEMBLE_H__
21#define __ASM_PPC_DISASSEMBLE_H__
22
23#include <linux/types.h>
24
25static inline unsigned int get_op(u32 inst)
26{
27 return inst >> 26;
28}
29
30static inline unsigned int get_xop(u32 inst)
31{
32 return (inst >> 1) & 0x3ff;
33}
34
35static inline unsigned int get_sprn(u32 inst)
36{
37 return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
38}
39
40static inline unsigned int get_dcrn(u32 inst)
41{
42 return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
43}
44
45static inline unsigned int get_rt(u32 inst)
46{
47 return (inst >> 21) & 0x1f;
48}
49
50static inline unsigned int get_rs(u32 inst)
51{
52 return (inst >> 21) & 0x1f;
53}
54
55static inline unsigned int get_ra(u32 inst)
56{
57 return (inst >> 16) & 0x1f;
58}
59
60static inline unsigned int get_rb(u32 inst)
61{
62 return (inst >> 11) & 0x1f;
63}
64
65static inline unsigned int get_rc(u32 inst)
66{
67 return inst & 0x1;
68}
69
70static inline unsigned int get_ws(u32 inst)
71{
72 return (inst >> 11) & 0x1f;
73}
74
75static inline unsigned int get_d(u32 inst)
76{
77 return inst & 0xffff;
78}
79
80#endif /* __ASM_PPC_DISASSEMBLE_H__ */
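
[Editor's note] The new header gives KVM (and anything else that has to emulate instructions) small helpers for pulling fields out of a raw instruction word. A hedged decoding sketch using mfspr as the example (primary opcode 31, extended opcode 339; the function itself is illustrative):

	#include <linux/types.h>
	#include <asm/disassemble.h>

	/* Returns 1 and fills in the destination GPR and SPR number when the
	 * instruction word is an mfspr; field extraction uses the new helpers. */
	static int decode_mfspr_example(u32 inst, unsigned int *rt, unsigned int *sprn)
	{
		if (get_op(inst) != 31 || get_xop(inst) != 339)
			return 0;

		*rt   = get_rt(inst);
		*sprn = get_sprn(inst);
		return 1;
	}
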
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index fddb229bd74f..86cef7ddc8d5 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -60,12 +60,6 @@ struct dma_mapping_ops {
60 dma_addr_t *dma_handle, gfp_t flag); 60 dma_addr_t *dma_handle, gfp_t flag);
61 void (*free_coherent)(struct device *dev, size_t size, 61 void (*free_coherent)(struct device *dev, size_t size,
62 void *vaddr, dma_addr_t dma_handle); 62 void *vaddr, dma_addr_t dma_handle);
63 dma_addr_t (*map_single)(struct device *dev, void *ptr,
64 size_t size, enum dma_data_direction direction,
65 struct dma_attrs *attrs);
66 void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
67 size_t size, enum dma_data_direction direction,
68 struct dma_attrs *attrs);
69 int (*map_sg)(struct device *dev, struct scatterlist *sg, 63 int (*map_sg)(struct device *dev, struct scatterlist *sg,
70 int nents, enum dma_data_direction direction, 64 int nents, enum dma_data_direction direction,
71 struct dma_attrs *attrs); 65 struct dma_attrs *attrs);
@@ -82,6 +76,22 @@ struct dma_mapping_ops {
82 dma_addr_t dma_address, size_t size, 76 dma_addr_t dma_address, size_t size,
83 enum dma_data_direction direction, 77 enum dma_data_direction direction,
84 struct dma_attrs *attrs); 78 struct dma_attrs *attrs);
79#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
80 void (*sync_single_range_for_cpu)(struct device *hwdev,
81 dma_addr_t dma_handle, unsigned long offset,
82 size_t size,
83 enum dma_data_direction direction);
84 void (*sync_single_range_for_device)(struct device *hwdev,
85 dma_addr_t dma_handle, unsigned long offset,
86 size_t size,
87 enum dma_data_direction direction);
88 void (*sync_sg_for_cpu)(struct device *hwdev,
89 struct scatterlist *sg, int nelems,
90 enum dma_data_direction direction);
91 void (*sync_sg_for_device)(struct device *hwdev,
92 struct scatterlist *sg, int nelems,
93 enum dma_data_direction direction);
94#endif
85}; 95};
86 96
87/* 97/*
@@ -149,10 +159,9 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
149} 159}
150 160
151/* 161/*
152 * TODO: map_/unmap_single will ideally go away, to be completely 162 * map_/unmap_single actually call through to map/unmap_page now that all the
153 * replaced by map/unmap_page. Until then, we allow dma_ops to have 163 * dma_mapping_ops have been converted over. We just have to get the page and
154 * one or the other, or both by checking to see if the specific 164 * offset to pass through to map_page
155 * function requested exists; and if not, falling back on the other set.
156 */ 165 */
157static inline dma_addr_t dma_map_single_attrs(struct device *dev, 166static inline dma_addr_t dma_map_single_attrs(struct device *dev,
158 void *cpu_addr, 167 void *cpu_addr,
@@ -164,10 +173,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
164 173
165 BUG_ON(!dma_ops); 174 BUG_ON(!dma_ops);
166 175
167 if (dma_ops->map_single)
168 return dma_ops->map_single(dev, cpu_addr, size, direction,
169 attrs);
170
171 return dma_ops->map_page(dev, virt_to_page(cpu_addr), 176 return dma_ops->map_page(dev, virt_to_page(cpu_addr),
172 (unsigned long)cpu_addr % PAGE_SIZE, size, 177 (unsigned long)cpu_addr % PAGE_SIZE, size,
173 direction, attrs); 178 direction, attrs);
@@ -183,11 +188,6 @@ static inline void dma_unmap_single_attrs(struct device *dev,
183 188
184 BUG_ON(!dma_ops); 189 BUG_ON(!dma_ops);
185 190
186 if (dma_ops->unmap_single) {
187 dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
188 return;
189 }
190
191 dma_ops->unmap_page(dev, dma_addr, size, direction, attrs); 191 dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
192} 192}
193 193
@@ -201,12 +201,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
201 201
202 BUG_ON(!dma_ops); 202 BUG_ON(!dma_ops);
203 203
204 if (dma_ops->map_page) 204 return dma_ops->map_page(dev, page, offset, size, direction, attrs);
205 return dma_ops->map_page(dev, page, offset, size, direction,
206 attrs);
207
208 return dma_ops->map_single(dev, page_address(page) + offset, size,
209 direction, attrs);
210} 205}
211 206
212static inline void dma_unmap_page_attrs(struct device *dev, 207static inline void dma_unmap_page_attrs(struct device *dev,
@@ -219,12 +214,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
219 214
220 BUG_ON(!dma_ops); 215 BUG_ON(!dma_ops);
221 216
222 if (dma_ops->unmap_page) { 217 dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
223 dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
224 return;
225 }
226
227 dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
228} 218}
229 219
230static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, 220static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
@@ -308,47 +298,107 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
308 dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL); 298 dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
309} 299}
310 300
301#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
311static inline void dma_sync_single_for_cpu(struct device *dev, 302static inline void dma_sync_single_for_cpu(struct device *dev,
312 dma_addr_t dma_handle, size_t size, 303 dma_addr_t dma_handle, size_t size,
313 enum dma_data_direction direction) 304 enum dma_data_direction direction)
314{ 305{
315 BUG_ON(direction == DMA_NONE); 306 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
316 __dma_sync(bus_to_virt(dma_handle), size, direction); 307
308 BUG_ON(!dma_ops);
309 dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
310 size, direction);
317} 311}
318 312
319static inline void dma_sync_single_for_device(struct device *dev, 313static inline void dma_sync_single_for_device(struct device *dev,
320 dma_addr_t dma_handle, size_t size, 314 dma_addr_t dma_handle, size_t size,
321 enum dma_data_direction direction) 315 enum dma_data_direction direction)
322{ 316{
323 BUG_ON(direction == DMA_NONE); 317 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
324 __dma_sync(bus_to_virt(dma_handle), size, direction); 318
319 BUG_ON(!dma_ops);
320 dma_ops->sync_single_range_for_device(dev, dma_handle,
321 0, size, direction);
325} 322}
326 323
327static inline void dma_sync_sg_for_cpu(struct device *dev, 324static inline void dma_sync_sg_for_cpu(struct device *dev,
328 struct scatterlist *sgl, int nents, 325 struct scatterlist *sgl, int nents,
329 enum dma_data_direction direction) 326 enum dma_data_direction direction)
330{ 327{
331 struct scatterlist *sg; 328 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
332 int i;
333 329
334 BUG_ON(direction == DMA_NONE); 330 BUG_ON(!dma_ops);
331 dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
332}
333
334static inline void dma_sync_sg_for_device(struct device *dev,
335 struct scatterlist *sgl, int nents,
336 enum dma_data_direction direction)
337{
338 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
339
340 BUG_ON(!dma_ops);
341 dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
342}
343
344static inline void dma_sync_single_range_for_cpu(struct device *dev,
345 dma_addr_t dma_handle, unsigned long offset, size_t size,
346 enum dma_data_direction direction)
347{
348 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
335 349
336 for_each_sg(sgl, sg, nents, i) 350 BUG_ON(!dma_ops);
337 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); 351 dma_ops->sync_single_range_for_cpu(dev, dma_handle,
352 offset, size, direction);
353}
354
355static inline void dma_sync_single_range_for_device(struct device *dev,
356 dma_addr_t dma_handle, unsigned long offset, size_t size,
357 enum dma_data_direction direction)
358{
359 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
360
361 BUG_ON(!dma_ops);
362 dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
363 size, direction);
364}
365#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
366static inline void dma_sync_single_for_cpu(struct device *dev,
367 dma_addr_t dma_handle, size_t size,
368 enum dma_data_direction direction)
369{
370}
371
372static inline void dma_sync_single_for_device(struct device *dev,
373 dma_addr_t dma_handle, size_t size,
374 enum dma_data_direction direction)
375{
376}
377
378static inline void dma_sync_sg_for_cpu(struct device *dev,
379 struct scatterlist *sgl, int nents,
380 enum dma_data_direction direction)
381{
338} 382}
339 383
340static inline void dma_sync_sg_for_device(struct device *dev, 384static inline void dma_sync_sg_for_device(struct device *dev,
341 struct scatterlist *sgl, int nents, 385 struct scatterlist *sgl, int nents,
342 enum dma_data_direction direction) 386 enum dma_data_direction direction)
343{ 387{
344 struct scatterlist *sg; 388}
345 int i;
346 389
347 BUG_ON(direction == DMA_NONE); 390static inline void dma_sync_single_range_for_cpu(struct device *dev,
391 dma_addr_t dma_handle, unsigned long offset, size_t size,
392 enum dma_data_direction direction)
393{
394}
348 395
349 for_each_sg(sgl, sg, nents, i) 396static inline void dma_sync_single_range_for_device(struct device *dev,
350 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); 397 dma_addr_t dma_handle, unsigned long offset, size_t size,
398 enum dma_data_direction direction)
399{
351} 400}
401#endif
352 402
353static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 403static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
354{ 404{
@@ -382,22 +432,6 @@ static inline int dma_get_cache_alignment(void)
382#endif 432#endif
383} 433}
384 434
385static inline void dma_sync_single_range_for_cpu(struct device *dev,
386 dma_addr_t dma_handle, unsigned long offset, size_t size,
387 enum dma_data_direction direction)
388{
389 /* just sync everything for now */
390 dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
391}
392
393static inline void dma_sync_single_range_for_device(struct device *dev,
394 dma_addr_t dma_handle, unsigned long offset, size_t size,
395 enum dma_data_direction direction)
396{
397 /* just sync everything for now */
398 dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
399}
400
401static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 435static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
402 enum dma_data_direction direction) 436 enum dma_data_direction direction)
403{ 437{
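
[Editor's note] With map_single/unmap_single removed, every dma_mapping_ops implementation is expected to provide map_page/unmap_page, and the sync_* hooks only exist (and are only dispatched) when the platform selects CONFIG_PPC_NEED_DMA_SYNC_OPS, i.e. when it is not cache-coherent. Driver code is unchanged; a sketch of the usual streaming-DMA sequence (function name illustrative):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* On coherent platforms the stub versions above make these no-ops;
	 * on non-coherent ones they dispatch through the per-device dma_ops. */
	static void rx_buffer_example(struct device *dev, dma_addr_t buf, size_t len)
	{
		dma_sync_single_for_cpu(dev, buf, len, DMA_FROM_DEVICE);
		/* ... CPU reads the freshly DMA'd data here ... */
		dma_sync_single_for_device(dev, buf, len, DMA_FROM_DEVICE);
	}
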
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index b886bec67016..66ea9b8b95c5 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -17,8 +17,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#ifndef _PPC64_EEH_H 20#ifndef _POWERPC_EEH_H
21#define _PPC64_EEH_H 21#define _POWERPC_EEH_H
22#ifdef __KERNEL__ 22#ifdef __KERNEL__
23 23
24#include <linux/init.h> 24#include <linux/init.h>
@@ -110,6 +110,7 @@ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
110#define EEH_IO_ERROR_VALUE(size) (-1UL) 110#define EEH_IO_ERROR_VALUE(size) (-1UL)
111#endif /* CONFIG_EEH */ 111#endif /* CONFIG_EEH */
112 112
113#ifdef CONFIG_PPC64
113/* 114/*
114 * MMIO read/write operations with EEH support. 115 * MMIO read/write operations with EEH support.
115 */ 116 */
@@ -207,5 +208,6 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
207 eeh_check_failure(addr, *(u32*)buf); 208 eeh_check_failure(addr, *(u32*)buf);
208} 209}
209 210
211#endif /* CONFIG_PPC64 */
210#endif /* __KERNEL__ */ 212#endif /* __KERNEL__ */
211#endif /* _PPC64_EEH_H */ 213#endif /* _POWERPC_EEH_H */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index d812929390e4..cd46f023ec6d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -267,7 +267,7 @@ extern int ucache_bsize;
267#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 267#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
268struct linux_binprm; 268struct linux_binprm;
269extern int arch_setup_additional_pages(struct linux_binprm *bprm, 269extern int arch_setup_additional_pages(struct linux_binprm *bprm,
270 int executable_stack); 270 int uses_interp);
271#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); 271#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
272 272
273#endif /* __KERNEL__ */ 273#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index a1029967620b..e4094a5cb05b 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -81,6 +81,36 @@ label##5: \
81#define ALT_FTR_SECTION_END_IFCLR(msk) \ 81#define ALT_FTR_SECTION_END_IFCLR(msk) \
82 ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97) 82 ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
83 83
84/* MMU feature dependent sections */
85#define BEGIN_MMU_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
86#define BEGIN_MMU_FTR_SECTION START_FTR_SECTION(97)
87
88#define END_MMU_FTR_SECTION_NESTED(msk, val, label) \
89 FTR_SECTION_ELSE_NESTED(label) \
90 MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
91
92#define END_MMU_FTR_SECTION(msk, val) \
93 END_MMU_FTR_SECTION_NESTED(msk, val, 97)
94
95#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk))
96#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0)
97
98/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
99#define MMU_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label)
100#define MMU_FTR_SECTION_ELSE MMU_FTR_SECTION_ELSE_NESTED(97)
101#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label) \
102 MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
103#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label) \
104 ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label)
105#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
106 ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label)
107#define ALT_MMU_FTR_SECTION_END(msk, val) \
108 ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97)
109#define ALT_MMU_FTR_SECTION_END_IFSET(msk) \
110 ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97)
111#define ALT_MMU_FTR_SECTION_END_IFCLR(msk) \
112 ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
113
84/* Firmware feature dependent sections */ 114/* Firmware feature dependent sections */
85#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) 115#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
86#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97) 116#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97)
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 91c589520c0a..04e4a620952e 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -38,9 +38,24 @@ extern pte_t *pkmap_page_table;
38 * easily, subsequent pte tables have to be allocated in one physical 38 * easily, subsequent pte tables have to be allocated in one physical
39 * chunk of RAM. 39 * chunk of RAM.
40 */ 40 */
41#define LAST_PKMAP (1 << PTE_SHIFT) 41/*
42#define LAST_PKMAP_MASK (LAST_PKMAP-1) 42 * We use one full pte table with 4K pages. And with 16K/64K pages pte
43 * table covers enough memory (32MB and 512MB resp.) that both FIXMAP
44 * and PKMAP can be placed in single pte table. We use 1024 pages for
45 * PKMAP in case of 16K/64K pages.
46 */
47#ifdef CONFIG_PPC_4K_PAGES
48#define PKMAP_ORDER PTE_SHIFT
49#else
50#define PKMAP_ORDER 10
51#endif
52#define LAST_PKMAP (1 << PKMAP_ORDER)
53#ifndef CONFIG_PPC_4K_PAGES
54#define PKMAP_BASE (FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
55#else
43#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK) 56#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
57#endif
58#define LAST_PKMAP_MASK (LAST_PKMAP-1)
44#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) 59#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
45#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) 60#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
46 61
@@ -85,7 +100,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
85 BUG_ON(!pte_none(*(kmap_pte-idx))); 100 BUG_ON(!pte_none(*(kmap_pte-idx)));
86#endif 101#endif
87 __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); 102 __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
88 flush_tlb_page(NULL, vaddr); 103 local_flush_tlb_page(NULL, vaddr);
89 104
90 return (void*) vaddr; 105 return (void*) vaddr;
91} 106}
@@ -113,7 +128,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
113 * this pte without first remap it 128 * this pte without first remap it
114 */ 129 */
115 pte_clear(&init_mm, vaddr, kmap_pte-idx); 130 pte_clear(&init_mm, vaddr, kmap_pte-idx);
116 flush_tlb_page(NULL, vaddr); 131 local_flush_tlb_page(NULL, vaddr);
117#endif 132#endif
118 pagefault_enable(); 133 pagefault_enable();
119} 134}
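
[Editor's note] Two independent changes here: the PKMAP window is now sized by PKMAP_ORDER (one full pte page with 4K pages, a fixed 1024 slots with 16K/64K pages so it can still share a pte table with FIXMAP), and the atomic kmap helpers switch to local_flush_tlb_page() because those mappings are strictly per-CPU. The calling convention is untouched; a small sketch of the kmap() path, which is what the PKMAP sizing affects:

	#include <linux/types.h>
	#include <linux/highmem.h>

	/* kmap() takes a slot in the (possibly resized) PKMAP window;
	 * kmap_atomic(), not shown, maps via the per-CPU fixmap slots where
	 * the local_flush_tlb_page() change applies. */
	static u32 peek_high_page_example(struct page *page)
	{
		u32 *p = kmap(page);
		u32 first_word = p[0];

		kunmap(page);
		return first_word;
	}
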
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 08266d2728b3..494cd8b0a278 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -713,13 +713,6 @@ static inline void * phys_to_virt(unsigned long address)
713 */ 713 */
714#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) 714#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
715 715
716/* We do NOT want virtual merging, it would put too much pressure on
717 * our iommu allocator. Instead, we want drivers to be smart enough
718 * to coalesce sglists that happen to have been mapped in a contiguous
719 * way by the iommu
720 */
721#define BIO_VMERGE_BOUNDARY 0
722
723/* 716/*
724 * 32 bits still uses virt_to_bus() for it's implementation of DMA 717 * 32 bits still uses virt_to_bus() for it's implementation of DMA
725 * mappings se we have to keep it defined here. We also have some old 718 * mappings se we have to keep it defined here. We also have some old
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index b07ebb9784d3..5ebfe5d3c61f 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -1,6 +1,8 @@
1#ifndef _PPC64_KDUMP_H 1#ifndef _PPC64_KDUMP_H
2#define _PPC64_KDUMP_H 2#define _PPC64_KDUMP_H
3 3
4#include <asm/page.h>
5
4/* Kdump kernel runs at 32 MB, change at your peril. */ 6/* Kdump kernel runs at 32 MB, change at your peril. */
5#define KDUMP_KERNELBASE 0x2000000 7#define KDUMP_KERNELBASE 0x2000000
6 8
@@ -11,8 +13,19 @@
11 13
12#ifdef CONFIG_CRASH_DUMP 14#ifdef CONFIG_CRASH_DUMP
13 15
16/*
17 * On PPC64 translation is disabled during trampoline setup, so we use
18 * physical addresses. Though on PPC32 translation is already enabled,
19 * so we can't do the same. Luckily create_trampoline() creates relative
20 * branches, so we can just add the PAGE_OFFSET and don't worry about it.
21 */
22#ifdef __powerpc64__
14#define KDUMP_TRAMPOLINE_START 0x0100 23#define KDUMP_TRAMPOLINE_START 0x0100
15#define KDUMP_TRAMPOLINE_END 0x3000 24#define KDUMP_TRAMPOLINE_END 0x3000
25#else
26#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET)
27#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
28#endif /* __powerpc64__ */
16 29
17#define KDUMP_MIN_TCE_ENTRIES 2048 30#define KDUMP_MIN_TCE_ENTRIES 2048
18 31
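
The new #ifdef above only shifts the trampoline window into the linear mapping on 32-bit. A small sketch of the resulting ranges, assuming the common ppc32 PAGE_OFFSET of 0xc0000000 (the real value is configuration dependent):

/* Sketch of the trampoline range selection in kdump.h.
 * PAGE_OFFSET is an assumed illustrative value. */
#include <stdio.h>

#define PAGE_OFFSET	0xc0000000UL	/* assumption for a typical ppc32 config */

int main(void)
{
	/* ppc64: translation is off while the trampoline is set up,
	 * so the range is physical. */
	unsigned long start64 = 0x0100, end64 = 0x3000;

	/* ppc32: translation stays on, so the same low physical range is
	 * reached through the linear mapping at PAGE_OFFSET. */
	unsigned long start32 = 0x0100 + PAGE_OFFSET;
	unsigned long end32   = 0x3000 + PAGE_OFFSET;

	printf("ppc64 trampoline: 0x%lx-0x%lx (physical)\n", start64, end64);
	printf("ppc32 trampoline: 0x%lx-0x%lx (virtual)\n", start32, end32);
	return 0;
}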
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 3736d9b33289..6dbffc981702 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -33,12 +33,12 @@
33 33
34#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
35#include <linux/cpumask.h> 35#include <linux/cpumask.h>
36#include <asm/reg.h>
36 37
37typedef void (*crash_shutdown_t)(void); 38typedef void (*crash_shutdown_t)(void);
38 39
39#ifdef CONFIG_KEXEC 40#ifdef CONFIG_KEXEC
40 41
41#ifdef __powerpc64__
42/* 42/*
43 * This function is responsible for capturing register states if coming 43 * This function is responsible for capturing register states if coming
44 * via panic or invoking dump using sysrq-trigger. 44 * via panic or invoking dump using sysrq-trigger.
@@ -48,6 +48,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
48{ 48{
49 if (oldregs) 49 if (oldregs)
50 memcpy(newregs, oldregs, sizeof(*newregs)); 50 memcpy(newregs, oldregs, sizeof(*newregs));
51#ifdef __powerpc64__
51 else { 52 else {
52 /* FIXME Merge this with xmon_save_regs ?? */ 53 /* FIXME Merge this with xmon_save_regs ?? */
53 unsigned long tmp1, tmp2; 54 unsigned long tmp1, tmp2;
@@ -100,15 +101,11 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
100 : "b" (newregs) 101 : "b" (newregs)
101 : "memory"); 102 : "memory");
102 } 103 }
103}
104#else 104#else
105/* 105 else
106 * Provide a dummy definition to avoid build failures. Will remain 106 ppc_save_regs(newregs);
107 * empty till crash dump support is enabled. 107#endif /* __powerpc64__ */
108 */ 108}
109static inline void crash_setup_regs(struct pt_regs *newregs,
110 struct pt_regs *oldregs) { }
111#endif /* !__powerpc64 __ */
112 109
113extern void kexec_smp_wait(void); /* get and clear naca physid, wait for 110extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
114 master to copy new code to 0 */ 111 master to copy new code to 0 */
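
After this hunk, crash_setup_regs() has a single definition: copy oldregs when the caller supplies them, otherwise capture the current registers (inline assembly on 64-bit, ppc_save_regs() on 32-bit). The sketch below models that control flow with stand-in types; struct pt_regs, save_regs_64() and ppc_save_regs() here are placeholders, not the kernel definitions.

/* Simplified model of the unified crash_setup_regs() after this patch. */
#include <string.h>

struct pt_regs { unsigned long gpr[32]; unsigned long nip; unsigned long msr; };

static void save_regs_64(struct pt_regs *regs)  { (void)regs; /* inline asm on real ppc64 */ }
static void ppc_save_regs(struct pt_regs *regs) { (void)regs; /* shared helper on real ppc32 */ }

static void crash_setup_regs_model(struct pt_regs *newregs, struct pt_regs *oldregs)
{
	if (oldregs) {
		/* Panic path: the interrupted context was already saved. */
		memcpy(newregs, oldregs, sizeof(*newregs));
		return;
	}
#ifdef __powerpc64__
	save_regs_64(newregs);		/* 64-bit: capture registers inline */
#else
	ppc_save_regs(newregs);		/* 32-bit: out-of-line assembly helper */
#endif
}

int main(void)
{
	struct pt_regs regs;
	crash_setup_regs_model(&regs, NULL);	/* sysrq-trigger path: no oldregs */
	return 0;
}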
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
new file mode 100644
index 000000000000..f49031b632ca
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -0,0 +1,61 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __ASM_44X_H__
21#define __ASM_44X_H__
22
23#include <linux/kvm_host.h>
24
25#define PPC44x_TLB_SIZE 64
26
27/* If the guest is expecting it, this can be as large as we like; we'd just
28 * need to find some way of advertising it. */
29#define KVM44x_GUEST_TLB_SIZE 64
30
31struct kvmppc_44x_shadow_ref {
32 struct page *page;
33 u16 gtlb_index;
34 u8 writeable;
35 u8 tid;
36};
37
38struct kvmppc_vcpu_44x {
39 /* Unmodified copy of the guest's TLB. */
40 struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
41
42 /* References to guest pages in the hardware TLB. */
43 struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
44
45 /* State of the shadow TLB at guest context switch time. */
46 struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
47 u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
48
49 struct kvm_vcpu vcpu;
50};
51
52static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
53{
54 return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
55}
56
57void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid);
58void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
59void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
60
61#endif /* __ASM_44X_H__ */
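
The to_44x() helper above relies on the generic struct kvm_vcpu being embedded inside struct kvmppc_vcpu_44x, so container_of() can recover the wrapper from the pointer that generic KVM code passes around. A standalone demonstration of that pattern, with simplified stand-in structures (the _demo names are mine):

/* container_of() pattern used by to_44x(), shown with stand-in types. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm_vcpu_demo { int vcpu_id; };

struct vcpu_44x_demo {
	int guest_tlb[4];		/* arch-private state */
	struct kvm_vcpu_demo vcpu;	/* generic vcpu embedded in the wrapper */
};

static struct vcpu_44x_demo *to_44x_demo(struct kvm_vcpu_demo *vcpu)
{
	return container_of(vcpu, struct vcpu_44x_demo, vcpu);
}

int main(void)
{
	struct vcpu_44x_demo v = { .vcpu = { .vcpu_id = 7 } };
	struct kvm_vcpu_demo *generic = &v.vcpu;

	/* Generic code hands 44x code &v.vcpu; 44x code recovers v. */
	printf("recovered container: %p (expected %p)\n",
	       (void *)to_44x_demo(generic), (void *)&v);
	return 0;
}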
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 34b52b7180cd..c1e436fe7738 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -64,27 +64,58 @@ struct kvm_vcpu_stat {
64 u32 halt_wakeup; 64 u32 halt_wakeup;
65}; 65};
66 66
67struct tlbe { 67struct kvmppc_44x_tlbe {
68 u32 tid; /* Only the low 8 bits are used. */ 68 u32 tid; /* Only the low 8 bits are used. */
69 u32 word0; 69 u32 word0;
70 u32 word1; 70 u32 word1;
71 u32 word2; 71 u32 word2;
72}; 72};
73 73
74struct kvm_arch { 74enum kvm_exit_types {
75 MMIO_EXITS,
76 DCR_EXITS,
77 SIGNAL_EXITS,
78 ITLB_REAL_MISS_EXITS,
79 ITLB_VIRT_MISS_EXITS,
80 DTLB_REAL_MISS_EXITS,
81 DTLB_VIRT_MISS_EXITS,
82 SYSCALL_EXITS,
83 ISI_EXITS,
84 DSI_EXITS,
85 EMULATED_INST_EXITS,
86 EMULATED_MTMSRWE_EXITS,
87 EMULATED_WRTEE_EXITS,
88 EMULATED_MTSPR_EXITS,
89 EMULATED_MFSPR_EXITS,
90 EMULATED_MTMSR_EXITS,
91 EMULATED_MFMSR_EXITS,
92 EMULATED_TLBSX_EXITS,
93 EMULATED_TLBWE_EXITS,
94 EMULATED_RFI_EXITS,
95 DEC_EXITS,
96 EXT_INTR_EXITS,
97 HALT_WAKEUP,
98 USR_PR_INST,
99 FP_UNAVAIL,
100 DEBUG_EXITS,
101 TIMEINGUEST,
102 __NUMBER_OF_KVM_EXIT_TYPES
75}; 103};
76 104
77struct kvm_vcpu_arch { 105/* allow access to big endian 32bit upper/lower parts and 64bit var */
78 /* Unmodified copy of the guest's TLB. */ 106struct kvmppc_exit_timing {
79 struct tlbe guest_tlb[PPC44x_TLB_SIZE]; 107 union {
80 /* TLB that's actually used when the guest is running. */ 108 u64 tv64;
81 struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; 109 struct {
82 /* Pages which are referenced in the shadow TLB. */ 110 u32 tbu, tbl;
83 struct page *shadow_pages[PPC44x_TLB_SIZE]; 111 } tv32;
112 };
113};
84 114
85 /* Track which TLB entries we've modified in the current exit. */ 115struct kvm_arch {
86 u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; 116};
87 117
118struct kvm_vcpu_arch {
88 u32 host_stack; 119 u32 host_stack;
89 u32 host_pid; 120 u32 host_pid;
90 u32 host_dbcr0; 121 u32 host_dbcr0;
@@ -94,32 +125,32 @@ struct kvm_vcpu_arch {
94 u32 host_msr; 125 u32 host_msr;
95 126
96 u64 fpr[32]; 127 u64 fpr[32];
97 u32 gpr[32]; 128 ulong gpr[32];
98 129
99 u32 pc; 130 ulong pc;
100 u32 cr; 131 u32 cr;
101 u32 ctr; 132 ulong ctr;
102 u32 lr; 133 ulong lr;
103 u32 xer; 134 ulong xer;
104 135
105 u32 msr; 136 ulong msr;
106 u32 mmucr; 137 u32 mmucr;
107 u32 sprg0; 138 ulong sprg0;
108 u32 sprg1; 139 ulong sprg1;
109 u32 sprg2; 140 ulong sprg2;
110 u32 sprg3; 141 ulong sprg3;
111 u32 sprg4; 142 ulong sprg4;
112 u32 sprg5; 143 ulong sprg5;
113 u32 sprg6; 144 ulong sprg6;
114 u32 sprg7; 145 ulong sprg7;
115 u32 srr0; 146 ulong srr0;
116 u32 srr1; 147 ulong srr1;
117 u32 csrr0; 148 ulong csrr0;
118 u32 csrr1; 149 ulong csrr1;
119 u32 dsrr0; 150 ulong dsrr0;
120 u32 dsrr1; 151 ulong dsrr1;
121 u32 dear; 152 ulong dear;
122 u32 esr; 153 ulong esr;
123 u32 dec; 154 u32 dec;
124 u32 decar; 155 u32 decar;
125 u32 tbl; 156 u32 tbl;
@@ -127,7 +158,7 @@ struct kvm_vcpu_arch {
127 u32 tcr; 158 u32 tcr;
128 u32 tsr; 159 u32 tsr;
129 u32 ivor[16]; 160 u32 ivor[16];
130 u32 ivpr; 161 ulong ivpr;
131 u32 pir; 162 u32 pir;
132 163
133 u32 shadow_pid; 164 u32 shadow_pid;
@@ -140,9 +171,22 @@ struct kvm_vcpu_arch {
140 u32 dbcr0; 171 u32 dbcr0;
141 u32 dbcr1; 172 u32 dbcr1;
142 173
174#ifdef CONFIG_KVM_EXIT_TIMING
175 struct kvmppc_exit_timing timing_exit;
176 struct kvmppc_exit_timing timing_last_enter;
177 u32 last_exit_type;
178 u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
179 u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
180 u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
181 u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
182 u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
183 u64 timing_last_exit;
184 struct dentry *debugfs_exit_timing;
185#endif
186
143 u32 last_inst; 187 u32 last_inst;
144 u32 fault_dear; 188 ulong fault_dear;
145 u32 fault_esr; 189 ulong fault_esr;
146 gpa_t paddr_accessed; 190 gpa_t paddr_accessed;
147 191
148 u8 io_gpr; /* GPR used as IO source/target */ 192 u8 io_gpr; /* GPR used as IO source/target */
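
The kvmppc_exit_timing union introduced above lets exit-timing code read the 64-bit timebase value either whole (tv64) or as its 32-bit upper/lower halves (tv32.tbu/tbl), which line up on a big-endian target. A small sketch; the _demo struct mirrors the layout and the composition is done portably here rather than relying on host endianness:

/* kvmppc_exit_timing layout sketch: tbu/tbl are the high and low words
 * of tv64 on a big-endian PPC build. */
#include <stdint.h>
#include <stdio.h>

struct exit_timing_demo {
	union {
		uint64_t tv64;
		struct { uint32_t tbu, tbl; } tv32;
	};
};

int main(void)
{
	struct exit_timing_demo t;
	uint32_t tbu = 0x00000012, tbl = 0x34567890;	/* e.g. mftbu/mftbl results */

	/* Portable composition of the 64-bit timebase from its halves; on a
	 * big-endian build, writing t.tv32.tbu/tbl directly would make
	 * t.tv64 read back the same value. */
	t.tv64 = ((uint64_t)tbu << 32) | tbl;
	printf("timebase = 0x%016llx\n", (unsigned long long)t.tv64);
	return 0;
}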
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index bb62ad876de3..36d2a50a8487 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -29,11 +29,6 @@
29#include <linux/kvm_types.h> 29#include <linux/kvm_types.h>
30#include <linux/kvm_host.h> 30#include <linux/kvm_host.h>
31 31
32struct kvm_tlb {
33 struct tlbe guest_tlb[PPC44x_TLB_SIZE];
34 struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
35};
36
37enum emulation_result { 32enum emulation_result {
38 EMULATE_DONE, /* no further processing */ 33 EMULATE_DONE, /* no further processing */
39 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ 34 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
@@ -41,9 +36,6 @@ enum emulation_result {
41 EMULATE_FAIL, /* can't emulate this instruction */ 36 EMULATE_FAIL, /* can't emulate this instruction */
42}; 37};
43 38
44extern const unsigned char exception_priority[];
45extern const unsigned char priority_exception[];
46
47extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 39extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
48extern char kvmppc_handlers_start[]; 40extern char kvmppc_handlers_start[];
49extern unsigned long kvmppc_handler_len; 41extern unsigned long kvmppc_handler_len;
@@ -58,51 +50,44 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
58extern int kvmppc_emulate_instruction(struct kvm_run *run, 50extern int kvmppc_emulate_instruction(struct kvm_run *run,
59 struct kvm_vcpu *vcpu); 51 struct kvm_vcpu *vcpu);
60extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); 52extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
53extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
61 54
62extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, 55extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
63 u64 asid, u32 flags); 56 u64 asid, u32 flags, u32 max_bytes,
64extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, 57 unsigned int gtlb_idx);
65 gva_t eend, u32 asid);
66extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); 58extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
67extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); 59extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
68 60
69/* XXX Book E specific */ 61/* Core-specific hooks */
70extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i); 62
71 63extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
72extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); 64 unsigned int id);
73 65extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
74static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception) 66extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
75{ 67extern int kvmppc_core_check_processor_compat(void);
76 unsigned int priority = exception_priority[exception]; 68extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
77 set_bit(priority, &vcpu->arch.pending_exceptions); 69 struct kvm_translation *tr);
78} 70
79 71extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
80static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception) 72extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
81{ 73
82 unsigned int priority = exception_priority[exception]; 74extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
83 clear_bit(priority, &vcpu->arch.pending_exceptions); 75extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
84} 76
85 77extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
86/* Helper function for "full" MSR writes. No need to call this if only EE is 78extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
87 * changing. */ 79extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
88static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) 80extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
89{ 81extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
90 if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) 82 struct kvm_interrupt *irq);
91 kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); 83
92 84extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
93 vcpu->arch.msr = new_msr; 85 unsigned int op, int *advance);
94 86extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
95 if (vcpu->arch.msr & MSR_WE) 87extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
96 kvm_vcpu_block(vcpu); 88
97} 89extern int kvmppc_booke_init(void);
98 90extern void kvmppc_booke_exit(void);
99static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
100{
101 if (vcpu->arch.pid != new_pid) {
102 vcpu->arch.pid = new_pid;
103 vcpu->arch.swap_pid = 1;
104 }
105}
106 91
107extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); 92extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
108 93
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index 612d83276653..84b457a3c1bc 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -67,7 +67,7 @@ static __inline__ long local_inc_return(local_t *l)
67 bne- 1b" 67 bne- 1b"
68 : "=&r" (t) 68 : "=&r" (t)
69 : "r" (&(l->a.counter)) 69 : "r" (&(l->a.counter))
70 : "cc", "memory"); 70 : "cc", "xer", "memory");
71 71
72 return t; 72 return t;
73} 73}
@@ -94,7 +94,7 @@ static __inline__ long local_dec_return(local_t *l)
94 bne- 1b" 94 bne- 1b"
95 : "=&r" (t) 95 : "=&r" (t)
96 : "r" (&(l->a.counter)) 96 : "r" (&(l->a.counter))
97 : "cc", "memory"); 97 : "cc", "xer", "memory");
98 98
99 return t; 99 return t;
100} 100}
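
The added "xer" clobber matters because addic updates the carry bit, which lives in XER, so the compiler must not assume XER survives the asm. A minimal sketch of the same pattern follows; it is the 64-bit (ldarx/stdcx.) variant and only assembles on a PowerPC toolchain, so it is illustration rather than portable code.

/* ppc64-only sketch: why the addic-based primitive clobbers "xer". */
#include <stdio.h>

static long demo_inc_return(volatile long *p)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%1\n"	/* load-reserve (lwarx on 32-bit) */
"	addic	%0,%0,1\n"	/* add immediate carrying: writes XER[CA] */
"	stdcx.	%0,0,%1\n"	/* store-conditional (stwcx. on 32-bit) */
"	bne-	1b"
	: "=&r" (t)
	: "r" (p)
	: "cc", "xer", "memory");

	return t;
}

int main(void)
{
	long v = 41;
	printf("%ld\n", demo_inc_return(&v));	/* prints 42 */
	return 0;
}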
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 2fe268b10333..25aaa97facd8 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -133,7 +133,8 @@ struct lppaca {
133//============================================================================= 133//=============================================================================
134// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data 134// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
135//============================================================================= 135//=============================================================================
136 u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF 136 u32 page_ins; // CMO Hint - # page ins by OS x00-x04
137 u8 pmc_save_area[252]; // PMC interrupt Area x04-xFF
137} __attribute__((__aligned__(0x400))); 138} __attribute__((__aligned__(0x400)));
138 139
139extern struct lppaca lppaca[]; 140extern struct lppaca lppaca[];
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
index 3d108676584c..776f415a36aa 100644
--- a/arch/powerpc/include/asm/mmu-40x.h
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -54,8 +54,9 @@
54#ifndef __ASSEMBLY__ 54#ifndef __ASSEMBLY__
55 55
56typedef struct { 56typedef struct {
57 unsigned long id; 57 unsigned int id;
58 unsigned long vdso_base; 58 unsigned int active;
59 unsigned long vdso_base;
59} mm_context_t; 60} mm_context_t;
60 61
61#endif /* !__ASSEMBLY__ */ 62#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index a825524c981a..27cc6fdcd3b7 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -4,6 +4,8 @@
4 * PPC440 support 4 * PPC440 support
5 */ 5 */
6 6
7#include <asm/page.h>
8
7#define PPC44x_MMUCR_TID 0x000000ff 9#define PPC44x_MMUCR_TID 0x000000ff
8#define PPC44x_MMUCR_STS 0x00010000 10#define PPC44x_MMUCR_STS 0x00010000
9 11
@@ -54,10 +56,12 @@
54#ifndef __ASSEMBLY__ 56#ifndef __ASSEMBLY__
55 57
56extern unsigned int tlb_44x_hwater; 58extern unsigned int tlb_44x_hwater;
59extern unsigned int tlb_44x_index;
57 60
58typedef struct { 61typedef struct {
59 unsigned long id; 62 unsigned int id;
60 unsigned long vdso_base; 63 unsigned int active;
64 unsigned long vdso_base;
61} mm_context_t; 65} mm_context_t;
62 66
63#endif /* !__ASSEMBLY__ */ 67#endif /* !__ASSEMBLY__ */
@@ -73,4 +77,19 @@ typedef struct {
73/* Size of the TLBs used for pinning in lowmem */ 77/* Size of the TLBs used for pinning in lowmem */
74#define PPC_PIN_SIZE (1 << 28) /* 256M */ 78#define PPC_PIN_SIZE (1 << 28) /* 256M */
75 79
80#if (PAGE_SHIFT == 12)
81#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
82#elif (PAGE_SHIFT == 14)
83#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
84#elif (PAGE_SHIFT == 16)
85#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
86#else
87#error "Unsupported PAGE_SIZE"
88#endif
89
90#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2)
91#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2)
92#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
93#define PPC44x_PTE_ADD_MASK_BIT (32 - PTE_T_LOG2 - PTE_SHIFT)
94
76#endif /* _ASM_POWERPC_MMU_44X_H_ */ 95#endif /* _ASM_POWERPC_MMU_44X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 9db877eb88db..07865a357848 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -137,7 +137,8 @@
137 137
138#ifndef __ASSEMBLY__ 138#ifndef __ASSEMBLY__
139typedef struct { 139typedef struct {
140 unsigned long id; 140 unsigned int id;
141 unsigned int active;
141 unsigned long vdso_base; 142 unsigned long vdso_base;
142} mm_context_t; 143} mm_context_t;
143#endif /* !__ASSEMBLY__ */ 144#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-fsl-booke.h
index 925d93cf64d8..3f941c0f7e8e 100644
--- a/arch/powerpc/include/asm/mmu-fsl-booke.h
+++ b/arch/powerpc/include/asm/mmu-fsl-booke.h
@@ -40,6 +40,8 @@
40#define MAS2_M 0x00000004 40#define MAS2_M 0x00000004
41#define MAS2_G 0x00000002 41#define MAS2_G 0x00000002
42#define MAS2_E 0x00000001 42#define MAS2_E 0x00000001
43#define MAS2_EPN_MASK(size) (~0 << (2*(size) + 10))
44#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
43 45
44#define MAS3_RPN 0xFFFFF000 46#define MAS3_RPN 0xFFFFF000
45#define MAS3_U0 0x00000200 47#define MAS3_U0 0x00000200
@@ -74,8 +76,9 @@
74#ifndef __ASSEMBLY__ 76#ifndef __ASSEMBLY__
75 77
76typedef struct { 78typedef struct {
77 unsigned long id; 79 unsigned int id;
78 unsigned long vdso_base; 80 unsigned int active;
81 unsigned long vdso_base;
79} mm_context_t; 82} mm_context_t;
80#endif /* !__ASSEMBLY__ */ 83#endif /* !__ASSEMBLY__ */
81 84
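
The new MAS2_EPN_MASK()/MAS2_VAL() helpers build a MAS2 value from an effective address, a page-size code and flag bits. The sketch below assumes the size argument is the Book-E TSIZE encoding (page size = 4^size KB, so 1 = 4K, 5 = 1M); the explicit parentheses around the & are added here for clarity only.

/* Sketch of MAS2_EPN_MASK()/MAS2_VAL() with an assumed TSIZE encoding. */
#include <stdio.h>

#define MAS2_EPN_MASK(size)	(~0U << (2*(size) + 10))
#define MAS2_VAL(addr, size, flags) (((addr) & MAS2_EPN_MASK(size)) | (flags))

#define MAS2_M	0x00000004U	/* memory coherence required */
#define MAS2_G	0x00000002U	/* guarded */

int main(void)
{
	unsigned int addr = 0xc0012345U;

	/* 4K entry (size 1): the mask keeps bits 31..12 */
	printf("4K  EPN mask 0x%08x MAS2 0x%08x\n",
	       MAS2_EPN_MASK(1), MAS2_VAL(addr, 1, MAS2_M));

	/* 1M entry (size 5): the mask keeps bits 31..20 */
	printf("1M  EPN mask 0x%08x MAS2 0x%08x\n",
	       MAS2_EPN_MASK(5), MAS2_VAL(addr, 5, MAS2_M | MAS2_G));
	return 0;
}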
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 4c0e1b4f975c..6e7639911318 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -2,6 +2,63 @@
2#define _ASM_POWERPC_MMU_H_ 2#define _ASM_POWERPC_MMU_H_
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <asm/asm-compat.h>
6#include <asm/feature-fixups.h>
7
8/*
9 * MMU features bit definitions
10 */
11
12/*
13 * First half is MMU families
14 */
15#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001)
16#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002)
17#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
18#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
19#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
20
21/*
22 * This is individual features
23 */
24
25/* Enable use of high BAT registers */
26#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
27
28/* Enable >32-bit physical addresses on 32-bit processor, only used
29 * by CONFIG_6xx currently as BookE supports that from day 1
30 */
31#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000)
32
33/* Enable use of broadcast TLB invalidations. We don't always set it
34 * on processors that support it due to other constraints with the
35 * use of such invalidations
36 */
37#define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000)
38
39/* Enable use of tlbilx invalidate-by-PID variant.
40 */
41#define MMU_FTR_USE_TLBILX_PID ASM_CONST(0x00080000)
42
43/* This indicates that the processor cannot handle multiple outstanding
44 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
45 * around such invalidate forms.
46 */
47#define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000)
48
49#ifndef __ASSEMBLY__
50#include <asm/cputable.h>
51
52static inline int mmu_has_feature(unsigned long feature)
53{
54 return (cur_cpu_spec->mmu_features & feature);
55}
56
57extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
58
59#endif /* !__ASSEMBLY__ */
60
61
5#ifdef CONFIG_PPC64 62#ifdef CONFIG_PPC64
6/* 64-bit classic hash table MMU */ 63/* 64-bit classic hash table MMU */
7# include <asm/mmu-hash64.h> 64# include <asm/mmu-hash64.h>
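
The new MMU feature bits are tested at runtime through cur_cpu_spec->mmu_features, in the same spirit as the existing CPU feature bits (and can be patched out by the feature fixup machinery). A standalone sketch of the test, with cur_cpu_spec modelled as a plain struct (the _demo names are mine):

/* Sketch of mmu_has_feature() against a stand-in cpu spec. */
#include <stdio.h>

#define MMU_FTR_TYPE_44x		0x00000008UL
#define MMU_FTR_USE_HIGH_BATS		0x00010000UL
#define MMU_FTR_USE_TLBIVAX_BCAST	0x00040000UL

struct cpu_spec_demo { unsigned long mmu_features; };

static struct cpu_spec_demo cur_cpu_spec_demo = {
	.mmu_features = MMU_FTR_TYPE_44x | MMU_FTR_USE_TLBIVAX_BCAST,
};

static int mmu_has_feature_demo(unsigned long feature)
{
	return (cur_cpu_spec_demo.mmu_features & feature) != 0;
}

int main(void)
{
	/* TLB flush code can key off the feature bit at runtime. */
	if (mmu_has_feature_demo(MMU_FTR_USE_TLBIVAX_BCAST))
		printf("use broadcast tlbivax\n");
	if (!mmu_has_feature_demo(MMU_FTR_USE_HIGH_BATS))
		printf("high BATs not available\n");
	return 0;
}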
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 6b993ef452ff..ab4f19263c42 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -2,237 +2,26 @@
2#define __ASM_POWERPC_MMU_CONTEXT_H 2#define __ASM_POWERPC_MMU_CONTEXT_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <linux/kernel.h>
6#include <linux/mm.h>
7#include <linux/sched.h>
8#include <linux/spinlock.h>
5#include <asm/mmu.h> 9#include <asm/mmu.h>
6#include <asm/cputable.h> 10#include <asm/cputable.h>
7#include <asm-generic/mm_hooks.h> 11#include <asm-generic/mm_hooks.h>
8 12#include <asm/cputhreads.h>
9#ifndef CONFIG_PPC64
10#include <asm/atomic.h>
11#include <linux/bitops.h>
12
13/*
14 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
15 * (virtual segment identifiers) for each context. Although the
16 * hardware supports 24-bit VSIDs, and thus >1 million contexts,
17 * we only use 32,768 of them. That is ample, since there can be
18 * at most around 30,000 tasks in the system anyway, and it means
19 * that we can use a bitmap to indicate which contexts are in use.
20 * Using a bitmap means that we entirely avoid all of the problems
21 * that we used to have when the context number overflowed,
22 * particularly on SMP systems.
23 * -- paulus.
24 */
25
26/*
27 * This function defines the mapping from contexts to VSIDs (virtual
28 * segment IDs). We use a skew on both the context and the high 4 bits
29 * of the 32-bit virtual address (the "effective segment ID") in order
30 * to spread out the entries in the MMU hash table. Note, if this
31 * function is changed then arch/ppc/mm/hashtable.S will have to be
32 * changed to correspond.
33 */
34#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
35 & 0xffffff)
36
37/*
38 The MPC8xx has only 16 contexts. We rotate through them on each
39 task switch. A better way would be to keep track of tasks that
40 own contexts, and implement an LRU usage. That way very active
41 tasks don't always have to pay the TLB reload overhead. The
42 kernel pages are mapped shared, so the kernel can run on behalf
43 of any task that makes a kernel entry. Shared does not mean they
44 are not protected, just that the ASID comparison is not performed.
45 -- Dan
46
47 The IBM4xx has 256 contexts, so we can just rotate through these
48 as a way of "switching" contexts. If the TID of the TLB is zero,
49 the PID/TID comparison is disabled, so we can use a TID of zero
50 to represent all kernel pages as shared among all contexts.
51 -- Dan
52 */
53
54static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
55{
56}
57
58#ifdef CONFIG_8xx
59#define NO_CONTEXT 16
60#define LAST_CONTEXT 15
61#define FIRST_CONTEXT 0
62
63#elif defined(CONFIG_4xx)
64#define NO_CONTEXT 256
65#define LAST_CONTEXT 255
66#define FIRST_CONTEXT 1
67
68#elif defined(CONFIG_E200) || defined(CONFIG_E500)
69#define NO_CONTEXT 256
70#define LAST_CONTEXT 255
71#define FIRST_CONTEXT 1
72
73#else
74
75/* PPC 6xx, 7xx CPUs */
76#define NO_CONTEXT ((unsigned long) -1)
77#define LAST_CONTEXT 32767
78#define FIRST_CONTEXT 1
79#endif
80
81/*
82 * Set the current MMU context.
83 * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
84 * loading up the segment registers for the user part of the address space.
85 *
86 * Since the PGD is immediately available, it is much faster to simply
87 * pass this along as a second parameter, which is required for 8xx and
88 * can be used for debugging on all processors (if you happen to have
89 * an Abatron).
90 */
91extern void set_context(unsigned long contextid, pgd_t *pgd);
92
93/*
94 * Bitmap of contexts in use.
95 * The size of this bitmap is LAST_CONTEXT + 1 bits.
96 */
97extern unsigned long context_map[];
98
99/*
100 * This caches the next context number that we expect to be free.
101 * Its use is an optimization only, we can't rely on this context
102 * number to be free, but it usually will be.
103 */
104extern unsigned long next_mmu_context;
105
106/*
107 * If we don't have sufficient contexts to give one to every task
108 * that could be in the system, we need to be able to steal contexts.
109 * These variables support that.
110 */
111#if LAST_CONTEXT < 30000
112#define FEW_CONTEXTS 1
113extern atomic_t nr_free_contexts;
114extern struct mm_struct *context_mm[LAST_CONTEXT+1];
115extern void steal_context(void);
116#endif
117
118/*
119 * Get a new mmu context for the address space described by `mm'.
120 */
121static inline void get_mmu_context(struct mm_struct *mm)
122{
123 unsigned long ctx;
124
125 if (mm->context.id != NO_CONTEXT)
126 return;
127#ifdef FEW_CONTEXTS
128 while (atomic_dec_if_positive(&nr_free_contexts) < 0)
129 steal_context();
130#endif
131 ctx = next_mmu_context;
132 while (test_and_set_bit(ctx, context_map)) {
133 ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
134 if (ctx > LAST_CONTEXT)
135 ctx = 0;
136 }
137 next_mmu_context = (ctx + 1) & LAST_CONTEXT;
138 mm->context.id = ctx;
139#ifdef FEW_CONTEXTS
140 context_mm[ctx] = mm;
141#endif
142}
143
144/*
145 * Set up the context for a new address space.
146 */
147static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
148{
149 mm->context.id = NO_CONTEXT;
150 return 0;
151}
152
153/*
154 * We're finished using the context for an address space.
155 */
156static inline void destroy_context(struct mm_struct *mm)
157{
158 preempt_disable();
159 if (mm->context.id != NO_CONTEXT) {
160 clear_bit(mm->context.id, context_map);
161 mm->context.id = NO_CONTEXT;
162#ifdef FEW_CONTEXTS
163 atomic_inc(&nr_free_contexts);
164#endif
165 }
166 preempt_enable();
167}
168
169static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
170 struct task_struct *tsk)
171{
172#ifdef CONFIG_ALTIVEC
173 if (cpu_has_feature(CPU_FTR_ALTIVEC))
174 asm volatile ("dssall;\n"
175#ifndef CONFIG_POWER4
176 "sync;\n" /* G4 needs a sync here, G5 apparently not */
177#endif
178 : : );
179#endif /* CONFIG_ALTIVEC */
180
181 tsk->thread.pgdir = next->pgd;
182
183 /* No need to flush userspace segments if the mm doesnt change */
184 if (prev == next)
185 return;
186
187 /* Setup new userspace context */
188 get_mmu_context(next);
189 set_context(next->context.id, next->pgd);
190}
191
192#define deactivate_mm(tsk,mm) do { } while (0)
193 13
194/* 14/*
195 * After we have set current->mm to a new value, this activates 15 * Most of the context management is out of line
196 * the context for the new mm so we see the new mappings.
197 */ 16 */
198#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, current)
199
200extern void mmu_context_init(void); 17extern void mmu_context_init(void);
201
202
203#else
204
205#include <linux/kernel.h>
206#include <linux/mm.h>
207#include <linux/sched.h>
208
209/*
210 * Copyright (C) 2001 PPC 64 Team, IBM Corp
211 *
212 * This program is free software; you can redistribute it and/or
213 * modify it under the terms of the GNU General Public License
214 * as published by the Free Software Foundation; either version
215 * 2 of the License, or (at your option) any later version.
216 */
217
218static inline void enter_lazy_tlb(struct mm_struct *mm,
219 struct task_struct *tsk)
220{
221}
222
223/*
224 * The proto-VSID space has 2^35 - 1 segments available for user mappings.
225 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
226 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
227 */
228#define NO_CONTEXT 0
229#define MAX_CONTEXT ((1UL << 19) - 1)
230
231extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 18extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
232extern void destroy_context(struct mm_struct *mm); 19extern void destroy_context(struct mm_struct *mm);
233 20
21extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
234extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm); 22extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
235extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); 23extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
24extern void set_context(unsigned long id, pgd_t *pgd);
236 25
237/* 26/*
238 * switch_mm is the entry point called from the architecture independent 27 * switch_mm is the entry point called from the architecture independent
@@ -241,22 +30,39 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
241static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 30static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
242 struct task_struct *tsk) 31 struct task_struct *tsk)
243{ 32{
244 if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask)) 33 /* Mark this context has been used on the new CPU */
245 cpu_set(smp_processor_id(), next->cpu_vm_mask); 34 cpu_set(smp_processor_id(), next->cpu_vm_mask);
35
36 /* 32-bit keeps track of the current PGDIR in the thread struct */
37#ifdef CONFIG_PPC32
38 tsk->thread.pgdir = next->pgd;
39#endif /* CONFIG_PPC32 */
246 40
247 /* No need to flush userspace segments if the mm doesnt change */ 41 /* Nothing else to do if we aren't actually switching */
248 if (prev == next) 42 if (prev == next)
249 return; 43 return;
250 44
45 /* We must stop all altivec streams before changing the HW
46 * context
47 */
251#ifdef CONFIG_ALTIVEC 48#ifdef CONFIG_ALTIVEC
252 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 49 if (cpu_has_feature(CPU_FTR_ALTIVEC))
253 asm volatile ("dssall"); 50 asm volatile ("dssall");
254#endif /* CONFIG_ALTIVEC */ 51#endif /* CONFIG_ALTIVEC */
255 52
53 /* The actual HW switching method differs between the various
54 * sub architectures.
55 */
56#ifdef CONFIG_PPC_STD_MMU_64
256 if (cpu_has_feature(CPU_FTR_SLB)) 57 if (cpu_has_feature(CPU_FTR_SLB))
257 switch_slb(tsk, next); 58 switch_slb(tsk, next);
258 else 59 else
259 switch_stab(tsk, next); 60 switch_stab(tsk, next);
61#else
62 /* Out of line for now */
63 switch_mmu_context(prev, next);
64#endif
65
260} 66}
261 67
262#define deactivate_mm(tsk,mm) do { } while (0) 68#define deactivate_mm(tsk,mm) do { } while (0)
@@ -274,6 +80,11 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
274 local_irq_restore(flags); 80 local_irq_restore(flags);
275} 81}
276 82
277#endif /* CONFIG_PPC64 */ 83/* We don't currently use enter_lazy_tlb() for anything */
84static inline void enter_lazy_tlb(struct mm_struct *mm,
85 struct task_struct *tsk)
86{
87}
88
278#endif /* __KERNEL__ */ 89#endif /* __KERNEL__ */
279#endif /* __ASM_POWERPC_MMU_CONTEXT_H */ 90#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
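
Most of what this hunk deletes is the inline 32-bit context allocator: a bitmap of context IDs plus a cached "next free" hint, now moved out of line. The sketch below is a simplified standalone model of that allocator (no stealing, no SMP), with LAST_CONTEXT shrunk to an assumed small value for illustration.

/* Simplified model of the bitmap context allocator this patch moves
 * out of line (based on the removed get_mmu_context()). */
#include <stdio.h>

#define LAST_CONTEXT	255
#define NO_CONTEXT	(~0UL)

static unsigned long context_map[(LAST_CONTEXT + 1 + 63) / 64];
static unsigned long next_mmu_context;

static int test_and_set_ctx(unsigned long ctx)
{
	unsigned long mask = 1UL << (ctx % 64);
	int was_set = (context_map[ctx / 64] & mask) != 0;

	context_map[ctx / 64] |= mask;
	return was_set;
}

static unsigned long get_context(void)
{
	unsigned long ctx = next_mmu_context;

	/* Scan forward from the cached hint until a free ID is found. */
	while (test_and_set_ctx(ctx)) {
		ctx = (ctx + 1) & LAST_CONTEXT;
		if (ctx == next_mmu_context)
			return NO_CONTEXT;	/* all contexts in use */
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	return ctx;
}

int main(void)
{
	unsigned long a = get_context();
	unsigned long b = get_context();
	unsigned long c = get_context();

	printf("allocated contexts: %lu %lu %lu\n", a, b, c);
	return 0;
}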
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 81ef10b6b672..81a23932a160 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -239,6 +239,25 @@ struct mpc52xx_cdm {
239 u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */ 239 u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */
240}; 240};
241 241
242/* Interrupt controller Register set */
243struct mpc52xx_intr {
244 u32 per_mask; /* INTR + 0x00 */
245 u32 per_pri1; /* INTR + 0x04 */
246 u32 per_pri2; /* INTR + 0x08 */
247 u32 per_pri3; /* INTR + 0x0c */
248 u32 ctrl; /* INTR + 0x10 */
249 u32 main_mask; /* INTR + 0x14 */
250 u32 main_pri1; /* INTR + 0x18 */
251 u32 main_pri2; /* INTR + 0x1c */
252 u32 reserved1; /* INTR + 0x20 */
253 u32 enc_status; /* INTR + 0x24 */
254 u32 crit_status; /* INTR + 0x28 */
255 u32 main_status; /* INTR + 0x2c */
256 u32 per_status; /* INTR + 0x30 */
257 u32 reserved2; /* INTR + 0x34 */
258 u32 per_error; /* INTR + 0x38 */
259};
260
242#endif /* __ASSEMBLY__ */ 261#endif /* __ASSEMBLY__ */
243 262
244 263
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index 8917ed630565..a218da6bec7c 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -68,12 +68,20 @@
68#define MPC52xx_PSC_IMR_ORERR 0x1000 68#define MPC52xx_PSC_IMR_ORERR 0x1000
69#define MPC52xx_PSC_IMR_IPC 0x8000 69#define MPC52xx_PSC_IMR_IPC 0x8000
70 70
71/* PSC input port change bit */ 71/* PSC input port change bits */
72#define MPC52xx_PSC_CTS 0x01 72#define MPC52xx_PSC_CTS 0x01
73#define MPC52xx_PSC_DCD 0x02 73#define MPC52xx_PSC_DCD 0x02
74#define MPC52xx_PSC_D_CTS 0x10 74#define MPC52xx_PSC_D_CTS 0x10
75#define MPC52xx_PSC_D_DCD 0x20 75#define MPC52xx_PSC_D_DCD 0x20
76 76
77/* PSC acr bits */
78#define MPC52xx_PSC_IEC_CTS 0x01
79#define MPC52xx_PSC_IEC_DCD 0x02
80
81/* PSC output port bits */
82#define MPC52xx_PSC_OP_RTS 0x01
83#define MPC52xx_PSC_OP_RES 0x02
84
77/* PSC mode fields */ 85/* PSC mode fields */
78#define MPC52xx_PSC_MODE_5_BITS 0x00 86#define MPC52xx_PSC_MODE_5_BITS 0x00
79#define MPC52xx_PSC_MODE_6_BITS 0x01 87#define MPC52xx_PSC_MODE_6_BITS 0x01
@@ -91,6 +99,7 @@
91#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00 99#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00
92#define MPC52xx_PSC_MODE_ONE_STOP 0x07 100#define MPC52xx_PSC_MODE_ONE_STOP 0x07
93#define MPC52xx_PSC_MODE_TWO_STOP 0x0f 101#define MPC52xx_PSC_MODE_TWO_STOP 0x0f
102#define MPC52xx_PSC_MODE_TXCTS 0x10
94 103
95#define MPC52xx_PSC_RFNUM_MASK 0x01ff 104#define MPC52xx_PSC_RFNUM_MASK 0x01ff
96 105
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 458c1f7fbc18..dabc01c727b8 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -1,9 +1,134 @@
1/* 1/*
2 * Pull in the generic implementation for the mutex fastpath. 2 * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
3 */
4#ifndef _ASM_POWERPC_MUTEX_H
5#define _ASM_POWERPC_MUTEX_H
6
7static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
8{
9 int t;
10
11 __asm__ __volatile__ (
12"1: lwarx %0,0,%1 # mutex trylock\n\
13 cmpw 0,%0,%2\n\
14 bne- 2f\n"
15 PPC405_ERR77(0,%1)
16" stwcx. %3,0,%1\n\
17 bne- 1b"
18 ISYNC_ON_SMP
19 "\n\
202:"
21 : "=&r" (t)
22 : "r" (&v->counter), "r" (old), "r" (new)
23 : "cc", "memory");
24
25 return t;
26}
27
28static inline int __mutex_dec_return_lock(atomic_t *v)
29{
30 int t;
31
32 __asm__ __volatile__(
33"1: lwarx %0,0,%1 # mutex lock\n\
34 addic %0,%0,-1\n"
35 PPC405_ERR77(0,%1)
36" stwcx. %0,0,%1\n\
37 bne- 1b"
38 ISYNC_ON_SMP
39 : "=&r" (t)
40 : "r" (&v->counter)
41 : "cc", "memory");
42
43 return t;
44}
45
46static inline int __mutex_inc_return_unlock(atomic_t *v)
47{
48 int t;
49
50 __asm__ __volatile__(
51 LWSYNC_ON_SMP
52"1: lwarx %0,0,%1 # mutex unlock\n\
53 addic %0,%0,1\n"
54 PPC405_ERR77(0,%1)
55" stwcx. %0,0,%1 \n\
56 bne- 1b"
57 : "=&r" (t)
58 : "r" (&v->counter)
59 : "cc", "memory");
60
61 return t;
62}
63
64/**
65 * __mutex_fastpath_lock - try to take the lock by moving the count
66 * from 1 to a 0 value
67 * @count: pointer of type atomic_t
68 * @fail_fn: function to call if the original value was not 1
69 *
70 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
71 * it wasn't 1 originally. This function MUST leave the value lower than
72 * 1 even when the "1" assertion wasn't true.
73 */
74static inline void
75__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
76{
77 if (unlikely(__mutex_dec_return_lock(count) < 0))
78 fail_fn(count);
79}
80
81/**
82 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
83 * from 1 to a 0 value
84 * @count: pointer of type atomic_t
85 * @fail_fn: function to call if the original value was not 1
86 *
87 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
88 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
89 * or anything the slow path function returns.
90 */
91static inline int
92__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
93{
94 if (unlikely(__mutex_dec_return_lock(count) < 0))
95 return fail_fn(count);
96 return 0;
97}
98
99/**
100 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
101 * @count: pointer of type atomic_t
102 * @fail_fn: function to call if the original value was not 0
103 *
104 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
105 * In the failure case, this function is allowed to either set the value to
106 * 1, or to set it to a value lower than 1.
107 */
108static inline void
109__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
110{
111 if (unlikely(__mutex_inc_return_unlock(count) <= 0))
112 fail_fn(count);
113}
114
115#define __mutex_slowpath_needs_to_unlock() 1
116
117/**
118 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
119 *
120 * @count: pointer of type atomic_t
121 * @fail_fn: fallback function
3 * 122 *
4 * TODO: implement optimized primitives instead, or leave the generic 123 * Change the count from 1 to 0, and return 1 (success), or if the count
5 * implementation in place, or pick the atomic_xchg() based generic 124 * was not 1, then return 0 (failure).
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */ 125 */
126static inline int
127__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
128{
129 if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
130 return 1;
131 return 0;
132}
8 133
9#include <asm-generic/mutex-dec.h> 134#endif
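
The lwarx/stwcx. sequences above implement the usual mutex-dec counter protocol: 1 means unlocked, 0 locked with no waiters, negative locked with waiters. The following is a portable model of just that protocol using C11 atomics, purely for illustration of the fastpath/slowpath decisions; it is not how the kernel builds its mutexes.

/* Counter protocol behind the new mutex fastpaths, modelled with C11
 * atomics: 1 = unlocked, 0 = locked, < 0 = locked with waiters. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 1;		/* starts unlocked */

static int fastpath_lock(void)
{
	/* __mutex_fastpath_lock(): success if we moved 1 -> 0. */
	return atomic_fetch_sub(&count, 1) - 1 >= 0;
}

static int fastpath_trylock(void)
{
	/* __mutex_fastpath_trylock(): only an exact 1 -> 0 succeeds. */
	int expected = 1;
	return atomic_compare_exchange_strong(&count, &expected, 0);
}

static int fastpath_unlock(void)
{
	/* __mutex_fastpath_unlock(): waiters exist if we did not reach 1. */
	return atomic_fetch_add(&count, 1) + 1 > 0;
}

int main(void)
{
	printf("lock:    %s\n", fastpath_lock() ? "fast path" : "slow path");
	printf("trylock: %s\n", fastpath_trylock() ? "got it" : "busy");
	printf("unlock:  %s\n", fastpath_unlock() ? "fast path" : "wake waiters");
	return 0;
}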
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index c0b8d4a29a91..197d569f5bd3 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -19,12 +19,15 @@
19#include <asm/kdump.h> 19#include <asm/kdump.h>
20 20
21/* 21/*
22 * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software 22 * On regular PPC32 page size is 4K (but we support 4K/16K/64K pages
23 * on PPC44x). For PPC64 we support either 4K or 64K software
23 * page size. When using 64K pages however, whether we are really supporting 24 * page size. When using 64K pages however, whether we are really supporting
24 * 64K pages in HW or not is irrelevant to those definitions. 25 * 64K pages in HW or not is irrelevant to those definitions.
25 */ 26 */
26#ifdef CONFIG_PPC_64K_PAGES 27#if defined(CONFIG_PPC_64K_PAGES)
27#define PAGE_SHIFT 16 28#define PAGE_SHIFT 16
29#elif defined(CONFIG_PPC_16K_PAGES)
30#define PAGE_SHIFT 14
28#else 31#else
29#define PAGE_SHIFT 12 32#define PAGE_SHIFT 12
30#endif 33#endif
@@ -151,7 +154,7 @@ typedef struct { pte_basic_t pte; } pte_t;
151/* 64k pages additionally define a bigger "real PTE" type that gathers 154/* 64k pages additionally define a bigger "real PTE" type that gathers
152 * the "second half" part of the PTE for pseudo 64k pages 155 * the "second half" part of the PTE for pseudo 64k pages
153 */ 156 */
154#ifdef CONFIG_PPC_64K_PAGES 157#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
155typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; 158typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
156#else 159#else
157typedef struct { pte_t pte; } real_pte_t; 160typedef struct { pte_t pte; } real_pte_t;
@@ -191,10 +194,10 @@ typedef pte_basic_t pte_t;
191#define pte_val(x) (x) 194#define pte_val(x) (x)
192#define __pte(x) (x) 195#define __pte(x) (x)
193 196
194#ifdef CONFIG_PPC_64K_PAGES 197#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
195typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; 198typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
196#else 199#else
197typedef unsigned long real_pte_t; 200typedef pte_t real_pte_t;
198#endif 201#endif
199 202
200 203
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index d77072a32cc6..1458d9500381 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -19,6 +19,8 @@
19#define PTE_FLAGS_OFFSET 0 19#define PTE_FLAGS_OFFSET 0
20#endif 20#endif
21 21
22#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
23
22#ifndef __ASSEMBLY__ 24#ifndef __ASSEMBLY__
23/* 25/*
24 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit 26 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
@@ -26,10 +28,8 @@
26 */ 28 */
27#ifdef CONFIG_PTE_64BIT 29#ifdef CONFIG_PTE_64BIT
28typedef unsigned long long pte_basic_t; 30typedef unsigned long long pte_basic_t;
29#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
30#else 31#else
31typedef unsigned long pte_basic_t; 32typedef unsigned long pte_basic_t;
32#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
33#endif 33#endif
34 34
35struct page; 35struct page;
@@ -39,6 +39,9 @@ extern void copy_page(void *to, void *from);
39 39
40#include <asm-generic/page.h> 40#include <asm-generic/page.h>
41 41
42#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
43#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
44
42#endif /* __ASSEMBLY__ */ 45#endif /* __ASSEMBLY__ */
43 46
44#endif /* _ASM_POWERPC_PAGE_32_H */ 47#endif /* _ASM_POWERPC_PAGE_32_H */
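
With this change PTE_SHIFT is derived generically: PTE_T_LOG2 is the log2 of sizeof(pte_t) obtained with __builtin_ffs(), so a full page always holds 2^PTE_SHIFT ptes whichever pte width is configured. A small sketch, using fixed-width stand-in types and an assumed 4K PAGE_SHIFT:

/* PTE_SHIFT derivation sketch with stand-in pte types. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pte32_t;	/* stand-in for !CONFIG_PTE_64BIT */
typedef uint64_t pte64_t;	/* stand-in for CONFIG_PTE_64BIT */

#define LOG2_OF(type)	(__builtin_ffs((int)sizeof(type)) - 1)
#define PAGE_SHIFT	12	/* assumed 4K pages */

int main(void)
{
	int pte_shift32 = PAGE_SHIFT - LOG2_OF(pte32_t);
	int pte_shift64 = PAGE_SHIFT - LOG2_OF(pte64_t);

	printf("32-bit pte: PTE_SHIFT=%d -> %d ptes per page\n",
	       pte_shift32, 1 << pte_shift32);	/* 10 -> 1024 */
	printf("64-bit pte: PTE_SHIFT=%d -> %d ptes per page\n",
	       pte_shift64, 1 << pte_shift64);	/* 9  -> 512  */
	return 0;
}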
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 9047af7baa69..84007afabdb5 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -13,7 +13,6 @@
13 13
14struct device_node; 14struct device_node;
15 15
16extern unsigned int ppc_pci_flags;
17enum { 16enum {
18 /* Force re-assigning all resources (ignore firmware 17 /* Force re-assigning all resources (ignore firmware
19 * setup completely) 18 * setup completely)
@@ -36,6 +35,31 @@ enum {
36 /* ... except for domain 0 */ 35 /* ... except for domain 0 */
37 PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020, 36 PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020,
38}; 37};
38#ifdef CONFIG_PCI
39extern unsigned int ppc_pci_flags;
40
41static inline void ppc_pci_set_flags(int flags)
42{
43 ppc_pci_flags = flags;
44}
45
46static inline void ppc_pci_add_flags(int flags)
47{
48 ppc_pci_flags |= flags;
49}
50
51static inline int ppc_pci_has_flag(int flag)
52{
53 return (ppc_pci_flags & flag);
54}
55#else
56static inline void ppc_pci_set_flags(int flags) { }
57static inline void ppc_pci_add_flags(int flags) { }
58static inline int ppc_pci_has_flag(int flag)
59{
60 return 0;
61}
62#endif
39 63
40 64
41/* 65/*
@@ -241,9 +265,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus);
241 265
242/** Discover new pci devices under this bus, and add them */ 266/** Discover new pci devices under this bus, and add them */
243extern void pcibios_add_pci_devices(struct pci_bus *bus); 267extern void pcibios_add_pci_devices(struct pci_bus *bus);
244extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus);
245
246extern int pcibios_remove_root_bus(struct pci_controller *phb);
247 268
248static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) 269static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
249{ 270{
@@ -290,6 +311,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
290/* Allocate & free a PCI host bridge structure */ 311/* Allocate & free a PCI host bridge structure */
291extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev); 312extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
292extern void pcibios_free_controller(struct pci_controller *phb); 313extern void pcibios_free_controller(struct pci_controller *phb);
314extern void pcibios_setup_phb_resources(struct pci_controller *hose);
293 315
294#ifdef CONFIG_PCI 316#ifdef CONFIG_PCI
295extern unsigned long pci_address_to_pio(phys_addr_t address); 317extern unsigned long pci_address_to_pio(phys_addr_t address);
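
The new ppc_pci_set_flags()/add_flags()/has_flag() wrappers let platform setup code set flags and generic code query them, with the !CONFIG_PCI stubs compiling the tests away. A minimal sketch of the usage; the #define values below are illustrative stand-ins for the enum flags, not the kernel's actual numbers.

/* Usage sketch of the ppc_pci_flags accessors. */
#include <stdio.h>

#define PPC_PCI_REASSIGN_ALL_BUS	0x00000002	/* assumed value */
#define PPC_PCI_PROBE_ONLY		0x00000004	/* assumed value */

static unsigned int ppc_pci_flags;	/* absent entirely without CONFIG_PCI */

static void ppc_pci_add_flags(int flags)	{ ppc_pci_flags |= flags; }
static int  ppc_pci_has_flag(int flag)		{ return (ppc_pci_flags & flag) != 0; }

int main(void)
{
	/* e.g. a board setup file requesting full bus renumbering */
	ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);

	/* pcibios_assign_all_busses() now reduces to this test */
	printf("reassign all busses: %d\n",
	       ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS));
	printf("probe only:          %d\n",
	       ppc_pci_has_flag(PPC_PCI_PROBE_ONLY));
	return 0;
}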
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 57a2a494886b..3548159a1beb 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -38,8 +38,8 @@ struct pci_dev;
38 * Set this to 1 if you want the kernel to re-assign all PCI 38 * Set this to 1 if you want the kernel to re-assign all PCI
39 * bus numbers (don't do that on ppc64 yet !) 39 * bus numbers (don't do that on ppc64 yet !)
40 */ 40 */
41#define pcibios_assign_all_busses() (ppc_pci_flags & \ 41#define pcibios_assign_all_busses() \
42 PPC_PCI_REASSIGN_ALL_BUS) 42 (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
43#define pcibios_scan_all_fns(a, b) 0 43#define pcibios_scan_all_fns(a, b) 0
44 44
45static inline void pcibios_set_master(struct pci_dev *dev) 45static inline void pcibios_set_master(struct pci_dev *dev)
@@ -204,15 +204,14 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
204 return root; 204 return root;
205} 205}
206 206
207extern void pcibios_setup_new_device(struct pci_dev *dev);
208
209extern void pcibios_claim_one_bus(struct pci_bus *b); 207extern void pcibios_claim_one_bus(struct pci_bus *b);
210 208
211extern void pcibios_allocate_bus_resources(struct pci_bus *bus); 209extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
212 210
213extern void pcibios_resource_survey(void); 211extern void pcibios_resource_survey(void);
214 212
215extern struct pci_controller *init_phb_dynamic(struct device_node *dn); 213extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
214extern int remove_phb_dynamic(struct pci_controller *phb);
216 215
217extern struct pci_dev *of_create_pci_dev(struct device_node *node, 216extern struct pci_dev *of_create_pci_dev(struct device_node *node,
218 struct pci_bus *bus, int devfn); 217 struct pci_bus *bus, int devfn);
@@ -221,6 +220,7 @@ extern void of_scan_pci_bridge(struct device_node *node,
221 struct pci_dev *dev); 220 struct pci_dev *dev);
222 221
223extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); 222extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
223extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
224 224
225extern int pci_read_irq_line(struct pci_dev *dev); 225extern int pci_read_irq_line(struct pci_dev *dev);
226 226
@@ -235,9 +235,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
235 const struct resource *rsrc, 235 const struct resource *rsrc,
236 resource_size_t *start, resource_size_t *end); 236 resource_size_t *start, resource_size_t *end);
237 237
238extern void pcibios_do_bus_setup(struct pci_bus *bus); 238extern void pcibios_setup_bus_devices(struct pci_bus *bus);
239extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus); 239extern void pcibios_setup_bus_self(struct pci_bus *bus);
240
241 240
242#endif /* __KERNEL__ */ 241#endif /* __KERNEL__ */
243#endif /* __ASM_POWERPC_PCI_H */ 242#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 58c07147b3ea..0815eb40acae 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5 5
6#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */
7
6extern void __bad_pte(pmd_t *pmd); 8extern void __bad_pte(pmd_t *pmd);
7 9
8extern pgd_t *pgd_alloc(struct mm_struct *mm); 10extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
33 35
34extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); 36extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
35extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); 37extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
36extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
37extern void pte_free(struct mm_struct *mm, pgtable_t pte);
38 38
39#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) 39static inline void pgtable_free(pgtable_free_t pgf)
40{
41 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
42
43 free_page((unsigned long)p);
44}
40 45
41#define check_pgt_cache() do { } while (0) 46#define check_pgt_cache() do { } while (0)
42 47
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 812a1d8f35cb..afda2bdd860f 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -7,7 +7,6 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10#include <linux/mm.h>
11#include <linux/slab.h> 10#include <linux/slab.h>
12#include <linux/cpumask.h> 11#include <linux/cpumask.h>
13#include <linux/percpu.h> 12#include <linux/percpu.h>
@@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
108 return page; 107 return page;
109} 108}
110 109
111static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
112{
113 free_page((unsigned long)pte);
114}
115
116static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
117{
118 pgtable_page_dtor(ptepage);
119 __free_page(ptepage);
120}
121
122#define PGF_CACHENUM_MASK 0x7
123
124typedef struct pgtable_free {
125 unsigned long val;
126} pgtable_free_t;
127
128static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
129 unsigned long mask)
130{
131 BUG_ON(cachenum > PGF_CACHENUM_MASK);
132
133 return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
134}
135
136static inline void pgtable_free(pgtable_free_t pgf) 110static inline void pgtable_free(pgtable_free_t pgf)
137{ 111{
138 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); 112 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
@@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf)
144 kmem_cache_free(pgtable_cache[cachenum], p); 118 kmem_cache_free(pgtable_cache[cachenum], p);
145} 119}
146 120
147extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
148
149#define __pte_free_tlb(tlb,ptepage) \
150do { \
151 pgtable_page_dtor(ptepage); \
152 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
153 PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
154} while (0)
155#define __pmd_free_tlb(tlb, pmd) \ 121#define __pmd_free_tlb(tlb, pmd) \
156 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ 122 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
157 PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) 123 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index b4505ed0f0f2..5d8480265a77 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -2,11 +2,52 @@
2#define _ASM_POWERPC_PGALLOC_H 2#define _ASM_POWERPC_PGALLOC_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <linux/mm.h>
6
7static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
8{
9 free_page((unsigned long)pte);
10}
11
12static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
13{
14 pgtable_page_dtor(ptepage);
15 __free_page(ptepage);
16}
17
18typedef struct pgtable_free {
19 unsigned long val;
20} pgtable_free_t;
21
22#define PGF_CACHENUM_MASK 0x7
23
24static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
25 unsigned long mask)
26{
27 BUG_ON(cachenum > PGF_CACHENUM_MASK);
28
29 return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
30}
31
5#ifdef CONFIG_PPC64 32#ifdef CONFIG_PPC64
6#include <asm/pgalloc-64.h> 33#include <asm/pgalloc-64.h>
7#else 34#else
8#include <asm/pgalloc-32.h> 35#include <asm/pgalloc-32.h>
9#endif 36#endif
10 37
38extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
39
40#ifdef CONFIG_SMP
41#define __pte_free_tlb(tlb,ptepage) \
42do { \
43 pgtable_page_dtor(ptepage); \
44 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
45 PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
46} while (0)
47#else
48#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
49#endif
50
51
11#endif /* __KERNEL__ */ 52#endif /* __KERNEL__ */
12#endif /* _ASM_POWERPC_PGALLOC_H */ 53#endif /* _ASM_POWERPC_PGALLOC_H */
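
The pgtable_free_cache()/PGF_CACHENUM_MASK scheme moved into this header tags an aligned page-table pointer with a small cache index in its low bits, and pgtable_free() masks the index back out. A standalone sketch of that pointer-tagging trick; the alignment is simulated with aligned_alloc() here, whereas the kernel relies on the natural alignment of its page-table allocations.

/* Pointer-tagging sketch behind pgtable_free_cache()/pgtable_free(). */
#include <stdio.h>
#include <stdlib.h>

#define PGF_CACHENUM_MASK	0x7UL

typedef struct { unsigned long val; } pgtable_free_demo_t;

static pgtable_free_demo_t pack(void *p, unsigned long cachenum, unsigned long mask)
{
	/* mask clears the alignment bits that the cache number occupies */
	return (pgtable_free_demo_t){ .val = ((unsigned long)p & ~mask) | cachenum };
}

int main(void)
{
	void *table = aligned_alloc(4096, 4096);	/* page-aligned buffer */
	pgtable_free_demo_t pgf = pack(table, 3, PGF_CACHENUM_MASK);

	void *ptr = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
	unsigned long cachenum = pgf.val & PGF_CACHENUM_MASK;

	printf("pointer recovered: %d, cachenum: %lu\n", ptr == table, cachenum);
	free(table);
	return 0;
}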
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 6ab7c67cb5ab..f69a4d977729 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -228,9 +228,10 @@ extern int icache_44x_need_flush;
228 * - FILE *must* be in the bottom three bits because swap cache 228 * - FILE *must* be in the bottom three bits because swap cache
229 * entries use the top 29 bits for TLB2. 229 * entries use the top 29 bits for TLB2.
230 * 230 *
231 * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it 231 * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
232 * doesn't support SMP. So we can use this as software bit, like 232 * because it doesn't support SMP. However, some later 460 variants
233 * DIRTY. 233 * have -some- form of SMP support and so I keep the bit there for
234 * future use
234 * 235 *
235 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used 236 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
236 * for memory protection related functions (see PTE structure in 237 * for memory protection related functions (see PTE structure in
@@ -436,20 +437,23 @@ extern int icache_44x_need_flush;
436 _PAGE_USER | _PAGE_ACCESSED | \ 437 _PAGE_USER | _PAGE_ACCESSED | \
437 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ 438 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
438 _PAGE_EXEC | _PAGE_HWEXEC) 439 _PAGE_EXEC | _PAGE_HWEXEC)
440
439/* 441/*
440 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware 442 * We define 2 sets of base prot bits, one for basic pages (ie,
441 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need 443 * cacheable kernel and user pages) and one for non cacheable
442 * to have it in the Linux PTE, and in fact the bit could be reused for 444 * pages. We always set _PAGE_COHERENT when SMP is enabled or
443 * another purpose. -- paulus. 445 * the processor might need it for DMA coherency.
444 */ 446 */
445 447#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
446#ifdef CONFIG_44x 448#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
447#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
448#else 449#else
449#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) 450#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
450#endif 451#endif
452#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)
453
451#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) 454#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
452#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE) 455#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
456#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)
453 457
454#ifdef CONFIG_PPC_STD_MMU 458#ifdef CONFIG_PPC_STD_MMU
455/* On standard PPC MMU, no user access implies kernel read/write access, 459/* On standard PPC MMU, no user access implies kernel read/write access,
@@ -459,7 +463,7 @@ extern int icache_44x_need_flush;
459#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED) 463#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
460#endif 464#endif
461 465
462#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED) 466#define _PAGE_IO (_PAGE_KERNEL_NC | _PAGE_GUARDED)
463#define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC) 467#define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC)
464 468
465#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ 469#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
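
The hunk above splits the base protection bits into a cacheable set (_PAGE_BASE, which gains _PAGE_COHERENT when SMP or a hash MMU may need coherency) and a non-cacheable set (_PAGE_BASE_NC), then rebuilds _PAGE_IO from the non-cacheable kernel flavour plus _PAGE_GUARDED. The standalone sketch below shows how the sets compose; the bit values are invented for the example and are not the real ppc32 layout.

/* Standalone illustration of how the protection-bit sets compose.
 * Bit values are invented; the real definitions live in
 * pgtable-ppc32.h and vary by core family. */
#include <stdio.h>

#define _PAGE_PRESENT  0x001
#define _PAGE_ACCESSED 0x002
#define _PAGE_RW       0x004
#define _PAGE_DIRTY    0x008
#define _PAGE_HWWRITE  0x010
#define _PAGE_SHARED   0x020
#define _PAGE_COHERENT 0x040
#define _PAGE_NO_CACHE 0x080
#define _PAGE_GUARDED  0x100

/* cacheable base (SMP / hash-MMU case) vs. non-cacheable base */
#define _PAGE_BASE     (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#define _PAGE_BASE_NC  (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)

#define _PAGE_WRENABLE  (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL    (_PAGE_BASE    | _PAGE_SHARED | _PAGE_WRENABLE)
#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)

/* I/O mappings: non-cacheable kernel mapping plus guarded */
#define _PAGE_IO        (_PAGE_KERNEL_NC | _PAGE_GUARDED)

int main(void)
{
	printf("_PAGE_KERNEL    = 0x%03x\n", _PAGE_KERNEL);
	printf("_PAGE_KERNEL_NC = 0x%03x\n", _PAGE_KERNEL_NC);
	printf("_PAGE_IO        = 0x%03x\n", _PAGE_IO);
	return 0;
}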
@@ -552,9 +556,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
552static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 556static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
553static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } 557static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
554 558
555static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
556static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
557
558static inline pte_t pte_wrprotect(pte_t pte) { 559static inline pte_t pte_wrprotect(pte_t pte) {
559 pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } 560 pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
560static inline pte_t pte_mkclean(pte_t pte) { 561static inline pte_t pte_mkclean(pte_t pte) {
@@ -693,10 +694,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
693#endif 694#endif
694} 695}
695 696
697
696static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 698static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
697 pte_t *ptep, pte_t pte) 699 pte_t *ptep, pte_t pte)
698{ 700{
699#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) 701#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
700 WARN_ON(pte_present(*ptep)); 702 WARN_ON(pte_present(*ptep));
701#endif 703#endif
702 __set_pte_at(mm, addr, ptep, pte); 704 __set_pte_at(mm, addr, ptep, pte);
@@ -760,16 +762,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
760 __changed; \ 762 __changed; \
761}) 763})
762 764
763/*
764 * Macro to mark a page protection value as "uncacheable".
765 */
766#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
767
768struct file;
769extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
770 unsigned long size, pgprot_t vma_prot);
771#define __HAVE_PHYS_MEM_ACCESS_PROT
772
773#define __HAVE_ARCH_PTE_SAME 765#define __HAVE_ARCH_PTE_SAME
774#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) 766#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
775 767
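
pte_same() in this file compares two PTEs while masking out _PAGE_HASHPTE, so a PTE that has merely been inserted into the hash table still counts as "the same" mapping. A tiny sketch of that comparison, with an invented bit position:

/* Sketch of the pte_same() idea: two PTEs are equal if they differ
 * only in the hash-maintenance bit.  Bit value invented. */
#include <stdio.h>

typedef unsigned long pte_val_t;

#define _PAGE_HASHPTE 0x400UL	/* illustrative position only */

static int pte_same(pte_val_t a, pte_val_t b)
{
	return ((a ^ b) & ~_PAGE_HASHPTE) == 0;
}

int main(void)
{
	pte_val_t a = 0x12345003UL;
	pte_val_t b = a | _PAGE_HASHPTE;	/* same mapping, hashed in */

	printf("pte_same: %d\n", pte_same(a, b));	/* prints 1 */
	return 0;
}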
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 4c0a8c62859d..b0f18be81d9f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -100,7 +100,7 @@
100 100
101#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) 101#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
102 102
103/* __pgprot defined in arch/powerpc/incliude/asm/page.h */ 103/* __pgprot defined in arch/powerpc/include/asm/page.h */
104#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) 104#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
105 105
106#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) 106#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
@@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
245static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} 245static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
246static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } 246static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
247 247
248static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
249static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
250
251static inline pte_t pte_wrprotect(pte_t pte) { 248static inline pte_t pte_wrprotect(pte_t pte) {
252 pte_val(pte) &= ~(_PAGE_RW); return pte; } 249 pte_val(pte) &= ~(_PAGE_RW); return pte; }
253static inline pte_t pte_mkclean(pte_t pte) { 250static inline pte_t pte_mkclean(pte_t pte) {
@@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
405 __changed; \ 402 __changed; \
406}) 403})
407 404
408/*
409 * Macro to mark a page protection value as "uncacheable".
410 */
411#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
412
413struct file;
414extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
415 unsigned long size, pgprot_t vma_prot);
416#define __HAVE_PHYS_MEM_ACCESS_PROT
417
418#define __HAVE_ARCH_PTE_SAME 405#define __HAVE_ARCH_PTE_SAME
419#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) 406#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
420 407
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dbb8ca172e44..07f55e601696 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -16,6 +16,32 @@ struct mm_struct;
16#endif 16#endif
17 17
18#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
19
20/*
21 * Macro to mark a page protection value as "uncacheable".
22 */
23
24#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
25 _PAGE_WRITETHRU)
26
27#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
28 _PAGE_NO_CACHE | _PAGE_GUARDED))
29
30#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
31 _PAGE_NO_CACHE))
32
33#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
34 _PAGE_COHERENT))
35
36#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
37 _PAGE_COHERENT | _PAGE_WRITETHRU))
38
39
40struct file;
41extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
42 unsigned long size, pgprot_t vma_prot);
43#define __HAVE_PHYS_MEM_ACCESS_PROT
44
19/* 45/*
20 * ZERO_PAGE is a global shared page that is always zero: used 46 * ZERO_PAGE is a global shared page that is always zero: used
21 * for zero-mapped memory areas etc.. 47 * for zero-mapped memory areas etc..
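
The new pgprot_* helpers added to pgtable.h all clear the whole _PAGE_CACHE_CTL group first and only then set the attributes they want, so repeated or mixed calls cannot leave stale cache-control bits behind. A user-space sketch of that clear-then-set behaviour follows; the bit values are invented and only the masking logic matters.

/* Demonstrates the "mask out all cache-control bits, then set the ones
 * you want" behaviour of the new pgprot_* helpers.  Bit values invented. */
#include <stdio.h>

#define _PAGE_COHERENT  0x040UL
#define _PAGE_GUARDED   0x100UL
#define _PAGE_NO_CACHE  0x080UL
#define _PAGE_WRITETHRU 0x200UL

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | \
			 _PAGE_NO_CACHE | _PAGE_WRITETHRU)

static unsigned long pgprot_noncached(unsigned long prot)
{
	return (prot & ~_PAGE_CACHE_CTL) | _PAGE_NO_CACHE | _PAGE_GUARDED;
}

static unsigned long pgprot_cached_wthru(unsigned long prot)
{
	return (prot & ~_PAGE_CACHE_CTL) | _PAGE_COHERENT | _PAGE_WRITETHRU;
}

int main(void)
{
	unsigned long prot = 0x003UL | _PAGE_COHERENT;	/* a cached mapping */

	prot = pgprot_noncached(prot);
	printf("noncached    = 0x%03lx\n", prot);	/* COHERENT cleared */

	prot = pgprot_cached_wthru(prot);
	printf("cached_wthru = 0x%03lx\n", prot);	/* NO_CACHE/GUARDED cleared */
	return 0;
}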
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c4a029ccb4d3..1a0d628eb114 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
425#define fromreal(rd) tovirt(rd,rd) 425#define fromreal(rd) tovirt(rd,rd)
426 426
427#define tophys(rd,rs) \ 427#define tophys(rd,rs) \
4280: addis rd,rs,-KERNELBASE@h; \ 4280: addis rd,rs,-PAGE_OFFSET@h; \
429 .section ".vtop_fixup","aw"; \ 429 .section ".vtop_fixup","aw"; \
430 .align 1; \ 430 .align 1; \
431 .long 0b; \ 431 .long 0b; \
432 .previous 432 .previous
433 433
434#define tovirt(rd,rs) \ 434#define tovirt(rd,rs) \
4350: addis rd,rs,KERNELBASE@h; \ 4350: addis rd,rs,PAGE_OFFSET@h; \
436 .section ".ptov_fixup","aw"; \ 436 .section ".ptov_fixup","aw"; \
437 .align 1; \ 437 .align 1; \
438 .long 0b; \ 438 .long 0b; \
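
tophys()/tovirt() only add or subtract the upper half of PAGE_OFFSET (the addis immediate), which works because the low 16 bits of PAGE_OFFSET are zero; switching the macros from KERNELBASE to PAGE_OFFSET matters only when the two differ. The same arithmetic in C, with the classic 0xc0000000 value used purely as an example:

/* The virt<->phys conversion behind tophys()/tovirt(), written in C.
 * PAGE_OFFSET here is the common ppc32 default, used only to make the
 * example concrete; boards and configs can differ. */
#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL

static unsigned long tophys(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;	/* addis rd,rs,-PAGE_OFFSET@h */
}

static unsigned long tovirt(unsigned long paddr)
{
	return paddr + PAGE_OFFSET;	/* addis rd,rs,PAGE_OFFSET@h */
}

int main(void)
{
	unsigned long va = 0xc0123000UL;

	printf("phys 0x%08lx\n", tophys(va));
	printf("virt 0x%08lx\n", tovirt(tophys(va)));
	return 0;
}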
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 101ed87f7d84..d3466490104a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -69,8 +69,6 @@ extern int _prep_type;
69 69
70#ifdef __KERNEL__ 70#ifdef __KERNEL__
71 71
72extern int have_of;
73
74struct task_struct; 72struct task_struct;
75void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); 73void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
76void release_thread(struct task_struct *); 74void release_thread(struct task_struct *);
@@ -207,6 +205,11 @@ struct thread_struct {
207#define INIT_SP_LIMIT \ 205#define INIT_SP_LIMIT \
208 (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack) 206 (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
209 207
208#ifdef CONFIG_SPE
209#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
210#else
211#define SPEFSCR_INIT
212#endif
210 213
211#ifdef CONFIG_PPC32 214#ifdef CONFIG_PPC32
212#define INIT_THREAD { \ 215#define INIT_THREAD { \
@@ -215,6 +218,7 @@ struct thread_struct {
215 .fs = KERNEL_DS, \ 218 .fs = KERNEL_DS, \
216 .pgdir = swapper_pg_dir, \ 219 .pgdir = swapper_pg_dir, \
217 .fpexc_mode = MSR_FE0 | MSR_FE1, \ 220 .fpexc_mode = MSR_FE0 | MSR_FE1, \
221 SPEFSCR_INIT \
218} 222}
219#else 223#else
220#define INIT_THREAD { \ 224#define INIT_THREAD { \
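
SPEFSCR_INIT expands to a designated initializer only when CONFIG_SPE is set, and to nothing otherwise, so INIT_THREAD can name it unconditionally. The same conditionally-present-initializer pattern in a standalone sketch, with invented names so it builds anywhere:

/* The "conditionally-present designated initializer" pattern used for
 * SPEFSCR_INIT, with invented names. */
#include <stdio.h>

#define HAVE_WIDGET 1	/* flip to 0 to drop the field's initializer */

struct thread_cfg {
	int mode;
	int widget_flags;
};

#if HAVE_WIDGET
#define WIDGET_INIT .widget_flags = 0x5,
#else
#define WIDGET_INIT
#endif

/* The macro is dropped straight into the initializer list; when it is
 * empty the list is still valid C. */
#define INIT_CFG { \
	.mode = 1,  \
	WIDGET_INIT \
}

int main(void)
{
	struct thread_cfg cfg = INIT_CFG;

	printf("mode=%d widget_flags=%#x\n", cfg.mode, cfg.widget_flags);
	return 0;
}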
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index eb3bd2e1c7f6..6ff04185d2aa 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -253,6 +253,9 @@ extern void kdump_move_device_tree(void);
253/* CPU OF node matching */ 253/* CPU OF node matching */
254struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); 254struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
255 255
256/* cache lookup */
257struct device_node *of_find_next_cache_node(struct device_node *np);
258
256/* Get the MAC address */ 259/* Get the MAC address */
257extern const void *of_get_mac_address(struct device_node *np); 260extern const void *of_get_mac_address(struct device_node *np);
258 261
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index f9e34c493cbb..cff30c0ef1ff 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -305,30 +305,34 @@ static inline const char* ps3_result(int result)
305/* system bus routines */ 305/* system bus routines */
306 306
307enum ps3_match_id { 307enum ps3_match_id {
308 PS3_MATCH_ID_EHCI = 1, 308 PS3_MATCH_ID_EHCI = 1,
309 PS3_MATCH_ID_OHCI = 2, 309 PS3_MATCH_ID_OHCI = 2,
310 PS3_MATCH_ID_GELIC = 3, 310 PS3_MATCH_ID_GELIC = 3,
311 PS3_MATCH_ID_AV_SETTINGS = 4, 311 PS3_MATCH_ID_AV_SETTINGS = 4,
312 PS3_MATCH_ID_SYSTEM_MANAGER = 5, 312 PS3_MATCH_ID_SYSTEM_MANAGER = 5,
313 PS3_MATCH_ID_STOR_DISK = 6, 313 PS3_MATCH_ID_STOR_DISK = 6,
314 PS3_MATCH_ID_STOR_ROM = 7, 314 PS3_MATCH_ID_STOR_ROM = 7,
315 PS3_MATCH_ID_STOR_FLASH = 8, 315 PS3_MATCH_ID_STOR_FLASH = 8,
316 PS3_MATCH_ID_SOUND = 9, 316 PS3_MATCH_ID_SOUND = 9,
317 PS3_MATCH_ID_GRAPHICS = 10, 317 PS3_MATCH_ID_GPU = 10,
318 PS3_MATCH_ID_LPM = 11, 318 PS3_MATCH_ID_LPM = 11,
319}; 319};
320 320
321#define PS3_MODULE_ALIAS_EHCI "ps3:1" 321enum ps3_match_sub_id {
322#define PS3_MODULE_ALIAS_OHCI "ps3:2" 322 PS3_MATCH_SUB_ID_GPU_FB = 1,
323#define PS3_MODULE_ALIAS_GELIC "ps3:3" 323};
324#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4" 324
325#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5" 325#define PS3_MODULE_ALIAS_EHCI "ps3:1:0"
326#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6" 326#define PS3_MODULE_ALIAS_OHCI "ps3:2:0"
327#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7" 327#define PS3_MODULE_ALIAS_GELIC "ps3:3:0"
328#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8" 328#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4:0"
329#define PS3_MODULE_ALIAS_SOUND "ps3:9" 329#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5:0"
330#define PS3_MODULE_ALIAS_GRAPHICS "ps3:10" 330#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6:0"
331#define PS3_MODULE_ALIAS_LPM "ps3:11" 331#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7:0"
332#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0"
333#define PS3_MODULE_ALIAS_SOUND "ps3:9:0"
334#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1"
335#define PS3_MODULE_ALIAS_LPM "ps3:11:0"
332 336
333enum ps3_system_bus_device_type { 337enum ps3_system_bus_device_type {
334 PS3_DEVICE_TYPE_IOC0 = 1, 338 PS3_DEVICE_TYPE_IOC0 = 1,
@@ -337,11 +341,6 @@ enum ps3_system_bus_device_type {
337 PS3_DEVICE_TYPE_LPM, 341 PS3_DEVICE_TYPE_LPM,
338}; 342};
339 343
340enum ps3_match_sub_id {
341 /* for PS3_MATCH_ID_GRAPHICS */
342 PS3_MATCH_SUB_ID_FB = 1,
343};
344
345/** 344/**
346 * struct ps3_system_bus_device - a device on the system bus 345 * struct ps3_system_bus_device - a device on the system bus
347 */ 346 */
@@ -516,4 +515,7 @@ void ps3_sync_irq(int node);
516u32 ps3_get_hw_thread_id(int cpu); 515u32 ps3_get_hw_thread_id(int cpu);
517u64 ps3_get_spe_id(void *arg); 516u64 ps3_get_spe_id(void *arg);
518 517
518/* mutex synchronizing GPU accesses and video mode changes */
519extern struct mutex ps3_gpu_mutex;
520
519#endif 521#endif
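
With the sub-ID added, each module alias becomes "ps3:<match_id>:<match_sub_id>", with 0 meaning "no sub-device" and the GPU framebuffer using sub-ID 1. A small sketch that formats such a string; the format is taken from the PS3_MODULE_ALIAS_* values above, but the helper itself is made up for the example.

/* Builds a "ps3:<match_id>:<match_sub_id>" style alias string, matching
 * the alias values in the hunk above.  The helper is hypothetical. */
#include <stdio.h>

enum ps3_match_id {
	PS3_MATCH_ID_GELIC = 3,
	PS3_MATCH_ID_GPU   = 10,
};

enum ps3_match_sub_id {
	PS3_MATCH_SUB_ID_GPU_FB = 1,
};

static void print_modalias(int match_id, int match_sub_id)
{
	char buf[32];

	snprintf(buf, sizeof(buf), "ps3:%d:%d", match_id, match_sub_id);
	printf("%s\n", buf);
}

int main(void)
{
	print_modalias(PS3_MATCH_ID_GELIC, 0);			    /* "ps3:3:0"  */
	print_modalias(PS3_MATCH_ID_GPU, PS3_MATCH_SUB_ID_GPU_FB); /* "ps3:10:1" */
	return 0;
}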
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index 5aa22cffdbd6..cd24ac16660a 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -740,8 +740,4 @@ extern int ps3av_audio_mute(int);
740extern int ps3av_audio_mute_analog(int); 740extern int ps3av_audio_mute_analog(int);
741extern int ps3av_dev_open(void); 741extern int ps3av_dev_open(void);
742extern int ps3av_dev_close(void); 742extern int ps3av_dev_close(void);
743extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
744 void *flip_data);
745extern void ps3av_flip_ctl(int on);
746
747#endif /* _ASM_POWERPC_PS3AV_H_ */ 743#endif /* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c6d1ab650778..f484a343efba 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -783,6 +783,10 @@ extern void scom970_write(unsigned int address, unsigned long value);
783#define __get_SP() ({unsigned long sp; \ 783#define __get_SP() ({unsigned long sp; \
784 asm volatile("mr %0,1": "=r" (sp)); sp;}) 784 asm volatile("mr %0,1": "=r" (sp)); sp;})
785 785
786struct pt_regs;
787
788extern void ppc_save_regs(struct pt_regs *regs);
789
786#endif /* __ASSEMBLY__ */ 790#endif /* __ASSEMBLY__ */
787#endif /* __KERNEL__ */ 791#endif /* __KERNEL__ */
788#endif /* _ASM_POWERPC_REG_H */ 792#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 8eaa7b28d9d0..e0175beb4462 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -168,6 +168,7 @@ extern void rtas_os_term(char *str);
168extern int rtas_get_sensor(int sensor, int index, int *state); 168extern int rtas_get_sensor(int sensor, int index, int *state);
169extern int rtas_get_power_level(int powerdomain, int *level); 169extern int rtas_get_power_level(int powerdomain, int *level);
170extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); 170extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
171extern bool rtas_indicator_present(int token, int *maxindex);
171extern int rtas_set_indicator(int indicator, int index, int new_value); 172extern int rtas_set_indicator(int indicator, int index, int new_value);
172extern int rtas_set_indicator_fast(int indicator, int index, int new_value); 173extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
173extern void rtas_progress(char *s, unsigned short hex); 174extern void rtas_progress(char *s, unsigned short hex);
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index ced34f1dc8f8..3d9f831c3c55 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -82,7 +82,7 @@
82#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) 82#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
83#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) 83#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
84 84
85#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y) 85#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
86#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) 86#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
87 87
88/* These macros define what NaN looks like. They're supposed to expand to 88/* These macros define what NaN looks like. They're supposed to expand to
@@ -97,6 +97,20 @@
97 97
98#define _FP_KEEPNANFRACP 1 98#define _FP_KEEPNANFRACP 1
99 99
100#ifdef FP_EX_BOOKE_E500_SPE
101#define FP_EX_INEXACT (1 << 21)
102#define FP_EX_INVALID (1 << 20)
103#define FP_EX_DIVZERO (1 << 19)
104#define FP_EX_UNDERFLOW (1 << 18)
105#define FP_EX_OVERFLOW (1 << 17)
106#define FP_INHIBIT_RESULTS 0
107
108#define __FPU_FPSCR (current->thread.spefscr)
109#define __FPU_ENABLED_EXC \
110({ \
111 (__FPU_FPSCR >> 2) & 0x1f; \
112})
113#else
100/* Exception flags. We use the bit positions of the appropriate bits 114/* Exception flags. We use the bit positions of the appropriate bits
101 in the FPSCR, which also correspond to the FE_* bits. This makes 115 in the FPSCR, which also correspond to the FE_* bits. This makes
102 everything easier ;-). */ 116 everything easier ;-). */
@@ -111,22 +125,6 @@
111#define FP_EX_DIVZERO (1 << (31 - 5)) 125#define FP_EX_DIVZERO (1 << (31 - 5))
112#define FP_EX_INEXACT (1 << (31 - 6)) 126#define FP_EX_INEXACT (1 << (31 - 6))
113 127
114/* This macro appears to be called when both X and Y are NaNs, and
115 * has to choose one and copy it to R. i386 goes for the larger of the
116 * two, sparc64 just picks Y. I don't understand this at all so I'll
117 * go with sparc64 because it's shorter :-> -- PMM
118 */
119#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
120 do { \
121 R##_s = Y##_s; \
122 _FP_FRAC_COPY_##wc(R,Y); \
123 R##_c = FP_CLS_NAN; \
124 } while (0)
125
126
127#include <linux/kernel.h>
128#include <linux/sched.h>
129
130#define __FPU_FPSCR (current->thread.fpscr.val) 128#define __FPU_FPSCR (current->thread.fpscr.val)
131 129
132/* We only actually write to the destination register 130/* We only actually write to the destination register
@@ -137,6 +135,32 @@
137 (__FPU_FPSCR >> 3) & 0x1f; \ 135 (__FPU_FPSCR >> 3) & 0x1f; \
138}) 136})
139 137
138#endif
139
140/*
141 * If one NaN is signaling and the other is not,
142 * we choose that one, otherwise we choose X.
143 */
144#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
145 do { \
146 if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
147 && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
148 { \
149 R##_s = X##_s; \
150 _FP_FRAC_COPY_##wc(R,X); \
151 } \
152 else \
153 { \
154 R##_s = Y##_s; \
155 _FP_FRAC_COPY_##wc(R,Y); \
156 } \
157 R##_c = FP_CLS_NAN; \
158 } while (0)
159
160
161#include <linux/kernel.h>
162#include <linux/sched.h>
163
140#define __FPU_TRAP_P(bits) \ 164#define __FPU_TRAP_P(bits) \
141 ((__FPU_ENABLED_EXC & (bits)) != 0) 165 ((__FPU_ENABLED_EXC & (bits)) != 0)
142 166
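
The replacement _FP_CHOOSENAN prefers the signalling NaN when exactly one operand is signalling: per the macro, X is copied only when Y has its quiet bit set and X does not, and in every other case Y is copied. The standalone binary32 version below applies the same selection to raw bit patterns, using the usual convention that a set mantissa MSB marks a quiet NaN; it is an illustration, not the softfloat implementation.

/* Standalone binary32 version of the NaN-selection rule in the new
 * _FP_CHOOSENAN: choose X only when Y is quiet and X is signalling,
 * otherwise choose Y. */
#include <stdio.h>
#include <stdint.h>

#define F32_QNAN_BIT  0x00400000u	/* mantissa MSB = quiet bit */
#define F32_EXP_MASK  0x7f800000u
#define F32_FRAC_MASK 0x007fffffu

static int is_nan(uint32_t f)
{
	return (f & F32_EXP_MASK) == F32_EXP_MASK && (f & F32_FRAC_MASK);
}

static uint32_t choose_nan(uint32_t x, uint32_t y)
{
	if ((y & F32_QNAN_BIT) && !(x & F32_QNAN_BIT))
		return x;	/* Y quiet, X signalling -> keep X */
	return y;		/* otherwise keep Y */
}

int main(void)
{
	uint32_t snan = 0x7f800001u;	/* signalling NaN */
	uint32_t qnan = 0x7fc00000u;	/* quiet NaN */

	printf("both NaN? %d %d\n", is_nan(snan), is_nan(qnan));
	printf("choose(snan, qnan) = 0x%08x\n", choose_nan(snan, qnan)); /* snan */
	printf("choose(qnan, snan) = 0x%08x\n", choose_nan(qnan, snan)); /* snan */
	printf("choose(qnan, qnan) = 0x%08x\n", choose_nan(qnan, qnan)); /* qnan (Y) */
	return 0;
}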
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 1866cec4f967..c25f73d1d842 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -81,6 +81,13 @@ extern int cpu_to_core_id(int cpu);
81#define PPC_MSG_CALL_FUNC_SINGLE 2 81#define PPC_MSG_CALL_FUNC_SINGLE 2
82#define PPC_MSG_DEBUGGER_BREAK 3 82#define PPC_MSG_DEBUGGER_BREAK 3
83 83
84/*
85 * irq controllers that have dedicated ipis per message and don't
86 * need additional code in the action handler may use this
87 */
88extern int smp_request_message_ipi(int virq, int message);
89extern const char *smp_ipi_name[];
90
84void smp_init_iSeries(void); 91void smp_init_iSeries(void);
85void smp_init_pSeries(void); 92void smp_init_pSeries(void);
86void smp_init_cell(void); 93void smp_init_cell(void);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f56a843f4705..36864364e601 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -277,7 +277,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
277 bne- 1b" 277 bne- 1b"
278 : "=&r"(tmp) 278 : "=&r"(tmp)
279 : "r"(&rw->lock) 279 : "r"(&rw->lock)
280 : "cr0", "memory"); 280 : "cr0", "xer", "memory");
281} 281}
282 282
283static inline void __raw_write_unlock(raw_rwlock_t *rw) 283static inline void __raw_write_unlock(raw_rwlock_t *rw)
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 45963e80f557..28f6ddbff4cf 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -5,6 +5,10 @@
5#include <linux/stringify.h> 5#include <linux/stringify.h>
6#include <asm/feature-fixups.h> 6#include <asm/feature-fixups.h>
7 7
8#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
9#define __SUBARCH_HAS_LWSYNC
10#endif
11
8#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
9extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; 13extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
10extern void do_lwsync_fixups(unsigned long value, void *fixup_start, 14extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index d6648c143322..2a4be19a92c4 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -23,15 +23,17 @@
23 * read_barrier_depends() prevents data-dependent loads being reordered 23 * read_barrier_depends() prevents data-dependent loads being reordered
24 * across this point (nop on PPC). 24 * across this point (nop on PPC).
25 * 25 *
26 * We have to use the sync instructions for mb(), since lwsync doesn't 26 * *mb() variants without smp_ prefix must order all types of memory
27 * order loads with respect to previous stores. Lwsync is fine for 27 * operations with one another. sync is the only instruction sufficient
28 * rmb(), though. Note that rmb() actually uses a sync on 32-bit 28 * to do this.
29 * architectures.
30 * 29 *
31 * For wmb(), we use sync since wmb is used in drivers to order 30 * For the smp_ barriers, ordering is for cacheable memory operations
32 * stores to system memory with respect to writes to the device. 31 * only. We have to use the sync instruction for smp_mb(), since lwsync
33 * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier 32 * doesn't order loads with respect to previous stores. Lwsync can be
34 * on SMP since it is only used to order updates to system memory. 33 * used for smp_rmb() and smp_wmb().
34 *
35 * However, on CPUs that don't support lwsync, lwsync actually maps to a
36 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
35 */ 37 */
36#define mb() __asm__ __volatile__ ("sync" : : : "memory") 38#define mb() __asm__ __volatile__ ("sync" : : : "memory")
37#define rmb() __asm__ __volatile__ ("sync" : : : "memory") 39#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
@@ -45,14 +47,14 @@
45#ifdef CONFIG_SMP 47#ifdef CONFIG_SMP
46 48
47#ifdef __SUBARCH_HAS_LWSYNC 49#ifdef __SUBARCH_HAS_LWSYNC
48# define SMPWMB lwsync 50# define SMPWMB LWSYNC
49#else 51#else
50# define SMPWMB eieio 52# define SMPWMB eieio
51#endif 53#endif
52 54
53#define smp_mb() mb() 55#define smp_mb() mb()
54#define smp_rmb() rmb() 56#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
55#define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory") 57#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
56#define smp_read_barrier_depends() read_barrier_depends() 58#define smp_read_barrier_depends() read_barrier_depends()
57#else 59#else
58#define smp_mb() barrier() 60#define smp_mb() barrier()
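
The rewritten comment draws the line between full barriers (which must stay as sync because they also order non-cacheable device accesses) and the smp_ variants, where lwsync is enough because only cacheable memory needs ordering. The user-space analogue below shows the smp_wmb()/smp_rmb() producer-consumer pairing using C11 release/acquire fences in place of lwsync; it illustrates the ordering contract, not the kernel primitives. Build with -pthread.

/* Producer/consumer pairing that needs only smp_wmb()/smp_rmb() style
 * ordering.  C11 fences stand in for lwsync; this is an analogy, not
 * the kernel implementation. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int ready;

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;					/* store A   */
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&ready, 1, memory_order_relaxed); /* store B */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&ready, memory_order_relaxed))
		;					/* wait for B */
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	printf("payload = %d\n", payload);		/* must print 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

A plain wmb()/rmb(), by contrast, must also order writes to device memory, which is why the non-smp_ variants keep using sync.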
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index febd581ec9b0..27ccb764fdab 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -48,26 +48,6 @@ extern unsigned long ppc_proc_freq;
48extern unsigned long ppc_tb_freq; 48extern unsigned long ppc_tb_freq;
49#define DEFAULT_TB_FREQ 125000000UL 49#define DEFAULT_TB_FREQ 125000000UL
50 50
51/*
52 * By putting all of this stuff into a single struct we
53 * reduce the number of cache lines touched by do_gettimeofday.
54 * Both by collecting all of the data in one cache line and
55 * by touching only one TOC entry on ppc64.
56 */
57struct gettimeofday_vars {
58 u64 tb_to_xs;
59 u64 stamp_xsec;
60 u64 tb_orig_stamp;
61};
62
63struct gettimeofday_struct {
64 unsigned long tb_ticks_per_sec;
65 struct gettimeofday_vars vars[2];
66 struct gettimeofday_vars * volatile varp;
67 unsigned var_idx;
68 unsigned tb_to_us;
69};
70
71struct div_result { 51struct div_result {
72 u64 result_high; 52 u64 result_high;
73 u64 result_low; 53 u64 result_low;
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index a2c6bfd85fb7..abbe3419d1dd 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -6,6 +6,9 @@
6 * 6 *
7 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 7 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
8 * - flush_tlb_page(vma, vmaddr) flushes one page 8 * - flush_tlb_page(vma, vmaddr) flushes one page
9 * - local_flush_tlb_mm(mm) flushes the specified mm context on
10 * the local processor
11 * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
9 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB 12 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
10 * - flush_tlb_range(vma, start, end) flushes a range of pages 13 * - flush_tlb_range(vma, start, end) flushes a range of pages
11 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages 14 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
@@ -17,7 +20,7 @@
17 */ 20 */
18#ifdef __KERNEL__ 21#ifdef __KERNEL__
19 22
20#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE) 23#ifdef CONFIG_PPC_MMU_NOHASH
21/* 24/*
22 * TLB flushing for software loaded TLB chips 25 * TLB flushing for software loaded TLB chips
23 * 26 *
@@ -28,63 +31,49 @@
28 31
29#include <linux/mm.h> 32#include <linux/mm.h>
30 33
31extern void _tlbie(unsigned long address, unsigned int pid); 34#define MMU_NO_CONTEXT ((unsigned int)-1)
32extern void _tlbil_all(void);
33extern void _tlbil_pid(unsigned int pid);
34extern void _tlbil_va(unsigned long address, unsigned int pid);
35 35
36#if defined(CONFIG_40x) || defined(CONFIG_8xx) 36extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
37#define _tlbia() asm volatile ("tlbia; sync" : : : "memory") 37 unsigned long end);
38#else /* CONFIG_44x || CONFIG_FSL_BOOKE */ 38extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
39extern void _tlbia(void);
40#endif
41
42static inline void flush_tlb_mm(struct mm_struct *mm)
43{
44 _tlbil_pid(mm->context.id);
45}
46
47static inline void flush_tlb_page(struct vm_area_struct *vma,
48 unsigned long vmaddr)
49{
50 _tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
51}
52 39
53static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, 40extern void local_flush_tlb_mm(struct mm_struct *mm);
54 unsigned long vmaddr) 41extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
55{
56 flush_tlb_page(vma, vmaddr);
57}
58 42
59static inline void flush_tlb_range(struct vm_area_struct *vma, 43#ifdef CONFIG_SMP
60 unsigned long start, unsigned long end) 44extern void flush_tlb_mm(struct mm_struct *mm);
61{ 45extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
62 _tlbil_pid(vma->vm_mm->context.id); 46#else
63} 47#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
48#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
49#endif
50#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
64 51
65static inline void flush_tlb_kernel_range(unsigned long start, 52#elif defined(CONFIG_PPC_STD_MMU_32)
66 unsigned long end)
67{
68 _tlbil_pid(0);
69}
70 53
71#elif defined(CONFIG_PPC32)
72/* 54/*
73 * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx 55 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
74 */ 56 */
75extern void _tlbie(unsigned long address);
76extern void _tlbia(void);
77
78extern void flush_tlb_mm(struct mm_struct *mm); 57extern void flush_tlb_mm(struct mm_struct *mm);
79extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 58extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
80extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); 59extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
81extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 60extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
82 unsigned long end); 61 unsigned long end);
83extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 62extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
63static inline void local_flush_tlb_page(struct vm_area_struct *vma,
64 unsigned long vmaddr)
65{
66 flush_tlb_page(vma, vmaddr);
67}
68static inline void local_flush_tlb_mm(struct mm_struct *mm)
69{
70 flush_tlb_mm(mm);
71}
72
73#elif defined(CONFIG_PPC_STD_MMU_64)
84 74
85#else
86/* 75/*
87 * TLB flushing for 64-bit has-MMU CPUs 76 * TLB flushing for 64-bit hash-MMU CPUs
88 */ 77 */
89 78
90#include <linux/percpu.h> 79#include <linux/percpu.h>
@@ -134,10 +123,19 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
134extern void flush_hash_range(unsigned long number, int local); 123extern void flush_hash_range(unsigned long number, int local);
135 124
136 125
126static inline void local_flush_tlb_mm(struct mm_struct *mm)
127{
128}
129
137static inline void flush_tlb_mm(struct mm_struct *mm) 130static inline void flush_tlb_mm(struct mm_struct *mm)
138{ 131{
139} 132}
140 133
134static inline void local_flush_tlb_page(struct vm_area_struct *vma,
135 unsigned long vmaddr)
136{
137}
138
141static inline void flush_tlb_page(struct vm_area_struct *vma, 139static inline void flush_tlb_page(struct vm_area_struct *vma,
142 unsigned long vmaddr) 140 unsigned long vmaddr)
143{ 141{
@@ -162,7 +160,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
162extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, 160extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
163 unsigned long end); 161 unsigned long end);
164 162
165 163#else
164#error Unsupported MMU type
166#endif 165#endif
167 166
168#endif /*__KERNEL__ */ 167#endif /*__KERNEL__ */
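
For the nohash case the reworked header always exposes local_flush_tlb_mm()/local_flush_tlb_page() and, when CONFIG_SMP is off, simply maps the global flush_tlb_* calls onto the local ones. Below is a sketch of that local-vs-global dispatch pattern with invented function names; it is not the kernel code.

/* Sketch of "the global flush falls back to the local flush on UP",
 * the dispatch used in the nohash tlbflush.h.  Names invented. */
#include <stdio.h>

/* #define DEMO_SMP 1 */		/* define to take the SMP path */

static void local_flush(int ctx)
{
	printf("flush context %d on this cpu only\n", ctx);
}

#ifdef DEMO_SMP
static void global_flush(int ctx)
{
	/* On SMP this would also notify the other cpus (e.g. via IPIs)
	 * so they flush their TLBs too. */
	printf("flush context %d on all cpus\n", ctx);
	local_flush(ctx);
}
#else
/* UP build: the global flush is just the local one. */
#define global_flush(ctx) local_flush(ctx)
#endif

int main(void)
{
	global_flush(7);
	return 0;
}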
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index f01393224b52..13c2c283e178 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -39,6 +39,7 @@
39#ifndef __ASSEMBLY__ 39#ifndef __ASSEMBLY__
40 40
41#include <linux/unistd.h> 41#include <linux/unistd.h>
42#include <linux/time.h>
42 43
43#define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32) 44#define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32)
44 45
@@ -83,6 +84,7 @@ struct vdso_data {
83 __u32 icache_log_block_size; /* L1 i-cache log block size */ 84 __u32 icache_log_block_size; /* L1 i-cache log block size */
84 __s32 wtom_clock_sec; /* Wall to monotonic clock */ 85 __s32 wtom_clock_sec; /* Wall to monotonic clock */
85 __s32 wtom_clock_nsec; 86 __s32 wtom_clock_nsec;
87 struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
86 __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ 88 __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
87 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ 89 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
88}; 90};
@@ -102,6 +104,7 @@ struct vdso_data {
102 __u32 tz_dsttime; /* Type of dst correction 0x5C */ 104 __u32 tz_dsttime; /* Type of dst correction 0x5C */
103 __s32 wtom_clock_sec; /* Wall to monotonic clock */ 105 __s32 wtom_clock_sec; /* Wall to monotonic clock */
104 __s32 wtom_clock_nsec; 106 __s32 wtom_clock_nsec;
107 struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
105 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ 108 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
106 __u32 dcache_block_size; /* L1 d-cache block size */ 109 __u32 dcache_block_size; /* L1 d-cache block size */
107 __u32 icache_block_size; /* L1 i-cache block size */ 110 __u32 icache_block_size; /* L1 i-cache block size */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index d17edb4a2f9d..1308a86e9070 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -103,6 +103,10 @@ endif
103 103
104obj-$(CONFIG_PPC64) += $(obj64-y) 104obj-$(CONFIG_PPC64) += $(obj64-y)
105 105
106ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
107obj-y += ppc_save_regs.o
108endif
109
106extra-$(CONFIG_PPC_FPU) += fpu.o 110extra-$(CONFIG_PPC_FPU) += fpu.o
107extra-$(CONFIG_PPC64) += entry_64.o 111extra-$(CONFIG_PPC64) += entry_64.o
108 112
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 75c5dd0138fd..9937fe44555f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -23,9 +23,6 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/suspend.h> 24#include <linux/suspend.h>
25#include <linux/hrtimer.h> 25#include <linux/hrtimer.h>
26#ifdef CONFIG_KVM
27#include <linux/kvm_host.h>
28#endif
29#ifdef CONFIG_PPC64 26#ifdef CONFIG_PPC64
30#include <linux/time.h> 27#include <linux/time.h>
31#include <linux/hardirq.h> 28#include <linux/hardirq.h>
@@ -51,6 +48,9 @@
51#ifdef CONFIG_PPC_ISERIES 48#ifdef CONFIG_PPC_ISERIES
52#include <asm/iseries/alpaca.h> 49#include <asm/iseries/alpaca.h>
53#endif 50#endif
51#ifdef CONFIG_KVM
52#include <asm/kvm_44x.h>
53#endif
54 54
55#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 55#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
56#include "head_booke.h" 56#include "head_booke.h"
@@ -60,6 +60,7 @@ int main(void)
60{ 60{
61 DEFINE(THREAD, offsetof(struct task_struct, thread)); 61 DEFINE(THREAD, offsetof(struct task_struct, thread));
62 DEFINE(MM, offsetof(struct task_struct, mm)); 62 DEFINE(MM, offsetof(struct task_struct, mm));
63 DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
63#ifdef CONFIG_PPC64 64#ifdef CONFIG_PPC64
64 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); 65 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
65#else 66#else
@@ -306,6 +307,7 @@ int main(void)
306 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); 307 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
307 DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); 308 DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
308 DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 309 DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
310 DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
309 DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size)); 311 DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
310 DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size)); 312 DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
311 DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size)); 313 DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
@@ -355,12 +357,10 @@ int main(void)
355 DEFINE(PTE_SIZE, sizeof(pte_t)); 357 DEFINE(PTE_SIZE, sizeof(pte_t));
356 358
357#ifdef CONFIG_KVM 359#ifdef CONFIG_KVM
358 DEFINE(TLBE_BYTES, sizeof(struct tlbe)); 360 DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe));
359 361
360 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); 362 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
361 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 363 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
362 DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
363 DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
364 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 364 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
365 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); 365 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
366 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); 366 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
@@ -378,6 +378,21 @@ int main(void)
378 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); 378 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
379 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); 379 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
380#endif 380#endif
381#ifdef CONFIG_44x
382 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
383 DEFINE(PTE_T_LOG2, PTE_T_LOG2);
384#endif
385
386#ifdef CONFIG_KVM_EXIT_TIMING
387 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
388 arch.timing_exit.tv32.tbu));
389 DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
390 arch.timing_exit.tv32.tbl));
391 DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
392 arch.timing_last_enter.tv32.tbu));
393 DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
394 arch.timing_last_enter.tv32.tbl));
395#endif
381 396
382 return 0; 397 return 0;
383} 398}
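
asm-offsets.c only ever emits offsetof()/sizeof() constants so that assembly code (here the KVM exit-timing paths) can address C struct fields by name. The sketch below shows the same idea in miniature, printing the constants instead of emitting them through the kernel's kbuild asm trick; DEFINE and the struct layout are stand-ins invented for the example.

/* The asm-offsets idea in miniature: turn struct layout into named
 * integer constants.  The real DEFINE() emits them into a generated
 * header; here we simply print them. */
#include <stdio.h>
#include <stddef.h>

struct timing {
	unsigned int tbu;	/* timebase upper */
	unsigned int tbl;	/* timebase lower */
};

struct vcpu_demo {
	unsigned long gpr[32];
	struct timing timing_exit;
	struct timing timing_last_enter;
};

#define DEFINE(sym, val) printf("#define %-28s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(VCPU_GPRS, offsetof(struct vcpu_demo, gpr));
	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct vcpu_demo, timing_exit.tbu));
	DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct vcpu_demo, timing_exit.tbl));
	DEFINE(VCPU_TIMING_LAST_ENTER_TBU,
	       offsetof(struct vcpu_demo, timing_last_enter.tbu));
	return 0;
}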
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 7e8719504f39..923f87aff20a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -19,6 +19,7 @@
19#include <asm/oprofile_impl.h> 19#include <asm/oprofile_impl.h>
20#include <asm/cputable.h> 20#include <asm/cputable.h>
21#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ 21#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
22#include <asm/mmu.h>
22 23
23struct cpu_spec* cur_cpu_spec = NULL; 24struct cpu_spec* cur_cpu_spec = NULL;
24EXPORT_SYMBOL(cur_cpu_spec); 25EXPORT_SYMBOL(cur_cpu_spec);
@@ -94,6 +95,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
94 .cpu_name = "POWER3 (630)", 95 .cpu_name = "POWER3 (630)",
95 .cpu_features = CPU_FTRS_POWER3, 96 .cpu_features = CPU_FTRS_POWER3,
96 .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, 97 .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
98 .mmu_features = MMU_FTR_HPTE_TABLE,
97 .icache_bsize = 128, 99 .icache_bsize = 128,
98 .dcache_bsize = 128, 100 .dcache_bsize = 128,
99 .num_pmcs = 8, 101 .num_pmcs = 8,
@@ -109,6 +111,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
109 .cpu_name = "POWER3 (630+)", 111 .cpu_name = "POWER3 (630+)",
110 .cpu_features = CPU_FTRS_POWER3, 112 .cpu_features = CPU_FTRS_POWER3,
111 .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, 113 .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
114 .mmu_features = MMU_FTR_HPTE_TABLE,
112 .icache_bsize = 128, 115 .icache_bsize = 128,
113 .dcache_bsize = 128, 116 .dcache_bsize = 128,
114 .num_pmcs = 8, 117 .num_pmcs = 8,
@@ -124,6 +127,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
124 .cpu_name = "RS64-II (northstar)", 127 .cpu_name = "RS64-II (northstar)",
125 .cpu_features = CPU_FTRS_RS64, 128 .cpu_features = CPU_FTRS_RS64,
126 .cpu_user_features = COMMON_USER_PPC64, 129 .cpu_user_features = COMMON_USER_PPC64,
130 .mmu_features = MMU_FTR_HPTE_TABLE,
127 .icache_bsize = 128, 131 .icache_bsize = 128,
128 .dcache_bsize = 128, 132 .dcache_bsize = 128,
129 .num_pmcs = 8, 133 .num_pmcs = 8,
@@ -139,6 +143,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
139 .cpu_name = "RS64-III (pulsar)", 143 .cpu_name = "RS64-III (pulsar)",
140 .cpu_features = CPU_FTRS_RS64, 144 .cpu_features = CPU_FTRS_RS64,
141 .cpu_user_features = COMMON_USER_PPC64, 145 .cpu_user_features = COMMON_USER_PPC64,
146 .mmu_features = MMU_FTR_HPTE_TABLE,
142 .icache_bsize = 128, 147 .icache_bsize = 128,
143 .dcache_bsize = 128, 148 .dcache_bsize = 128,
144 .num_pmcs = 8, 149 .num_pmcs = 8,
@@ -154,6 +159,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
154 .cpu_name = "RS64-III (icestar)", 159 .cpu_name = "RS64-III (icestar)",
155 .cpu_features = CPU_FTRS_RS64, 160 .cpu_features = CPU_FTRS_RS64,
156 .cpu_user_features = COMMON_USER_PPC64, 161 .cpu_user_features = COMMON_USER_PPC64,
162 .mmu_features = MMU_FTR_HPTE_TABLE,
157 .icache_bsize = 128, 163 .icache_bsize = 128,
158 .dcache_bsize = 128, 164 .dcache_bsize = 128,
159 .num_pmcs = 8, 165 .num_pmcs = 8,
@@ -169,6 +175,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
169 .cpu_name = "RS64-IV (sstar)", 175 .cpu_name = "RS64-IV (sstar)",
170 .cpu_features = CPU_FTRS_RS64, 176 .cpu_features = CPU_FTRS_RS64,
171 .cpu_user_features = COMMON_USER_PPC64, 177 .cpu_user_features = COMMON_USER_PPC64,
178 .mmu_features = MMU_FTR_HPTE_TABLE,
172 .icache_bsize = 128, 179 .icache_bsize = 128,
173 .dcache_bsize = 128, 180 .dcache_bsize = 128,
174 .num_pmcs = 8, 181 .num_pmcs = 8,
@@ -184,6 +191,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
184 .cpu_name = "POWER4 (gp)", 191 .cpu_name = "POWER4 (gp)",
185 .cpu_features = CPU_FTRS_POWER4, 192 .cpu_features = CPU_FTRS_POWER4,
186 .cpu_user_features = COMMON_USER_POWER4, 193 .cpu_user_features = COMMON_USER_POWER4,
194 .mmu_features = MMU_FTR_HPTE_TABLE,
187 .icache_bsize = 128, 195 .icache_bsize = 128,
188 .dcache_bsize = 128, 196 .dcache_bsize = 128,
189 .num_pmcs = 8, 197 .num_pmcs = 8,
@@ -199,6 +207,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
199 .cpu_name = "POWER4+ (gq)", 207 .cpu_name = "POWER4+ (gq)",
200 .cpu_features = CPU_FTRS_POWER4, 208 .cpu_features = CPU_FTRS_POWER4,
201 .cpu_user_features = COMMON_USER_POWER4, 209 .cpu_user_features = COMMON_USER_POWER4,
210 .mmu_features = MMU_FTR_HPTE_TABLE,
202 .icache_bsize = 128, 211 .icache_bsize = 128,
203 .dcache_bsize = 128, 212 .dcache_bsize = 128,
204 .num_pmcs = 8, 213 .num_pmcs = 8,
@@ -215,6 +224,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
215 .cpu_features = CPU_FTRS_PPC970, 224 .cpu_features = CPU_FTRS_PPC970,
216 .cpu_user_features = COMMON_USER_POWER4 | 225 .cpu_user_features = COMMON_USER_POWER4 |
217 PPC_FEATURE_HAS_ALTIVEC_COMP, 226 PPC_FEATURE_HAS_ALTIVEC_COMP,
227 .mmu_features = MMU_FTR_HPTE_TABLE,
218 .icache_bsize = 128, 228 .icache_bsize = 128,
219 .dcache_bsize = 128, 229 .dcache_bsize = 128,
220 .num_pmcs = 8, 230 .num_pmcs = 8,
@@ -233,6 +243,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
233 .cpu_features = CPU_FTRS_PPC970, 243 .cpu_features = CPU_FTRS_PPC970,
234 .cpu_user_features = COMMON_USER_POWER4 | 244 .cpu_user_features = COMMON_USER_POWER4 |
235 PPC_FEATURE_HAS_ALTIVEC_COMP, 245 PPC_FEATURE_HAS_ALTIVEC_COMP,
246 .mmu_features = MMU_FTR_HPTE_TABLE,
236 .icache_bsize = 128, 247 .icache_bsize = 128,
237 .dcache_bsize = 128, 248 .dcache_bsize = 128,
238 .num_pmcs = 8, 249 .num_pmcs = 8,
@@ -251,6 +262,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
251 .cpu_features = CPU_FTRS_PPC970, 262 .cpu_features = CPU_FTRS_PPC970,
252 .cpu_user_features = COMMON_USER_POWER4 | 263 .cpu_user_features = COMMON_USER_POWER4 |
253 PPC_FEATURE_HAS_ALTIVEC_COMP, 264 PPC_FEATURE_HAS_ALTIVEC_COMP,
265 .mmu_features = MMU_FTR_HPTE_TABLE,
254 .icache_bsize = 128, 266 .icache_bsize = 128,
255 .dcache_bsize = 128, 267 .dcache_bsize = 128,
256 .num_pmcs = 8, 268 .num_pmcs = 8,
@@ -269,6 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
269 .cpu_features = CPU_FTRS_PPC970, 281 .cpu_features = CPU_FTRS_PPC970,
270 .cpu_user_features = COMMON_USER_POWER4 | 282 .cpu_user_features = COMMON_USER_POWER4 |
271 PPC_FEATURE_HAS_ALTIVEC_COMP, 283 PPC_FEATURE_HAS_ALTIVEC_COMP,
284 .mmu_features = MMU_FTR_HPTE_TABLE,
272 .icache_bsize = 128, 285 .icache_bsize = 128,
273 .dcache_bsize = 128, 286 .dcache_bsize = 128,
274 .num_pmcs = 8, 287 .num_pmcs = 8,
@@ -287,6 +300,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
287 .cpu_features = CPU_FTRS_PPC970, 300 .cpu_features = CPU_FTRS_PPC970,
288 .cpu_user_features = COMMON_USER_POWER4 | 301 .cpu_user_features = COMMON_USER_POWER4 |
289 PPC_FEATURE_HAS_ALTIVEC_COMP, 302 PPC_FEATURE_HAS_ALTIVEC_COMP,
303 .mmu_features = MMU_FTR_HPTE_TABLE,
290 .icache_bsize = 128, 304 .icache_bsize = 128,
291 .dcache_bsize = 128, 305 .dcache_bsize = 128,
292 .num_pmcs = 8, 306 .num_pmcs = 8,
@@ -303,6 +317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
303 .cpu_name = "POWER5 (gr)", 317 .cpu_name = "POWER5 (gr)",
304 .cpu_features = CPU_FTRS_POWER5, 318 .cpu_features = CPU_FTRS_POWER5,
305 .cpu_user_features = COMMON_USER_POWER5, 319 .cpu_user_features = COMMON_USER_POWER5,
320 .mmu_features = MMU_FTR_HPTE_TABLE,
306 .icache_bsize = 128, 321 .icache_bsize = 128,
307 .dcache_bsize = 128, 322 .dcache_bsize = 128,
308 .num_pmcs = 6, 323 .num_pmcs = 6,
@@ -323,6 +338,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
323 .cpu_name = "POWER5+ (gs)", 338 .cpu_name = "POWER5+ (gs)",
324 .cpu_features = CPU_FTRS_POWER5, 339 .cpu_features = CPU_FTRS_POWER5,
325 .cpu_user_features = COMMON_USER_POWER5_PLUS, 340 .cpu_user_features = COMMON_USER_POWER5_PLUS,
341 .mmu_features = MMU_FTR_HPTE_TABLE,
326 .icache_bsize = 128, 342 .icache_bsize = 128,
327 .dcache_bsize = 128, 343 .dcache_bsize = 128,
328 .num_pmcs = 6, 344 .num_pmcs = 6,
@@ -339,6 +355,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
339 .cpu_name = "POWER5+ (gs)", 355 .cpu_name = "POWER5+ (gs)",
340 .cpu_features = CPU_FTRS_POWER5, 356 .cpu_features = CPU_FTRS_POWER5,
341 .cpu_user_features = COMMON_USER_POWER5_PLUS, 357 .cpu_user_features = COMMON_USER_POWER5_PLUS,
358 .mmu_features = MMU_FTR_HPTE_TABLE,
342 .icache_bsize = 128, 359 .icache_bsize = 128,
343 .dcache_bsize = 128, 360 .dcache_bsize = 128,
344 .num_pmcs = 6, 361 .num_pmcs = 6,
@@ -356,6 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
356 .cpu_name = "POWER5+", 373 .cpu_name = "POWER5+",
357 .cpu_features = CPU_FTRS_POWER5, 374 .cpu_features = CPU_FTRS_POWER5,
358 .cpu_user_features = COMMON_USER_POWER5_PLUS, 375 .cpu_user_features = COMMON_USER_POWER5_PLUS,
376 .mmu_features = MMU_FTR_HPTE_TABLE,
359 .icache_bsize = 128, 377 .icache_bsize = 128,
360 .dcache_bsize = 128, 378 .dcache_bsize = 128,
361 .machine_check = machine_check_generic, 379 .machine_check = machine_check_generic,
@@ -369,6 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
369 .cpu_features = CPU_FTRS_POWER6, 387 .cpu_features = CPU_FTRS_POWER6,
370 .cpu_user_features = COMMON_USER_POWER6 | 388 .cpu_user_features = COMMON_USER_POWER6 |
371 PPC_FEATURE_POWER6_EXT, 389 PPC_FEATURE_POWER6_EXT,
390 .mmu_features = MMU_FTR_HPTE_TABLE,
372 .icache_bsize = 128, 391 .icache_bsize = 128,
373 .dcache_bsize = 128, 392 .dcache_bsize = 128,
374 .num_pmcs = 6, 393 .num_pmcs = 6,
@@ -388,6 +407,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
388 .cpu_name = "POWER6 (architected)", 407 .cpu_name = "POWER6 (architected)",
389 .cpu_features = CPU_FTRS_POWER6, 408 .cpu_features = CPU_FTRS_POWER6,
390 .cpu_user_features = COMMON_USER_POWER6, 409 .cpu_user_features = COMMON_USER_POWER6,
410 .mmu_features = MMU_FTR_HPTE_TABLE,
391 .icache_bsize = 128, 411 .icache_bsize = 128,
392 .dcache_bsize = 128, 412 .dcache_bsize = 128,
393 .machine_check = machine_check_generic, 413 .machine_check = machine_check_generic,
@@ -400,6 +420,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
400 .cpu_name = "POWER7 (architected)", 420 .cpu_name = "POWER7 (architected)",
401 .cpu_features = CPU_FTRS_POWER7, 421 .cpu_features = CPU_FTRS_POWER7,
402 .cpu_user_features = COMMON_USER_POWER7, 422 .cpu_user_features = COMMON_USER_POWER7,
423 .mmu_features = MMU_FTR_HPTE_TABLE,
403 .icache_bsize = 128, 424 .icache_bsize = 128,
404 .dcache_bsize = 128, 425 .dcache_bsize = 128,
405 .machine_check = machine_check_generic, 426 .machine_check = machine_check_generic,
@@ -412,6 +433,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
412 .cpu_name = "POWER7 (raw)", 433 .cpu_name = "POWER7 (raw)",
413 .cpu_features = CPU_FTRS_POWER7, 434 .cpu_features = CPU_FTRS_POWER7,
414 .cpu_user_features = COMMON_USER_POWER7, 435 .cpu_user_features = COMMON_USER_POWER7,
436 .mmu_features = MMU_FTR_HPTE_TABLE,
415 .icache_bsize = 128, 437 .icache_bsize = 128,
416 .dcache_bsize = 128, 438 .dcache_bsize = 128,
417 .num_pmcs = 6, 439 .num_pmcs = 6,
@@ -434,6 +456,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
434 .cpu_user_features = COMMON_USER_PPC64 | 456 .cpu_user_features = COMMON_USER_PPC64 |
435 PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | 457 PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
436 PPC_FEATURE_SMT, 458 PPC_FEATURE_SMT,
459 .mmu_features = MMU_FTR_HPTE_TABLE,
437 .icache_bsize = 128, 460 .icache_bsize = 128,
438 .dcache_bsize = 128, 461 .dcache_bsize = 128,
439 .num_pmcs = 4, 462 .num_pmcs = 4,
@@ -449,6 +472,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
449 .cpu_name = "PA6T", 472 .cpu_name = "PA6T",
450 .cpu_features = CPU_FTRS_PA6T, 473 .cpu_features = CPU_FTRS_PA6T,
451 .cpu_user_features = COMMON_USER_PA6T, 474 .cpu_user_features = COMMON_USER_PA6T,
475 .mmu_features = MMU_FTR_HPTE_TABLE,
452 .icache_bsize = 64, 476 .icache_bsize = 64,
453 .dcache_bsize = 64, 477 .dcache_bsize = 64,
454 .num_pmcs = 6, 478 .num_pmcs = 6,
@@ -466,6 +490,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
466 .cpu_name = "POWER4 (compatible)", 490 .cpu_name = "POWER4 (compatible)",
467 .cpu_features = CPU_FTRS_COMPATIBLE, 491 .cpu_features = CPU_FTRS_COMPATIBLE,
468 .cpu_user_features = COMMON_USER_PPC64, 492 .cpu_user_features = COMMON_USER_PPC64,
493 .mmu_features = MMU_FTR_HPTE_TABLE,
469 .icache_bsize = 128, 494 .icache_bsize = 128,
470 .dcache_bsize = 128, 495 .dcache_bsize = 128,
471 .num_pmcs = 6, 496 .num_pmcs = 6,
@@ -483,6 +508,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
483 .cpu_features = CPU_FTRS_PPC601, 508 .cpu_features = CPU_FTRS_PPC601,
484 .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR | 509 .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
485 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, 510 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
511 .mmu_features = MMU_FTR_HPTE_TABLE,
486 .icache_bsize = 32, 512 .icache_bsize = 32,
487 .dcache_bsize = 32, 513 .dcache_bsize = 32,
488 .machine_check = machine_check_generic, 514 .machine_check = machine_check_generic,
@@ -494,6 +520,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
494 .cpu_name = "603", 520 .cpu_name = "603",
495 .cpu_features = CPU_FTRS_603, 521 .cpu_features = CPU_FTRS_603,
496 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 522 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
523 .mmu_features = 0,
497 .icache_bsize = 32, 524 .icache_bsize = 32,
498 .dcache_bsize = 32, 525 .dcache_bsize = 32,
499 .cpu_setup = __setup_cpu_603, 526 .cpu_setup = __setup_cpu_603,
@@ -506,6 +533,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
506 .cpu_name = "603e", 533 .cpu_name = "603e",
507 .cpu_features = CPU_FTRS_603, 534 .cpu_features = CPU_FTRS_603,
508 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 535 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
536 .mmu_features = 0,
509 .icache_bsize = 32, 537 .icache_bsize = 32,
510 .dcache_bsize = 32, 538 .dcache_bsize = 32,
511 .cpu_setup = __setup_cpu_603, 539 .cpu_setup = __setup_cpu_603,
@@ -518,6 +546,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
518 .cpu_name = "603ev", 546 .cpu_name = "603ev",
519 .cpu_features = CPU_FTRS_603, 547 .cpu_features = CPU_FTRS_603,
520 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 548 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
549 .mmu_features = 0,
521 .icache_bsize = 32, 550 .icache_bsize = 32,
522 .dcache_bsize = 32, 551 .dcache_bsize = 32,
523 .cpu_setup = __setup_cpu_603, 552 .cpu_setup = __setup_cpu_603,
@@ -530,6 +559,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
530 .cpu_name = "604", 559 .cpu_name = "604",
531 .cpu_features = CPU_FTRS_604, 560 .cpu_features = CPU_FTRS_604,
532 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 561 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
562 .mmu_features = MMU_FTR_HPTE_TABLE,
533 .icache_bsize = 32, 563 .icache_bsize = 32,
534 .dcache_bsize = 32, 564 .dcache_bsize = 32,
535 .num_pmcs = 2, 565 .num_pmcs = 2,
@@ -543,6 +573,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
543 .cpu_name = "604e", 573 .cpu_name = "604e",
544 .cpu_features = CPU_FTRS_604, 574 .cpu_features = CPU_FTRS_604,
545 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 575 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
576 .mmu_features = MMU_FTR_HPTE_TABLE,
546 .icache_bsize = 32, 577 .icache_bsize = 32,
547 .dcache_bsize = 32, 578 .dcache_bsize = 32,
548 .num_pmcs = 4, 579 .num_pmcs = 4,
@@ -556,6 +587,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
556 .cpu_name = "604r", 587 .cpu_name = "604r",
557 .cpu_features = CPU_FTRS_604, 588 .cpu_features = CPU_FTRS_604,
558 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 589 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
590 .mmu_features = MMU_FTR_HPTE_TABLE,
559 .icache_bsize = 32, 591 .icache_bsize = 32,
560 .dcache_bsize = 32, 592 .dcache_bsize = 32,
561 .num_pmcs = 4, 593 .num_pmcs = 4,
@@ -569,6 +601,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
569 .cpu_name = "604ev", 601 .cpu_name = "604ev",
570 .cpu_features = CPU_FTRS_604, 602 .cpu_features = CPU_FTRS_604,
571 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 603 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
604 .mmu_features = MMU_FTR_HPTE_TABLE,
572 .icache_bsize = 32, 605 .icache_bsize = 32,
573 .dcache_bsize = 32, 606 .dcache_bsize = 32,
574 .num_pmcs = 4, 607 .num_pmcs = 4,
@@ -582,6 +615,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
582 .cpu_name = "740/750", 615 .cpu_name = "740/750",
583 .cpu_features = CPU_FTRS_740_NOTAU, 616 .cpu_features = CPU_FTRS_740_NOTAU,
584 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 617 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
618 .mmu_features = MMU_FTR_HPTE_TABLE,
585 .icache_bsize = 32, 619 .icache_bsize = 32,
586 .dcache_bsize = 32, 620 .dcache_bsize = 32,
587 .num_pmcs = 4, 621 .num_pmcs = 4,
@@ -595,6 +629,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
595 .cpu_name = "750CX", 629 .cpu_name = "750CX",
596 .cpu_features = CPU_FTRS_750, 630 .cpu_features = CPU_FTRS_750,
597 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 631 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
632 .mmu_features = MMU_FTR_HPTE_TABLE,
598 .icache_bsize = 32, 633 .icache_bsize = 32,
599 .dcache_bsize = 32, 634 .dcache_bsize = 32,
600 .num_pmcs = 4, 635 .num_pmcs = 4,
@@ -608,6 +643,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
608 .cpu_name = "750CX", 643 .cpu_name = "750CX",
609 .cpu_features = CPU_FTRS_750, 644 .cpu_features = CPU_FTRS_750,
610 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 645 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
646 .mmu_features = MMU_FTR_HPTE_TABLE,
611 .icache_bsize = 32, 647 .icache_bsize = 32,
612 .dcache_bsize = 32, 648 .dcache_bsize = 32,
613 .num_pmcs = 4, 649 .num_pmcs = 4,
@@ -622,6 +658,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
622 .cpu_name = "750CXe", 658 .cpu_name = "750CXe",
623 .cpu_features = CPU_FTRS_750, 659 .cpu_features = CPU_FTRS_750,
624 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 660 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
661 .mmu_features = MMU_FTR_HPTE_TABLE,
625 .icache_bsize = 32, 662 .icache_bsize = 32,
626 .dcache_bsize = 32, 663 .dcache_bsize = 32,
627 .num_pmcs = 4, 664 .num_pmcs = 4,
@@ -636,6 +673,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
636 .cpu_name = "750CXe", 673 .cpu_name = "750CXe",
637 .cpu_features = CPU_FTRS_750, 674 .cpu_features = CPU_FTRS_750,
638 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 675 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
676 .mmu_features = MMU_FTR_HPTE_TABLE,
639 .icache_bsize = 32, 677 .icache_bsize = 32,
640 .dcache_bsize = 32, 678 .dcache_bsize = 32,
641 .num_pmcs = 4, 679 .num_pmcs = 4,
@@ -650,6 +688,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
650 .cpu_name = "750CL", 688 .cpu_name = "750CL",
651 .cpu_features = CPU_FTRS_750CL, 689 .cpu_features = CPU_FTRS_750CL,
652 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 690 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
691 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
653 .icache_bsize = 32, 692 .icache_bsize = 32,
654 .dcache_bsize = 32, 693 .dcache_bsize = 32,
655 .num_pmcs = 4, 694 .num_pmcs = 4,
@@ -664,6 +703,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
664 .cpu_name = "745/755", 703 .cpu_name = "745/755",
665 .cpu_features = CPU_FTRS_750, 704 .cpu_features = CPU_FTRS_750,
666 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 705 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
706 .mmu_features = MMU_FTR_HPTE_TABLE,
667 .icache_bsize = 32, 707 .icache_bsize = 32,
668 .dcache_bsize = 32, 708 .dcache_bsize = 32,
669 .num_pmcs = 4, 709 .num_pmcs = 4,
@@ -678,6 +718,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
678 .cpu_name = "750FX", 718 .cpu_name = "750FX",
679 .cpu_features = CPU_FTRS_750FX1, 719 .cpu_features = CPU_FTRS_750FX1,
680 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 720 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
721 .mmu_features = MMU_FTR_HPTE_TABLE,
681 .icache_bsize = 32, 722 .icache_bsize = 32,
682 .dcache_bsize = 32, 723 .dcache_bsize = 32,
683 .num_pmcs = 4, 724 .num_pmcs = 4,
@@ -692,6 +733,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
692 .cpu_name = "750FX", 733 .cpu_name = "750FX",
693 .cpu_features = CPU_FTRS_750FX2, 734 .cpu_features = CPU_FTRS_750FX2,
694 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 735 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
736 .mmu_features = MMU_FTR_HPTE_TABLE,
695 .icache_bsize = 32, 737 .icache_bsize = 32,
696 .dcache_bsize = 32, 738 .dcache_bsize = 32,
697 .num_pmcs = 4, 739 .num_pmcs = 4,
@@ -706,6 +748,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
706 .cpu_name = "750FX", 748 .cpu_name = "750FX",
707 .cpu_features = CPU_FTRS_750FX, 749 .cpu_features = CPU_FTRS_750FX,
708 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 750 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
751 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
709 .icache_bsize = 32, 752 .icache_bsize = 32,
710 .dcache_bsize = 32, 753 .dcache_bsize = 32,
711 .num_pmcs = 4, 754 .num_pmcs = 4,
@@ -720,6 +763,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
720 .cpu_name = "750GX", 763 .cpu_name = "750GX",
721 .cpu_features = CPU_FTRS_750GX, 764 .cpu_features = CPU_FTRS_750GX,
722 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 765 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
766 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
723 .icache_bsize = 32, 767 .icache_bsize = 32,
724 .dcache_bsize = 32, 768 .dcache_bsize = 32,
725 .num_pmcs = 4, 769 .num_pmcs = 4,
@@ -734,6 +778,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
734 .cpu_name = "740/750", 778 .cpu_name = "740/750",
735 .cpu_features = CPU_FTRS_740, 779 .cpu_features = CPU_FTRS_740,
736 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 780 .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
781 .mmu_features = MMU_FTR_HPTE_TABLE,
737 .icache_bsize = 32, 782 .icache_bsize = 32,
738 .dcache_bsize = 32, 783 .dcache_bsize = 32,
739 .num_pmcs = 4, 784 .num_pmcs = 4,
@@ -749,6 +794,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
749 .cpu_features = CPU_FTRS_7400_NOTAU, 794 .cpu_features = CPU_FTRS_7400_NOTAU,
750 .cpu_user_features = COMMON_USER | 795 .cpu_user_features = COMMON_USER |
751 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 796 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
797 .mmu_features = MMU_FTR_HPTE_TABLE,
752 .icache_bsize = 32, 798 .icache_bsize = 32,
753 .dcache_bsize = 32, 799 .dcache_bsize = 32,
754 .num_pmcs = 4, 800 .num_pmcs = 4,
@@ -764,6 +810,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
764 .cpu_features = CPU_FTRS_7400, 810 .cpu_features = CPU_FTRS_7400,
765 .cpu_user_features = COMMON_USER | 811 .cpu_user_features = COMMON_USER |
766 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 812 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
813 .mmu_features = MMU_FTR_HPTE_TABLE,
767 .icache_bsize = 32, 814 .icache_bsize = 32,
768 .dcache_bsize = 32, 815 .dcache_bsize = 32,
769 .num_pmcs = 4, 816 .num_pmcs = 4,
@@ -779,6 +826,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
779 .cpu_features = CPU_FTRS_7400, 826 .cpu_features = CPU_FTRS_7400,
780 .cpu_user_features = COMMON_USER | 827 .cpu_user_features = COMMON_USER |
781 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 828 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
829 .mmu_features = MMU_FTR_HPTE_TABLE,
782 .icache_bsize = 32, 830 .icache_bsize = 32,
783 .dcache_bsize = 32, 831 .dcache_bsize = 32,
784 .num_pmcs = 4, 832 .num_pmcs = 4,
@@ -794,6 +842,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
794 .cpu_features = CPU_FTRS_7450_20, 842 .cpu_features = CPU_FTRS_7450_20,
795 .cpu_user_features = COMMON_USER | 843 .cpu_user_features = COMMON_USER |
796 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 844 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
845 .mmu_features = MMU_FTR_HPTE_TABLE,
797 .icache_bsize = 32, 846 .icache_bsize = 32,
798 .dcache_bsize = 32, 847 .dcache_bsize = 32,
799 .num_pmcs = 6, 848 .num_pmcs = 6,
@@ -811,6 +860,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
811 .cpu_features = CPU_FTRS_7450_21, 860 .cpu_features = CPU_FTRS_7450_21,
812 .cpu_user_features = COMMON_USER | 861 .cpu_user_features = COMMON_USER |
813 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 862 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
863 .mmu_features = MMU_FTR_HPTE_TABLE,
814 .icache_bsize = 32, 864 .icache_bsize = 32,
815 .dcache_bsize = 32, 865 .dcache_bsize = 32,
816 .num_pmcs = 6, 866 .num_pmcs = 6,
@@ -828,6 +878,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
828 .cpu_features = CPU_FTRS_7450_23, 878 .cpu_features = CPU_FTRS_7450_23,
829 .cpu_user_features = COMMON_USER | 879 .cpu_user_features = COMMON_USER |
830 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 880 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
881 .mmu_features = MMU_FTR_HPTE_TABLE,
831 .icache_bsize = 32, 882 .icache_bsize = 32,
832 .dcache_bsize = 32, 883 .dcache_bsize = 32,
833 .num_pmcs = 6, 884 .num_pmcs = 6,
@@ -845,6 +896,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
845 .cpu_features = CPU_FTRS_7455_1, 896 .cpu_features = CPU_FTRS_7455_1,
846 .cpu_user_features = COMMON_USER | 897 .cpu_user_features = COMMON_USER |
847 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 898 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
899 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
848 .icache_bsize = 32, 900 .icache_bsize = 32,
849 .dcache_bsize = 32, 901 .dcache_bsize = 32,
850 .num_pmcs = 6, 902 .num_pmcs = 6,
@@ -862,6 +914,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
862 .cpu_features = CPU_FTRS_7455_20, 914 .cpu_features = CPU_FTRS_7455_20,
863 .cpu_user_features = COMMON_USER | 915 .cpu_user_features = COMMON_USER |
864 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 916 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
917 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
865 .icache_bsize = 32, 918 .icache_bsize = 32,
866 .dcache_bsize = 32, 919 .dcache_bsize = 32,
867 .num_pmcs = 6, 920 .num_pmcs = 6,
@@ -879,6 +932,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
879 .cpu_features = CPU_FTRS_7455, 932 .cpu_features = CPU_FTRS_7455,
880 .cpu_user_features = COMMON_USER | 933 .cpu_user_features = COMMON_USER |
881 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 934 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
935 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
882 .icache_bsize = 32, 936 .icache_bsize = 32,
883 .dcache_bsize = 32, 937 .dcache_bsize = 32,
884 .num_pmcs = 6, 938 .num_pmcs = 6,
@@ -896,6 +950,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
896 .cpu_features = CPU_FTRS_7447_10, 950 .cpu_features = CPU_FTRS_7447_10,
897 .cpu_user_features = COMMON_USER | 951 .cpu_user_features = COMMON_USER |
898 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 952 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
953 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
899 .icache_bsize = 32, 954 .icache_bsize = 32,
900 .dcache_bsize = 32, 955 .dcache_bsize = 32,
901 .num_pmcs = 6, 956 .num_pmcs = 6,
@@ -913,6 +968,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
913 .cpu_features = CPU_FTRS_7447_10, 968 .cpu_features = CPU_FTRS_7447_10,
914 .cpu_user_features = COMMON_USER | 969 .cpu_user_features = COMMON_USER |
915 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 970 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
971 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
916 .icache_bsize = 32, 972 .icache_bsize = 32,
917 .dcache_bsize = 32, 973 .dcache_bsize = 32,
918 .num_pmcs = 6, 974 .num_pmcs = 6,
@@ -929,6 +985,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
929 .cpu_name = "7447/7457", 985 .cpu_name = "7447/7457",
930 .cpu_features = CPU_FTRS_7447, 986 .cpu_features = CPU_FTRS_7447,
931 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 987 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
988 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
932 .icache_bsize = 32, 989 .icache_bsize = 32,
933 .dcache_bsize = 32, 990 .dcache_bsize = 32,
934 .num_pmcs = 6, 991 .num_pmcs = 6,
@@ -946,6 +1003,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
946 .cpu_features = CPU_FTRS_7447A, 1003 .cpu_features = CPU_FTRS_7447A,
947 .cpu_user_features = COMMON_USER | 1004 .cpu_user_features = COMMON_USER |
948 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 1005 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
1006 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
949 .icache_bsize = 32, 1007 .icache_bsize = 32,
950 .dcache_bsize = 32, 1008 .dcache_bsize = 32,
951 .num_pmcs = 6, 1009 .num_pmcs = 6,
@@ -963,6 +1021,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
963 .cpu_features = CPU_FTRS_7448, 1021 .cpu_features = CPU_FTRS_7448,
964 .cpu_user_features = COMMON_USER | 1022 .cpu_user_features = COMMON_USER |
965 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, 1023 PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
1024 .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
966 .icache_bsize = 32, 1025 .icache_bsize = 32,
967 .dcache_bsize = 32, 1026 .dcache_bsize = 32,
968 .num_pmcs = 6, 1027 .num_pmcs = 6,
@@ -979,6 +1038,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
979 .cpu_name = "82xx", 1038 .cpu_name = "82xx",
980 .cpu_features = CPU_FTRS_82XX, 1039 .cpu_features = CPU_FTRS_82XX,
981 .cpu_user_features = COMMON_USER, 1040 .cpu_user_features = COMMON_USER,
1041 .mmu_features = 0,
982 .icache_bsize = 32, 1042 .icache_bsize = 32,
983 .dcache_bsize = 32, 1043 .dcache_bsize = 32,
984 .cpu_setup = __setup_cpu_603, 1044 .cpu_setup = __setup_cpu_603,
@@ -991,6 +1051,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
991 .cpu_name = "G2_LE", 1051 .cpu_name = "G2_LE",
992 .cpu_features = CPU_FTRS_G2_LE, 1052 .cpu_features = CPU_FTRS_G2_LE,
993 .cpu_user_features = COMMON_USER, 1053 .cpu_user_features = COMMON_USER,
1054 .mmu_features = MMU_FTR_USE_HIGH_BATS,
994 .icache_bsize = 32, 1055 .icache_bsize = 32,
995 .dcache_bsize = 32, 1056 .dcache_bsize = 32,
996 .cpu_setup = __setup_cpu_603, 1057 .cpu_setup = __setup_cpu_603,
@@ -1003,6 +1064,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1003 .cpu_name = "e300c1", 1064 .cpu_name = "e300c1",
1004 .cpu_features = CPU_FTRS_E300, 1065 .cpu_features = CPU_FTRS_E300,
1005 .cpu_user_features = COMMON_USER, 1066 .cpu_user_features = COMMON_USER,
1067 .mmu_features = MMU_FTR_USE_HIGH_BATS,
1006 .icache_bsize = 32, 1068 .icache_bsize = 32,
1007 .dcache_bsize = 32, 1069 .dcache_bsize = 32,
1008 .cpu_setup = __setup_cpu_603, 1070 .cpu_setup = __setup_cpu_603,
@@ -1015,6 +1077,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1015 .cpu_name = "e300c2", 1077 .cpu_name = "e300c2",
1016 .cpu_features = CPU_FTRS_E300C2, 1078 .cpu_features = CPU_FTRS_E300C2,
1017 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1079 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1080 .mmu_features = MMU_FTR_USE_HIGH_BATS,
1018 .icache_bsize = 32, 1081 .icache_bsize = 32,
1019 .dcache_bsize = 32, 1082 .dcache_bsize = 32,
1020 .cpu_setup = __setup_cpu_603, 1083 .cpu_setup = __setup_cpu_603,
@@ -1027,6 +1090,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1027 .cpu_name = "e300c3", 1090 .cpu_name = "e300c3",
1028 .cpu_features = CPU_FTRS_E300, 1091 .cpu_features = CPU_FTRS_E300,
1029 .cpu_user_features = COMMON_USER, 1092 .cpu_user_features = COMMON_USER,
1093 .mmu_features = MMU_FTR_USE_HIGH_BATS,
1030 .icache_bsize = 32, 1094 .icache_bsize = 32,
1031 .dcache_bsize = 32, 1095 .dcache_bsize = 32,
1032 .cpu_setup = __setup_cpu_603, 1096 .cpu_setup = __setup_cpu_603,
@@ -1041,6 +1105,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1041 .cpu_name = "e300c4", 1105 .cpu_name = "e300c4",
1042 .cpu_features = CPU_FTRS_E300, 1106 .cpu_features = CPU_FTRS_E300,
1043 .cpu_user_features = COMMON_USER, 1107 .cpu_user_features = COMMON_USER,
1108 .mmu_features = MMU_FTR_USE_HIGH_BATS,
1044 .icache_bsize = 32, 1109 .icache_bsize = 32,
1045 .dcache_bsize = 32, 1110 .dcache_bsize = 32,
1046 .cpu_setup = __setup_cpu_603, 1111 .cpu_setup = __setup_cpu_603,
@@ -1056,6 +1121,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1056 .cpu_name = "(generic PPC)", 1121 .cpu_name = "(generic PPC)",
1057 .cpu_features = CPU_FTRS_CLASSIC32, 1122 .cpu_features = CPU_FTRS_CLASSIC32,
1058 .cpu_user_features = COMMON_USER, 1123 .cpu_user_features = COMMON_USER,
1124 .mmu_features = MMU_FTR_HPTE_TABLE,
1059 .icache_bsize = 32, 1125 .icache_bsize = 32,
1060 .dcache_bsize = 32, 1126 .dcache_bsize = 32,
1061 .machine_check = machine_check_generic, 1127 .machine_check = machine_check_generic,
@@ -1071,6 +1137,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1071 * if the 8xx code is there.... */ 1137 * if the 8xx code is there.... */
1072 .cpu_features = CPU_FTRS_8XX, 1138 .cpu_features = CPU_FTRS_8XX,
1073 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1139 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1140 .mmu_features = MMU_FTR_TYPE_8xx,
1074 .icache_bsize = 16, 1141 .icache_bsize = 16,
1075 .dcache_bsize = 16, 1142 .dcache_bsize = 16,
1076 .platform = "ppc823", 1143 .platform = "ppc823",
@@ -1083,6 +1150,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1083 .cpu_name = "403GC", 1150 .cpu_name = "403GC",
1084 .cpu_features = CPU_FTRS_40X, 1151 .cpu_features = CPU_FTRS_40X,
1085 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1152 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1153 .mmu_features = MMU_FTR_TYPE_40x,
1086 .icache_bsize = 16, 1154 .icache_bsize = 16,
1087 .dcache_bsize = 16, 1155 .dcache_bsize = 16,
1088 .machine_check = machine_check_4xx, 1156 .machine_check = machine_check_4xx,
@@ -1095,6 +1163,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1095 .cpu_features = CPU_FTRS_40X, 1163 .cpu_features = CPU_FTRS_40X,
1096 .cpu_user_features = PPC_FEATURE_32 | 1164 .cpu_user_features = PPC_FEATURE_32 |
1097 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, 1165 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
1166 .mmu_features = MMU_FTR_TYPE_40x,
1098 .icache_bsize = 16, 1167 .icache_bsize = 16,
1099 .dcache_bsize = 16, 1168 .dcache_bsize = 16,
1100 .machine_check = machine_check_4xx, 1169 .machine_check = machine_check_4xx,
@@ -1106,6 +1175,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1106 .cpu_name = "403G ??", 1175 .cpu_name = "403G ??",
1107 .cpu_features = CPU_FTRS_40X, 1176 .cpu_features = CPU_FTRS_40X,
1108 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1177 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1178 .mmu_features = MMU_FTR_TYPE_40x,
1109 .icache_bsize = 16, 1179 .icache_bsize = 16,
1110 .dcache_bsize = 16, 1180 .dcache_bsize = 16,
1111 .machine_check = machine_check_4xx, 1181 .machine_check = machine_check_4xx,
@@ -1118,6 +1188,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1118 .cpu_features = CPU_FTRS_40X, 1188 .cpu_features = CPU_FTRS_40X,
1119 .cpu_user_features = PPC_FEATURE_32 | 1189 .cpu_user_features = PPC_FEATURE_32 |
1120 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1190 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1191 .mmu_features = MMU_FTR_TYPE_40x,
1121 .icache_bsize = 32, 1192 .icache_bsize = 32,
1122 .dcache_bsize = 32, 1193 .dcache_bsize = 32,
1123 .machine_check = machine_check_4xx, 1194 .machine_check = machine_check_4xx,
@@ -1130,6 +1201,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1130 .cpu_features = CPU_FTRS_40X, 1201 .cpu_features = CPU_FTRS_40X,
1131 .cpu_user_features = PPC_FEATURE_32 | 1202 .cpu_user_features = PPC_FEATURE_32 |
1132 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1203 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1204 .mmu_features = MMU_FTR_TYPE_40x,
1133 .icache_bsize = 32, 1205 .icache_bsize = 32,
1134 .dcache_bsize = 32, 1206 .dcache_bsize = 32,
1135 .machine_check = machine_check_4xx, 1207 .machine_check = machine_check_4xx,
@@ -1142,6 +1214,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1142 .cpu_features = CPU_FTRS_40X, 1214 .cpu_features = CPU_FTRS_40X,
1143 .cpu_user_features = PPC_FEATURE_32 | 1215 .cpu_user_features = PPC_FEATURE_32 |
1144 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1216 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1217 .mmu_features = MMU_FTR_TYPE_40x,
1145 .icache_bsize = 32, 1218 .icache_bsize = 32,
1146 .dcache_bsize = 32, 1219 .dcache_bsize = 32,
1147 .machine_check = machine_check_4xx, 1220 .machine_check = machine_check_4xx,
@@ -1154,6 +1227,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1154 .cpu_features = CPU_FTRS_40X, 1227 .cpu_features = CPU_FTRS_40X,
1155 .cpu_user_features = PPC_FEATURE_32 | 1228 .cpu_user_features = PPC_FEATURE_32 |
1156 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1229 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1230 .mmu_features = MMU_FTR_TYPE_40x,
1157 .icache_bsize = 32, 1231 .icache_bsize = 32,
1158 .dcache_bsize = 32, 1232 .dcache_bsize = 32,
1159 .machine_check = machine_check_4xx, 1233 .machine_check = machine_check_4xx,
@@ -1166,6 +1240,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1166 .cpu_features = CPU_FTRS_40X, 1240 .cpu_features = CPU_FTRS_40X,
1167 .cpu_user_features = PPC_FEATURE_32 | 1241 .cpu_user_features = PPC_FEATURE_32 |
1168 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1242 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1243 .mmu_features = MMU_FTR_TYPE_40x,
1169 .icache_bsize = 32, 1244 .icache_bsize = 32,
1170 .dcache_bsize = 32, 1245 .dcache_bsize = 32,
1171 .machine_check = machine_check_4xx, 1246 .machine_check = machine_check_4xx,
@@ -1178,6 +1253,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1178 .cpu_features = CPU_FTRS_40X, 1253 .cpu_features = CPU_FTRS_40X,
1179 .cpu_user_features = PPC_FEATURE_32 | 1254 .cpu_user_features = PPC_FEATURE_32 |
1180 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1255 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1256 .mmu_features = MMU_FTR_TYPE_40x,
1181 .icache_bsize = 32, 1257 .icache_bsize = 32,
1182 .dcache_bsize = 32, 1258 .dcache_bsize = 32,
1183 .machine_check = machine_check_4xx, 1259 .machine_check = machine_check_4xx,
@@ -1190,6 +1266,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1190 .cpu_features = CPU_FTRS_40X, 1266 .cpu_features = CPU_FTRS_40X,
1191 .cpu_user_features = PPC_FEATURE_32 | 1267 .cpu_user_features = PPC_FEATURE_32 |
1192 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1268 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1269 .mmu_features = MMU_FTR_TYPE_40x,
1193 .icache_bsize = 32, 1270 .icache_bsize = 32,
1194 .dcache_bsize = 32, 1271 .dcache_bsize = 32,
1195 .machine_check = machine_check_4xx, 1272 .machine_check = machine_check_4xx,
@@ -1202,6 +1279,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1202 .cpu_features = CPU_FTRS_40X, 1279 .cpu_features = CPU_FTRS_40X,
1203 .cpu_user_features = PPC_FEATURE_32 | 1280 .cpu_user_features = PPC_FEATURE_32 |
1204 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1281 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1282 .mmu_features = MMU_FTR_TYPE_40x,
1205 .icache_bsize = 32, 1283 .icache_bsize = 32,
1206 .dcache_bsize = 32, 1284 .dcache_bsize = 32,
1207 .machine_check = machine_check_4xx, 1285 .machine_check = machine_check_4xx,
@@ -1213,6 +1291,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1213 .cpu_name = "405LP", 1291 .cpu_name = "405LP",
1214 .cpu_features = CPU_FTRS_40X, 1292 .cpu_features = CPU_FTRS_40X,
1215 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 1293 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
1294 .mmu_features = MMU_FTR_TYPE_40x,
1216 .icache_bsize = 32, 1295 .icache_bsize = 32,
1217 .dcache_bsize = 32, 1296 .dcache_bsize = 32,
1218 .machine_check = machine_check_4xx, 1297 .machine_check = machine_check_4xx,
@@ -1225,6 +1304,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1225 .cpu_features = CPU_FTRS_40X, 1304 .cpu_features = CPU_FTRS_40X,
1226 .cpu_user_features = PPC_FEATURE_32 | 1305 .cpu_user_features = PPC_FEATURE_32 |
1227 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1306 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1307 .mmu_features = MMU_FTR_TYPE_40x,
1228 .icache_bsize = 32, 1308 .icache_bsize = 32,
1229 .dcache_bsize = 32, 1309 .dcache_bsize = 32,
1230 .machine_check = machine_check_4xx, 1310 .machine_check = machine_check_4xx,
@@ -1237,6 +1317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1237 .cpu_features = CPU_FTRS_40X, 1317 .cpu_features = CPU_FTRS_40X,
1238 .cpu_user_features = PPC_FEATURE_32 | 1318 .cpu_user_features = PPC_FEATURE_32 |
1239 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1319 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1320 .mmu_features = MMU_FTR_TYPE_40x,
1240 .icache_bsize = 32, 1321 .icache_bsize = 32,
1241 .dcache_bsize = 32, 1322 .dcache_bsize = 32,
1242 .machine_check = machine_check_4xx, 1323 .machine_check = machine_check_4xx,
@@ -1249,6 +1330,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1249 .cpu_features = CPU_FTRS_40X, 1330 .cpu_features = CPU_FTRS_40X,
1250 .cpu_user_features = PPC_FEATURE_32 | 1331 .cpu_user_features = PPC_FEATURE_32 |
1251 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1332 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1333 .mmu_features = MMU_FTR_TYPE_40x,
1252 .icache_bsize = 32, 1334 .icache_bsize = 32,
1253 .dcache_bsize = 32, 1335 .dcache_bsize = 32,
1254 .machine_check = machine_check_4xx, 1336 .machine_check = machine_check_4xx,
@@ -1261,6 +1343,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1261 .cpu_features = CPU_FTRS_40X, 1343 .cpu_features = CPU_FTRS_40X,
1262 .cpu_user_features = PPC_FEATURE_32 | 1344 .cpu_user_features = PPC_FEATURE_32 |
1263 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1345 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1346 .mmu_features = MMU_FTR_TYPE_40x,
1264 .icache_bsize = 32, 1347 .icache_bsize = 32,
1265 .dcache_bsize = 32, 1348 .dcache_bsize = 32,
1266 .machine_check = machine_check_4xx, 1349 .machine_check = machine_check_4xx,
@@ -1273,6 +1356,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1273 .cpu_features = CPU_FTRS_40X, 1356 .cpu_features = CPU_FTRS_40X,
1274 .cpu_user_features = PPC_FEATURE_32 | 1357 .cpu_user_features = PPC_FEATURE_32 |
1275 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1358 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1359 .mmu_features = MMU_FTR_TYPE_40x,
1276 .icache_bsize = 32, 1360 .icache_bsize = 32,
1277 .dcache_bsize = 32, 1361 .dcache_bsize = 32,
1278 .machine_check = machine_check_4xx, 1362 .machine_check = machine_check_4xx,
@@ -1286,6 +1370,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1286 .cpu_features = CPU_FTRS_40X, 1370 .cpu_features = CPU_FTRS_40X,
1287 .cpu_user_features = PPC_FEATURE_32 | 1371 .cpu_user_features = PPC_FEATURE_32 |
1288 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1372 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1373 .mmu_features = MMU_FTR_TYPE_40x,
1289 .icache_bsize = 32, 1374 .icache_bsize = 32,
1290 .dcache_bsize = 32, 1375 .dcache_bsize = 32,
1291 .machine_check = machine_check_4xx, 1376 .machine_check = machine_check_4xx,
@@ -1298,6 +1383,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1298 .cpu_features = CPU_FTRS_40X, 1383 .cpu_features = CPU_FTRS_40X,
1299 .cpu_user_features = PPC_FEATURE_32 | 1384 .cpu_user_features = PPC_FEATURE_32 |
1300 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 1385 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1386 .mmu_features = MMU_FTR_TYPE_40x,
1301 .icache_bsize = 32, 1387 .icache_bsize = 32,
1302 .dcache_bsize = 32, 1388 .dcache_bsize = 32,
1303 .machine_check = machine_check_4xx, 1389 .machine_check = machine_check_4xx,
@@ -1312,6 +1398,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1312 .cpu_name = "440GR Rev. A", 1398 .cpu_name = "440GR Rev. A",
1313 .cpu_features = CPU_FTRS_44X, 1399 .cpu_features = CPU_FTRS_44X,
1314 .cpu_user_features = COMMON_USER_BOOKE, 1400 .cpu_user_features = COMMON_USER_BOOKE,
1401 .mmu_features = MMU_FTR_TYPE_44x,
1315 .icache_bsize = 32, 1402 .icache_bsize = 32,
1316 .dcache_bsize = 32, 1403 .dcache_bsize = 32,
1317 .machine_check = machine_check_4xx, 1404 .machine_check = machine_check_4xx,
@@ -1323,6 +1410,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1323 .cpu_name = "440EP Rev. A", 1410 .cpu_name = "440EP Rev. A",
1324 .cpu_features = CPU_FTRS_44X, 1411 .cpu_features = CPU_FTRS_44X,
1325 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1412 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1413 .mmu_features = MMU_FTR_TYPE_44x,
1326 .icache_bsize = 32, 1414 .icache_bsize = 32,
1327 .dcache_bsize = 32, 1415 .dcache_bsize = 32,
1328 .cpu_setup = __setup_cpu_440ep, 1416 .cpu_setup = __setup_cpu_440ep,
@@ -1335,6 +1423,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1335 .cpu_name = "440GR Rev. B", 1423 .cpu_name = "440GR Rev. B",
1336 .cpu_features = CPU_FTRS_44X, 1424 .cpu_features = CPU_FTRS_44X,
1337 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1425 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1426 .mmu_features = MMU_FTR_TYPE_44x,
1338 .icache_bsize = 32, 1427 .icache_bsize = 32,
1339 .dcache_bsize = 32, 1428 .dcache_bsize = 32,
1340 .machine_check = machine_check_4xx, 1429 .machine_check = machine_check_4xx,
@@ -1346,6 +1435,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1346 .cpu_name = "440EP Rev. C", 1435 .cpu_name = "440EP Rev. C",
1347 .cpu_features = CPU_FTRS_44X, 1436 .cpu_features = CPU_FTRS_44X,
1348 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1437 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1438 .mmu_features = MMU_FTR_TYPE_44x,
1349 .icache_bsize = 32, 1439 .icache_bsize = 32,
1350 .dcache_bsize = 32, 1440 .dcache_bsize = 32,
1351 .cpu_setup = __setup_cpu_440ep, 1441 .cpu_setup = __setup_cpu_440ep,
@@ -1358,6 +1448,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1358 .cpu_name = "440EP Rev. B", 1448 .cpu_name = "440EP Rev. B",
1359 .cpu_features = CPU_FTRS_44X, 1449 .cpu_features = CPU_FTRS_44X,
1360 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1450 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1451 .mmu_features = MMU_FTR_TYPE_44x,
1361 .icache_bsize = 32, 1452 .icache_bsize = 32,
1362 .dcache_bsize = 32, 1453 .dcache_bsize = 32,
1363 .cpu_setup = __setup_cpu_440ep, 1454 .cpu_setup = __setup_cpu_440ep,
@@ -1370,6 +1461,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1370 .cpu_name = "440GRX", 1461 .cpu_name = "440GRX",
1371 .cpu_features = CPU_FTRS_44X, 1462 .cpu_features = CPU_FTRS_44X,
1372 .cpu_user_features = COMMON_USER_BOOKE, 1463 .cpu_user_features = COMMON_USER_BOOKE,
1464 .mmu_features = MMU_FTR_TYPE_44x,
1373 .icache_bsize = 32, 1465 .icache_bsize = 32,
1374 .dcache_bsize = 32, 1466 .dcache_bsize = 32,
1375 .cpu_setup = __setup_cpu_440grx, 1467 .cpu_setup = __setup_cpu_440grx,
@@ -1382,6 +1474,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1382 .cpu_name = "440EPX", 1474 .cpu_name = "440EPX",
1383 .cpu_features = CPU_FTRS_44X, 1475 .cpu_features = CPU_FTRS_44X,
1384 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1476 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1477 .mmu_features = MMU_FTR_TYPE_44x,
1385 .icache_bsize = 32, 1478 .icache_bsize = 32,
1386 .dcache_bsize = 32, 1479 .dcache_bsize = 32,
1387 .cpu_setup = __setup_cpu_440epx, 1480 .cpu_setup = __setup_cpu_440epx,
@@ -1394,6 +1487,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1394 .cpu_name = "440GP Rev. B", 1487 .cpu_name = "440GP Rev. B",
1395 .cpu_features = CPU_FTRS_44X, 1488 .cpu_features = CPU_FTRS_44X,
1396 .cpu_user_features = COMMON_USER_BOOKE, 1489 .cpu_user_features = COMMON_USER_BOOKE,
1490 .mmu_features = MMU_FTR_TYPE_44x,
1397 .icache_bsize = 32, 1491 .icache_bsize = 32,
1398 .dcache_bsize = 32, 1492 .dcache_bsize = 32,
1399 .machine_check = machine_check_4xx, 1493 .machine_check = machine_check_4xx,
@@ -1405,6 +1499,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1405 .cpu_name = "440GP Rev. C", 1499 .cpu_name = "440GP Rev. C",
1406 .cpu_features = CPU_FTRS_44X, 1500 .cpu_features = CPU_FTRS_44X,
1407 .cpu_user_features = COMMON_USER_BOOKE, 1501 .cpu_user_features = COMMON_USER_BOOKE,
1502 .mmu_features = MMU_FTR_TYPE_44x,
1408 .icache_bsize = 32, 1503 .icache_bsize = 32,
1409 .dcache_bsize = 32, 1504 .dcache_bsize = 32,
1410 .machine_check = machine_check_4xx, 1505 .machine_check = machine_check_4xx,
@@ -1416,6 +1511,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1416 .cpu_name = "440GX Rev. A", 1511 .cpu_name = "440GX Rev. A",
1417 .cpu_features = CPU_FTRS_44X, 1512 .cpu_features = CPU_FTRS_44X,
1418 .cpu_user_features = COMMON_USER_BOOKE, 1513 .cpu_user_features = COMMON_USER_BOOKE,
1514 .mmu_features = MMU_FTR_TYPE_44x,
1419 .icache_bsize = 32, 1515 .icache_bsize = 32,
1420 .dcache_bsize = 32, 1516 .dcache_bsize = 32,
1421 .cpu_setup = __setup_cpu_440gx, 1517 .cpu_setup = __setup_cpu_440gx,
@@ -1428,6 +1524,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1428 .cpu_name = "440GX Rev. B", 1524 .cpu_name = "440GX Rev. B",
1429 .cpu_features = CPU_FTRS_44X, 1525 .cpu_features = CPU_FTRS_44X,
1430 .cpu_user_features = COMMON_USER_BOOKE, 1526 .cpu_user_features = COMMON_USER_BOOKE,
1527 .mmu_features = MMU_FTR_TYPE_44x,
1431 .icache_bsize = 32, 1528 .icache_bsize = 32,
1432 .dcache_bsize = 32, 1529 .dcache_bsize = 32,
1433 .cpu_setup = __setup_cpu_440gx, 1530 .cpu_setup = __setup_cpu_440gx,
@@ -1440,6 +1537,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1440 .cpu_name = "440GX Rev. C", 1537 .cpu_name = "440GX Rev. C",
1441 .cpu_features = CPU_FTRS_44X, 1538 .cpu_features = CPU_FTRS_44X,
1442 .cpu_user_features = COMMON_USER_BOOKE, 1539 .cpu_user_features = COMMON_USER_BOOKE,
1540 .mmu_features = MMU_FTR_TYPE_44x,
1443 .icache_bsize = 32, 1541 .icache_bsize = 32,
1444 .dcache_bsize = 32, 1542 .dcache_bsize = 32,
1445 .cpu_setup = __setup_cpu_440gx, 1543 .cpu_setup = __setup_cpu_440gx,
@@ -1452,6 +1550,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1452 .cpu_name = "440GX Rev. F", 1550 .cpu_name = "440GX Rev. F",
1453 .cpu_features = CPU_FTRS_44X, 1551 .cpu_features = CPU_FTRS_44X,
1454 .cpu_user_features = COMMON_USER_BOOKE, 1552 .cpu_user_features = COMMON_USER_BOOKE,
1553 .mmu_features = MMU_FTR_TYPE_44x,
1455 .icache_bsize = 32, 1554 .icache_bsize = 32,
1456 .dcache_bsize = 32, 1555 .dcache_bsize = 32,
1457 .cpu_setup = __setup_cpu_440gx, 1556 .cpu_setup = __setup_cpu_440gx,
@@ -1464,6 +1563,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1464 .cpu_name = "440SP Rev. A", 1563 .cpu_name = "440SP Rev. A",
1465 .cpu_features = CPU_FTRS_44X, 1564 .cpu_features = CPU_FTRS_44X,
1466 .cpu_user_features = COMMON_USER_BOOKE, 1565 .cpu_user_features = COMMON_USER_BOOKE,
1566 .mmu_features = MMU_FTR_TYPE_44x,
1467 .icache_bsize = 32, 1567 .icache_bsize = 32,
1468 .dcache_bsize = 32, 1568 .dcache_bsize = 32,
1469 .machine_check = machine_check_4xx, 1569 .machine_check = machine_check_4xx,
@@ -1475,6 +1575,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1475 .cpu_name = "440SPe Rev. A", 1575 .cpu_name = "440SPe Rev. A",
1476 .cpu_features = CPU_FTRS_44X, 1576 .cpu_features = CPU_FTRS_44X,
1477 .cpu_user_features = COMMON_USER_BOOKE, 1577 .cpu_user_features = COMMON_USER_BOOKE,
1578 .mmu_features = MMU_FTR_TYPE_44x,
1478 .icache_bsize = 32, 1579 .icache_bsize = 32,
1479 .dcache_bsize = 32, 1580 .dcache_bsize = 32,
1480 .cpu_setup = __setup_cpu_440spe, 1581 .cpu_setup = __setup_cpu_440spe,
@@ -1487,6 +1588,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1487 .cpu_name = "440SPe Rev. B", 1588 .cpu_name = "440SPe Rev. B",
1488 .cpu_features = CPU_FTRS_44X, 1589 .cpu_features = CPU_FTRS_44X,
1489 .cpu_user_features = COMMON_USER_BOOKE, 1590 .cpu_user_features = COMMON_USER_BOOKE,
1591 .mmu_features = MMU_FTR_TYPE_44x,
1490 .icache_bsize = 32, 1592 .icache_bsize = 32,
1491 .dcache_bsize = 32, 1593 .dcache_bsize = 32,
1492 .cpu_setup = __setup_cpu_440spe, 1594 .cpu_setup = __setup_cpu_440spe,
@@ -1499,6 +1601,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1499 .cpu_name = "440 in Virtex-5 FXT", 1601 .cpu_name = "440 in Virtex-5 FXT",
1500 .cpu_features = CPU_FTRS_44X, 1602 .cpu_features = CPU_FTRS_44X,
1501 .cpu_user_features = COMMON_USER_BOOKE, 1603 .cpu_user_features = COMMON_USER_BOOKE,
1604 .mmu_features = MMU_FTR_TYPE_44x,
1502 .icache_bsize = 32, 1605 .icache_bsize = 32,
1503 .dcache_bsize = 32, 1606 .dcache_bsize = 32,
1504 .cpu_setup = __setup_cpu_440x5, 1607 .cpu_setup = __setup_cpu_440x5,
@@ -1509,8 +1612,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
1509 .pvr_mask = 0xffff0002, 1612 .pvr_mask = 0xffff0002,
1510 .pvr_value = 0x13020002, 1613 .pvr_value = 0x13020002,
1511 .cpu_name = "460EX", 1614 .cpu_name = "460EX",
1512 .cpu_features = CPU_FTRS_44X, 1615 .cpu_features = CPU_FTRS_440x6,
1513 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1616 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1617 .mmu_features = MMU_FTR_TYPE_44x,
1514 .icache_bsize = 32, 1618 .icache_bsize = 32,
1515 .dcache_bsize = 32, 1619 .dcache_bsize = 32,
1516 .cpu_setup = __setup_cpu_460ex, 1620 .cpu_setup = __setup_cpu_460ex,
@@ -1521,8 +1625,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
1521 .pvr_mask = 0xffff0002, 1625 .pvr_mask = 0xffff0002,
1522 .pvr_value = 0x13020000, 1626 .pvr_value = 0x13020000,
1523 .cpu_name = "460GT", 1627 .cpu_name = "460GT",
1524 .cpu_features = CPU_FTRS_44X, 1628 .cpu_features = CPU_FTRS_440x6,
1525 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1629 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1630 .mmu_features = MMU_FTR_TYPE_44x,
1526 .icache_bsize = 32, 1631 .icache_bsize = 32,
1527 .dcache_bsize = 32, 1632 .dcache_bsize = 32,
1528 .cpu_setup = __setup_cpu_460gt, 1633 .cpu_setup = __setup_cpu_460gt,
@@ -1535,6 +1640,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1535 .cpu_name = "(generic 44x PPC)", 1640 .cpu_name = "(generic 44x PPC)",
1536 .cpu_features = CPU_FTRS_44X, 1641 .cpu_features = CPU_FTRS_44X,
1537 .cpu_user_features = COMMON_USER_BOOKE, 1642 .cpu_user_features = COMMON_USER_BOOKE,
1643 .mmu_features = MMU_FTR_TYPE_44x,
1538 .icache_bsize = 32, 1644 .icache_bsize = 32,
1539 .dcache_bsize = 32, 1645 .dcache_bsize = 32,
1540 .machine_check = machine_check_4xx, 1646 .machine_check = machine_check_4xx,
@@ -1551,6 +1657,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1551 .cpu_user_features = COMMON_USER_BOOKE | 1657 .cpu_user_features = COMMON_USER_BOOKE |
1552 PPC_FEATURE_HAS_EFP_SINGLE | 1658 PPC_FEATURE_HAS_EFP_SINGLE |
1553 PPC_FEATURE_UNIFIED_CACHE, 1659 PPC_FEATURE_UNIFIED_CACHE,
1660 .mmu_features = MMU_FTR_TYPE_FSL_E,
1554 .dcache_bsize = 32, 1661 .dcache_bsize = 32,
1555 .machine_check = machine_check_e200, 1662 .machine_check = machine_check_e200,
1556 .platform = "ppc5554", 1663 .platform = "ppc5554",
@@ -1565,6 +1672,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1565 PPC_FEATURE_HAS_SPE_COMP | 1672 PPC_FEATURE_HAS_SPE_COMP |
1566 PPC_FEATURE_HAS_EFP_SINGLE_COMP | 1673 PPC_FEATURE_HAS_EFP_SINGLE_COMP |
1567 PPC_FEATURE_UNIFIED_CACHE, 1674 PPC_FEATURE_UNIFIED_CACHE,
1675 .mmu_features = MMU_FTR_TYPE_FSL_E,
1568 .dcache_bsize = 32, 1676 .dcache_bsize = 32,
1569 .machine_check = machine_check_e200, 1677 .machine_check = machine_check_e200,
1570 .platform = "ppc5554", 1678 .platform = "ppc5554",
@@ -1577,6 +1685,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1577 .cpu_user_features = COMMON_USER_BOOKE | 1685 .cpu_user_features = COMMON_USER_BOOKE |
1578 PPC_FEATURE_HAS_EFP_SINGLE | 1686 PPC_FEATURE_HAS_EFP_SINGLE |
1579 PPC_FEATURE_UNIFIED_CACHE, 1687 PPC_FEATURE_UNIFIED_CACHE,
1688 .mmu_features = MMU_FTR_TYPE_FSL_E,
1580 .dcache_bsize = 32, 1689 .dcache_bsize = 32,
1581 .machine_check = machine_check_e200, 1690 .machine_check = machine_check_e200,
1582 .platform = "ppc5554", 1691 .platform = "ppc5554",
@@ -1591,6 +1700,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1591 .cpu_user_features = COMMON_USER_BOOKE | 1700 .cpu_user_features = COMMON_USER_BOOKE |
1592 PPC_FEATURE_HAS_SPE_COMP | 1701 PPC_FEATURE_HAS_SPE_COMP |
1593 PPC_FEATURE_HAS_EFP_SINGLE_COMP, 1702 PPC_FEATURE_HAS_EFP_SINGLE_COMP,
1703 .mmu_features = MMU_FTR_TYPE_FSL_E,
1594 .icache_bsize = 32, 1704 .icache_bsize = 32,
1595 .dcache_bsize = 32, 1705 .dcache_bsize = 32,
1596 .num_pmcs = 4, 1706 .num_pmcs = 4,
@@ -1608,6 +1718,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1608 PPC_FEATURE_HAS_SPE_COMP | 1718 PPC_FEATURE_HAS_SPE_COMP |
1609 PPC_FEATURE_HAS_EFP_SINGLE_COMP | 1719 PPC_FEATURE_HAS_EFP_SINGLE_COMP |
1610 PPC_FEATURE_HAS_EFP_DOUBLE_COMP, 1720 PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
1721 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
1611 .icache_bsize = 32, 1722 .icache_bsize = 32,
1612 .dcache_bsize = 32, 1723 .dcache_bsize = 32,
1613 .num_pmcs = 4, 1724 .num_pmcs = 4,
@@ -1622,6 +1733,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1622 .cpu_name = "e500mc", 1733 .cpu_name = "e500mc",
1623 .cpu_features = CPU_FTRS_E500MC, 1734 .cpu_features = CPU_FTRS_E500MC,
1624 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1735 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1736 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
1625 .icache_bsize = 64, 1737 .icache_bsize = 64,
1626 .dcache_bsize = 64, 1738 .dcache_bsize = 64,
1627 .num_pmcs = 4, 1739 .num_pmcs = 4,
@@ -1638,6 +1750,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1638 .cpu_user_features = COMMON_USER_BOOKE | 1750 .cpu_user_features = COMMON_USER_BOOKE |
1639 PPC_FEATURE_HAS_SPE_COMP | 1751 PPC_FEATURE_HAS_SPE_COMP |
1640 PPC_FEATURE_HAS_EFP_SINGLE_COMP, 1752 PPC_FEATURE_HAS_EFP_SINGLE_COMP,
1753 .mmu_features = MMU_FTR_TYPE_FSL_E,
1641 .icache_bsize = 32, 1754 .icache_bsize = 32,
1642 .dcache_bsize = 32, 1755 .dcache_bsize = 32,
1643 .machine_check = machine_check_e500, 1756 .machine_check = machine_check_e500,
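The hunks above give every 32-bit cpu_spec entry an explicit .mmu_features mask (hash-table MMU, high BATs, 8xx/40x/44x/FSL Book-E MMU types, big-phys support) instead of leaving the MMU type implied by the CPU feature bits. A minimal sketch of how such a mask is consumed; the helper name, header location and usage site are assumptions, not taken from this patch:

    /* sketch: bit-test helper over the new cpu_spec field */
    #include <asm/cputable.h>   /* cur_cpu_spec; MMU_FTR_* location assumed */

    static inline int example_mmu_has_feature(unsigned long feature)
    {
            /* cur_cpu_spec points at the matched cpu_specs[] entry */
            return (cur_cpu_spec->mmu_features & feature) != 0;
    }

    /* illustrative usage: only program BAT4-7 on parts that advertise them,
     * e.g.  if (example_mmu_has_feature(MMU_FTR_USE_HIGH_BATS))
     *               setup_high_bats();          (helper name invented)
     */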
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 3a6eaa876ee1..1c5c8a6fc129 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev,
120{ 120{
121} 121}
122 122
123#ifdef CONFIG_NOT_COHERENT_CACHE
124static inline void dma_direct_sync_sg(struct device *dev,
125 struct scatterlist *sgl, int nents,
126 enum dma_data_direction direction)
127{
128 struct scatterlist *sg;
129 int i;
130
131 for_each_sg(sgl, sg, nents, i)
132 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
133}
134
135static inline void dma_direct_sync_single_range(struct device *dev,
136 dma_addr_t dma_handle, unsigned long offset, size_t size,
137 enum dma_data_direction direction)
138{
139 __dma_sync(bus_to_virt(dma_handle+offset), size, direction);
140}
141#endif
142
123struct dma_mapping_ops dma_direct_ops = { 143struct dma_mapping_ops dma_direct_ops = {
124 .alloc_coherent = dma_direct_alloc_coherent, 144 .alloc_coherent = dma_direct_alloc_coherent,
125 .free_coherent = dma_direct_free_coherent, 145 .free_coherent = dma_direct_free_coherent,
@@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = {
128 .dma_supported = dma_direct_dma_supported, 148 .dma_supported = dma_direct_dma_supported,
129 .map_page = dma_direct_map_page, 149 .map_page = dma_direct_map_page,
130 .unmap_page = dma_direct_unmap_page, 150 .unmap_page = dma_direct_unmap_page,
151#ifdef CONFIG_NOT_COHERENT_CACHE
152 .sync_single_range_for_cpu = dma_direct_sync_single_range,
153 .sync_single_range_for_device = dma_direct_sync_single_range,
154 .sync_sg_for_cpu = dma_direct_sync_sg,
155 .sync_sg_for_device = dma_direct_sync_sg,
156#endif
131}; 157};
132EXPORT_SYMBOL(dma_direct_ops); 158EXPORT_SYMBOL(dma_direct_ops);
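With CONFIG_NOT_COHERENT_CACHE set, dma_direct_ops now supplies sync hooks that flush or invalidate the affected pages through __dma_sync()/__dma_sync_page(), so the generic dma_sync_*() calls behave correctly on cache-incoherent parts. A hedged driver-side sketch of the pattern these hooks back; the function and buffer handling are invented for illustration:

    /* sketch: CPU inspects a streaming DMA buffer between device transfers */
    #include <linux/dma-mapping.h>

    static void example_rx_complete(struct device *dev, void *buf,
                                    dma_addr_t handle, size_t len)
    {
            /* make the device's writes visible to the CPU */
            dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

            /* ... parse or copy out of buf here ... */

            /* hand the buffer back to the device for the next fill */
            dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
    }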
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 0c326823c6d4..a1c4cfd25ded 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -31,6 +31,7 @@
31#include <asm/ppc_asm.h> 31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h> 32#include <asm/asm-offsets.h>
33#include <asm/ptrace.h> 33#include <asm/ptrace.h>
34#include <asm/bug.h>
34 35
35/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ 36/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
36#define LOAD_BAT(n, reg, RA, RB) \ 37#define LOAD_BAT(n, reg, RA, RB) \
@@ -182,7 +183,8 @@ __after_mmu_off:
182 bl reloc_offset 183 bl reloc_offset
183 mr r26,r3 184 mr r26,r3
184 addis r4,r3,KERNELBASE@h /* current address of _start */ 185 addis r4,r3,KERNELBASE@h /* current address of _start */
185 cmpwi 0,r4,0 /* are we already running at 0? */ 186 lis r5,PHYSICAL_START@h
187 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
186 bne relocate_kernel 188 bne relocate_kernel
187/* 189/*
188 * we now have the 1st 16M of ram mapped with the bats. 190 * we now have the 1st 16M of ram mapped with the bats.
@@ -810,13 +812,13 @@ giveup_altivec:
810 812
811/* 813/*
812 * This code is jumped to from the startup code to copy 814 * This code is jumped to from the startup code to copy
813 * the kernel image to physical address 0. 815 * the kernel image to physical address PHYSICAL_START.
814 */ 816 */
815relocate_kernel: 817relocate_kernel:
816 addis r9,r26,klimit@ha /* fetch klimit */ 818 addis r9,r26,klimit@ha /* fetch klimit */
817 lwz r25,klimit@l(r9) 819 lwz r25,klimit@l(r9)
818 addis r25,r25,-KERNELBASE@h 820 addis r25,r25,-KERNELBASE@h
819 li r3,0 /* Destination base address */ 821 lis r3,PHYSICAL_START@h /* Destination base address */
820 li r6,0 /* Destination offset */ 822 li r6,0 /* Destination offset */
821 li r5,0x4000 /* # bytes of memory to copy */ 823 li r5,0x4000 /* # bytes of memory to copy */
822 bl copy_and_flush /* copy the first 0x4000 bytes */ 824 bl copy_and_flush /* copy the first 0x4000 bytes */
@@ -989,12 +991,12 @@ load_up_mmu:
989 LOAD_BAT(1,r3,r4,r5) 991 LOAD_BAT(1,r3,r4,r5)
990 LOAD_BAT(2,r3,r4,r5) 992 LOAD_BAT(2,r3,r4,r5)
991 LOAD_BAT(3,r3,r4,r5) 993 LOAD_BAT(3,r3,r4,r5)
992BEGIN_FTR_SECTION 994BEGIN_MMU_FTR_SECTION
993 LOAD_BAT(4,r3,r4,r5) 995 LOAD_BAT(4,r3,r4,r5)
994 LOAD_BAT(5,r3,r4,r5) 996 LOAD_BAT(5,r3,r4,r5)
995 LOAD_BAT(6,r3,r4,r5) 997 LOAD_BAT(6,r3,r4,r5)
996 LOAD_BAT(7,r3,r4,r5) 998 LOAD_BAT(7,r3,r4,r5)
997END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) 999END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
998 blr 1000 blr
999 1001
1000/* 1002/*
@@ -1070,9 +1072,14 @@ start_here:
1070 RFI 1072 RFI
1071 1073
1072/* 1074/*
1075 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
1076 *
1073 * Set up the segment registers for a new context. 1077 * Set up the segment registers for a new context.
1074 */ 1078 */
1075_ENTRY(set_context) 1079_ENTRY(switch_mmu_context)
1080 lwz r3,MMCONTEXTID(r4)
1081 cmpwi cr0,r3,0
1082 blt- 4f
1076 mulli r3,r3,897 /* multiply context by skew factor */ 1083 mulli r3,r3,897 /* multiply context by skew factor */
1077 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ 1084 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1078 addis r3,r3,0x6000 /* Set Ks, Ku bits */ 1085 addis r3,r3,0x6000 /* Set Ks, Ku bits */
@@ -1083,6 +1090,7 @@ _ENTRY(set_context)
1083 /* Context switch the PTE pointer for the Abatron BDI2000. 1090 /* Context switch the PTE pointer for the Abatron BDI2000.
1084 * The PGDIR is passed as second argument. 1091 * The PGDIR is passed as second argument.
1085 */ 1092 */
1093 lwz r4,MM_PGD(r4)
1086 lis r5, KERNELBASE@h 1094 lis r5, KERNELBASE@h
1087 lwz r5, 0xf0(r5) 1095 lwz r5, 0xf0(r5)
1088 stw r4, 0x4(r5) 1096 stw r4, 0x4(r5)
@@ -1098,6 +1106,9 @@ _ENTRY(set_context)
1098 sync 1106 sync
1099 isync 1107 isync
1100 blr 1108 blr
11094: trap
1110 EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
1111 blr
1101 1112
1102/* 1113/*
1103 * An undocumented "feature" of 604e requires that the v bit 1114 * An undocumented "feature" of 604e requires that the v bit
@@ -1131,7 +1142,7 @@ clear_bats:
1131 mtspr SPRN_IBAT2L,r10 1142 mtspr SPRN_IBAT2L,r10
1132 mtspr SPRN_IBAT3U,r10 1143 mtspr SPRN_IBAT3U,r10
1133 mtspr SPRN_IBAT3L,r10 1144 mtspr SPRN_IBAT3L,r10
1134BEGIN_FTR_SECTION 1145BEGIN_MMU_FTR_SECTION
1135 /* Here's a tweak: at this point, CPU setup have 1146 /* Here's a tweak: at this point, CPU setup have
1136 * not been called yet, so HIGH_BAT_EN may not be 1147 * not been called yet, so HIGH_BAT_EN may not be
1137 * set in HID0 for the 745x processors. However, it 1148 * set in HID0 for the 745x processors. However, it
@@ -1154,7 +1165,7 @@ BEGIN_FTR_SECTION
1154 mtspr SPRN_IBAT6L,r10 1165 mtspr SPRN_IBAT6L,r10
1155 mtspr SPRN_IBAT7U,r10 1166 mtspr SPRN_IBAT7U,r10
1156 mtspr SPRN_IBAT7L,r10 1167 mtspr SPRN_IBAT7L,r10
1157END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) 1168END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1158 blr 1169 blr
1159 1170
1160flush_tlbs: 1171flush_tlbs:
@@ -1178,11 +1189,11 @@ mmu_off:
1178 1189
1179/* 1190/*
1180 * Use the first pair of BAT registers to map the 1st 16MB 1191 * Use the first pair of BAT registers to map the 1st 16MB
1181 * of RAM to KERNELBASE. From this point on we can't safely 1192 * of RAM to PAGE_OFFSET. From this point on we can't safely
1182 * call OF any more. 1193 * call OF any more.
1183 */ 1194 */
1184initial_bats: 1195initial_bats:
1185 lis r11,KERNELBASE@h 1196 lis r11,PAGE_OFFSET@h
1186 mfspr r9,SPRN_PVR 1197 mfspr r9,SPRN_PVR
1187 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ 1198 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1188 cmpwi 0,r9,1 1199 cmpwi 0,r9,1
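Besides switching the BAT code from CPU feature sections to the new MMU feature sections and relocating against PHYSICAL_START rather than 0, this file renames set_context to switch_mmu_context and changes its calling convention: it now takes the previous and next mm_struct, reads the context id and PGD out of the mm itself, and traps if the context was never allocated (negative id). A sketch of the C-side view; the prototype mirrors the comment added above, and the wrapper is illustrative only:

    /* sketch: C prototype plus an illustrative caller */
    struct mm_struct;

    void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);

    static inline void example_switch_mm(struct mm_struct *prev,
                                         struct mm_struct *next)
    {
            /* loads the segment registers for next; the assembly traps
             * (EMIT_BUG_ENTRY) if next's context id is still unallocated */
            switch_mmu_context(prev, next);
    }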
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index f3a1ea9d7fe4..b56fecc93a16 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -69,6 +69,17 @@ _ENTRY(_start);
69 li r24,0 /* CPU number */ 69 li r24,0 /* CPU number */
70 70
71/* 71/*
72 * In case the firmware didn't do it, we apply some workarounds
73 * that are good for all 440 core variants here
74 */
75 mfspr r3,SPRN_CCR0
76 rlwinm r3,r3,0,0,27 /* disable icache prefetch */
77 isync
78 mtspr SPRN_CCR0,r3
79 isync
80 sync
81
82/*
72 * Set up the initial MMU state 83 * Set up the initial MMU state
73 * 84 *
74 * We are still executing code at the virtual address 85 * We are still executing code at the virtual address
@@ -391,12 +402,14 @@ interrupt_base:
391 rlwimi r13,r12,10,30,30 402 rlwimi r13,r12,10,30,30
392 403
393 /* Load the PTE */ 404 /* Load the PTE */
394 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ 405 /* Compute pgdir/pmd offset */
406 rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
395 lwzx r11, r12, r11 /* Get pgd/pmd entry */ 407 lwzx r11, r12, r11 /* Get pgd/pmd entry */
396 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ 408 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
397 beq 2f /* Bail if no table */ 409 beq 2f /* Bail if no table */
398 410
399 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ 411 /* Compute pte address */
412 rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
400 lwz r11, 0(r12) /* Get high word of pte entry */ 413 lwz r11, 0(r12) /* Get high word of pte entry */
401 lwz r12, 4(r12) /* Get low word of pte entry */ 414 lwz r12, 4(r12) /* Get low word of pte entry */
402 415
@@ -485,12 +498,14 @@ tlb_44x_patch_hwater_D:
485 /* Make up the required permissions */ 498 /* Make up the required permissions */
486 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC 499 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
487 500
488 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ 501 /* Compute pgdir/pmd offset */
502 rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
489 lwzx r11, r12, r11 /* Get pgd/pmd entry */ 503 lwzx r11, r12, r11 /* Get pgd/pmd entry */
490 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ 504 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
491 beq 2f /* Bail if no table */ 505 beq 2f /* Bail if no table */
492 506
493 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ 507 /* Compute pte address */
508 rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
494 lwz r11, 0(r12) /* Get high word of pte entry */ 509 lwz r11, 0(r12) /* Get high word of pte entry */
495 lwz r12, 4(r12) /* Get low word of pte entry */ 510 lwz r12, 4(r12) /* Get low word of pte entry */
496 511
@@ -554,15 +569,16 @@ tlb_44x_patch_hwater_I:
554 */ 569 */
555finish_tlb_load: 570finish_tlb_load:
556 /* Combine RPN & ERPN an write WS 0 */ 571 /* Combine RPN & ERPN an write WS 0 */
557 rlwimi r11,r12,0,0,19 572 rlwimi r11,r12,0,0,31-PAGE_SHIFT
558 tlbwe r11,r13,PPC44x_TLB_XLAT 573 tlbwe r11,r13,PPC44x_TLB_XLAT
559 574
560 /* 575 /*
561 * Create WS1. This is the faulting address (EPN), 576 * Create WS1. This is the faulting address (EPN),
562 * page size, and valid flag. 577 * page size, and valid flag.
563 */ 578 */
564 li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K 579 li r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
565 rlwimi r10,r11,0,20,31 /* Insert valid and page size*/ 580 /* Insert valid and page size */
581 rlwimi r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
566 tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */ 582 tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */
567 583
568 /* And WS 2 */ 584 /* And WS 2 */
@@ -634,12 +650,12 @@ _GLOBAL(set_context)
634 * goes at the beginning of the data segment, which is page-aligned. 650 * goes at the beginning of the data segment, which is page-aligned.
635 */ 651 */
636 .data 652 .data
637 .align 12 653 .align PAGE_SHIFT
638 .globl sdata 654 .globl sdata
639sdata: 655sdata:
640 .globl empty_zero_page 656 .globl empty_zero_page
641empty_zero_page: 657empty_zero_page:
642 .space 4096 658 .space PAGE_SIZE
643 659
644/* 660/*
645 * To support >32-bit physical addresses, we use an 8KB pgdir. 661 * To support >32-bit physical addresses, we use an 8KB pgdir.
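Two independent changes here: an early CCR0 write that disables instruction-cache prefetch on all 440 core variants before the MMU is fully set up, and a rework of the TLB-miss handlers so the pgdir/PTE offset fields come from PPC44x_PGD_OFF_SHIFT / PPC44x_PTE_ADD_SHIFT (and the alignment and size directives from PAGE_SHIFT / PAGE_SIZE) instead of hard-coded 4K constants. For orientation, a C rendering of the two-level walk those handlers perform, using the generic macros rather than the 44x-specific ones, so treat it as an approximation:

    /* sketch: software page-table walk equivalent to the asm above */
    #include <asm/page.h>
    #include <asm/pgtable.h>

    static unsigned long *example_pte_lookup(unsigned long *pgd_base,
                                             unsigned long addr)
    {
            unsigned long entry, *pte_base;

            entry = pgd_base[addr >> PGDIR_SHIFT];  /* "Get pgd/pmd entry" */
            if (!entry)
                    return NULL;                    /* "Bail if no table" */

            pte_base = (unsigned long *)(entry & PAGE_MASK);
            /* "Compute pte address" within the page-table page */
            return &pte_base[(addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
    }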
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 590304c24dad..11b549acc034 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -92,6 +92,7 @@ _ENTRY(_start);
92 * if needed 92 * if needed
93 */ 93 */
94 94
95_ENTRY(__early_start)
95/* 1. Find the index of the entry we're executing in */ 96/* 1. Find the index of the entry we're executing in */
96 bl invstr /* Find our address */ 97 bl invstr /* Find our address */
97invstr: mflr r6 /* Make it accessible */ 98invstr: mflr r6 /* Make it accessible */
@@ -235,36 +236,40 @@ skpinv: addi r6,r6,1 /* Increment */
235 tlbivax 0,r9 236 tlbivax 0,r9
236 TLBSYNC 237 TLBSYNC
237 238
239/* The mapping only needs to be cache-coherent on SMP */
240#ifdef CONFIG_SMP
241#define M_IF_SMP MAS2_M
242#else
243#define M_IF_SMP 0
244#endif
245
238/* 6. Setup KERNELBASE mapping in TLB1[0] */ 246/* 6. Setup KERNELBASE mapping in TLB1[0] */
239 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ 247 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
240 mtspr SPRN_MAS0,r6 248 mtspr SPRN_MAS0,r6
241 lis r6,(MAS1_VALID|MAS1_IPROT)@h 249 lis r6,(MAS1_VALID|MAS1_IPROT)@h
242 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l 250 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
243 mtspr SPRN_MAS1,r6 251 mtspr SPRN_MAS1,r6
244 li r7,0 252 lis r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@h
245 lis r6,PAGE_OFFSET@h 253 ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@l
246 ori r6,r6,PAGE_OFFSET@l
247 rlwimi r6,r7,0,20,31
248 mtspr SPRN_MAS2,r6 254 mtspr SPRN_MAS2,r6
249 mtspr SPRN_MAS3,r8 255 mtspr SPRN_MAS3,r8
250 tlbwe 256 tlbwe
251 257
252/* 7. Jump to KERNELBASE mapping */ 258/* 7. Jump to KERNELBASE mapping */
253 lis r6,KERNELBASE@h 259 lis r6,(KERNELBASE & ~0xfff)@h
254 ori r6,r6,KERNELBASE@l 260 ori r6,r6,(KERNELBASE & ~0xfff)@l
255 rlwimi r6,r7,0,20,31
256 lis r7,MSR_KERNEL@h 261 lis r7,MSR_KERNEL@h
257 ori r7,r7,MSR_KERNEL@l 262 ori r7,r7,MSR_KERNEL@l
258 bl 1f /* Find our address */ 263 bl 1f /* Find our address */
2591: mflr r9 2641: mflr r9
260 rlwimi r6,r9,0,20,31 265 rlwimi r6,r9,0,20,31
261 addi r6,r6,24 266 addi r6,r6,(2f - 1b)
262 mtspr SPRN_SRR0,r6 267 mtspr SPRN_SRR0,r6
263 mtspr SPRN_SRR1,r7 268 mtspr SPRN_SRR1,r7
264 rfi /* start execution out of TLB1[0] entry */ 269 rfi /* start execution out of TLB1[0] entry */
265 270
266/* 8. Clear out the temp mapping */ 271/* 8. Clear out the temp mapping */
267 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ 2722: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
268 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ 273 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
269 mtspr SPRN_MAS0,r7 274 mtspr SPRN_MAS0,r7
270 tlbre 275 tlbre
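Step 6 now composes MAS2 for the pinned 64MB kernel mapping with a MAS2_VAL()-style helper and, on SMP, sets the M (memory-coherence required) attribute via M_IF_SMP; step 7 computes the rfi target as (2f - 1b) instead of a hard-coded 24-byte offset, so inserting instructions no longer breaks the jump. A rough C-preprocessor sketch of the MAS2 composition; the real MAS2_VAL() also takes the page size, so this two-argument form and the EXAMPLE_ names are simplifications:

    /* sketch: EPN of the mapping combined with WIMGE attribute bits */
    #ifdef CONFIG_SMP
    #define EXAMPLE_M_IF_SMP        MAS2_M  /* mark the mapping coherent */
    #else
    #define EXAMPLE_M_IF_SMP        0
    #endif

    #define EXAMPLE_MAS2_VAL(addr, flags)   (((addr) & MAS2_EPN) | (flags))

    /* mas2 = EXAMPLE_MAS2_VAL(PAGE_OFFSET, EXAMPLE_M_IF_SMP); */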
@@ -344,6 +349,15 @@ skpinv: addi r6,r6,1 /* Increment */
344 mtspr SPRN_DBSR,r2 349 mtspr SPRN_DBSR,r2
345#endif 350#endif
346 351
352#ifdef CONFIG_SMP
353 /* Check to see if we're the second processor, and jump
354 * to the secondary_start code if so
355 */
356 mfspr r24,SPRN_PIR
357 cmpwi r24,0
358 bne __secondary_start
359#endif
360
347 /* 361 /*
348 * This is where the main kernel code starts. 362 * This is where the main kernel code starts.
349 */ 363 */
@@ -685,12 +699,13 @@ interrupt_base:
685 /* SPE Floating Point Data */ 699 /* SPE Floating Point Data */
686#ifdef CONFIG_SPE 700#ifdef CONFIG_SPE
687 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); 701 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
688#else
689 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
690#endif /* CONFIG_SPE */
691 702
692 /* SPE Floating Point Round */ 703 /* SPE Floating Point Round */
704 EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
705#else
706 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
693 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE) 707 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
708#endif /* CONFIG_SPE */
694 709
695 /* Performance Monitor */ 710 /* Performance Monitor */
696 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD) 711 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
@@ -735,6 +750,9 @@ finish_tlb_load:
735#else 750#else
736 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ 751 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
737#endif 752#endif
753#ifdef CONFIG_SMP
754 ori r12, r12, MAS2_M
755#endif
738 mtspr SPRN_MAS2, r12 756 mtspr SPRN_MAS2, r12
739 757
740 li r10, (_PAGE_HWEXEC | _PAGE_PRESENT) 758 li r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
@@ -746,15 +764,15 @@ finish_tlb_load:
746 iseleq r12, r12, r10 764 iseleq r12, r12, r10
747 765
748#ifdef CONFIG_PTE_64BIT 766#ifdef CONFIG_PTE_64BIT
7492: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ 767 rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
750 rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ 768 rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
751 mtspr SPRN_MAS3, r12 769 mtspr SPRN_MAS3, r12
752BEGIN_FTR_SECTION 770BEGIN_MMU_FTR_SECTION
753 srwi r10, r13, 8 /* grab RPN[8:31] */ 771 srwi r10, r13, 8 /* grab RPN[8:31] */
754 mtspr SPRN_MAS7, r10 772 mtspr SPRN_MAS7, r10
755END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) 773END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
756#else 774#else
7572: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ 775 rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
758 mtspr SPRN_MAS3, r11 776 mtspr SPRN_MAS3, r11
759#endif 777#endif
760#ifdef CONFIG_E200 778#ifdef CONFIG_E200
@@ -1037,6 +1055,63 @@ _GLOBAL(flush_dcache_L1)
1037 1055
1038 blr 1056 blr
1039 1057
1058#ifdef CONFIG_SMP
1059/* When we get here, r24 needs to hold the CPU # */
1060 .globl __secondary_start
1061__secondary_start:
1062 lis r3,__secondary_hold_acknowledge@h
1063 ori r3,r3,__secondary_hold_acknowledge@l
1064 stw r24,0(r3)
1065
1066 li r3,0
1067 mr r4,r24 /* Why? */
1068 bl call_setup_cpu
1069
1070 lis r3,tlbcam_index@ha
1071 lwz r3,tlbcam_index@l(r3)
1072 mtctr r3
1073 li r26,0 /* r26 safe? */
1074
1075 /* Load each CAM entry */
10761: mr r3,r26
1077 bl loadcam_entry
1078 addi r26,r26,1
1079 bdnz 1b
1080
1081 /* get current_thread_info and current */
1082 lis r1,secondary_ti@ha
1083 lwz r1,secondary_ti@l(r1)
1084 lwz r2,TI_TASK(r1)
1085
1086 /* stack */
1087 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1088 li r0,0
1089 stw r0,0(r1)
1090
1091 /* ptr to current thread */
1092 addi r4,r2,THREAD /* address of our thread_struct */
1093 mtspr SPRN_SPRG3,r4
1094
1095 /* Setup the defaults for TLB entries */
1096 li r4,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
1097 mtspr SPRN_MAS4,r4
1098
1099 /* Jump to start_secondary */
1100 lis r4,MSR_KERNEL@h
1101 ori r4,r4,MSR_KERNEL@l
1102 lis r3,start_secondary@h
1103 ori r3,r3,start_secondary@l
1104 mtspr SPRN_SRR0,r3
1105 mtspr SPRN_SRR1,r4
1106 sync
1107 rfi
1108 sync
1109
1110 .globl __secondary_hold_acknowledge
1111__secondary_hold_acknowledge:
1112 .long -1
1113#endif
1114
1040/* 1115/*
1041 * We put a few things here that have to be page-aligned. This stuff 1116 * We put a few things here that have to be page-aligned. This stuff
1042 * goes at the beginning of the data segment, which is page-aligned. 1117 * goes at the beginning of the data segment, which is page-aligned.
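The SMP bring-up added above has each secondary core write its CPU number (taken from SPRN_PIR) into __secondary_hold_acknowledge before it rfi's into start_secondary. A platform's kick-CPU hook can use that store as a handshake; the fragment below is a minimal sketch of such a wait loop (the release mechanism, the timeout value and the function name are illustrative assumptions, not part of this patch):

        #include <linux/delay.h>
        #include <linux/errno.h>

        extern volatile int __secondary_hold_acknowledge; /* written by __secondary_start above */

        static int smp_kick_cpu_sketch(int nr)
        {
                int timeout = 500;

                /* platform-specific release of core 'nr' would go here (assumption) */

                /* wait for the secondary to acknowledge with its CPU number */
                while (__secondary_hold_acknowledge != nr && --timeout)
                        udelay(100);

                return timeout ? 0 : -EBUSY;
        }
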
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 64299d28f364..6e3f62493659 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -47,7 +47,7 @@
47#include <asm/abs_addr.h> 47#include <asm/abs_addr.h>
48 48
49static struct device ibmebus_bus_device = { /* fake "parent" device */ 49static struct device ibmebus_bus_device = { /* fake "parent" device */
50 .bus_id = "ibmebus", 50 .init_name = "ibmebus",
51}; 51};
52 52
53struct bus_type ibmebus_bus_type; 53struct bus_type ibmebus_bus_type;
@@ -231,6 +231,7 @@ void ibmebus_free_irq(u32 ist, void *dev_id)
231 unsigned int irq = irq_find_mapping(NULL, ist); 231 unsigned int irq = irq_find_mapping(NULL, ist);
232 232
233 free_irq(irq, dev_id); 233 free_irq(irq, dev_id);
234 irq_dispose_mapping(irq);
234} 235}
235EXPORT_SYMBOL(ibmebus_free_irq); 236EXPORT_SYMBOL(ibmebus_free_irq);
236 237
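The added irq_dispose_mapping() makes ibmebus_free_irq() the true inverse of ibmebus_request_irq(), which creates the virq mapping in the first place. The pairing now looks roughly like this (a condensed sketch, not the literal driver code):

        /* request side: translate the interrupt source token (ist) and claim it */
        unsigned int irq = irq_create_mapping(NULL, ist);
        if (irq == NO_IRQ)
                return -EINVAL;
        rc = request_irq(irq, handler, 0, "ibmebus", dev_id);

        /* free side, as patched above: release the handler, then drop the mapping */
        free_irq(irq, dev_id);
        irq_dispose_mapping(irq);
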
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index 4c85b8d56478..688b329800bd 100644
--- a/arch/powerpc/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
@@ -7,7 +7,6 @@
7#include <linux/mqueue.h> 7#include <linux/mqueue.h>
8#include <asm/uaccess.h> 8#include <asm/uaccess.h>
9 9
10static struct fs_struct init_fs = INIT_FS;
11static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 10static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
12static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 11static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
13struct mm_struct init_mm = INIT_MM(init_mm); 12struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index ac2a21f45c75..b3abebb7ee64 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -13,13 +13,17 @@
13#include <linux/reboot.h> 13#include <linux/reboot.h>
14#include <linux/threads.h> 14#include <linux/threads.h>
15#include <linux/lmb.h> 15#include <linux/lmb.h>
16#include <linux/of.h>
16#include <asm/machdep.h> 17#include <asm/machdep.h>
17#include <asm/prom.h> 18#include <asm/prom.h>
19#include <asm/sections.h>
18 20
19void machine_crash_shutdown(struct pt_regs *regs) 21void machine_crash_shutdown(struct pt_regs *regs)
20{ 22{
21 if (ppc_md.machine_crash_shutdown) 23 if (ppc_md.machine_crash_shutdown)
22 ppc_md.machine_crash_shutdown(regs); 24 ppc_md.machine_crash_shutdown(regs);
25 else
26 default_machine_crash_shutdown(regs);
23} 27}
24 28
25/* 29/*
@@ -31,11 +35,8 @@ int machine_kexec_prepare(struct kimage *image)
31{ 35{
32 if (ppc_md.machine_kexec_prepare) 36 if (ppc_md.machine_kexec_prepare)
33 return ppc_md.machine_kexec_prepare(image); 37 return ppc_md.machine_kexec_prepare(image);
34 /* 38 else
35 * Fail if platform doesn't provide its own machine_kexec_prepare 39 return default_machine_kexec_prepare(image);
36 * implementation.
37 */
38 return -ENOSYS;
39} 40}
40 41
41void machine_kexec_cleanup(struct kimage *image) 42void machine_kexec_cleanup(struct kimage *image)
@@ -52,13 +53,11 @@ void machine_kexec(struct kimage *image)
52{ 53{
53 if (ppc_md.machine_kexec) 54 if (ppc_md.machine_kexec)
54 ppc_md.machine_kexec(image); 55 ppc_md.machine_kexec(image);
55 else { 56 else
56 /* 57 default_machine_kexec(image);
57 * Fall back to normal restart if platform doesn't provide 58
58 * its own kexec function, and user insist to kexec... 59 /* Fall back to normal restart if we're still alive. */
59 */ 60 machine_restart(NULL);
60 machine_restart(NULL);
61 }
62 for(;;); 61 for(;;);
63} 62}
64 63
@@ -118,3 +117,71 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
118{ 117{
119 return (start + size) > crashk_res.start && start <= crashk_res.end; 118 return (start + size) > crashk_res.start && start <= crashk_res.end;
120} 119}
120
121/* Values we need to export to the second kernel via the device tree. */
122static unsigned long kernel_end;
123static unsigned long crashk_size;
124
125static struct property kernel_end_prop = {
126 .name = "linux,kernel-end",
127 .length = sizeof(unsigned long),
128 .value = &kernel_end,
129};
130
131static struct property crashk_base_prop = {
132 .name = "linux,crashkernel-base",
133 .length = sizeof(unsigned long),
134 .value = &crashk_res.start,
135};
136
137static struct property crashk_size_prop = {
138 .name = "linux,crashkernel-size",
139 .length = sizeof(unsigned long),
140 .value = &crashk_size,
141};
142
143static void __init export_crashk_values(struct device_node *node)
144{
145 struct property *prop;
146
147 /* There might be existing crash kernel properties, but we can't
148 * be sure what's in them, so remove them. */
149 prop = of_find_property(node, "linux,crashkernel-base", NULL);
150 if (prop)
151 prom_remove_property(node, prop);
152
153 prop = of_find_property(node, "linux,crashkernel-size", NULL);
154 if (prop)
155 prom_remove_property(node, prop);
156
157 if (crashk_res.start != 0) {
158 prom_add_property(node, &crashk_base_prop);
159 crashk_size = crashk_res.end - crashk_res.start + 1;
160 prom_add_property(node, &crashk_size_prop);
161 }
162}
163
164static int __init kexec_setup(void)
165{
166 struct device_node *node;
167 struct property *prop;
168
169 node = of_find_node_by_path("/chosen");
170 if (!node)
171 return -ENOENT;
172
173 /* remove any stale properties so ours can be found */
174 prop = of_find_property(node, kernel_end_prop.name, NULL);
175 if (prop)
176 prom_remove_property(node, prop);
177
178 /* information needed by userspace when using default_machine_kexec */
179 kernel_end = __pa(_end);
180 prom_add_property(node, &kernel_end_prop);
181
182 export_crashk_values(node);
183
184 of_node_put(node);
185 return 0;
186}
187late_initcall(kexec_setup);
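The properties added here surface under /proc/device-tree/chosen, which is where kexec/kdump userspace picks them up. The program below is a minimal sketch of such a consumer; the raw big-endian cell parsing is an assumption based on the property length being sizeof(unsigned long):

        #include <stdio.h>
        #include <stdint.h>

        /* Read one of the cells exported above, e.g. "linux,kernel-end". */
        int main(void)
        {
                unsigned char buf[8];
                uint64_t val = 0;
                size_t i, n;
                FILE *f = fopen("/proc/device-tree/chosen/linux,kernel-end", "rb");

                if (!f)
                        return 1;
                n = fread(buf, 1, sizeof(buf), f); /* 4 bytes on ppc32, 8 on ppc64 */
                fclose(f);
                for (i = 0; i < n; i++)
                        val = (val << 8) | buf[i]; /* device-tree cells are big-endian */
                printf("linux,kernel-end = 0x%llx\n", (unsigned long long)val);
                return 0;
        }
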
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 3c4ca046e854..49e705fcee6d 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -289,7 +289,7 @@ void default_machine_kexec(struct kimage *image)
289} 289}
290 290
291/* Values we need to export to the second kernel via the device tree. */ 291/* Values we need to export to the second kernel via the device tree. */
292static unsigned long htab_base, kernel_end; 292static unsigned long htab_base;
293 293
294static struct property htab_base_prop = { 294static struct property htab_base_prop = {
295 .name = "linux,htab-base", 295 .name = "linux,htab-base",
@@ -303,25 +303,20 @@ static struct property htab_size_prop = {
303 .value = &htab_size_bytes, 303 .value = &htab_size_bytes,
304}; 304};
305 305
306static struct property kernel_end_prop = { 306static int __init export_htab_values(void)
307 .name = "linux,kernel-end",
308 .length = sizeof(unsigned long),
309 .value = &kernel_end,
310};
311
312static void __init export_htab_values(void)
313{ 307{
314 struct device_node *node; 308 struct device_node *node;
315 struct property *prop; 309 struct property *prop;
316 310
311 /* On machines with no htab htab_address is NULL */
312 if (!htab_address)
313 return -ENODEV;
314
317 node = of_find_node_by_path("/chosen"); 315 node = of_find_node_by_path("/chosen");
318 if (!node) 316 if (!node)
319 return; 317 return -ENODEV;
320 318
321 /* remove any stale propertys so ours can be found */ 319 /* remove any stale propertys so ours can be found */
322 prop = of_find_property(node, kernel_end_prop.name, NULL);
323 if (prop)
324 prom_remove_property(node, prop);
325 prop = of_find_property(node, htab_base_prop.name, NULL); 320 prop = of_find_property(node, htab_base_prop.name, NULL);
326 if (prop) 321 if (prop)
327 prom_remove_property(node, prop); 322 prom_remove_property(node, prop);
@@ -329,68 +324,11 @@ static void __init export_htab_values(void)
329 if (prop) 324 if (prop)
330 prom_remove_property(node, prop); 325 prom_remove_property(node, prop);
331 326
332 /* information needed by userspace when using default_machine_kexec */
333 kernel_end = __pa(_end);
334 prom_add_property(node, &kernel_end_prop);
335
336 /* On machines with no htab htab_address is NULL */
337 if (NULL == htab_address)
338 goto out;
339
340 htab_base = __pa(htab_address); 327 htab_base = __pa(htab_address);
341 prom_add_property(node, &htab_base_prop); 328 prom_add_property(node, &htab_base_prop);
342 prom_add_property(node, &htab_size_prop); 329 prom_add_property(node, &htab_size_prop);
343 330
344 out:
345 of_node_put(node);
346}
347
348static struct property crashk_base_prop = {
349 .name = "linux,crashkernel-base",
350 .length = sizeof(unsigned long),
351 .value = &crashk_res.start,
352};
353
354static unsigned long crashk_size;
355
356static struct property crashk_size_prop = {
357 .name = "linux,crashkernel-size",
358 .length = sizeof(unsigned long),
359 .value = &crashk_size,
360};
361
362static void __init export_crashk_values(void)
363{
364 struct device_node *node;
365 struct property *prop;
366
367 node = of_find_node_by_path("/chosen");
368 if (!node)
369 return;
370
371 /* There might be existing crash kernel properties, but we can't
372 * be sure what's in them, so remove them. */
373 prop = of_find_property(node, "linux,crashkernel-base", NULL);
374 if (prop)
375 prom_remove_property(node, prop);
376
377 prop = of_find_property(node, "linux,crashkernel-size", NULL);
378 if (prop)
379 prom_remove_property(node, prop);
380
381 if (crashk_res.start != 0) {
382 prom_add_property(node, &crashk_base_prop);
383 crashk_size = crashk_res.end - crashk_res.start + 1;
384 prom_add_property(node, &crashk_size_prop);
385 }
386
387 of_node_put(node); 331 of_node_put(node);
388}
389
390static int __init kexec_setup(void)
391{
392 export_htab_values();
393 export_crashk_values();
394 return 0; 332 return 0;
395} 333}
396__initcall(kexec_setup); 334late_initcall(export_htab_values);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 5c33bc14bd9f..15f28e0de78d 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -29,6 +29,7 @@
29#include <asm/asm-offsets.h> 29#include <asm/asm-offsets.h>
30#include <asm/processor.h> 30#include <asm/processor.h>
31#include <asm/kexec.h> 31#include <asm/kexec.h>
32#include <asm/bug.h>
32 33
33 .text 34 .text
34 35
@@ -271,231 +272,6 @@ _GLOBAL(real_writeb)
271 272
272#endif /* CONFIG_40x */ 273#endif /* CONFIG_40x */
273 274
274/*
275 * Flush MMU TLB
276 */
277#ifndef CONFIG_FSL_BOOKE
278_GLOBAL(_tlbil_all)
279_GLOBAL(_tlbil_pid)
280#endif
281_GLOBAL(_tlbia)
282#if defined(CONFIG_40x)
283 sync /* Flush to memory before changing mapping */
284 tlbia
285 isync /* Flush shadow TLB */
286#elif defined(CONFIG_44x)
287 li r3,0
288 sync
289
290 /* Load high watermark */
291 lis r4,tlb_44x_hwater@ha
292 lwz r5,tlb_44x_hwater@l(r4)
293
2941: tlbwe r3,r3,PPC44x_TLB_PAGEID
295 addi r3,r3,1
296 cmpw 0,r3,r5
297 ble 1b
298
299 isync
300#elif defined(CONFIG_FSL_BOOKE)
301 /* Invalidate all entries in TLB0 */
302 li r3, 0x04
303 tlbivax 0,3
304 /* Invalidate all entries in TLB1 */
305 li r3, 0x0c
306 tlbivax 0,3
307 msync
308#ifdef CONFIG_SMP
309 tlbsync
310#endif /* CONFIG_SMP */
311#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
312#if defined(CONFIG_SMP)
313 rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
314 lwz r8,TI_CPU(r8)
315 oris r8,r8,10
316 mfmsr r10
317 SYNC
318 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
319 rlwinm r0,r0,0,28,26 /* clear DR */
320 mtmsr r0
321 SYNC_601
322 isync
323 lis r9,mmu_hash_lock@h
324 ori r9,r9,mmu_hash_lock@l
325 tophys(r9,r9)
32610: lwarx r7,0,r9
327 cmpwi 0,r7,0
328 bne- 10b
329 stwcx. r8,0,r9
330 bne- 10b
331 sync
332 tlbia
333 sync
334 TLBSYNC
335 li r0,0
336 stw r0,0(r9) /* clear mmu_hash_lock */
337 mtmsr r10
338 SYNC_601
339 isync
340#else /* CONFIG_SMP */
341 sync
342 tlbia
343 sync
344#endif /* CONFIG_SMP */
345#endif /* ! defined(CONFIG_40x) */
346 blr
347
348/*
349 * Flush MMU TLB for a particular address
350 */
351#ifndef CONFIG_FSL_BOOKE
352_GLOBAL(_tlbil_va)
353#endif
354_GLOBAL(_tlbie)
355#if defined(CONFIG_40x)
356 /* We run the search with interrupts disabled because we have to change
357 * the PID and I don't want to preempt when that happens.
358 */
359 mfmsr r5
360 mfspr r6,SPRN_PID
361 wrteei 0
362 mtspr SPRN_PID,r4
363 tlbsx. r3, 0, r3
364 mtspr SPRN_PID,r6
365 wrtee r5
366 bne 10f
367 sync
368 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
369 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
370 * the TLB entry. */
371 tlbwe r3, r3, TLB_TAG
372 isync
37310:
374
375#elif defined(CONFIG_44x)
376 mfspr r5,SPRN_MMUCR
377 rlwimi r5,r4,0,24,31 /* Set TID */
378
379 /* We have to run the search with interrupts disabled, even critical
380 * and debug interrupts (in fact the only critical exceptions we have
381 * are debug and machine check). Otherwise an interrupt which causes
382 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
383 mfmsr r4
384 lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
385 addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
386 andc r6,r4,r6
387 mtmsr r6
388 mtspr SPRN_MMUCR,r5
389 tlbsx. r3, 0, r3
390 mtmsr r4
391 bne 10f
392 sync
393 /* There are only 64 TLB entries, so r3 < 64,
394 * which means bit 22, is clear. Since 22 is
395 * the V bit in the TLB_PAGEID, loading this
396 * value will invalidate the TLB entry.
397 */
398 tlbwe r3, r3, PPC44x_TLB_PAGEID
399 isync
40010:
401#elif defined(CONFIG_FSL_BOOKE)
402 rlwinm r4, r3, 0, 0, 19
403 ori r5, r4, 0x08 /* TLBSEL = 1 */
404 tlbivax 0, r4
405 tlbivax 0, r5
406 msync
407#if defined(CONFIG_SMP)
408 tlbsync
409#endif /* CONFIG_SMP */
410#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
411#if defined(CONFIG_SMP)
412 rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
413 lwz r8,TI_CPU(r8)
414 oris r8,r8,11
415 mfmsr r10
416 SYNC
417 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
418 rlwinm r0,r0,0,28,26 /* clear DR */
419 mtmsr r0
420 SYNC_601
421 isync
422 lis r9,mmu_hash_lock@h
423 ori r9,r9,mmu_hash_lock@l
424 tophys(r9,r9)
42510: lwarx r7,0,r9
426 cmpwi 0,r7,0
427 bne- 10b
428 stwcx. r8,0,r9
429 bne- 10b
430 eieio
431 tlbie r3
432 sync
433 TLBSYNC
434 li r0,0
435 stw r0,0(r9) /* clear mmu_hash_lock */
436 mtmsr r10
437 SYNC_601
438 isync
439#else /* CONFIG_SMP */
440 tlbie r3
441 sync
442#endif /* CONFIG_SMP */
443#endif /* ! CONFIG_40x */
444 blr
445
446#if defined(CONFIG_FSL_BOOKE)
447/*
448 * Flush MMU TLB, but only on the local processor (no broadcast)
449 */
450_GLOBAL(_tlbil_all)
451#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
452 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
453 li r3,(MMUCSR0_TLBFI)@l
454 mtspr SPRN_MMUCSR0, r3
4551:
456 mfspr r3,SPRN_MMUCSR0
457 andi. r3,r3,MMUCSR0_TLBFI@l
458 bne 1b
459 blr
460
461/*
462 * Flush MMU TLB for a particular process id, but only on the local processor
463 * (no broadcast)
464 */
465_GLOBAL(_tlbil_pid)
466/* we currently do an invalidate all since we don't have per pid invalidate */
467 li r3,(MMUCSR0_TLBFI)@l
468 mtspr SPRN_MMUCSR0, r3
4691:
470 mfspr r3,SPRN_MMUCSR0
471 andi. r3,r3,MMUCSR0_TLBFI@l
472 bne 1b
473 msync
474 isync
475 blr
476
477/*
478 * Flush MMU TLB for a particular address, but only on the local processor
479 * (no broadcast)
480 */
481_GLOBAL(_tlbil_va)
482 mfmsr r10
483 wrteei 0
484 slwi r4,r4,16
485 mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
486 tlbsx 0,r3
487 mfspr r4,SPRN_MAS1 /* check valid */
488 andis. r3,r4,MAS1_VALID@h
489 beq 1f
490 rlwinm r4,r4,0,1,31
491 mtspr SPRN_MAS1,r4
492 tlbwe
493 msync
494 isync
4951: wrtee r10
496 blr
497#endif /* CONFIG_FSL_BOOKE */
498
499 275
500/* 276/*
501 * Flush instruction cache. 277 * Flush instruction cache.
@@ -650,8 +426,8 @@ _GLOBAL(__flush_dcache_icache)
650BEGIN_FTR_SECTION 426BEGIN_FTR_SECTION
651 blr 427 blr
652END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) 428END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
653 rlwinm r3,r3,0,0,19 /* Get page base address */ 429 rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
654 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ 430 li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
655 mtctr r4 431 mtctr r4
656 mr r6,r3 432 mr r6,r3
6570: dcbst 0,r3 /* Write line to ram */ 4330: dcbst 0,r3 /* Write line to ram */
@@ -691,8 +467,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
691 rlwinm r0,r10,0,28,26 /* clear DR */ 467 rlwinm r0,r10,0,28,26 /* clear DR */
692 mtmsr r0 468 mtmsr r0
693 isync 469 isync
694 rlwinm r3,r3,0,0,19 /* Get page base address */ 470 rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
695 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ 471 li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
696 mtctr r4 472 mtctr r4
697 mr r6,r3 473 mr r6,r3
6980: dcbst 0,r3 /* Write line to ram */ 4740: dcbst 0,r3 /* Write line to ram */
@@ -716,7 +492,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
716 * void clear_pages(void *page, int order) ; 492 * void clear_pages(void *page, int order) ;
717 */ 493 */
718_GLOBAL(clear_pages) 494_GLOBAL(clear_pages)
719 li r0,4096/L1_CACHE_BYTES 495 li r0,PAGE_SIZE/L1_CACHE_BYTES
720 slw r0,r0,r4 496 slw r0,r0,r4
721 mtctr r0 497 mtctr r0
722#ifdef CONFIG_8xx 498#ifdef CONFIG_8xx
@@ -774,7 +550,7 @@ _GLOBAL(copy_page)
774 dcbt r5,r4 550 dcbt r5,r4
775 li r11,L1_CACHE_BYTES+4 551 li r11,L1_CACHE_BYTES+4
776#endif /* MAX_COPY_PREFETCH */ 552#endif /* MAX_COPY_PREFETCH */
777 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH 553 li r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
778 crclr 4*cr0+eq 554 crclr 4*cr0+eq
7792: 5552:
780 mtctr r0 556 mtctr r0
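The flush loops above now derive their trip count from PAGE_SIZE/L1_CACHE_BYTES instead of hard-coding 4096, so they stay correct when the kernel is built with larger pages. In C, the __flush_dcache_icache sequence corresponds roughly to the sketch below (illustrative only; the real routine stays in assembly, and PAGE_SIZE/L1_CACHE_BYTES come from <asm/page.h> and <asm/cache.h>):

        static inline void flush_dcache_icache_page_sketch(void *page)
        {
                unsigned long addr = (unsigned long)page & ~(PAGE_SIZE - 1);
                unsigned long p;

                for (p = addr; p < addr + PAGE_SIZE; p += L1_CACHE_BYTES)
                        asm volatile("dcbst 0,%0" : : "r" (p) : "memory"); /* push line to RAM */
                asm volatile("sync" : : : "memory");
                for (p = addr; p < addr + PAGE_SIZE; p += L1_CACHE_BYTES)
                        asm volatile("icbi 0,%0" : : "r" (p) : "memory");  /* invalidate icache line */
                asm volatile("sync; isync" : : : "memory");
        }
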
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 7ff292475269..43e7e3a7f130 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -78,6 +78,12 @@ int module_finalize(const Elf_Ehdr *hdr,
78 (void *)sect->sh_addr, 78 (void *)sect->sh_addr,
79 (void *)sect->sh_addr + sect->sh_size); 79 (void *)sect->sh_addr + sect->sh_size);
80 80
81 sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup");
82 if (sect != NULL)
83 do_feature_fixups(cur_cpu_spec->mmu_features,
84 (void *)sect->sh_addr,
85 (void *)sect->sh_addr + sect->sh_size);
86
81#ifdef CONFIG_PPC64 87#ifdef CONFIG_PPC64
82 sect = find_section(hdr, sechdrs, "__fw_ftr_fixup"); 88 sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
83 if (sect != NULL) 89 if (sect != NULL)
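The new __mmu_ftr_fixup pass lets modules use the BEGIN_MMU_FTR_SECTION/END_MMU_FTR_SECTION_IFSET macros seen earlier in this series (e.g. around MMU_FTR_BIG_PHYS). It mirrors what the core kernel does for vmlinux at boot, roughly along these lines (a sketch; the exact call site and linker symbol names are assumptions):

        extern long __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;

        /* patch alternative instruction sequences to match the running MMU */
        do_feature_fixups(cur_cpu_spec->mmu_features,
                          &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
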
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index f3c9cae01dd5..fa983a59c4ce 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -14,7 +14,6 @@ static void of_device_make_bus_id(struct of_device *dev)
14{ 14{
15 static atomic_t bus_no_reg_magic; 15 static atomic_t bus_no_reg_magic;
16 struct device_node *node = dev->node; 16 struct device_node *node = dev->node;
17 char *name = dev->dev.bus_id;
18 const u32 *reg; 17 const u32 *reg;
19 u64 addr; 18 u64 addr;
20 int magic; 19 int magic;
@@ -27,14 +26,12 @@ static void of_device_make_bus_id(struct of_device *dev)
27 reg = of_get_property(node, "dcr-reg", NULL); 26 reg = of_get_property(node, "dcr-reg", NULL);
28 if (reg) { 27 if (reg) {
29#ifdef CONFIG_PPC_DCR_NATIVE 28#ifdef CONFIG_PPC_DCR_NATIVE
30 snprintf(name, BUS_ID_SIZE, "d%x.%s", 29 dev_set_name(&dev->dev, "d%x.%s", *reg, node->name);
31 *reg, node->name);
32#else /* CONFIG_PPC_DCR_NATIVE */ 30#else /* CONFIG_PPC_DCR_NATIVE */
33 addr = of_translate_dcr_address(node, *reg, NULL); 31 addr = of_translate_dcr_address(node, *reg, NULL);
34 if (addr != OF_BAD_ADDR) { 32 if (addr != OF_BAD_ADDR) {
35 snprintf(name, BUS_ID_SIZE, 33 dev_set_name(&dev->dev, "D%llx.%s",
36 "D%llx.%s", (unsigned long long)addr, 34 (unsigned long long)addr, node->name);
37 node->name);
38 return; 35 return;
39 } 36 }
40#endif /* !CONFIG_PPC_DCR_NATIVE */ 37#endif /* !CONFIG_PPC_DCR_NATIVE */
@@ -48,9 +45,8 @@ static void of_device_make_bus_id(struct of_device *dev)
48 if (reg) { 45 if (reg) {
49 addr = of_translate_address(node, reg); 46 addr = of_translate_address(node, reg);
50 if (addr != OF_BAD_ADDR) { 47 if (addr != OF_BAD_ADDR) {
51 snprintf(name, BUS_ID_SIZE, 48 dev_set_name(&dev->dev, "%llx.%s",
52 "%llx.%s", (unsigned long long)addr, 49 (unsigned long long)addr, node->name);
53 node->name);
54 return; 50 return;
55 } 51 }
56 } 52 }
@@ -60,7 +56,7 @@ static void of_device_make_bus_id(struct of_device *dev)
60 * counter (and pray...) 56 * counter (and pray...)
61 */ 57 */
62 magic = atomic_add_return(1, &bus_no_reg_magic); 58 magic = atomic_add_return(1, &bus_no_reg_magic);
63 snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1); 59 dev_set_name(&dev->dev, "%s.%d", node->name, magic - 1);
64} 60}
65 61
66struct of_device *of_device_alloc(struct device_node *np, 62struct of_device *of_device_alloc(struct device_node *np,
@@ -80,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
80 dev->dev.archdata.of_node = np; 76 dev->dev.archdata.of_node = np;
81 77
82 if (bus_id) 78 if (bus_id)
83 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); 79 dev_set_name(&dev->dev, bus_id);
84 else 80 else
85 of_device_make_bus_id(dev); 81 of_device_make_bus_id(dev);
86 82
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 48a347133f41..c744b327bcab 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -37,6 +37,7 @@ struct lppaca lppaca[] = {
37 .end_of_quantum = 0xfffffffffffffffful, 37 .end_of_quantum = 0xfffffffffffffffful,
38 .slb_count = 64, 38 .slb_count = 64,
39 .vmxregs_in_use = 0, 39 .vmxregs_in_use = 0,
40 .page_ins = 0,
40 }, 41 },
41}; 42};
42 43
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f36936d9fda3..2538030954d8 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -37,13 +37,7 @@
37#include <asm/machdep.h> 37#include <asm/machdep.h>
38#include <asm/ppc-pci.h> 38#include <asm/ppc-pci.h>
39#include <asm/firmware.h> 39#include <asm/firmware.h>
40 40#include <asm/eeh.h>
41#ifdef DEBUG
42#include <asm/udbg.h>
43#define DBG(fmt...) printk(fmt)
44#else
45#define DBG(fmt...)
46#endif
47 41
48static DEFINE_SPINLOCK(hose_spinlock); 42static DEFINE_SPINLOCK(hose_spinlock);
49 43
@@ -53,8 +47,9 @@ static int global_phb_number; /* Global phb counter */
53/* ISA Memory physical address */ 47/* ISA Memory physical address */
54resource_size_t isa_mem_base; 48resource_size_t isa_mem_base;
55 49
56/* Default PCI flags is 0 */ 50/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
57unsigned int ppc_pci_flags; 51unsigned int ppc_pci_flags = 0;
52
58 53
59static struct dma_mapping_ops *pci_dma_ops; 54static struct dma_mapping_ops *pci_dma_ops;
60 55
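With the local DBG() wrapper gone, debug output in this file (and in pci_32.c/pci_64.c below) goes through pr_debug(), which is compiled out by default. A minimal sketch of the two usual ways to get the messages back (paths assume debugfs is available):

        /* 1) per-file: define DEBUG before the includes of the file in question */
        #define DEBUG

        /* 2) with CONFIG_DYNAMIC_DEBUG, enable at run time from a shell:
         *      mount -t debugfs none /sys/kernel/debug
         *      echo 'file pci-common.c +p' > /sys/kernel/debug/dynamic_debug/control
         */
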
@@ -165,8 +160,6 @@ EXPORT_SYMBOL(pci_domain_nr);
165 */ 160 */
166struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) 161struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
167{ 162{
168 if (!have_of)
169 return NULL;
170 while(node) { 163 while(node) {
171 struct pci_controller *hose, *tmp; 164 struct pci_controller *hose, *tmp;
172 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 165 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
@@ -208,26 +201,6 @@ char __devinit *pcibios_setup(char *str)
208 return str; 201 return str;
209} 202}
210 203
211void __devinit pcibios_setup_new_device(struct pci_dev *dev)
212{
213 struct dev_archdata *sd = &dev->dev.archdata;
214
215 sd->of_node = pci_device_to_OF_node(dev);
216
217 DBG("PCI: device %s OF node: %s\n", pci_name(dev),
218 sd->of_node ? sd->of_node->full_name : "<none>");
219
220 sd->dma_ops = pci_dma_ops;
221#ifdef CONFIG_PPC32
222 sd->dma_data = (void *)PCI_DRAM_OFFSET;
223#endif
224 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
225
226 if (ppc_md.pci_dma_dev_setup)
227 ppc_md.pci_dma_dev_setup(dev);
228}
229EXPORT_SYMBOL(pcibios_setup_new_device);
230
231/* 204/*
232 * Reads the interrupt pin to determine if interrupt is use by card. 205 * Reads the interrupt pin to determine if interrupt is use by card.
233 * If the interrupt is used, then gets the interrupt line from the 206 * If the interrupt is used, then gets the interrupt line from the
@@ -252,7 +225,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
252 return -1; 225 return -1;
253#endif 226#endif
254 227
255 DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 228 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
256 229
257#ifdef DEBUG 230#ifdef DEBUG
258 memset(&oirq, 0xff, sizeof(oirq)); 231 memset(&oirq, 0xff, sizeof(oirq));
@@ -276,26 +249,26 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
276 line == 0xff || line == 0) { 249 line == 0xff || line == 0) {
277 return -1; 250 return -1;
278 } 251 }
279 DBG(" -> no map ! Using line %d (pin %d) from PCI config\n", 252 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
280 line, pin); 253 line, pin);
281 254
282 virq = irq_create_mapping(NULL, line); 255 virq = irq_create_mapping(NULL, line);
283 if (virq != NO_IRQ) 256 if (virq != NO_IRQ)
284 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 257 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
285 } else { 258 } else {
286 DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 259 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
287 oirq.size, oirq.specifier[0], oirq.specifier[1], 260 oirq.size, oirq.specifier[0], oirq.specifier[1],
288 oirq.controller->full_name); 261 oirq.controller->full_name);
289 262
290 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 263 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
291 oirq.size); 264 oirq.size);
292 } 265 }
293 if(virq == NO_IRQ) { 266 if(virq == NO_IRQ) {
294 DBG(" -> failed to map !\n"); 267 pr_debug(" Failed to map !\n");
295 return -1; 268 return -1;
296 } 269 }
297 270
298 DBG(" -> mapped to linux irq %d\n", virq); 271 pr_debug(" Mapped to linux irq %d\n", virq);
299 272
300 pci_dev->irq = virq; 273 pci_dev->irq = virq;
301 274
@@ -397,13 +370,10 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
397 } 370 }
398 371
399 /* XXX would be nice to have a way to ask for write-through */ 372 /* XXX would be nice to have a way to ask for write-through */
400 prot |= _PAGE_NO_CACHE;
401 if (write_combine) 373 if (write_combine)
402 prot &= ~_PAGE_GUARDED; 374 return pgprot_noncached_wc(prot);
403 else 375 else
404 prot |= _PAGE_GUARDED; 376 return pgprot_noncached(prot);
405
406 return __pgprot(prot);
407} 377}
408 378
409/* 379/*
@@ -414,19 +384,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
414pgprot_t pci_phys_mem_access_prot(struct file *file, 384pgprot_t pci_phys_mem_access_prot(struct file *file,
415 unsigned long pfn, 385 unsigned long pfn,
416 unsigned long size, 386 unsigned long size,
417 pgprot_t protection) 387 pgprot_t prot)
418{ 388{
419 struct pci_dev *pdev = NULL; 389 struct pci_dev *pdev = NULL;
420 struct resource *found = NULL; 390 struct resource *found = NULL;
421 unsigned long prot = pgprot_val(protection);
422 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; 391 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
423 int i; 392 int i;
424 393
425 if (page_is_ram(pfn)) 394 if (page_is_ram(pfn))
426 return __pgprot(prot); 395 return prot;
427
428 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
429 396
397 prot = pgprot_noncached(prot);
430 for_each_pci_dev(pdev) { 398 for_each_pci_dev(pdev) {
431 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 399 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
432 struct resource *rp = &pdev->resource[i]; 400 struct resource *rp = &pdev->resource[i];
@@ -447,14 +415,14 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
447 } 415 }
448 if (found) { 416 if (found) {
449 if (found->flags & IORESOURCE_PREFETCH) 417 if (found->flags & IORESOURCE_PREFETCH)
450 prot &= ~_PAGE_GUARDED; 418 prot = pgprot_noncached_wc(prot);
451 pci_dev_put(pdev); 419 pci_dev_put(pdev);
452 } 420 }
453 421
454 DBG("non-PCI map for %llx, prot: %lx\n", 422 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
455 (unsigned long long)offset, prot); 423 (unsigned long long)offset, pgprot_val(prot));
456 424
457 return __pgprot(prot); 425 return prot;
458} 426}
459 427
460 428
@@ -610,8 +578,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
610 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); 578 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
611 579
612 vma->vm_pgoff = offset >> PAGE_SHIFT; 580 vma->vm_pgoff = offset >> PAGE_SHIFT;
613 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 581 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
614 | _PAGE_NO_CACHE | _PAGE_GUARDED);
615 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 582 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
616 vma->vm_end - vma->vm_start, 583 vma->vm_end - vma->vm_start,
617 vma->vm_page_prot); 584 vma->vm_page_prot);
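The hunks above replace open-coded _PAGE_NO_CACHE/_PAGE_GUARDED manipulation with the pgprot_noncached()/pgprot_noncached_wc() accessors. Mapping code elsewhere follows the same pattern; here is a minimal sketch of a driver mmap handler using it (the function and its file_operations hookup are assumed, not part of this patch):

        static int foo_mmap(struct file *file, struct vm_area_struct *vma)
        {
                /* uncached, guarded mapping of an MMIO region */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot);
        }
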
@@ -853,15 +820,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
853int pci_proc_domain(struct pci_bus *bus) 820int pci_proc_domain(struct pci_bus *bus)
854{ 821{
855 struct pci_controller *hose = pci_bus_to_host(bus); 822 struct pci_controller *hose = pci_bus_to_host(bus);
856#ifdef CONFIG_PPC64 823
857 return hose->buid != 0;
858#else
859 if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS)) 824 if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
860 return 0; 825 return 0;
861 if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0) 826 if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
862 return hose->global_number != 0; 827 return hose->global_number != 0;
863 return 1; 828 return 1;
864#endif
865} 829}
866 830
867void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, 831void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
@@ -1083,27 +1047,50 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
1083 } 1047 }
1084} 1048}
1085 1049
1086static void __devinit __pcibios_fixup_bus(struct pci_bus *bus) 1050void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
1087{ 1051{
1088 struct pci_dev *dev = bus->self; 1052 /* Fix up the bus resources for P2P bridges */
1089 1053 if (bus->self != NULL)
1090 pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB");
1091
1092 /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for
1093 * now differently between 32 and 64 bits.
1094 */
1095 if (dev != NULL)
1096 pcibios_fixup_bridge(bus); 1054 pcibios_fixup_bridge(bus);
1097 1055
1098 /* Additional setup that is different between 32 and 64 bits for now */ 1056 /* Platform specific bus fixups. This is currently only used
1099 pcibios_do_bus_setup(bus); 1057 * by fsl_pci and I'm hoping to get rid of it at some point
1100 1058 */
1101 /* Platform specific bus fixups */
1102 if (ppc_md.pcibios_fixup_bus) 1059 if (ppc_md.pcibios_fixup_bus)
1103 ppc_md.pcibios_fixup_bus(bus); 1060 ppc_md.pcibios_fixup_bus(bus);
1104 1061
1105 /* Read default IRQs and fixup if necessary */ 1062 /* Setup bus DMA mappings */
1063 if (ppc_md.pci_dma_bus_setup)
1064 ppc_md.pci_dma_bus_setup(bus);
1065}
1066
1067void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1068{
1069 struct pci_dev *dev;
1070
1071 pr_debug("PCI: Fixup bus devices %d (%s)\n",
1072 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1073
1106 list_for_each_entry(dev, &bus->devices, bus_list) { 1074 list_for_each_entry(dev, &bus->devices, bus_list) {
1075 struct dev_archdata *sd = &dev->dev.archdata;
1076
1077 /* Setup OF node pointer in archdata */
1078 sd->of_node = pci_device_to_OF_node(dev);
1079
1080 /* Fixup NUMA node as it may not be setup yet by the generic
1081 * code and is needed by the DMA init
1082 */
1083 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1084
1085 /* Hook up default DMA ops */
1086 sd->dma_ops = pci_dma_ops;
1087 sd->dma_data = (void *)PCI_DRAM_OFFSET;
1088
1089 /* Additional platform DMA/iommu setup */
1090 if (ppc_md.pci_dma_dev_setup)
1091 ppc_md.pci_dma_dev_setup(dev);
1092
1093 /* Read default IRQs and fixup if necessary */
1107 pci_read_irq_line(dev); 1094 pci_read_irq_line(dev);
1108 if (ppc_md.pci_irq_fixup) 1095 if (ppc_md.pci_irq_fixup)
1109 ppc_md.pci_irq_fixup(dev); 1096 ppc_md.pci_irq_fixup(dev);
@@ -1113,22 +1100,19 @@ static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
1113void __devinit pcibios_fixup_bus(struct pci_bus *bus) 1100void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1114{ 1101{
1115 /* When called from the generic PCI probe, read PCI<->PCI bridge 1102 /* When called from the generic PCI probe, read PCI<->PCI bridge
1116 * bases before proceeding 1103 * bases. This is -not- called when generating the PCI tree from
1104 * the OF device-tree.
1117 */ 1105 */
1118 if (bus->self != NULL) 1106 if (bus->self != NULL)
1119 pci_read_bridge_bases(bus); 1107 pci_read_bridge_bases(bus);
1120 __pcibios_fixup_bus(bus);
1121}
1122EXPORT_SYMBOL(pcibios_fixup_bus);
1123 1108
1124/* When building a bus from the OF tree rather than probing, we need a 1109 /* Now fixup the bus bus */
1125 * slightly different version of the fixup which doesn't read the 1110 pcibios_setup_bus_self(bus);
1126 * bridge bases using config space accesses 1111
1127 */ 1112 /* Now fixup devices on that bus */
1128void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus) 1113 pcibios_setup_bus_devices(bus);
1129{
1130 __pcibios_fixup_bus(bus);
1131} 1114}
1115EXPORT_SYMBOL(pcibios_fixup_bus);
1132 1116
1133static int skip_isa_ioresource_align(struct pci_dev *dev) 1117static int skip_isa_ioresource_align(struct pci_dev *dev)
1134{ 1118{
@@ -1198,10 +1182,10 @@ static int __init reparent_resources(struct resource *parent,
1198 *pp = NULL; 1182 *pp = NULL;
1199 for (p = res->child; p != NULL; p = p->sibling) { 1183 for (p = res->child; p != NULL; p = p->sibling) {
1200 p->parent = res; 1184 p->parent = res;
1201 DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n", 1185 pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
1202 p->name, 1186 p->name,
1203 (unsigned long long)p->start, 1187 (unsigned long long)p->start,
1204 (unsigned long long)p->end, res->name); 1188 (unsigned long long)p->end, res->name);
1205 } 1189 }
1206 return 0; 1190 return 0;
1207} 1191}
@@ -1245,9 +1229,12 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
1245 int i; 1229 int i;
1246 struct resource *res, *pr; 1230 struct resource *res, *pr;
1247 1231
1232 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1233 pci_domain_nr(bus), bus->number);
1234
1248 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { 1235 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
1249 if ((res = bus->resource[i]) == NULL || !res->flags 1236 if ((res = bus->resource[i]) == NULL || !res->flags
1250 || res->start > res->end) 1237 || res->start > res->end || res->parent)
1251 continue; 1238 continue;
1252 if (bus->parent == NULL) 1239 if (bus->parent == NULL)
1253 pr = (res->flags & IORESOURCE_IO) ? 1240 pr = (res->flags & IORESOURCE_IO) ?
@@ -1271,14 +1258,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
1271 } 1258 }
1272 } 1259 }
1273 1260
1274 DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " 1261 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
1275 "[0x%x], parent %p (%s)\n", 1262 "[0x%x], parent %p (%s)\n",
1276 bus->self ? pci_name(bus->self) : "PHB", 1263 bus->self ? pci_name(bus->self) : "PHB",
1277 bus->number, i, 1264 bus->number, i,
1278 (unsigned long long)res->start, 1265 (unsigned long long)res->start,
1279 (unsigned long long)res->end, 1266 (unsigned long long)res->end,
1280 (unsigned int)res->flags, 1267 (unsigned int)res->flags,
1281 pr, (pr && pr->name) ? pr->name : "nil"); 1268 pr, (pr && pr->name) ? pr->name : "nil");
1282 1269
1283 if (pr && !(pr->flags & IORESOURCE_UNSET)) { 1270 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1284 if (request_resource(pr, res) == 0) 1271 if (request_resource(pr, res) == 0)
@@ -1305,11 +1292,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1305{ 1292{
1306 struct resource *pr, *r = &dev->resource[idx]; 1293 struct resource *pr, *r = &dev->resource[idx];
1307 1294
1308 DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", 1295 pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1309 pci_name(dev), idx, 1296 pci_name(dev), idx,
1310 (unsigned long long)r->start, 1297 (unsigned long long)r->start,
1311 (unsigned long long)r->end, 1298 (unsigned long long)r->end,
1312 (unsigned int)r->flags); 1299 (unsigned int)r->flags);
1313 1300
1314 pr = pci_find_parent_resource(dev, r); 1301 pr = pci_find_parent_resource(dev, r);
1315 if (!pr || (pr->flags & IORESOURCE_UNSET) || 1302 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
@@ -1317,10 +1304,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1317 printk(KERN_WARNING "PCI: Cannot allocate resource region %d" 1304 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1318 " of device %s, will remap\n", idx, pci_name(dev)); 1305 " of device %s, will remap\n", idx, pci_name(dev));
1319 if (pr) 1306 if (pr)
1320 DBG("PCI: parent is %p: %016llx-%016llx [%x]\n", pr, 1307 pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
1321 (unsigned long long)pr->start, 1308 pr,
1322 (unsigned long long)pr->end, 1309 (unsigned long long)pr->start,
1323 (unsigned int)pr->flags); 1310 (unsigned long long)pr->end,
1311 (unsigned int)pr->flags);
1324 /* We'll assign a new address later */ 1312 /* We'll assign a new address later */
1325 r->flags |= IORESOURCE_UNSET; 1313 r->flags |= IORESOURCE_UNSET;
1326 r->end -= r->start; 1314 r->end -= r->start;
@@ -1358,7 +1346,8 @@ static void __init pcibios_allocate_resources(int pass)
1358 * but keep it unregistered. 1346 * but keep it unregistered.
1359 */ 1347 */
1360 u32 reg; 1348 u32 reg;
1361 DBG("PCI: Switching off ROM of %s\n", pci_name(dev)); 1349 pr_debug("PCI: Switching off ROM of %s\n",
1350 pci_name(dev));
1362 r->flags &= ~IORESOURCE_ROM_ENABLE; 1351 r->flags &= ~IORESOURCE_ROM_ENABLE;
1363 pci_read_config_dword(dev, dev->rom_base_reg, &reg); 1352 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1364 pci_write_config_dword(dev, dev->rom_base_reg, 1353 pci_write_config_dword(dev, dev->rom_base_reg,
@@ -1383,7 +1372,7 @@ void __init pcibios_resource_survey(void)
1383 } 1372 }
1384 1373
1385 if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) { 1374 if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
1386 DBG("PCI: Assigning unassigned resouces...\n"); 1375 pr_debug("PCI: Assigning unassigned resouces...\n");
1387 pci_assign_unassigned_resources(); 1376 pci_assign_unassigned_resources();
1388 } 1377 }
1389 1378
@@ -1393,9 +1382,11 @@ void __init pcibios_resource_survey(void)
1393} 1382}
1394 1383
1395#ifdef CONFIG_HOTPLUG 1384#ifdef CONFIG_HOTPLUG
1396/* This is used by the pSeries hotplug driver to allocate resource 1385
1386/* This is used by the PCI hotplug driver to allocate resource
1397 * of newly plugged busses. We can try to consolidate with the 1387 * of newly plugged busses. We can try to consolidate with the
1398 * rest of the code later, for now, keep it as-is 1388 * rest of the code later, for now, keep it as-is as our main
1389 * resource allocation function doesn't deal with sub-trees yet.
1399 */ 1390 */
1400void __devinit pcibios_claim_one_bus(struct pci_bus *bus) 1391void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
1401{ 1392{
@@ -1410,6 +1401,14 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
1410 1401
1411 if (r->parent || !r->start || !r->flags) 1402 if (r->parent || !r->start || !r->flags)
1412 continue; 1403 continue;
1404
1405 pr_debug("PCI: Claiming %s: "
1406 "Resource %d: %016llx..%016llx [%x]\n",
1407 pci_name(dev), i,
1408 (unsigned long long)r->start,
1409 (unsigned long long)r->end,
1410 (unsigned int)r->flags);
1411
1413 pci_claim_resource(dev, i); 1412 pci_claim_resource(dev, i);
1414 } 1413 }
1415 } 1414 }
@@ -1418,6 +1417,31 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
1418 pcibios_claim_one_bus(child_bus); 1417 pcibios_claim_one_bus(child_bus);
1419} 1418}
1420EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); 1419EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1420
1421
1422/* pcibios_finish_adding_to_bus
1423 *
1424 * This is to be called by the hotplug code after devices have been
1425 * added to a bus, this include calling it for a PHB that is just
1426 * being added
1427 */
1428void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1429{
1430 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1431 pci_domain_nr(bus), bus->number);
1432
1433 /* Allocate bus and devices resources */
1434 pcibios_allocate_bus_resources(bus);
1435 pcibios_claim_one_bus(bus);
1436
1437 /* Add new devices to global lists. Register in proc, sysfs. */
1438 pci_bus_add_devices(bus);
1439
1440 /* Fixup EEH */
1441 eeh_add_device_tree_late(bus);
1442}
1443EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1444
1421#endif /* CONFIG_HOTPLUG */ 1445#endif /* CONFIG_HOTPLUG */
1422 1446
1423int pcibios_enable_device(struct pci_dev *dev, int mask) 1447int pcibios_enable_device(struct pci_dev *dev, int mask)
@@ -1428,3 +1452,61 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
1428 1452
1429 return pci_enable_resources(dev, mask); 1453 return pci_enable_resources(dev, mask);
1430} 1454}
1455
1456void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
1457{
1458 struct pci_bus *bus = hose->bus;
1459 struct resource *res;
1460 int i;
1461
1462 /* Hookup PHB IO resource */
1463 bus->resource[0] = res = &hose->io_resource;
1464
1465 if (!res->flags) {
1466 printk(KERN_WARNING "PCI: I/O resource not set for host"
1467 " bridge %s (domain %d)\n",
1468 hose->dn->full_name, hose->global_number);
1469#ifdef CONFIG_PPC32
1470 /* Workaround for lack of IO resource only on 32-bit */
1471 res->start = (unsigned long)hose->io_base_virt - isa_io_base;
1472 res->end = res->start + IO_SPACE_LIMIT;
1473 res->flags = IORESOURCE_IO;
1474#endif /* CONFIG_PPC32 */
1475 }
1476
1477 pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
1478 (unsigned long long)res->start,
1479 (unsigned long long)res->end,
1480 (unsigned long)res->flags);
1481
1482 /* Hookup PHB Memory resources */
1483 for (i = 0; i < 3; ++i) {
1484 res = &hose->mem_resources[i];
1485 if (!res->flags) {
1486 if (i > 0)
1487 continue;
1488 printk(KERN_ERR "PCI: Memory resource 0 not set for "
1489 "host bridge %s (domain %d)\n",
1490 hose->dn->full_name, hose->global_number);
1491#ifdef CONFIG_PPC32
1492 /* Workaround for lack of MEM resource only on 32-bit */
1493 res->start = hose->pci_mem_offset;
1494 res->end = (resource_size_t)-1LL;
1495 res->flags = IORESOURCE_MEM;
1496#endif /* CONFIG_PPC32 */
1497 }
1498 bus->resource[i+1] = res;
1499
1500 pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
1501 (unsigned long long)res->start,
1502 (unsigned long long)res->end,
1503 (unsigned long)res->flags);
1504 }
1505
1506 pr_debug("PCI: PHB MEM offset = %016llx\n",
1507 (unsigned long long)hose->pci_mem_offset);
1508 pr_debug("PCI: PHB IO offset = %08lx\n",
1509 (unsigned long)hose->io_base_virt - _IO_BASE);
1510
1511}
1512
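Taken together, pcibios_setup_phb_resources(), pcibios_setup_bus_self()/pcibios_setup_bus_devices() and pcibios_finish_adding_to_bus() give hotplug code one sequence for bringing a new host bridge online. A condensed sketch of that flow (assuming a pci_controller that has already been allocated and mapped; error handling trimmed):

        static void hotplug_add_phb_sketch(struct pci_controller *hose)
        {
                struct pci_bus *bus;

                bus = pci_create_bus(hose->parent, hose->first_busno,
                                     hose->ops, hose->dn);
                if (bus == NULL)
                        return;
                hose->bus = bus;
                bus->secondary = hose->first_busno;

                pcibios_setup_phb_resources(hose);   /* wire up IO/MEM windows */
                hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);

                /* allocate/claim resources, register devices, run EEH fixups */
                pcibios_finish_adding_to_bus(bus);
        }
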
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 131b1dfa68c6..132cd80afa21 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -26,12 +26,6 @@
26 26
27#undef DEBUG 27#undef DEBUG
28 28
29#ifdef DEBUG
30#define DBG(x...) printk(x)
31#else
32#define DBG(x...)
33#endif
34
35unsigned long isa_io_base = 0; 29unsigned long isa_io_base = 0;
36unsigned long pci_dram_offset = 0; 30unsigned long pci_dram_offset = 0;
37int pcibios_assign_bus_offset = 1; 31int pcibios_assign_bus_offset = 1;
@@ -272,17 +266,14 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
272{ 266{
273 struct device_node *parent, *np; 267 struct device_node *parent, *np;
274 268
275 if (!have_of) 269 pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
276 return NULL;
277
278 DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
279 parent = scan_OF_for_pci_bus(bus); 270 parent = scan_OF_for_pci_bus(bus);
280 if (parent == NULL) 271 if (parent == NULL)
281 return NULL; 272 return NULL;
282 DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>"); 273 pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
283 np = scan_OF_for_pci_dev(parent, devfn); 274 np = scan_OF_for_pci_dev(parent, devfn);
284 of_node_put(parent); 275 of_node_put(parent);
285 DBG(" result is %s\n", np ? np->full_name : "<NULL>"); 276 pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
286 277
287 /* XXX most callers don't release the returned node 278 /* XXX most callers don't release the returned node
288 * mostly because ppc64 doesn't increase the refcount, 279 * mostly because ppc64 doesn't increase the refcount,
@@ -315,8 +306,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
315 struct pci_controller* hose; 306 struct pci_controller* hose;
316 struct pci_dev* dev = NULL; 307 struct pci_dev* dev = NULL;
317 308
318 if (!have_of)
319 return -ENODEV;
320 /* Make sure it's really a PCI device */ 309 /* Make sure it's really a PCI device */
321 hose = pci_find_hose_for_OF_device(node); 310 hose = pci_find_hose_for_OF_device(node);
322 if (!hose || !hose->dn) 311 if (!hose || !hose->dn)
@@ -379,10 +368,41 @@ void pcibios_make_OF_bus_map(void)
379} 368}
380#endif /* CONFIG_PPC_OF */ 369#endif /* CONFIG_PPC_OF */
381 370
371static void __devinit pcibios_scan_phb(struct pci_controller *hose)
372{
373 struct pci_bus *bus;
374 struct device_node *node = hose->dn;
375 unsigned long io_offset;
376 struct resource *res = &hose->io_resource;
377
378 pr_debug("PCI: Scanning PHB %s\n",
379 node ? node->full_name : "<NO NAME>");
380
381 /* Create an empty bus for the toplevel */
382 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
383 if (bus == NULL) {
384 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
385 hose->global_number);
386 return;
387 }
388 bus->secondary = hose->first_busno;
389 hose->bus = bus;
390
391 /* Fixup IO space offset */
392 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
393 res->start = (res->start + io_offset) & 0xffffffffu;
394 res->end = (res->end + io_offset) & 0xffffffffu;
395
396 /* Wire up PHB bus resources */
397 pcibios_setup_phb_resources(hose);
398
399 /* Scan children */
400 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
401}
402
382static int __init pcibios_init(void) 403static int __init pcibios_init(void)
383{ 404{
384 struct pci_controller *hose, *tmp; 405 struct pci_controller *hose, *tmp;
385 struct pci_bus *bus;
386 int next_busno = 0; 406 int next_busno = 0;
387 407
388 printk(KERN_INFO "PCI: Probing PCI hardware\n"); 408 printk(KERN_INFO "PCI: Probing PCI hardware\n");
@@ -395,12 +415,8 @@ static int __init pcibios_init(void)
395 if (pci_assign_all_buses) 415 if (pci_assign_all_buses)
396 hose->first_busno = next_busno; 416 hose->first_busno = next_busno;
397 hose->last_busno = 0xff; 417 hose->last_busno = 0xff;
398 bus = pci_scan_bus_parented(hose->parent, hose->first_busno, 418 pcibios_scan_phb(hose);
399 hose->ops, hose); 419 pci_bus_add_devices(hose->bus);
400 if (bus) {
401 pci_bus_add_devices(bus);
402 hose->last_busno = bus->subordinate;
403 }
404 if (pci_assign_all_buses || next_busno <= hose->last_busno) 420 if (pci_assign_all_buses || next_busno <= hose->last_busno)
405 next_busno = hose->last_busno + pcibios_assign_bus_offset; 421 next_busno = hose->last_busno + pcibios_assign_bus_offset;
406 } 422 }
@@ -410,7 +426,7 @@ static int __init pcibios_init(void)
410 * numbers vs. kernel bus numbers since we may have to 426 * numbers vs. kernel bus numbers since we may have to
411 * remap them. 427 * remap them.
412 */ 428 */
413 if (pci_assign_all_buses && have_of) 429 if (pci_assign_all_buses)
414 pcibios_make_OF_bus_map(); 430 pcibios_make_OF_bus_map();
415 431
416 /* Call common code to handle resource allocation */ 432 /* Call common code to handle resource allocation */
@@ -425,54 +441,6 @@ static int __init pcibios_init(void)
425 441
426subsys_initcall(pcibios_init); 442subsys_initcall(pcibios_init);
427 443
428void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
429{
430 struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
431 unsigned long io_offset;
432 struct resource *res;
433 int i;
434 struct pci_dev *dev;
435
436 /* Hookup PHB resources */
437 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
438 if (bus->parent == NULL) {
439 /* This is a host bridge - fill in its resources */
440 hose->bus = bus;
441
442 bus->resource[0] = res = &hose->io_resource;
443 if (!res->flags) {
444 if (io_offset)
445 printk(KERN_ERR "I/O resource not set for host"
446 " bridge %d\n", hose->global_number);
447 res->start = 0;
448 res->end = IO_SPACE_LIMIT;
449 res->flags = IORESOURCE_IO;
450 }
451 res->start = (res->start + io_offset) & 0xffffffffu;
452 res->end = (res->end + io_offset) & 0xffffffffu;
453
454 for (i = 0; i < 3; ++i) {
455 res = &hose->mem_resources[i];
456 if (!res->flags) {
457 if (i > 0)
458 continue;
459 printk(KERN_ERR "Memory resource not set for "
460 "host bridge %d\n", hose->global_number);
461 res->start = hose->pci_mem_offset;
462 res->end = ~0U;
463 res->flags = IORESOURCE_MEM;
464 }
465 bus->resource[i+1] = res;
466 }
467 }
468
469 if (ppc_md.pci_dma_bus_setup)
470 ppc_md.pci_dma_bus_setup(bus);
471
472 list_for_each_entry(dev, &bus->devices, bus_list)
473 pcibios_setup_new_device(dev);
474}
475
476/* the next one is stolen from the alpha port... */ 444/* the next one is stolen from the alpha port... */
477void __init 445void __init
478pcibios_update_irq(struct pci_dev *dev, int irq) 446pcibios_update_irq(struct pci_dev *dev, int irq)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3502b9101e6b..39fadc6e1492 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -32,13 +32,6 @@
32#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/ppc-pci.h> 33#include <asm/ppc-pci.h>
34 34
35#ifdef DEBUG
36#include <asm/udbg.h>
37#define DBG(fmt...) printk(fmt)
38#else
39#define DBG(fmt...)
40#endif
41
42unsigned long pci_probe_only = 1; 35unsigned long pci_probe_only = 1;
43 36
44/* pci_io_base -- the base address from which io bars are offsets. 37/* pci_io_base -- the base address from which io bars are offsets.
@@ -102,7 +95,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
102 addrs = of_get_property(node, "assigned-addresses", &proplen); 95 addrs = of_get_property(node, "assigned-addresses", &proplen);
103 if (!addrs) 96 if (!addrs)
104 return; 97 return;
105 DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs); 98 pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
106 for (; proplen >= 20; proplen -= 20, addrs += 5) { 99 for (; proplen >= 20; proplen -= 20, addrs += 5) {
107 flags = pci_parse_of_flags(addrs[0]); 100 flags = pci_parse_of_flags(addrs[0]);
108 if (!flags) 101 if (!flags)
@@ -112,8 +105,9 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
112 if (!size) 105 if (!size)
113 continue; 106 continue;
114 i = addrs[0] & 0xff; 107 i = addrs[0] & 0xff;
115 DBG(" base: %llx, size: %llx, i: %x\n", 108 pr_debug(" base: %llx, size: %llx, i: %x\n",
116 (unsigned long long)base, (unsigned long long)size, i); 109 (unsigned long long)base,
110 (unsigned long long)size, i);
117 111
118 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { 112 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
119 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; 113 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -144,7 +138,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
144 if (type == NULL) 138 if (type == NULL)
145 type = ""; 139 type = "";
146 140
147 DBG(" create device, devfn: %x, type: %s\n", devfn, type); 141 pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);
148 142
149 dev->bus = bus; 143 dev->bus = bus;
150 dev->sysdata = node; 144 dev->sysdata = node;
@@ -165,8 +159,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
165 dev->class = get_int_prop(node, "class-code", 0); 159 dev->class = get_int_prop(node, "class-code", 0);
166 dev->revision = get_int_prop(node, "revision-id", 0); 160 dev->revision = get_int_prop(node, "revision-id", 0);
167 161
168 DBG(" class: 0x%x\n", dev->class); 162 pr_debug(" class: 0x%x\n", dev->class);
169 DBG(" revision: 0x%x\n", dev->revision); 163 pr_debug(" revision: 0x%x\n", dev->revision);
170 164
171 dev->current_state = 4; /* unknown power state */ 165 dev->current_state = 4; /* unknown power state */
172 dev->error_state = pci_channel_io_normal; 166 dev->error_state = pci_channel_io_normal;
@@ -187,7 +181,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
187 181
188 pci_parse_of_addrs(node, dev); 182 pci_parse_of_addrs(node, dev);
189 183
190 DBG(" adding to system ...\n"); 184 pr_debug(" adding to system ...\n");
191 185
192 pci_device_add(dev, bus); 186 pci_device_add(dev, bus);
193 187
@@ -195,19 +189,20 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
195} 189}
196EXPORT_SYMBOL(of_create_pci_dev); 190EXPORT_SYMBOL(of_create_pci_dev);
197 191
198void __devinit of_scan_bus(struct device_node *node, 192static void __devinit __of_scan_bus(struct device_node *node,
199 struct pci_bus *bus) 193 struct pci_bus *bus, int rescan_existing)
200{ 194{
201 struct device_node *child; 195 struct device_node *child;
202 const u32 *reg; 196 const u32 *reg;
203 int reglen, devfn; 197 int reglen, devfn;
204 struct pci_dev *dev; 198 struct pci_dev *dev;
205 199
206 DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); 200 pr_debug("of_scan_bus(%s) bus no %d... \n",
201 node->full_name, bus->number);
207 202
208 /* Scan direct children */ 203 /* Scan direct children */
209 for_each_child_of_node(node, child) { 204 for_each_child_of_node(node, child) {
210 DBG(" * %s\n", child->full_name); 205 pr_debug(" * %s\n", child->full_name);
211 reg = of_get_property(child, "reg", &reglen); 206 reg = of_get_property(child, "reg", &reglen);
212 if (reg == NULL || reglen < 20) 207 if (reg == NULL || reglen < 20)
213 continue; 208 continue;
@@ -217,11 +212,15 @@ void __devinit of_scan_bus(struct device_node *node,
217 dev = of_create_pci_dev(child, bus, devfn); 212 dev = of_create_pci_dev(child, bus, devfn);
218 if (!dev) 213 if (!dev)
219 continue; 214 continue;
220 DBG(" dev header type: %x\n", dev->hdr_type); 215 pr_debug(" dev header type: %x\n", dev->hdr_type);
221 } 216 }
222 217
223 /* Ally all fixups */ 218 /* Apply all fixups necessary. We don't fixup the bus "self"
224 pcibios_fixup_of_probed_bus(bus); 219 * for an existing bridge that is being rescanned
220 */
221 if (!rescan_existing)
222 pcibios_setup_bus_self(bus);
223 pcibios_setup_bus_devices(bus);
225 224
226 /* Now scan child busses */ 225 /* Now scan child busses */
227 list_for_each_entry(dev, &bus->devices, bus_list) { 226 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -233,7 +232,20 @@ void __devinit of_scan_bus(struct device_node *node,
233 } 232 }
234 } 233 }
235} 234}
236EXPORT_SYMBOL(of_scan_bus); 235
236void __devinit of_scan_bus(struct device_node *node,
237 struct pci_bus *bus)
238{
239 __of_scan_bus(node, bus, 0);
240}
241EXPORT_SYMBOL_GPL(of_scan_bus);
242
243void __devinit of_rescan_bus(struct device_node *node,
244 struct pci_bus *bus)
245{
246 __of_scan_bus(node, bus, 1);
247}
248EXPORT_SYMBOL_GPL(of_rescan_bus);
237 249
238void __devinit of_scan_pci_bridge(struct device_node *node, 250void __devinit of_scan_pci_bridge(struct device_node *node,
239 struct pci_dev *dev) 251 struct pci_dev *dev)
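
Note: the of_scan_bus/of_rescan_bus split above gives hotplug and EEH recovery paths a way to repopulate the bus below an existing bridge without redoing the bridge's own fixups. A minimal, hypothetical caller sketch (the slot-recovery context and function name are assumptions, not part of this patch):

#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>

/* Illustrative only: rescan the bus below an already-present bridge
 * after its slot has been re-powered.  pcibios_setup_bus_self() is
 * skipped for the bridge itself because of_rescan_bus() calls
 * __of_scan_bus() with rescan_existing = 1.
 */
static void example_rescan_bridge(struct pci_dev *bridge)
{
	struct device_node *dn = pci_device_to_OF_node(bridge);

	if (dn && bridge->subordinate)
		of_rescan_bus(dn, bridge->subordinate);
}
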
@@ -245,7 +257,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
245 unsigned int flags; 257 unsigned int flags;
246 u64 size; 258 u64 size;
247 259
248 DBG("of_scan_pci_bridge(%s)\n", node->full_name); 260 pr_debug("of_scan_pci_bridge(%s)\n", node->full_name);
249 261
250 /* parse bus-range property */ 262 /* parse bus-range property */
251 busrange = of_get_property(node, "bus-range", &len); 263 busrange = of_get_property(node, "bus-range", &len);
@@ -309,12 +321,12 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
309 } 321 }
310 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 322 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
311 bus->number); 323 bus->number);
312 DBG(" bus name: %s\n", bus->name); 324 pr_debug(" bus name: %s\n", bus->name);
313 325
314 mode = PCI_PROBE_NORMAL; 326 mode = PCI_PROBE_NORMAL;
315 if (ppc_md.pci_probe_mode) 327 if (ppc_md.pci_probe_mode)
316 mode = ppc_md.pci_probe_mode(bus); 328 mode = ppc_md.pci_probe_mode(bus);
317 DBG(" probe mode: %d\n", mode); 329 pr_debug(" probe mode: %d\n", mode);
318 330
319 if (mode == PCI_PROBE_DEVTREE) 331 if (mode == PCI_PROBE_DEVTREE)
320 of_scan_bus(node, bus); 332 of_scan_bus(node, bus);
@@ -327,9 +339,10 @@ void __devinit scan_phb(struct pci_controller *hose)
327{ 339{
328 struct pci_bus *bus; 340 struct pci_bus *bus;
329 struct device_node *node = hose->dn; 341 struct device_node *node = hose->dn;
330 int i, mode; 342 int mode;
331 343
332 DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); 344 pr_debug("PCI: Scanning PHB %s\n",
345 node ? node->full_name : "<NO NAME>");
333 346
334 /* Create an empty bus for the toplevel */ 347 /* Create an empty bus for the toplevel */
335 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node); 348 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
@@ -345,26 +358,13 @@ void __devinit scan_phb(struct pci_controller *hose)
345 pcibios_map_io_space(bus); 358 pcibios_map_io_space(bus);
346 359
347 /* Wire up PHB bus resources */ 360 /* Wire up PHB bus resources */
348 DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n", 361 pcibios_setup_phb_resources(hose);
349 hose->io_resource.start, hose->io_resource.end,
350 hose->io_resource.flags);
351 bus->resource[0] = &hose->io_resource;
352 for (i = 0; i < 3; ++i) {
353 DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
354 hose->mem_resources[i].start,
355 hose->mem_resources[i].end,
356 hose->mem_resources[i].flags);
357 bus->resource[i+1] = &hose->mem_resources[i];
358 }
359 DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset);
360 DBG("PCI: PHB IO offset = %08lx\n",
361 (unsigned long)hose->io_base_virt - _IO_BASE);
362 362
363 /* Get probe mode and perform scan */ 363 /* Get probe mode and perform scan */
364 mode = PCI_PROBE_NORMAL; 364 mode = PCI_PROBE_NORMAL;
365 if (node && ppc_md.pci_probe_mode) 365 if (node && ppc_md.pci_probe_mode)
366 mode = ppc_md.pci_probe_mode(bus); 366 mode = ppc_md.pci_probe_mode(bus);
367 DBG(" probe mode: %d\n", mode); 367 pr_debug(" probe mode: %d\n", mode);
368 if (mode == PCI_PROBE_DEVTREE) { 368 if (mode == PCI_PROBE_DEVTREE) {
369 bus->subordinate = hose->last_busno; 369 bus->subordinate = hose->last_busno;
370 of_scan_bus(node, bus); 370 of_scan_bus(node, bus);
@@ -380,7 +380,7 @@ static int __init pcibios_init(void)
380 380
381 printk(KERN_INFO "PCI: Probing PCI hardware\n"); 381 printk(KERN_INFO "PCI: Probing PCI hardware\n");
382 382
383 /* For now, override phys_mem_access_prot. If we need it, 383 /* For now, override phys_mem_access_prot. If we need it,g
384 * later, we may move that initialization to each ppc_md 384 * later, we may move that initialization to each ppc_md
385 */ 385 */
386 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; 386 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
@@ -388,6 +388,11 @@ static int __init pcibios_init(void)
388 if (pci_probe_only) 388 if (pci_probe_only)
389 ppc_pci_flags |= PPC_PCI_PROBE_ONLY; 389 ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
390 390
391 /* On ppc64, we always enable PCI domains and we keep domain 0
392 * backward compatible in /proc for video cards
393 */
394 ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0;
395
391 /* Scan all of the recorded PCI controllers. */ 396 /* Scan all of the recorded PCI controllers. */
392 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 397 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
393 scan_phb(hose); 398 scan_phb(hose);
@@ -422,8 +427,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
422 if (bus->self) { 427 if (bus->self) {
423 struct resource *res = bus->resource[0]; 428 struct resource *res = bus->resource[0];
424 429
425 DBG("IO unmapping for PCI-PCI bridge %s\n", 430 pr_debug("IO unmapping for PCI-PCI bridge %s\n",
426 pci_name(bus->self)); 431 pci_name(bus->self));
427 432
428 __flush_hash_table_range(&init_mm, res->start + _IO_BASE, 433 __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
429 res->end + _IO_BASE + 1); 434 res->end + _IO_BASE + 1);
@@ -437,8 +442,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
437 if (hose->io_base_alloc == 0) 442 if (hose->io_base_alloc == 0)
438 return 0; 443 return 0;
439 444
440 DBG("IO unmapping for PHB %s\n", hose->dn->full_name); 445 pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name);
441 DBG(" alloc=0x%p\n", hose->io_base_alloc); 446 pr_debug(" alloc=0x%p\n", hose->io_base_alloc);
442 447
443 /* This is a PHB, we fully unmap the IO area */ 448 /* This is a PHB, we fully unmap the IO area */
444 vunmap(hose->io_base_alloc); 449 vunmap(hose->io_base_alloc);
@@ -463,11 +468,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
463 * thus HPTEs will be faulted in when needed 468 * thus HPTEs will be faulted in when needed
464 */ 469 */
465 if (bus->self) { 470 if (bus->self) {
466 DBG("IO mapping for PCI-PCI bridge %s\n", 471 pr_debug("IO mapping for PCI-PCI bridge %s\n",
467 pci_name(bus->self)); 472 pci_name(bus->self));
468 DBG(" virt=0x%016lx...0x%016lx\n", 473 pr_debug(" virt=0x%016lx...0x%016lx\n",
469 bus->resource[0]->start + _IO_BASE, 474 bus->resource[0]->start + _IO_BASE,
470 bus->resource[0]->end + _IO_BASE); 475 bus->resource[0]->end + _IO_BASE);
471 return 0; 476 return 0;
472 } 477 }
473 478
@@ -496,11 +501,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
496 hose->io_base_virt = (void __iomem *)(area->addr + 501 hose->io_base_virt = (void __iomem *)(area->addr +
497 hose->io_base_phys - phys_page); 502 hose->io_base_phys - phys_page);
498 503
499 DBG("IO mapping for PHB %s\n", hose->dn->full_name); 504 pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
500 DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n", 505 pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
501 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); 506 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
502 DBG(" size=0x%016lx (alloc=0x%016lx)\n", 507 pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
503 hose->pci_io_size, size_page); 508 hose->pci_io_size, size_page);
504 509
505 /* Establish the mapping */ 510 /* Establish the mapping */
506 if (__ioremap_at(phys_page, area->addr, size_page, 511 if (__ioremap_at(phys_page, area->addr, size_page,
@@ -512,24 +517,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
512 hose->io_resource.start += io_virt_offset; 517 hose->io_resource.start += io_virt_offset;
513 hose->io_resource.end += io_virt_offset; 518 hose->io_resource.end += io_virt_offset;
514 519
515 DBG(" hose->io_resource=0x%016lx...0x%016lx\n", 520 pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n",
516 hose->io_resource.start, hose->io_resource.end); 521 hose->io_resource.start, hose->io_resource.end);
517 522
518 return 0; 523 return 0;
519} 524}
520EXPORT_SYMBOL_GPL(pcibios_map_io_space); 525EXPORT_SYMBOL_GPL(pcibios_map_io_space);
521 526
522void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
523{
524 struct pci_dev *dev;
525
526 if (ppc_md.pci_dma_bus_setup)
527 ppc_md.pci_dma_bus_setup(bus);
528
529 list_for_each_entry(dev, &bus->devices, bus_list)
530 pcibios_setup_new_device(dev);
531}
532
533unsigned long pci_address_to_pio(phys_addr_t address) 527unsigned long pci_address_to_pio(phys_addr_t address)
534{ 528{
535 struct pci_controller *hose, *tmp; 529 struct pci_controller *hose, *tmp;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 260089dccfb0..dcec1325d340 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -116,12 +116,6 @@ EXPORT_SYMBOL(giveup_spe);
116 116
117#ifndef CONFIG_PPC64 117#ifndef CONFIG_PPC64
118EXPORT_SYMBOL(flush_instruction_cache); 118EXPORT_SYMBOL(flush_instruction_cache);
119EXPORT_SYMBOL(flush_tlb_kernel_range);
120EXPORT_SYMBOL(flush_tlb_page);
121EXPORT_SYMBOL(_tlbie);
122#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
123EXPORT_SYMBOL(_tlbil_va);
124#endif
125#endif 119#endif
126EXPORT_SYMBOL(__flush_icache_range); 120EXPORT_SYMBOL(__flush_icache_range);
127EXPORT_SYMBOL(flush_dcache_range); 121EXPORT_SYMBOL(flush_dcache_range);
@@ -174,8 +168,7 @@ EXPORT_SYMBOL(cacheable_memcpy);
174#endif 168#endif
175 169
176#ifdef CONFIG_PPC32 170#ifdef CONFIG_PPC32
177EXPORT_SYMBOL(next_mmu_context); 171EXPORT_SYMBOL(switch_mmu_context);
178EXPORT_SYMBOL(set_context);
179#endif 172#endif
180 173
181#ifdef CONFIG_PPC_STD_MMU_32 174#ifdef CONFIG_PPC_STD_MMU_32
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/kernel/ppc_save_regs.S
index 04c0b305ad4a..5113bd2285e1 100644
--- a/arch/powerpc/xmon/setjmp.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -22,7 +22,7 @@
22 * that will be different for 32-bit and 64-bit, because of the 22 * that will be different for 32-bit and 64-bit, because of the
23 * different ABIs, though). 23 * different ABIs, though).
24 */ 24 */
25_GLOBAL(xmon_save_regs) 25_GLOBAL(ppc_save_regs)
26 PPC_STL r0,0*SZL(r3) 26 PPC_STL r0,0*SZL(r3)
27 PPC_STL r2,2*SZL(r3) 27 PPC_STL r2,2*SZL(r3)
28 PPC_STL r3,3*SZL(r3) 28 PPC_STL r3,3*SZL(r3)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 957bded0020d..51b201ddf9a1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -467,6 +467,8 @@ static struct regbit {
467 {MSR_VEC, "VEC"}, 467 {MSR_VEC, "VEC"},
468 {MSR_VSX, "VSX"}, 468 {MSR_VSX, "VSX"},
469 {MSR_ME, "ME"}, 469 {MSR_ME, "ME"},
470 {MSR_CE, "CE"},
471 {MSR_DE, "DE"},
470 {MSR_IR, "IR"}, 472 {MSR_IR, "IR"},
471 {MSR_DR, "DR"}, 473 {MSR_DR, "DR"},
472 {0, NULL} 474 {0, NULL}
@@ -998,7 +1000,7 @@ unsigned long get_wchan(struct task_struct *p)
998 return 0; 1000 return 0;
999} 1001}
1000 1002
1001static int kstack_depth_to_print = 64; 1003static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1002 1004
1003void show_stack(struct task_struct *tsk, unsigned long *stack) 1005void show_stack(struct task_struct *tsk, unsigned long *stack)
1004{ 1006{
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3a2dc7e6586a..6f73c739f1e2 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1160,6 +1160,8 @@ static inline void __init phyp_dump_reserve_mem(void) {}
1160 1160
1161void __init early_init_devtree(void *params) 1161void __init early_init_devtree(void *params)
1162{ 1162{
1163 unsigned long limit;
1164
1163 DBG(" -> early_init_devtree(%p)\n", params); 1165 DBG(" -> early_init_devtree(%p)\n", params);
1164 1166
1165 /* Setup flat device-tree pointer */ 1167 /* Setup flat device-tree pointer */
@@ -1200,7 +1202,19 @@ void __init early_init_devtree(void *params)
1200 early_reserve_mem(); 1202 early_reserve_mem();
1201 phyp_dump_reserve_mem(); 1203 phyp_dump_reserve_mem();
1202 1204
1203 lmb_enforce_memory_limit(memory_limit); 1205 limit = memory_limit;
1206 if (! limit) {
1207 unsigned long memsize;
1208
1209 /* Ensure that total memory size is page-aligned, because
1210 * otherwise mark_bootmem() gets upset. */
1211 lmb_analyze();
1212 memsize = lmb_phys_mem_size();
1213 if ((memsize & PAGE_MASK) != memsize)
1214 limit = memsize & PAGE_MASK;
1215 }
1216 lmb_enforce_memory_limit(limit);
1217
1204 lmb_analyze(); 1218 lmb_analyze();
1205 1219
1206 DBG("Phys. mem: %lx\n", lmb_phys_mem_size()); 1220 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
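
For illustration, the page-truncation introduced above is just a mask: with 4K pages an unaligned LMB total is rounded down so the trailing partial page never reaches mark_bootmem(). The values below are made up:

	unsigned long memsize = 0x0fff8c00UL;		/* example: not page-aligned */
	unsigned long limit   = memsize & PAGE_MASK;	/* 0x0fff8000 with 4K pages */
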
@@ -1271,6 +1285,37 @@ struct device_node *of_find_node_by_phandle(phandle handle)
1271EXPORT_SYMBOL(of_find_node_by_phandle); 1285EXPORT_SYMBOL(of_find_node_by_phandle);
1272 1286
1273/** 1287/**
1288 * of_find_next_cache_node - Find a node's subsidiary cache
1289 * @np: node of type "cpu" or "cache"
1290 *
1291 * Returns a node pointer with refcount incremented, use
1292 * of_node_put() on it when done. Caller should hold a reference
1293 * to np.
1294 */
1295struct device_node *of_find_next_cache_node(struct device_node *np)
1296{
1297 struct device_node *child;
1298 const phandle *handle;
1299
1300 handle = of_get_property(np, "l2-cache", NULL);
1301 if (!handle)
1302 handle = of_get_property(np, "next-level-cache", NULL);
1303
1304 if (handle)
1305 return of_find_node_by_phandle(*handle);
1306
1307 /* OF on pmac has nodes instead of properties named "l2-cache"
1308 * beneath CPU nodes.
1309 */
1310 if (!strcmp(np->type, "cpu"))
1311 for_each_child_of_node(np, child)
1312 if (!strcmp(child->type, "cache"))
1313 return child;
1314
1315 return NULL;
1316}
1317
1318/**
1274 * of_find_all_nodes - Get next node in global list 1319 * of_find_all_nodes - Get next node in global list
1275 * @prev: Previous node or NULL to start iteration 1320 * @prev: Previous node or NULL to start iteration
1276 * of_node_put() will be called on it 1321 * of_node_put() will be called on it
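
A hypothetical caller of the new of_find_next_cache_node() helper, walking one CPU's cache hierarchy; the of_find_node_by_type() starting point and the printk are illustrative, not from this patch:

#include <linux/kernel.h>
#include <asm/prom.h>

static void example_walk_caches(void)
{
	struct device_node *cpu = of_find_node_by_type(NULL, "cpu");
	struct device_node *cache = cpu ? of_find_next_cache_node(cpu) : NULL;

	of_node_put(cpu);
	while (cache) {
		/* each call returns the next level with its refcount held */
		struct device_node *next = of_find_next_cache_node(cache);

		printk(KERN_INFO "cache: %s\n", cache->full_name);
		of_node_put(cache);
		cache = next;
	}
}
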
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index a11d68976dc8..8c1335566089 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -734,10 +734,7 @@ void of_irq_map_init(unsigned int flags)
734 if (flags & OF_IMAP_NO_PHANDLE) { 734 if (flags & OF_IMAP_NO_PHANDLE) {
735 struct device_node *np; 735 struct device_node *np;
736 736
737 for(np = NULL; (np = of_find_all_nodes(np)) != NULL;) { 737 for_each_node_with_property(np, "interrupt-controller") {
738 if (of_get_property(np, "interrupt-controller", NULL)
739 == NULL)
740 continue;
741 /* Skip /chosen/interrupt-controller */ 738 /* Skip /chosen/interrupt-controller */
742 if (strcmp(np->name, "chosen") == 0) 739 if (strcmp(np->name, "chosen") == 0)
743 continue; 740 continue;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1f8505c23548..fdfe14c4bdef 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -566,6 +566,32 @@ int rtas_get_sensor(int sensor, int index, int *state)
566} 566}
567EXPORT_SYMBOL(rtas_get_sensor); 567EXPORT_SYMBOL(rtas_get_sensor);
568 568
569bool rtas_indicator_present(int token, int *maxindex)
570{
571 int proplen, count, i;
572 const struct indicator_elem {
573 u32 token;
574 u32 maxindex;
575 } *indicators;
576
577 indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
578 if (!indicators)
579 return false;
580
581 count = proplen / sizeof(struct indicator_elem);
582
583 for (i = 0; i < count; i++) {
584 if (indicators[i].token != token)
585 continue;
586 if (maxindex)
587 *maxindex = indicators[i].maxindex;
588 return true;
589 }
590
591 return false;
592}
593EXPORT_SYMBOL(rtas_indicator_present);
594
569int rtas_set_indicator(int indicator, int index, int new_value) 595int rtas_set_indicator(int indicator, int index, int new_value)
570{ 596{
571 int token = rtas_token("set-indicator"); 597 int token = rtas_token("set-indicator");
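
As a usage sketch (not part of the patch; the wrapper name and error codes are illustrative), a caller can now probe whether firmware lists an indicator before driving it with rtas_set_indicator():

#include <linux/errno.h>
#include <asm/rtas.h>

static int example_set_indicator(int token, int index, int value)
{
	int maxindex;

	if (!rtas_indicator_present(token, &maxindex))
		return -ENODEV;		/* not in the rtas-indicators property */
	if (index > maxindex)
		return -EINVAL;

	return rtas_set_indicator(token, index, value);
}
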
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 589a2797eac2..8869001ab5d7 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -301,51 +301,3 @@ void __init find_and_init_phbs(void)
301#endif /* CONFIG_PPC32 */ 301#endif /* CONFIG_PPC32 */
302 } 302 }
303} 303}
304
305/* RPA-specific bits for removing PHBs */
306int pcibios_remove_root_bus(struct pci_controller *phb)
307{
308 struct pci_bus *b = phb->bus;
309 struct resource *res;
310 int rc, i;
311
312 res = b->resource[0];
313 if (!res->flags) {
314 printk(KERN_ERR "%s: no IO resource for PHB %s\n", __func__,
315 b->name);
316 return 1;
317 }
318
319 rc = pcibios_unmap_io_space(b);
320 if (rc) {
321 printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
322 __func__, b->name);
323 return 1;
324 }
325
326 if (release_resource(res)) {
327 printk(KERN_ERR "%s: failed to release IO on bus %s\n",
328 __func__, b->name);
329 return 1;
330 }
331
332 for (i = 1; i < 3; ++i) {
333 res = b->resource[i];
334 if (!res->flags && i == 0) {
335 printk(KERN_ERR "%s: no MEM resource for PHB %s\n",
336 __func__, b->name);
337 return 1;
338 }
339 if (res->flags && release_resource(res)) {
340 printk(KERN_ERR
341 "%s: failed to release IO %d on bus %s\n",
342 __func__, i, b->name);
343 return 1;
344 }
345 }
346
347 pcibios_free_controller(phb);
348
349 return 0;
350}
351EXPORT_SYMBOL(pcibios_remove_root_bus);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c1a27626a940..9e1ca745d8f0 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/serial.h> 39#include <asm/serial.h>
40#include <asm/udbg.h> 40#include <asm/udbg.h>
41#include <asm/mmu_context.h>
41 42
42#include "setup.h" 43#include "setup.h"
43 44
@@ -49,12 +50,12 @@ int boot_cpuid;
49EXPORT_SYMBOL_GPL(boot_cpuid); 50EXPORT_SYMBOL_GPL(boot_cpuid);
50int boot_cpuid_phys; 51int boot_cpuid_phys;
51 52
53int smp_hw_index[NR_CPUS];
54
52unsigned long ISA_DMA_THRESHOLD; 55unsigned long ISA_DMA_THRESHOLD;
53unsigned int DMA_MODE_READ; 56unsigned int DMA_MODE_READ;
54unsigned int DMA_MODE_WRITE; 57unsigned int DMA_MODE_WRITE;
55 58
56int have_of = 1;
57
58#ifdef CONFIG_VGA_CONSOLE 59#ifdef CONFIG_VGA_CONSOLE
59unsigned long vgacon_remap_base; 60unsigned long vgacon_remap_base;
60EXPORT_SYMBOL(vgacon_remap_base); 61EXPORT_SYMBOL(vgacon_remap_base);
@@ -97,6 +98,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
97 PTRRELOC(&__start___ftr_fixup), 98 PTRRELOC(&__start___ftr_fixup),
98 PTRRELOC(&__stop___ftr_fixup)); 99 PTRRELOC(&__stop___ftr_fixup));
99 100
101 do_feature_fixups(spec->mmu_features,
102 PTRRELOC(&__start___mmu_ftr_fixup),
103 PTRRELOC(&__stop___mmu_ftr_fixup));
104
100 do_lwsync_fixups(spec->cpu_features, 105 do_lwsync_fixups(spec->cpu_features,
101 PTRRELOC(&__start___lwsync_fixup), 106 PTRRELOC(&__start___lwsync_fixup),
102 PTRRELOC(&__stop___lwsync_fixup)); 107 PTRRELOC(&__stop___lwsync_fixup));
@@ -121,6 +126,8 @@ notrace void __init machine_init(unsigned long dt_ptr)
121 126
122 probe_machine(); 127 probe_machine();
123 128
129 setup_kdump_trampoline();
130
124#ifdef CONFIG_6xx 131#ifdef CONFIG_6xx
125 if (cpu_has_feature(CPU_FTR_CAN_DOZE) || 132 if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
126 cpu_has_feature(CPU_FTR_CAN_NAP)) 133 cpu_has_feature(CPU_FTR_CAN_NAP))
@@ -326,4 +333,8 @@ void __init setup_arch(char **cmdline_p)
326 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); 333 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
327 334
328 paging_init(); 335 paging_init();
336
337 /* Initialize the MMU context management stuff */
338 mmu_context_init();
339
329} 340}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 169d74cef157..d8bd2161e738 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -70,7 +70,6 @@
70#define DBG(fmt...) 70#define DBG(fmt...)
71#endif 71#endif
72 72
73int have_of = 1;
74int boot_cpuid = 0; 73int boot_cpuid = 0;
75u64 ppc64_pft_size; 74u64 ppc64_pft_size;
76 75
@@ -362,6 +361,8 @@ void __init setup_system(void)
362 */ 361 */
363 do_feature_fixups(cur_cpu_spec->cpu_features, 362 do_feature_fixups(cur_cpu_spec->cpu_features,
364 &__start___ftr_fixup, &__stop___ftr_fixup); 363 &__start___ftr_fixup, &__stop___ftr_fixup);
364 do_feature_fixups(cur_cpu_spec->mmu_features,
365 &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
365 do_feature_fixups(powerpc_firmware_features, 366 do_feature_fixups(powerpc_firmware_features,
366 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 367 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
367 do_lwsync_fixups(cur_cpu_spec->cpu_features, 368 do_lwsync_fixups(cur_cpu_spec->cpu_features,
@@ -606,8 +607,6 @@ void __init setup_per_cpu_areas(void)
606 607
607 for_each_possible_cpu(i) { 608 for_each_possible_cpu(i) {
608 ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size); 609 ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
609 if (!ptr)
610 panic("Cannot allocate cpu data for CPU %d\n", i);
611 610
612 paca[i].data_offset = ptr - __per_cpu_start; 611 paca[i].data_offset = ptr - __per_cpu_start;
613 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 612 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index bc892e69b4f7..a5e54526403d 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -113,7 +113,7 @@ void __devinit smp_generic_give_timebase(void)
113{ 113{
114 int i, score, score2, old, min=0, max=5000, offset=1000; 114 int i, score, score2, old, min=0, max=5000, offset=1000;
115 115
116 printk("Synchronizing timebase\n"); 116 pr_debug("Software timebase sync\n");
117 117
118 /* if this fails then this kernel won't work anyway... */ 118 /* if this fails then this kernel won't work anyway... */
119 tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL ); 119 tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
@@ -123,13 +123,13 @@ void __devinit smp_generic_give_timebase(void)
123 while (!tbsync->ack) 123 while (!tbsync->ack)
124 barrier(); 124 barrier();
125 125
126 printk("Got ack\n"); 126 pr_debug("Got ack\n");
127 127
128 /* binary search */ 128 /* binary search */
129 for (old = -1; old != offset ; offset = (min+max) / 2) { 129 for (old = -1; old != offset ; offset = (min+max) / 2) {
130 score = start_contest(kSetAndTest, offset, NUM_ITER); 130 score = start_contest(kSetAndTest, offset, NUM_ITER);
131 131
132 printk("score %d, offset %d\n", score, offset ); 132 pr_debug("score %d, offset %d\n", score, offset );
133 133
134 if( score > 0 ) 134 if( score > 0 )
135 max = offset; 135 max = offset;
@@ -140,8 +140,8 @@ void __devinit smp_generic_give_timebase(void)
140 score = start_contest(kSetAndTest, min, NUM_ITER); 140 score = start_contest(kSetAndTest, min, NUM_ITER);
141 score2 = start_contest(kSetAndTest, max, NUM_ITER); 141 score2 = start_contest(kSetAndTest, max, NUM_ITER);
142 142
143 printk("Min %d (score %d), Max %d (score %d)\n", 143 pr_debug("Min %d (score %d), Max %d (score %d)\n",
144 min, score, max, score2); 144 min, score, max, score2);
145 score = abs(score); 145 score = abs(score);
146 score2 = abs(score2); 146 score2 = abs(score2);
147 offset = (score < score2) ? min : max; 147 offset = (score < score2) ? min : max;
@@ -155,7 +155,7 @@ void __devinit smp_generic_give_timebase(void)
155 if (score2 <= score || score2 < 20) 155 if (score2 <= score || score2 < 20)
156 break; 156 break;
157 } 157 }
158 printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); 158 pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );
159 159
160 /* exiting */ 160 /* exiting */
161 tbsync->cmd = kExit; 161 tbsync->cmd = kExit;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d1165566f064..65484b2200b3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,7 +57,6 @@
57#define DBG(fmt...) 57#define DBG(fmt...)
58#endif 58#endif
59 59
60int smp_hw_index[NR_CPUS];
61struct thread_info *secondary_ti; 60struct thread_info *secondary_ti;
62 61
63DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 62DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
@@ -119,6 +118,65 @@ void smp_message_recv(int msg)
119 } 118 }
120} 119}
121 120
121static irqreturn_t call_function_action(int irq, void *data)
122{
123 generic_smp_call_function_interrupt();
124 return IRQ_HANDLED;
125}
126
127static irqreturn_t reschedule_action(int irq, void *data)
128{
129 /* we just need the return path side effect of checking need_resched */
130 return IRQ_HANDLED;
131}
132
133static irqreturn_t call_function_single_action(int irq, void *data)
134{
135 generic_smp_call_function_single_interrupt();
136 return IRQ_HANDLED;
137}
138
139static irqreturn_t debug_ipi_action(int irq, void *data)
140{
141 smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
142 return IRQ_HANDLED;
143}
144
145static irq_handler_t smp_ipi_action[] = {
146 [PPC_MSG_CALL_FUNCTION] = call_function_action,
147 [PPC_MSG_RESCHEDULE] = reschedule_action,
148 [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
149 [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
150};
151
152const char *smp_ipi_name[] = {
153 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
154 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
155 [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
156 [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
157};
158
159/* optional function to request ipi, for controllers with >= 4 ipis */
160int smp_request_message_ipi(int virq, int msg)
161{
162 int err;
163
164 if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
165 return -EINVAL;
166 }
167#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
168 if (msg == PPC_MSG_DEBUGGER_BREAK) {
169 return 1;
170 }
171#endif
172 err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
173 smp_ipi_name[msg], 0);
174 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
175 virq, smp_ipi_name[msg], err);
176
177 return err;
178}
179
122void smp_send_reschedule(int cpu) 180void smp_send_reschedule(int cpu)
123{ 181{
124 if (likely(smp_ops)) 182 if (likely(smp_ops))
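
A hypothetical platform hook using the new smp_request_message_ipi() helper, one virq per message; the virq numbering is made up for the example:

#include <asm/smp.h>

static void example_request_ipis(unsigned int virq_base)
{
	/* one interrupt per message type, for controllers with >= 4 IPIs */
	smp_request_message_ipi(virq_base + 0, PPC_MSG_CALL_FUNCTION);
	smp_request_message_ipi(virq_base + 1, PPC_MSG_RESCHEDULE);
	smp_request_message_ipi(virq_base + 2, PPC_MSG_CALL_FUNC_SINGLE);
	smp_request_message_ipi(virq_base + 3, PPC_MSG_DEBUGGER_BREAK);
}
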
@@ -404,8 +462,7 @@ out:
404static struct device_node *cpu_to_l2cache(int cpu) 462static struct device_node *cpu_to_l2cache(int cpu)
405{ 463{
406 struct device_node *np; 464 struct device_node *np;
407 const phandle *php; 465 struct device_node *cache;
408 phandle ph;
409 466
410 if (!cpu_present(cpu)) 467 if (!cpu_present(cpu))
411 return NULL; 468 return NULL;
@@ -414,13 +471,11 @@ static struct device_node *cpu_to_l2cache(int cpu)
414 if (np == NULL) 471 if (np == NULL)
415 return NULL; 472 return NULL;
416 473
417 php = of_get_property(np, "l2-cache", NULL); 474 cache = of_find_next_cache_node(np);
418 if (php == NULL) 475
419 return NULL;
420 ph = *php;
421 of_node_put(np); 476 of_node_put(np);
422 477
423 return of_find_node_by_phandle(ph); 478 return cache;
424} 479}
425 480
426/* Activate a secondary processor. */ 481/* Activate a secondary processor. */
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index 77b7b34b5955..560c96119501 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -34,6 +34,6 @@ void save_processor_state(void)
34void restore_processor_state(void) 34void restore_processor_state(void)
35{ 35{
36#ifdef CONFIG_PPC32 36#ifdef CONFIG_PPC32
37 set_context(current->active_mm->context.id, current->active_mm->pgd); 37 switch_mmu_context(NULL, current->active_mm);
38#endif 38#endif
39} 39}
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index 77fc76607ab2..b47d8ceffb52 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -5,7 +5,7 @@
5#include <asm/thread_info.h> 5#include <asm/thread_info.h>
6#include <asm/ppc_asm.h> 6#include <asm/ppc_asm.h>
7#include <asm/asm-offsets.h> 7#include <asm/asm-offsets.h>
8 8#include <asm/mmu.h>
9 9
10/* 10/*
11 * Structure for storing CPU registers on the save area. 11 * Structure for storing CPU registers on the save area.
@@ -279,7 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
279 mtibatl 3,r4 279 mtibatl 3,r4
280#endif 280#endif
281 281
282BEGIN_FTR_SECTION 282BEGIN_MMU_FTR_SECTION
283 li r4,0 283 li r4,0
284 mtspr SPRN_DBAT4U,r4 284 mtspr SPRN_DBAT4U,r4
285 mtspr SPRN_DBAT4L,r4 285 mtspr SPRN_DBAT4L,r4
@@ -297,7 +297,7 @@ BEGIN_FTR_SECTION
297 mtspr SPRN_IBAT6L,r4 297 mtspr SPRN_IBAT6L,r4
298 mtspr SPRN_IBAT7U,r4 298 mtspr SPRN_IBAT7U,r4
299 mtspr SPRN_IBAT7L,r4 299 mtspr SPRN_IBAT7L,r4
300END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) 300END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
301 301
302 /* Flush all TLBs */ 302 /* Flush all TLBs */
303 lis r4,0x1000 303 lis r4,0x1000
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 20885a38237a..0c64f10087b9 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -566,7 +566,6 @@ static bool cache_is_unified(struct device_node *np)
566 566
567static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level) 567static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
568{ 568{
569 const phandle *next_cache_phandle;
570 struct device_node *next_cache; 569 struct device_node *next_cache;
571 struct cache_desc *new, **end; 570 struct cache_desc *new, **end;
572 571
@@ -591,11 +590,7 @@ static struct cache_desc * __cpuinit create_cache_index_info(struct device_node
591 while (*end) 590 while (*end)
592 end = &(*end)->next; 591 end = &(*end)->next;
593 592
594 next_cache_phandle = of_get_property(np, "l2-cache", NULL); 593 next_cache = of_find_next_cache_node(np);
595 if (!next_cache_phandle)
596 goto out;
597
598 next_cache = of_find_node_by_phandle(*next_cache_phandle);
599 if (!next_cache) 594 if (!next_cache)
600 goto out; 595 goto out;
601 596
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6f39d35d6f55..99f1ddd68582 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -164,8 +164,6 @@ static u64 tb_to_ns_scale __read_mostly;
164static unsigned tb_to_ns_shift __read_mostly; 164static unsigned tb_to_ns_shift __read_mostly;
165static unsigned long boot_tb __read_mostly; 165static unsigned long boot_tb __read_mostly;
166 166
167static struct gettimeofday_struct do_gtod;
168
169extern struct timezone sys_tz; 167extern struct timezone sys_tz;
170static long timezone_offset; 168static long timezone_offset;
171 169
@@ -415,31 +413,9 @@ void udelay(unsigned long usecs)
415} 413}
416EXPORT_SYMBOL(udelay); 414EXPORT_SYMBOL(udelay);
417 415
418
419/*
420 * There are two copies of tb_to_xs and stamp_xsec so that no
421 * lock is needed to access and use these values in
422 * do_gettimeofday. We alternate the copies and as long as a
423 * reasonable time elapses between changes, there will never
424 * be inconsistent values. ntpd has a minimum of one minute
425 * between updates.
426 */
427static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, 416static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
428 u64 new_tb_to_xs) 417 u64 new_tb_to_xs)
429{ 418{
430 unsigned temp_idx;
431 struct gettimeofday_vars *temp_varp;
432
433 temp_idx = (do_gtod.var_idx == 0);
434 temp_varp = &do_gtod.vars[temp_idx];
435
436 temp_varp->tb_to_xs = new_tb_to_xs;
437 temp_varp->tb_orig_stamp = new_tb_stamp;
438 temp_varp->stamp_xsec = new_stamp_xsec;
439 smp_mb();
440 do_gtod.varp = temp_varp;
441 do_gtod.var_idx = temp_idx;
442
443 /* 419 /*
444 * tb_update_count is used to allow the userspace gettimeofday code 420 * tb_update_count is used to allow the userspace gettimeofday code
445 * to assure itself that it sees a consistent view of the tb_to_xs and 421 * to assure itself that it sees a consistent view of the tb_to_xs and
@@ -456,6 +432,7 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
456 vdso_data->tb_to_xs = new_tb_to_xs; 432 vdso_data->tb_to_xs = new_tb_to_xs;
457 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; 433 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
458 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; 434 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
435 vdso_data->stamp_xtime = xtime;
459 smp_wmb(); 436 smp_wmb();
460 ++(vdso_data->tb_update_count); 437 ++(vdso_data->tb_update_count);
461} 438}
@@ -514,9 +491,7 @@ static int __init iSeries_tb_recal(void)
514 tb_ticks_per_sec = new_tb_ticks_per_sec; 491 tb_ticks_per_sec = new_tb_ticks_per_sec;
515 calc_cputime_factors(); 492 calc_cputime_factors();
516 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); 493 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
517 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
518 tb_to_xs = divres.result_low; 494 tb_to_xs = divres.result_low;
519 do_gtod.varp->tb_to_xs = tb_to_xs;
520 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 495 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
521 vdso_data->tb_to_xs = tb_to_xs; 496 vdso_data->tb_to_xs = tb_to_xs;
522 } 497 }
@@ -988,15 +963,6 @@ void __init time_init(void)
988 sys_tz.tz_dsttime = 0; 963 sys_tz.tz_dsttime = 0;
989 } 964 }
990 965
991 do_gtod.varp = &do_gtod.vars[0];
992 do_gtod.var_idx = 0;
993 do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
994 __get_cpu_var(last_jiffy) = tb_last_jiffy;
995 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
996 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
997 do_gtod.varp->tb_to_xs = tb_to_xs;
998 do_gtod.tb_to_us = tb_to_us;
999
1000 vdso_data->tb_orig_stamp = tb_last_jiffy; 966 vdso_data->tb_orig_stamp = tb_last_jiffy;
1001 vdso_data->tb_update_count = 0; 967 vdso_data->tb_update_count = 0;
1002 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 968 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f5def6cf5cd6..5457e9575685 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1160,37 +1160,85 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address,
1160#ifdef CONFIG_SPE 1160#ifdef CONFIG_SPE
1161void SPEFloatingPointException(struct pt_regs *regs) 1161void SPEFloatingPointException(struct pt_regs *regs)
1162{ 1162{
1163 extern int do_spe_mathemu(struct pt_regs *regs);
1163 unsigned long spefscr; 1164 unsigned long spefscr;
1164 int fpexc_mode; 1165 int fpexc_mode;
1165 int code = 0; 1166 int code = 0;
1167 int err;
1168
1169 preempt_disable();
1170 if (regs->msr & MSR_SPE)
1171 giveup_spe(current);
1172 preempt_enable();
1166 1173
1167 spefscr = current->thread.spefscr; 1174 spefscr = current->thread.spefscr;
1168 fpexc_mode = current->thread.fpexc_mode; 1175 fpexc_mode = current->thread.fpexc_mode;
1169 1176
1170 /* Hardware does not neccessarily set sticky
1171 * underflow/overflow/invalid flags */
1172 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1177 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1173 code = FPE_FLTOVF; 1178 code = FPE_FLTOVF;
1174 spefscr |= SPEFSCR_FOVFS;
1175 } 1179 }
1176 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1180 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1177 code = FPE_FLTUND; 1181 code = FPE_FLTUND;
1178 spefscr |= SPEFSCR_FUNFS;
1179 } 1182 }
1180 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1183 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1181 code = FPE_FLTDIV; 1184 code = FPE_FLTDIV;
1182 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1185 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1183 code = FPE_FLTINV; 1186 code = FPE_FLTINV;
1184 spefscr |= SPEFSCR_FINVS;
1185 } 1187 }
1186 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1188 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1187 code = FPE_FLTRES; 1189 code = FPE_FLTRES;
1188 1190
1189 current->thread.spefscr = spefscr; 1191 err = do_spe_mathemu(regs);
1192 if (err == 0) {
1193 regs->nip += 4; /* skip emulated instruction */
1194 emulate_single_step(regs);
1195 return;
1196 }
1197
1198 if (err == -EFAULT) {
1199 /* got an error reading the instruction */
1200 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1201 } else if (err == -EINVAL) {
1202 /* didn't recognize the instruction */
1203 printk(KERN_ERR "unrecognized spe instruction "
1204 "in %s at %lx\n", current->comm, regs->nip);
1205 } else {
1206 _exception(SIGFPE, regs, code, regs->nip);
1207 }
1190 1208
1191 _exception(SIGFPE, regs, code, regs->nip);
1192 return; 1209 return;
1193} 1210}
1211
1212void SPEFloatingPointRoundException(struct pt_regs *regs)
1213{
1214 extern int speround_handler(struct pt_regs *regs);
1215 int err;
1216
1217 preempt_disable();
1218 if (regs->msr & MSR_SPE)
1219 giveup_spe(current);
1220 preempt_enable();
1221
1222 regs->nip -= 4;
1223 err = speround_handler(regs);
1224 if (err == 0) {
1225 regs->nip += 4; /* skip emulated instruction */
1226 emulate_single_step(regs);
1227 return;
1228 }
1229
1230 if (err == -EFAULT) {
1231 /* got an error reading the instruction */
1232 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1233 } else if (err == -EINVAL) {
1234 /* didn't recognize the instruction */
1235 printk(KERN_ERR "unrecognized spe instruction "
1236 "in %s at %lx\n", current->comm, regs->nip);
1237 } else {
1238 _exception(SIGFPE, regs, 0, regs->nip);
1239 return;
1240 }
1241}
1194#endif 1242#endif
1195 1243
1196/* 1244/*
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 65639a43e644..ad06d5c75b15 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
184 * This is called from binfmt_elf, we create the special vma for the 184 * This is called from binfmt_elf, we create the special vma for the
185 * vDSO and insert it into the mm struct tree 185 * vDSO and insert it into the mm struct tree
186 */ 186 */
187int arch_setup_additional_pages(struct linux_binprm *bprm, 187int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
188 int executable_stack)
189{ 188{
190 struct mm_struct *mm = current->mm; 189 struct mm_struct *mm = current->mm;
191 struct page **vdso_pagelist; 190 struct page **vdso_pagelist;
@@ -567,6 +566,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
567 do_feature_fixups(cur_cpu_spec->cpu_features, 566 do_feature_fixups(cur_cpu_spec->cpu_features,
568 start64, start64 + size64); 567 start64, start64 + size64);
569 568
569 start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64);
570 if (start64)
571 do_feature_fixups(cur_cpu_spec->mmu_features,
572 start64, start64 + size64);
573
570 start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64); 574 start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
571 if (start64) 575 if (start64)
572 do_feature_fixups(powerpc_firmware_features, 576 do_feature_fixups(powerpc_firmware_features,
@@ -583,6 +587,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
583 do_feature_fixups(cur_cpu_spec->cpu_features, 587 do_feature_fixups(cur_cpu_spec->cpu_features,
584 start32, start32 + size32); 588 start32, start32 + size32);
585 589
590 start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32);
591 if (start32)
592 do_feature_fixups(cur_cpu_spec->mmu_features,
593 start32, start32 + size32);
594
586#ifdef CONFIG_PPC64 595#ifdef CONFIG_PPC64
587 start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32); 596 start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
588 if (start32) 597 if (start32)
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 72ca26df457e..ee038d4bf252 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -16,6 +16,13 @@
16#include <asm/asm-offsets.h> 16#include <asm/asm-offsets.h>
17#include <asm/unistd.h> 17#include <asm/unistd.h>
18 18
19/* Offset for the low 32-bit part of a field of long type */
20#ifdef CONFIG_PPC64
21#define LOPART 4
22#else
23#define LOPART 0
24#endif
25
19 .text 26 .text
20/* 27/*
21 * Exact prototype of gettimeofday 28 * Exact prototype of gettimeofday
@@ -90,101 +97,53 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
90 97
91 mflr r12 /* r12 saves lr */ 98 mflr r12 /* r12 saves lr */
92 .cfi_register lr,r12 99 .cfi_register lr,r12
93 mr r10,r3 /* r10 saves id */
94 mr r11,r4 /* r11 saves tp */ 100 mr r11,r4 /* r11 saves tp */
95 bl __get_datapage@local /* get data page */ 101 bl __get_datapage@local /* get data page */
96 mr r9,r3 /* datapage ptr in r9 */ 102 mr r9,r3 /* datapage ptr in r9 */
97 beq cr1,50f /* if monotonic -> jump there */
98
99 /*
100 * CLOCK_REALTIME
101 */
102
103 bl __do_get_xsec@local /* get xsec from tb & kernel */
104 bne- 98f /* out of line -> do syscall */
105
106 /* seconds are xsec >> 20 */
107 rlwinm r5,r4,12,20,31
108 rlwimi r5,r3,12,0,19
109 stw r5,TSPC32_TV_SEC(r11)
110 103
111 /* get remaining xsec and convert to nsec. we scale 10450: bl __do_get_tspec@local /* get sec/nsec from tb & kernel */
112 * up remaining xsec by 12 bits and get the top 32 bits 105 bne cr1,80f /* not monotonic -> all done */
113 * of the multiplication, then we multiply by 1000
114 */
115 rlwinm r5,r4,12,0,19
116 lis r6,1000000@h
117 ori r6,r6,1000000@l
118 mulhwu r5,r5,r6
119 mulli r5,r5,1000
120 stw r5,TSPC32_TV_NSEC(r11)
121 mtlr r12
122 crclr cr0*4+so
123 li r3,0
124 blr
125 106
126 /* 107 /*
127 * CLOCK_MONOTONIC 108 * CLOCK_MONOTONIC
128 */ 109 */
129 110
13050: bl __do_get_xsec@local /* get xsec from tb & kernel */
131 bne- 98f /* out of line -> do syscall */
132
133 /* seconds are xsec >> 20 */
134 rlwinm r6,r4,12,20,31
135 rlwimi r6,r3,12,0,19
136
137 /* get remaining xsec and convert to nsec. we scale
138 * up remaining xsec by 12 bits and get the top 32 bits
139 * of the multiplication, then we multiply by 1000
140 */
141 rlwinm r7,r4,12,0,19
142 lis r5,1000000@h
143 ori r5,r5,1000000@l
144 mulhwu r7,r7,r5
145 mulli r7,r7,1000
146
147 /* now we must fixup using wall to monotonic. We need to snapshot 111 /* now we must fixup using wall to monotonic. We need to snapshot
148 * that value and do the counter trick again. Fortunately, we still 112 * that value and do the counter trick again. Fortunately, we still
149 * have the counter value in r8 that was returned by __do_get_xsec. 113 * have the counter value in r8 that was returned by __do_get_xsec.
150 * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5 114 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
151 * can be used 115 * can be used, r7 contains NSEC_PER_SEC.
152 */ 116 */
153 117
154 lwz r3,WTOM_CLOCK_SEC(r9) 118 lwz r5,WTOM_CLOCK_SEC(r9)
155 lwz r4,WTOM_CLOCK_NSEC(r9) 119 lwz r6,WTOM_CLOCK_NSEC(r9)
156 120
157 /* We now have our result in r3,r4. We create a fake dependency 121 /* We now have our offset in r5,r6. We create a fake dependency
158 * on that result and re-check the counter 122 * on that value and re-check the counter
159 */ 123 */
160 or r5,r4,r3 124 or r0,r6,r5
161 xor r0,r5,r5 125 xor r0,r0,r0
162 add r9,r9,r0 126 add r9,r9,r0
163#ifdef CONFIG_PPC64 127 lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
164 lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9)
165#else
166 lwz r0,(CFG_TB_UPDATE_COUNT)(r9)
167#endif
168 cmpl cr0,r8,r0 /* check if updated */ 128 cmpl cr0,r8,r0 /* check if updated */
169 bne- 50b 129 bne- 50b
170 130
171 /* Calculate and store result. Note that this mimmics the C code, 131 /* Calculate and store result. Note that this mimics the C code,
172 * which may cause funny results if nsec goes negative... is that 132 * which may cause funny results if nsec goes negative... is that
173 * possible at all ? 133 * possible at all ?
174 */ 134 */
175 add r3,r3,r6 135 add r3,r3,r5
176 add r4,r4,r7 136 add r4,r4,r6
177 lis r5,NSEC_PER_SEC@h 137 cmpw cr0,r4,r7
178 ori r5,r5,NSEC_PER_SEC@l 138 cmpwi cr1,r4,0
179 cmpl cr0,r4,r5
180 cmpli cr1,r4,0
181 blt 1f 139 blt 1f
182 subf r4,r5,r4 140 subf r4,r7,r4
183 addi r3,r3,1 141 addi r3,r3,1
1841: bge cr1,1f 1421: bge cr1,80f
185 addi r3,r3,-1 143 addi r3,r3,-1
186 add r4,r4,r5 144 add r4,r4,r7
1871: stw r3,TSPC32_TV_SEC(r11) 145
14680: stw r3,TSPC32_TV_SEC(r11)
188 stw r4,TSPC32_TV_NSEC(r11) 147 stw r4,TSPC32_TV_NSEC(r11)
189 148
190 mtlr r12 149 mtlr r12
@@ -195,10 +154,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
195 /* 154 /*
196 * syscall fallback 155 * syscall fallback
197 */ 156 */
19898:
199 mtlr r12
200 mr r3,r10
201 mr r4,r11
20299: 15799:
203 li r0,__NR_clock_gettime 158 li r0,__NR_clock_gettime
204 sc 159 sc
@@ -254,11 +209,7 @@ __do_get_xsec:
254 /* Check for update count & load values. We use the low 209 /* Check for update count & load values. We use the low
255 * order 32 bits of the update count 210 * order 32 bits of the update count
256 */ 211 */
257#ifdef CONFIG_PPC64 2121: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
2581: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9)
259#else
2601: lwz r8,(CFG_TB_UPDATE_COUNT)(r9)
261#endif
262 andi. r0,r8,1 /* pending update ? loop */ 213 andi. r0,r8,1 /* pending update ? loop */
263 bne- 1b 214 bne- 1b
264 xor r0,r8,r8 /* create dependency */ 215 xor r0,r8,r8 /* create dependency */
@@ -305,11 +256,7 @@ __do_get_xsec:
305 or r6,r4,r3 256 or r6,r4,r3
306 xor r0,r6,r6 257 xor r0,r6,r6
307 add r9,r9,r0 258 add r9,r9,r0
308#ifdef CONFIG_PPC64 259 lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
309 lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9)
310#else
311 lwz r0,(CFG_TB_UPDATE_COUNT)(r9)
312#endif
313 cmpl cr0,r8,r0 /* check if updated */ 260 cmpl cr0,r8,r0 /* check if updated */
314 bne- 1b 261 bne- 1b
315 262
@@ -322,3 +269,98 @@ __do_get_xsec:
322 */ 269 */
3233: blr 2703: blr
324 .cfi_endproc 271 .cfi_endproc
272
273/*
274 * This is the core of clock_gettime(), it returns the current
275 * time in seconds and nanoseconds in r3 and r4.
276 * It expects the datapage ptr in r9 and doesn't clobber it.
277 * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7.
278 * On return, r8 contains the counter value that can be reused.
279 * This clobbers cr0 but not any other cr field.
280 */
281__do_get_tspec:
282 .cfi_startproc
283 /* Check for update count & load values. We use the low
284 * order 32 bits of the update count
285 */
2861: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
287 andi. r0,r8,1 /* pending update ? loop */
288 bne- 1b
289 xor r0,r8,r8 /* create dependency */
290 add r9,r9,r0
291
292 /* Load orig stamp (offset to TB) */
293 lwz r5,CFG_TB_ORIG_STAMP(r9)
294 lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
295
296 /* Get a stable TB value */
2972: mftbu r3
298 mftbl r4
299 mftbu r0
300 cmpl cr0,r3,r0
301 bne- 2b
302
303 /* Subtract tb orig stamp and shift left 12 bits.
304 */
305 subfc r7,r6,r4
306 subfe r0,r5,r3
307 slwi r0,r0,12
308 rlwimi. r0,r7,12,20,31
309 slwi r7,r7,12
310
311 /* Load scale factor & do multiplication */
312 lwz r5,CFG_TB_TO_XS(r9) /* load values */
313 lwz r6,(CFG_TB_TO_XS+4)(r9)
314 mulhwu r3,r7,r6
315 mullw r10,r7,r5
316 mulhwu r4,r7,r5
317 addc r10,r3,r10
318 li r3,0
319
320 beq+ 4f /* skip high part computation if 0 */
321 mulhwu r3,r0,r5
322 mullw r7,r0,r5
323 mulhwu r5,r0,r6
324 mullw r6,r0,r6
325 adde r4,r4,r7
326 addze r3,r3
327 addc r4,r4,r5
328 addze r3,r3
329 addc r10,r10,r6
330
3314: addze r4,r4 /* add in carry */
332 lis r7,NSEC_PER_SEC@h
333 ori r7,r7,NSEC_PER_SEC@l
334 mulhwu r4,r4,r7 /* convert to nanoseconds */
335
336 /* At this point, we have seconds & nanoseconds since the xtime
337 * stamp in r3+CA and r4. Load & add the xtime stamp.
338 */
339#ifdef CONFIG_PPC64
340 lwz r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9)
341 lwz r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9)
342#else
343 lwz r5,STAMP_XTIME+TSPC32_TV_SEC(r9)
344 lwz r6,STAMP_XTIME+TSPC32_TV_NSEC(r9)
345#endif
346 add r4,r4,r6
347 adde r3,r3,r5
348
349 /* We now have our result in r3,r4. We create a fake dependency
350 * on that result and re-check the counter
351 */
352 or r6,r4,r3
353 xor r0,r6,r6
354 add r9,r9,r0
355 lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
356 cmpl cr0,r8,r0 /* check if updated */
357 bne- 1b
358
359 /* check for nanosecond overflow and adjust if necessary */
360 cmpw r4,r7
361 bltlr /* all done if no overflow */
362 subf r4,r7,r4 /* adjust if overflow */
363 addi r3,r3,1
364
365 blr
366 .cfi_endproc
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index be3b6a41dc09..904ef1360dd7 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -34,6 +34,9 @@ SECTIONS
34 __ftr_fixup : { *(__ftr_fixup) } 34 __ftr_fixup : { *(__ftr_fixup) }
35 35
36 . = ALIGN(8); 36 . = ALIGN(8);
37 __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
38
39 . = ALIGN(8);
37 __lwsync_fixup : { *(__lwsync_fixup) } 40 __lwsync_fixup : { *(__lwsync_fixup) }
38 41
39#ifdef CONFIG_PPC64 42#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index c6401f9e37f1..262cd5857a56 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -75,90 +75,49 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
75 75
76 mflr r12 /* r12 saves lr */ 76 mflr r12 /* r12 saves lr */
77 .cfi_register lr,r12 77 .cfi_register lr,r12
78 mr r10,r3 /* r10 saves id */
79 mr r11,r4 /* r11 saves tp */ 78 mr r11,r4 /* r11 saves tp */
80 bl V_LOCAL_FUNC(__get_datapage) /* get data page */ 79 bl V_LOCAL_FUNC(__get_datapage) /* get data page */
81 beq cr1,50f /* if monotonic -> jump there */ 8050: bl V_LOCAL_FUNC(__do_get_tspec) /* get time from tb & kernel */
82 81 bne cr1,80f /* if not monotonic, all done */
83 /*
84 * CLOCK_REALTIME
85 */
86
87 bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
88
89 lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
90 ori r7,r7,16960
91 rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
92 rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
93 std r5,TSPC64_TV_SEC(r11) /* store sec in tv */
94 subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
95 mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
96 * XSEC_PER_SEC
97 */
98 rldicl r0,r0,44,20
99 mulli r0,r0,1000 /* nsec = usec * 1000 */
100 std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */
101
102 mtlr r12
103 crclr cr0*4+so
104 li r3,0
105 blr
106 82
107 /* 83 /*
108 * CLOCK_MONOTONIC 84 * CLOCK_MONOTONIC
109 */ 85 */
110 86
11150: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
112
113 lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
114 ori r7,r7,16960
115 rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
116 rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
117 subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
118 mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
119 * XSEC_PER_SEC
120 */
121 rldicl r6,r0,44,20
122 mulli r6,r6,1000 /* nsec = usec * 1000 */
123
124 /* now we must fixup using wall to monotonic. We need to snapshot 87 /* now we must fixup using wall to monotonic. We need to snapshot
125 * that value and do the counter trick again. Fortunately, we still 88 * that value and do the counter trick again. Fortunately, we still
126 * have the counter value in r8 that was returned by __do_get_xsec. 89 * have the counter value in r8 that was returned by __do_get_tspec.
127 * At this point, r5,r6 contain our sec/nsec values. 90 * At this point, r4,r5 contain our sec/nsec values.
128 * can be used
129 */ 91 */
130 92
131 lwa r4,WTOM_CLOCK_SEC(r3) 93 lwa r6,WTOM_CLOCK_SEC(r3)
132 lwa r7,WTOM_CLOCK_NSEC(r3) 94 lwa r9,WTOM_CLOCK_NSEC(r3)
133 95
134 /* We now have our result in r4,r7. We create a fake dependency 96 /* We now have our result in r6,r9. We create a fake dependency
135 * on that result and re-check the counter 97 * on that result and re-check the counter
136 */ 98 */
137 or r9,r4,r7 99 or r0,r6,r9
138 xor r0,r9,r9 100 xor r0,r0,r0
139 add r3,r3,r0 101 add r3,r3,r0
140 ld r0,CFG_TB_UPDATE_COUNT(r3) 102 ld r0,CFG_TB_UPDATE_COUNT(r3)
141 cmpld cr0,r0,r8 /* check if updated */ 103 cmpld cr0,r0,r8 /* check if updated */
142 bne- 50b 104 bne- 50b
143 105
144 /* Calculate and store result. Note that this mimmics the C code, 106 /* Add wall->monotonic offset and check for overflow or underflow.
145 * which may cause funny results if nsec goes negative... is that
146 * possible at all ?
147 */ 107 */
148 add r4,r4,r5 108 add r4,r4,r6
149 add r7,r7,r6 109 add r5,r5,r9
150 lis r9,NSEC_PER_SEC@h 110 cmpd cr0,r5,r7
151 ori r9,r9,NSEC_PER_SEC@l 111 cmpdi cr1,r5,0
152 cmpl cr0,r7,r9
153 cmpli cr1,r7,0
154 blt 1f 112 blt 1f
155 subf r7,r9,r7 113 subf r5,r7,r5
156 addi r4,r4,1 114 addi r4,r4,1
1571: bge cr1,1f 1151: bge cr1,80f
158 addi r4,r4,-1 116 addi r4,r4,-1
159 add r7,r7,r9 117 add r5,r5,r7
1601: std r4,TSPC64_TV_SEC(r11) 118
161 std r7,TSPC64_TV_NSEC(r11) 11980: std r4,TSPC64_TV_SEC(r11)
120 std r5,TSPC64_TV_NSEC(r11)
162 121
163 mtlr r12 122 mtlr r12
164 crclr cr0*4+so 123 crclr cr0*4+so
@@ -168,10 +127,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
168 /* 127 /*
169 * syscall fallback 128 * syscall fallback
170 */ 129 */
17198:
172 mtlr r12
173 mr r3,r10
174 mr r4,r11
17599: 13099:
176 li r0,__NR_clock_gettime 131 li r0,__NR_clock_gettime
177 sc 132 sc
@@ -253,3 +208,59 @@ V_FUNCTION_BEGIN(__do_get_xsec)
253 blr 208 blr
254 .cfi_endproc 209 .cfi_endproc
255V_FUNCTION_END(__do_get_xsec) 210V_FUNCTION_END(__do_get_xsec)
211
212/*
213 * This is the core of clock_gettime(), it returns the current
214 * time in seconds and nanoseconds in r4 and r5.
215 * It expects the datapage ptr in r3 and doesn't clobber it.
216 * It clobbers r0 and r6 and returns NSEC_PER_SEC in r7.
217 * On return, r8 contains the counter value that can be reused.
218 * This clobbers cr0 but not any other cr field.
219 */
220V_FUNCTION_BEGIN(__do_get_tspec)
221 .cfi_startproc
222 /* check for update count & load values */
2231: ld r8,CFG_TB_UPDATE_COUNT(r3)
224 andi. r0,r8,1 /* pending update ? loop */
225 bne- 1b
226 xor r0,r8,r8 /* create dependency */
227 add r3,r3,r0
228
229 /* Get TB & offset it. We use the MFTB macro which will generate
230 * workaround code for Cell.
231 */
232 MFTB(r7)
233 ld r9,CFG_TB_ORIG_STAMP(r3)
234 subf r7,r9,r7
235
236 /* Scale result */
237 ld r5,CFG_TB_TO_XS(r3)
238 sldi r7,r7,12 /* compute time since stamp_xtime */
239 mulhdu r6,r7,r5 /* in units of 2^-32 seconds */
240
241 /* Add stamp since epoch */
242 ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
243 ld r5,STAMP_XTIME+TSPC64_TV_NSEC(r3)
244 or r0,r4,r5
245 or r0,r0,r6
246 xor r0,r0,r0
247 add r3,r3,r0
248 ld r0,CFG_TB_UPDATE_COUNT(r3)
249 cmpld r0,r8 /* check if updated */
250 bne- 1b /* reload if so */
251
252 /* convert to seconds & nanoseconds and add to stamp */
253 lis r7,NSEC_PER_SEC@h
254 ori r7,r7,NSEC_PER_SEC@l
255 mulhwu r0,r6,r7 /* compute nanoseconds and */
256 srdi r6,r6,32 /* seconds since stamp_xtime */
257 clrldi r0,r0,32
258 add r5,r5,r0 /* add nanoseconds together */
259 cmpd r5,r7 /* overflow? */
260 add r4,r4,r6
261 bltlr /* all done if no overflow */
262 subf r5,r7,r5 /* if overflow, adjust */
263 addi r4,r4,1
264 blr
265 .cfi_endproc
266V_FUNCTION_END(__do_get_tspec)
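
For readers following the fixed-point arithmetic, a rough user-space C model of what __do_get_tspec computes (illustrative only; the 128-bit multiply stands in for mulhdu, and the update-count retry loop is omitted):

#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

static void tb_to_timespec(uint64_t tb, uint64_t tb_orig_stamp,
			   uint64_t tb_to_xs,
			   uint64_t stamp_sec, uint64_t stamp_nsec,
			   uint64_t *sec, uint64_t *nsec)
{
	/* elapsed timebase ticks since the xtime stamp, scaled by
	 * tb_to_xs to 2^-32 second units (high half of the product) */
	uint64_t delta = (tb - tb_orig_stamp) << 12;
	uint64_t frac = (uint64_t)(((unsigned __int128)delta * tb_to_xs) >> 64);

	*sec  = stamp_sec + (frac >> 32);
	*nsec = stamp_nsec + (((frac & 0xffffffffULL) * NSEC_PER_SEC) >> 32);
	if (*nsec >= NSEC_PER_SEC) {	/* carry into seconds on overflow */
		*nsec -= NSEC_PER_SEC;
		*sec  += 1;
	}
}
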
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index d0b2526dd38d..0e615404e247 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -35,6 +35,9 @@ SECTIONS
35 __ftr_fixup : { *(__ftr_fixup) } 35 __ftr_fixup : { *(__ftr_fixup) }
36 36
37 . = ALIGN(8); 37 . = ALIGN(8);
38 __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
39
40 . = ALIGN(8);
38 __lwsync_fixup : { *(__lwsync_fixup) } 41 __lwsync_fixup : { *(__lwsync_fixup) }
39 42
40 . = ALIGN(8); 43 . = ALIGN(8);
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index a11e6bc59b30..94aa7b011b27 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -41,9 +41,9 @@
41static struct bus_type vio_bus_type; 41static struct bus_type vio_bus_type;
42 42
43static struct vio_dev vio_bus_device = { /* fake "parent" device */ 43static struct vio_dev vio_bus_device = { /* fake "parent" device */
44 .name = vio_bus_device.dev.bus_id, 44 .name = "vio",
45 .type = "", 45 .type = "",
46 .dev.bus_id = "vio", 46 .dev.init_name = "vio",
47 .dev.bus = &vio_bus_type, 47 .dev.bus = &vio_bus_type,
48}; 48};
49 49
@@ -1216,7 +1216,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1216 1216
1217 viodev->irq = irq_of_parse_and_map(of_node, 0); 1217 viodev->irq = irq_of_parse_and_map(of_node, 0);
1218 1218
1219 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); 1219 dev_set_name(&viodev->dev, "%x", *unit_address);
1220 viodev->name = of_node->name; 1220 viodev->name = of_node->name;
1221 viodev->type = of_node->type; 1221 viodev->type = of_node->type;
1222 viodev->unit_address = *unit_address; 1222 viodev->unit_address = *unit_address;
@@ -1243,7 +1243,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1243 /* register with generic device framework */ 1243 /* register with generic device framework */
1244 if (device_register(&viodev->dev)) { 1244 if (device_register(&viodev->dev)) {
1245 printk(KERN_ERR "%s: failed to register device %s\n", 1245 printk(KERN_ERR "%s: failed to register device %s\n",
1246 __func__, viodev->dev.bus_id); 1246 __func__, dev_name(&viodev->dev));
1247 /* XXX free TCE table */ 1247 /* XXX free TCE table */
1248 kfree(viodev); 1248 kfree(viodev);
1249 return NULL; 1249 return NULL;
@@ -1400,13 +1400,13 @@ static struct vio_dev *vio_find_name(const char *name)
1400struct vio_dev *vio_find_node(struct device_node *vnode) 1400struct vio_dev *vio_find_node(struct device_node *vnode)
1401{ 1401{
1402 const uint32_t *unit_address; 1402 const uint32_t *unit_address;
1403 char kobj_name[BUS_ID_SIZE]; 1403 char kobj_name[20];
1404 1404
1405 /* construct the kobject name from the device node */ 1405 /* construct the kobject name from the device node */
1406 unit_address = of_get_property(vnode, "reg", NULL); 1406 unit_address = of_get_property(vnode, "reg", NULL);
1407 if (!unit_address) 1407 if (!unit_address)
1408 return NULL; 1408 return NULL;
1409 snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address); 1409 snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
1410 1410
1411 return vio_find_name(kobj_name); 1411 return vio_find_name(kobj_name);
1412} 1412}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 2412c056baa4..47bf15cd2c9e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -152,6 +152,12 @@ SECTIONS
152 __stop___ftr_fixup = .; 152 __stop___ftr_fixup = .;
153 } 153 }
154 . = ALIGN(8); 154 . = ALIGN(8);
155 __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
156 __start___mmu_ftr_fixup = .;
157 *(__mmu_ftr_fixup)
158 __stop___mmu_ftr_fixup = .;
159 }
160 . = ALIGN(8);
155 __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { 161 __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
156 __start___lwsync_fixup = .; 162 __start___lwsync_fixup = .;
157 *(__lwsync_fixup) 163 *(__lwsync_fixup)
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
new file mode 100644
index 000000000000..a66bec57265a
--- /dev/null
+++ b/arch/powerpc/kvm/44x.c
@@ -0,0 +1,228 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/kvm_host.h>
21#include <linux/err.h>
22
23#include <asm/reg.h>
24#include <asm/cputable.h>
25#include <asm/tlbflush.h>
26#include <asm/kvm_44x.h>
27#include <asm/kvm_ppc.h>
28
29#include "44x_tlb.h"
30
31/* Note: clearing MSR[DE] just means that the debug interrupt will not be
32 * delivered *immediately*. Instead, the debug event simply sets the appropriate DBSR bits.
33 * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
34 * will be delivered as an "imprecise debug event" (which is indicated by
35 * DBSR[IDE]).
36 */
37static void kvm44x_disable_debug_interrupts(void)
38{
39 mtmsr(mfmsr() & ~MSR_DE);
40}
41
42void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
43{
44 kvm44x_disable_debug_interrupts();
45
46 mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
47 mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
48 mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
49 mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
50 mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
51 mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
52 mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
53 mtmsr(vcpu->arch.host_msr);
54}
55
56void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
57{
58 struct kvm_guest_debug *dbg = &vcpu->guest_debug;
59 u32 dbcr0 = 0;
60
61 vcpu->arch.host_msr = mfmsr();
62 kvm44x_disable_debug_interrupts();
63
64 /* Save host debug register state. */
65 vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
66 vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
67 vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
68 vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
69 vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
70 vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
71 vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
72
73 /* set registers up for guest */
74
75 if (dbg->bp[0]) {
76 mtspr(SPRN_IAC1, dbg->bp[0]);
77 dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
78 }
79 if (dbg->bp[1]) {
80 mtspr(SPRN_IAC2, dbg->bp[1]);
81 dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
82 }
83 if (dbg->bp[2]) {
84 mtspr(SPRN_IAC3, dbg->bp[2]);
85 dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
86 }
87 if (dbg->bp[3]) {
88 mtspr(SPRN_IAC4, dbg->bp[3]);
89 dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
90 }
91
92 mtspr(SPRN_DBCR0, dbcr0);
93 mtspr(SPRN_DBCR1, 0);
94 mtspr(SPRN_DBCR2, 0);
95}
96
97void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
98{
99 kvmppc_44x_tlb_load(vcpu);
100}
101
102void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
103{
104 kvmppc_44x_tlb_put(vcpu);
105}
106
107int kvmppc_core_check_processor_compat(void)
108{
109 int r;
110
111 if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
112 r = 0;
113 else
114 r = -ENOTSUPP;
115
116 return r;
117}
118
119int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
120{
121 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
122 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
123 int i;
124
125 tlbe->tid = 0;
126 tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
127 tlbe->word1 = 0;
128 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
129
130 tlbe++;
131 tlbe->tid = 0;
132 tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
133 tlbe->word1 = 0xef600000;
134 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
135 | PPC44x_TLB_I | PPC44x_TLB_G;
136
137 /* Since the guest can directly access the timebase, it must know the
138 * real timebase frequency. Accordingly, it must see the state of
139 * CCR1[TCS]. */
140 vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
141
142 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
143 vcpu_44x->shadow_refs[i].gtlb_index = -1;
144
145 return 0;
146}
147
148/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
149int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
150 struct kvm_translation *tr)
151{
152 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
153 struct kvmppc_44x_tlbe *gtlbe;
154 int index;
155 gva_t eaddr;
156 u8 pid;
157 u8 as;
158
159 eaddr = tr->linear_address;
160 pid = (tr->linear_address >> 32) & 0xff;
161 as = (tr->linear_address >> 40) & 0x1;
162
163 index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
164 if (index == -1) {
165 tr->valid = 0;
166 return 0;
167 }
168
169 gtlbe = &vcpu_44x->guest_tlb[index];
170
171 tr->physical_address = tlb_xlate(gtlbe, eaddr);
172 /* XXX what do "writeable" and "usermode" even mean? */
173 tr->valid = 1;
174
175 return 0;
176}
177
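The AS|PID|EADDR packing that kvmppc_core_vcpu_translate() decodes above is the caller's responsibility. A hypothetical helper for the userspace side of KVM_TRANSLATE, with the field layout taken directly from the shifts in the decode (EADDR in bits 0-31, PID in bits 32-39, AS in bit 40):

#include <stdint.h>

static uint64_t pack_linear_address(uint32_t eaddr, uint8_t pid, uint8_t as)
{
        return (uint64_t)eaddr
               | ((uint64_t)pid << 32)
               | ((uint64_t)(as & 1) << 40);
}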
178struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
179{
180 struct kvmppc_vcpu_44x *vcpu_44x;
181 struct kvm_vcpu *vcpu;
182 int err;
183
184 vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
185 if (!vcpu_44x) {
186 err = -ENOMEM;
187 goto out;
188 }
189
190 vcpu = &vcpu_44x->vcpu;
191 err = kvm_vcpu_init(vcpu, kvm, id);
192 if (err)
193 goto free_vcpu;
194
195 return vcpu;
196
197free_vcpu:
198 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
199out:
200 return ERR_PTR(err);
201}
202
203void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
204{
205 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
206
207 kvm_vcpu_uninit(vcpu);
208 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
209}
210
211static int kvmppc_44x_init(void)
212{
213 int r;
214
215 r = kvmppc_booke_init();
216 if (r)
217 return r;
218
219 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
220}
221
222static void kvmppc_44x_exit(void)
223{
224 kvmppc_booke_exit();
225}
226
227module_init(kvmppc_44x_init);
228module_exit(kvmppc_44x_exit);
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
new file mode 100644
index 000000000000..82489a743a6f
--- /dev/null
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -0,0 +1,371 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <asm/kvm_ppc.h>
21#include <asm/dcr.h>
22#include <asm/dcr-regs.h>
23#include <asm/disassemble.h>
24#include <asm/kvm_44x.h>
25#include "timing.h"
26
27#include "booke.h"
28#include "44x_tlb.h"
29
30#define OP_RFI 19
31
32#define XOP_RFI 50
33#define XOP_MFMSR 83
34#define XOP_WRTEE 131
35#define XOP_MTMSR 146
36#define XOP_WRTEEI 163
37#define XOP_MFDCR 323
38#define XOP_MTDCR 451
39#define XOP_TLBSX 914
40#define XOP_ICCCI 966
41#define XOP_TLBWE 978
42
43static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
44{
45 vcpu->arch.pc = vcpu->arch.srr0;
46 kvmppc_set_msr(vcpu, vcpu->arch.srr1);
47}
48
49int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
50 unsigned int inst, int *advance)
51{
52 int emulated = EMULATE_DONE;
53 int dcrn;
54 int ra;
55 int rb;
56 int rc;
57 int rs;
58 int rt;
59 int ws;
60
61 switch (get_op(inst)) {
62 case OP_RFI:
63 switch (get_xop(inst)) {
64 case XOP_RFI:
65 kvmppc_emul_rfi(vcpu);
66 kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
67 *advance = 0;
68 break;
69
70 default:
71 emulated = EMULATE_FAIL;
72 break;
73 }
74 break;
75
76 case 31:
77 switch (get_xop(inst)) {
78
79 case XOP_MFMSR:
80 rt = get_rt(inst);
81 vcpu->arch.gpr[rt] = vcpu->arch.msr;
82 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
83 break;
84
85 case XOP_MTMSR:
86 rs = get_rs(inst);
87 kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
88 kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
89 break;
90
91 case XOP_WRTEE:
92 rs = get_rs(inst);
93 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
94 | (vcpu->arch.gpr[rs] & MSR_EE);
95 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
96 break;
97
98 case XOP_WRTEEI:
99 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
100 | (inst & MSR_EE);
101 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
102 break;
103
104 case XOP_MFDCR:
105 dcrn = get_dcrn(inst);
106 rt = get_rt(inst);
107
108 /* The guest may access CPR0 registers to determine the timebase
109 * frequency, and it must know the real host frequency because it
110 * can directly access the timebase registers.
111 *
112 * It would be possible to emulate those accesses in userspace,
113 * but userspace can really only figure out the end frequency.
114 * We could decompose that into the factors that compute it, but
115 * that's tricky math, and it's easier to just report the real
116 * CPR0 values.
117 */
118 switch (dcrn) {
119 case DCRN_CPR0_CONFIG_ADDR:
120 vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
121 break;
122 case DCRN_CPR0_CONFIG_DATA:
123 local_irq_disable();
124 mtdcr(DCRN_CPR0_CONFIG_ADDR,
125 vcpu->arch.cpr0_cfgaddr);
126 vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
127 local_irq_enable();
128 break;
129 default:
130 run->dcr.dcrn = dcrn;
131 run->dcr.data = 0;
132 run->dcr.is_write = 0;
133 vcpu->arch.io_gpr = rt;
134 vcpu->arch.dcr_needed = 1;
135 kvmppc_account_exit(vcpu, DCR_EXITS);
136 emulated = EMULATE_DO_DCR;
137 }
138
139 break;
140
141 case XOP_MTDCR:
142 dcrn = get_dcrn(inst);
143 rs = get_rs(inst);
144
145 /* emulate some access in kernel */
146 switch (dcrn) {
147 case DCRN_CPR0_CONFIG_ADDR:
148 vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
149 break;
150 default:
151 run->dcr.dcrn = dcrn;
152 run->dcr.data = vcpu->arch.gpr[rs];
153 run->dcr.is_write = 1;
154 vcpu->arch.dcr_needed = 1;
155 kvmppc_account_exit(vcpu, DCR_EXITS);
156 emulated = EMULATE_DO_DCR;
157 }
158
159 break;
160
161 case XOP_TLBWE:
162 ra = get_ra(inst);
163 rs = get_rs(inst);
164 ws = get_ws(inst);
165 emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
166 break;
167
168 case XOP_TLBSX:
169 rt = get_rt(inst);
170 ra = get_ra(inst);
171 rb = get_rb(inst);
172 rc = get_rc(inst);
173 emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
174 break;
175
176 case XOP_ICCCI:
177 break;
178
179 default:
180 emulated = EMULATE_FAIL;
181 }
182
183 break;
184
185 default:
186 emulated = EMULATE_FAIL;
187 }
188
189 return emulated;
190}
191
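The default branches above punt unknown DCR accesses to userspace by filling run->dcr and setting dcr_needed. A rough sketch of the matching userspace handling, assuming the DCR exit type and a hypothetical pair of board-model callbacks; for a read, the value placed in run->dcr.data is copied into the destination GPR on the next KVM_RUN via io_gpr:

#include <stdint.h>
#include <linux/kvm.h>   /* struct kvm_run, run->dcr fields */

static void handle_dcr_exit(struct kvm_run *run,
                            uint32_t (*board_dcr_read)(uint32_t dcrn),
                            void (*board_dcr_write)(uint32_t dcrn, uint32_t val))
{
        if (run->dcr.is_write)
                board_dcr_write(run->dcr.dcrn, run->dcr.data);
        else
                run->dcr.data = board_dcr_read(run->dcr.dcrn);
}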
192int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
193{
194 switch (sprn) {
195 case SPRN_MMUCR:
196 vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
197 case SPRN_PID:
198 kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
199 case SPRN_CCR0:
200 vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
201 case SPRN_CCR1:
202 vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
203 case SPRN_DEAR:
204 vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
205 case SPRN_ESR:
206 vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
207 case SPRN_DBCR0:
208 vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
209 case SPRN_DBCR1:
210 vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
211 case SPRN_TSR:
212 vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
213 case SPRN_TCR:
214 vcpu->arch.tcr = vcpu->arch.gpr[rs];
215 kvmppc_emulate_dec(vcpu);
216 break;
217
218 /* Note: SPRG4-7 are user-readable. These values are
219 * loaded into the real SPRGs when resuming the
220 * guest. */
221 case SPRN_SPRG4:
222 vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
223 case SPRN_SPRG5:
224 vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
225 case SPRN_SPRG6:
226 vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
227 case SPRN_SPRG7:
228 vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
229
230 case SPRN_IVPR:
231 vcpu->arch.ivpr = vcpu->arch.gpr[rs];
232 break;
233 case SPRN_IVOR0:
234 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
235 break;
236 case SPRN_IVOR1:
237 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
238 break;
239 case SPRN_IVOR2:
240 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
241 break;
242 case SPRN_IVOR3:
243 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
244 break;
245 case SPRN_IVOR4:
246 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
247 break;
248 case SPRN_IVOR5:
249 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
250 break;
251 case SPRN_IVOR6:
252 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
253 break;
254 case SPRN_IVOR7:
255 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
256 break;
257 case SPRN_IVOR8:
258 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
259 break;
260 case SPRN_IVOR9:
261 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
262 break;
263 case SPRN_IVOR10:
264 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
265 break;
266 case SPRN_IVOR11:
267 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
268 break;
269 case SPRN_IVOR12:
270 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
271 break;
272 case SPRN_IVOR13:
273 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
274 break;
275 case SPRN_IVOR14:
276 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
277 break;
278 case SPRN_IVOR15:
279 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
280 break;
281
282 default:
283 return EMULATE_FAIL;
284 }
285
286 kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
287 return EMULATE_DONE;
288}
289
290int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
291{
292 switch (sprn) {
293 /* 440 */
294 case SPRN_MMUCR:
295 vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
296 case SPRN_CCR0:
297 vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
298 case SPRN_CCR1:
299 vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
300
301 /* Book E */
302 case SPRN_PID:
303 vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
304 case SPRN_IVPR:
305 vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
306 case SPRN_DEAR:
307 vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
308 case SPRN_ESR:
309 vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
310 case SPRN_DBCR0:
311 vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
312 case SPRN_DBCR1:
313 vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
314
315 case SPRN_IVOR0:
316 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
317 break;
318 case SPRN_IVOR1:
319 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
320 break;
321 case SPRN_IVOR2:
322 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
323 break;
324 case SPRN_IVOR3:
325 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
326 break;
327 case SPRN_IVOR4:
328 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
329 break;
330 case SPRN_IVOR5:
331 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
332 break;
333 case SPRN_IVOR6:
334 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
335 break;
336 case SPRN_IVOR7:
337 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
338 break;
339 case SPRN_IVOR8:
340 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
341 break;
342 case SPRN_IVOR9:
343 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
344 break;
345 case SPRN_IVOR10:
346 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
347 break;
348 case SPRN_IVOR11:
349 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
350 break;
351 case SPRN_IVOR12:
352 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
353 break;
354 case SPRN_IVOR13:
355 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
356 break;
357 case SPRN_IVOR14:
358 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
359 break;
360 case SPRN_IVOR15:
361 vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
362 break;
363
364 default:
365 return EMULATE_FAIL;
366 }
367
368 kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
369 return EMULATE_DONE;
370}
371
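The emulation above leans on the field extractors from asm/disassemble.h; they follow the standard PowerPC instruction layout, roughly as below (a sketch, not that header verbatim). Note also that the wrteei E bit sits at the same 0x8000 position as MSR_EE, which is what lets the XOP_WRTEEI case mask the raw instruction with MSR_EE directly.

#include <stdint.h>

static inline unsigned int get_op(uint32_t inst)  { return inst >> 26; }           /* primary opcode */
static inline unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }   /* extended opcode */
static inline unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }   /* destination GPR */
static inline unsigned int get_rs(uint32_t inst)  { return (inst >> 21) & 0x1f; }   /* source GPR (same field) */
static inline unsigned int get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }
static inline unsigned int get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }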
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ad72c6f9811f..9a34b8edb9e2 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -22,20 +22,103 @@
22#include <linux/kvm.h> 22#include <linux/kvm.h>
23#include <linux/kvm_host.h> 23#include <linux/kvm_host.h>
24#include <linux/highmem.h> 24#include <linux/highmem.h>
25
26#include <asm/tlbflush.h>
25#include <asm/mmu-44x.h> 27#include <asm/mmu-44x.h>
26#include <asm/kvm_ppc.h> 28#include <asm/kvm_ppc.h>
29#include <asm/kvm_44x.h>
30#include "timing.h"
27 31
28#include "44x_tlb.h" 32#include "44x_tlb.h"
29 33
34#ifndef PPC44x_TLBE_SIZE
35#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
36#endif
37
38#define PAGE_SIZE_4K (1<<12)
39#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
40
41#define PPC44x_TLB_UATTR_MASK \
42 (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
30#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW) 43#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
31#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW) 44#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
32 45
33static unsigned int kvmppc_tlb_44x_pos; 46#ifdef DEBUG
47void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
48{
49 struct kvmppc_44x_tlbe *tlbe;
50 int i;
51
52 printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
53 printk("| %2s | %3s | %8s | %8s | %8s |\n",
54 "nr", "tid", "word0", "word1", "word2");
55
56 for (i = 0; i < ARRAY_SIZE(to_44x(vcpu)->guest_tlb); i++) {
57 tlbe = &to_44x(vcpu)->guest_tlb[i];
58 if (tlbe->word0 & PPC44x_TLB_VALID)
59 printk(" G%2d | %02X | %08X | %08X | %08X |\n",
60 i, tlbe->tid, tlbe->word0, tlbe->word1,
61 tlbe->word2);
62 }
63}
64#endif
65
66static inline void kvmppc_44x_tlbie(unsigned int index)
67{
68 /* 0 <= index < 64, so the V bit is clear and we can use the index as
69 * word0. */
70 asm volatile(
71 "tlbwe %[index], %[index], 0\n"
72 :
73 : [index] "r"(index)
74 );
75}
76
77static inline void kvmppc_44x_tlbre(unsigned int index,
78 struct kvmppc_44x_tlbe *tlbe)
79{
80 asm volatile(
81 "tlbre %[word0], %[index], 0\n"
82 "mfspr %[tid], %[sprn_mmucr]\n"
83 "andi. %[tid], %[tid], 0xff\n"
84 "tlbre %[word1], %[index], 1\n"
85 "tlbre %[word2], %[index], 2\n"
86 : [word0] "=r"(tlbe->word0),
87 [word1] "=r"(tlbe->word1),
88 [word2] "=r"(tlbe->word2),
89 [tid] "=r"(tlbe->tid)
90 : [index] "r"(index),
91 [sprn_mmucr] "i"(SPRN_MMUCR)
92 : "cc"
93 );
94}
95
96static inline void kvmppc_44x_tlbwe(unsigned int index,
97 struct kvmppc_44x_tlbe *stlbe)
98{
99 unsigned long tmp;
100
101 asm volatile(
102 "mfspr %[tmp], %[sprn_mmucr]\n"
103 "rlwimi %[tmp], %[tid], 0, 0xff\n"
104 "mtspr %[sprn_mmucr], %[tmp]\n"
105 "tlbwe %[word0], %[index], 0\n"
106 "tlbwe %[word1], %[index], 1\n"
107 "tlbwe %[word2], %[index], 2\n"
108 : [tmp] "=&r"(tmp)
109 : [word0] "r"(stlbe->word0),
110 [word1] "r"(stlbe->word1),
111 [word2] "r"(stlbe->word2),
112 [tid] "r"(stlbe->tid),
113 [index] "r"(index),
114 [sprn_mmucr] "i"(SPRN_MMUCR)
115 );
116}
34 117
35static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) 118static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
36{ 119{
37 /* Mask off reserved bits. */ 120 /* We only care about the guest's permission and user bits. */
38 attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK; 121 attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;
39 122
40 if (!usermode) { 123 if (!usermode) {
41 /* Guest is in supervisor mode, so we need to translate guest 124 /* Guest is in supervisor mode, so we need to translate guest
@@ -47,18 +130,60 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
47 /* Make sure host can always access this memory. */ 130 /* Make sure host can always access this memory. */
48 attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW; 131 attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
49 132
133 /* WIMGE = 0b00100 */
134 attrib |= PPC44x_TLB_M;
135
50 return attrib; 136 return attrib;
51} 137}
52 138
139/* Load shadow TLB back into hardware. */
140void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
141{
142 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
143 int i;
144
145 for (i = 0; i <= tlb_44x_hwater; i++) {
146 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
147
148 if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
149 kvmppc_44x_tlbwe(i, stlbe);
150 }
151}
152
153static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
154 unsigned int i)
155{
156 vcpu_44x->shadow_tlb_mod[i] = 1;
157}
158
159/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
160void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
161{
162 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
163 int i;
164
165 for (i = 0; i <= tlb_44x_hwater; i++) {
166 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
167
168 if (vcpu_44x->shadow_tlb_mod[i])
169 kvmppc_44x_tlbre(i, stlbe);
170
171 if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
172 kvmppc_44x_tlbie(i);
173 }
174}
175
176
53/* Search the guest TLB for a matching entry. */ 177/* Search the guest TLB for a matching entry. */
54int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, 178int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
55 unsigned int as) 179 unsigned int as)
56{ 180{
181 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
57 int i; 182 int i;
58 183
59 /* XXX Replace loop with fancy data structures. */ 184 /* XXX Replace loop with fancy data structures. */
60 for (i = 0; i < PPC44x_TLB_SIZE; i++) { 185 for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
61 struct tlbe *tlbe = &vcpu->arch.guest_tlb[i]; 186 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
62 unsigned int tid; 187 unsigned int tid;
63 188
64 if (eaddr < get_tlb_eaddr(tlbe)) 189 if (eaddr < get_tlb_eaddr(tlbe))
@@ -83,78 +208,89 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
83 return -1; 208 return -1;
84} 209}
85 210
86struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr) 211int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
87{ 212{
88 unsigned int as = !!(vcpu->arch.msr & MSR_IS); 213 unsigned int as = !!(vcpu->arch.msr & MSR_IS);
89 unsigned int index;
90 214
91 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); 215 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
92 if (index == -1)
93 return NULL;
94 return &vcpu->arch.guest_tlb[index];
95} 216}
96 217
97struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr) 218int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
98{ 219{
99 unsigned int as = !!(vcpu->arch.msr & MSR_DS); 220 unsigned int as = !!(vcpu->arch.msr & MSR_DS);
100 unsigned int index;
101 221
102 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); 222 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
103 if (index == -1)
104 return NULL;
105 return &vcpu->arch.guest_tlb[index];
106} 223}
107 224
108static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe) 225static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
226 unsigned int stlb_index)
109{ 227{
110 return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW); 228 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];
111}
112 229
113static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, 230 if (!ref->page)
114 unsigned int index) 231 return;
115{
116 struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
117 struct page *page = vcpu->arch.shadow_pages[index];
118 232
119 if (get_tlb_v(stlbe)) { 233 /* Discard from the TLB. */
120 if (kvmppc_44x_tlbe_is_writable(stlbe)) 234 /* Note: we could actually invalidate a host mapping, if the host overwrote
121 kvm_release_page_dirty(page); 235 * this TLB entry since we inserted a guest mapping. */
122 else 236 kvmppc_44x_tlbie(stlb_index);
123 kvm_release_page_clean(page); 237
124 } 238 /* Now release the page. */
239 if (ref->writeable)
240 kvm_release_page_dirty(ref->page);
241 else
242 kvm_release_page_clean(ref->page);
243
244 ref->page = NULL;
245
246 /* XXX set tlb_44x_index to stlb_index? */
247
248 KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
125} 249}
126 250
127void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) 251void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
128{ 252{
253 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
129 int i; 254 int i;
130 255
131 for (i = 0; i <= tlb_44x_hwater; i++) 256 for (i = 0; i <= tlb_44x_hwater; i++)
132 kvmppc_44x_shadow_release(vcpu, i); 257 kvmppc_44x_shadow_release(vcpu_44x, i);
133}
134
135void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
136{
137 vcpu->arch.shadow_tlb_mod[i] = 1;
138} 258}
139 259
140/* Caller must ensure that the specified guest TLB entry is safe to insert into 260/**
141 * the shadow TLB. */ 261 * kvmppc_mmu_map -- create a host mapping for guest memory
142void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, 262 *
143 u32 flags) 263 * If the guest wanted a larger page than the host supports, only the first
264 * host page is mapped here and the rest are demand faulted.
265 *
266 * If the guest wanted a smaller page than the host page size, we map only the
267 * guest-size page (i.e. not a full host page mapping).
268 *
269 * Caller must ensure that the specified guest TLB entry is safe to insert into
270 * the shadow TLB.
271 */
272void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
273 u32 flags, u32 max_bytes, unsigned int gtlb_index)
144{ 274{
275 struct kvmppc_44x_tlbe stlbe;
276 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
277 struct kvmppc_44x_shadow_ref *ref;
145 struct page *new_page; 278 struct page *new_page;
146 struct tlbe *stlbe;
147 hpa_t hpaddr; 279 hpa_t hpaddr;
280 gfn_t gfn;
148 unsigned int victim; 281 unsigned int victim;
149 282
150 /* Future optimization: don't overwrite the TLB entry containing the 283 /* Select TLB entry to clobber. Indirectly guard against races with the TLB
151 * current PC (or stack?). */ 284 * miss handler by disabling interrupts. */
152 victim = kvmppc_tlb_44x_pos++; 285 local_irq_disable();
153 if (kvmppc_tlb_44x_pos > tlb_44x_hwater) 286 victim = ++tlb_44x_index;
154 kvmppc_tlb_44x_pos = 0; 287 if (victim > tlb_44x_hwater)
155 stlbe = &vcpu->arch.shadow_tlb[victim]; 288 victim = 0;
289 tlb_44x_index = victim;
290 local_irq_enable();
156 291
157 /* Get reference to new page. */ 292 /* Get reference to new page. */
293 gfn = gpaddr >> PAGE_SHIFT;
158 new_page = gfn_to_page(vcpu->kvm, gfn); 294 new_page = gfn_to_page(vcpu->kvm, gfn);
159 if (is_error_page(new_page)) { 295 if (is_error_page(new_page)) {
160 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); 296 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
@@ -163,10 +299,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
163 } 299 }
164 hpaddr = page_to_phys(new_page); 300 hpaddr = page_to_phys(new_page);
165 301
166 /* Drop reference to old page. */ 302 /* Invalidate any previous shadow mappings. */
167 kvmppc_44x_shadow_release(vcpu, victim); 303 kvmppc_44x_shadow_release(vcpu_44x, victim);
168
169 vcpu->arch.shadow_pages[victim] = new_page;
170 304
171 /* XXX Make sure (va, size) doesn't overlap any other 305 /* XXX Make sure (va, size) doesn't overlap any other
172 * entries. 440x6 user manual says the result would be 306 * entries. 440x6 user manual says the result would be
@@ -174,78 +308,193 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
174 308
175 /* XXX what about AS? */ 309 /* XXX what about AS? */
176 310
177 stlbe->tid = !(asid & 0xff);
178
179 /* Force TS=1 for all guest mappings. */ 311 /* Force TS=1 for all guest mappings. */
180 /* For now we hardcode 4KB mappings, but it will be important to 312 stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
181 * use host large pages in the future. */ 313
182 stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS 314 if (max_bytes >= PAGE_SIZE) {
183 | PPC44x_TLB_4K; 315 /* Guest mapping is larger than or equal to host page size. We can use
184 stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); 316 * a "native" host mapping. */
185 stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags, 317 stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
186 vcpu->arch.msr & MSR_PR); 318 } else {
187 kvmppc_tlbe_set_modified(vcpu, victim); 319 /* Guest mapping is smaller than host page size. We must restrict the
320 * size of the mapping to be at most the smaller of the two, but for
321 * simplicity we fall back to a 4K mapping (this is probably what the
322 * guest is using anyway). */
323 stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
324
325 /* 'hpaddr' is a host page, which is larger than the mapping we're
326 * inserting here. To compensate, we must add the guest sub-page's offset
327 * within the host page. */
328 hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
329 }
188 330
189 KVMTRACE_5D(STLB_WRITE, vcpu, victim, 331 stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
190 stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2, 332 stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
191 handler); 333 vcpu->arch.msr & MSR_PR);
334 stlbe.tid = !(asid & 0xff);
335
336 /* Keep track of the reference so we can properly release it later. */
337 ref = &vcpu_44x->shadow_refs[victim];
338 ref->page = new_page;
339 ref->gtlb_index = gtlb_index;
340 ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
341 ref->tid = stlbe.tid;
342
343 /* Insert shadow mapping into hardware TLB. */
344 kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
345 kvmppc_44x_tlbwe(victim, &stlbe);
346 KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
347 stlbe.word2, handler);
192} 348}
193 349
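A worked example of the sub-page adjustment in kvmppc_mmu_map() above, with hypothetical numbers and a 64K host PAGE_SIZE (with 4K host pages, PAGE_MASK equals PAGE_MASK_4K, the XOR mask is zero, and the adjustment is a no-op):

/*
 *   PAGE_MASK    = 0xffff0000   (64K host pages)
 *   PAGE_MASK_4K = 0xfffff000
 *   gpaddr       = 0x01237800   (guest physical address of the 4K page)
 *   hpaddr       = 0xc4560000   (start of the 64K host page backing it)
 *
 *   hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K)
 *          = 0xc4560000 | (0x01237800 & 0x0000f000)
 *          = 0xc4567000
 *
 * so the 4K shadow entry points at the 4K slice of the 64K host page that
 * actually backs the guest's page, while word0 keeps the 4K-aligned gvaddr.
 */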
194void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, 350/* For a particular guest TLB entry, invalidate the corresponding host TLB
195 gva_t eend, u32 asid) 351 * mappings and release the host pages. */
352static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
353 unsigned int gtlb_index)
196{ 354{
197 unsigned int pid = !(asid & 0xff); 355 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
198 int i; 356 int i;
199 357
200 /* XXX Replace loop with fancy data structures. */ 358 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
201 for (i = 0; i <= tlb_44x_hwater; i++) { 359 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
202 struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; 360 if (ref->gtlb_index == gtlb_index)
203 unsigned int tid; 361 kvmppc_44x_shadow_release(vcpu_44x, i);
362 }
363}
204 364
205 if (!get_tlb_v(stlbe)) 365void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
206 continue; 366{
367 vcpu->arch.shadow_pid = !usermode;
368}
207 369
208 if (eend < get_tlb_eaddr(stlbe)) 370void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
209 continue; 371{
372 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
373 int i;
210 374
211 if (eaddr > get_tlb_end(stlbe)) 375 if (unlikely(vcpu->arch.pid == new_pid))
212 continue; 376 return;
213 377
214 tid = get_tlb_tid(stlbe); 378 vcpu->arch.pid = new_pid;
215 if (tid && (tid != pid))
216 continue;
217 379
218 kvmppc_44x_shadow_release(vcpu, i); 380 /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
219 stlbe->word0 = 0; 381 * can't access guest kernel mappings (TID=1). When we switch to a new
220 kvmppc_tlbe_set_modified(vcpu, i); 382 * guest PID, which will also use host PID=0, we must discard the old guest
221 KVMTRACE_5D(STLB_INVAL, vcpu, i, 383 * userspace mappings. */
222 stlbe->tid, stlbe->word0, stlbe->word1, 384 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
223 stlbe->word2, handler); 385 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
386
387 if (ref->tid == 0)
388 kvmppc_44x_shadow_release(vcpu_44x, i);
224 } 389 }
225} 390}
226 391
227/* Invalidate all mappings on the privilege switch after PID has been changed. 392static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
228 * The guest always runs with PID=1, so we must clear the entire TLB when 393 const struct kvmppc_44x_tlbe *tlbe)
229 * switching address spaces. */
230void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
231{ 394{
232 int i; 395 gpa_t gpa;
233 396
234 if (vcpu->arch.swap_pid) { 397 if (!get_tlb_v(tlbe))
235 /* XXX Replace loop with fancy data structures. */ 398 return 0;
236 for (i = 0; i <= tlb_44x_hwater; i++) { 399
237 struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; 400 /* Does it match current guest AS? */
238 401 /* XXX what about IS != DS? */
239 /* Future optimization: clear only userspace mappings. */ 402 if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
240 kvmppc_44x_shadow_release(vcpu, i); 403 return 0;
241 stlbe->word0 = 0; 404
242 kvmppc_tlbe_set_modified(vcpu, i); 405 gpa = get_tlb_raddr(tlbe);
243 KVMTRACE_5D(STLB_INVAL, vcpu, i, 406 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
244 stlbe->tid, stlbe->word0, stlbe->word1, 407 /* Mapping is not for RAM. */
245 stlbe->word2, handler); 408 return 0;
246 } 409
247 vcpu->arch.swap_pid = 0; 410 return 1;
411}
412
413int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
414{
415 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
416 struct kvmppc_44x_tlbe *tlbe;
417 unsigned int gtlb_index;
418
419 gtlb_index = vcpu->arch.gpr[ra];
420 if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
421 printk("%s: index %d\n", __func__, gtlb_index);
422 kvmppc_dump_vcpu(vcpu);
423 return EMULATE_FAIL;
248 } 424 }
249 425
250 vcpu->arch.shadow_pid = !usermode; 426 tlbe = &vcpu_44x->guest_tlb[gtlb_index];
427
428 /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
429 if (tlbe->word0 & PPC44x_TLB_VALID)
430 kvmppc_44x_invalidate(vcpu, gtlb_index);
431
432 switch (ws) {
433 case PPC44x_TLB_PAGEID:
434 tlbe->tid = get_mmucr_stid(vcpu);
435 tlbe->word0 = vcpu->arch.gpr[rs];
436 break;
437
438 case PPC44x_TLB_XLAT:
439 tlbe->word1 = vcpu->arch.gpr[rs];
440 break;
441
442 case PPC44x_TLB_ATTRIB:
443 tlbe->word2 = vcpu->arch.gpr[rs];
444 break;
445
446 default:
447 return EMULATE_FAIL;
448 }
449
450 if (tlbe_is_host_safe(vcpu, tlbe)) {
451 u64 asid;
452 gva_t eaddr;
453 gpa_t gpaddr;
454 u32 flags;
455 u32 bytes;
456
457 eaddr = get_tlb_eaddr(tlbe);
458 gpaddr = get_tlb_raddr(tlbe);
459
460 /* Use the advertised page size to mask effective and real addrs. */
461 bytes = get_tlb_bytes(tlbe);
462 eaddr &= ~(bytes - 1);
463 gpaddr &= ~(bytes - 1);
464
465 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
466 flags = tlbe->word2 & 0xffff;
467
468 kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
469 }
470
471 KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
472 tlbe->word1, tlbe->word2, handler);
473
474 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
475 return EMULATE_DONE;
476}
477
478int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
479{
480 u32 ea;
481 int gtlb_index;
482 unsigned int as = get_mmucr_sts(vcpu);
483 unsigned int pid = get_mmucr_stid(vcpu);
484
485 ea = vcpu->arch.gpr[rb];
486 if (ra)
487 ea += vcpu->arch.gpr[ra];
488
489 gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
490 if (rc) {
491 if (gtlb_index < 0)
492 vcpu->arch.cr &= ~0x20000000;
493 else
494 vcpu->arch.cr |= 0x20000000;
495 }
496 vcpu->arch.gpr[rt] = gtlb_index;
497
498 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
499 return EMULATE_DONE;
251} 500}
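One constant worth spelling out: the 0x20000000 used by the tlbsx emulation above is the CR0[EQ] bit, consistent with tlbsx. setting CR0[EQ] on a TLB hit when the record bit is set. The assumed CR0 bit layout, MSB first:

#define CR0_LT 0x80000000u
#define CR0_GT 0x40000000u
#define CR0_EQ 0x20000000u   /* what kvmppc_44x_emul_tlbsx() sets/clears */
#define CR0_SO 0x10000000u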
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
index 2ccd46b6f6b7..772191f29e62 100644
--- a/arch/powerpc/kvm/44x_tlb.h
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -25,48 +25,52 @@
25 25
26extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, 26extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
27 unsigned int pid, unsigned int as); 27 unsigned int pid, unsigned int as);
28extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr); 28extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
29extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr); 29extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
30
31extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
32 u8 rc);
33extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
30 34
31/* TLB helper functions */ 35/* TLB helper functions */
32static inline unsigned int get_tlb_size(const struct tlbe *tlbe) 36static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
33{ 37{
34 return (tlbe->word0 >> 4) & 0xf; 38 return (tlbe->word0 >> 4) & 0xf;
35} 39}
36 40
37static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) 41static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe)
38{ 42{
39 return tlbe->word0 & 0xfffffc00; 43 return tlbe->word0 & 0xfffffc00;
40} 44}
41 45
42static inline gva_t get_tlb_bytes(const struct tlbe *tlbe) 46static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe)
43{ 47{
44 unsigned int pgsize = get_tlb_size(tlbe); 48 unsigned int pgsize = get_tlb_size(tlbe);
45 return 1 << 10 << (pgsize << 1); 49 return 1 << 10 << (pgsize << 1);
46} 50}
47 51
48static inline gva_t get_tlb_end(const struct tlbe *tlbe) 52static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe)
49{ 53{
50 return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1; 54 return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
51} 55}
52 56
53static inline u64 get_tlb_raddr(const struct tlbe *tlbe) 57static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe)
54{ 58{
55 u64 word1 = tlbe->word1; 59 u64 word1 = tlbe->word1;
56 return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00); 60 return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
57} 61}
58 62
59static inline unsigned int get_tlb_tid(const struct tlbe *tlbe) 63static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe)
60{ 64{
61 return tlbe->tid & 0xff; 65 return tlbe->tid & 0xff;
62} 66}
63 67
64static inline unsigned int get_tlb_ts(const struct tlbe *tlbe) 68static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe)
65{ 69{
66 return (tlbe->word0 >> 8) & 0x1; 70 return (tlbe->word0 >> 8) & 0x1;
67} 71}
68 72
69static inline unsigned int get_tlb_v(const struct tlbe *tlbe) 73static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe)
70{ 74{
71 return (tlbe->word0 >> 9) & 0x1; 75 return (tlbe->word0 >> 9) & 0x1;
72} 76}
@@ -81,7 +85,7 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
81 return (vcpu->arch.mmucr >> 16) & 0x1; 85 return (vcpu->arch.mmucr >> 16) & 0x1;
82} 86}
83 87
84static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr) 88static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe, gva_t eaddr)
85{ 89{
86 unsigned int pgmask = get_tlb_bytes(tlbe) - 1; 90 unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
87 91
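The size decode used by these helpers gives bytes = 1K * 4^SIZE (e.g. SIZE=1 is 4K and SIZE=7 is 16M, matching the PPC44x_TLB_4K and PPC44x_TLB_16M entries set up earlier in this series). A small standalone illustration of the same arithmetic as get_tlb_size()/get_tlb_bytes()/tlb_xlate(), using hypothetical addresses:

#include <stdint.h>

static uint64_t example_xlate(void)
{
        unsigned int size = 7;                        /* SIZE field: 1K * 4^7 = 16M */
        uint32_t bytes    = 1u << 10 << (size << 1);  /* 0x01000000 */
        uint32_t pgmask   = bytes - 1;                /* 0x00ffffff */
        uint64_t raddr    = 0x40000000;               /* real-address bits from word1 */
        uint32_t eaddr    = 0x00c01234;               /* guest effective address */

        return raddr | (eaddr & pgmask);              /* 0x40c01234 */
}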
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 53aaa66b25e5..6dbdc4817d80 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -15,27 +15,33 @@ menuconfig VIRTUALIZATION
15if VIRTUALIZATION 15if VIRTUALIZATION
16 16
17config KVM 17config KVM
18 bool "Kernel-based Virtual Machine (KVM) support" 18 bool
19 depends on 44x && EXPERIMENTAL
20 select PREEMPT_NOTIFIERS 19 select PREEMPT_NOTIFIERS
21 select ANON_INODES 20 select ANON_INODES
22 # We can only run on Book E hosts so far 21
23 select KVM_BOOKE_HOST 22config KVM_440
23 bool "KVM support for PowerPC 440 processors"
24 depends on EXPERIMENTAL && 44x
25 select KVM
24 ---help--- 26 ---help---
25 Support hosting virtualized guest machines. You will also 27 Support running unmodified 440 guest kernels in virtual machines on
26 need to select one or more of the processor modules below. 28 440 host processors.
27 29
28 This module provides access to the hardware capabilities through 30 This module provides access to the hardware capabilities through
29 a character device node named /dev/kvm. 31 a character device node named /dev/kvm.
30 32
31 If unsure, say N. 33 If unsure, say N.
32 34
33config KVM_BOOKE_HOST 35config KVM_EXIT_TIMING
34 bool "KVM host support for Book E PowerPC processors" 36 bool "Detailed exit timing"
35 depends on KVM && 44x 37 depends on KVM
36 ---help--- 38 ---help---
37 Provides host support for KVM on Book E PowerPC processors. Currently 39 Calculate elapsed time for every exit/enter cycle. A per-vcpu
38 this works on 440 processors only. 40 report is available in debugfs kvm/vm#_vcpu#_timing.
41 The overhead is relatively small, however it is not recommended for
42 production environments.
43
44 If unsure, say N.
39 45
40config KVM_TRACE 46config KVM_TRACE
41 bool "KVM trace support" 47 bool "KVM trace support"
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 2a5d4397ac4b..df7ba59e6d53 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -8,10 +8,16 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
8 8
9common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) 9common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o)
10 10
11kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o 11kvm-objs := $(common-objs-y) powerpc.o emulate.o
12obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
12obj-$(CONFIG_KVM) += kvm.o 13obj-$(CONFIG_KVM) += kvm.o
13 14
14AFLAGS_booke_interrupts.o := -I$(obj) 15AFLAGS_booke_interrupts.o := -I$(obj)
15 16
16kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o 17kvm-440-objs := \
17obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o 18 booke.o \
19 booke_interrupts.o \
20 44x.o \
21 44x_tlb.o \
22 44x_emulate.o
23obj-$(CONFIG_KVM_440) += kvm-440.o
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke.c
index 7b2591e26bae..35485dd6927e 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke.c
@@ -24,21 +24,26 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
26#include <linux/fs.h> 26#include <linux/fs.h>
27
27#include <asm/cputable.h> 28#include <asm/cputable.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/kvm_ppc.h> 30#include <asm/kvm_ppc.h>
31#include "timing.h"
32#include <asm/cacheflush.h>
33#include <asm/kvm_44x.h>
30 34
35#include "booke.h"
31#include "44x_tlb.h" 36#include "44x_tlb.h"
32 37
38unsigned long kvmppc_booke_handlers;
39
33#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM 40#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 41#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35 42
36struct kvm_stats_debugfs_item debugfs_entries[] = { 43struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "exits", VCPU_STAT(sum_exits) },
38 { "mmio", VCPU_STAT(mmio_exits) }, 44 { "mmio", VCPU_STAT(mmio_exits) },
39 { "dcr", VCPU_STAT(dcr_exits) }, 45 { "dcr", VCPU_STAT(dcr_exits) },
40 { "sig", VCPU_STAT(signal_exits) }, 46 { "sig", VCPU_STAT(signal_exits) },
41 { "light", VCPU_STAT(light_exits) },
42 { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, 47 { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
43 { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, 48 { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
44 { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, 49 { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
@@ -53,103 +58,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
53 { NULL } 58 { NULL }
54}; 59};
55 60
56static const u32 interrupt_msr_mask[16] = {
57 [BOOKE_INTERRUPT_CRITICAL] = MSR_ME,
58 [BOOKE_INTERRUPT_MACHINE_CHECK] = 0,
59 [BOOKE_INTERRUPT_DATA_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
60 [BOOKE_INTERRUPT_INST_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
61 [BOOKE_INTERRUPT_EXTERNAL] = MSR_CE|MSR_ME|MSR_DE,
62 [BOOKE_INTERRUPT_ALIGNMENT] = MSR_CE|MSR_ME|MSR_DE,
63 [BOOKE_INTERRUPT_PROGRAM] = MSR_CE|MSR_ME|MSR_DE,
64 [BOOKE_INTERRUPT_FP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
65 [BOOKE_INTERRUPT_SYSCALL] = MSR_CE|MSR_ME|MSR_DE,
66 [BOOKE_INTERRUPT_AP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
67 [BOOKE_INTERRUPT_DECREMENTER] = MSR_CE|MSR_ME|MSR_DE,
68 [BOOKE_INTERRUPT_FIT] = MSR_CE|MSR_ME|MSR_DE,
69 [BOOKE_INTERRUPT_WATCHDOG] = MSR_ME,
70 [BOOKE_INTERRUPT_DTLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
71 [BOOKE_INTERRUPT_ITLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
72 [BOOKE_INTERRUPT_DEBUG] = MSR_ME,
73};
74
75const unsigned char exception_priority[] = {
76 [BOOKE_INTERRUPT_DATA_STORAGE] = 0,
77 [BOOKE_INTERRUPT_INST_STORAGE] = 1,
78 [BOOKE_INTERRUPT_ALIGNMENT] = 2,
79 [BOOKE_INTERRUPT_PROGRAM] = 3,
80 [BOOKE_INTERRUPT_FP_UNAVAIL] = 4,
81 [BOOKE_INTERRUPT_SYSCALL] = 5,
82 [BOOKE_INTERRUPT_AP_UNAVAIL] = 6,
83 [BOOKE_INTERRUPT_DTLB_MISS] = 7,
84 [BOOKE_INTERRUPT_ITLB_MISS] = 8,
85 [BOOKE_INTERRUPT_MACHINE_CHECK] = 9,
86 [BOOKE_INTERRUPT_DEBUG] = 10,
87 [BOOKE_INTERRUPT_CRITICAL] = 11,
88 [BOOKE_INTERRUPT_WATCHDOG] = 12,
89 [BOOKE_INTERRUPT_EXTERNAL] = 13,
90 [BOOKE_INTERRUPT_FIT] = 14,
91 [BOOKE_INTERRUPT_DECREMENTER] = 15,
92};
93
94const unsigned char priority_exception[] = {
95 BOOKE_INTERRUPT_DATA_STORAGE,
96 BOOKE_INTERRUPT_INST_STORAGE,
97 BOOKE_INTERRUPT_ALIGNMENT,
98 BOOKE_INTERRUPT_PROGRAM,
99 BOOKE_INTERRUPT_FP_UNAVAIL,
100 BOOKE_INTERRUPT_SYSCALL,
101 BOOKE_INTERRUPT_AP_UNAVAIL,
102 BOOKE_INTERRUPT_DTLB_MISS,
103 BOOKE_INTERRUPT_ITLB_MISS,
104 BOOKE_INTERRUPT_MACHINE_CHECK,
105 BOOKE_INTERRUPT_DEBUG,
106 BOOKE_INTERRUPT_CRITICAL,
107 BOOKE_INTERRUPT_WATCHDOG,
108 BOOKE_INTERRUPT_EXTERNAL,
109 BOOKE_INTERRUPT_FIT,
110 BOOKE_INTERRUPT_DECREMENTER,
111};
112
113
114void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
115{
116 struct tlbe *tlbe;
117 int i;
118
119 printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
120 printk("| %2s | %3s | %8s | %8s | %8s |\n",
121 "nr", "tid", "word0", "word1", "word2");
122
123 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
124 tlbe = &vcpu->arch.guest_tlb[i];
125 if (tlbe->word0 & PPC44x_TLB_VALID)
126 printk(" G%2d | %02X | %08X | %08X | %08X |\n",
127 i, tlbe->tid, tlbe->word0, tlbe->word1,
128 tlbe->word2);
129 }
130
131 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
132 tlbe = &vcpu->arch.shadow_tlb[i];
133 if (tlbe->word0 & PPC44x_TLB_VALID)
134 printk(" S%2d | %02X | %08X | %08X | %08X |\n",
135 i, tlbe->tid, tlbe->word0, tlbe->word1,
136 tlbe->word2);
137 }
138}
139
140/* TODO: use vcpu_printf() */ 61/* TODO: use vcpu_printf() */
141void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) 62void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
142{ 63{
143 int i; 64 int i;
144 65
145 printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr); 66 printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
146 printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr); 67 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
147 printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1); 68 printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
148 69
149 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); 70 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
150 71
151 for (i = 0; i < 32; i += 4) { 72 for (i = 0; i < 32; i += 4) {
152 printk("gpr%02d: %08x %08x %08x %08x\n", i, 73 printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
153 vcpu->arch.gpr[i], 74 vcpu->arch.gpr[i],
154 vcpu->arch.gpr[i+1], 75 vcpu->arch.gpr[i+1],
155 vcpu->arch.gpr[i+2], 76 vcpu->arch.gpr[i+2],
@@ -157,69 +78,96 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
157 } 78 }
158} 79}
159 80
160/* Check if we are ready to deliver the interrupt */ 81static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
161static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt) 82 unsigned int priority)
162{ 83{
163 int r; 84 set_bit(priority, &vcpu->arch.pending_exceptions);
85}
164 86
165 switch (interrupt) { 87void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
166 case BOOKE_INTERRUPT_CRITICAL: 88{
167 r = vcpu->arch.msr & MSR_CE; 89 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
168 break; 90}
169 case BOOKE_INTERRUPT_MACHINE_CHECK: 91
170 r = vcpu->arch.msr & MSR_ME; 92void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
171 break; 93{
172 case BOOKE_INTERRUPT_EXTERNAL: 94 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
173 r = vcpu->arch.msr & MSR_EE; 95}
96
97int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
98{
99 return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
100}
101
102void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
103 struct kvm_interrupt *irq)
104{
105 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
106}
107
108/* Deliver the interrupt of the corresponding priority, if possible. */
109static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
110 unsigned int priority)
111{
112 int allowed = 0;
113 ulong msr_mask;
114
115 switch (priority) {
116 case BOOKE_IRQPRIO_PROGRAM:
117 case BOOKE_IRQPRIO_DTLB_MISS:
118 case BOOKE_IRQPRIO_ITLB_MISS:
119 case BOOKE_IRQPRIO_SYSCALL:
120 case BOOKE_IRQPRIO_DATA_STORAGE:
121 case BOOKE_IRQPRIO_INST_STORAGE:
122 case BOOKE_IRQPRIO_FP_UNAVAIL:
123 case BOOKE_IRQPRIO_AP_UNAVAIL:
124 case BOOKE_IRQPRIO_ALIGNMENT:
125 allowed = 1;
126 msr_mask = MSR_CE|MSR_ME|MSR_DE;
174 break; 127 break;
175 case BOOKE_INTERRUPT_DECREMENTER: 128 case BOOKE_IRQPRIO_CRITICAL:
176 r = vcpu->arch.msr & MSR_EE; 129 case BOOKE_IRQPRIO_WATCHDOG:
130 allowed = vcpu->arch.msr & MSR_CE;
131 msr_mask = MSR_ME;
177 break; 132 break;
178 case BOOKE_INTERRUPT_FIT: 133 case BOOKE_IRQPRIO_MACHINE_CHECK:
179 r = vcpu->arch.msr & MSR_EE; 134 allowed = vcpu->arch.msr & MSR_ME;
135 msr_mask = 0;
180 break; 136 break;
181 case BOOKE_INTERRUPT_WATCHDOG: 137 case BOOKE_IRQPRIO_EXTERNAL:
182 r = vcpu->arch.msr & MSR_CE; 138 case BOOKE_IRQPRIO_DECREMENTER:
139 case BOOKE_IRQPRIO_FIT:
140 allowed = vcpu->arch.msr & MSR_EE;
141 msr_mask = MSR_CE|MSR_ME|MSR_DE;
183 break; 142 break;
184 case BOOKE_INTERRUPT_DEBUG: 143 case BOOKE_IRQPRIO_DEBUG:
185 r = vcpu->arch.msr & MSR_DE; 144 allowed = vcpu->arch.msr & MSR_DE;
145 msr_mask = MSR_ME;
186 break; 146 break;
187 default:
188 r = 1;
189 } 147 }
190 148
191 return r; 149 if (allowed) {
192} 150 vcpu->arch.srr0 = vcpu->arch.pc;
151 vcpu->arch.srr1 = vcpu->arch.msr;
152 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
153 kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
193 154
194static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt) 155 clear_bit(priority, &vcpu->arch.pending_exceptions);
195{
196 switch (interrupt) {
197 case BOOKE_INTERRUPT_DECREMENTER:
198 vcpu->arch.tsr |= TSR_DIS;
199 break;
200 } 156 }
201 157
202 vcpu->arch.srr0 = vcpu->arch.pc; 158 return allowed;
203 vcpu->arch.srr1 = vcpu->arch.msr;
204 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt];
205 kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]);
206} 159}
207 160
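kvmppc_booke_irqprio_deliver() above is driven by a bitmap walk (see kvmppc_core_deliver_interrupts() just below), so a numerically lower BOOKE_IRQPRIO_* value wins when several interrupts are pending. A tiny standalone illustration, assuming the new numbering mirrors the exception_priority table this patch removes (DTLB_MISS = 7, DECREMENTER = 15):

#include <stdio.h>

int main(void)
{
        /* DTLB miss (priority 7) and decrementer (priority 15) both pending */
        unsigned long pending = (1UL << 7) | (1UL << 15);

        /* __ffs() in the kernel returns the lowest set bit; __builtin_ctzl()
         * does the same here, so the TLB miss is delivered first. */
        printf("delivered first: priority %d\n", __builtin_ctzl(pending));
        return 0;
}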
208/* Check pending exceptions and deliver one, if possible. */ 161/* Check pending exceptions and deliver one, if possible. */
209void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu) 162void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
210{ 163{
211 unsigned long *pending = &vcpu->arch.pending_exceptions; 164 unsigned long *pending = &vcpu->arch.pending_exceptions;
212 unsigned int exception;
213 unsigned int priority; 165 unsigned int priority;
214 166
215 priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending)); 167 priority = __ffs(*pending);
216 while (priority <= BOOKE_MAX_INTERRUPT) { 168 while (priority <= BOOKE_MAX_INTERRUPT) {
217 exception = priority_exception[priority]; 169 if (kvmppc_booke_irqprio_deliver(vcpu, priority))
218 if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
219 kvmppc_clear_exception(vcpu, exception);
220 kvmppc_deliver_interrupt(vcpu, exception);
221 break; 170 break;
222 }
223 171
224 priority = find_next_bit(pending, 172 priority = find_next_bit(pending,
225 BITS_PER_BYTE * sizeof(*pending), 173 BITS_PER_BYTE * sizeof(*pending),
@@ -238,6 +186,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
238 enum emulation_result er; 186 enum emulation_result er;
239 int r = RESUME_HOST; 187 int r = RESUME_HOST;
240 188
189 /* update before a new last_exit_type is rewritten */
190 kvmppc_update_timing_stats(vcpu);
191
241 local_irq_enable(); 192 local_irq_enable();
242 193
243 run->exit_reason = KVM_EXIT_UNKNOWN; 194 run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -251,21 +202,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
251 break; 202 break;
252 203
253 case BOOKE_INTERRUPT_EXTERNAL: 204 case BOOKE_INTERRUPT_EXTERNAL:
205 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
206 if (need_resched())
207 cond_resched();
208 r = RESUME_GUEST;
209 break;
210
254 case BOOKE_INTERRUPT_DECREMENTER: 211 case BOOKE_INTERRUPT_DECREMENTER:
255 /* Since we switched IVPR back to the host's value, the host 212 /* Since we switched IVPR back to the host's value, the host
256 * handled this interrupt the moment we enabled interrupts. 213 * handled this interrupt the moment we enabled interrupts.
257 * Now we just offer it a chance to reschedule the guest. */ 214 * Now we just offer it a chance to reschedule the guest. */
258 215 kvmppc_account_exit(vcpu, DEC_EXITS);
259 /* XXX At this point the TLB still holds our shadow TLB, so if
260 * we do reschedule the host will fault over it. Perhaps we
261 * should politely restore the host's entries to minimize
262 * misses before ceding control. */
263 if (need_resched()) 216 if (need_resched())
264 cond_resched(); 217 cond_resched();
265 if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
266 vcpu->stat.dec_exits++;
267 else
268 vcpu->stat.ext_intr_exits++;
269 r = RESUME_GUEST; 218 r = RESUME_GUEST;
270 break; 219 break;
271 220
@@ -274,17 +223,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
274 /* Program traps generated by user-level software must be handled 223 /* Program traps generated by user-level software must be handled
275 * by the guest kernel. */ 224 * by the guest kernel. */
276 vcpu->arch.esr = vcpu->arch.fault_esr; 225 vcpu->arch.esr = vcpu->arch.fault_esr;
277 kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM); 226 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
278 r = RESUME_GUEST; 227 r = RESUME_GUEST;
228 kvmppc_account_exit(vcpu, USR_PR_INST);
279 break; 229 break;
280 } 230 }
281 231
282 er = kvmppc_emulate_instruction(run, vcpu); 232 er = kvmppc_emulate_instruction(run, vcpu);
283 switch (er) { 233 switch (er) {
284 case EMULATE_DONE: 234 case EMULATE_DONE:
235 /* don't overwrite subtypes, just account kvm_stats */
236 kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
285 /* Future optimization: only reload non-volatiles if 237 /* Future optimization: only reload non-volatiles if
286 * they were actually modified by emulation. */ 238 * they were actually modified by emulation. */
287 vcpu->stat.emulated_inst_exits++;
288 r = RESUME_GUEST_NV; 239 r = RESUME_GUEST_NV;
289 break; 240 break;
290 case EMULATE_DO_DCR: 241 case EMULATE_DO_DCR:
@@ -293,7 +244,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
293 break; 244 break;
294 case EMULATE_FAIL: 245 case EMULATE_FAIL:
295 /* XXX Deliver Program interrupt to guest. */ 246 /* XXX Deliver Program interrupt to guest. */
296 printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n", 247 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
297 __func__, vcpu->arch.pc, vcpu->arch.last_inst); 248 __func__, vcpu->arch.pc, vcpu->arch.last_inst);
298 /* For debugging, encode the failing instruction and 249 /* For debugging, encode the failing instruction and
299 * report it to userspace. */ 250 * report it to userspace. */
@@ -307,48 +258,53 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
307 break; 258 break;
308 259
309 case BOOKE_INTERRUPT_FP_UNAVAIL: 260 case BOOKE_INTERRUPT_FP_UNAVAIL:
310 kvmppc_queue_exception(vcpu, exit_nr); 261 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
262 kvmppc_account_exit(vcpu, FP_UNAVAIL);
311 r = RESUME_GUEST; 263 r = RESUME_GUEST;
312 break; 264 break;
313 265
314 case BOOKE_INTERRUPT_DATA_STORAGE: 266 case BOOKE_INTERRUPT_DATA_STORAGE:
315 vcpu->arch.dear = vcpu->arch.fault_dear; 267 vcpu->arch.dear = vcpu->arch.fault_dear;
316 vcpu->arch.esr = vcpu->arch.fault_esr; 268 vcpu->arch.esr = vcpu->arch.fault_esr;
317 kvmppc_queue_exception(vcpu, exit_nr); 269 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
318 vcpu->stat.dsi_exits++; 270 kvmppc_account_exit(vcpu, DSI_EXITS);
319 r = RESUME_GUEST; 271 r = RESUME_GUEST;
320 break; 272 break;
321 273
322 case BOOKE_INTERRUPT_INST_STORAGE: 274 case BOOKE_INTERRUPT_INST_STORAGE:
323 vcpu->arch.esr = vcpu->arch.fault_esr; 275 vcpu->arch.esr = vcpu->arch.fault_esr;
324 kvmppc_queue_exception(vcpu, exit_nr); 276 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
325 vcpu->stat.isi_exits++; 277 kvmppc_account_exit(vcpu, ISI_EXITS);
326 r = RESUME_GUEST; 278 r = RESUME_GUEST;
327 break; 279 break;
328 280
329 case BOOKE_INTERRUPT_SYSCALL: 281 case BOOKE_INTERRUPT_SYSCALL:
330 kvmppc_queue_exception(vcpu, exit_nr); 282 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
331 vcpu->stat.syscall_exits++; 283 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
332 r = RESUME_GUEST; 284 r = RESUME_GUEST;
333 break; 285 break;
334 286
287 /* XXX move to a 440-specific file. */
335 case BOOKE_INTERRUPT_DTLB_MISS: { 288 case BOOKE_INTERRUPT_DTLB_MISS: {
336 struct tlbe *gtlbe; 289 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
290 struct kvmppc_44x_tlbe *gtlbe;
337 unsigned long eaddr = vcpu->arch.fault_dear; 291 unsigned long eaddr = vcpu->arch.fault_dear;
292 int gtlb_index;
338 gfn_t gfn; 293 gfn_t gfn;
339 294
340 /* Check the guest TLB. */ 295 /* Check the guest TLB. */
341 gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr); 296 gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr);
342 if (!gtlbe) { 297 if (gtlb_index < 0) {
343 /* The guest didn't have a mapping for it. */ 298 /* The guest didn't have a mapping for it. */
344 kvmppc_queue_exception(vcpu, exit_nr); 299 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
345 vcpu->arch.dear = vcpu->arch.fault_dear; 300 vcpu->arch.dear = vcpu->arch.fault_dear;
346 vcpu->arch.esr = vcpu->arch.fault_esr; 301 vcpu->arch.esr = vcpu->arch.fault_esr;
347 vcpu->stat.dtlb_real_miss_exits++; 302 kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
348 r = RESUME_GUEST; 303 r = RESUME_GUEST;
349 break; 304 break;
350 } 305 }
351 306
307 gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
352 vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr); 308 vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
353 gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT; 309 gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
354 310
@@ -359,38 +315,45 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
359 * b) the guest used a large mapping which we're faking 315 * b) the guest used a large mapping which we're faking
360 * Either way, we need to satisfy the fault without 316 * Either way, we need to satisfy the fault without
361 * invoking the guest. */ 317 * invoking the guest. */
362 kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid, 318 kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid,
363 gtlbe->word2); 319 gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
364 vcpu->stat.dtlb_virt_miss_exits++; 320 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
365 r = RESUME_GUEST; 321 r = RESUME_GUEST;
366 } else { 322 } else {
367 /* Guest has mapped and accessed a page which is not 323 /* Guest has mapped and accessed a page which is not
368 * actually RAM. */ 324 * actually RAM. */
369 r = kvmppc_emulate_mmio(run, vcpu); 325 r = kvmppc_emulate_mmio(run, vcpu);
326 kvmppc_account_exit(vcpu, MMIO_EXITS);
370 } 327 }
371 328
372 break; 329 break;
373 } 330 }
374 331
332 /* XXX move to a 440-specific file. */
375 case BOOKE_INTERRUPT_ITLB_MISS: { 333 case BOOKE_INTERRUPT_ITLB_MISS: {
376 struct tlbe *gtlbe; 334 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
335 struct kvmppc_44x_tlbe *gtlbe;
377 unsigned long eaddr = vcpu->arch.pc; 336 unsigned long eaddr = vcpu->arch.pc;
337 gpa_t gpaddr;
378 gfn_t gfn; 338 gfn_t gfn;
339 int gtlb_index;
379 340
380 r = RESUME_GUEST; 341 r = RESUME_GUEST;
381 342
382 /* Check the guest TLB. */ 343 /* Check the guest TLB. */
383 gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr); 344 gtlb_index = kvmppc_44x_itlb_index(vcpu, eaddr);
384 if (!gtlbe) { 345 if (gtlb_index < 0) {
385 /* The guest didn't have a mapping for it. */ 346 /* The guest didn't have a mapping for it. */
386 kvmppc_queue_exception(vcpu, exit_nr); 347 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
387 vcpu->stat.itlb_real_miss_exits++; 348 kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
388 break; 349 break;
389 } 350 }
390 351
391 vcpu->stat.itlb_virt_miss_exits++; 352 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
392 353
393 gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT; 354 gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
355 gpaddr = tlb_xlate(gtlbe, eaddr);
356 gfn = gpaddr >> PAGE_SHIFT;
394 357
395 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { 358 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
396 /* The guest TLB had a mapping, but the shadow TLB 359 /* The guest TLB had a mapping, but the shadow TLB
@@ -399,12 +362,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
399 * b) the guest used a large mapping which we're faking 362 * b) the guest used a large mapping which we're faking
400 * Either way, we need to satisfy the fault without 363 * Either way, we need to satisfy the fault without
401 * invoking the guest. */ 364 * invoking the guest. */
402 kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid, 365 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
403 gtlbe->word2); 366 gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
404 } else { 367 } else {
405 /* Guest mapped and leaped at non-RAM! */ 368 /* Guest mapped and leaped at non-RAM! */
406 kvmppc_queue_exception(vcpu, 369 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
407 BOOKE_INTERRUPT_MACHINE_CHECK);
408 } 370 }
409 371
410 break; 372 break;
@@ -421,6 +383,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
421 mtspr(SPRN_DBSR, dbsr); 383 mtspr(SPRN_DBSR, dbsr);
422 384
423 run->exit_reason = KVM_EXIT_DEBUG; 385 run->exit_reason = KVM_EXIT_DEBUG;
386 kvmppc_account_exit(vcpu, DEBUG_EXITS);
424 r = RESUME_HOST; 387 r = RESUME_HOST;
425 break; 388 break;
426 } 389 }
@@ -432,10 +395,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
432 395
433 local_irq_disable(); 396 local_irq_disable();
434 397
435 kvmppc_check_and_deliver_interrupts(vcpu); 398 kvmppc_core_deliver_interrupts(vcpu);
436 399
437 /* Do some exit accounting. */
438 vcpu->stat.sum_exits++;
439 if (!(r & RESUME_HOST)) { 400 if (!(r & RESUME_HOST)) {
440 /* To avoid clobbering exit_reason, only check for signals if 401 /* To avoid clobbering exit_reason, only check for signals if
441 * we aren't already exiting to userspace for some other 402 * we aren't already exiting to userspace for some other
@@ -443,22 +404,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
443 if (signal_pending(current)) { 404 if (signal_pending(current)) {
444 run->exit_reason = KVM_EXIT_INTR; 405 run->exit_reason = KVM_EXIT_INTR;
445 r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); 406 r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
446 407 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
447 vcpu->stat.signal_exits++;
448 } else {
449 vcpu->stat.light_exits++;
450 }
451 } else {
452 switch (run->exit_reason) {
453 case KVM_EXIT_MMIO:
454 vcpu->stat.mmio_exits++;
455 break;
456 case KVM_EXIT_DCR:
457 vcpu->stat.dcr_exits++;
458 break;
459 case KVM_EXIT_INTR:
460 vcpu->stat.signal_exits++;
461 break;
462 } 408 }
463 } 409 }
464 410
@@ -468,20 +414,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
468/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ 414/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
469int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 415int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
470{ 416{
471 struct tlbe *tlbe = &vcpu->arch.guest_tlb[0];
472
473 tlbe->tid = 0;
474 tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
475 tlbe->word1 = 0;
476 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
477
478 tlbe++;
479 tlbe->tid = 0;
480 tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
481 tlbe->word1 = 0xef600000;
482 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
483 | PPC44x_TLB_I | PPC44x_TLB_G;
484
485 vcpu->arch.pc = 0; 417 vcpu->arch.pc = 0;
486 vcpu->arch.msr = 0; 418 vcpu->arch.msr = 0;
487 vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ 419 vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
@@ -492,12 +424,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
492 * before it's programmed its own IVPR. */ 424 * before it's programmed its own IVPR. */
493 vcpu->arch.ivpr = 0x55550000; 425 vcpu->arch.ivpr = 0x55550000;
494 426
495 /* Since the guest can directly access the timebase, it must know the 427 kvmppc_init_timing_stats(vcpu);
496 * real timebase frequency. Accordingly, it must see the state of
497 * CCR1[TCS]. */
498 vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
499 428
500 return 0; 429 return kvmppc_core_vcpu_setup(vcpu);
501} 430}
502 431
503int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 432int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -536,7 +465,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
536 vcpu->arch.ctr = regs->ctr; 465 vcpu->arch.ctr = regs->ctr;
537 vcpu->arch.lr = regs->lr; 466 vcpu->arch.lr = regs->lr;
538 vcpu->arch.xer = regs->xer; 467 vcpu->arch.xer = regs->xer;
539 vcpu->arch.msr = regs->msr; 468 kvmppc_set_msr(vcpu, regs->msr);
540 vcpu->arch.srr0 = regs->srr0; 469 vcpu->arch.srr0 = regs->srr0;
541 vcpu->arch.srr1 = regs->srr1; 470 vcpu->arch.srr1 = regs->srr1;
542 vcpu->arch.sprg0 = regs->sprg0; 471 vcpu->arch.sprg0 = regs->sprg0;
@@ -575,31 +504,62 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
575 return -ENOTSUPP; 504 return -ENOTSUPP;
576} 505}
577 506
578/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
579int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 507int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
580 struct kvm_translation *tr) 508 struct kvm_translation *tr)
581{ 509{
582 struct tlbe *gtlbe; 510 return kvmppc_core_vcpu_translate(vcpu, tr);
583 int index; 511}
584 gva_t eaddr;
585 u8 pid;
586 u8 as;
587
588 eaddr = tr->linear_address;
589 pid = (tr->linear_address >> 32) & 0xff;
590 as = (tr->linear_address >> 40) & 0x1;
591
592 index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
593 if (index == -1) {
594 tr->valid = 0;
595 return 0;
596 }
597 512
598 gtlbe = &vcpu->arch.guest_tlb[index]; 513int kvmppc_booke_init(void)
514{
515 unsigned long ivor[16];
516 unsigned long max_ivor = 0;
517 int i;
599 518
600 tr->physical_address = tlb_xlate(gtlbe, eaddr); 519 /* We install our own exception handlers by hijacking IVPR. IVPR must
601 /* XXX what does "writeable" and "usermode" even mean? */ 520 * be 16-bit aligned, so we need a 64KB allocation. */
602 tr->valid = 1; 521 kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
522 VCPU_SIZE_ORDER);
523 if (!kvmppc_booke_handlers)
524 return -ENOMEM;
525
526 /* XXX make sure our handlers are smaller than Linux's */
527
528 /* Copy our interrupt handlers to match host IVORs. That way we don't
529 * have to swap the IVORs on every guest/host transition. */
530 ivor[0] = mfspr(SPRN_IVOR0);
531 ivor[1] = mfspr(SPRN_IVOR1);
532 ivor[2] = mfspr(SPRN_IVOR2);
533 ivor[3] = mfspr(SPRN_IVOR3);
534 ivor[4] = mfspr(SPRN_IVOR4);
535 ivor[5] = mfspr(SPRN_IVOR5);
536 ivor[6] = mfspr(SPRN_IVOR6);
537 ivor[7] = mfspr(SPRN_IVOR7);
538 ivor[8] = mfspr(SPRN_IVOR8);
539 ivor[9] = mfspr(SPRN_IVOR9);
540 ivor[10] = mfspr(SPRN_IVOR10);
541 ivor[11] = mfspr(SPRN_IVOR11);
542 ivor[12] = mfspr(SPRN_IVOR12);
543 ivor[13] = mfspr(SPRN_IVOR13);
544 ivor[14] = mfspr(SPRN_IVOR14);
545 ivor[15] = mfspr(SPRN_IVOR15);
546
547 for (i = 0; i < 16; i++) {
548 if (ivor[i] > max_ivor)
549 max_ivor = ivor[i];
550
551 memcpy((void *)kvmppc_booke_handlers + ivor[i],
552 kvmppc_handlers_start + i * kvmppc_handler_len,
553 kvmppc_handler_len);
554 }
555 flush_icache_range(kvmppc_booke_handlers,
556 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
603 557
604 return 0; 558 return 0;
605} 559}
560
561void __exit kvmppc_booke_exit(void)
562{
563 free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
564 kvm_exit();
565}
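kvmppc_booke_init() above copies KVM's handlers into a single 64KB allocation at the same IVOR offsets the host uses, so only IVPR has to change on guest entry and exit; guest-side delivery then forms the vector address exactly as the hardware does, by OR-ing the IVPR prefix with the per-interrupt IVOR offset. A short illustration of that address composition (0x55550000 is the placeholder IVPR programmed in kvm_arch_vcpu_setup() above; the 0x1100 offset is only an example value):

#include <stdio.h>

int main(void)
{
	unsigned int ivpr = 0x55550000;	/* vector prefix: only the upper 16 bits matter */
	unsigned int ivor = 0x1100;	/* example per-interrupt offset within that 64KB */

	/* Same composition as "vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]". */
	unsigned int handler = ivpr | ivor;

	printf("exception vectors to 0x%08x\n", handler);	/* 0x55551100 */
	return 0;
}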
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
new file mode 100644
index 000000000000..cf7c94ca24bf
--- /dev/null
+++ b/arch/powerpc/kvm/booke.h
@@ -0,0 +1,60 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __KVM_BOOKE_H__
21#define __KVM_BOOKE_H__
22
23#include <linux/types.h>
24#include <linux/kvm_host.h>
25#include "timing.h"
26
27/* interrupt priority ordering */
28#define BOOKE_IRQPRIO_DATA_STORAGE 0
29#define BOOKE_IRQPRIO_INST_STORAGE 1
30#define BOOKE_IRQPRIO_ALIGNMENT 2
31#define BOOKE_IRQPRIO_PROGRAM 3
32#define BOOKE_IRQPRIO_FP_UNAVAIL 4
33#define BOOKE_IRQPRIO_SYSCALL 5
34#define BOOKE_IRQPRIO_AP_UNAVAIL 6
35#define BOOKE_IRQPRIO_DTLB_MISS 7
36#define BOOKE_IRQPRIO_ITLB_MISS 8
37#define BOOKE_IRQPRIO_MACHINE_CHECK 9
38#define BOOKE_IRQPRIO_DEBUG 10
39#define BOOKE_IRQPRIO_CRITICAL 11
40#define BOOKE_IRQPRIO_WATCHDOG 12
41#define BOOKE_IRQPRIO_EXTERNAL 13
42#define BOOKE_IRQPRIO_FIT 14
43#define BOOKE_IRQPRIO_DECREMENTER 15
44
45/* Helper function for "full" MSR writes. No need to call this if only EE is
46 * changing. */
47static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
48{
49 if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
50 kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
51
52 vcpu->arch.msr = new_msr;
53
54 if (vcpu->arch.msr & MSR_WE) {
55 kvm_vcpu_block(vcpu);
56 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
57 };
58}
59
60#endif /* __KVM_BOOKE_H__ */
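kvmppc_set_msr() above is the single funnel for "full" guest MSR updates: it switches the shadow MMU between user and supervisor mappings when MSR[PR] changes, and blocks the vcpu when the guest sets MSR[WE] to wait for an interrupt. Core-specific emulation is expected to route MSR-restoring instructions through it; as a sketch, mirroring the rfi handling that this series moves out of the generic emulator and using the vcpu field names from this patch:

/* Sketch of rfi emulation built on the helper above: restore the return
 * address from SRR0 and the machine state from SRR1, letting
 * kvmppc_set_msr() handle any MSR[PR] privilege switch or MSR[WE] wait. */
static void kvmppc_emul_rfi_sketch(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = vcpu->arch.srr0;
	kvmppc_set_msr(vcpu, vcpu->arch.srr1);
}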
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
deleted file mode 100644
index b480341bc31e..000000000000
--- a/arch/powerpc/kvm/booke_host.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/errno.h>
21#include <linux/kvm_host.h>
22#include <linux/module.h>
23#include <asm/cacheflush.h>
24#include <asm/kvm_ppc.h>
25
26unsigned long kvmppc_booke_handlers;
27
28static int kvmppc_booke_init(void)
29{
30 unsigned long ivor[16];
31 unsigned long max_ivor = 0;
32 int i;
33
34 /* We install our own exception handlers by hijacking IVPR. IVPR must
35 * be 16-bit aligned, so we need a 64KB allocation. */
36 kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
37 VCPU_SIZE_ORDER);
38 if (!kvmppc_booke_handlers)
39 return -ENOMEM;
40
41 /* XXX make sure our handlers are smaller than Linux's */
42
43 /* Copy our interrupt handlers to match host IVORs. That way we don't
44 * have to swap the IVORs on every guest/host transition. */
45 ivor[0] = mfspr(SPRN_IVOR0);
46 ivor[1] = mfspr(SPRN_IVOR1);
47 ivor[2] = mfspr(SPRN_IVOR2);
48 ivor[3] = mfspr(SPRN_IVOR3);
49 ivor[4] = mfspr(SPRN_IVOR4);
50 ivor[5] = mfspr(SPRN_IVOR5);
51 ivor[6] = mfspr(SPRN_IVOR6);
52 ivor[7] = mfspr(SPRN_IVOR7);
53 ivor[8] = mfspr(SPRN_IVOR8);
54 ivor[9] = mfspr(SPRN_IVOR9);
55 ivor[10] = mfspr(SPRN_IVOR10);
56 ivor[11] = mfspr(SPRN_IVOR11);
57 ivor[12] = mfspr(SPRN_IVOR12);
58 ivor[13] = mfspr(SPRN_IVOR13);
59 ivor[14] = mfspr(SPRN_IVOR14);
60 ivor[15] = mfspr(SPRN_IVOR15);
61
62 for (i = 0; i < 16; i++) {
63 if (ivor[i] > max_ivor)
64 max_ivor = ivor[i];
65
66 memcpy((void *)kvmppc_booke_handlers + ivor[i],
67 kvmppc_handlers_start + i * kvmppc_handler_len,
68 kvmppc_handler_len);
69 }
70 flush_icache_range(kvmppc_booke_handlers,
71 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
72
73 return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
74}
75
76static void __exit kvmppc_booke_exit(void)
77{
78 free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
79 kvm_exit();
80}
81
82module_init(kvmppc_booke_init)
83module_exit(kvmppc_booke_exit)
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 95e165baf85f..084ebcd7dd83 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -107,6 +107,18 @@ _GLOBAL(kvmppc_resume_host)
107 li r6, 1 107 li r6, 1
108 slw r6, r6, r5 108 slw r6, r6, r5
109 109
110#ifdef CONFIG_KVM_EXIT_TIMING
111 /* save exit time */
1121:
113 mfspr r7, SPRN_TBRU
114 mfspr r8, SPRN_TBRL
115 mfspr r9, SPRN_TBRU
116 cmpw r9, r7
117 bne 1b
118 stw r8, VCPU_TIMING_EXIT_TBL(r4)
119 stw r9, VCPU_TIMING_EXIT_TBU(r4)
120#endif
121
110 /* Save the faulting instruction and all GPRs for emulation. */ 122 /* Save the faulting instruction and all GPRs for emulation. */
111 andi. r7, r6, NEED_INST_MASK 123 andi. r7, r6, NEED_INST_MASK
112 beq ..skip_inst_copy 124 beq ..skip_inst_copy
@@ -335,54 +347,6 @@ lightweight_exit:
335 lwz r3, VCPU_SHADOW_PID(r4) 347 lwz r3, VCPU_SHADOW_PID(r4)
336 mtspr SPRN_PID, r3 348 mtspr SPRN_PID, r3
337 349
338 /* Prevent all asynchronous TLB updates. */
339 mfmsr r5
340 lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
341 ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
342 andc r6, r5, r6
343 mtmsr r6
344
345 /* Load the guest mappings, leaving the host's "pinned" kernel mappings
346 * in place. */
347 mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
348 li r5, PPC44x_TLB_SIZE
349 lis r5, tlb_44x_hwater@ha
350 lwz r5, tlb_44x_hwater@l(r5)
351 mtctr r5
352 addi r9, r4, VCPU_SHADOW_TLB
353 addi r5, r4, VCPU_SHADOW_MOD
354 li r3, 0
3551:
356 lbzx r7, r3, r5
357 cmpwi r7, 0
358 beq 3f
359
360 /* Load guest entry. */
361 mulli r11, r3, TLBE_BYTES
362 add r11, r11, r9
363 lwz r7, 0(r11)
364 mtspr SPRN_MMUCR, r7
365 lwz r7, 4(r11)
366 tlbwe r7, r3, PPC44x_TLB_PAGEID
367 lwz r7, 8(r11)
368 tlbwe r7, r3, PPC44x_TLB_XLAT
369 lwz r7, 12(r11)
370 tlbwe r7, r3, PPC44x_TLB_ATTRIB
3713:
372 addi r3, r3, 1 /* Increment index. */
373 bdnz 1b
374
375 mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
376
377 /* Clear bitmap of modified TLB entries */
378 li r5, PPC44x_TLB_SIZE>>2
379 mtctr r5
380 addi r5, r4, VCPU_SHADOW_MOD - 4
381 li r6, 0
3821:
383 stwu r6, 4(r5)
384 bdnz 1b
385
386 iccci 0, 0 /* XXX hack */ 350 iccci 0, 0 /* XXX hack */
387 351
388 /* Load some guest volatiles. */ 352 /* Load some guest volatiles. */
@@ -423,6 +387,18 @@ lightweight_exit:
423 lwz r3, VCPU_SPRG7(r4) 387 lwz r3, VCPU_SPRG7(r4)
424 mtspr SPRN_SPRG7, r3 388 mtspr SPRN_SPRG7, r3
425 389
390#ifdef CONFIG_KVM_EXIT_TIMING
391 /* save enter time */
3921:
393 mfspr r6, SPRN_TBRU
394 mfspr r7, SPRN_TBRL
395 mfspr r8, SPRN_TBRU
396 cmpw r8, r6
397 bne 1b
398 stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
399 stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
400#endif
401
426 /* Finish loading guest volatiles and jump to guest. */ 402 /* Finish loading guest volatiles and jump to guest. */
427 lwz r3, VCPU_CTR(r4) 403 lwz r3, VCPU_CTR(r4)
428 mtctr r3 404 mtctr r3
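The CONFIG_KVM_EXIT_TIMING stanzas added above timestamp every exit and entry with the 64-bit timebase. On 32-bit Book E the timebase is read as two halves (TBU/TBL), so the code reads TBU, then TBL, then TBU again and retries if the upper half changed, which guards against a carry between the two reads. The same pattern in C (a generic sketch; read_tbu()/read_tbl() stand in for the mfspr accesses):

#include <stdint.h>

/* Stand-ins for mfspr(SPRN_TBRU) / mfspr(SPRN_TBRL); platform-specific. */
extern uint32_t read_tbu(void);
extern uint32_t read_tbl(void);

/* Read a 64-bit timebase from two 32-bit halves without tearing:
 * if TBU changed while we read TBL, a carry happened, so retry. */
static uint64_t read_timebase64(void)
{
	uint32_t hi, lo, hi2;

	do {
		hi  = read_tbu();
		lo  = read_tbl();
		hi2 = read_tbu();
	} while (hi2 != hi);

	return ((uint64_t)hi << 32) | lo;
}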
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 0fce4fbdc20d..d1d38daa93fb 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -23,161 +23,14 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
25 25
26#include <asm/dcr.h> 26#include <asm/reg.h>
27#include <asm/dcr-regs.h>
28#include <asm/time.h> 27#include <asm/time.h>
29#include <asm/byteorder.h> 28#include <asm/byteorder.h>
30#include <asm/kvm_ppc.h> 29#include <asm/kvm_ppc.h>
30#include <asm/disassemble.h>
31#include "timing.h"
31 32
32#include "44x_tlb.h" 33void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
33
34/* Instruction decoding */
35static inline unsigned int get_op(u32 inst)
36{
37 return inst >> 26;
38}
39
40static inline unsigned int get_xop(u32 inst)
41{
42 return (inst >> 1) & 0x3ff;
43}
44
45static inline unsigned int get_sprn(u32 inst)
46{
47 return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
48}
49
50static inline unsigned int get_dcrn(u32 inst)
51{
52 return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
53}
54
55static inline unsigned int get_rt(u32 inst)
56{
57 return (inst >> 21) & 0x1f;
58}
59
60static inline unsigned int get_rs(u32 inst)
61{
62 return (inst >> 21) & 0x1f;
63}
64
65static inline unsigned int get_ra(u32 inst)
66{
67 return (inst >> 16) & 0x1f;
68}
69
70static inline unsigned int get_rb(u32 inst)
71{
72 return (inst >> 11) & 0x1f;
73}
74
75static inline unsigned int get_rc(u32 inst)
76{
77 return inst & 0x1;
78}
79
80static inline unsigned int get_ws(u32 inst)
81{
82 return (inst >> 11) & 0x1f;
83}
84
85static inline unsigned int get_d(u32 inst)
86{
87 return inst & 0xffff;
88}
89
90static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
91 const struct tlbe *tlbe)
92{
93 gpa_t gpa;
94
95 if (!get_tlb_v(tlbe))
96 return 0;
97
98 /* Does it match current guest AS? */
99 /* XXX what about IS != DS? */
100 if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
101 return 0;
102
103 gpa = get_tlb_raddr(tlbe);
104 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
105 /* Mapping is not for RAM. */
106 return 0;
107
108 return 1;
109}
110
111static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
112{
113 u64 eaddr;
114 u64 raddr;
115 u64 asid;
116 u32 flags;
117 struct tlbe *tlbe;
118 unsigned int ra;
119 unsigned int rs;
120 unsigned int ws;
121 unsigned int index;
122
123 ra = get_ra(inst);
124 rs = get_rs(inst);
125 ws = get_ws(inst);
126
127 index = vcpu->arch.gpr[ra];
128 if (index > PPC44x_TLB_SIZE) {
129 printk("%s: index %d\n", __func__, index);
130 kvmppc_dump_vcpu(vcpu);
131 return EMULATE_FAIL;
132 }
133
134 tlbe = &vcpu->arch.guest_tlb[index];
135
136 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
137 if (tlbe->word0 & PPC44x_TLB_VALID) {
138 eaddr = get_tlb_eaddr(tlbe);
139 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
140 kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
141 }
142
143 switch (ws) {
144 case PPC44x_TLB_PAGEID:
145 tlbe->tid = vcpu->arch.mmucr & 0xff;
146 tlbe->word0 = vcpu->arch.gpr[rs];
147 break;
148
149 case PPC44x_TLB_XLAT:
150 tlbe->word1 = vcpu->arch.gpr[rs];
151 break;
152
153 case PPC44x_TLB_ATTRIB:
154 tlbe->word2 = vcpu->arch.gpr[rs];
155 break;
156
157 default:
158 return EMULATE_FAIL;
159 }
160
161 if (tlbe_is_host_safe(vcpu, tlbe)) {
162 eaddr = get_tlb_eaddr(tlbe);
163 raddr = get_tlb_raddr(tlbe);
164 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
165 flags = tlbe->word2 & 0xffff;
166
167 /* Create a 4KB mapping on the host. If the guest wanted a
168 * large page, only the first 4KB is mapped here and the rest
169 * are mapped on the fly. */
170 kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
171 }
172
173 KVMTRACE_5D(GTLB_WRITE, vcpu, index,
174 tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
175 handler);
176
177 return EMULATE_DONE;
178}
179
180static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
181{ 34{
182 if (vcpu->arch.tcr & TCR_DIE) { 35 if (vcpu->arch.tcr & TCR_DIE) {
183 /* The decrementer ticks at the same rate as the timebase, so 36 /* The decrementer ticks at the same rate as the timebase, so
@@ -193,12 +46,6 @@ static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
193 } 46 }
194} 47}
195 48
196static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
197{
198 vcpu->arch.pc = vcpu->arch.srr0;
199 kvmppc_set_msr(vcpu, vcpu->arch.srr1);
200}
201
202/* XXX to do: 49/* XXX to do:
203 * lhax 50 * lhax
204 * lhaux 51 * lhaux
@@ -213,40 +60,30 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
213 * 60 *
214 * XXX is_bigendian should depend on MMU mapping or MSR[LE] 61 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
215 */ 62 */
63/* XXX Should probably auto-generate instruction decoding for a particular core
64 * from opcode tables in the future. */
216int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 65int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
217{ 66{
218 u32 inst = vcpu->arch.last_inst; 67 u32 inst = vcpu->arch.last_inst;
219 u32 ea; 68 u32 ea;
220 int ra; 69 int ra;
221 int rb; 70 int rb;
222 int rc;
223 int rs; 71 int rs;
224 int rt; 72 int rt;
225 int sprn; 73 int sprn;
226 int dcrn;
227 enum emulation_result emulated = EMULATE_DONE; 74 enum emulation_result emulated = EMULATE_DONE;
228 int advance = 1; 75 int advance = 1;
229 76
77 /* this default type might be overwritten by subcategories */
78 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
79
230 switch (get_op(inst)) { 80 switch (get_op(inst)) {
231 case 3: /* trap */ 81 case 3: /* trap */
232 printk("trap!\n"); 82 vcpu->arch.esr |= ESR_PTR;
233 kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM); 83 kvmppc_core_queue_program(vcpu);
234 advance = 0; 84 advance = 0;
235 break; 85 break;
236 86
237 case 19:
238 switch (get_xop(inst)) {
239 case 50: /* rfi */
240 kvmppc_emul_rfi(vcpu);
241 advance = 0;
242 break;
243
244 default:
245 emulated = EMULATE_FAIL;
246 break;
247 }
248 break;
249
250 case 31: 87 case 31:
251 switch (get_xop(inst)) { 88 switch (get_xop(inst)) {
252 89
@@ -255,27 +92,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
255 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 92 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
256 break; 93 break;
257 94
258 case 83: /* mfmsr */
259 rt = get_rt(inst);
260 vcpu->arch.gpr[rt] = vcpu->arch.msr;
261 break;
262
263 case 87: /* lbzx */ 95 case 87: /* lbzx */
264 rt = get_rt(inst); 96 rt = get_rt(inst);
265 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 97 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
266 break; 98 break;
267 99
268 case 131: /* wrtee */
269 rs = get_rs(inst);
270 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
271 | (vcpu->arch.gpr[rs] & MSR_EE);
272 break;
273
274 case 146: /* mtmsr */
275 rs = get_rs(inst);
276 kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
277 break;
278
279 case 151: /* stwx */ 100 case 151: /* stwx */
280 rs = get_rs(inst); 101 rs = get_rs(inst);
281 emulated = kvmppc_handle_store(run, vcpu, 102 emulated = kvmppc_handle_store(run, vcpu,
@@ -283,11 +104,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
283 4, 1); 104 4, 1);
284 break; 105 break;
285 106
286 case 163: /* wrteei */
287 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
288 | (inst & MSR_EE);
289 break;
290
291 case 215: /* stbx */ 107 case 215: /* stbx */
292 rs = get_rs(inst); 108 rs = get_rs(inst);
293 emulated = kvmppc_handle_store(run, vcpu, 109 emulated = kvmppc_handle_store(run, vcpu,
@@ -328,42 +144,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
328 vcpu->arch.gpr[ra] = ea; 144 vcpu->arch.gpr[ra] = ea;
329 break; 145 break;
330 146
331 case 323: /* mfdcr */
332 dcrn = get_dcrn(inst);
333 rt = get_rt(inst);
334
335 /* The guest may access CPR0 registers to determine the timebase
336 * frequency, and it must know the real host frequency because it
337 * can directly access the timebase registers.
338 *
339 * It would be possible to emulate those accesses in userspace,
340 * but userspace can really only figure out the end frequency.
341 * We could decompose that into the factors that compute it, but
342 * that's tricky math, and it's easier to just report the real
343 * CPR0 values.
344 */
345 switch (dcrn) {
346 case DCRN_CPR0_CONFIG_ADDR:
347 vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
348 break;
349 case DCRN_CPR0_CONFIG_DATA:
350 local_irq_disable();
351 mtdcr(DCRN_CPR0_CONFIG_ADDR,
352 vcpu->arch.cpr0_cfgaddr);
353 vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
354 local_irq_enable();
355 break;
356 default:
357 run->dcr.dcrn = dcrn;
358 run->dcr.data = 0;
359 run->dcr.is_write = 0;
360 vcpu->arch.io_gpr = rt;
361 vcpu->arch.dcr_needed = 1;
362 emulated = EMULATE_DO_DCR;
363 }
364
365 break;
366
367 case 339: /* mfspr */ 147 case 339: /* mfspr */
368 sprn = get_sprn(inst); 148 sprn = get_sprn(inst);
369 rt = get_rt(inst); 149 rt = get_rt(inst);
@@ -373,26 +153,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
373 vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; 153 vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
374 case SPRN_SRR1: 154 case SPRN_SRR1:
375 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; 155 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
376 case SPRN_MMUCR:
377 vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
378 case SPRN_PID:
379 vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
380 case SPRN_IVPR:
381 vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
382 case SPRN_CCR0:
383 vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
384 case SPRN_CCR1:
385 vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
386 case SPRN_PVR: 156 case SPRN_PVR:
387 vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; 157 vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
388 case SPRN_DEAR:
389 vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
390 case SPRN_ESR:
391 vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
392 case SPRN_DBCR0:
393 vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
394 case SPRN_DBCR1:
395 vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
396 158
397 /* Note: mftb and TBRL/TBWL are user-accessible, so 159 /* Note: mftb and TBRL/TBWL are user-accessible, so
398 * the guest can always access the real TB anyways. 160 * the guest can always access the real TB anyways.
@@ -413,42 +175,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
413 /* Note: SPRG4-7 are user-readable, so we don't get 175 /* Note: SPRG4-7 are user-readable, so we don't get
414 * a trap. */ 176 * a trap. */
415 177
416 case SPRN_IVOR0:
417 vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
418 case SPRN_IVOR1:
419 vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
420 case SPRN_IVOR2:
421 vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
422 case SPRN_IVOR3:
423 vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
424 case SPRN_IVOR4:
425 vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
426 case SPRN_IVOR5:
427 vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
428 case SPRN_IVOR6:
429 vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
430 case SPRN_IVOR7:
431 vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
432 case SPRN_IVOR8:
433 vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
434 case SPRN_IVOR9:
435 vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
436 case SPRN_IVOR10:
437 vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
438 case SPRN_IVOR11:
439 vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
440 case SPRN_IVOR12:
441 vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
442 case SPRN_IVOR13:
443 vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
444 case SPRN_IVOR14:
445 vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
446 case SPRN_IVOR15:
447 vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
448
449 default: 178 default:
450 printk("mfspr: unknown spr %x\n", sprn); 179 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
451 vcpu->arch.gpr[rt] = 0; 180 if (emulated == EMULATE_FAIL) {
181 printk("mfspr: unknown spr %x\n", sprn);
182 vcpu->arch.gpr[rt] = 0;
183 }
452 break; 184 break;
453 } 185 }
454 break; 186 break;
@@ -478,25 +210,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
478 vcpu->arch.gpr[ra] = ea; 210 vcpu->arch.gpr[ra] = ea;
479 break; 211 break;
480 212
481 case 451: /* mtdcr */
482 dcrn = get_dcrn(inst);
483 rs = get_rs(inst);
484
485 /* emulate some access in kernel */
486 switch (dcrn) {
487 case DCRN_CPR0_CONFIG_ADDR:
488 vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
489 break;
490 default:
491 run->dcr.dcrn = dcrn;
492 run->dcr.data = vcpu->arch.gpr[rs];
493 run->dcr.is_write = 1;
494 vcpu->arch.dcr_needed = 1;
495 emulated = EMULATE_DO_DCR;
496 }
497
498 break;
499
500 case 467: /* mtspr */ 213 case 467: /* mtspr */
501 sprn = get_sprn(inst); 214 sprn = get_sprn(inst);
502 rs = get_rs(inst); 215 rs = get_rs(inst);
@@ -505,22 +218,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
505 vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; 218 vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
506 case SPRN_SRR1: 219 case SPRN_SRR1:
507 vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; 220 vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
508 case SPRN_MMUCR:
509 vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
510 case SPRN_PID:
511 kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
512 case SPRN_CCR0:
513 vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
514 case SPRN_CCR1:
515 vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
516 case SPRN_DEAR:
517 vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
518 case SPRN_ESR:
519 vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
520 case SPRN_DBCR0:
521 vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
522 case SPRN_DBCR1:
523 vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
524 221
525 /* XXX We need to context-switch the timebase for 222 /* XXX We need to context-switch the timebase for
526 * watchdog and FIT. */ 223 * watchdog and FIT. */
@@ -532,14 +229,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
532 kvmppc_emulate_dec(vcpu); 229 kvmppc_emulate_dec(vcpu);
533 break; 230 break;
534 231
535 case SPRN_TSR:
536 vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
537
538 case SPRN_TCR:
539 vcpu->arch.tcr = vcpu->arch.gpr[rs];
540 kvmppc_emulate_dec(vcpu);
541 break;
542
543 case SPRN_SPRG0: 232 case SPRN_SPRG0:
544 vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; 233 vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
545 case SPRN_SPRG1: 234 case SPRN_SPRG1:
@@ -549,56 +238,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
549 case SPRN_SPRG3: 238 case SPRN_SPRG3:
550 vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; 239 vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
551 240
552 /* Note: SPRG4-7 are user-readable. These values are
553 * loaded into the real SPRGs when resuming the
554 * guest. */
555 case SPRN_SPRG4:
556 vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
557 case SPRN_SPRG5:
558 vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
559 case SPRN_SPRG6:
560 vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
561 case SPRN_SPRG7:
562 vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
563
564 case SPRN_IVPR:
565 vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
566 case SPRN_IVOR0:
567 vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
568 case SPRN_IVOR1:
569 vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
570 case SPRN_IVOR2:
571 vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
572 case SPRN_IVOR3:
573 vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
574 case SPRN_IVOR4:
575 vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
576 case SPRN_IVOR5:
577 vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
578 case SPRN_IVOR6:
579 vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
580 case SPRN_IVOR7:
581 vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
582 case SPRN_IVOR8:
583 vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
584 case SPRN_IVOR9:
585 vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
586 case SPRN_IVOR10:
587 vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
588 case SPRN_IVOR11:
589 vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
590 case SPRN_IVOR12:
591 vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
592 case SPRN_IVOR13:
593 vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
594 case SPRN_IVOR14:
595 vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
596 case SPRN_IVOR15:
597 vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
598
599 default: 241 default:
600 printk("mtspr: unknown spr %x\n", sprn); 242 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
601 emulated = EMULATE_FAIL; 243 if (emulated == EMULATE_FAIL)
244 printk("mtspr: unknown spr %x\n", sprn);
602 break; 245 break;
603 } 246 }
604 break; 247 break;
@@ -629,36 +272,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
629 4, 0); 272 4, 0);
630 break; 273 break;
631 274
632 case 978: /* tlbwe */
633 emulated = kvmppc_emul_tlbwe(vcpu, inst);
634 break;
635
636 case 914: { /* tlbsx */
637 int index;
638 unsigned int as = get_mmucr_sts(vcpu);
639 unsigned int pid = get_mmucr_stid(vcpu);
640
641 rt = get_rt(inst);
642 ra = get_ra(inst);
643 rb = get_rb(inst);
644 rc = get_rc(inst);
645
646 ea = vcpu->arch.gpr[rb];
647 if (ra)
648 ea += vcpu->arch.gpr[ra];
649
650 index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
651 if (rc) {
652 if (index < 0)
653 vcpu->arch.cr &= ~0x20000000;
654 else
655 vcpu->arch.cr |= 0x20000000;
656 }
657 vcpu->arch.gpr[rt] = index;
658
659 }
660 break;
661
662 case 790: /* lhbrx */ 275 case 790: /* lhbrx */
663 rt = get_rt(inst); 276 rt = get_rt(inst);
664 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); 277 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
@@ -674,14 +287,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
674 2, 0); 287 2, 0);
675 break; 288 break;
676 289
677 case 966: /* iccci */
678 break;
679
680 default: 290 default:
681 printk("unknown: op %d xop %d\n", get_op(inst), 291 /* Attempt core-specific emulation below. */
682 get_xop(inst));
683 emulated = EMULATE_FAIL; 292 emulated = EMULATE_FAIL;
684 break;
685 } 293 }
686 break; 294 break;
687 295
@@ -764,12 +372,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
764 break; 372 break;
765 373
766 default: 374 default:
767 printk("unknown op %d\n", get_op(inst));
768 emulated = EMULATE_FAIL; 375 emulated = EMULATE_FAIL;
769 break;
770 } 376 }
771 377
772 KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit); 378 if (emulated == EMULATE_FAIL) {
379 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
380 if (emulated == EMULATE_FAIL) {
381 advance = 0;
382 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
383 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
384 }
385 }
386
387 KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit);
773 388
774 if (advance) 389 if (advance)
775 vcpu->arch.pc += 4; /* Advance past emulated instruction. */ 390 vcpu->arch.pc += 4; /* Advance past emulated instruction. */
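The decode helpers this hunk moves into asm/disassemble.h just slice fixed fields out of the 32-bit instruction word: the primary opcode is the top six bits (get_op), and for the opcode-31 family the extended opcode is the ten bits just above the record bit ((inst >> 1) & 0x3ff). A small worked example, using the mfmsr case that this patch hands off from the generic emulator to the core backend (0x7c6000a6 is the standard encoding of "mfmsr r3"; treat it simply as an illustration of the field slicing):

#include <stdio.h>
#include <stdint.h>

static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }

int main(void)
{
	uint32_t inst = 0x7c6000a6;	/* mfmsr r3 */

	/* Prints: op=31 xop=83 rt=3 -- the case labels the emulator switches on. */
	printf("op=%u xop=%u rt=%u\n", get_op(inst), get_xop(inst), get_rt(inst));
	return 0;
}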
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index fda9baada132..2822c8ccfaaf 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -28,7 +28,8 @@
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/kvm_ppc.h> 29#include <asm/kvm_ppc.h>
30#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
31 31#include "timing.h"
32#include "../mm/mmu_decl.h"
32 33
33gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) 34gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
34{ 35{
@@ -98,14 +99,7 @@ void kvm_arch_hardware_unsetup(void)
98 99
99void kvm_arch_check_processor_compat(void *rtn) 100void kvm_arch_check_processor_compat(void *rtn)
100{ 101{
101 int r; 102 *(int *)rtn = kvmppc_core_check_processor_compat();
102
103 if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
104 r = 0;
105 else
106 r = -ENOTSUPP;
107
108 *(int *)rtn = r;
109} 103}
110 104
111struct kvm *kvm_arch_create_vm(void) 105struct kvm *kvm_arch_create_vm(void)
@@ -143,9 +137,6 @@ int kvm_dev_ioctl_check_extension(long ext)
143 int r; 137 int r;
144 138
145 switch (ext) { 139 switch (ext) {
146 case KVM_CAP_USER_MEMORY:
147 r = 1;
148 break;
149 case KVM_CAP_COALESCED_MMIO: 140 case KVM_CAP_COALESCED_MMIO:
150 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 141 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
151 break; 142 break;
@@ -178,30 +169,15 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
178struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 169struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
179{ 170{
180 struct kvm_vcpu *vcpu; 171 struct kvm_vcpu *vcpu;
181 int err; 172 vcpu = kvmppc_core_vcpu_create(kvm, id);
182 173 kvmppc_create_vcpu_debugfs(vcpu, id);
183 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
184 if (!vcpu) {
185 err = -ENOMEM;
186 goto out;
187 }
188
189 err = kvm_vcpu_init(vcpu, kvm, id);
190 if (err)
191 goto free_vcpu;
192
193 return vcpu; 174 return vcpu;
194
195free_vcpu:
196 kmem_cache_free(kvm_vcpu_cache, vcpu);
197out:
198 return ERR_PTR(err);
199} 175}
200 176
201void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 177void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
202{ 178{
203 kvm_vcpu_uninit(vcpu); 179 kvmppc_remove_vcpu_debugfs(vcpu);
204 kmem_cache_free(kvm_vcpu_cache, vcpu); 180 kvmppc_core_vcpu_free(vcpu);
205} 181}
206 182
207void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 183void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -211,16 +187,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
211 187
212int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 188int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
213{ 189{
214 unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER]; 190 return kvmppc_core_pending_dec(vcpu);
215
216 return test_bit(priority, &vcpu->arch.pending_exceptions);
217} 191}
218 192
219static void kvmppc_decrementer_func(unsigned long data) 193static void kvmppc_decrementer_func(unsigned long data)
220{ 194{
221 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; 195 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
222 196
223 kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER); 197 kvmppc_core_queue_dec(vcpu);
224 198
225 if (waitqueue_active(&vcpu->wq)) { 199 if (waitqueue_active(&vcpu->wq)) {
226 wake_up_interruptible(&vcpu->wq); 200 wake_up_interruptible(&vcpu->wq);
@@ -241,96 +215,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
241 kvmppc_core_destroy_mmu(vcpu); 215 kvmppc_core_destroy_mmu(vcpu);
242} 216}
243 217
244/* Note: clearing MSR[DE] just means that the debug interrupt will not be
245 * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
246 * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
247 * will be delivered as an "imprecise debug event" (which is indicated by
248 * DBSR[IDE].
249 */
250static void kvmppc_disable_debug_interrupts(void)
251{
252 mtmsr(mfmsr() & ~MSR_DE);
253}
254
255static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
256{
257 kvmppc_disable_debug_interrupts();
258
259 mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
260 mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
261 mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
262 mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
263 mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
264 mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
265 mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
266 mtmsr(vcpu->arch.host_msr);
267}
268
269static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
270{
271 struct kvm_guest_debug *dbg = &vcpu->guest_debug;
272 u32 dbcr0 = 0;
273
274 vcpu->arch.host_msr = mfmsr();
275 kvmppc_disable_debug_interrupts();
276
277 /* Save host debug register state. */
278 vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
279 vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
280 vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
281 vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
282 vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
283 vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
284 vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
285
286 /* set registers up for guest */
287
288 if (dbg->bp[0]) {
289 mtspr(SPRN_IAC1, dbg->bp[0]);
290 dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
291 }
292 if (dbg->bp[1]) {
293 mtspr(SPRN_IAC2, dbg->bp[1]);
294 dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
295 }
296 if (dbg->bp[2]) {
297 mtspr(SPRN_IAC3, dbg->bp[2]);
298 dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
299 }
300 if (dbg->bp[3]) {
301 mtspr(SPRN_IAC4, dbg->bp[3]);
302 dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
303 }
304
305 mtspr(SPRN_DBCR0, dbcr0);
306 mtspr(SPRN_DBCR1, 0);
307 mtspr(SPRN_DBCR2, 0);
308}
309
310void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 218void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
311{ 219{
312 int i;
313
314 if (vcpu->guest_debug.enabled) 220 if (vcpu->guest_debug.enabled)
315 kvmppc_load_guest_debug_registers(vcpu); 221 kvmppc_core_load_guest_debugstate(vcpu);
316 222
317 /* Mark every guest entry in the shadow TLB entry modified, so that they 223 kvmppc_core_vcpu_load(vcpu, cpu);
318 * will all be reloaded on the next vcpu run (instead of being
319 * demand-faulted). */
320 for (i = 0; i <= tlb_44x_hwater; i++)
321 kvmppc_tlbe_set_modified(vcpu, i);
322} 224}
323 225
324void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 226void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
325{ 227{
326 if (vcpu->guest_debug.enabled) 228 if (vcpu->guest_debug.enabled)
327 kvmppc_restore_host_debug_state(vcpu); 229 kvmppc_core_load_host_debugstate(vcpu);
328 230
329 /* Don't leave guest TLB entries resident when being de-scheduled. */ 231 /* Don't leave guest TLB entries resident when being de-scheduled. */
330 /* XXX It would be nice to differentiate between heavyweight exit and 232 /* XXX It would be nice to differentiate between heavyweight exit and
331 * sched_out here, since we could avoid the TLB flush for heavyweight 233 * sched_out here, since we could avoid the TLB flush for heavyweight
332 * exits. */ 234 * exits. */
333 _tlbia(); 235 _tlbil_all();
236 kvmppc_core_vcpu_put(vcpu);
334} 237}
335 238
336int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, 239int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
@@ -354,14 +257,14 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
354static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, 257static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
355 struct kvm_run *run) 258 struct kvm_run *run)
356{ 259{
357 u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; 260 ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
358 *gpr = run->dcr.data; 261 *gpr = run->dcr.data;
359} 262}
360 263
361static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 264static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
362 struct kvm_run *run) 265 struct kvm_run *run)
363{ 266{
364 u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; 267 ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
365 268
366 if (run->mmio.len > sizeof(*gpr)) { 269 if (run->mmio.len > sizeof(*gpr)) {
367 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); 270 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -459,7 +362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
459 vcpu->arch.dcr_needed = 0; 362 vcpu->arch.dcr_needed = 0;
460 } 363 }
461 364
462 kvmppc_check_and_deliver_interrupts(vcpu); 365 kvmppc_core_deliver_interrupts(vcpu);
463 366
464 local_irq_disable(); 367 local_irq_disable();
465 kvm_guest_enter(); 368 kvm_guest_enter();
@@ -477,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
477 380
478int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 381int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
479{ 382{
480 kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL); 383 kvmppc_core_queue_external(vcpu, irq);
481 384
482 if (waitqueue_active(&vcpu->wq)) { 385 if (waitqueue_active(&vcpu->wq)) {
483 wake_up_interruptible(&vcpu->wq); 386 wake_up_interruptible(&vcpu->wq);
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
new file mode 100644
index 000000000000..47ee603f558e
--- /dev/null
+++ b/arch/powerpc/kvm/timing.c
@@ -0,0 +1,239 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
19 */
20
21#include <linux/kvm_host.h>
22#include <linux/fs.h>
23#include <linux/seq_file.h>
24#include <linux/debugfs.h>
25#include <linux/uaccess.h>
26
27#include <asm/time.h>
28#include <asm-generic/div64.h>
29
30#include "timing.h"
31
32void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
33{
34 int i;
35
36 /* pause guest execution to avoid concurrent updates */
37 local_irq_disable();
38 mutex_lock(&vcpu->mutex);
39
40 vcpu->arch.last_exit_type = 0xDEAD;
41 for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
42 vcpu->arch.timing_count_type[i] = 0;
43 vcpu->arch.timing_max_duration[i] = 0;
44 vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
45 vcpu->arch.timing_sum_duration[i] = 0;
46 vcpu->arch.timing_sum_quad_duration[i] = 0;
47 }
48 vcpu->arch.timing_last_exit = 0;
49 vcpu->arch.timing_exit.tv64 = 0;
50 vcpu->arch.timing_last_enter.tv64 = 0;
51
52 mutex_unlock(&vcpu->mutex);
53 local_irq_enable();
54}
55
56static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
57{
58 u64 old;
59
60 do_div(duration, tb_ticks_per_usec);
61 if (unlikely(duration > 0xFFFFFFFF)) {
62 printk(KERN_ERR"%s - duration too big -> overflow"
63 " duration %lld type %d exit #%d\n",
64 __func__, duration, type,
65 vcpu->arch.timing_count_type[type]);
66 return;
67 }
68
69 vcpu->arch.timing_count_type[type]++;
70
71 /* sum */
72 old = vcpu->arch.timing_sum_duration[type];
73 vcpu->arch.timing_sum_duration[type] += duration;
74 if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
75 printk(KERN_ERR"%s - wrap adding sum of durations"
76 " old %lld new %lld type %d exit # of type %d\n",
77 __func__, old, vcpu->arch.timing_sum_duration[type],
78 type, vcpu->arch.timing_count_type[type]);
79 }
80
81 /* square sum */
82 old = vcpu->arch.timing_sum_quad_duration[type];
83 vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
84 if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
85 printk(KERN_ERR"%s - wrap adding sum of squared durations"
86 " old %lld new %lld type %d exit # of type %d\n",
87 __func__, old,
88 vcpu->arch.timing_sum_quad_duration[type],
89 type, vcpu->arch.timing_count_type[type]);
90 }
91
92 /* set min/max */
93 if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
94 vcpu->arch.timing_min_duration[type] = duration;
95 if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
96 vcpu->arch.timing_max_duration[type] = duration;
97}
98
99void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
100{
101 u64 exit = vcpu->arch.timing_last_exit;
102 u64 enter = vcpu->arch.timing_last_enter.tv64;
103
104 /* save exit time, used next exit when the reenter time is known */
105 vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;
106
107 if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
108 return; /* skip incomplete cycle (e.g. after reset) */
109
110 /* update statistics for average and standard deviation */
111 add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
112 /* enter -> timing_last_exit is time spent in guest - log this too */
113 add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
114 TIMEINGUEST);
115}
116
117static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
118 [MMIO_EXITS] = "MMIO",
119 [DCR_EXITS] = "DCR",
120 [SIGNAL_EXITS] = "SIGNAL",
121 [ITLB_REAL_MISS_EXITS] = "ITLBREAL",
122 [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT",
123 [DTLB_REAL_MISS_EXITS] = "DTLBREAL",
124 [DTLB_VIRT_MISS_EXITS] = "DTLBVIRT",
125 [SYSCALL_EXITS] = "SYSCALL",
126 [ISI_EXITS] = "ISI",
127 [DSI_EXITS] = "DSI",
128 [EMULATED_INST_EXITS] = "EMULINST",
129 [EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT",
130 [EMULATED_WRTEE_EXITS] = "EMUL_WRTEE",
131 [EMULATED_MTSPR_EXITS] = "EMUL_MTSPR",
132 [EMULATED_MFSPR_EXITS] = "EMUL_MFSPR",
133 [EMULATED_MTMSR_EXITS] = "EMUL_MTMSR",
134 [EMULATED_MFMSR_EXITS] = "EMUL_MFMSR",
135 [EMULATED_TLBSX_EXITS] = "EMUL_TLBSX",
136 [EMULATED_TLBWE_EXITS] = "EMUL_TLBWE",
137 [EMULATED_RFI_EXITS] = "EMUL_RFI",
138 [DEC_EXITS] = "DEC",
139 [EXT_INTR_EXITS] = "EXTINT",
140 [HALT_WAKEUP] = "HALT",
141 [USR_PR_INST] = "USR_PR_INST",
142 [FP_UNAVAIL] = "FP_UNAVAIL",
143 [DEBUG_EXITS] = "DEBUG",
144 [TIMEINGUEST] = "TIMEINGUEST"
145};
146
147static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
148{
149 struct kvm_vcpu *vcpu = m->private;
150 int i;
151
152 seq_printf(m, "%s", "type count min max sum sum_squared\n");
153
154 for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
155 seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n",
156 kvm_exit_names[i],
157 vcpu->arch.timing_count_type[i],
158 vcpu->arch.timing_min_duration[i],
159 vcpu->arch.timing_max_duration[i],
160 vcpu->arch.timing_sum_duration[i],
161 vcpu->arch.timing_sum_quad_duration[i]);
162 }
163 return 0;
164}
165
166/* Write 'c' to clear the timing statistics. */
167static ssize_t kvmppc_exit_timing_write(struct file *file,
168 const char __user *user_buf,
169 size_t count, loff_t *ppos)
170{
171 int err = -EINVAL;
172 char c;
173
174 if (count > 1) {
175 goto done;
176 }
177
178 if (get_user(c, user_buf)) {
179 err = -EFAULT;
180 goto done;
181 }
182
183 if (c == 'c') {
184 struct seq_file *seqf = (struct seq_file *)file->private_data;
185 struct kvm_vcpu *vcpu = seqf->private;
186 /* Write does not affect our buffers previously generated with
187 * show. seq_file is locked here to prevent races of init with
188 * a show call */
189 mutex_lock(&seqf->lock);
190 kvmppc_init_timing_stats(vcpu);
191 mutex_unlock(&seqf->lock);
192 err = count;
193 }
194
195done:
196 return err;
197}
198
199static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
200{
201 return single_open(file, kvmppc_exit_timing_show, inode->i_private);
202}
203
204static struct file_operations kvmppc_exit_timing_fops = {
205 .owner = THIS_MODULE,
206 .open = kvmppc_exit_timing_open,
207 .read = seq_read,
208 .write = kvmppc_exit_timing_write,
209 .llseek = seq_lseek,
210 .release = single_release,
211};
212
213void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
214{
215 static char dbg_fname[50];
216 struct dentry *debugfs_file;
217
218 snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
219 current->pid, id);
220 debugfs_file = debugfs_create_file(dbg_fname, 0666,
221 kvm_debugfs_dir, vcpu,
222 &kvmppc_exit_timing_fops);
223
224 if (!debugfs_file) {
225 printk(KERN_ERR"%s: error creating debugfs file %s\n",
226 __func__, dbg_fname);
227 return;
228 }
229
230 vcpu->arch.debugfs_exit_timing = debugfs_file;
231}
232
233void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
234{
235 if (vcpu->arch.debugfs_exit_timing) {
236 debugfs_remove(vcpu->arch.debugfs_exit_timing);
237 vcpu->arch.debugfs_exit_timing = NULL;
238 }
239}
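
The debugfs rows printed by kvmppc_exit_timing_show() carry enough information to recover first- and second-order statistics per exit type. As a minimal sketch (assuming a userspace consumer; the helper name and signature below are illustrative and not part of the patch), mean and standard deviation follow directly from the count, sum and sum_squared columns:

#include <math.h>

/* Derive mean and (population) standard deviation, in microseconds,
 * from one row of the vmN_vcpuN_timing output: count, sum, sum_squared. */
static void exit_row_stats(unsigned long count, double sum, double sum_sq,
			   double *mean, double *stddev)
{
	if (!count) {
		*mean = *stddev = 0.0;
		return;
	}
	*mean = sum / count;
	/* variance = E[x^2] - (E[x])^2, built from the two running sums */
	*stddev = sqrt(sum_sq / count - *mean * *mean);
}

Writing the single character 'c' to the same debugfs file (see kvmppc_exit_timing_write() above) resets all counters, so a sampling interval can be bracketed by a clear followed by a read.
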
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
new file mode 100644
index 000000000000..bb13b1f3cd5a
--- /dev/null
+++ b/arch/powerpc/kvm/timing.h
@@ -0,0 +1,102 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_EXITTIMING_H__
21#define __POWERPC_KVM_EXITTIMING_H__
22
23#include <linux/kvm_host.h>
24#include <asm/kvm_host.h>
25
26#ifdef CONFIG_KVM_EXIT_TIMING
27void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
28void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
29void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
30void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
31
32static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type)
33{
34 vcpu->arch.last_exit_type = type;
35}
36
37#else
38/* if exit timing is not configured there is no need to build the c file */
39static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {}
40static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {}
41static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu,
42 unsigned int id) {}
43static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
44static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
45#endif /* CONFIG_KVM_EXIT_TIMING */
46
47/* account the exit in kvm_stats */
48static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
49{
50 /* type has to be known at build time for optimization */
51 BUILD_BUG_ON(__builtin_constant_p(type));
52 switch (type) {
53 case EXT_INTR_EXITS:
54 vcpu->stat.ext_intr_exits++;
55 break;
56 case DEC_EXITS:
57 vcpu->stat.dec_exits++;
58 break;
59 case EMULATED_INST_EXITS:
60 vcpu->stat.emulated_inst_exits++;
61 break;
62 case DCR_EXITS:
63 vcpu->stat.dcr_exits++;
64 break;
65 case DSI_EXITS:
66 vcpu->stat.dsi_exits++;
67 break;
68 case ISI_EXITS:
69 vcpu->stat.isi_exits++;
70 break;
71 case SYSCALL_EXITS:
72 vcpu->stat.syscall_exits++;
73 break;
74 case DTLB_REAL_MISS_EXITS:
75 vcpu->stat.dtlb_real_miss_exits++;
76 break;
77 case DTLB_VIRT_MISS_EXITS:
78 vcpu->stat.dtlb_virt_miss_exits++;
79 break;
80 case MMIO_EXITS:
81 vcpu->stat.mmio_exits++;
82 break;
83 case ITLB_REAL_MISS_EXITS:
84 vcpu->stat.itlb_real_miss_exits++;
85 break;
86 case ITLB_VIRT_MISS_EXITS:
87 vcpu->stat.itlb_virt_miss_exits++;
88 break;
89 case SIGNAL_EXITS:
90 vcpu->stat.signal_exits++;
91 break;
92 }
93}
94
95/* wrapper to set exit time and account for it in kvm_stats */
96static inline void kvmppc_account_exit(struct kvm_vcpu *vcpu, int type)
97{
98 kvmppc_set_exit_type(vcpu, type);
99 kvmppc_account_exit_stat(vcpu, type);
100}
101
102#endif /* __POWERPC_KVM_EXITTIMING_H__ */
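
kvmppc_account_exit() is the entry point an exit handler is expected to call with a compile-time-constant exit type; as the comment above the switch notes, a constant type lets kvmppc_account_exit_stat() fold to a single counter increment, while kvmppc_set_exit_type() records which bucket the next kvmppc_update_timing_stats() pass should charge. A hypothetical call site (the handler name and surrounding logic are assumptions, not taken from the patch) would look like:

static int handle_guest_dec(struct kvm_vcpu *vcpu)
{
	/* Constant type: the switch above collapses to vcpu->stat.dec_exits++ */
	kvmppc_account_exit(vcpu, DEC_EXITS);
	/* ... queue the decrementer interrupt for the guest ... */
	return 0;
}
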
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 25ec5378afa4..70693a5c12a1 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -26,11 +26,24 @@ _GLOBAL(__copy_tofrom_user)
26 andi. r6,r6,7 26 andi. r6,r6,7
27 PPC_MTOCRF 0x01,r5 27 PPC_MTOCRF 0x01,r5
28 blt cr1,.Lshort_copy 28 blt cr1,.Lshort_copy
29/* Below we want to nop out the bne if we're on a CPU that has the
30 * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
31 * cleared.
32 * At the time of writing the only CPU that has this combination of bits
33 * set is Power6.
34 */
35BEGIN_FTR_SECTION
36 nop
37FTR_SECTION_ELSE
29 bne .Ldst_unaligned 38 bne .Ldst_unaligned
39ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
40 CPU_FTR_UNALIGNED_LD_STD)
30.Ldst_aligned: 41.Ldst_aligned:
31 andi. r0,r4,7
32 addi r3,r3,-16 42 addi r3,r3,-16
43BEGIN_FTR_SECTION
44 andi. r0,r4,7
33 bne .Lsrc_unaligned 45 bne .Lsrc_unaligned
46END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
34 srdi r7,r5,4 47 srdi r7,r5,4
3520: ld r9,0(r4) 4820: ld r9,0(r4)
36 addi r4,r4,-8 49 addi r4,r4,-8
@@ -138,7 +151,7 @@ _GLOBAL(__copy_tofrom_user)
138 PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */ 151 PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */
139 subf r5,r6,r5 152 subf r5,r6,r5
140 li r7,0 153 li r7,0
141 cmpldi r1,r5,16 154 cmpldi cr1,r5,16
142 bf cr7*4+3,1f 155 bf cr7*4+3,1f
14335: lbz r0,0(r4) 15635: lbz r0,0(r4)
14481: stb r0,0(r3) 15781: stb r0,0(r3)
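
On the usual reading of ALT_FTR_SECTION_END(mask, value), the first body (here the nop) is kept when the masked CPU feature bits equal the expected value, and the alternative body (the original bne) is patched in otherwise. A rough C rendering of that predicate, offered as a sketch of the selection logic rather than the actual fixup implementation:

static inline int skip_dst_unaligned_path(unsigned long cpu_features)
{
	unsigned long mask = CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ;

	/* True on CPUs with UNALIGNED_LD_STD set and CP_USE_DCBTZ clear,
	 * i.e. POWER6 at the time of the comment above. */
	return (cpu_features & mask) == CPU_FTR_UNALIGNED_LD_STD;
}

The same pair of feature sections is applied to memcpy_64.S in the hunk further down.
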
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index 31734c0969cd..b7dc4c19f582 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -77,26 +77,26 @@ static DEFINE_SPINLOCK(consistent_lock);
77 * the amount of RAM found at boot time.) I would imagine that get_vm_area() 77 * the amount of RAM found at boot time.) I would imagine that get_vm_area()
78 * would have to initialise this each time prior to calling vm_region_alloc(). 78 * would have to initialise this each time prior to calling vm_region_alloc().
79 */ 79 */
80struct vm_region { 80struct ppc_vm_region {
81 struct list_head vm_list; 81 struct list_head vm_list;
82 unsigned long vm_start; 82 unsigned long vm_start;
83 unsigned long vm_end; 83 unsigned long vm_end;
84}; 84};
85 85
86static struct vm_region consistent_head = { 86static struct ppc_vm_region consistent_head = {
87 .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), 87 .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
88 .vm_start = CONSISTENT_BASE, 88 .vm_start = CONSISTENT_BASE,
89 .vm_end = CONSISTENT_END, 89 .vm_end = CONSISTENT_END,
90}; 90};
91 91
92static struct vm_region * 92static struct ppc_vm_region *
93vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp) 93ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
94{ 94{
95 unsigned long addr = head->vm_start, end = head->vm_end - size; 95 unsigned long addr = head->vm_start, end = head->vm_end - size;
96 unsigned long flags; 96 unsigned long flags;
97 struct vm_region *c, *new; 97 struct ppc_vm_region *c, *new;
98 98
99 new = kmalloc(sizeof(struct vm_region), gfp); 99 new = kmalloc(sizeof(struct ppc_vm_region), gfp);
100 if (!new) 100 if (!new)
101 goto out; 101 goto out;
102 102
@@ -130,9 +130,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
130 return NULL; 130 return NULL;
131} 131}
132 132
133static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr) 133static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
134{ 134{
135 struct vm_region *c; 135 struct ppc_vm_region *c;
136 136
137 list_for_each_entry(c, &head->vm_list, vm_list) { 137 list_for_each_entry(c, &head->vm_list, vm_list) {
138 if (c->vm_start == addr) 138 if (c->vm_start == addr)
@@ -151,7 +151,7 @@ void *
151__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp) 151__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
152{ 152{
153 struct page *page; 153 struct page *page;
154 struct vm_region *c; 154 struct ppc_vm_region *c;
155 unsigned long order; 155 unsigned long order;
156 u64 mask = 0x00ffffff, limit; /* ISA default */ 156 u64 mask = 0x00ffffff, limit; /* ISA default */
157 157
@@ -191,7 +191,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
191 /* 191 /*
192 * Allocate a virtual address in the consistent mapping region. 192 * Allocate a virtual address in the consistent mapping region.
193 */ 193 */
194 c = vm_region_alloc(&consistent_head, size, 194 c = ppc_vm_region_alloc(&consistent_head, size,
195 gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); 195 gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
196 if (c) { 196 if (c) {
197 unsigned long vaddr = c->vm_start; 197 unsigned long vaddr = c->vm_start;
@@ -239,7 +239,7 @@ EXPORT_SYMBOL(__dma_alloc_coherent);
239 */ 239 */
240void __dma_free_coherent(size_t size, void *vaddr) 240void __dma_free_coherent(size_t size, void *vaddr)
241{ 241{
242 struct vm_region *c; 242 struct ppc_vm_region *c;
243 unsigned long flags, addr; 243 unsigned long flags, addr;
244 pte_t *ptep; 244 pte_t *ptep;
245 245
@@ -247,7 +247,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
247 247
248 spin_lock_irqsave(&consistent_lock, flags); 248 spin_lock_irqsave(&consistent_lock, flags);
249 249
250 c = vm_region_find(&consistent_head, (unsigned long)vaddr); 250 c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
251 if (!c) 251 if (!c)
252 goto no_area; 252 goto no_area;
253 253
@@ -320,7 +320,6 @@ static int __init dma_alloc_init(void)
320 ret = -ENOMEM; 320 ret = -ENOMEM;
321 break; 321 break;
322 } 322 }
323 WARN_ON(!pmd_none(*pmd));
324 323
325 pte = pte_alloc_kernel(pmd, CONSISTENT_BASE); 324 pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
326 if (!pte) { 325 if (!pte) {
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 3f131129d1c1..fe2d34e5332d 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -18,11 +18,23 @@ _GLOBAL(memcpy)
18 andi. r6,r6,7 18 andi. r6,r6,7
19 dcbt 0,r4 19 dcbt 0,r4
20 blt cr1,.Lshort_copy 20 blt cr1,.Lshort_copy
21/* Below we want to nop out the bne if we're on a CPU that has the
22 CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
23 cleared.
24 At the time of writing the only CPU that has this combination of bits
25 set is Power6. */
26BEGIN_FTR_SECTION
27 nop
28FTR_SECTION_ELSE
21 bne .Ldst_unaligned 29 bne .Ldst_unaligned
30ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
31 CPU_FTR_UNALIGNED_LD_STD)
22.Ldst_aligned: 32.Ldst_aligned:
23 andi. r0,r4,7
24 addi r3,r3,-16 33 addi r3,r3,-16
34BEGIN_FTR_SECTION
35 andi. r0,r4,7
25 bne .Lsrc_unaligned 36 bne .Lsrc_unaligned
37END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
26 srdi r7,r5,4 38 srdi r7,r5,4
27 ld r9,0(r4) 39 ld r9,0(r4)
28 addi r4,r4,-8 40 addi r4,r4,-8
@@ -131,7 +143,7 @@ _GLOBAL(memcpy)
131 PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7 143 PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7
132 subf r5,r6,r5 144 subf r5,r6,r5
133 li r7,0 145 li r7,0
134 cmpldi r1,r5,16 146 cmpldi cr1,r5,16
135 bf cr7*4+3,1f 147 bf cr7*4+3,1f
136 lbz r0,0(r4) 148 lbz r0,0(r4)
137 stb r0,0(r3) 149 stb r0,0(r3)
diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile
index 03aa98dd9f0a..f9e506a735ae 100644
--- a/arch/powerpc/math-emu/Makefile
+++ b/arch/powerpc/math-emu/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_MATH_EMULATION) += fabs.o fadd.o fadds.o fcmpo.o fcmpu.o \
11 mcrfs.o mffs.o mtfsb0.o mtfsb1.o \ 11 mcrfs.o mffs.o mtfsb0.o mtfsb1.o \
12 mtfsf.o mtfsfi.o stfiwx.o stfs.o 12 mtfsf.o mtfsfi.o stfiwx.o stfs.o
13 13
14obj-$(CONFIG_SPE) += math_efp.o
15
14CFLAGS_fabs.o = -fno-builtin-fabs 16CFLAGS_fabs.o = -fno-builtin-fabs
15CFLAGS_math.o = -fno-builtin-fabs 17CFLAGS_math.o = -fno-builtin-fabs
16 18
diff --git a/arch/powerpc/math-emu/fadd.c b/arch/powerpc/math-emu/fadd.c
index 04d3b4aa32ce..0158a16e2b82 100644
--- a/arch/powerpc/math-emu/fadd.c
+++ b/arch/powerpc/math-emu/fadd.c
@@ -13,7 +13,6 @@ fadd(void *frD, void *frA, void *frB)
13 FP_DECL_D(B); 13 FP_DECL_D(B);
14 FP_DECL_D(R); 14 FP_DECL_D(R);
15 FP_DECL_EX; 15 FP_DECL_EX;
16 int ret = 0;
17 16
18#ifdef DEBUG 17#ifdef DEBUG
19 printk("%s: %p %p %p\n", __func__, frD, frA, frB); 18 printk("%s: %p %p %p\n", __func__, frD, frA, frB);
diff --git a/arch/powerpc/math-emu/fcmpo.c b/arch/powerpc/math-emu/fcmpo.c
index b5dc4498cd71..5bce011c2aec 100644
--- a/arch/powerpc/math-emu/fcmpo.c
+++ b/arch/powerpc/math-emu/fcmpo.c
@@ -14,7 +14,6 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
14 FP_DECL_EX; 14 FP_DECL_EX;
15 int code[4] = { (1 << 3), (1 << 1), (1 << 2), (1 << 0) }; 15 int code[4] = { (1 << 3), (1 << 1), (1 << 2), (1 << 0) };
16 long cmp; 16 long cmp;
17 int ret = 0;
18 17
19#ifdef DEBUG 18#ifdef DEBUG
20 printk("%s: %p (%08x) %d %p %p\n", __func__, ccr, *ccr, crfD, frA, frB); 19 printk("%s: %p (%08x) %d %p %p\n", __func__, ccr, *ccr, crfD, frA, frB);
@@ -29,7 +28,7 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
29#endif 28#endif
30 29
31 if (A_c == FP_CLS_NAN || B_c == FP_CLS_NAN) 30 if (A_c == FP_CLS_NAN || B_c == FP_CLS_NAN)
32 ret |= EFLAG_VXVC; 31 FP_SET_EXCEPTION(EFLAG_VXVC);
33 32
34 FP_CMP_D(cmp, A, B, 2); 33 FP_CMP_D(cmp, A, B, 2);
35 cmp = code[(cmp + 1) & 3]; 34 cmp = code[(cmp + 1) & 3];
@@ -44,5 +43,5 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
44 printk("CR: %08x\n", *ccr); 43 printk("CR: %08x\n", *ccr);
45#endif 44#endif
46 45
47 return ret; 46 return FP_CUR_EXCEPTIONS;
48} 47}
diff --git a/arch/powerpc/math-emu/fdiv.c b/arch/powerpc/math-emu/fdiv.c
index 2db15097d98e..a29239c05e3e 100644
--- a/arch/powerpc/math-emu/fdiv.c
+++ b/arch/powerpc/math-emu/fdiv.c
@@ -13,7 +13,6 @@ fdiv(void *frD, void *frA, void *frB)
13 FP_DECL_D(B); 13 FP_DECL_D(B);
14 FP_DECL_D(R); 14 FP_DECL_D(R);
15 FP_DECL_EX; 15 FP_DECL_EX;
16 int ret = 0;
17 16
18#ifdef DEBUG 17#ifdef DEBUG
19 printk("%s: %p %p %p\n", __func__, frD, frA, frB); 18 printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -28,22 +27,22 @@ fdiv(void *frD, void *frA, void *frB)
28#endif 27#endif
29 28
30 if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) { 29 if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) {
31 ret |= EFLAG_VXZDZ; 30 FP_SET_EXCEPTION(EFLAG_VXZDZ);
32#ifdef DEBUG 31#ifdef DEBUG
33 printk("%s: FPSCR_VXZDZ raised\n", __func__); 32 printk("%s: FPSCR_VXZDZ raised\n", __func__);
34#endif 33#endif
35 } 34 }
36 if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) { 35 if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) {
37 ret |= EFLAG_VXIDI; 36 FP_SET_EXCEPTION(EFLAG_VXIDI);
38#ifdef DEBUG 37#ifdef DEBUG
39 printk("%s: FPSCR_VXIDI raised\n", __func__); 38 printk("%s: FPSCR_VXIDI raised\n", __func__);
40#endif 39#endif
41 } 40 }
42 41
43 if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) { 42 if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) {
44 ret |= EFLAG_DIVZERO; 43 FP_SET_EXCEPTION(EFLAG_DIVZERO);
45 if (__FPU_TRAP_P(EFLAG_DIVZERO)) 44 if (__FPU_TRAP_P(EFLAG_DIVZERO))
46 return ret; 45 return FP_CUR_EXCEPTIONS;
47 } 46 }
48 FP_DIV_D(R, A, B); 47 FP_DIV_D(R, A, B);
49 48
diff --git a/arch/powerpc/math-emu/fdivs.c b/arch/powerpc/math-emu/fdivs.c
index 797f6a9a20b5..526bc261275f 100644
--- a/arch/powerpc/math-emu/fdivs.c
+++ b/arch/powerpc/math-emu/fdivs.c
@@ -14,7 +14,6 @@ fdivs(void *frD, void *frA, void *frB)
14 FP_DECL_D(B); 14 FP_DECL_D(B);
15 FP_DECL_D(R); 15 FP_DECL_D(R);
16 FP_DECL_EX; 16 FP_DECL_EX;
17 int ret = 0;
18 17
19#ifdef DEBUG 18#ifdef DEBUG
20 printk("%s: %p %p %p\n", __func__, frD, frA, frB); 19 printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -29,22 +28,22 @@ fdivs(void *frD, void *frA, void *frB)
29#endif 28#endif
30 29
31 if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) { 30 if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) {
32 ret |= EFLAG_VXZDZ; 31 FP_SET_EXCEPTION(EFLAG_VXZDZ);
33#ifdef DEBUG 32#ifdef DEBUG
34 printk("%s: FPSCR_VXZDZ raised\n", __func__); 33 printk("%s: FPSCR_VXZDZ raised\n", __func__);
35#endif 34#endif
36 } 35 }
37 if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) { 36 if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) {
38 ret |= EFLAG_VXIDI; 37 FP_SET_EXCEPTION(EFLAG_VXIDI);
39#ifdef DEBUG 38#ifdef DEBUG
40 printk("%s: FPSCR_VXIDI raised\n", __func__); 39 printk("%s: FPSCR_VXIDI raised\n", __func__);
41#endif 40#endif
42 } 41 }
43 42
44 if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) { 43 if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) {
45 ret |= EFLAG_DIVZERO; 44 FP_SET_EXCEPTION(EFLAG_DIVZERO);
46 if (__FPU_TRAP_P(EFLAG_DIVZERO)) 45 if (__FPU_TRAP_P(EFLAG_DIVZERO))
47 return ret; 46 return FP_CUR_EXCEPTIONS;
48 } 47 }
49 48
50 FP_DIV_D(R, A, B); 49 FP_DIV_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fmadd.c b/arch/powerpc/math-emu/fmadd.c
index 925313aa6f82..8c3f20aa5a95 100644
--- a/arch/powerpc/math-emu/fmadd.c
+++ b/arch/powerpc/math-emu/fmadd.c
@@ -15,7 +15,6 @@ fmadd(void *frD, void *frA, void *frB, void *frC)
15 FP_DECL_D(C); 15 FP_DECL_D(C);
16 FP_DECL_D(T); 16 FP_DECL_D(T);
17 FP_DECL_EX; 17 FP_DECL_EX;
18 int ret = 0;
19 18
20#ifdef DEBUG 19#ifdef DEBUG
21 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); 20 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,12 +32,12 @@ fmadd(void *frD, void *frA, void *frB, void *frC)
33 32
34 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || 33 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
35 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) 34 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
36 ret |= EFLAG_VXIMZ; 35 FP_SET_EXCEPTION(EFLAG_VXIMZ);
37 36
38 FP_MUL_D(T, A, C); 37 FP_MUL_D(T, A, C);
39 38
40 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) 39 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
41 ret |= EFLAG_VXISI; 40 FP_SET_EXCEPTION(EFLAG_VXISI);
42 41
43 FP_ADD_D(R, T, B); 42 FP_ADD_D(R, T, B);
44 43
diff --git a/arch/powerpc/math-emu/fmadds.c b/arch/powerpc/math-emu/fmadds.c
index aea80ef79399..794fb31e59d1 100644
--- a/arch/powerpc/math-emu/fmadds.c
+++ b/arch/powerpc/math-emu/fmadds.c
@@ -16,7 +16,6 @@ fmadds(void *frD, void *frA, void *frB, void *frC)
16 FP_DECL_D(C); 16 FP_DECL_D(C);
17 FP_DECL_D(T); 17 FP_DECL_D(T);
18 FP_DECL_EX; 18 FP_DECL_EX;
19 int ret = 0;
20 19
21#ifdef DEBUG 20#ifdef DEBUG
22 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); 21 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,12 +33,12 @@ fmadds(void *frD, void *frA, void *frB, void *frC)
34 33
35 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || 34 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
36 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) 35 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
37 ret |= EFLAG_VXIMZ; 36 FP_SET_EXCEPTION(EFLAG_VXIMZ);
38 37
39 FP_MUL_D(T, A, C); 38 FP_MUL_D(T, A, C);
40 39
41 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) 40 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
42 ret |= EFLAG_VXISI; 41 FP_SET_EXCEPTION(EFLAG_VXISI);
43 42
44 FP_ADD_D(R, T, B); 43 FP_ADD_D(R, T, B);
45 44
diff --git a/arch/powerpc/math-emu/fmsub.c b/arch/powerpc/math-emu/fmsub.c
index a644d525fca6..626f6fed84ac 100644
--- a/arch/powerpc/math-emu/fmsub.c
+++ b/arch/powerpc/math-emu/fmsub.c
@@ -15,7 +15,6 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
15 FP_DECL_D(C); 15 FP_DECL_D(C);
16 FP_DECL_D(T); 16 FP_DECL_D(T);
17 FP_DECL_EX; 17 FP_DECL_EX;
18 int ret = 0;
19 18
20#ifdef DEBUG 19#ifdef DEBUG
21 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); 20 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,7 +32,7 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
33 32
34 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || 33 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
35 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) 34 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
36 ret |= EFLAG_VXIMZ; 35 FP_SET_EXCEPTION(EFLAG_VXIMZ);
37 36
38 FP_MUL_D(T, A, C); 37 FP_MUL_D(T, A, C);
39 38
@@ -41,7 +40,7 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
41 B_s ^= 1; 40 B_s ^= 1;
42 41
43 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) 42 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
44 ret |= EFLAG_VXISI; 43 FP_SET_EXCEPTION(EFLAG_VXISI);
45 44
46 FP_ADD_D(R, T, B); 45 FP_ADD_D(R, T, B);
47 46
diff --git a/arch/powerpc/math-emu/fmsubs.c b/arch/powerpc/math-emu/fmsubs.c
index 2fdeeb9bb569..3425bc899760 100644
--- a/arch/powerpc/math-emu/fmsubs.c
+++ b/arch/powerpc/math-emu/fmsubs.c
@@ -16,7 +16,6 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
16 FP_DECL_D(C); 16 FP_DECL_D(C);
17 FP_DECL_D(T); 17 FP_DECL_D(T);
18 FP_DECL_EX; 18 FP_DECL_EX;
19 int ret = 0;
20 19
21#ifdef DEBUG 20#ifdef DEBUG
22 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); 21 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,7 +33,7 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
34 33
35 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || 34 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
36 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) 35 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
37 ret |= EFLAG_VXIMZ; 36 FP_SET_EXCEPTION(EFLAG_VXIMZ);
38 37
39 FP_MUL_D(T, A, C); 38 FP_MUL_D(T, A, C);
40 39
@@ -42,7 +41,7 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
42 B_s ^= 1; 41 B_s ^= 1;
43 42
44 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) 43 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
45 ret |= EFLAG_VXISI; 44 FP_SET_EXCEPTION(EFLAG_VXISI);
46 45
47 FP_ADD_D(R, T, B); 46 FP_ADD_D(R, T, B);
48 47
diff --git a/arch/powerpc/math-emu/fmul.c b/arch/powerpc/math-emu/fmul.c
index 391fd17d3440..2c1929779892 100644
--- a/arch/powerpc/math-emu/fmul.c
+++ b/arch/powerpc/math-emu/fmul.c
@@ -13,7 +13,6 @@ fmul(void *frD, void *frA, void *frB)
13 FP_DECL_D(B); 13 FP_DECL_D(B);
14 FP_DECL_D(R); 14 FP_DECL_D(R);
15 FP_DECL_EX; 15 FP_DECL_EX;
16 int ret = 0;
17 16
18#ifdef DEBUG 17#ifdef DEBUG
19 printk("%s: %p %p %p\n", __func__, frD, frA, frB); 18 printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -31,7 +30,7 @@ fmul(void *frD, void *frA, void *frB)
31 30
32 if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) || 31 if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) ||
33 (A_c == FP_CLS_ZERO && B_c == FP_CLS_INF)) 32 (A_c == FP_CLS_ZERO && B_c == FP_CLS_INF))
34 ret |= EFLAG_VXIMZ; 33 FP_SET_EXCEPTION(EFLAG_VXIMZ);
35 34
36 FP_MUL_D(R, A, B); 35 FP_MUL_D(R, A, B);
37 36
diff --git a/arch/powerpc/math-emu/fmuls.c b/arch/powerpc/math-emu/fmuls.c
index 2d3ec5f7da20..f5ad5c9c77d0 100644
--- a/arch/powerpc/math-emu/fmuls.c
+++ b/arch/powerpc/math-emu/fmuls.c
@@ -14,7 +14,6 @@ fmuls(void *frD, void *frA, void *frB)
14 FP_DECL_D(B); 14 FP_DECL_D(B);
15 FP_DECL_D(R); 15 FP_DECL_D(R);
16 FP_DECL_EX; 16 FP_DECL_EX;
17 int ret = 0;
18 17
19#ifdef DEBUG 18#ifdef DEBUG
20 printk("%s: %p %p %p\n", __func__, frD, frA, frB); 19 printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -32,7 +31,7 @@ fmuls(void *frD, void *frA, void *frB)
32 31
33 if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) || 32 if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) ||
34 (A_c == FP_CLS_ZERO && B_c == FP_CLS_INF)) 33 (A_c == FP_CLS_ZERO && B_c == FP_CLS_INF))
35 ret |= EFLAG_VXIMZ; 34 FP_SET_EXCEPTION(EFLAG_VXIMZ);
36 35
37 FP_MUL_D(R, A, B); 36 FP_MUL_D(R, A, B);
38 37
diff --git a/arch/powerpc/math-emu/fnmadd.c b/arch/powerpc/math-emu/fnmadd.c
index 2497b86494e5..e817bc5453ef 100644
--- a/arch/powerpc/math-emu/fnmadd.c
+++ b/arch/powerpc/math-emu/fnmadd.c
@@ -15,7 +15,6 @@ fnmadd(void *frD, void *frA, void *frB, void *frC)
15 FP_DECL_D(C); 15 FP_DECL_D(C);
16 FP_DECL_D(T); 16 FP_DECL_D(T);
17 FP_DECL_EX; 17 FP_DECL_EX;
18 int ret = 0;
19 18
20#ifdef DEBUG 19#ifdef DEBUG
21 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); 20 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,12 +32,12 @@ fnmadd(void *frD, void *frA, void *frB, void *frC)
33 32
34 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || 33 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
35 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) 34 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
36 ret |= EFLAG_VXIMZ; 35 FP_SET_EXCEPTION(EFLAG_VXIMZ);
37 36
38 FP_MUL_D(T, A, C); 37 FP_MUL_D(T, A, C);
39 38
40 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) 39 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
41 ret |= EFLAG_VXISI; 40 FP_SET_EXCEPTION(EFLAG_VXISI);
42 41
43 FP_ADD_D(R, T, B); 42 FP_ADD_D(R, T, B);
44 43
diff --git a/arch/powerpc/math-emu/fnmadds.c b/arch/powerpc/math-emu/fnmadds.c
index ee9d71e0b376..4db4b7d9ba8d 100644
--- a/arch/powerpc/math-emu/fnmadds.c
+++ b/arch/powerpc/math-emu/fnmadds.c
@@ -16,7 +16,6 @@ fnmadds(void *frD, void *frA, void *frB, void *frC)
16 FP_DECL_D(C); 16 FP_DECL_D(C);
17 FP_DECL_D(T); 17 FP_DECL_D(T);
18 FP_DECL_EX; 18 FP_DECL_EX;
19 int ret = 0;
20 19
21#ifdef DEBUG 20#ifdef DEBUG
22 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); 21 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,12 +33,12 @@ fnmadds(void *frD, void *frA, void *frB, void *frC)
34 33
35 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || 34 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
36 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) 35 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
37 ret |= EFLAG_VXIMZ; 36 FP_SET_EXCEPTION(EFLAG_VXIMZ);
38 37
39 FP_MUL_D(T, A, C); 38 FP_MUL_D(T, A, C);
40 39
41 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) 40 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
42 ret |= EFLAG_VXISI; 41 FP_SET_EXCEPTION(EFLAG_VXISI);
43 42
44 FP_ADD_D(R, T, B); 43 FP_ADD_D(R, T, B);
45 44
diff --git a/arch/powerpc/math-emu/fnmsub.c b/arch/powerpc/math-emu/fnmsub.c
index 3885a77acc93..f65979fa770e 100644
--- a/arch/powerpc/math-emu/fnmsub.c
+++ b/arch/powerpc/math-emu/fnmsub.c
@@ -15,7 +15,6 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
15 FP_DECL_D(C); 15 FP_DECL_D(C);
16 FP_DECL_D(T); 16 FP_DECL_D(T);
17 FP_DECL_EX; 17 FP_DECL_EX;
18 int ret = 0;
19 18
20#ifdef DEBUG 19#ifdef DEBUG
21 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); 20 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,7 +32,7 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
33 32
34 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || 33 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
35 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) 34 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
36 ret |= EFLAG_VXIMZ; 35 FP_SET_EXCEPTION(EFLAG_VXIMZ);
37 36
38 FP_MUL_D(T, A, C); 37 FP_MUL_D(T, A, C);
39 38
@@ -41,7 +40,7 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
41 B_s ^= 1; 40 B_s ^= 1;
42 41
43 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) 42 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
44 ret |= EFLAG_VXISI; 43 FP_SET_EXCEPTION(EFLAG_VXISI);
45 44
46 FP_ADD_D(R, T, B); 45 FP_ADD_D(R, T, B);
47 46
diff --git a/arch/powerpc/math-emu/fnmsubs.c b/arch/powerpc/math-emu/fnmsubs.c
index f835dfeb0fd1..9021dacc03b8 100644
--- a/arch/powerpc/math-emu/fnmsubs.c
+++ b/arch/powerpc/math-emu/fnmsubs.c
@@ -16,7 +16,6 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
16 FP_DECL_D(C); 16 FP_DECL_D(C);
17 FP_DECL_D(T); 17 FP_DECL_D(T);
18 FP_DECL_EX; 18 FP_DECL_EX;
19 int ret = 0;
20 19
21#ifdef DEBUG 20#ifdef DEBUG
22 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); 21 printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,7 +33,7 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
34 33
35 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || 34 if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
36 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) 35 (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
37 ret |= EFLAG_VXIMZ; 36 FP_SET_EXCEPTION(EFLAG_VXIMZ);
38 37
39 FP_MUL_D(T, A, C); 38 FP_MUL_D(T, A, C);
40 39
@@ -42,7 +41,7 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
42 B_s ^= 1; 41 B_s ^= 1;
43 42
44 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) 43 if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
45 ret |= EFLAG_VXISI; 44 FP_SET_EXCEPTION(EFLAG_VXISI);
46 45
47 FP_ADD_D(R, T, B); 46 FP_ADD_D(R, T, B);
48 47
diff --git a/arch/powerpc/math-emu/fsqrt.c b/arch/powerpc/math-emu/fsqrt.c
index 3e90072693a0..a55fc7d49983 100644
--- a/arch/powerpc/math-emu/fsqrt.c
+++ b/arch/powerpc/math-emu/fsqrt.c
@@ -12,7 +12,6 @@ fsqrt(void *frD, void *frB)
12 FP_DECL_D(B); 12 FP_DECL_D(B);
13 FP_DECL_D(R); 13 FP_DECL_D(R);
14 FP_DECL_EX; 14 FP_DECL_EX;
15 int ret = 0;
16 15
17#ifdef DEBUG 16#ifdef DEBUG
18 printk("%s: %p %p %p %p\n", __func__, frD, frB); 17 printk("%s: %p %p %p %p\n", __func__, frD, frB);
@@ -25,9 +24,9 @@ fsqrt(void *frD, void *frB)
25#endif 24#endif
26 25
27 if (B_s && B_c != FP_CLS_ZERO) 26 if (B_s && B_c != FP_CLS_ZERO)
28 ret |= EFLAG_VXSQRT; 27 FP_SET_EXCEPTION(EFLAG_VXSQRT);
29 if (B_c == FP_CLS_NAN) 28 if (B_c == FP_CLS_NAN)
30 ret |= EFLAG_VXSNAN; 29 FP_SET_EXCEPTION(EFLAG_VXSNAN);
31 30
32 FP_SQRT_D(R, B); 31 FP_SQRT_D(R, B);
33 32
diff --git a/arch/powerpc/math-emu/fsqrts.c b/arch/powerpc/math-emu/fsqrts.c
index 2843be986e2e..31dccbfc39ff 100644
--- a/arch/powerpc/math-emu/fsqrts.c
+++ b/arch/powerpc/math-emu/fsqrts.c
@@ -13,7 +13,6 @@ fsqrts(void *frD, void *frB)
13 FP_DECL_D(B); 13 FP_DECL_D(B);
14 FP_DECL_D(R); 14 FP_DECL_D(R);
15 FP_DECL_EX; 15 FP_DECL_EX;
16 int ret = 0;
17 16
18#ifdef DEBUG 17#ifdef DEBUG
19 printk("%s: %p %p %p %p\n", __func__, frD, frB); 18 printk("%s: %p %p %p %p\n", __func__, frD, frB);
@@ -26,9 +25,9 @@ fsqrts(void *frD, void *frB)
26#endif 25#endif
27 26
28 if (B_s && B_c != FP_CLS_ZERO) 27 if (B_s && B_c != FP_CLS_ZERO)
29 ret |= EFLAG_VXSQRT; 28 FP_SET_EXCEPTION(EFLAG_VXSQRT);
30 if (B_c == FP_CLS_NAN) 29 if (B_c == FP_CLS_NAN)
31 ret |= EFLAG_VXSNAN; 30 FP_SET_EXCEPTION(EFLAG_VXSNAN);
32 31
33 FP_SQRT_D(R, B); 32 FP_SQRT_D(R, B);
34 33
diff --git a/arch/powerpc/math-emu/fsub.c b/arch/powerpc/math-emu/fsub.c
index 78b09446a0e1..02c5dff458ba 100644
--- a/arch/powerpc/math-emu/fsub.c
+++ b/arch/powerpc/math-emu/fsub.c
@@ -13,7 +13,6 @@ fsub(void *frD, void *frA, void *frB)
13 FP_DECL_D(B); 13 FP_DECL_D(B);
14 FP_DECL_D(R); 14 FP_DECL_D(R);
15 FP_DECL_EX; 15 FP_DECL_EX;
16 int ret = 0;
17 16
18#ifdef DEBUG 17#ifdef DEBUG
19 printk("%s: %p %p %p\n", __func__, frD, frA, frB); 18 printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -31,7 +30,7 @@ fsub(void *frD, void *frA, void *frB)
31 B_s ^= 1; 30 B_s ^= 1;
32 31
33 if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF) 32 if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF)
34 ret |= EFLAG_VXISI; 33 FP_SET_EXCEPTION(EFLAG_VXISI);
35 34
36 FP_ADD_D(R, A, B); 35 FP_ADD_D(R, A, B);
37 36
diff --git a/arch/powerpc/math-emu/fsubs.c b/arch/powerpc/math-emu/fsubs.c
index d3bf90863cf2..5d9b18c35e07 100644
--- a/arch/powerpc/math-emu/fsubs.c
+++ b/arch/powerpc/math-emu/fsubs.c
@@ -14,7 +14,6 @@ fsubs(void *frD, void *frA, void *frB)
14 FP_DECL_D(B); 14 FP_DECL_D(B);
15 FP_DECL_D(R); 15 FP_DECL_D(R);
16 FP_DECL_EX; 16 FP_DECL_EX;
17 int ret = 0;
18 17
19#ifdef DEBUG 18#ifdef DEBUG
20 printk("%s: %p %p %p\n", __func__, frD, frA, frB); 19 printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -32,7 +31,7 @@ fsubs(void *frD, void *frA, void *frB)
32 B_s ^= 1; 31 B_s ^= 1;
33 32
34 if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF) 33 if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF)
35 ret |= EFLAG_VXISI; 34 FP_SET_EXCEPTION(EFLAG_VXISI);
36 35
37 FP_ADD_D(R, A, B); 36 FP_ADD_D(R, A, B);
38 37
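
The mechanical change repeated across the math-emu files above replaces a hand-rolled "ret |= EFLAG_*" accumulator with the exception word that FP_DECL_EX already declares, and returns FP_CUR_EXCEPTIONS instead of the local. Roughly, and only as a sketch of the macro shapes this relies on rather than a copy of the soft-fp headers:

/* Approximate shape of the soft-fp exception plumbing assumed above. */
#define SKETCH_FP_DECL_EX		int _fex = 0
#define SKETCH_FP_SET_EXCEPTION(f)	((void)(_fex |= (f)))
#define SKETCH_FP_CUR_EXCEPTIONS	(_fex)

Every raised flag therefore lands in the same word that the packing macros also accumulate into, and the per-routine ret variable becomes dead weight, which is why it is removed in each file.
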
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
new file mode 100644
index 000000000000..41f4ef30e480
--- /dev/null
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -0,0 +1,720 @@
1/*
2 * arch/powerpc/math-emu/math_efp.c
3 *
4 * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
5 *
6 * Author: Ebony Zhu, <ebony.zhu@freescale.com>
7 * Yu Liu, <yu.liu@freescale.com>
8 *
9 * Derived from arch/alpha/math-emu/math.c
10 * arch/powerpc/math-emu/math.c
11 *
12 * Description:
13 * This file is the exception handler to make E500 SPE instructions
14 * fully comply with IEEE-754 floating point standard.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 */
21
22#include <linux/types.h>
23
24#include <asm/uaccess.h>
25#include <asm/reg.h>
26
27#define FP_EX_BOOKE_E500_SPE
28#include <asm/sfp-machine.h>
29
30#include <math-emu/soft-fp.h>
31#include <math-emu/single.h>
32#include <math-emu/double.h>
33
34#define EFAPU 0x4
35
36#define VCT 0x4
37#define SPFP 0x6
38#define DPFP 0x7
39
40#define EFSADD 0x2c0
41#define EFSSUB 0x2c1
42#define EFSABS 0x2c4
43#define EFSNABS 0x2c5
44#define EFSNEG 0x2c6
45#define EFSMUL 0x2c8
46#define EFSDIV 0x2c9
47#define EFSCMPGT 0x2cc
48#define EFSCMPLT 0x2cd
49#define EFSCMPEQ 0x2ce
50#define EFSCFD 0x2cf
51#define EFSCFSI 0x2d1
52#define EFSCTUI 0x2d4
53#define EFSCTSI 0x2d5
54#define EFSCTUF 0x2d6
55#define EFSCTSF 0x2d7
56#define EFSCTUIZ 0x2d8
57#define EFSCTSIZ 0x2da
58
59#define EVFSADD 0x280
60#define EVFSSUB 0x281
61#define EVFSABS 0x284
62#define EVFSNABS 0x285
63#define EVFSNEG 0x286
64#define EVFSMUL 0x288
65#define EVFSDIV 0x289
66#define EVFSCMPGT 0x28c
67#define EVFSCMPLT 0x28d
68#define EVFSCMPEQ 0x28e
69#define EVFSCTUI 0x294
70#define EVFSCTSI 0x295
71#define EVFSCTUF 0x296
72#define EVFSCTSF 0x297
73#define EVFSCTUIZ 0x298
74#define EVFSCTSIZ 0x29a
75
76#define EFDADD 0x2e0
77#define EFDSUB 0x2e1
78#define EFDABS 0x2e4
79#define EFDNABS 0x2e5
80#define EFDNEG 0x2e6
81#define EFDMUL 0x2e8
82#define EFDDIV 0x2e9
83#define EFDCTUIDZ 0x2ea
84#define EFDCTSIDZ 0x2eb
85#define EFDCMPGT 0x2ec
86#define EFDCMPLT 0x2ed
87#define EFDCMPEQ 0x2ee
88#define EFDCFS 0x2ef
89#define EFDCTUI 0x2f4
90#define EFDCTSI 0x2f5
91#define EFDCTUF 0x2f6
92#define EFDCTSF 0x2f7
93#define EFDCTUIZ 0x2f8
94#define EFDCTSIZ 0x2fa
95
96#define AB 2
97#define XA 3
98#define XB 4
99#define XCR 5
100#define NOTYPE 0
101
102#define SIGN_BIT_S (1UL << 31)
103#define SIGN_BIT_D (1ULL << 63)
104#define FP_EX_MASK (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \
105 FP_EX_UNDERFLOW | FP_EX_OVERFLOW)
106
107union dw_union {
108 u64 dp[1];
109 u32 wp[2];
110};
111
112static unsigned long insn_type(unsigned long speinsn)
113{
114 unsigned long ret = NOTYPE;
115
116 switch (speinsn & 0x7ff) {
117 case EFSABS: ret = XA; break;
118 case EFSADD: ret = AB; break;
119 case EFSCFD: ret = XB; break;
120 case EFSCMPEQ: ret = XCR; break;
121 case EFSCMPGT: ret = XCR; break;
122 case EFSCMPLT: ret = XCR; break;
123 case EFSCTSF: ret = XB; break;
124 case EFSCTSI: ret = XB; break;
125 case EFSCTSIZ: ret = XB; break;
126 case EFSCTUF: ret = XB; break;
127 case EFSCTUI: ret = XB; break;
128 case EFSCTUIZ: ret = XB; break;
129 case EFSDIV: ret = AB; break;
130 case EFSMUL: ret = AB; break;
131 case EFSNABS: ret = XA; break;
132 case EFSNEG: ret = XA; break;
133 case EFSSUB: ret = AB; break;
134 case EFSCFSI: ret = XB; break;
135
136 case EVFSABS: ret = XA; break;
137 case EVFSADD: ret = AB; break;
138 case EVFSCMPEQ: ret = XCR; break;
139 case EVFSCMPGT: ret = XCR; break;
140 case EVFSCMPLT: ret = XCR; break;
141 case EVFSCTSF: ret = XB; break;
142 case EVFSCTSI: ret = XB; break;
143 case EVFSCTSIZ: ret = XB; break;
144 case EVFSCTUF: ret = XB; break;
145 case EVFSCTUI: ret = XB; break;
146 case EVFSCTUIZ: ret = XB; break;
147 case EVFSDIV: ret = AB; break;
148 case EVFSMUL: ret = AB; break;
149 case EVFSNABS: ret = XA; break;
150 case EVFSNEG: ret = XA; break;
151 case EVFSSUB: ret = AB; break;
152
153 case EFDABS: ret = XA; break;
154 case EFDADD: ret = AB; break;
155 case EFDCFS: ret = XB; break;
156 case EFDCMPEQ: ret = XCR; break;
157 case EFDCMPGT: ret = XCR; break;
158 case EFDCMPLT: ret = XCR; break;
159 case EFDCTSF: ret = XB; break;
160 case EFDCTSI: ret = XB; break;
161 case EFDCTSIDZ: ret = XB; break;
162 case EFDCTSIZ: ret = XB; break;
163 case EFDCTUF: ret = XB; break;
164 case EFDCTUI: ret = XB; break;
165 case EFDCTUIDZ: ret = XB; break;
166 case EFDCTUIZ: ret = XB; break;
167 case EFDDIV: ret = AB; break;
168 case EFDMUL: ret = AB; break;
169 case EFDNABS: ret = XA; break;
170 case EFDNEG: ret = XA; break;
171 case EFDSUB: ret = AB; break;
172
173 default:
174 printk(KERN_ERR "\nOoops! SPE instruction no type found.");
175 printk(KERN_ERR "\ninst code: %08lx\n", speinsn);
176 }
177
178 return ret;
179}
180
181int do_spe_mathemu(struct pt_regs *regs)
182{
183 FP_DECL_EX;
184 int IR, cmp;
185
186 unsigned long type, func, fc, fa, fb, src, speinsn;
187 union dw_union vc, va, vb;
188
189 if (get_user(speinsn, (unsigned int __user *) regs->nip))
190 return -EFAULT;
191 if ((speinsn >> 26) != EFAPU)
192 return -EINVAL; /* not an spe instruction */
193
194 type = insn_type(speinsn);
195 if (type == NOTYPE)
196 return -ENOSYS;
197
198 func = speinsn & 0x7ff;
199 fc = (speinsn >> 21) & 0x1f;
200 fa = (speinsn >> 16) & 0x1f;
201 fb = (speinsn >> 11) & 0x1f;
202 src = (speinsn >> 5) & 0x7;
203
204 vc.wp[0] = current->thread.evr[fc];
205 vc.wp[1] = regs->gpr[fc];
206 va.wp[0] = current->thread.evr[fa];
207 va.wp[1] = regs->gpr[fa];
208 vb.wp[0] = current->thread.evr[fb];
209 vb.wp[1] = regs->gpr[fb];
210
211 __FPU_FPSCR = mfspr(SPRN_SPEFSCR);
212
213#ifdef DEBUG
214 printk("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
215 printk("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
216 printk("va: %08x %08x\n", va.wp[0], va.wp[1]);
217 printk("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
218#endif
219
220 switch (src) {
221 case SPFP: {
222 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
223
224 switch (type) {
225 case AB:
226 case XCR:
227 FP_UNPACK_SP(SA, va.wp + 1);
228 case XB:
229 FP_UNPACK_SP(SB, vb.wp + 1);
230 break;
231 case XA:
232 FP_UNPACK_SP(SA, va.wp + 1);
233 break;
234 }
235
236#ifdef DEBUG
237 printk("SA: %ld %08lx %ld (%ld)\n", SA_s, SA_f, SA_e, SA_c);
238 printk("SB: %ld %08lx %ld (%ld)\n", SB_s, SB_f, SB_e, SB_c);
239#endif
240
241 switch (func) {
242 case EFSABS:
243 vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;
244 goto update_regs;
245
246 case EFSNABS:
247 vc.wp[1] = va.wp[1] | SIGN_BIT_S;
248 goto update_regs;
249
250 case EFSNEG:
251 vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
252 goto update_regs;
253
254 case EFSADD:
255 FP_ADD_S(SR, SA, SB);
256 goto pack_s;
257
258 case EFSSUB:
259 FP_SUB_S(SR, SA, SB);
260 goto pack_s;
261
262 case EFSMUL:
263 FP_MUL_S(SR, SA, SB);
264 goto pack_s;
265
266 case EFSDIV:
267 FP_DIV_S(SR, SA, SB);
268 goto pack_s;
269
270 case EFSCMPEQ:
271 cmp = 0;
272 goto cmp_s;
273
274 case EFSCMPGT:
275 cmp = 1;
276 goto cmp_s;
277
278 case EFSCMPLT:
279 cmp = -1;
280 goto cmp_s;
281
282 case EFSCTSF:
283 case EFSCTUF:
284 if (!((vb.wp[1] >> 23) == 0xff && ((vb.wp[1] & 0x7fffff) > 0))) {
285 /* NaN */
286 if (((vb.wp[1] >> 23) & 0xff) == 0) {
287 /* denorm */
288 vc.wp[1] = 0x0;
289 } else if ((vb.wp[1] >> 31) == 0) {
290 /* positive normal */
291 vc.wp[1] = (func == EFSCTSF) ?
292 0x7fffffff : 0xffffffff;
293 } else { /* negative normal */
294 vc.wp[1] = (func == EFSCTSF) ?
295 0x80000000 : 0x0;
296 }
297 } else { /* rB is NaN */
298 vc.wp[1] = 0x0;
299 }
300 goto update_regs;
301
302 case EFSCFD: {
303 FP_DECL_D(DB);
304 FP_CLEAR_EXCEPTIONS;
305 FP_UNPACK_DP(DB, vb.dp);
306#ifdef DEBUG
307 printk("DB: %ld %08lx %08lx %ld (%ld)\n",
308 DB_s, DB_f1, DB_f0, DB_e, DB_c);
309#endif
310 FP_CONV(S, D, 1, 2, SR, DB);
311 goto pack_s;
312 }
313
314 case EFSCTSI:
315 case EFSCTSIZ:
316 case EFSCTUI:
317 case EFSCTUIZ:
318 if (func & 0x4) {
319 _FP_ROUND(1, SB);
320 } else {
321 _FP_ROUND_ZERO(1, SB);
322 }
323 FP_TO_INT_S(vc.wp[1], SB, 32, ((func & 0x3) != 0));
324 goto update_regs;
325
326 default:
327 goto illegal;
328 }
329 break;
330
331pack_s:
332#ifdef DEBUG
333 printk("SR: %ld %08lx %ld (%ld)\n", SR_s, SR_f, SR_e, SR_c);
334#endif
335 FP_PACK_SP(vc.wp + 1, SR);
336 goto update_regs;
337
338cmp_s:
339 FP_CMP_S(IR, SA, SB, 3);
340 if (IR == 3 && (FP_ISSIGNAN_S(SA) || FP_ISSIGNAN_S(SB)))
341 FP_SET_EXCEPTION(FP_EX_INVALID);
342 if (IR == cmp) {
343 IR = 0x4;
344 } else {
345 IR = 0;
346 }
347 goto update_ccr;
348 }
349
350 case DPFP: {
351 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
352
353 switch (type) {
354 case AB:
355 case XCR:
356 FP_UNPACK_DP(DA, va.dp);
357 case XB:
358 FP_UNPACK_DP(DB, vb.dp);
359 break;
360 case XA:
361 FP_UNPACK_DP(DA, va.dp);
362 break;
363 }
364
365#ifdef DEBUG
366 printk("DA: %ld %08lx %08lx %ld (%ld)\n",
367 DA_s, DA_f1, DA_f0, DA_e, DA_c);
368 printk("DB: %ld %08lx %08lx %ld (%ld)\n",
369 DB_s, DB_f1, DB_f0, DB_e, DB_c);
370#endif
371
372 switch (func) {
373 case EFDABS:
374 vc.dp[0] = va.dp[0] & ~SIGN_BIT_D;
375 goto update_regs;
376
377 case EFDNABS:
378 vc.dp[0] = va.dp[0] | SIGN_BIT_D;
379 goto update_regs;
380
381 case EFDNEG:
382 vc.dp[0] = va.dp[0] ^ SIGN_BIT_D;
383 goto update_regs;
384
385 case EFDADD:
386 FP_ADD_D(DR, DA, DB);
387 goto pack_d;
388
389 case EFDSUB:
390 FP_SUB_D(DR, DA, DB);
391 goto pack_d;
392
393 case EFDMUL:
394 FP_MUL_D(DR, DA, DB);
395 goto pack_d;
396
397 case EFDDIV:
398 FP_DIV_D(DR, DA, DB);
399 goto pack_d;
400
401 case EFDCMPEQ:
402 cmp = 0;
403 goto cmp_d;
404
405 case EFDCMPGT:
406 cmp = 1;
407 goto cmp_d;
408
409 case EFDCMPLT:
410 cmp = -1;
411 goto cmp_d;
412
413 case EFDCTSF:
414 case EFDCTUF:
415 if (!((vb.wp[0] >> 20) == 0x7ff &&
416 ((vb.wp[0] & 0xfffff) > 0 || (vb.wp[1] > 0)))) {
417 /* not a NaN */
418 if (((vb.wp[0] >> 20) & 0x7ff) == 0) {
419 /* denorm */
420 vc.wp[1] = 0x0;
421 } else if ((vb.wp[0] >> 31) == 0) {
422 /* positive normal */
423 vc.wp[1] = (func == EFDCTSF) ?
424 0x7fffffff : 0xffffffff;
425 } else { /* negative normal */
426 vc.wp[1] = (func == EFDCTSF) ?
427 0x80000000 : 0x0;
428 }
429 } else { /* NaN */
430 vc.wp[1] = 0x0;
431 }
432 goto update_regs;
433
434 case EFDCFS: {
435 FP_DECL_S(SB);
436 FP_CLEAR_EXCEPTIONS;
437 FP_UNPACK_SP(SB, vb.wp + 1);
438#ifdef DEBUG
439 printk("SB: %ld %08lx %ld (%ld)\n",
440 SB_s, SB_f, SB_e, SB_c);
441#endif
442 FP_CONV(D, S, 2, 1, DR, SB);
443 goto pack_d;
444 }
445
446 case EFDCTUIDZ:
447 case EFDCTSIDZ:
448 _FP_ROUND_ZERO(2, DB);
449 FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0));
450 goto update_regs;
451
452 case EFDCTUI:
453 case EFDCTSI:
454 case EFDCTUIZ:
455 case EFDCTSIZ:
456 if (func & 0x4) {
457 _FP_ROUND(2, DB);
458 } else {
459 _FP_ROUND_ZERO(2, DB);
460 }
461 FP_TO_INT_D(vc.wp[1], DB, 32, ((func & 0x3) != 0));
462 goto update_regs;
463
464 default:
465 goto illegal;
466 }
467 break;
468
469pack_d:
470#ifdef DEBUG
471 printk("DR: %ld %08lx %08lx %ld (%ld)\n",
472 DR_s, DR_f1, DR_f0, DR_e, DR_c);
473#endif
474 FP_PACK_DP(vc.dp, DR);
475 goto update_regs;
476
477cmp_d:
478 FP_CMP_D(IR, DA, DB, 3);
479 if (IR == 3 && (FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB)))
480 FP_SET_EXCEPTION(FP_EX_INVALID);
481 if (IR == cmp) {
482 IR = 0x4;
483 } else {
484 IR = 0;
485 }
486 goto update_ccr;
487
488 }
489
490 case VCT: {
491 FP_DECL_S(SA0); FP_DECL_S(SB0); FP_DECL_S(SR0);
492 FP_DECL_S(SA1); FP_DECL_S(SB1); FP_DECL_S(SR1);
493 int IR0, IR1;
494
495 switch (type) {
496 case AB:
497 case XCR:
498 FP_UNPACK_SP(SA0, va.wp);
499 FP_UNPACK_SP(SA1, va.wp + 1);
500 case XB:
501 FP_UNPACK_SP(SB0, vb.wp);
502 FP_UNPACK_SP(SB1, vb.wp + 1);
503 break;
504 case XA:
505 FP_UNPACK_SP(SA0, va.wp);
506 FP_UNPACK_SP(SA1, va.wp + 1);
507 break;
508 }
509
510#ifdef DEBUG
511 printk("SA0: %ld %08lx %ld (%ld)\n", SA0_s, SA0_f, SA0_e, SA0_c);
512 printk("SA1: %ld %08lx %ld (%ld)\n", SA1_s, SA1_f, SA1_e, SA1_c);
513 printk("SB0: %ld %08lx %ld (%ld)\n", SB0_s, SB0_f, SB0_e, SB0_c);
514 printk("SB1: %ld %08lx %ld (%ld)\n", SB1_s, SB1_f, SB1_e, SB1_c);
515#endif
516
517 switch (func) {
518 case EVFSABS:
519 vc.wp[0] = va.wp[0] & ~SIGN_BIT_S;
520 vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;
521 goto update_regs;
522
523 case EVFSNABS:
524 vc.wp[0] = va.wp[0] | SIGN_BIT_S;
525 vc.wp[1] = va.wp[1] | SIGN_BIT_S;
526 goto update_regs;
527
528 case EVFSNEG:
529 vc.wp[0] = va.wp[0] ^ SIGN_BIT_S;
530 vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
531 goto update_regs;
532
533 case EVFSADD:
534 FP_ADD_S(SR0, SA0, SB0);
535 FP_ADD_S(SR1, SA1, SB1);
536 goto pack_vs;
537
538 case EVFSSUB:
539 FP_SUB_S(SR0, SA0, SB0);
540 FP_SUB_S(SR1, SA1, SB1);
541 goto pack_vs;
542
543 case EVFSMUL:
544 FP_MUL_S(SR0, SA0, SB0);
545 FP_MUL_S(SR1, SA1, SB1);
546 goto pack_vs;
547
548 case EVFSDIV:
549 FP_DIV_S(SR0, SA0, SB0);
550 FP_DIV_S(SR1, SA1, SB1);
551 goto pack_vs;
552
553 case EVFSCMPEQ:
554 cmp = 0;
555 goto cmp_vs;
556
557 case EVFSCMPGT:
558 cmp = 1;
559 goto cmp_vs;
560
561 case EVFSCMPLT:
562 cmp = -1;
563 goto cmp_vs;
564
565 case EVFSCTSF:
566 __asm__ __volatile__ ("mtspr 512, %4\n"
567 "efsctsf %0, %2\n"
568 "efsctsf %1, %3\n"
569 : "=r" (vc.wp[0]), "=r" (vc.wp[1])
570 : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
571 goto update_regs;
572
573 case EVFSCTUF:
574 __asm__ __volatile__ ("mtspr 512, %4\n"
575 "efsctuf %0, %2\n"
576 "efsctuf %1, %3\n"
577 : "=r" (vc.wp[0]), "=r" (vc.wp[1])
578 : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
579 goto update_regs;
580
581 case EVFSCTUI:
582 case EVFSCTSI:
583 case EVFSCTUIZ:
584 case EVFSCTSIZ:
585 if (func & 0x4) {
586 _FP_ROUND(1, SB0);
587 _FP_ROUND(1, SB1);
588 } else {
589 _FP_ROUND_ZERO(1, SB0);
590 _FP_ROUND_ZERO(1, SB1);
591 }
592 FP_TO_INT_S(vc.wp[0], SB0, 32, ((func & 0x3) != 0));
593 FP_TO_INT_S(vc.wp[1], SB1, 32, ((func & 0x3) != 0));
594 goto update_regs;
595
596 default:
597 goto illegal;
598 }
599 break;
600
601pack_vs:
602#ifdef DEBUG
603 printk("SR0: %ld %08lx %ld (%ld)\n", SR0_s, SR0_f, SR0_e, SR0_c);
604 printk("SR1: %ld %08lx %ld (%ld)\n", SR1_s, SR1_f, SR1_e, SR1_c);
605#endif
606 FP_PACK_SP(vc.wp, SR0);
607 FP_PACK_SP(vc.wp + 1, SR1);
608 goto update_regs;
609
610cmp_vs:
611 {
612 int ch, cl;
613
614 FP_CMP_S(IR0, SA0, SB0, 3);
615 FP_CMP_S(IR1, SA1, SB1, 3);
616 if (IR0 == 3 && (FP_ISSIGNAN_S(SA0) || FP_ISSIGNAN_S(SB0)))
617 FP_SET_EXCEPTION(FP_EX_INVALID);
618 if (IR1 == 3 && (FP_ISSIGNAN_S(SA1) || FP_ISSIGNAN_S(SB1)))
619 FP_SET_EXCEPTION(FP_EX_INVALID);
620 ch = (IR0 == cmp) ? 1 : 0;
621 cl = (IR1 == cmp) ? 1 : 0;
622 IR = (ch << 3) | (cl << 2) | ((ch | cl) << 1) |
623 ((ch & cl) << 0);
624 goto update_ccr;
625 }
626 }
627 default:
628 return -EINVAL;
629 }
630
631update_ccr:
632 regs->ccr &= ~(15 << ((7 - ((speinsn >> 23) & 0x7)) << 2));
633 regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2));
634
635update_regs:
636 __FPU_FPSCR &= ~FP_EX_MASK;
637 __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK);
638 mtspr(SPRN_SPEFSCR, __FPU_FPSCR);
639
640 current->thread.evr[fc] = vc.wp[0];
641 regs->gpr[fc] = vc.wp[1];
642
643#ifdef DEBUG
644 printk("ccr = %08lx\n", regs->ccr);
645 printk("cur exceptions = %08x spefscr = %08lx\n",
646 FP_CUR_EXCEPTIONS, __FPU_FPSCR);
647 printk("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
648 printk("va: %08x %08x\n", va.wp[0], va.wp[1]);
649 printk("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
650#endif
651
652 return 0;
653
654illegal:
655 printk(KERN_ERR "\nOoops! IEEE-754 compliance handler encountered un-supported instruction.\ninst code: %08lx\n", speinsn);
656 return -ENOSYS;
657}
658
659int speround_handler(struct pt_regs *regs)
660{
661 union dw_union fgpr;
662 int s_lo, s_hi;
663 unsigned long speinsn, type, fc;
664
665 if (get_user(speinsn, (unsigned int __user *) regs->nip))
666 return -EFAULT;
667 if ((speinsn >> 26) != 4)
668 return -EINVAL; /* not an spe instruction */
669
670 type = insn_type(speinsn & 0x7ff);
671 if (type == XCR) return -ENOSYS;
672
673 fc = (speinsn >> 21) & 0x1f;
674 s_lo = regs->gpr[fc] & SIGN_BIT_S;
675 s_hi = current->thread.evr[fc] & SIGN_BIT_S;
676 fgpr.wp[0] = current->thread.evr[fc];
677 fgpr.wp[1] = regs->gpr[fc];
678
679 __FPU_FPSCR = mfspr(SPRN_SPEFSCR);
680
681 switch ((speinsn >> 5) & 0x7) {
682 /* Since SPE instructions on E500 core can handle round to nearest
683 * and round toward zero with IEEE-754 complied, we just need
684 * to handle round toward +Inf and round toward -Inf by software.
685 */
686 case SPFP:
687 if ((FP_ROUNDMODE) == FP_RND_PINF) {
688 if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */
689 } else { /* round to -Inf */
690 if (s_lo) fgpr.wp[1]++; /* Z < 0, choose Z2 */
691 }
692 break;
693
694 case DPFP:
695 if (FP_ROUNDMODE == FP_RND_PINF) {
696 if (!s_hi) fgpr.dp[0]++; /* Z > 0, choose Z1 */
697 } else { /* round to -Inf */
698 if (s_hi) fgpr.dp[0]++; /* Z < 0, choose Z2 */
699 }
700 break;
701
702 case VCT:
703 if (FP_ROUNDMODE == FP_RND_PINF) {
704 if (!s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
705 if (!s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
706 } else { /* round to -Inf */
707 if (s_lo) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
708 if (s_hi) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
709 }
710 break;
711
712 default:
713 return -EINVAL;
714 }
715
716 current->thread.evr[fc] = fgpr.wp[0];
717 regs->gpr[fc] = fgpr.wp[1];
718
719 return 0;
720}
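
Both do_spe_mathemu() and speround_handler() rebuild 64-bit SPE operands from two places: with SPE the GPRs are 64 bits wide, the low word lives in the pt_regs GPR image and the saved upper word in thread.evr[], which is what the dw_union above papers over. A small illustrative helper (not in the patch; it merely restates the va/vb/vc setup at the top of do_spe_mathemu()):

static u64 read_spe_operand(struct pt_regs *regs, int reg)
{
	union dw_union v;

	v.wp[0] = current->thread.evr[reg];	/* upper 32 bits */
	v.wp[1] = regs->gpr[reg];		/* lower 32 bits */
	return v.dp[0];
}
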
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index e7392b45a5ef..953cc4a1cde5 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,17 +6,19 @@ ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc 6EXTRA_CFLAGS += -mno-minimal-toc
7endif 7endif
8 8
9obj-y := fault.o mem.o \ 9obj-y := fault.o mem.o pgtable.o \
10 init_$(CONFIG_WORD_SIZE).o \ 10 init_$(CONFIG_WORD_SIZE).o \
11 pgtable_$(CONFIG_WORD_SIZE).o \ 11 pgtable_$(CONFIG_WORD_SIZE).o
12 mmu_context_$(CONFIG_WORD_SIZE).o 12obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
13 tlb_nohash_low.o
13hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o 14hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
14obj-$(CONFIG_PPC64) += hash_utils_64.o \ 15obj-$(CONFIG_PPC64) += hash_utils_64.o \
15 slb_low.o slb.o stab.o \ 16 slb_low.o slb.o stab.o \
16 gup.o mmap.o $(hash-y) 17 gup.o mmap.o $(hash-y)
17obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o 18obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
18obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ 19obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
19 tlb_$(CONFIG_WORD_SIZE).o 20 tlb_hash$(CONFIG_WORD_SIZE).o \
21 mmu_context_hash$(CONFIG_WORD_SIZE).o
20obj-$(CONFIG_40x) += 40x_mmu.o 22obj-$(CONFIG_40x) += 40x_mmu.o
21obj-$(CONFIG_44x) += 44x_mmu.o 23obj-$(CONFIG_44x) += 44x_mmu.o
22obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o 24obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 565b7a237c84..91c7b8636b8a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -30,6 +30,7 @@
30#include <linux/kprobes.h> 30#include <linux/kprobes.h>
31#include <linux/kdebug.h> 31#include <linux/kdebug.h>
32 32
33#include <asm/firmware.h>
33#include <asm/page.h> 34#include <asm/page.h>
34#include <asm/pgtable.h> 35#include <asm/pgtable.h>
35#include <asm/mmu.h> 36#include <asm/mmu.h>
@@ -283,7 +284,7 @@ good_area:
283 } 284 }
284 pte_update(ptep, 0, _PAGE_HWEXEC | 285 pte_update(ptep, 0, _PAGE_HWEXEC |
285 _PAGE_ACCESSED); 286 _PAGE_ACCESSED);
286 _tlbie(address, mm->context.id); 287 local_flush_tlb_page(vma, address);
287 pte_unmap_unlock(ptep, ptl); 288 pte_unmap_unlock(ptep, ptl);
288 up_read(&mm->mmap_sem); 289 up_read(&mm->mmap_sem);
289 return 0; 290 return 0;
@@ -318,9 +319,16 @@ good_area:
318 goto do_sigbus; 319 goto do_sigbus;
319 BUG(); 320 BUG();
320 } 321 }
321 if (ret & VM_FAULT_MAJOR) 322 if (ret & VM_FAULT_MAJOR) {
322 current->maj_flt++; 323 current->maj_flt++;
323 else 324#ifdef CONFIG_PPC_SMLPAR
325 if (firmware_has_feature(FW_FEATURE_CMO)) {
326 preempt_disable();
327 get_lppaca()->page_ins += (1 << PAGE_FACTOR);
328 preempt_enable();
329 }
330#endif
331 } else
324 current->min_flt++; 332 current->min_flt++;
325 up_read(&mm->mmap_sem); 333 up_read(&mm->mmap_sem);
326 return 0; 334 return 0;
@@ -339,7 +347,7 @@ bad_area_nosemaphore:
339 && printk_ratelimit()) 347 && printk_ratelimit())
340 printk(KERN_CRIT "kernel tried to execute NX-protected" 348 printk(KERN_CRIT "kernel tried to execute NX-protected"
341 " page (%lx) - exploit attempt? (uid: %d)\n", 349 " page (%lx) - exploit attempt? (uid: %d)\n",
342 address, current->uid); 350 address, current_uid());
343 351
344 return SIGSEGV; 352 return SIGSEGV;
345 353
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 7bffb70b9fe2..67850ec9feb3 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -36,36 +36,6 @@ mmu_hash_lock:
36#endif /* CONFIG_SMP */ 36#endif /* CONFIG_SMP */
37 37
38/* 38/*
39 * Sync CPUs with hash_page taking & releasing the hash
40 * table lock
41 */
42#ifdef CONFIG_SMP
43 .text
44_GLOBAL(hash_page_sync)
45 mfmsr r10
46 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
47 mtmsr r0
48 lis r8,mmu_hash_lock@h
49 ori r8,r8,mmu_hash_lock@l
50 lis r0,0x0fff
51 b 10f
5211: lwz r6,0(r8)
53 cmpwi 0,r6,0
54 bne 11b
5510: lwarx r6,0,r8
56 cmpwi 0,r6,0
57 bne- 11b
58 stwcx. r0,0,r8
59 bne- 10b
60 isync
61 eieio
62 li r0,0
63 stw r0,0(r8)
64 mtmsr r10
65 blr
66#endif /* CONFIG_SMP */
67
68/*
69 * Load a PTE into the hash table, if possible. 39 * Load a PTE into the hash table, if possible.
70 * The address is in r4, and r3 contains an access flag: 40 * The address is in r4, and r3 contains an access flag:
71 * _PAGE_RW (0x400) if a write. 41 * _PAGE_RW (0x400) if a write.
@@ -353,8 +323,8 @@ _GLOBAL(create_hpte)
353 ori r8,r8,0xe14 /* clear out reserved bits and M */ 323 ori r8,r8,0xe14 /* clear out reserved bits and M */
354 andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */ 324 andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
355BEGIN_FTR_SECTION 325BEGIN_FTR_SECTION
356 ori r8,r8,_PAGE_COHERENT /* set M (coherence required) */ 326 rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
357END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT) 327END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
358#ifdef CONFIG_PTE_64BIT 328#ifdef CONFIG_PTE_64BIT
359 /* Put the XPN bits into the PTE */ 329 /* Put the XPN bits into the PTE */
360 rlwimi r8,r10,8,20,22 330 rlwimi r8,r10,8,20,22
@@ -663,3 +633,80 @@ _GLOBAL(flush_hash_patch_B)
663 SYNC_601 633 SYNC_601
664 isync 634 isync
665 blr 635 blr
636
637/*
638 * Flush an entry from the TLB
639 */
640_GLOBAL(_tlbie)
641#ifdef CONFIG_SMP
642 rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
643 lwz r8,TI_CPU(r8)
644 oris r8,r8,11
645 mfmsr r10
646 SYNC
647 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
648 rlwinm r0,r0,0,28,26 /* clear DR */
649 mtmsr r0
650 SYNC_601
651 isync
652 lis r9,mmu_hash_lock@h
653 ori r9,r9,mmu_hash_lock@l
654 tophys(r9,r9)
65510: lwarx r7,0,r9
656 cmpwi 0,r7,0
657 bne- 10b
658 stwcx. r8,0,r9
659 bne- 10b
660 eieio
661 tlbie r3
662 sync
663 TLBSYNC
664 li r0,0
665 stw r0,0(r9) /* clear mmu_hash_lock */
666 mtmsr r10
667 SYNC_601
668 isync
669#else /* CONFIG_SMP */
670 tlbie r3
671 sync
672#endif /* CONFIG_SMP */
673 blr
674
675/*
676 * Flush the entire TLB. 603/603e only
677 */
678_GLOBAL(_tlbia)
679#if defined(CONFIG_SMP)
680 rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
681 lwz r8,TI_CPU(r8)
682 oris r8,r8,10
683 mfmsr r10
684 SYNC
685 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
686 rlwinm r0,r0,0,28,26 /* clear DR */
687 mtmsr r0
688 SYNC_601
689 isync
690 lis r9,mmu_hash_lock@h
691 ori r9,r9,mmu_hash_lock@l
692 tophys(r9,r9)
69310: lwarx r7,0,r9
694 cmpwi 0,r7,0
695 bne- 10b
696 stwcx. r8,0,r9
697 bne- 10b
698 sync
699 tlbia
700 sync
701 TLBSYNC
702 li r0,0
703 stw r0,0(r9) /* clear mmu_hash_lock */
704 mtmsr r10
705 SYNC_601
706 isync
707#else /* CONFIG_SMP */
708 sync
709 tlbia
710 sync
711#endif /* CONFIG_SMP */
712 blr
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f0c3b88d50fa..201c7a5486cb 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,8 +53,7 @@ unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
53 53
54/* Subtract one from array size because we don't need a cache for 4K since 54/* Subtract one from array size because we don't need a cache for 4K since
55 * is not a huge page size */ 55 * is not a huge page size */
56#define huge_pgtable_cache(psize) (pgtable_cache[HUGEPTE_CACHE_NUM \ 56#define HUGE_PGTABLE_INDEX(psize) (HUGEPTE_CACHE_NUM + psize - 1)
57 + psize-1])
58#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize]) 57#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize])
59 58
60static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = { 59static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
@@ -113,7 +112,7 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
113static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, 112static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
114 unsigned long address, unsigned int psize) 113 unsigned long address, unsigned int psize)
115{ 114{
116 pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize), 115 pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
117 GFP_KERNEL|__GFP_REPEAT); 116 GFP_KERNEL|__GFP_REPEAT);
118 117
119 if (! new) 118 if (! new)
@@ -121,7 +120,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
121 120
122 spin_lock(&mm->page_table_lock); 121 spin_lock(&mm->page_table_lock);
123 if (!hugepd_none(*hpdp)) 122 if (!hugepd_none(*hpdp))
124 kmem_cache_free(huge_pgtable_cache(psize), new); 123 kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
125 else 124 else
126 hpdp->pd = (unsigned long)new | HUGEPD_OK; 125 hpdp->pd = (unsigned long)new | HUGEPD_OK;
127 spin_unlock(&mm->page_table_lock); 126 spin_unlock(&mm->page_table_lock);
@@ -763,13 +762,14 @@ static int __init hugetlbpage_init(void)
763 762
764 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 763 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
765 if (mmu_huge_psizes[psize]) { 764 if (mmu_huge_psizes[psize]) {
766 huge_pgtable_cache(psize) = kmem_cache_create( 765 pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
767 HUGEPTE_CACHE_NAME(psize), 766 kmem_cache_create(
768 HUGEPTE_TABLE_SIZE(psize), 767 HUGEPTE_CACHE_NAME(psize),
769 HUGEPTE_TABLE_SIZE(psize), 768 HUGEPTE_TABLE_SIZE(psize),
770 0, 769 HUGEPTE_TABLE_SIZE(psize),
771 NULL); 770 0,
772 if (!huge_pgtable_cache(psize)) 771 NULL);
772 if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
773 panic("hugetlbpage_init(): could not create %s"\ 773 panic("hugetlbpage_init(): could not create %s"\
774 "\n", HUGEPTE_CACHE_NAME(psize)); 774 "\n", HUGEPTE_CACHE_NAME(psize));
775 } 775 }
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 388ceda632f3..666a5e8a5be1 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -35,7 +35,6 @@
35#include <asm/pgalloc.h> 35#include <asm/pgalloc.h>
36#include <asm/prom.h> 36#include <asm/prom.h>
37#include <asm/io.h> 37#include <asm/io.h>
38#include <asm/mmu_context.h>
39#include <asm/pgtable.h> 38#include <asm/pgtable.h>
40#include <asm/mmu.h> 39#include <asm/mmu.h>
41#include <asm/smp.h> 40#include <asm/smp.h>
@@ -49,7 +48,7 @@
49 48
50#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) 49#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
51/* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. */ 50/* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. */
52#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE)) 51#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
53#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL" 52#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
54#endif 53#endif
55#endif 54#endif
@@ -180,9 +179,6 @@ void __init MMU_init(void)
180 if (ppc_md.progress) 179 if (ppc_md.progress)
181 ppc_md.progress("MMU:setio", 0x302); 180 ppc_md.progress("MMU:setio", 0x302);
182 181
183 /* Initialize the context management stuff */
184 mmu_context_init();
185
186 if (ppc_md.progress) 182 if (ppc_md.progress)
187 ppc_md.progress("MMU:exit", 0x211); 183 ppc_md.progress("MMU:exit", 0x211);
188 184
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9e1a1da6e52..53b06ebb3f2f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -102,8 +102,8 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
102 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot); 102 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
103 103
104 if (!page_is_ram(pfn)) 104 if (!page_is_ram(pfn))
105 vma_prot = __pgprot(pgprot_val(vma_prot) 105 vma_prot = pgprot_noncached(vma_prot);
106 | _PAGE_GUARDED | _PAGE_NO_CACHE); 106
107 return vma_prot; 107 return vma_prot;
108} 108}
109EXPORT_SYMBOL(phys_mem_access_prot); 109EXPORT_SYMBOL(phys_mem_access_prot);
@@ -488,7 +488,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
488 * we invalidate the TLB here, thus avoiding dcbst 488 * we invalidate the TLB here, thus avoiding dcbst
489 * misbehaviour. 489 * misbehaviour.
490 */ 490 */
491 _tlbie(address, 0 /* 8xx doesn't care about PID */); 491 _tlbil_va(address, 0 /* 8xx doesn't care about PID */);
492#endif 492#endif
493 /* The _PAGE_USER test should really be _PAGE_EXEC, but 493 /* The _PAGE_USER test should really be _PAGE_EXEC, but
494 * older glibc versions execute some code from no-exec 494 * older glibc versions execute some code from no-exec
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
deleted file mode 100644
index cc32ba41d900..000000000000
--- a/arch/powerpc/mm/mmu_context_32.c
+++ /dev/null
@@ -1,84 +0,0 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU substantially follows the
4 * architecture specification. This includes the 6xx, 7xx, 7xxx,
5 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 *
15 * Derived from "arch/i386/mm/init.c"
16 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/mm.h>
26#include <linux/init.h>
27
28#include <asm/mmu_context.h>
29#include <asm/tlbflush.h>
30
31unsigned long next_mmu_context;
32unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
33#ifdef FEW_CONTEXTS
34atomic_t nr_free_contexts;
35struct mm_struct *context_mm[LAST_CONTEXT+1];
36void steal_context(void);
37#endif /* FEW_CONTEXTS */
38
39/*
40 * Initialize the context management stuff.
41 */
42void __init
43mmu_context_init(void)
44{
45 /*
46 * Some processors have too few contexts to reserve one for
47 * init_mm, and require using context 0 for a normal task.
48 * Other processors reserve the use of context zero for the kernel.
49 * This code assumes FIRST_CONTEXT < 32.
50 */
51 context_map[0] = (1 << FIRST_CONTEXT) - 1;
52 next_mmu_context = FIRST_CONTEXT;
53#ifdef FEW_CONTEXTS
54 atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
55#endif /* FEW_CONTEXTS */
56}
57
58#ifdef FEW_CONTEXTS
59/*
60 * Steal a context from a task that has one at the moment.
61 * This is only used on 8xx and 4xx and we presently assume that
62 * they don't do SMP. If they do then this will have to check
63 * whether the MM we steal is in use.
64 * We also assume that this is only used on systems that don't
65 * use an MMU hash table - this is true for 8xx and 4xx.
66 * This isn't an LRU system, it just frees up each context in
67 * turn (sort-of pseudo-random replacement :). This would be the
68 * place to implement an LRU scheme if anyone was motivated to do it.
69 * -- paulus
70 */
71void
72steal_context(void)
73{
74 struct mm_struct *mm;
75
76 /* free up context `next_mmu_context' */
77 /* if we shouldn't free context 0, don't... */
78 if (next_mmu_context < FIRST_CONTEXT)
79 next_mmu_context = FIRST_CONTEXT;
80 mm = context_mm[next_mmu_context];
81 flush_tlb_mm(mm);
82 destroy_context(mm);
83}
84#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
new file mode 100644
index 000000000000..0dfba2bf7f31
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -0,0 +1,103 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU substantially follows the
4 * architecture specification. This includes the 6xx, 7xx, 7xxx,
5 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 *
15 * Derived from "arch/i386/mm/init.c"
16 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/mm.h>
26#include <linux/init.h>
27
28#include <asm/mmu_context.h>
29#include <asm/tlbflush.h>
30
31/*
32 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
33 * (virtual segment identifiers) for each context. Although the
34 * hardware supports 24-bit VSIDs, and thus >1 million contexts,
35 * we only use 32,768 of them. That is ample, since there can be
36 * at most around 30,000 tasks in the system anyway, and it means
37 * that we can use a bitmap to indicate which contexts are in use.
38 * Using a bitmap means that we entirely avoid all of the problems
39 * that we used to have when the context number overflowed,
40 * particularly on SMP systems.
41 * -- paulus.
42 */
43#define NO_CONTEXT ((unsigned long) -1)
44#define LAST_CONTEXT 32767
45#define FIRST_CONTEXT 1
46
47/*
48 * This function defines the mapping from contexts to VSIDs (virtual
49 * segment IDs). We use a skew on both the context and the high 4 bits
50 * of the 32-bit virtual address (the "effective segment ID") in order
51 * to spread out the entries in the MMU hash table. Note, if this
52 * function is changed then arch/ppc/mm/hashtable.S will have to be
53 * changed to correspond.
54 *
55 *
56 * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
57 * & 0xffffff)
58 */
59
60static unsigned long next_mmu_context;
61static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
62
63
64/*
65 * Set up the context for a new address space.
66 */
67int init_new_context(struct task_struct *t, struct mm_struct *mm)
68{
69 unsigned long ctx = next_mmu_context;
70
71 while (test_and_set_bit(ctx, context_map)) {
72 ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
73 if (ctx > LAST_CONTEXT)
74 ctx = 0;
75 }
76 next_mmu_context = (ctx + 1) & LAST_CONTEXT;
77 mm->context.id = ctx;
78
79 return 0;
80}
81
82/*
83 * We're finished using the context for an address space.
84 */
85void destroy_context(struct mm_struct *mm)
86{
87 preempt_disable();
88 if (mm->context.id != NO_CONTEXT) {
89 clear_bit(mm->context.id, context_map);
90 mm->context.id = NO_CONTEXT;
91 }
92 preempt_enable();
93}
94
95/*
96 * Initialize the context management stuff.
97 */
98void __init mmu_context_init(void)
99{
100 /* Reserve context 0 for kernel use */
101 context_map[0] = (1 << FIRST_CONTEXT) - 1;
102 next_mmu_context = FIRST_CONTEXT;
103}
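For orientation, init_new_context() above is just a rotating bitmap scan. A stand-alone sketch of the same idea follows (plain C, no kernel headers; the ctx_in_use byte array, the alloc_context()/free_context() names and the main() driver are inventions for illustration, and the kernel's atomic test_and_set_bit() is reduced to an ordinary flag since there is no concurrency here):

#include <stdio.h>

#define LAST_CONTEXT  32767
#define FIRST_CONTEXT 1

static unsigned char ctx_in_use[LAST_CONTEXT + 1];   /* byte per context, for clarity */
static unsigned long next_ctx = FIRST_CONTEXT;

/* Hand out the next free context number, rotating through the map. */
static unsigned long alloc_context(void)
{
	unsigned long ctx = next_ctx;

	while (ctx_in_use[ctx]) {              /* test_and_set_bit() in the kernel */
		ctx++;
		if (ctx > LAST_CONTEXT)
			ctx = FIRST_CONTEXT;
	}
	ctx_in_use[ctx] = 1;
	next_ctx = (ctx + 1) & LAST_CONTEXT;   /* wrap; works because LAST_CONTEXT is 2^n - 1 */
	return ctx;
}

/* destroy_context() analogue: just give the number back. */
static void free_context(unsigned long ctx)
{
	ctx_in_use[ctx] = 0;
}

int main(void)
{
	unsigned long a, b;

	ctx_in_use[0] = 1;                     /* context 0 stays reserved for the kernel */
	a = alloc_context();
	b = alloc_context();
	printf("allocated contexts %lu and %lu\n", a, b);
	free_context(a);
	return 0;
}

The wrap with (ctx + 1) & LAST_CONTEXT only works because LAST_CONTEXT is one less than a power of two; freeing mirrors destroy_context() by clearing the corresponding bit.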
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 1db38ba1f544..dbeb86ac90cd 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -24,6 +24,14 @@
24static DEFINE_SPINLOCK(mmu_context_lock); 24static DEFINE_SPINLOCK(mmu_context_lock);
25static DEFINE_IDR(mmu_context_idr); 25static DEFINE_IDR(mmu_context_idr);
26 26
27/*
28 * The proto-VSID space has 2^35 - 1 segments available for user mappings.
29 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
30 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
31 */
32#define NO_CONTEXT 0
33#define MAX_CONTEXT ((1UL << 19) - 1)
34
27int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 35int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
28{ 36{
29 int index; 37 int index;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
new file mode 100644
index 000000000000..52a0cfc38b64
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -0,0 +1,397 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU is not using the hash
4 * table, such as 8xx, 4xx, BookE's etc...
5 *
6 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
7 * IBM Corp.
8 *
9 * Derived from previous arch/powerpc/mm/mmu_context.c
10 * and arch/powerpc/include/asm/mmu_context.h
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * TODO:
18 *
19 * - The global context lock will not scale very well
20 * - The maps should be dynamically allocated to allow for processors
21 * that support more PID bits at runtime
22 * - Implement flush_tlb_mm() by making the context stale and picking
23 * a new one
24 * - More aggressively clear stale map bits and maybe find some way to
25 * also clear mm->cpu_vm_mask bits when processes are migrated
26 */
27
28#undef DEBUG
29#define DEBUG_STEAL_ONLY
30#undef DEBUG_MAP_CONSISTENCY
31/*#define DEBUG_CLAMP_LAST_CONTEXT 15 */
32
33#include <linux/kernel.h>
34#include <linux/mm.h>
35#include <linux/init.h>
36#include <linux/spinlock.h>
37#include <linux/bootmem.h>
38#include <linux/notifier.h>
39#include <linux/cpu.h>
40
41#include <asm/mmu_context.h>
42#include <asm/tlbflush.h>
43
44static unsigned int first_context, last_context;
45static unsigned int next_context, nr_free_contexts;
46static unsigned long *context_map;
47static unsigned long *stale_map[NR_CPUS];
48static struct mm_struct **context_mm;
49static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
50
51#define CTX_MAP_SIZE \
52 (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
53
54
55/* Steal a context from a task that has one at the moment.
56 *
57 * This is used when we are running out of available PID numbers
58 * on the processors.
59 *
60 * This isn't an LRU system, it just frees up each context in
61 * turn (sort-of pseudo-random replacement :). This would be the
62 * place to implement an LRU scheme if anyone was motivated to do it.
63 * -- paulus
64 *
65 * For context stealing, we use a slightly different approach for
66 * SMP and UP. Basically, the UP one is simpler and doesn't use
67 * the stale map as we can just flush the local CPU
68 * -- benh
69 */
70#ifdef CONFIG_SMP
71static unsigned int steal_context_smp(unsigned int id)
72{
73 struct mm_struct *mm;
74 unsigned int cpu, max;
75
76 again:
77 max = last_context - first_context;
78
79 /* Attempt to free next_context first and then loop until we manage */
80 while (max--) {
81 /* Pick up the victim mm */
82 mm = context_mm[id];
83
84 /* We have a candidate victim, check if it's active, on SMP
85 * we cannot steal active contexts
86 */
87 if (mm->context.active) {
88 id++;
89 if (id > last_context)
90 id = first_context;
91 continue;
92 }
93 pr_debug("[%d] steal context %d from mm @%p\n",
94 smp_processor_id(), id, mm);
95
 96	/* Mark this mm as having no context anymore */
97 mm->context.id = MMU_NO_CONTEXT;
98
99 /* Mark it stale on all CPUs that used this mm */
100 for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
101 __set_bit(id, stale_map[cpu]);
102 return id;
103 }
104
105 /* This will happen if you have more CPUs than available contexts,
106 * all we can do here is wait a bit and try again
107 */
108 spin_unlock(&context_lock);
109 cpu_relax();
110 spin_lock(&context_lock);
111 goto again;
112}
113#endif /* CONFIG_SMP */
114
115/* Note that this will also be called on SMP if all other CPUs are
116 * offlined, which means that it may be called for cpu != 0. For
117 * this to work, we somewhat assume that CPUs that are onlined
118 * come up with a fully clean TLB (or are cleaned when offlined)
119 */
120static unsigned int steal_context_up(unsigned int id)
121{
122 struct mm_struct *mm;
123 int cpu = smp_processor_id();
124
125 /* Pick up the victim mm */
126 mm = context_mm[id];
127
128 pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
129
 130	/* Mark this mm as having no context anymore */
131 mm->context.id = MMU_NO_CONTEXT;
132
133 /* Flush the TLB for that context */
134 local_flush_tlb_mm(mm);
135
136 /* XXX This clear should ultimately be part of local_flush_tlb_mm */
137 __clear_bit(id, stale_map[cpu]);
138
139 return id;
140}
141
142#ifdef DEBUG_MAP_CONSISTENCY
143static void context_check_map(void)
144{
145 unsigned int id, nrf, nact;
146
147 nrf = nact = 0;
148 for (id = first_context; id <= last_context; id++) {
149 int used = test_bit(id, context_map);
150 if (!used)
151 nrf++;
152 if (used != (context_mm[id] != NULL))
153 pr_err("MMU: Context %d is %s and MM is %p !\n",
154 id, used ? "used" : "free", context_mm[id]);
155 if (context_mm[id] != NULL)
156 nact += context_mm[id]->context.active;
157 }
158 if (nrf != nr_free_contexts) {
159 pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
160 nr_free_contexts, nrf);
161 nr_free_contexts = nrf;
162 }
163 if (nact > num_online_cpus())
164 pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
165 nact, num_online_cpus());
166 if (first_context > 0 && !test_bit(0, context_map))
167 pr_err("MMU: Context 0 has been freed !!!\n");
168}
169#else
170static void context_check_map(void) { }
171#endif
172
173void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
174{
175 unsigned int id, cpu = smp_processor_id();
176 unsigned long *map;
177
178 /* No lockless fast path .. yet */
179 spin_lock(&context_lock);
180
181#ifndef DEBUG_STEAL_ONLY
182 pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
183 cpu, next, next->context.active, next->context.id);
184#endif
185
186#ifdef CONFIG_SMP
187 /* Mark us active and the previous one not anymore */
188 next->context.active++;
189 if (prev) {
190#ifndef DEBUG_STEAL_ONLY
191 pr_debug(" old context %p active was: %d\n",
192 prev, prev->context.active);
193#endif
194 WARN_ON(prev->context.active < 1);
195 prev->context.active--;
196 }
197#endif /* CONFIG_SMP */
198
199 /* If we already have a valid assigned context, skip all that */
200 id = next->context.id;
201 if (likely(id != MMU_NO_CONTEXT))
202 goto ctxt_ok;
203
204 /* We really don't have a context, let's try to acquire one */
205 id = next_context;
206 if (id > last_context)
207 id = first_context;
208 map = context_map;
209
210 /* No more free contexts, let's try to steal one */
211 if (nr_free_contexts == 0) {
212#ifdef CONFIG_SMP
213 if (num_online_cpus() > 1) {
214 id = steal_context_smp(id);
215 goto stolen;
216 }
217#endif /* CONFIG_SMP */
218 id = steal_context_up(id);
219 goto stolen;
220 }
221 nr_free_contexts--;
222
223 /* We know there's at least one free context, try to find it */
224 while (__test_and_set_bit(id, map)) {
225 id = find_next_zero_bit(map, last_context+1, id);
226 if (id > last_context)
227 id = first_context;
228 }
229 stolen:
230 next_context = id + 1;
231 context_mm[id] = next;
232 next->context.id = id;
233
234#ifndef DEBUG_STEAL_ONLY
235 pr_debug("[%d] picked up new id %d, nrf is now %d\n",
236 cpu, id, nr_free_contexts);
237#endif
238
239 context_check_map();
240 ctxt_ok:
241
242 /* If that context got marked stale on this CPU, then flush the
243 * local TLB for it and unmark it before we use it
244 */
245 if (test_bit(id, stale_map[cpu])) {
246 pr_debug("[%d] flushing stale context %d for mm @%p !\n",
247 cpu, id, next);
248 local_flush_tlb_mm(next);
249
250 /* XXX This clear should ultimately be part of local_flush_tlb_mm */
251 __clear_bit(id, stale_map[cpu]);
252 }
253
254 /* Flick the MMU and release lock */
255 set_context(id, next->pgd);
256 spin_unlock(&context_lock);
257}
258
259/*
260 * Set up the context for a new address space.
261 */
262int init_new_context(struct task_struct *t, struct mm_struct *mm)
263{
264 mm->context.id = MMU_NO_CONTEXT;
265 mm->context.active = 0;
266
267 return 0;
268}
269
270/*
271 * We're finished using the context for an address space.
272 */
273void destroy_context(struct mm_struct *mm)
274{
275 unsigned int id;
276
277 if (mm->context.id == MMU_NO_CONTEXT)
278 return;
279
280 WARN_ON(mm->context.active != 0);
281
282 spin_lock(&context_lock);
283 id = mm->context.id;
284 if (id != MMU_NO_CONTEXT) {
285 __clear_bit(id, context_map);
286 mm->context.id = MMU_NO_CONTEXT;
287#ifdef DEBUG_MAP_CONSISTENCY
288 mm->context.active = 0;
289 context_mm[id] = NULL;
290#endif
291 nr_free_contexts++;
292 }
293 spin_unlock(&context_lock);
294}
295
296#ifdef CONFIG_SMP
297
298static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
299 unsigned long action, void *hcpu)
300{
301 unsigned int cpu = (unsigned int)(long)hcpu;
302
 303	/* We don't touch CPU 0 map, it's allocated at boot and kept
304 * around forever
305 */
306 if (cpu == 0)
307 return NOTIFY_OK;
308
309 switch (action) {
310 case CPU_ONLINE:
311 case CPU_ONLINE_FROZEN:
312 pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
313 stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
314 break;
315#ifdef CONFIG_HOTPLUG_CPU
316 case CPU_DEAD:
317 case CPU_DEAD_FROZEN:
318 pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
319 kfree(stale_map[cpu]);
320 stale_map[cpu] = NULL;
321 break;
322#endif
323 }
324 return NOTIFY_OK;
325}
326
327static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
328 .notifier_call = mmu_context_cpu_notify,
329};
330
331#endif /* CONFIG_SMP */
332
333/*
334 * Initialize the context management stuff.
335 */
336void __init mmu_context_init(void)
337{
338 /* Mark init_mm as being active on all possible CPUs since
339 * we'll get called with prev == init_mm the first time
340 * we schedule on a given CPU
341 */
342 init_mm.context.active = NR_CPUS;
343
344 /*
345 * The MPC8xx has only 16 contexts. We rotate through them on each
346 * task switch. A better way would be to keep track of tasks that
347 * own contexts, and implement an LRU usage. That way very active
348 * tasks don't always have to pay the TLB reload overhead. The
349 * kernel pages are mapped shared, so the kernel can run on behalf
350 * of any task that makes a kernel entry. Shared does not mean they
351 * are not protected, just that the ASID comparison is not performed.
352 * -- Dan
353 *
354 * The IBM4xx has 256 contexts, so we can just rotate through these
355 * as a way of "switching" contexts. If the TID of the TLB is zero,
356 * the PID/TID comparison is disabled, so we can use a TID of zero
357 * to represent all kernel pages as shared among all contexts.
358 * -- Dan
359 */
360 if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
361 first_context = 0;
362 last_context = 15;
363 } else {
364 first_context = 1;
365 last_context = 255;
366 }
367
368#ifdef DEBUG_CLAMP_LAST_CONTEXT
369 last_context = DEBUG_CLAMP_LAST_CONTEXT;
370#endif
371 /*
372 * Allocate the maps used by context management
373 */
374 context_map = alloc_bootmem(CTX_MAP_SIZE);
375 context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
376 stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
377
378#ifdef CONFIG_SMP
379 register_cpu_notifier(&mmu_context_cpu_nb);
380#endif
381
382 printk(KERN_INFO
383 "MMU: Allocated %d bytes of context maps for %d contexts\n",
384 2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
385 last_context - first_context + 1);
386
387 /*
388 * Some processors have too few contexts to reserve one for
389 * init_mm, and require using context 0 for a normal task.
390 * Other processors reserve the use of context zero for the kernel.
391 * This code assumes first_context < 32.
392 */
393 context_map[0] = (1 << first_context) - 1;
394 next_context = first_context;
395 nr_free_contexts = last_context - first_context + 1;
396}
397
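A quick sizing check on the bootmem allocations above (worked numbers, assuming 32-bit unsigned long and pointers, which holds for the 4xx/8xx-class parts this file targets): on a 4xx with first_context = 1 and last_context = 255, CTX_MAP_SIZE = 4 * (255/32 + 1) = 32 bytes and context_mm needs 4 * 256 = 1024 bytes, so the printk reports 2 * 32 + 1024 = 1088 bytes of context maps for 255 contexts; on an 8xx with last_context = 15 the same arithmetic gives 4, 64 and 72 bytes for 16 contexts.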
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index fab3cfad4099..4314b39b6faf 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -22,10 +22,58 @@
22#include <asm/tlbflush.h> 22#include <asm/tlbflush.h>
23#include <asm/mmu.h> 23#include <asm/mmu.h>
24 24
25#ifdef CONFIG_PPC_MMU_NOHASH
26
27/*
28 * On 40x and 8xx, we directly inline tlbia and tlbivax
29 */
30#if defined(CONFIG_40x) || defined(CONFIG_8xx)
31static inline void _tlbil_all(void)
32{
 33	asm volatile ("sync; tlbia; isync" : : : "memory");
34}
35static inline void _tlbil_pid(unsigned int pid)
36{
 37	asm volatile ("sync; tlbia; isync" : : : "memory");
38}
39#else /* CONFIG_40x || CONFIG_8xx */
40extern void _tlbil_all(void);
41extern void _tlbil_pid(unsigned int pid);
42#endif /* !(CONFIG_40x || CONFIG_8xx) */
43
44/*
45 * On 8xx, we directly inline tlbie, on others, it's extern
46 */
47#ifdef CONFIG_8xx
48static inline void _tlbil_va(unsigned long address, unsigned int pid)
49{
 50	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
51}
52#else /* CONFIG_8xx */
53extern void _tlbil_va(unsigned long address, unsigned int pid);
 54#endif /* CONFIG_8xx */
55
56/*
57 * As of today, we don't support tlbivax broadcast on any
58 * implementation. When that becomes the case, this will be
59 * an extern.
60 */
61static inline void _tlbivax_bcast(unsigned long address, unsigned int pid)
62{
63 BUG();
64}
65
66#else /* CONFIG_PPC_MMU_NOHASH */
67
25extern void hash_preload(struct mm_struct *mm, unsigned long ea, 68extern void hash_preload(struct mm_struct *mm, unsigned long ea,
26 unsigned long access, unsigned long trap); 69 unsigned long access, unsigned long trap);
27 70
28 71
72extern void _tlbie(unsigned long address);
73extern void _tlbia(void);
74
75#endif /* CONFIG_PPC_MMU_NOHASH */
76
29#ifdef CONFIG_PPC32 77#ifdef CONFIG_PPC32
30extern void mapin_ram(void); 78extern void mapin_ram(void);
31extern int map_page(unsigned long va, phys_addr_t pa, int flags); 79extern int map_page(unsigned long va, phys_addr_t pa, int flags);
@@ -58,17 +106,14 @@ extern phys_addr_t lowmem_end_addr;
58 * architectures. -- Dan 106 * architectures. -- Dan
59 */ 107 */
60#if defined(CONFIG_8xx) 108#if defined(CONFIG_8xx)
61#define flush_HPTE(X, va, pg) _tlbie(va, 0 /* 8xx doesn't care about PID */)
62#define MMU_init_hw() do { } while(0) 109#define MMU_init_hw() do { } while(0)
63#define mmu_mapin_ram() (0UL) 110#define mmu_mapin_ram() (0UL)
64 111
65#elif defined(CONFIG_4xx) 112#elif defined(CONFIG_4xx)
66#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
67extern void MMU_init_hw(void); 113extern void MMU_init_hw(void);
68extern unsigned long mmu_mapin_ram(void); 114extern unsigned long mmu_mapin_ram(void);
69 115
70#elif defined(CONFIG_FSL_BOOKE) 116#elif defined(CONFIG_FSL_BOOKE)
71#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
72extern void MMU_init_hw(void); 117extern void MMU_init_hw(void);
73extern unsigned long mmu_mapin_ram(void); 118extern unsigned long mmu_mapin_ram(void);
74extern void adjust_total_lowmem(void); 119extern void adjust_total_lowmem(void);
@@ -77,18 +122,4 @@ extern void adjust_total_lowmem(void);
77/* anything 32-bit except 4xx or 8xx */ 122/* anything 32-bit except 4xx or 8xx */
78extern void MMU_init_hw(void); 123extern void MMU_init_hw(void);
79extern unsigned long mmu_mapin_ram(void); 124extern unsigned long mmu_mapin_ram(void);
80
81/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
82 * which includes all new 82xx processors. We need tlbie/tlbsync here
83 * in that case (I think). -- Dan.
84 */
85static inline void flush_HPTE(unsigned context, unsigned long va,
86 unsigned long pdval)
87{
88 if ((Hash != 0) &&
89 cpu_has_feature(CPU_FTR_HPTE_TABLE))
90 flush_hash_pages(0, va, pdval, 1);
91 else
92 _tlbie(va);
93}
94#endif 125#endif
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
new file mode 100644
index 000000000000..6d94116fdea1
--- /dev/null
+++ b/arch/powerpc/mm/pgtable.c
@@ -0,0 +1,117 @@
1/*
 2 * This file contains common routines for freeing page tables
3 *
4 * Derived from arch/powerpc/mm/tlb_64.c:
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
8 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
9 * Copyright (C) 1996 Paul Mackerras
10 *
11 * Derived from "arch/i386/mm/init.c"
12 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
13 *
14 * Dave Engebretsen <engebret@us.ibm.com>
15 * Rework for PPC64 port.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 */
22
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/init.h>
26#include <linux/percpu.h>
27#include <linux/hardirq.h>
28#include <asm/pgalloc.h>
29#include <asm/tlbflush.h>
30#include <asm/tlb.h>
31
32static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
33static unsigned long pte_freelist_forced_free;
34
35struct pte_freelist_batch
36{
37 struct rcu_head rcu;
38 unsigned int index;
39 pgtable_free_t tables[0];
40};
41
42#define PTE_FREELIST_SIZE \
43 ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
44 / sizeof(pgtable_free_t))
45
46static void pte_free_smp_sync(void *arg)
47{
48 /* Do nothing, just ensure we sync with all CPUs */
49}
50
51/* This is only called when we are critically out of memory
52 * (and fail to get a page in pte_free_tlb).
53 */
54static void pgtable_free_now(pgtable_free_t pgf)
55{
56 pte_freelist_forced_free++;
57
58 smp_call_function(pte_free_smp_sync, NULL, 1);
59
60 pgtable_free(pgf);
61}
62
63static void pte_free_rcu_callback(struct rcu_head *head)
64{
65 struct pte_freelist_batch *batch =
66 container_of(head, struct pte_freelist_batch, rcu);
67 unsigned int i;
68
69 for (i = 0; i < batch->index; i++)
70 pgtable_free(batch->tables[i]);
71
72 free_page((unsigned long)batch);
73}
74
75static void pte_free_submit(struct pte_freelist_batch *batch)
76{
77 INIT_RCU_HEAD(&batch->rcu);
78 call_rcu(&batch->rcu, pte_free_rcu_callback);
79}
80
81void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
82{
83 /* This is safe since tlb_gather_mmu has disabled preemption */
84 cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
85 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
86
87 if (atomic_read(&tlb->mm->mm_users) < 2 ||
88 cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
89 pgtable_free(pgf);
90 return;
91 }
92
93 if (*batchp == NULL) {
94 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
95 if (*batchp == NULL) {
96 pgtable_free_now(pgf);
97 return;
98 }
99 (*batchp)->index = 0;
100 }
101 (*batchp)->tables[(*batchp)->index++] = pgf;
102 if ((*batchp)->index == PTE_FREELIST_SIZE) {
103 pte_free_submit(*batchp);
104 *batchp = NULL;
105 }
106}
107
108void pte_free_finish(void)
109{
110 /* This is safe since tlb_gather_mmu has disabled preemption */
111 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
112
113 if (*batchp == NULL)
114 return;
115 pte_free_submit(*batchp);
116 *batchp = NULL;
117}
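A note on the design choice in pgtable_free_tlb() above (an interpretation of the code, not something stated in the file): page-table pages being freed are parked in a per-CPU pte_freelist_batch and released through call_rcu(), so any other CPU still walking those tables locklessly keeps seeing valid memory until a grace period has elapsed; the fast path frees immediately when the mm has a single user confined to the local CPU, since no such concurrent walker can exist, and pgtable_free_now() is the out-of-memory fallback that substitutes an IPI-based synchronisation for the batch.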
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index c31d6d26f0b5..38ff35f2142a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
48 48
49extern char etext[], _stext[]; 49extern char etext[], _stext[];
50 50
51#ifdef CONFIG_SMP
52extern void hash_page_sync(void);
53#endif
54
55#ifdef HAVE_BATS 51#ifdef HAVE_BATS
56extern phys_addr_t v_mapped_by_bats(unsigned long va); 52extern phys_addr_t v_mapped_by_bats(unsigned long va);
57extern unsigned long p_mapped_by_bats(phys_addr_t pa); 53extern unsigned long p_mapped_by_bats(phys_addr_t pa);
@@ -72,24 +68,29 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
72#define p_mapped_by_tlbcam(x) (0UL) 68#define p_mapped_by_tlbcam(x) (0UL)
73#endif /* HAVE_TLBCAM */ 69#endif /* HAVE_TLBCAM */
74 70
75#ifdef CONFIG_PTE_64BIT 71#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
76/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
77#define PGDIR_ORDER 1
78#else
79#define PGDIR_ORDER 0
80#endif
81 72
82pgd_t *pgd_alloc(struct mm_struct *mm) 73pgd_t *pgd_alloc(struct mm_struct *mm)
83{ 74{
84 pgd_t *ret; 75 pgd_t *ret;
85 76
86 ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER); 77 /* pgdir take page or two with 4K pages and a page fraction otherwise */
78#ifndef CONFIG_PPC_4K_PAGES
79 ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
80#else
81 ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
82 PGDIR_ORDER - PAGE_SHIFT);
83#endif
87 return ret; 84 return ret;
88} 85}
89 86
90void pgd_free(struct mm_struct *mm, pgd_t *pgd) 87void pgd_free(struct mm_struct *mm, pgd_t *pgd)
91{ 88{
92 free_pages((unsigned long)pgd, PGDIR_ORDER); 89#ifndef CONFIG_PPC_4K_PAGES
90 kfree((void *)pgd);
91#else
92 free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
93#endif
93} 94}
94 95
95__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 96__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
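To make the new PGDIR_ORDER arithmetic concrete (a worked example, on the assumption that pgd entries stay 4 bytes so PGD_T_LOG2 = 2): with 4 KB pages and 4-byte PTEs there are 1024 PTEs per page, PGDIR_SHIFT = 22 and PGDIR_ORDER = 32 + 2 - 22 = 12, i.e. a 4 KB pgdir allocated at __get_free_pages() order 12 - PAGE_SHIFT = 0; with 8-byte (CONFIG_PTE_64BIT) PTEs PGDIR_SHIFT drops to 21, giving PGDIR_ORDER = 13 and an 8 KB pgdir, which matches the hard-coded order-1 allocation this hunk removes.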
@@ -125,23 +126,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
125 return ptepage; 126 return ptepage;
126} 127}
127 128
128void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
129{
130#ifdef CONFIG_SMP
131 hash_page_sync();
132#endif
133 free_page((unsigned long)pte);
134}
135
136void pte_free(struct mm_struct *mm, pgtable_t ptepage)
137{
138#ifdef CONFIG_SMP
139 hash_page_sync();
140#endif
141 pgtable_page_dtor(ptepage);
142 __free_page(ptepage);
143}
144
145void __iomem * 129void __iomem *
146ioremap(phys_addr_t addr, unsigned long size) 130ioremap(phys_addr_t addr, unsigned long size)
147{ 131{
@@ -194,6 +178,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
194 if (p < 16*1024*1024) 178 if (p < 16*1024*1024)
195 p += _ISA_MEM_BASE; 179 p += _ISA_MEM_BASE;
196 180
181#ifndef CONFIG_CRASH_DUMP
197 /* 182 /*
198 * Don't allow anybody to remap normal RAM that we're using. 183 * Don't allow anybody to remap normal RAM that we're using.
199 * mem_init() sets high_memory so only do the check after that. 184 * mem_init() sets high_memory so only do the check after that.
@@ -203,6 +188,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
203 (unsigned long long)p, __builtin_return_address(0)); 188 (unsigned long long)p, __builtin_return_address(0));
204 return NULL; 189 return NULL;
205 } 190 }
191#endif
206 192
207 if (size == 0) 193 if (size == 0)
208 return NULL; 194 return NULL;
@@ -288,7 +274,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
288} 274}
289 275
290/* 276/*
291 * Map in a big chunk of physical memory starting at KERNELBASE. 277 * Map in a big chunk of physical memory starting at PAGE_OFFSET.
292 */ 278 */
293void __init mapin_ram(void) 279void __init mapin_ram(void)
294{ 280{
@@ -297,7 +283,7 @@ void __init mapin_ram(void)
297 int ktext; 283 int ktext;
298 284
299 s = mmu_mapin_ram(); 285 s = mmu_mapin_ram();
300 v = KERNELBASE + s; 286 v = PAGE_OFFSET + s;
301 p = memstart_addr + s; 287 p = memstart_addr + s;
302 for (; s < total_lowmem; s += PAGE_SIZE) { 288 for (; s < total_lowmem; s += PAGE_SIZE) {
303 ktext = ((char *) v >= _stext && (char *) v < etext); 289 ktext = ((char *) v >= _stext && (char *) v < etext);
@@ -363,7 +349,11 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
363 return -EINVAL; 349 return -EINVAL;
364 set_pte_at(&init_mm, address, kpte, mk_pte(page, prot)); 350 set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
365 wmb(); 351 wmb();
366 flush_HPTE(0, address, pmd_val(*kpmd)); 352#ifdef CONFIG_PPC_STD_MMU
353 flush_hash_pages(0, address, pmd_val(*kpmd), 1);
354#else
355 flush_tlb_page(NULL, address);
356#endif
367 pte_unmap(kpte); 357 pte_unmap(kpte);
368 358
369 return 0; 359 return 0;
@@ -400,7 +390,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
400#endif /* CONFIG_DEBUG_PAGEALLOC */ 390#endif /* CONFIG_DEBUG_PAGEALLOC */
401 391
402static int fixmaps; 392static int fixmaps;
403unsigned long FIXADDR_TOP = 0xfffff000; 393unsigned long FIXADDR_TOP = (-PAGE_SIZE);
404EXPORT_SYMBOL(FIXADDR_TOP); 394EXPORT_SYMBOL(FIXADDR_TOP);
405 395
406void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) 396void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 6aa120813775..45d925360b89 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -95,16 +95,16 @@ unsigned long __init mmu_mapin_ram(void)
95 break; 95 break;
96 } 96 }
97 97
98 setbat(2, KERNELBASE, 0, bl, _PAGE_RAM); 98 setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM);
99 done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1; 99 done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
100 if ((done < tot) && !bat_addrs[3].limit) { 100 if ((done < tot) && !bat_addrs[3].limit) {
101 /* use BAT3 to cover a bit more */ 101 /* use BAT3 to cover a bit more */
102 tot -= done; 102 tot -= done;
103 for (bl = 128<<10; bl < max_size; bl <<= 1) 103 for (bl = 128<<10; bl < max_size; bl <<= 1)
104 if (bl * 2 > tot) 104 if (bl * 2 > tot)
105 break; 105 break;
106 setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM); 106 setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM);
107 done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1; 107 done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
108 } 108 }
109 109
110 return done; 110 return done;
@@ -192,7 +192,7 @@ void __init MMU_init_hw(void)
192 extern unsigned int hash_page[]; 192 extern unsigned int hash_page[];
193 extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[]; 193 extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
194 194
195 if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) { 195 if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
196 /* 196 /*
197 * Put a blr (procedure return) instruction at the 197 * Put a blr (procedure return) instruction at the
198 * start of hash_page, since we can still get DSI 198 * start of hash_page, since we can still get DSI
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_hash32.c
index f9a47fee3927..65190587a365 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -137,6 +137,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
137 flush_range(&init_mm, start, end); 137 flush_range(&init_mm, start, end);
138 FINISH_FLUSH; 138 FINISH_FLUSH;
139} 139}
140EXPORT_SYMBOL(flush_tlb_kernel_range);
140 141
141/* 142/*
142 * Flush all the (user) entries for the address space described by mm. 143 * Flush all the (user) entries for the address space described by mm.
@@ -160,6 +161,7 @@ void flush_tlb_mm(struct mm_struct *mm)
160 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); 161 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
161 FINISH_FLUSH; 162 FINISH_FLUSH;
162} 163}
164EXPORT_SYMBOL(flush_tlb_mm);
163 165
164void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) 166void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
165{ 167{
@@ -176,6 +178,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
176 flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); 178 flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
177 FINISH_FLUSH; 179 FINISH_FLUSH;
178} 180}
181EXPORT_SYMBOL(flush_tlb_page);
179 182
180/* 183/*
181 * For each address in the range, find the pte for the address 184 * For each address in the range, find the pte for the address
@@ -188,3 +191,4 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
188 flush_range(vma->vm_mm, start, end); 191 flush_range(vma->vm_mm, start, end);
189 FINISH_FLUSH; 192 FINISH_FLUSH;
190} 193}
194EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_hash64.c
index be7dd422c0fa..c931bc7d1079 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
37 * arch/powerpc/include/asm/tlb.h file -- tgall 37 * arch/powerpc/include/asm/tlb.h file -- tgall
38 */ 38 */
39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
40static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
41static unsigned long pte_freelist_forced_free;
42
43struct pte_freelist_batch
44{
45 struct rcu_head rcu;
46 unsigned int index;
47 pgtable_free_t tables[0];
48};
49
50#define PTE_FREELIST_SIZE \
51 ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
52 / sizeof(pgtable_free_t))
53
54static void pte_free_smp_sync(void *arg)
55{
56 /* Do nothing, just ensure we sync with all CPUs */
57}
58
59/* This is only called when we are critically out of memory
60 * (and fail to get a page in pte_free_tlb).
61 */
62static void pgtable_free_now(pgtable_free_t pgf)
63{
64 pte_freelist_forced_free++;
65
66 smp_call_function(pte_free_smp_sync, NULL, 1);
67
68 pgtable_free(pgf);
69}
70
71static void pte_free_rcu_callback(struct rcu_head *head)
72{
73 struct pte_freelist_batch *batch =
74 container_of(head, struct pte_freelist_batch, rcu);
75 unsigned int i;
76
77 for (i = 0; i < batch->index; i++)
78 pgtable_free(batch->tables[i]);
79
80 free_page((unsigned long)batch);
81}
82
83static void pte_free_submit(struct pte_freelist_batch *batch)
84{
85 INIT_RCU_HEAD(&batch->rcu);
86 call_rcu(&batch->rcu, pte_free_rcu_callback);
87}
88
89void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
90{
91 /* This is safe since tlb_gather_mmu has disabled preemption */
92 cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
93 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
94
95 if (atomic_read(&tlb->mm->mm_users) < 2 ||
96 cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
97 pgtable_free(pgf);
98 return;
99 }
100
101 if (*batchp == NULL) {
102 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
103 if (*batchp == NULL) {
104 pgtable_free_now(pgf);
105 return;
106 }
107 (*batchp)->index = 0;
108 }
109 (*batchp)->tables[(*batchp)->index++] = pgf;
110 if ((*batchp)->index == PTE_FREELIST_SIZE) {
111 pte_free_submit(*batchp);
112 *batchp = NULL;
113 }
114}
115 40
116/* 41/*
117 * A linux PTE was changed and the corresponding hash table entry 42 * A linux PTE was changed and the corresponding hash table entry
@@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
229 batch->index = 0; 154 batch->index = 0;
230} 155}
231 156
232void pte_free_finish(void)
233{
234 /* This is safe since tlb_gather_mmu has disabled preemption */
235 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
236
237 if (*batchp == NULL)
238 return;
239 pte_free_submit(*batchp);
240 *batchp = NULL;
241}
242
243/** 157/**
244 * __flush_hash_table_range - Flush all HPTEs for a given address range 158 * __flush_hash_table_range - Flush all HPTEs for a given address range
245 * from the hash table (and the TLB). But keeps 159 * from the hash table (and the TLB). But keeps
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
new file mode 100644
index 000000000000..803a64c02b06
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -0,0 +1,209 @@
1/*
2 * This file contains the routines for TLB flushing.
3 * On machines where the MMU does not use a hash table to store virtual to
 4 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors,
5 * this does -not- include 603 however which shares the implementation with
6 * hash based processors)
7 *
8 * -- BenH
9 *
10 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
11 * IBM Corp.
12 *
13 * Derived from arch/ppc/mm/init.c:
14 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
15 *
16 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
17 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
18 * Copyright (C) 1996 Paul Mackerras
19 *
20 * Derived from "arch/i386/mm/init.c"
21 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
22 *
23 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version
26 * 2 of the License, or (at your option) any later version.
27 *
28 */
29
30#include <linux/kernel.h>
31#include <linux/mm.h>
32#include <linux/init.h>
33#include <linux/highmem.h>
34#include <linux/pagemap.h>
35#include <linux/preempt.h>
36#include <linux/spinlock.h>
37
38#include <asm/tlbflush.h>
39#include <asm/tlb.h>
40
41#include "mmu_decl.h"
42
43/*
44 * Base TLB flushing operations:
45 *
46 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
47 * - flush_tlb_page(vma, vmaddr) flushes one page
48 * - flush_tlb_range(vma, start, end) flushes a range of pages
49 * - flush_tlb_kernel_range(start, end) flushes kernel pages
50 *
51 * - local_* variants of page and mm only apply to the current
52 * processor
53 */
54
55/*
56 * These are the base non-SMP variants of page and mm flushing
57 */
58void local_flush_tlb_mm(struct mm_struct *mm)
59{
60 unsigned int pid;
61
62 preempt_disable();
63 pid = mm->context.id;
64 if (pid != MMU_NO_CONTEXT)
65 _tlbil_pid(pid);
66 preempt_enable();
67}
68EXPORT_SYMBOL(local_flush_tlb_mm);
69
70void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
71{
72 unsigned int pid;
73
74 preempt_disable();
75 pid = vma ? vma->vm_mm->context.id : 0;
76 if (pid != MMU_NO_CONTEXT)
77 _tlbil_va(vmaddr, pid);
78 preempt_enable();
79}
80EXPORT_SYMBOL(local_flush_tlb_page);
81
82
83/*
84 * And here are the SMP non-local implementations
85 */
86#ifdef CONFIG_SMP
87
88static DEFINE_SPINLOCK(tlbivax_lock);
89
90struct tlb_flush_param {
91 unsigned long addr;
92 unsigned int pid;
93};
94
95static void do_flush_tlb_mm_ipi(void *param)
96{
97 struct tlb_flush_param *p = param;
98
99 _tlbil_pid(p ? p->pid : 0);
100}
101
102static void do_flush_tlb_page_ipi(void *param)
103{
104 struct tlb_flush_param *p = param;
105
106 _tlbil_va(p->addr, p->pid);
107}
108
109
110/* Note on invalidations and PID:
111 *
112 * We snapshot the PID with preempt disabled. At this point, it can still
113 * change either because:
114 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 115 * - we are invalidating some target that isn't currently running here
116 * and is concurrently acquiring a new PID on another CPU
117 * - some other CPU is re-acquiring a lost PID for this mm
118 * etc...
119 *
120 * However, this shouldn't be a problem as we only guarantee
121 * invalidation of TLB entries present prior to this call, so we
122 * don't care about the PID changing, and invalidating a stale PID
123 * is generally harmless.
124 */
125
126void flush_tlb_mm(struct mm_struct *mm)
127{
128 cpumask_t cpu_mask;
129 unsigned int pid;
130
131 preempt_disable();
132 pid = mm->context.id;
133 if (unlikely(pid == MMU_NO_CONTEXT))
134 goto no_context;
135 cpu_mask = mm->cpu_vm_mask;
136 cpu_clear(smp_processor_id(), cpu_mask);
137 if (!cpus_empty(cpu_mask)) {
138 struct tlb_flush_param p = { .pid = pid };
139 smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
140 }
141 _tlbil_pid(pid);
142 no_context:
143 preempt_enable();
144}
145EXPORT_SYMBOL(flush_tlb_mm);
146
147void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
148{
149 cpumask_t cpu_mask;
150 unsigned int pid;
151
152 preempt_disable();
153 pid = vma ? vma->vm_mm->context.id : 0;
154 if (unlikely(pid == MMU_NO_CONTEXT))
155 goto bail;
156 cpu_mask = vma->vm_mm->cpu_vm_mask;
157 cpu_clear(smp_processor_id(), cpu_mask);
158 if (!cpus_empty(cpu_mask)) {
159 /* If broadcast tlbivax is supported, use it */
160 if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
161 int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
162 if (lock)
163 spin_lock(&tlbivax_lock);
164 _tlbivax_bcast(vmaddr, pid);
165 if (lock)
166 spin_unlock(&tlbivax_lock);
167 goto bail;
168 } else {
169 struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
170 smp_call_function_mask(cpu_mask,
171 do_flush_tlb_page_ipi, &p, 1);
172 }
173 }
174 _tlbil_va(vmaddr, pid);
175 bail:
176 preempt_enable();
177}
178EXPORT_SYMBOL(flush_tlb_page);
179
180#endif /* CONFIG_SMP */
181
182/*
183 * Flush kernel TLB entries in the given range
184 */
185void flush_tlb_kernel_range(unsigned long start, unsigned long end)
186{
187#ifdef CONFIG_SMP
188 preempt_disable();
189 smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
190 _tlbil_pid(0);
191 preempt_enable();
192#endif
193 _tlbil_pid(0);
194}
195EXPORT_SYMBOL(flush_tlb_kernel_range);
196
197/*
198 * Currently, for range flushing, we just do a full mm flush. This should
199 * be optimized based on a threshold on the size of the range, since
 200 * some implementations can stack multiple tlbivax before a tlbsync but
201 * for now, we keep it that way
202 */
203void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
204 unsigned long end)
205
206{
207 flush_tlb_mm(vma->vm_mm);
208}
209EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
new file mode 100644
index 000000000000..f900a39e6ec4
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -0,0 +1,166 @@
1/*
2 * This file contains low-level functions for performing various
3 * types of TLB invalidations on various processors with no hash
4 * table.
5 *
6 * This file implements the following functions for all no-hash
7 * processors. Some aren't implemented for some variants. Some
8 * are inline in tlbflush.h
9 *
10 * - tlbil_va
11 * - tlbil_pid
12 * - tlbil_all
13 * - tlbivax_bcast (not yet)
14 *
15 * Code mostly moved over from misc_32.S
16 *
17 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
18 *
19 * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
20 * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 *
27 */
28
29#include <asm/reg.h>
30#include <asm/page.h>
31#include <asm/cputable.h>
32#include <asm/mmu.h>
33#include <asm/ppc_asm.h>
34#include <asm/asm-offsets.h>
35#include <asm/processor.h>
36
37#if defined(CONFIG_40x)
38
39/*
40 * 40x implementation needs only tlbil_va
41 */
42_GLOBAL(_tlbil_va)
43 /* We run the search with interrupts disabled because we have to change
44 * the PID and I don't want to preempt when that happens.
45 */
46 mfmsr r5
47 mfspr r6,SPRN_PID
48 wrteei 0
49 mtspr SPRN_PID,r4
50 tlbsx. r3, 0, r3
51 mtspr SPRN_PID,r6
52 wrtee r5
53 bne 1f
54 sync
55 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
56 * clear. Since 25 is the V bit in the TLB_TAG, loading this value
57 * will invalidate the TLB entry. */
58 tlbwe r3, r3, TLB_TAG
59 isync
601: blr
61
62#elif defined(CONFIG_8xx)
63
64/*
65 * Nothing to do for 8xx, everything is inline
66 */
67
68#elif defined(CONFIG_44x)
69
70/*
71 * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
72 * of the TLB for everything else.
73 */
74_GLOBAL(_tlbil_va)
75 mfspr r5,SPRN_MMUCR
76 rlwimi r5,r4,0,24,31 /* Set TID */
77
78 /* We have to run the search with interrupts disabled, otherwise
79 * an interrupt which causes a TLB miss can clobber the MMUCR
80 * between the mtspr and the tlbsx.
81 *
82 * Critical and Machine Check interrupts take care of saving
83 * and restoring MMUCR, so only normal interrupts have to be
84 * taken care of.
85 */
86 mfmsr r4
87 wrteei 0
88 mtspr SPRN_MMUCR,r5
89 tlbsx. r3, 0, r3
90 wrtee r4
91 bne 1f
92 sync
93 /* There are only 64 TLB entries, so r3 < 64,
94 * which means bit 22, is clear. Since 22 is
95 * the V bit in the TLB_PAGEID, loading this
96 * value will invalidate the TLB entry.
97 */
98 tlbwe r3, r3, PPC44x_TLB_PAGEID
99 isync
1001: blr
101
102_GLOBAL(_tlbil_all)
103_GLOBAL(_tlbil_pid)
104 li r3,0
105 sync
106
107 /* Load high watermark */
108 lis r4,tlb_44x_hwater@ha
109 lwz r5,tlb_44x_hwater@l(r4)
110
1111: tlbwe r3,r3,PPC44x_TLB_PAGEID
112 addi r3,r3,1
113 cmpw 0,r3,r5
114 ble 1b
115
116 isync
117 blr
118
119#elif defined(CONFIG_FSL_BOOKE)
120/*
121 * FSL BookE implementations. Currently _pid and _all are the
122 * same. This will change when tlbilx is actually supported and
123 * performs invalidate-by-PID. This change will be driven by
124 * mmu_features conditional
125 */
126
127/*
128 * Flush MMU TLB on the local processor
129 */
130_GLOBAL(_tlbil_pid)
131_GLOBAL(_tlbil_all)
132#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
133 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
134 li r3,(MMUCSR0_TLBFI)@l
135 mtspr SPRN_MMUCSR0, r3
1361:
137 mfspr r3,SPRN_MMUCSR0
138 andi. r3,r3,MMUCSR0_TLBFI@l
139 bne 1b
140 msync
141 isync
142 blr
143
144/*
145 * Flush MMU TLB for a particular address, but only on the local processor
146 * (no broadcast)
147 */
148_GLOBAL(_tlbil_va)
149 mfmsr r10
150 wrteei 0
151 slwi r4,r4,16
152 mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
153 tlbsx 0,r3
154 mfspr r4,SPRN_MAS1 /* check valid */
155 andis. r3,r4,MAS1_VALID@h
156 beq 1f
157 rlwinm r4,r4,0,1,31
158 mtspr SPRN_MAS1,r4
159 tlbwe
160 msync
161 isync
1621: wrtee r10
163 blr
 164#else
165#error Unsupported processor type !
166#endif
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 2949126d28d1..6b793aeda72e 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -297,7 +297,7 @@ static inline unsigned long fast_get_dcookie(struct path *path)
297{ 297{
298 unsigned long cookie; 298 unsigned long cookie;
299 299
300 if (path->dentry->d_cookie) 300 if (path->dentry->d_flags & DCACHE_COOKIE)
301 return (unsigned long)path->dentry; 301 return (unsigned long)path->dentry;
302 get_dcookie(path, &cookie); 302 get_dcookie(path, &cookie);
303 return cookie; 303 return cookie;
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
index ae2e7f67c18e..4058fd1e7fc7 100644
--- a/arch/powerpc/platforms/40x/ep405.c
+++ b/arch/powerpc/platforms/40x/ep405.c
@@ -100,7 +100,7 @@ static void __init ep405_setup_arch(void)
100 /* Find & init the BCSR CPLD */ 100 /* Find & init the BCSR CPLD */
101 ep405_init_bcsr(); 101 ep405_init_bcsr();
102 102
103 ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; 103 ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
104} 104}
105 105
106static int __init ep405_probe(void) 106static int __init ep405_probe(void)
diff --git a/arch/powerpc/platforms/40x/kilauea.c b/arch/powerpc/platforms/40x/kilauea.c
index 1dd24ffc0dc1..fd7d934dac8b 100644
--- a/arch/powerpc/platforms/40x/kilauea.c
+++ b/arch/powerpc/platforms/40x/kilauea.c
@@ -44,7 +44,7 @@ static int __init kilauea_probe(void)
44 if (!of_flat_dt_is_compatible(root, "amcc,kilauea")) 44 if (!of_flat_dt_is_compatible(root, "amcc,kilauea"))
45 return 0; 45 return 0;
46 46
47 ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; 47 ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
48 48
49 return 1; 49 return 1;
50} 50}
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index 4498a86b46c3..f40ac9b8f99f 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -61,7 +61,7 @@ static int __init ppc40x_probe(void)
61 61
62 for (i = 0; i < ARRAY_SIZE(board); i++) { 62 for (i = 0; i < ARRAY_SIZE(board); i++) {
63 if (of_flat_dt_is_compatible(root, board[i])) { 63 if (of_flat_dt_is_compatible(root, board[i])) {
64 ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; 64 ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
65 return 1; 65 return 1;
66 } 66 }
67 } 67 }
diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c
index a0e8fe4662f6..88b9117fa691 100644
--- a/arch/powerpc/platforms/44x/ebony.c
+++ b/arch/powerpc/platforms/44x/ebony.c
@@ -54,7 +54,7 @@ static int __init ebony_probe(void)
54 if (!of_flat_dt_is_compatible(root, "ibm,ebony")) 54 if (!of_flat_dt_is_compatible(root, "ibm,ebony"))
55 return 0; 55 return 0;
56 56
57 ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; 57 ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
58 58
59 return 1; 59 return 1;
60} 60}
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 29671262801f..76fdc51dac8b 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -69,7 +69,7 @@ static int __init ppc44x_probe(void)
69 69
70 for (i = 0; i < ARRAY_SIZE(board); i++) { 70 for (i = 0; i < ARRAY_SIZE(board); i++) {
71 if (of_flat_dt_is_compatible(root, board[i])) { 71 if (of_flat_dt_is_compatible(root, board[i])) {
72 ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; 72 ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
73 return 1; 73 return 1;
74 } 74 }
75 } 75 }
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 47f10e647735..a78e8eb6da41 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -51,7 +51,7 @@ static int __init sam440ep_probe(void)
51 if (!of_flat_dt_is_compatible(root, "acube,sam440ep")) 51 if (!of_flat_dt_is_compatible(root, "acube,sam440ep"))
52 return 0; 52 return 0;
53 53
54 ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC; 54 ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
55 55
56 return 1; 56 return 1;
57} 57}
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index fe92e65103ed..b5c753db125e 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -3,7 +3,6 @@
3#include <asm/io.h> 3#include <asm/io.h>
4#include <asm/time.h> 4#include <asm/time.h>
5#include <asm/mpc52xx.h> 5#include <asm/mpc52xx.h>
6#include "mpc52xx_pic.h"
7 6
8/* defined in lite5200_sleep.S and only used here */ 7/* defined in lite5200_sleep.S and only used here */
9extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar); 8extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index b49a18527661..c3f2c21024e3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -375,7 +375,7 @@ mpc52xx_add_bridge(struct device_node *node)
375 375
376 pr_debug("Adding MPC52xx PCI host bridge %s\n", node->full_name); 376 pr_debug("Adding MPC52xx PCI host bridge %s\n", node->full_name);
377 377
378 ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; 378 ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
379 379
380 if (of_address_to_resource(node, 0, &rsrc) != 0) { 380 if (of_address_to_resource(node, 0, &rsrc) != 0) {
381 printk(KERN_ERR "Can't get %s resources\n", node->full_name); 381 printk(KERN_ERR "Can't get %s resources\n", node->full_name);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 8479394e9ab4..72865e8e4b51 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -2,20 +2,100 @@
2 * 2 *
3 * Programmable Interrupt Controller functions for the Freescale MPC52xx. 3 * Programmable Interrupt Controller functions for the Freescale MPC52xx.
4 * 4 *
5 * Copyright (C) 2008 Secret Lab Technologies Ltd.
5 * Copyright (C) 2006 bplan GmbH 6 * Copyright (C) 2006 bplan GmbH
7 * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
8 * Copyright (C) 2003 Montavista Software, Inc
6 * 9 *
7 * Based on the code from the 2.4 kernel by 10 * Based on the code from the 2.4 kernel by
8 * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg. 11 * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
9 * 12 *
10 * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
11 * Copyright (C) 2003 Montavista Software, Inc
12 *
13 * This file is licensed under the terms of the GNU General Public License 13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any 14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied. 15 * kind, whether express or implied.
16 * 16 *
17 */ 17 */
18 18
19/*
20 * This is the device driver for the MPC5200 interrupt controller.
21 *
22 * hardware overview
23 * -----------------
 24 * The MPC5200 interrupt controller groups all of the interrupt sources into
25 * three groups called 'critical', 'main', and 'peripheral'. The critical
26 * group has 3 irqs, External IRQ0, slice timer 0 irq, and wake from deep
 27 * sleep. The main group includes the other 3 external IRQs, slice timer 1, RTC,
 28 * gpios, and the general purpose timers. The peripheral group contains the
29 * remaining irq sources from all of the on-chip peripherals (PSCs, Ethernet,
30 * USB, DMA, etc).
31 *
32 * virqs
33 * -----
34 * The Linux IRQ subsystem requires that each irq source be assigned a
35 * system wide unique IRQ number starting at 1 (0 means no irq). Since
36 * systems can have multiple interrupt controllers, the virtual IRQ (virq)
 37 * infrastructure lets each interrupt controller define a local set
38 * of IRQ numbers and the virq infrastructure maps those numbers into
39 * a unique range of the global IRQ# space.
40 *
41 * To define a range of virq numbers for this controller, this driver first
42 * assigns a number to each of the irq groups (called the level 1 or L1
43 * value). Within each group individual irq sources are also assigned a
44 * number, as defined by the MPC5200 user guide, and refers to it as the
45 * level 2 or L2 value. The virq number is determined by shifting up the
46 * L1 value by MPC52xx_IRQ_L1_OFFSET and ORing it with the L2 value.
47 *
48 * For example, the TMR0 interrupt is irq 9 in the main group. The
49 * virq for TMR0 is calculated by ((1 << MPC52xx_IRQ_L1_OFFSET) | 9).
50 *
51 * The observant reader will also notice that this driver defines a 4th
52 * interrupt group called 'bestcomm'. The bestcomm group isn't physically
53 * part of the MPC5200 interrupt controller, but it is used here to assign
54 * a separate virq number for each bestcomm task (since any of the 16
55 * bestcomm tasks can cause the bestcomm interrupt to be raised). When a
56 * bestcomm interrupt occurs (peripheral group, irq 0) this driver determines
57 * which task needs servicing and returns the irq number for that task. This
58 * allows drivers which use bestcomm to define their own interrupt handlers.
59 *
60 * irq_chip structures
61 * -------------------
62 * For actually manipulating IRQs (masking, enabling, clearing, etc) this
63 * driver defines four separate 'irq_chip' structures, one for the main
64 * group, one for the peripherals group, one for the bestcomm group and one
65 * for external interrupts. The irq_chip structures provide the hooks needed
 66 * to manipulate each IRQ source, and since each group has a separate set
67 * of registers for controlling the irq, it makes sense to divide up the
68 * hooks along those lines.
69 *
70 * You'll notice that there is not an irq_chip for the critical group and
71 * you'll also notice that there is an irq_chip defined for external
72 * interrupts even though there is no external interrupt group. The reason
73 * for this is that the four external interrupts are all managed with the same
74 * register even though one of the external IRQs is in the critical group and
75 * the other three are in the main group. For this reason it makes sense for
76 * the 4 external irqs to be managed using a separate set of hooks. The
77 * reason there is no crit irq_chip is that of the 3 irqs in the critical
78 * group, only external interrupt is actually support at this time by this
79 * driver and since external interrupt is the only one used, it can just
80 * be directed to make use of the external irq irq_chip.
81 *
82 * device tree bindings
83 * --------------------
84 * The device tree bindings for this controller reflect the two level
85 * organization of irqs in the device. #interrupt-cells = <3> where the
86 * first cell is the group number [0..3], the second cell is the irq
87 * number in the group, and the third cell is the sense type (level/edge).
88 * For reference, the following is a list of the interrupt property values
89 * associated with external interrupt sources on the MPC5200 (just because
90 * it is non-obvious to determine what the interrupts property should be
91 * when reading the mpc5200 manual and it is a frequently asked question).
92 *
93 * External interrupts:
94 * <0 0 n> external irq0, n is sense (n=0: level high,
95 * <1 1 n> external irq1, n is sense n=1: edge rising,
96 * <1 2 n> external irq2, n is sense n=2: edge falling,
97 * <1 3 n> external irq3, n is sense n=3: level low)
98 */
19#undef DEBUG 99#undef DEBUG
20 100
21#include <linux/interrupt.h> 101#include <linux/interrupt.h>
@@ -24,11 +104,19 @@
24#include <asm/io.h> 104#include <asm/io.h>
25#include <asm/prom.h> 105#include <asm/prom.h>
26#include <asm/mpc52xx.h> 106#include <asm/mpc52xx.h>
27#include "mpc52xx_pic.h"
28 107
29/* 108/* HW IRQ mapping */
30 * 109#define MPC52xx_IRQ_L1_CRIT (0)
31*/ 110#define MPC52xx_IRQ_L1_MAIN (1)
111#define MPC52xx_IRQ_L1_PERP (2)
112#define MPC52xx_IRQ_L1_SDMA (3)
113
114#define MPC52xx_IRQ_L1_OFFSET (6)
115#define MPC52xx_IRQ_L1_MASK (0x00c0)
116#define MPC52xx_IRQ_L2_MASK (0x003f)
117
118#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
119
32 120
33/* MPC5200 device tree match tables */ 121/* MPC5200 device tree match tables */
34static struct of_device_id mpc52xx_pic_ids[] __initdata = { 122static struct of_device_id mpc52xx_pic_ids[] __initdata = {
@@ -53,10 +141,7 @@ static unsigned char mpc52xx_map_senses[4] = {
53 IRQ_TYPE_LEVEL_LOW, 141 IRQ_TYPE_LEVEL_LOW,
54}; 142};
55 143
56/* 144/* Utility functions */
57 *
58*/
59
60static inline void io_be_setbit(u32 __iomem *addr, int bitno) 145static inline void io_be_setbit(u32 __iomem *addr, int bitno)
61{ 146{
62 out_be32(addr, in_be32(addr) | (1 << bitno)); 147 out_be32(addr, in_be32(addr) | (1 << bitno));
@@ -69,15 +154,14 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
69 154
70/* 155/*
71 * IRQ[0-3] interrupt irq_chip 156 * IRQ[0-3] interrupt irq_chip
72*/ 157 */
73
74static void mpc52xx_extirq_mask(unsigned int virq) 158static void mpc52xx_extirq_mask(unsigned int virq)
75{ 159{
76 int irq; 160 int irq;
77 int l2irq; 161 int l2irq;
78 162
79 irq = irq_map[virq].hwirq; 163 irq = irq_map[virq].hwirq;
80 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 164 l2irq = irq & MPC52xx_IRQ_L2_MASK;
81 165
82 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 166 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
83 167
@@ -90,7 +174,7 @@ static void mpc52xx_extirq_unmask(unsigned int virq)
90 int l2irq; 174 int l2irq;
91 175
92 irq = irq_map[virq].hwirq; 176 irq = irq_map[virq].hwirq;
93 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 177 l2irq = irq & MPC52xx_IRQ_L2_MASK;
94 178
95 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 179 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
96 180
@@ -103,7 +187,7 @@ static void mpc52xx_extirq_ack(unsigned int virq)
103 int l2irq; 187 int l2irq;
104 188
105 irq = irq_map[virq].hwirq; 189 irq = irq_map[virq].hwirq;
106 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 190 l2irq = irq & MPC52xx_IRQ_L2_MASK;
107 191
108 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 192 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
109 193
@@ -117,7 +201,7 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
117 int l2irq; 201 int l2irq;
118 202
119 irq = irq_map[virq].hwirq; 203 irq = irq_map[virq].hwirq;
120 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 204 l2irq = irq & MPC52xx_IRQ_L2_MASK;
121 205
122 pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type); 206 pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type);
123 207
@@ -156,15 +240,14 @@ static struct irq_chip mpc52xx_extirq_irqchip = {
156 240
157/* 241/*
158 * Main interrupt irq_chip 242 * Main interrupt irq_chip
159*/ 243 */
160
161static void mpc52xx_main_mask(unsigned int virq) 244static void mpc52xx_main_mask(unsigned int virq)
162{ 245{
163 int irq; 246 int irq;
164 int l2irq; 247 int l2irq;
165 248
166 irq = irq_map[virq].hwirq; 249 irq = irq_map[virq].hwirq;
167 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 250 l2irq = irq & MPC52xx_IRQ_L2_MASK;
168 251
169 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 252 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
170 253
@@ -177,7 +260,7 @@ static void mpc52xx_main_unmask(unsigned int virq)
177 int l2irq; 260 int l2irq;
178 261
179 irq = irq_map[virq].hwirq; 262 irq = irq_map[virq].hwirq;
180 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 263 l2irq = irq & MPC52xx_IRQ_L2_MASK;
181 264
182 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 265 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
183 266
@@ -193,15 +276,14 @@ static struct irq_chip mpc52xx_main_irqchip = {
193 276
194/* 277/*
195 * Peripherals interrupt irq_chip 278 * Peripherals interrupt irq_chip
196*/ 279 */
197
198static void mpc52xx_periph_mask(unsigned int virq) 280static void mpc52xx_periph_mask(unsigned int virq)
199{ 281{
200 int irq; 282 int irq;
201 int l2irq; 283 int l2irq;
202 284
203 irq = irq_map[virq].hwirq; 285 irq = irq_map[virq].hwirq;
204 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 286 l2irq = irq & MPC52xx_IRQ_L2_MASK;
205 287
206 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 288 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
207 289
@@ -214,7 +296,7 @@ static void mpc52xx_periph_unmask(unsigned int virq)
214 int l2irq; 296 int l2irq;
215 297
216 irq = irq_map[virq].hwirq; 298 irq = irq_map[virq].hwirq;
217 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 299 l2irq = irq & MPC52xx_IRQ_L2_MASK;
218 300
219 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 301 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
220 302
@@ -230,15 +312,14 @@ static struct irq_chip mpc52xx_periph_irqchip = {
230 312
231/* 313/*
232 * SDMA interrupt irq_chip 314 * SDMA interrupt irq_chip
233*/ 315 */
234
235static void mpc52xx_sdma_mask(unsigned int virq) 316static void mpc52xx_sdma_mask(unsigned int virq)
236{ 317{
237 int irq; 318 int irq;
238 int l2irq; 319 int l2irq;
239 320
240 irq = irq_map[virq].hwirq; 321 irq = irq_map[virq].hwirq;
241 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 322 l2irq = irq & MPC52xx_IRQ_L2_MASK;
242 323
243 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 324 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
244 325
@@ -251,7 +332,7 @@ static void mpc52xx_sdma_unmask(unsigned int virq)
251 int l2irq; 332 int l2irq;
252 333
253 irq = irq_map[virq].hwirq; 334 irq = irq_map[virq].hwirq;
254 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 335 l2irq = irq & MPC52xx_IRQ_L2_MASK;
255 336
256 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 337 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
257 338
@@ -264,7 +345,7 @@ static void mpc52xx_sdma_ack(unsigned int virq)
264 int l2irq; 345 int l2irq;
265 346
266 irq = irq_map[virq].hwirq; 347 irq = irq_map[virq].hwirq;
267 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 348 l2irq = irq & MPC52xx_IRQ_L2_MASK;
268 349
269 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq); 350 pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
270 351
@@ -278,13 +359,12 @@ static struct irq_chip mpc52xx_sdma_irqchip = {
278 .ack = mpc52xx_sdma_ack, 359 .ack = mpc52xx_sdma_ack,
279}; 360};
280 361
281/* 362/**
282 * irq_host 363 * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
283*/ 364 */
284
285static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, 365static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
286 u32 * intspec, unsigned int intsize, 366 u32 *intspec, unsigned int intsize,
287 irq_hw_number_t * out_hwirq, 367 irq_hw_number_t *out_hwirq,
288 unsigned int *out_flags) 368 unsigned int *out_flags)
289{ 369{
290 int intrvect_l1; 370 int intrvect_l1;
@@ -299,10 +379,9 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
299 intrvect_l2 = (int)intspec[1]; 379 intrvect_l2 = (int)intspec[1];
300 intrvect_type = (int)intspec[2]; 380 intrvect_type = (int)intspec[2];
301 381
302 intrvect_linux = 382 intrvect_linux = (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) &
303 (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK; 383 MPC52xx_IRQ_L1_MASK;
304 intrvect_linux |= 384 intrvect_linux |= intrvect_l2 & MPC52xx_IRQ_L2_MASK;
305 (intrvect_l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK;
306 385
307 pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1, 386 pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
308 intrvect_l2); 387 intrvect_l2);
@@ -313,11 +392,11 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
313 return 0; 392 return 0;
314} 393}
315 394
316/* 395/**
317 * this function retrieves the correct IRQ type out 396 * mpc52xx_irqx_gettype - determine the IRQ sense type (level/edge)
318 * of the MPC regs 397 *
319 * Only externals IRQs needs this 398 * Only external IRQs need this.
320*/ 399 */
321static int mpc52xx_irqx_gettype(int irq) 400static int mpc52xx_irqx_gettype(int irq)
322{ 401{
323 int type; 402 int type;
@@ -329,6 +408,9 @@ static int mpc52xx_irqx_gettype(int irq)
329 return mpc52xx_map_senses[type]; 408 return mpc52xx_map_senses[type];
330} 409}
331 410
411/**
412 * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
413 */
332static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, 414static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
333 irq_hw_number_t irq) 415 irq_hw_number_t irq)
334{ 416{
@@ -339,7 +421,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
339 int type; 421 int type;
340 422
341 l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET; 423 l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
342 l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET; 424 l2irq = irq & MPC52xx_IRQ_L2_MASK;
343 425
344 /* 426 /*
345 * Most of ours IRQs will be level low 427 * Most of ours IRQs will be level low
@@ -379,8 +461,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
379 break; 461 break;
380 462
381 default: 463 default:
382 pr_debug("%s: Error, unknown L1 IRQ (0x%x)\n", __func__, l1irq); 464 pr_err("%s: invalid virq requested (0x%x)\n", __func__, virq);
383 printk(KERN_ERR "Unknow IRQ!\n");
384 return -EINVAL; 465 return -EINVAL;
385 } 466 }
386 467
@@ -406,10 +487,15 @@ static struct irq_host_ops mpc52xx_irqhost_ops = {
406 .map = mpc52xx_irqhost_map, 487 .map = mpc52xx_irqhost_map,
407}; 488};
408 489
409/* 490/**
410 * init (public) 491 * mpc52xx_init_irq - Initialize and register with the virq subsystem
411*/ 492 *
412 493 * Hook for setting up IRQs on an mpc5200 system. A pointer to this function
494 * is to be put into the machine definition structure.
495 *
496 * This function searches the device tree for an MPC5200 interrupt controller,
497 * initializes it, and registers it with the virq subsystem.
498 */
413void __init mpc52xx_init_irq(void) 499void __init mpc52xx_init_irq(void)
414{ 500{
415 u32 intr_ctrl; 501 u32 intr_ctrl;
@@ -454,7 +540,6 @@ void __init mpc52xx_init_irq(void)
454 * As last step, add an irq host to translate the real 540 * As last step, add an irq host to translate the real
455 * hw irq information provided by the ofw to linux virq 541 * hw irq information provided by the ofw to linux virq
456 */ 542 */
457
458 mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR, 543 mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR,
459 MPC52xx_IRQ_HIGHTESTHWIRQ, 544 MPC52xx_IRQ_HIGHTESTHWIRQ,
460 &mpc52xx_irqhost_ops, -1); 545 &mpc52xx_irqhost_ops, -1);
@@ -462,12 +547,38 @@ void __init mpc52xx_init_irq(void)
462 if (!mpc52xx_irqhost) 547 if (!mpc52xx_irqhost)
463 panic(__FILE__ ": Cannot allocate the IRQ host\n"); 548 panic(__FILE__ ": Cannot allocate the IRQ host\n");
464 549
465 printk(KERN_INFO "MPC52xx PIC is up and running!\n"); 550 irq_set_default_host(mpc52xx_irqhost);
551
552 pr_info("MPC52xx PIC is up and running!\n");
466} 553}
467 554
468/* 555/**
469 * get_irq (public) 556 * mpc52xx_get_irq - Get pending interrupt number hook function
470*/ 557 *
 558 * Called by the interrupt handler to determine which IRQ handler needs to be
559 * executed.
560 *
561 * Status of pending interrupts is determined by reading the encoded status
562 * register. The encoded status register has three fields; one for each of the
563 * types of interrupts defined by the controller - 'critical', 'main' and
564 * 'peripheral'. This function reads the status register and returns the IRQ
565 * number associated with the highest priority pending interrupt. 'Critical'
566 * interrupts have the highest priority, followed by 'main' interrupts, and
567 * then 'peripheral'.
568 *
569 * The mpc5200 interrupt controller can be configured to boost the priority
570 * of individual 'peripheral' interrupts. If this is the case then a special
571 * value will appear in either the crit or main fields indicating a high
572 * or medium priority peripheral irq has occurred.
573 *
574 * This function checks each of the 3 irq request fields and returns the
575 * first pending interrupt that it finds.
576 *
577 * This function also identifies a 4th type of interrupt; 'bestcomm'. Each
578 * bestcomm DMA task can raise the bestcomm peripheral interrupt. When this
 579 * occurs, a task-specific IRQ# is decoded so that each task can have its
580 * own IRQ handler.
581 */
471unsigned int mpc52xx_get_irq(void) 582unsigned int mpc52xx_get_irq(void)
472{ 583{
473 u32 status; 584 u32 status;
@@ -478,25 +589,21 @@ unsigned int mpc52xx_get_irq(void)
478 irq = (status >> 8) & 0x3; 589 irq = (status >> 8) & 0x3;
479 if (irq == 2) /* high priority peripheral */ 590 if (irq == 2) /* high priority peripheral */
480 goto peripheral; 591 goto peripheral;
481 irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET) & 592 irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET);
482 MPC52xx_IRQ_L1_MASK;
483 } else if (status & 0x00200000) { /* main */ 593 } else if (status & 0x00200000) { /* main */
484 irq = (status >> 16) & 0x1f; 594 irq = (status >> 16) & 0x1f;
485 if (irq == 4) /* low priority peripheral */ 595 if (irq == 4) /* low priority peripheral */
486 goto peripheral; 596 goto peripheral;
487 irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET) & 597 irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET);
488 MPC52xx_IRQ_L1_MASK;
489 } else if (status & 0x20000000) { /* peripheral */ 598 } else if (status & 0x20000000) { /* peripheral */
490 peripheral: 599 peripheral:
491 irq = (status >> 24) & 0x1f; 600 irq = (status >> 24) & 0x1f;
492 if (irq == 0) { /* bestcomm */ 601 if (irq == 0) { /* bestcomm */
493 status = in_be32(&sdma->IntPend); 602 status = in_be32(&sdma->IntPend);
494 irq = ffs(status) - 1; 603 irq = ffs(status) - 1;
495 irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET) & 604 irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET);
496 MPC52xx_IRQ_L1_MASK;
497 } else { 605 } else {
498 irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET) & 606 irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
499 MPC52xx_IRQ_L1_MASK;
500 } 607 }
501 } 608 }
502 609
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.h b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
deleted file mode 100644
index 1a26bcdb3049..000000000000
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * Header file for Freescale MPC52xx Interrupt controller
3 *
4 * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
5 * Copyright (C) 2003 MontaVista, Software, Inc.
6 *
7 * This file is licensed under the terms of the GNU General Public License
8 * version 2. This program is licensed "as is" without any warranty of any
9 * kind, whether express or implied.
10 */
11
12#ifndef __POWERPC_SYSDEV_MPC52xx_PIC_H__
13#define __POWERPC_SYSDEV_MPC52xx_PIC_H__
14
15#include <asm/types.h>
16
17
18/* HW IRQ mapping */
19#define MPC52xx_IRQ_L1_CRIT (0)
20#define MPC52xx_IRQ_L1_MAIN (1)
21#define MPC52xx_IRQ_L1_PERP (2)
22#define MPC52xx_IRQ_L1_SDMA (3)
23
24#define MPC52xx_IRQ_L1_OFFSET (6)
25#define MPC52xx_IRQ_L1_MASK (0x00c0)
26
27#define MPC52xx_IRQ_L2_OFFSET (0)
28#define MPC52xx_IRQ_L2_MASK (0x003f)
29
30#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
31
32
33/* Interrupt controller Register set */
34struct mpc52xx_intr {
35 u32 per_mask; /* INTR + 0x00 */
36 u32 per_pri1; /* INTR + 0x04 */
37 u32 per_pri2; /* INTR + 0x08 */
38 u32 per_pri3; /* INTR + 0x0c */
39 u32 ctrl; /* INTR + 0x10 */
40 u32 main_mask; /* INTR + 0x14 */
41 u32 main_pri1; /* INTR + 0x18 */
42 u32 main_pri2; /* INTR + 0x1c */
43 u32 reserved1; /* INTR + 0x20 */
44 u32 enc_status; /* INTR + 0x24 */
45 u32 crit_status; /* INTR + 0x28 */
46 u32 main_status; /* INTR + 0x2c */
47 u32 per_status; /* INTR + 0x30 */
48 u32 reserved2; /* INTR + 0x34 */
49 u32 per_error; /* INTR + 0x38 */
50};
51
52#endif /* __POWERPC_SYSDEV_MPC52xx_PIC_H__ */
53
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index c72d3304387f..a55b0b6813ed 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -5,9 +5,6 @@
5#include <asm/cacheflush.h> 5#include <asm/cacheflush.h>
6#include <asm/mpc52xx.h> 6#include <asm/mpc52xx.h>
7 7
8#include "mpc52xx_pic.h"
9
10
11/* these are defined in mpc52xx_sleep.S, and only used here */ 8/* these are defined in mpc52xx_sleep.S, and only used here */
12extern void mpc52xx_deep_sleep(void __iomem *sram, void __iomem *sdram_regs, 9extern void mpc52xx_deep_sleep(void __iomem *sram, void __iomem *sdram_regs,
13 struct mpc52xx_cdm __iomem *, struct mpc52xx_intr __iomem*); 10 struct mpc52xx_cdm __iomem *, struct mpc52xx_intr __iomem*);
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index 1b75902fad64..9761a59f175f 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -53,7 +53,7 @@ static void __init pq2_pci_add_bridge(struct device_node *np)
53 if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b) 53 if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
54 goto err; 54 goto err;
55 55
56 ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; 56 ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
57 57
58 hose = pcibios_alloc_controller(np); 58 hose = pcibios_alloc_controller(np);
59 if (!hose) 59 if (!hose)
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index cb3054e1001d..f0798c09980f 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -1,6 +1,8 @@
1# 1#
2# Makefile for the PowerPC 85xx linux kernel. 2# Makefile for the PowerPC 85xx linux kernel.
3# 3#
4obj-$(CONFIG_SMP) += smp.o
5
4obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o 6obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
5obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o 7obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
6obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o 8obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 613bf8c2e30d..a8301c8ad537 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -63,6 +63,7 @@ void __init mpc85xx_ds_pic_init(void)
63 struct device_node *cascade_node = NULL; 63 struct device_node *cascade_node = NULL;
64 int cascade_irq; 64 int cascade_irq;
65#endif 65#endif
66 unsigned long root = of_get_flat_dt_root();
66 67
67 np = of_find_node_by_type(NULL, "open-pic"); 68 np = of_find_node_by_type(NULL, "open-pic");
68 if (np == NULL) { 69 if (np == NULL) {
@@ -76,11 +77,19 @@ void __init mpc85xx_ds_pic_init(void)
76 return; 77 return;
77 } 78 }
78 79
79 mpic = mpic_alloc(np, r.start, 80 if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
81 mpic = mpic_alloc(np, r.start,
82 MPIC_PRIMARY |
83 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
84 0, 256, " OpenPIC ");
85 } else {
86 mpic = mpic_alloc(np, r.start,
80 MPIC_PRIMARY | MPIC_WANTS_RESET | 87 MPIC_PRIMARY | MPIC_WANTS_RESET |
81 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | 88 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
82 MPIC_SINGLE_DEST_CPU, 89 MPIC_SINGLE_DEST_CPU,
83 0, 256, " OpenPIC "); 90 0, 256, " OpenPIC ");
91 }
92
84 BUG_ON(mpic == NULL); 93 BUG_ON(mpic == NULL);
85 of_node_put(np); 94 of_node_put(np);
86 95
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 2494c5155919..658a36fab3ab 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -231,7 +231,7 @@ static void __init mpc85xx_mds_setup_arch(void)
231 231
232static int __init board_fixups(void) 232static int __init board_fixups(void)
233{ 233{
234 char phy_id[BUS_ID_SIZE]; 234 char phy_id[20];
235 char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"}; 235 char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"};
236 struct device_node *mdio; 236 struct device_node *mdio;
237 struct resource res; 237 struct resource res;
@@ -241,13 +241,15 @@ static int __init board_fixups(void)
241 mdio = of_find_compatible_node(NULL, NULL, compstrs[i]); 241 mdio = of_find_compatible_node(NULL, NULL, compstrs[i]);
242 242
243 of_address_to_resource(mdio, 0, &res); 243 of_address_to_resource(mdio, 0, &res);
244 snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", res.start, 1); 244 snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
245 (unsigned long long)res.start, 1);
245 246
246 phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock); 247 phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock);
247 phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); 248 phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups);
248 249
249 /* Register a workaround for errata */ 250 /* Register a workaround for errata */
250 snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", res.start, 7); 251 snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
252 (unsigned long long)res.start, 7);
251 phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); 253 phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups);
252 254
253 of_node_put(mdio); 255 of_node_put(mdio);
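
The wider buffer and %llx format are needed because res.start is a resource_size_t, which can be 64 bits on 85xx parts with 36-bit physical addressing. A quick worked example with a hypothetical register address:

/* Hypothetical address; shows the bus id string the fixups key on. */
static void example_phy_id_format(void)
{
	char phy_id[20];

	snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
		 (unsigned long long)0xfe0024520ULL, 1);
	/* phy_id is now "fe0024520:01": MDIO bus address plus PHY address,
	 * the string phy_register_fixup_for_id() matches against. */
}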
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
new file mode 100644
index 000000000000..d652c713f496
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -0,0 +1,104 @@
1/*
2 * Author: Andy Fleming <afleming@freescale.com>
3 * Kumar Gala <galak@kernel.crashing.org>
4 *
5 * Copyright 2006-2008 Freescale Semiconductor Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/stddef.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/delay.h>
17#include <linux/of.h>
18
19#include <asm/machdep.h>
20#include <asm/pgtable.h>
21#include <asm/page.h>
22#include <asm/mpic.h>
23#include <asm/cacheflush.h>
24
25#include <sysdev/fsl_soc.h>
26
27extern volatile unsigned long __secondary_hold_acknowledge;
28extern void __early_start(void);
29
30#define BOOT_ENTRY_ADDR_UPPER 0
31#define BOOT_ENTRY_ADDR_LOWER 1
32#define BOOT_ENTRY_R3_UPPER 2
33#define BOOT_ENTRY_R3_LOWER 3
34#define BOOT_ENTRY_RESV 4
35#define BOOT_ENTRY_PIR 5
36#define BOOT_ENTRY_R6_UPPER 6
37#define BOOT_ENTRY_R6_LOWER 7
38#define NUM_BOOT_ENTRY 8
39#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32))
40
41static void __init
42smp_85xx_kick_cpu(int nr)
43{
44 unsigned long flags;
45 const u64 *cpu_rel_addr;
46 __iomem u32 *bptr_vaddr;
47 struct device_node *np;
48 int n = 0;
49
50 WARN_ON (nr < 0 || nr >= NR_CPUS);
51
52 pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
53
54 local_irq_save(flags);
55
56 np = of_get_cpu_node(nr, NULL);
57 cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
58
59 if (cpu_rel_addr == NULL) {
60 printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
61 return;
62 }
63
64 /* Map the spin table */
65 bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
66
67 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
68 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
69
70 /* Wait a bit for the CPU to ack. */
71 while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
72 mdelay(1);
73
74 iounmap(bptr_vaddr);
75
76 local_irq_restore(flags);
77
78 pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
79}
80
81static void __init
82smp_85xx_setup_cpu(int cpu_nr)
83{
84 mpic_setup_this_cpu();
85
86 /* Clear any pending timer interrupts */
87 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
88
89 /* Enable decrementer interrupt */
90 mtspr(SPRN_TCR, TCR_DIE);
91}
92
93struct smp_ops_t smp_85xx_ops = {
94 .message_pass = smp_mpic_message_pass,
95 .probe = smp_mpic_probe,
96 .kick_cpu = smp_85xx_kick_cpu,
97 .setup_cpu = smp_85xx_setup_cpu,
98};
99
100void __init
101mpc85xx_smp_init(void)
102{
103 smp_ops = &smp_85xx_ops;
104}
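
The BOOT_ENTRY_* defines above index 32-bit words in the spin table that firmware publishes at each CPU's cpu-release-addr property. Viewed as a structure, the layout implied by those defines is roughly the following (field names are an assumption):

/* Sketch of the 8-word spin table addressed through bptr_vaddr. */
struct spin_table {
	u32 entry_addr_upper;	/* BOOT_ENTRY_ADDR_UPPER */
	u32 entry_addr_lower;	/* BOOT_ENTRY_ADDR_LOWER: __pa(__early_start) */
	u32 r3_upper;		/* BOOT_ENTRY_R3_UPPER */
	u32 r3_lower;		/* BOOT_ENTRY_R3_LOWER */
	u32 reserved;		/* BOOT_ENTRY_RESV */
	u32 pir;		/* BOOT_ENTRY_PIR: written with the cpu number */
	u32 r6_upper;		/* BOOT_ENTRY_R6_UPPER */
	u32 r6_lower;		/* BOOT_ENTRY_R6_LOWER */
};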
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 77dd797a2580..8e5693935975 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -34,6 +34,8 @@ config MPC8610_HPCD
34config GEF_SBC610 34config GEF_SBC610
35 bool "GE Fanuc SBC610" 35 bool "GE Fanuc SBC610"
36 select DEFAULT_UIMAGE 36 select DEFAULT_UIMAGE
37 select GENERIC_GPIO
38 select ARCH_REQUIRE_GPIOLIB
37 select HAS_RAPIDIO 39 select HAS_RAPIDIO
38 help 40 help
39 This option enables support for GE Fanuc's SBC610. 41 This option enables support for GE Fanuc's SBC610.
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 4a56ff619afd..31e540c2ebbc 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_SMP) += mpc86xx_smp.o
7obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o 7obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
8obj-$(CONFIG_SBC8641D) += sbc8641d.o 8obj-$(CONFIG_SBC8641D) += sbc8641d.o
9obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o 9obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o
10obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o gef_pic.o 10gef-gpio-$(CONFIG_GPIOLIB) += gef_gpio.o
11obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o gef_pic.o $(gef-gpio-y)
diff --git a/arch/powerpc/platforms/86xx/gef_gpio.c b/arch/powerpc/platforms/86xx/gef_gpio.c
new file mode 100644
index 000000000000..85b2800f4cb7
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/gef_gpio.c
@@ -0,0 +1,143 @@
1/*
2 * Driver for GE Fanuc's FPGA based GPIO pins
3 *
4 * Author: Martyn Welch <martyn.welch@gefanuc.com>
5 *
6 * 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc.
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13/* TODO
14 *
15 * Configuration of output modes (totem-pole/open-drain)
 16 * Interrupt configuration - interrupts are always generated; the FPGA relies on
 17 * the I/O interrupt controller's mask to stop them propagating
18 */
19
20#include <linux/kernel.h>
21#include <linux/compiler.h>
22#include <linux/init.h>
23#include <linux/io.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_platform.h>
27#include <linux/of_gpio.h>
28#include <linux/gpio.h>
29
30#define GEF_GPIO_DIRECT 0x00
31#define GEF_GPIO_IN 0x04
32#define GEF_GPIO_OUT 0x08
33#define GEF_GPIO_TRIG 0x0C
34#define GEF_GPIO_POLAR_A 0x10
35#define GEF_GPIO_POLAR_B 0x14
36#define GEF_GPIO_INT_STAT 0x18
37#define GEF_GPIO_OVERRUN 0x1C
38#define GEF_GPIO_MODE 0x20
39
40#define NUM_GPIO 19
41
42static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value)
43{
44 unsigned int data;
45
46 data = ioread32be(reg);
47 /* value: 0=low; 1=high */
48 if (value & 0x1)
49 data = data | (0x1 << offset);
50 else
51 data = data & ~(0x1 << offset);
52
53 iowrite32be(data, reg);
54}
55
56
57static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
58{
59 unsigned int data;
60 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
61
62 data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
63 data = data | (0x1 << offset);
64 iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
65
66 return 0;
67}
68
69static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
70{
71 unsigned int data;
72 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
73
 74	/* Set the output value before switching the pin to output */
75 _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
76
77 data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
78 data = data & ~(0x1 << offset);
79 iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
80
81 return 0;
82}
83
84static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
85{
86 unsigned int data;
87 int state = 0;
88 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
89
90 data = ioread32be(mmchip->regs + GEF_GPIO_IN);
91 state = (int)((data >> offset) & 0x1);
92
93 return state;
94}
95
96static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
97{
98 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
99
100 _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
101}
102
103static int __init gef_gpio_init(void)
104{
105 struct device_node *np;
106
107 for_each_compatible_node(np, NULL, "gef,sbc610-gpio") {
108 int retval;
109 struct of_mm_gpio_chip *gef_gpio_chip;
110
111 pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
112
113 /* Allocate chip structure */
114 gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
115 if (!gef_gpio_chip) {
116 pr_err("%s: Unable to allocate structure\n",
117 np->full_name);
118 continue;
119 }
120
121 /* Setup pointers to chip functions */
122 gef_gpio_chip->of_gc.gpio_cells = 2;
123 gef_gpio_chip->of_gc.gc.ngpio = NUM_GPIO;
124 gef_gpio_chip->of_gc.gc.direction_input = gef_gpio_dir_in;
125 gef_gpio_chip->of_gc.gc.direction_output = gef_gpio_dir_out;
126 gef_gpio_chip->of_gc.gc.get = gef_gpio_get;
127 gef_gpio_chip->of_gc.gc.set = gef_gpio_set;
128
129 /* This function adds a memory mapped GPIO chip */
130 retval = of_mm_gpiochip_add(np, gef_gpio_chip);
131 if (retval) {
132 kfree(gef_gpio_chip);
133 pr_err("%s: Unable to add GPIO\n", np->full_name);
134 }
135 }
136
137 return 0;
138};
139arch_initcall(gef_gpio_init);
140
141MODULE_DESCRIPTION("GE Fanuc I/O FPGA GPIO driver");
142MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
143MODULE_LICENSE("GPL");
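
Once of_mm_gpiochip_add() has registered the 19 lines they behave as ordinary gpiolib GPIOs. A hypothetical consumer might look like this (the GPIO number depends on the base gpiolib assigns, so 'gpio' is an assumption):

#include <linux/gpio.h>

/* Hypothetical user of one GEF FPGA GPIO line (sketch). */
static int example_drive_gef_gpio(unsigned int gpio)
{
	int err;

	err = gpio_request(gpio, "gef-example");
	if (err)
		return err;

	err = gpio_direction_output(gpio, 1);	/* drive the line high */
	if (!err)
		gpio_set_value(gpio, 0);	/* ...then low */

	gpio_free(gpio);
	return err;
}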
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 548efa55c8fe..3d0c776f888d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -195,16 +195,24 @@ config SPE
195 195
196config PPC_STD_MMU 196config PPC_STD_MMU
197 bool 197 bool
198 depends on 6xx || POWER3 || POWER4 || PPC64 198 depends on 6xx || PPC64
199 default y 199 default y
200 200
201config PPC_STD_MMU_32 201config PPC_STD_MMU_32
202 def_bool y 202 def_bool y
203 depends on PPC_STD_MMU && PPC32 203 depends on PPC_STD_MMU && PPC32
204 204
205config PPC_STD_MMU_64
206 def_bool y
207 depends on PPC_STD_MMU && PPC64
208
209config PPC_MMU_NOHASH
210 def_bool y
211 depends on !PPC_STD_MMU
212
205config PPC_MM_SLICES 213config PPC_MM_SLICES
206 bool 214 bool
207 default y if HUGETLB_PAGE || PPC_64K_PAGES 215 default y if HUGETLB_PAGE || (PPC_STD_MMU_64 && PPC_64K_PAGES)
208 default n 216 default n
209 217
210config VIRT_CPU_ACCOUNTING 218config VIRT_CPU_ACCOUNTING
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index c14d7d8d96c8..5cc3279559a4 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -2,13 +2,18 @@ config PPC_CELL
2 bool 2 bool
3 default n 3 default n
4 4
5config PPC_CELL_NATIVE 5config PPC_CELL_COMMON
6 bool 6 bool
7 select PPC_CELL 7 select PPC_CELL
8 select PPC_DCR_MMIO 8 select PPC_DCR_MMIO
9 select PPC_OF_PLATFORM_PCI
10 select PPC_INDIRECT_IO 9 select PPC_INDIRECT_IO
11 select PPC_NATIVE 10 select PPC_NATIVE
11 select PPC_RTAS
12
13config PPC_CELL_NATIVE
14 bool
15 select PPC_CELL_COMMON
16 select PPC_OF_PLATFORM_PCI
12 select MPIC 17 select MPIC
13 select IBM_NEW_EMAC_EMAC4 18 select IBM_NEW_EMAC_EMAC4
14 select IBM_NEW_EMAC_RGMII 19 select IBM_NEW_EMAC_RGMII
@@ -20,7 +25,6 @@ config PPC_IBM_CELL_BLADE
20 bool "IBM Cell Blade" 25 bool "IBM Cell Blade"
21 depends on PPC_MULTIPLATFORM && PPC64 26 depends on PPC_MULTIPLATFORM && PPC64
22 select PPC_CELL_NATIVE 27 select PPC_CELL_NATIVE
23 select PPC_RTAS
24 select MMIO_NVRAM 28 select MMIO_NVRAM
25 select PPC_UDBG_16550 29 select PPC_UDBG_16550
26 select UDBG_RTAS_CONSOLE 30 select UDBG_RTAS_CONSOLE
@@ -28,16 +32,17 @@ config PPC_IBM_CELL_BLADE
28config PPC_CELLEB 32config PPC_CELLEB
29 bool "Toshiba's Cell Reference Set 'Celleb' Architecture" 33 bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
30 depends on PPC_MULTIPLATFORM && PPC64 34 depends on PPC_MULTIPLATFORM && PPC64
31 select PPC_CELL
32 select PPC_CELL_NATIVE 35 select PPC_CELL_NATIVE
33 select PPC_RTAS
34 select PPC_INDIRECT_IO
35 select PPC_OF_PLATFORM_PCI
36 select HAS_TXX9_SERIAL 36 select HAS_TXX9_SERIAL
37 select PPC_UDBG_BEAT 37 select PPC_UDBG_BEAT
38 select USB_OHCI_BIG_ENDIAN_MMIO 38 select USB_OHCI_BIG_ENDIAN_MMIO
39 select USB_EHCI_BIG_ENDIAN_MMIO 39 select USB_EHCI_BIG_ENDIAN_MMIO
40 40
41config PPC_CELL_QPACE
42 bool "IBM Cell - QPACE"
43 depends on PPC_MULTIPLATFORM && PPC64
44 select PPC_CELL_COMMON
45
41menu "Cell Broadband Engine options" 46menu "Cell Broadband Engine options"
42 depends on PPC_CELL 47 depends on PPC_CELL
43 48
@@ -102,7 +107,7 @@ config PPC_IBM_CELL_POWERBUTTON
102config CBE_THERM 107config CBE_THERM
103 tristate "CBE thermal support" 108 tristate "CBE thermal support"
104 default m 109 default m
105 depends on CBE_RAS 110 depends on CBE_RAS && SPU_BASE
106 111
107config CBE_CPUFREQ 112config CBE_CPUFREQ
108 tristate "CBE frequency scaling" 113 tristate "CBE frequency scaling"
@@ -136,5 +141,5 @@ endmenu
136 141
137config OPROFILE_CELL 142config OPROFILE_CELL
138 def_bool y 143 def_bool y
139 depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) 144 depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) && SPU_BASE
140 145
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 7fd830872c43..43eccb270301 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,7 @@
1obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \ 1obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o
2 cbe_regs.o spider-pic.o \ 2
3 pervasive.o pmu.o io-workarounds.o \ 3obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
4 spider-pci.o 4 pmu.o io-workarounds.o spider-pci.o
5obj-$(CONFIG_CBE_RAS) += ras.o 5obj-$(CONFIG_CBE_RAS) += ras.o
6 6
7obj-$(CONFIG_CBE_THERM) += cbe_thermal.o 7obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
@@ -14,13 +14,12 @@ obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
14 14
15ifeq ($(CONFIG_SMP),y) 15ifeq ($(CONFIG_SMP),y)
16obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o 16obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
17obj-$(CONFIG_PPC_CELL_QPACE) += smp.o
17endif 18endif
18 19
19# needed only when building loadable spufs.ko 20# needed only when building loadable spufs.ko
20spu-priv1-$(CONFIG_PPC_CELL_NATIVE) += spu_priv1_mmio.o 21spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o
21 22spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
22spu-manage-$(CONFIG_PPC_CELLEB) += spu_manage.o
23spu-manage-$(CONFIG_PPC_CELL_NATIVE) += spu_manage.o
24 23
25obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ 24obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
26 spu_notify.o \ 25 spu_notify.o \
@@ -31,6 +30,8 @@ obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
31 30
32obj-$(CONFIG_PCI_MSI) += axon_msi.o 31obj-$(CONFIG_PCI_MSI) += axon_msi.o
33 32
33# qpace setup
34obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o
34 35
35# celleb stuff 36# celleb stuff
36ifeq ($(CONFIG_PPC_CELLEB),y) 37ifeq ($(CONFIG_PPC_CELLEB),y)
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index b11cb30decb2..07c234f6b2b6 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -45,7 +45,6 @@
45#include <asm/mmu.h> 45#include <asm/mmu.h>
46#include <asm/processor.h> 46#include <asm/processor.h>
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/kexec.h>
49#include <asm/prom.h> 48#include <asm/prom.h>
50#include <asm/machdep.h> 49#include <asm/machdep.h>
51#include <asm/cputable.h> 50#include <asm/cputable.h>
@@ -226,9 +225,6 @@ define_machine(celleb_beat) {
226 .pci_setup_phb = celleb_setup_phb, 225 .pci_setup_phb = celleb_setup_phb,
227#ifdef CONFIG_KEXEC 226#ifdef CONFIG_KEXEC
228 .kexec_cpu_down = beat_kexec_cpu_down, 227 .kexec_cpu_down = beat_kexec_cpu_down,
229 .machine_kexec = default_machine_kexec,
230 .machine_kexec_prepare = default_machine_kexec_prepare,
231 .machine_crash_shutdown = default_machine_crash_shutdown,
232#endif 228#endif
233}; 229};
234 230
@@ -248,9 +244,4 @@ define_machine(celleb_native) {
248 .pci_probe_mode = celleb_pci_probe_mode, 244 .pci_probe_mode = celleb_pci_probe_mode,
249 .pci_setup_phb = celleb_setup_phb, 245 .pci_setup_phb = celleb_setup_phb,
250 .init_IRQ = celleb_init_IRQ_native, 246 .init_IRQ = celleb_init_IRQ_native,
251#ifdef CONFIG_KEXEC
252 .machine_kexec = default_machine_kexec,
253 .machine_kexec_prepare = default_machine_kexec_prepare,
254 .machine_crash_shutdown = default_machine_crash_shutdown,
255#endif
256}; 247};
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 3168272ab0d7..86db4dd170a0 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1053,10 +1053,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
1053 } 1053 }
1054 1054
1055 /* We must have dma-ranges properties for fixed mapping to work */ 1055 /* We must have dma-ranges properties for fixed mapping to work */
1056 for (np = NULL; (np = of_find_all_nodes(np));) { 1056 np = of_find_node_with_property(NULL, "dma-ranges");
1057 if (of_find_property(np, "dma-ranges", NULL))
1058 break;
1059 }
1060 of_node_put(np); 1057 of_node_put(np);
1061 1058
1062 if (!np) { 1059 if (!np) {
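
The new of_find_node_with_property() helper presumably encapsulates exactly the search this hunk removes; roughly:

/* Sketch of the open-coded search replaced above. */
static struct device_node *find_dma_ranges_node(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_all_nodes(np)) != NULL)
		if (of_find_property(np, "dma-ranges", NULL))
			break;

	return np;	/* NULL when no node carries a dma-ranges property */
}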
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
new file mode 100644
index 000000000000..be84e6a16b30
--- /dev/null
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -0,0 +1,152 @@
1/*
2 * linux/arch/powerpc/platforms/cell/qpace_setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas
6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 * Modified by PPC64 Team, IBM Corp
8 * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
9 * Modified by Benjamin Krill <ben@codiert.org>, IBM Corp.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/sched.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/irq.h>
22#include <linux/console.h>
23#include <linux/of_platform.h>
24
25#include <asm/mmu.h>
26#include <asm/processor.h>
27#include <asm/io.h>
28#include <asm/kexec.h>
29#include <asm/pgtable.h>
30#include <asm/prom.h>
31#include <asm/rtas.h>
32#include <asm/dma.h>
33#include <asm/machdep.h>
34#include <asm/time.h>
35#include <asm/cputable.h>
36#include <asm/irq.h>
37#include <asm/spu.h>
38#include <asm/spu_priv1.h>
39#include <asm/udbg.h>
40#include <asm/cell-regs.h>
41
42#include "interrupt.h"
43#include "pervasive.h"
44#include "ras.h"
45#include "io-workarounds.h"
46
47static void qpace_show_cpuinfo(struct seq_file *m)
48{
49 struct device_node *root;
50 const char *model = "";
51
52 root = of_find_node_by_path("/");
53 if (root)
54 model = of_get_property(root, "model", NULL);
55 seq_printf(m, "machine\t\t: CHRP %s\n", model);
56 of_node_put(root);
57}
58
59static void qpace_progress(char *s, unsigned short hex)
60{
61 printk("*** %04x : %s\n", hex, s ? s : "");
62}
63
64static int __init qpace_publish_devices(void)
65{
66 int node;
67
68 /* Publish OF platform devices for southbridge IOs */
69 of_platform_bus_probe(NULL, NULL, NULL);
70
71 /* There is no device for the MIC memory controller, thus we create
72 * a platform device for it to attach the EDAC driver to.
73 */
74 for_each_online_node(node) {
75 if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
76 continue;
77 platform_device_register_simple("cbe-mic", node, NULL, 0);
78 }
79
80 return 0;
81}
82machine_subsys_initcall(qpace, qpace_publish_devices);
83
84extern int qpace_notify(struct device *dev)
85{
86 /* set dma_ops for of_platform bus */
87 if (dev->bus && dev->bus->name
88 && !strcmp(dev->bus->name, "of_platform"))
89 set_dma_ops(dev, &dma_direct_ops);
90
91 return 0;
92}
93
94static void __init qpace_setup_arch(void)
95{
96#ifdef CONFIG_SPU_BASE
97 spu_priv1_ops = &spu_priv1_mmio_ops;
98 spu_management_ops = &spu_management_of_ops;
99#endif
100
101 cbe_regs_init();
102
103#ifdef CONFIG_CBE_RAS
104 cbe_ras_init();
105#endif
106
107#ifdef CONFIG_SMP
108 smp_init_cell();
109#endif
110
111 /* init to some ~sane value until calibrate_delay() runs */
112 loops_per_jiffy = 50000000;
113
114 cbe_pervasive_init();
115#ifdef CONFIG_DUMMY_CONSOLE
116 conswitchp = &dummy_con;
117#endif
118
119 /* set notifier function */
120 platform_notify = &qpace_notify;
121}
122
123static int __init qpace_probe(void)
124{
125 unsigned long root = of_get_flat_dt_root();
126
127 if (!of_flat_dt_is_compatible(root, "IBM,QPACE"))
128 return 0;
129
130 hpte_init_native();
131
132 return 1;
133}
134
135define_machine(qpace) {
136 .name = "QPACE",
137 .probe = qpace_probe,
138 .setup_arch = qpace_setup_arch,
139 .show_cpuinfo = qpace_show_cpuinfo,
140 .restart = rtas_restart,
141 .power_off = rtas_power_off,
142 .halt = rtas_halt,
143 .get_boot_time = rtas_get_boot_time,
144 .calibrate_decr = generic_calibrate_decr,
145 .progress = qpace_progress,
146 .init_IRQ = iic_init_IRQ,
147#ifdef CONFIG_KEXEC
148 .machine_kexec = default_machine_kexec,
149 .machine_kexec_prepare = default_machine_kexec_prepare,
150 .machine_crash_shutdown = default_machine_crash_shutdown,
151#endif
152};
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index ab721b50fbba..59305369f6b2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -35,7 +35,6 @@
35#include <asm/mmu.h> 35#include <asm/mmu.h>
36#include <asm/processor.h> 36#include <asm/processor.h>
37#include <asm/io.h> 37#include <asm/io.h>
38#include <asm/kexec.h>
39#include <asm/pgtable.h> 38#include <asm/pgtable.h>
40#include <asm/prom.h> 39#include <asm/prom.h>
41#include <asm/rtas.h> 40#include <asm/rtas.h>
@@ -289,9 +288,4 @@ define_machine(cell) {
289 .progress = cell_progress, 288 .progress = cell_progress,
290 .init_IRQ = cell_init_irq, 289 .init_IRQ = cell_init_irq,
291 .pci_setup_phb = cell_setup_phb, 290 .pci_setup_phb = cell_setup_phb,
292#ifdef CONFIG_KEXEC
293 .machine_kexec = default_machine_kexec,
294 .machine_kexec_prepare = default_machine_kexec_prepare,
295 .machine_crash_shutdown = default_machine_crash_shutdown,
296#endif
297}; 291};
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 1b26071a86ca..7106b63d401b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -273,12 +273,10 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
273 return VM_FAULT_NOPAGE; 273 return VM_FAULT_NOPAGE;
274 274
275 if (ctx->state == SPU_STATE_SAVED) { 275 if (ctx->state == SPU_STATE_SAVED) {
276 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 276 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
277 & ~_PAGE_NO_CACHE);
278 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); 277 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
279 } else { 278 } else {
280 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 279 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
281 | _PAGE_NO_CACHE);
282 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; 280 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
283 } 281 }
284 vm_insert_pfn(vma, address, pfn); 282 vm_insert_pfn(vma, address, pfn);
@@ -338,8 +336,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
338 return -EINVAL; 336 return -EINVAL;
339 337
340 vma->vm_flags |= VM_IO | VM_PFNMAP; 338 vma->vm_flags |= VM_IO | VM_PFNMAP;
341 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 339 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
342 | _PAGE_NO_CACHE);
343 340
344 vma->vm_ops = &spufs_mem_mmap_vmops; 341 vma->vm_ops = &spufs_mem_mmap_vmops;
345 return 0; 342 return 0;
@@ -452,8 +449,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
452 return -EINVAL; 449 return -EINVAL;
453 450
454 vma->vm_flags |= VM_IO | VM_PFNMAP; 451 vma->vm_flags |= VM_IO | VM_PFNMAP;
455 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 452 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
456 | _PAGE_NO_CACHE | _PAGE_GUARDED);
457 453
458 vma->vm_ops = &spufs_cntl_mmap_vmops; 454 vma->vm_ops = &spufs_cntl_mmap_vmops;
459 return 0; 455 return 0;
@@ -1155,8 +1151,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1155 return -EINVAL; 1151 return -EINVAL;
1156 1152
1157 vma->vm_flags |= VM_IO | VM_PFNMAP; 1153 vma->vm_flags |= VM_IO | VM_PFNMAP;
1158 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1154 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1159 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1160 1155
1161 vma->vm_ops = &spufs_signal1_mmap_vmops; 1156 vma->vm_ops = &spufs_signal1_mmap_vmops;
1162 return 0; 1157 return 0;
@@ -1292,8 +1287,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1292 return -EINVAL; 1287 return -EINVAL;
1293 1288
1294 vma->vm_flags |= VM_IO | VM_PFNMAP; 1289 vma->vm_flags |= VM_IO | VM_PFNMAP;
1295 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1290 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1296 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1297 1291
1298 vma->vm_ops = &spufs_signal2_mmap_vmops; 1292 vma->vm_ops = &spufs_signal2_mmap_vmops;
1299 return 0; 1293 return 0;
@@ -1414,8 +1408,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1414 return -EINVAL; 1408 return -EINVAL;
1415 1409
1416 vma->vm_flags |= VM_IO | VM_PFNMAP; 1410 vma->vm_flags |= VM_IO | VM_PFNMAP;
1417 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1411 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1418 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1419 1412
1420 vma->vm_ops = &spufs_mss_mmap_vmops; 1413 vma->vm_ops = &spufs_mss_mmap_vmops;
1421 return 0; 1414 return 0;
@@ -1476,8 +1469,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1476 return -EINVAL; 1469 return -EINVAL;
1477 1470
1478 vma->vm_flags |= VM_IO | VM_PFNMAP; 1471 vma->vm_flags |= VM_IO | VM_PFNMAP;
1479 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1472 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1480 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1481 1473
1482 vma->vm_ops = &spufs_psmap_mmap_vmops; 1474 vma->vm_ops = &spufs_psmap_mmap_vmops;
1483 return 0; 1475 return 0;
@@ -1536,8 +1528,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1536 return -EINVAL; 1528 return -EINVAL;
1537 1529
1538 vma->vm_flags |= VM_IO | VM_PFNMAP; 1530 vma->vm_flags |= VM_IO | VM_PFNMAP;
1539 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1531 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1540 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1541 1532
1542 vma->vm_ops = &spufs_mfc_mmap_vmops; 1533 vma->vm_ops = &spufs_mfc_mmap_vmops;
1543 return 0; 1534 return 0;
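The spufs hunks above replace open-coded __pgprot() arithmetic with the pgprot_cached(), pgprot_noncached() and pgprot_noncached_wc() accessors. The macros below are hypothetical stand-ins reconstructed purely from the lines they replace, to show what the accessors boil down to; the real powerpc definitions may mask additional cache-control bits:

/* Hypothetical stand-ins, reconstructed from the removed lines above. */
#define example_noncached(prot) \
        __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define example_noncached_wc(prot) \
        __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
#define example_cached(prot) \
        __pgprot(pgprot_val(prot) & ~_PAGE_NO_CACHE)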
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index cb85d237e492..6296bfd9cb0b 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -95,8 +95,8 @@ spufs_new_inode(struct super_block *sb, int mode)
95 goto out; 95 goto out;
96 96
97 inode->i_mode = mode; 97 inode->i_mode = mode;
98 inode->i_uid = current->fsuid; 98 inode->i_uid = current_fsuid();
99 inode->i_gid = current->fsgid; 99 inode->i_gid = current_fsgid();
100 inode->i_blocks = 0; 100 inode->i_blocks = 0;
101 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 101 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
102out: 102out:
@@ -323,7 +323,7 @@ static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
323 goto out; 323 goto out;
324 } 324 }
325 325
326 filp = dentry_open(dentry, mnt, O_RDONLY); 326 filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
327 if (IS_ERR(filp)) { 327 if (IS_ERR(filp)) {
328 put_unused_fd(ret); 328 put_unused_fd(ret);
329 ret = PTR_ERR(filp); 329 ret = PTR_ERR(filp);
@@ -562,7 +562,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
562 goto out; 562 goto out;
563 } 563 }
564 564
565 filp = dentry_open(dentry, mnt, O_RDONLY); 565 filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
566 if (IS_ERR(filp)) { 566 if (IS_ERR(filp)) {
567 put_unused_fd(ret); 567 put_unused_fd(ret);
568 ret = PTR_ERR(filp); 568 ret = PTR_ERR(filp);
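The inode.c hunks track the credentials rework: fsuid/fsgid are read through current_fsuid()/current_fsgid() instead of poking task_struct, and dentry_open() takes an explicit credentials argument. A minimal sketch of the new calling pattern; the wrapper function is hypothetical:

#include <linux/cred.h>
#include <linux/fs.h>

/* Hypothetical helper showing the new dentry_open() convention. */
static struct file *open_with_caller_creds(struct dentry *dentry,
                                           struct vfsmount *mnt)
{
        /* current_cred() supplies the caller's credentials; ownership
         * checks would likewise use current_fsuid()/current_fsgid(). */
        return dentry_open(dentry, mnt, O_RDONLY, current_cred());
}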
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index d3cde6b9d2df..f6b0c519d5a2 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -141,6 +141,7 @@ hydra_init(void)
141 of_node_put(np); 141 of_node_put(np);
142 return 0; 142 return 0;
143 } 143 }
144 of_node_put(np);
144 Hydra = ioremap(r.start, r.end-r.start); 145 Hydra = ioremap(r.start, r.end-r.start);
145 printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start); 146 printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start);
146 printk("Hydra Feature_Control was %x", 147 printk("Hydra Feature_Control was %x",
@@ -198,7 +199,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
198 printk ("RTAS supporting Pegasos OF not found, please upgrade" 199 printk ("RTAS supporting Pegasos OF not found, please upgrade"
199 " your firmware\n"); 200 " your firmware\n");
200 } 201 }
201 ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; 202 ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
202 /* keep the reference to the root node */ 203 /* keep the reference to the root node */
203} 204}
204 205
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c
index 32ba0fa0ad03..8cab5731850f 100644
--- a/arch/powerpc/platforms/embedded6xx/c2k.c
+++ b/arch/powerpc/platforms/embedded6xx/c2k.c
@@ -20,7 +20,6 @@
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/of.h> 22#include <linux/of.h>
23#include <linux/kexec.h>
24 23
25#include <asm/machdep.h> 24#include <asm/machdep.h>
26#include <asm/prom.h> 25#include <asm/prom.h>
@@ -147,9 +146,4 @@ define_machine(c2k) {
147 .get_irq = mv64x60_get_irq, 146 .get_irq = mv64x60_get_irq,
148 .restart = c2k_restart, 147 .restart = c2k_restart,
149 .calibrate_decr = generic_calibrate_decr, 148 .calibrate_decr = generic_calibrate_decr,
150#ifdef CONFIG_KEXEC
151 .machine_kexec = default_machine_kexec,
152 .machine_kexec_prepare = default_machine_kexec_prepare,
153 .machine_crash_shutdown = default_machine_crash_shutdown,
154#endif
155}; 149};
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
index 4c485e984236..670035f49a69 100644
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
@@ -19,7 +19,6 @@
19#include <asm/prom.h> 19#include <asm/prom.h>
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/time.h> 21#include <asm/time.h>
22#include <asm/kexec.h>
23 22
24#include <mm/mmu_decl.h> 23#include <mm/mmu_decl.h>
25 24
@@ -155,9 +154,4 @@ define_machine(prpmc2800){
155 .get_irq = mv64x60_get_irq, 154 .get_irq = mv64x60_get_irq,
156 .restart = prpmc2800_restart, 155 .restart = prpmc2800_restart,
157 .calibrate_decr = generic_calibrate_decr, 156 .calibrate_decr = generic_calibrate_decr,
158#ifdef CONFIG_KEXEC
159 .machine_kexec = default_machine_kexec,
160 .machine_kexec_prepare = default_machine_kexec_prepare,
161 .machine_crash_shutdown = default_machine_crash_shutdown,
162#endif
163}; 157};
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index 45ffd8e542f4..ed3753d8c109 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -9,6 +9,7 @@ menu "iSeries device drivers"
9 9
10config VIODASD 10config VIODASD
11 tristate "iSeries Virtual I/O disk support" 11 tristate "iSeries Virtual I/O disk support"
12 depends on BLOCK
12 help 13 help
13 If you are running on an iSeries system and you want to use 14 If you are running on an iSeries system and you want to use
14 virtual disks created and managed by OS/400, say Y. 15 virtual disks created and managed by OS/400, say Y.
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index d4c61c3c9669..bfd60e4accee 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -50,7 +50,6 @@
50#include <asm/system.h> 50#include <asm/system.h>
51#include <asm/pgtable.h> 51#include <asm/pgtable.h>
52#include <asm/io.h> 52#include <asm/io.h>
53#include <asm/kexec.h>
54#include <asm/pci-bridge.h> 53#include <asm/pci-bridge.h>
55#include <asm/iommu.h> 54#include <asm/iommu.h>
56#include <asm/machdep.h> 55#include <asm/machdep.h>
@@ -335,9 +334,4 @@ define_machine(maple) {
335 .calibrate_decr = generic_calibrate_decr, 334 .calibrate_decr = generic_calibrate_decr,
336 .progress = maple_progress, 335 .progress = maple_progress,
337 .power_save = power4_idle, 336 .power_save = power4_idle,
338#ifdef CONFIG_KEXEC
339 .machine_kexec = default_machine_kexec,
340 .machine_kexec_prepare = default_machine_kexec_prepare,
341 .machine_crash_shutdown = default_machine_crash_shutdown,
342#endif
343}; 337};
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 792d3ce8112e..65c585b8b00d 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -310,7 +310,7 @@ static int pmu_set_cpu_speed(int low_speed)
310 _set_L3CR(save_l3cr); 310 _set_L3CR(save_l3cr);
311 311
312 /* Restore userland MMU context */ 312 /* Restore userland MMU context */
313 set_context(current->active_mm->context.id, current->active_mm->pgd); 313 switch_mmu_context(NULL, current->active_mm);
314 314
315#ifdef DEBUG_FREQ 315#ifdef DEBUG_FREQ
316 printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1)); 316 printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index bcf50d7056e9..54b7b76ed4f0 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -729,7 +729,7 @@ static void __init setup_bandit(struct pci_controller *hose,
729static int __init setup_uninorth(struct pci_controller *hose, 729static int __init setup_uninorth(struct pci_controller *hose,
730 struct resource *addr) 730 struct resource *addr)
731{ 731{
732 ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS; 732 ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
733 has_uninorth = 1; 733 has_uninorth = 1;
734 hose->ops = &macrisc_pci_ops; 734 hose->ops = &macrisc_pci_ops;
735 hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); 735 hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
@@ -996,7 +996,7 @@ void __init pmac_pci_init(void)
996 struct device_node *np, *root; 996 struct device_node *np, *root;
997 struct device_node *ht = NULL; 997 struct device_node *ht = NULL;
998 998
999 ppc_pci_flags = PPC_PCI_CAN_SKIP_ISA_ALIGN; 999 ppc_pci_set_flags(PPC_PCI_CAN_SKIP_ISA_ALIGN);
1000 1000
1001 root = of_find_node_by_path("/"); 1001 root = of_find_node_by_path("/");
1002 if (root == NULL) { 1002 if (root == NULL) {
@@ -1055,7 +1055,7 @@ void __init pmac_pci_init(void)
1055 * some offset between bus number and domains for now when we 1055 * some offset between bus number and domains for now when we
1056 * assign all busses should help for now 1056 * assign all busses should help for now
1057 */ 1057 */
1058 if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS) 1058 if (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
1059 pcibios_assign_bus_offset = 0x10; 1059 pcibios_assign_bus_offset = 0x10;
1060#endif 1060#endif
1061} 1061}
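This file, like the chrp and fsl_pci hunks elsewhere in the patch, stops manipulating the global ppc_pci_flags word directly and goes through accessor helpers. A sketch of what such wrappers amount to, based only on the open-coded accesses they replace (the real helpers may differ in detail):

extern unsigned int ppc_pci_flags;      /* name taken from the removed lines */

static inline void ppc_pci_add_flags(int flags)
{
        ppc_pci_flags |= flags;         /* was: ppc_pci_flags |= ... */
}

static inline void ppc_pci_set_flags(int flags)
{
        ppc_pci_flags = flags;          /* was: ppc_pci_flags = ... */
}

static inline int ppc_pci_has_flag(int flag)
{
        return ppc_pci_flags & flag;    /* was: if (ppc_pci_flags & ...) */
}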
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 82c14d203d8b..9b78f5300c24 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -60,7 +60,6 @@
60#include <asm/system.h> 60#include <asm/system.h>
61#include <asm/pgtable.h> 61#include <asm/pgtable.h>
62#include <asm/io.h> 62#include <asm/io.h>
63#include <asm/kexec.h>
64#include <asm/pci-bridge.h> 63#include <asm/pci-bridge.h>
65#include <asm/ohare.h> 64#include <asm/ohare.h>
66#include <asm/mediabay.h> 65#include <asm/mediabay.h>
@@ -310,9 +309,7 @@ static void __init pmac_setup_arch(void)
310 } 309 }
311 310
312 /* See if newworld or oldworld */ 311 /* See if newworld or oldworld */
313 for (ic = NULL; (ic = of_find_all_nodes(ic)) != NULL; ) 312 ic = of_find_node_with_property(NULL, "interrupt-controller");
314 if (of_get_property(ic, "interrupt-controller", NULL))
315 break;
316 if (ic) { 313 if (ic) {
317 pmac_newworld = 1; 314 pmac_newworld = 1;
318 of_node_put(ic); 315 of_node_put(ic);
@@ -740,11 +737,6 @@ define_machine(powermac) {
740 .pci_probe_mode = pmac_pci_probe_mode, 737 .pci_probe_mode = pmac_pci_probe_mode,
741 .power_save = power4_idle, 738 .power_save = power4_idle,
742 .enable_pmcs = power4_enable_pmcs, 739 .enable_pmcs = power4_enable_pmcs,
743#ifdef CONFIG_KEXEC
744 .machine_kexec = default_machine_kexec,
745 .machine_kexec_prepare = default_machine_kexec_prepare,
746 .machine_crash_shutdown = default_machine_crash_shutdown,
747#endif
748#endif /* CONFIG_PPC64 */ 740#endif /* CONFIG_PPC64 */
749#ifdef CONFIG_PPC32 741#ifdef CONFIG_PPC32
750 .pcibios_enable_device_hook = pmac_pci_enable_device_hook, 742 .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index adee28da353f..1c2802fabd57 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -17,6 +17,7 @@
17#include <asm/cache.h> 17#include <asm/cache.h>
18#include <asm/thread_info.h> 18#include <asm/thread_info.h>
19#include <asm/asm-offsets.h> 19#include <asm/asm-offsets.h>
20#include <asm/mmu.h>
20 21
21#define MAGIC 0x4c617273 /* 'Lars' */ 22#define MAGIC 0x4c617273 /* 'Lars' */
22 23
@@ -323,7 +324,7 @@ grackle_wake_up:
323 lwz r4,SL_IBAT3+4(r1) 324 lwz r4,SL_IBAT3+4(r1)
324 mtibatl 3,r4 325 mtibatl 3,r4
325 326
326BEGIN_FTR_SECTION 327BEGIN_MMU_FTR_SECTION
327 li r4,0 328 li r4,0
328 mtspr SPRN_DBAT4U,r4 329 mtspr SPRN_DBAT4U,r4
329 mtspr SPRN_DBAT4L,r4 330 mtspr SPRN_DBAT4L,r4
@@ -341,7 +342,7 @@ BEGIN_FTR_SECTION
341 mtspr SPRN_IBAT6L,r4 342 mtspr SPRN_IBAT6L,r4
342 mtspr SPRN_IBAT7U,r4 343 mtspr SPRN_IBAT7U,r4
343 mtspr SPRN_IBAT7L,r4 344 mtspr SPRN_IBAT7L,r4
344END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) 345END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
345 346
346 /* Flush all TLBs */ 347 /* Flush all TLBs */
347 lis r4,0x1000 348 lis r4,0x1000
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 40f72c2a4699..6b0711c15eca 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -739,7 +739,7 @@ static void __init smp_core99_setup(int ncpus)
739 739
740 /* XXX should get this from reg properties */ 740 /* XXX should get this from reg properties */
741 for (i = 1; i < ncpus; ++i) 741 for (i = 1; i < ncpus; ++i)
742 smp_hw_index[i] = i; 742 set_hard_smp_processor_id(i, i);
743 } 743 }
744#endif 744#endif
745 745
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index ffdd8e963fbd..dbc124e05646 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -314,11 +314,17 @@ static int __init ps3_setup_vuart_device(enum ps3_match_id match_id,
314 314
315 result = ps3_system_bus_device_register(&p->dev); 315 result = ps3_system_bus_device_register(&p->dev);
316 316
317 if (result) 317 if (result) {
318 pr_debug("%s:%d ps3_system_bus_device_register failed\n", 318 pr_debug("%s:%d ps3_system_bus_device_register failed\n",
319 __func__, __LINE__); 319 __func__, __LINE__);
320 320 goto fail_device_register;
321 }
321 pr_debug(" <- %s:%d\n", __func__, __LINE__); 322 pr_debug(" <- %s:%d\n", __func__, __LINE__);
323 return 0;
324
325fail_device_register:
326 kfree(p);
327 pr_debug(" <- %s:%d fail\n", __func__, __LINE__);
322 return result; 328 return result;
323} 329}
324 330
@@ -463,11 +469,17 @@ static int __init ps3_register_sound_devices(void)
463 469
464 result = ps3_system_bus_device_register(&p->dev); 470 result = ps3_system_bus_device_register(&p->dev);
465 471
466 if (result) 472 if (result) {
467 pr_debug("%s:%d ps3_system_bus_device_register failed\n", 473 pr_debug("%s:%d ps3_system_bus_device_register failed\n",
468 __func__, __LINE__); 474 __func__, __LINE__);
469 475 goto fail_device_register;
476 }
470 pr_debug(" <- %s:%d\n", __func__, __LINE__); 477 pr_debug(" <- %s:%d\n", __func__, __LINE__);
478 return 0;
479
480fail_device_register:
481 kfree(p);
482 pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
471 return result; 483 return result;
472} 484}
473 485
@@ -485,17 +497,24 @@ static int __init ps3_register_graphics_devices(void)
485 if (!p) 497 if (!p)
486 return -ENOMEM; 498 return -ENOMEM;
487 499
488 p->dev.match_id = PS3_MATCH_ID_GRAPHICS; 500 p->dev.match_id = PS3_MATCH_ID_GPU;
489 p->dev.match_sub_id = PS3_MATCH_SUB_ID_FB; 501 p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_FB;
490 p->dev.dev_type = PS3_DEVICE_TYPE_IOC0; 502 p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
491 503
492 result = ps3_system_bus_device_register(&p->dev); 504 result = ps3_system_bus_device_register(&p->dev);
493 505
494 if (result) 506 if (result) {
495 pr_debug("%s:%d ps3_system_bus_device_register failed\n", 507 pr_debug("%s:%d ps3_system_bus_device_register failed\n",
496 __func__, __LINE__); 508 __func__, __LINE__);
509 goto fail_device_register;
510 }
497 511
498 pr_debug(" <- %s:%d\n", __func__, __LINE__); 512 pr_debug(" <- %s:%d\n", __func__, __LINE__);
513 return 0;
514
515fail_device_register:
516 kfree(p);
517 pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
499 return result; 518 return result;
500} 519}
501 520
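The three device-init.c hunks above share one fix: when ps3_system_bus_device_register() fails, the partially set-up device is now freed through a goto label instead of being leaked. A condensed sketch of the pattern; the wrapper struct and function name are hypothetical:

#include <linux/slab.h>
#include <asm/ps3.h>

struct example_ps3_dev {                /* hypothetical wrapper */
        struct ps3_system_bus_device dev;
};

static int __init example_register_device(void)
{
        struct example_ps3_dev *p;
        int result;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        /* ... fill in p->dev.match_id, p->dev.dev_type, ... */

        result = ps3_system_bus_device_register(&p->dev);
        if (result)
                goto fail_device_register;      /* don't leak 'p' */

        return 0;

fail_device_register:
        kfree(p);
        return result;
}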
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 3a58ffabccd9..a4d49dd9e8a9 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -649,7 +649,7 @@ static int dma_sb_region_create(struct ps3_dma_region *r)
649{ 649{
650 int result; 650 int result;
651 651
652 pr_info(" -> %s:%d:\n", __func__, __LINE__); 652 DBG(" -> %s:%d:\n", __func__, __LINE__);
653 653
654 BUG_ON(!r); 654 BUG_ON(!r);
655 655
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 77bc330263c4..35f3e85cf60e 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -23,7 +23,6 @@
23#include <linux/fs.h> 23#include <linux/fs.h>
24#include <linux/root_dev.h> 24#include <linux/root_dev.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/kexec.h>
27#include <linux/bootmem.h> 26#include <linux/bootmem.h>
28 27
29#include <asm/machdep.h> 28#include <asm/machdep.h>
@@ -42,6 +41,10 @@
42#define DBG pr_debug 41#define DBG pr_debug
43#endif 42#endif
44 43
44/* mutex synchronizing GPU accesses and video mode changes */
45DEFINE_MUTEX(ps3_gpu_mutex);
46EXPORT_SYMBOL_GPL(ps3_gpu_mutex);
47
45#if !defined(CONFIG_SMP) 48#if !defined(CONFIG_SMP)
46static void smp_send_stop(void) {} 49static void smp_send_stop(void) {}
47#endif 50#endif
@@ -277,8 +280,5 @@ define_machine(ps3) {
277 .halt = ps3_halt, 280 .halt = ps3_halt,
278#if defined(CONFIG_KEXEC) 281#if defined(CONFIG_KEXEC)
279 .kexec_cpu_down = ps3_kexec_cpu_down, 282 .kexec_cpu_down = ps3_kexec_cpu_down,
280 .machine_kexec = default_machine_kexec,
281 .machine_kexec_prepare = default_machine_kexec_prepare,
282 .machine_crash_shutdown = default_machine_crash_shutdown,
283#endif 283#endif
284}; 284};
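setup.c now defines and exports ps3_gpu_mutex, documented as serializing GPU accesses against video mode changes. A minimal usage sketch; the caller below is hypothetical:

#include <linux/mutex.h>

extern struct mutex ps3_gpu_mutex;      /* exported by the hunk above */

static void example_video_mode_change(void)
{
        mutex_lock(&ps3_gpu_mutex);
        /* ... perform the mode change while GPU users are held off ... */
        mutex_unlock(&ps3_gpu_mutex);
}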
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 661e9f77ebf6..ee0d22911621 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -31,7 +31,7 @@
31#include "platform.h" 31#include "platform.h"
32 32
33static struct device ps3_system_bus = { 33static struct device ps3_system_bus = {
34 .bus_id = "ps3_system", 34 .init_name = "ps3_system",
35}; 35};
36 36
37/* FIXME: need device usage counters! */ 37/* FIXME: need device usage counters! */
@@ -175,7 +175,7 @@ int ps3_open_hv_device(struct ps3_system_bus_device *dev)
175 return ps3_open_hv_device_sb(dev); 175 return ps3_open_hv_device_sb(dev);
176 176
177 case PS3_MATCH_ID_SOUND: 177 case PS3_MATCH_ID_SOUND:
178 case PS3_MATCH_ID_GRAPHICS: 178 case PS3_MATCH_ID_GPU:
179 return ps3_open_hv_device_gpu(dev); 179 return ps3_open_hv_device_gpu(dev);
180 180
181 case PS3_MATCH_ID_AV_SETTINGS: 181 case PS3_MATCH_ID_AV_SETTINGS:
@@ -213,7 +213,7 @@ int ps3_close_hv_device(struct ps3_system_bus_device *dev)
213 return ps3_close_hv_device_sb(dev); 213 return ps3_close_hv_device_sb(dev);
214 214
215 case PS3_MATCH_ID_SOUND: 215 case PS3_MATCH_ID_SOUND:
216 case PS3_MATCH_ID_GRAPHICS: 216 case PS3_MATCH_ID_GPU:
217 return ps3_close_hv_device_gpu(dev); 217 return ps3_close_hv_device_gpu(dev);
218 218
219 case PS3_MATCH_ID_AV_SETTINGS: 219 case PS3_MATCH_ID_AV_SETTINGS:
@@ -356,12 +356,12 @@ static int ps3_system_bus_match(struct device *_dev,
356 if (result) 356 if (result)
357 pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n", 357 pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n",
358 __func__, __LINE__, 358 __func__, __LINE__,
359 dev->match_id, dev->match_sub_id, dev->core.bus_id, 359 dev->match_id, dev->match_sub_id, dev_name(&dev->core),
360 drv->match_id, drv->match_sub_id, drv->core.name); 360 drv->match_id, drv->match_sub_id, drv->core.name);
361 else 361 else
362 pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n", 362 pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n",
363 __func__, __LINE__, 363 __func__, __LINE__,
364 dev->match_id, dev->match_sub_id, dev->core.bus_id, 364 dev->match_id, dev->match_sub_id, dev_name(&dev->core),
365 drv->match_id, drv->match_sub_id, drv->core.name); 365 drv->match_id, drv->match_sub_id, drv->core.name);
366 366
367 return result; 367 return result;
@@ -383,9 +383,9 @@ static int ps3_system_bus_probe(struct device *_dev)
383 result = drv->probe(dev); 383 result = drv->probe(dev);
384 else 384 else
385 pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__, 385 pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__,
386 dev->core.bus_id); 386 dev_name(&dev->core));
387 387
388 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id); 388 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
389 return result; 389 return result;
390} 390}
391 391
@@ -407,7 +407,7 @@ static int ps3_system_bus_remove(struct device *_dev)
407 dev_dbg(&dev->core, "%s:%d %s: no remove method\n", 407 dev_dbg(&dev->core, "%s:%d %s: no remove method\n",
408 __func__, __LINE__, drv->core.name); 408 __func__, __LINE__, drv->core.name);
409 409
410 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id); 410 pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
411 return result; 411 return result;
412} 412}
413 413
@@ -432,7 +432,7 @@ static void ps3_system_bus_shutdown(struct device *_dev)
432 BUG_ON(!drv); 432 BUG_ON(!drv);
433 433
434 dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__, 434 dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__,
435 dev->core.bus_id, drv->core.name); 435 dev_name(&dev->core), drv->core.name);
436 436
437 if (drv->shutdown) 437 if (drv->shutdown)
438 drv->shutdown(dev); 438 drv->shutdown(dev);
@@ -453,7 +453,8 @@ static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *en
453{ 453{
454 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 454 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
455 455
456 if (add_uevent_var(env, "MODALIAS=ps3:%d", dev->match_id)) 456 if (add_uevent_var(env, "MODALIAS=ps3:%d:%d", dev->match_id,
457 dev->match_sub_id))
457 return -ENOMEM; 458 return -ENOMEM;
458 return 0; 459 return 0;
459} 460}
@@ -462,7 +463,8 @@ static ssize_t modalias_show(struct device *_dev, struct device_attribute *a,
462 char *buf) 463 char *buf)
463{ 464{
464 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 465 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
465 int len = snprintf(buf, PAGE_SIZE, "ps3:%d\n", dev->match_id); 466 int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id,
467 dev->match_sub_id);
466 468
467 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; 469 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
468} 470}
@@ -742,22 +744,18 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
742 switch (dev->dev_type) { 744 switch (dev->dev_type) {
743 case PS3_DEVICE_TYPE_IOC0: 745 case PS3_DEVICE_TYPE_IOC0:
744 dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops; 746 dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
745 snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), 747 dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
746 "ioc0_%02x", ++dev_ioc0_count);
747 break; 748 break;
748 case PS3_DEVICE_TYPE_SB: 749 case PS3_DEVICE_TYPE_SB:
749 dev->core.archdata.dma_ops = &ps3_sb_dma_ops; 750 dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
750 snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), 751 dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
751 "sb_%02x", ++dev_sb_count);
752 752
753 break; 753 break;
754 case PS3_DEVICE_TYPE_VUART: 754 case PS3_DEVICE_TYPE_VUART:
755 snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), 755 dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count);
756 "vuart_%02x", ++dev_vuart_count);
757 break; 756 break;
758 case PS3_DEVICE_TYPE_LPM: 757 case PS3_DEVICE_TYPE_LPM:
759 snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), 758 dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count);
760 "lpm_%02x", ++dev_lpm_count);
761 break; 759 break;
762 default: 760 default:
763 BUG(); 761 BUG();
@@ -766,7 +764,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
766 dev->core.archdata.of_node = NULL; 764 dev->core.archdata.of_node = NULL;
767 set_dev_node(&dev->core, 0); 765 set_dev_node(&dev->core, 0);
768 766
769 pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id); 767 pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
770 768
771 result = device_register(&dev->core); 769 result = device_register(&dev->core);
772 return result; 770 return result;
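system-bus.c is converted from writing the fixed-size bus_id[] array directly to the dev_set_name()/dev_name() accessors, so callers no longer depend on how struct device stores its name. Side by side, using the format string from this diff:

/* Before (removed): format into the fixed-size bus_id[] field */
snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
         "sb_%02x", ++dev_sb_count);

/* After (added): let the driver core manage the name storage */
dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));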
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 97619fd51e39..ddc2a307cd50 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -54,7 +54,7 @@ config PPC_SMLPAR
54 54
55config CMM 55config CMM
56 tristate "Collaborative memory management" 56 tristate "Collaborative memory management"
57 depends on PPC_SMLPAR 57 depends on PPC_SMLPAR && !CRASH_DUMP
58 default y 58 default y
59 help 59 help
60 Select this option, if you want to enable the kernel interface 60 Select this option, if you want to enable the kernel interface
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 5cd4d2761620..6567439fe78d 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -28,6 +28,7 @@
28#include <linux/kthread.h> 28#include <linux/kthread.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/oom.h> 30#include <linux/oom.h>
31#include <linux/reboot.h>
31#include <linux/sched.h> 32#include <linux/sched.h>
32#include <linux/stringify.h> 33#include <linux/stringify.h>
33#include <linux/swap.h> 34#include <linux/swap.h>
@@ -384,6 +385,26 @@ static void cmm_unregister_sysfs(struct sys_device *sysdev)
384} 385}
385 386
386/** 387/**
388 * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
389 *
390 **/
391static int cmm_reboot_notifier(struct notifier_block *nb,
392 unsigned long action, void *unused)
393{
394 if (action == SYS_RESTART) {
395 if (cmm_thread_ptr)
396 kthread_stop(cmm_thread_ptr);
397 cmm_thread_ptr = NULL;
398 cmm_free_pages(loaned_pages);
399 }
400 return NOTIFY_DONE;
401}
402
403static struct notifier_block cmm_reboot_nb = {
404 .notifier_call = cmm_reboot_notifier,
405};
406
407/**
387 * cmm_init - Module initialization 408 * cmm_init - Module initialization
388 * 409 *
389 * Return value: 410 * Return value:
@@ -399,9 +420,12 @@ static int cmm_init(void)
399 if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0) 420 if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
400 return rc; 421 return rc;
401 422
402 if ((rc = cmm_sysfs_register(&cmm_sysdev))) 423 if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
403 goto out_oom_notifier; 424 goto out_oom_notifier;
404 425
426 if ((rc = cmm_sysfs_register(&cmm_sysdev)))
427 goto out_reboot_notifier;
428
405 if (cmm_disabled) 429 if (cmm_disabled)
406 return rc; 430 return rc;
407 431
@@ -415,6 +439,8 @@ static int cmm_init(void)
415 439
416out_unregister_sysfs: 440out_unregister_sysfs:
417 cmm_unregister_sysfs(&cmm_sysdev); 441 cmm_unregister_sysfs(&cmm_sysdev);
442out_reboot_notifier:
443 unregister_reboot_notifier(&cmm_reboot_nb);
418out_oom_notifier: 444out_oom_notifier:
419 unregister_oom_notifier(&cmm_oom_nb); 445 unregister_oom_notifier(&cmm_oom_nb);
420 return rc; 446 return rc;
@@ -431,6 +457,7 @@ static void cmm_exit(void)
431 if (cmm_thread_ptr) 457 if (cmm_thread_ptr)
432 kthread_stop(cmm_thread_ptr); 458 kthread_stop(cmm_thread_ptr);
433 unregister_oom_notifier(&cmm_oom_nb); 459 unregister_oom_notifier(&cmm_oom_nb);
460 unregister_reboot_notifier(&cmm_reboot_nb);
434 cmm_free_pages(loaned_pages); 461 cmm_free_pages(loaned_pages);
435 cmm_unregister_sysfs(&cmm_sysdev); 462 cmm_unregister_sysfs(&cmm_sysdev);
436} 463}
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 54816d75b578..989d6462c154 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -21,6 +21,8 @@
21 * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> 21 * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
22 */ 22 */
23 23
24#undef DEBUG
25
24#include <linux/delay.h> 26#include <linux/delay.h>
25#include <linux/init.h> 27#include <linux/init.h>
26#include <linux/list.h> 28#include <linux/list.h>
@@ -488,10 +490,8 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
488 if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) || 490 if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
489 pdn->eeh_mode & EEH_MODE_NOCHECK) { 491 pdn->eeh_mode & EEH_MODE_NOCHECK) {
490 ignored_check++; 492 ignored_check++;
491#ifdef DEBUG 493 pr_debug("EEH: Ignored check (%x) for %s %s\n",
492 printk ("EEH:ignored check (%x) for %s %s\n", 494 pdn->eeh_mode, pci_name (dev), dn->full_name);
493 pdn->eeh_mode, pci_name (dev), dn->full_name);
494#endif
495 return 0; 495 return 0;
496 } 496 }
497 497
@@ -1014,10 +1014,9 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
1014 eeh_subsystem_enabled = 1; 1014 eeh_subsystem_enabled = 1;
1015 pdn->eeh_mode |= EEH_MODE_SUPPORTED; 1015 pdn->eeh_mode |= EEH_MODE_SUPPORTED;
1016 1016
1017#ifdef DEBUG 1017 pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
1018 printk(KERN_DEBUG "EEH: %s: eeh enabled, config=%x pe_config=%x\n", 1018 dn->full_name, pdn->eeh_config_addr,
1019 dn->full_name, pdn->eeh_config_addr, pdn->eeh_pe_config_addr); 1019 pdn->eeh_pe_config_addr);
1020#endif
1021 } else { 1020 } else {
1022 1021
1023 /* This device doesn't support EEH, but it may have an 1022 /* This device doesn't support EEH, but it may have an
@@ -1161,13 +1160,17 @@ static void eeh_add_device_late(struct pci_dev *dev)
1161 if (!dev || !eeh_subsystem_enabled) 1160 if (!dev || !eeh_subsystem_enabled)
1162 return; 1161 return;
1163 1162
1164#ifdef DEBUG 1163 pr_debug("EEH: Adding device %s\n", pci_name(dev));
1165 printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
1166#endif
1167 1164
1168 pci_dev_get (dev);
1169 dn = pci_device_to_OF_node(dev); 1165 dn = pci_device_to_OF_node(dev);
1170 pdn = PCI_DN(dn); 1166 pdn = PCI_DN(dn);
1167 if (pdn->pcidev == dev) {
1168 pr_debug("EEH: Already referenced !\n");
1169 return;
1170 }
1171 WARN_ON(pdn->pcidev);
1172
1173 pci_dev_get (dev);
1171 pdn->pcidev = dev; 1174 pdn->pcidev = dev;
1172 1175
1173 pci_addr_cache_insert_device(dev); 1176 pci_addr_cache_insert_device(dev);
@@ -1206,17 +1209,18 @@ static void eeh_remove_device(struct pci_dev *dev)
1206 return; 1209 return;
1207 1210
1208 /* Unregister the device with the EEH/PCI address search system */ 1211 /* Unregister the device with the EEH/PCI address search system */
1209#ifdef DEBUG 1212 pr_debug("EEH: Removing device %s\n", pci_name(dev));
1210 printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
1211#endif
1212 pci_addr_cache_remove_device(dev);
1213 eeh_sysfs_remove_device(dev);
1214 1213
1215 dn = pci_device_to_OF_node(dev); 1214 dn = pci_device_to_OF_node(dev);
1216 if (PCI_DN(dn)->pcidev) { 1215 if (PCI_DN(dn)->pcidev == NULL) {
1217 PCI_DN(dn)->pcidev = NULL; 1216 pr_debug("EEH: Not referenced !\n");
1218 pci_dev_put (dev); 1217 return;
1219 } 1218 }
1219 PCI_DN(dn)->pcidev = NULL;
1220 pci_dev_put (dev);
1221
1222 pci_addr_cache_remove_device(dev);
1223 eeh_sysfs_remove_device(dev);
1220} 1224}
1221 1225
1222void eeh_remove_bus_device(struct pci_dev *dev) 1226void eeh_remove_bus_device(struct pci_dev *dev)
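The eeh.c hunks above drop hand-rolled #ifdef DEBUG / printk blocks in favour of pr_debug(). With the #undef DEBUG added at the top of the file, pr_debug() compiles to nothing here unless DEBUG (or dynamic debug) is enabled, so the explicit conditionals become redundant. The equivalence, using a line from this diff:

/* Before: printed only when the file is built with DEBUG defined */
#ifdef DEBUG
        printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
#endif

/* After: the same condition is handled inside pr_debug() itself */
        pr_debug("EEH: Adding device %s\n", pci_name(dev));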
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 1f032483c026..a20ead87153d 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -116,7 +116,7 @@ static void pseries_cpu_die(unsigned int cpu)
116 cpu_status = query_cpu_stopped(pcpu); 116 cpu_status = query_cpu_stopped(pcpu);
117 if (cpu_status == 0 || cpu_status == -1) 117 if (cpu_status == 0 || cpu_status == -1)
118 break; 118 break;
119 msleep(200); 119 cpu_relax();
120 } 120 }
121 if (cpu_status != 0) { 121 if (cpu_status != 0) {
122 printk("Querying DEAD? cpu %i (%i) shows %i\n", 122 printk("Querying DEAD? cpu %i (%i) shows %i\n",
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 7190493e9bdc..5e1ed3d60ee5 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -25,6 +25,8 @@
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */ 26 */
27 27
28#undef DEBUG
29
28#include <linux/pci.h> 30#include <linux/pci.h>
29#include <asm/pci-bridge.h> 31#include <asm/pci-bridge.h>
30#include <asm/ppc-pci.h> 32#include <asm/ppc-pci.h>
@@ -69,74 +71,25 @@ EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
69 * Remove all of the PCI devices under this bus both from the 71 * Remove all of the PCI devices under this bus both from the
70 * linux pci device tree, and from the powerpc EEH address cache. 72 * linux pci device tree, and from the powerpc EEH address cache.
71 */ 73 */
72void 74void pcibios_remove_pci_devices(struct pci_bus *bus)
73pcibios_remove_pci_devices(struct pci_bus *bus)
74{ 75{
75 struct pci_dev *dev, *tmp; 76 struct pci_dev *dev, *tmp;
77 struct pci_bus *child_bus;
78
79 /* First go down child busses */
80 list_for_each_entry(child_bus, &bus->children, node)
81 pcibios_remove_pci_devices(child_bus);
76 82
83 pr_debug("PCI: Removing devices on bus %04x:%02x\n",
84 pci_domain_nr(bus), bus->number);
77 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 85 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
86 pr_debug(" * Removing %s...\n", pci_name(dev));
78 eeh_remove_bus_device(dev); 87 eeh_remove_bus_device(dev);
79 pci_remove_bus_device(dev); 88 pci_remove_bus_device(dev);
80 } 89 }
81} 90}
82EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); 91EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
83 92
84/* Must be called before pci_bus_add_devices */
85void
86pcibios_fixup_new_pci_devices(struct pci_bus *bus)
87{
88 struct pci_dev *dev;
89
90 list_for_each_entry(dev, &bus->devices, bus_list) {
91 /* Skip already-added devices */
92 if (!dev->is_added) {
93 int i;
94
95 /* Fill device archdata and setup iommu table */
96 pcibios_setup_new_device(dev);
97
98 pci_read_irq_line(dev);
99 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
100 struct resource *r = &dev->resource[i];
101
102 if (r->parent || !r->start || !r->flags)
103 continue;
104 pci_claim_resource(dev, i);
105 }
106 }
107 }
108}
109EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices);
110
111static int
112pcibios_pci_config_bridge(struct pci_dev *dev)
113{
114 u8 sec_busno;
115 struct pci_bus *child_bus;
116
117 /* Get busno of downstream bus */
118 pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno);
119
120 /* Add to children of PCI bridge dev->bus */
121 child_bus = pci_add_new_bus(dev->bus, dev, sec_busno);
122 if (!child_bus) {
123 printk (KERN_ERR "%s: could not add second bus\n", __func__);
124 return -EIO;
125 }
126 sprintf(child_bus->name, "PCI Bus #%02x", child_bus->number);
127
128 pci_scan_child_bus(child_bus);
129
130 /* Fixup new pci devices */
131 pcibios_fixup_new_pci_devices(child_bus);
132
133 /* Make the discovered devices available */
134 pci_bus_add_devices(child_bus);
135
136 eeh_add_device_tree_late(child_bus);
137 return 0;
138}
139
140/** 93/**
141 * pcibios_add_pci_devices - adds new pci devices to bus 94 * pcibios_add_pci_devices - adds new pci devices to bus
142 * 95 *
@@ -147,10 +100,9 @@ pcibios_pci_config_bridge(struct pci_dev *dev)
147 * is how this routine differs from other, similar pcibios 100 * is how this routine differs from other, similar pcibios
148 * routines.) 101 * routines.)
149 */ 102 */
150void 103void pcibios_add_pci_devices(struct pci_bus * bus)
151pcibios_add_pci_devices(struct pci_bus * bus)
152{ 104{
153 int slotno, num, mode; 105 int slotno, num, mode, pass, max;
154 struct pci_dev *dev; 106 struct pci_dev *dev;
155 struct device_node *dn = pci_bus_to_OF_node(bus); 107 struct device_node *dn = pci_bus_to_OF_node(bus);
156 108
@@ -162,26 +114,23 @@ pcibios_add_pci_devices(struct pci_bus * bus)
162 114
163 if (mode == PCI_PROBE_DEVTREE) { 115 if (mode == PCI_PROBE_DEVTREE) {
164 /* use ofdt-based probe */ 116 /* use ofdt-based probe */
165 of_scan_bus(dn, bus); 117 of_rescan_bus(dn, bus);
166 if (!list_empty(&bus->devices)) {
167 pcibios_fixup_new_pci_devices(bus);
168 pci_bus_add_devices(bus);
169 eeh_add_device_tree_late(bus);
170 }
171 } else if (mode == PCI_PROBE_NORMAL) { 118 } else if (mode == PCI_PROBE_NORMAL) {
172 /* use legacy probe */ 119 /* use legacy probe */
173 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); 120 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
174 num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); 121 num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
175 if (num) { 122 if (!num)
176 pcibios_fixup_new_pci_devices(bus); 123 return;
177 pci_bus_add_devices(bus); 124 pcibios_setup_bus_devices(bus);
178 eeh_add_device_tree_late(bus); 125 max = bus->secondary;
126 for (pass=0; pass < 2; pass++)
127 list_for_each_entry(dev, &bus->devices, bus_list) {
128 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
129 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
130 max = pci_scan_bridge(bus, dev, max, pass);
179 } 131 }
180
181 list_for_each_entry(dev, &bus->devices, bus_list)
182 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
183 pcibios_pci_config_bridge(dev);
184 } 132 }
133 pcibios_finish_adding_to_bus(bus);
185} 134}
186EXPORT_SYMBOL_GPL(pcibios_add_pci_devices); 135EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
187 136
@@ -190,6 +139,8 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
190 struct pci_controller *phb; 139 struct pci_controller *phb;
191 int primary; 140 int primary;
192 141
142 pr_debug("PCI: Initializing new hotplug PHB %s\n", dn->full_name);
143
193 primary = list_empty(&hose_list); 144 primary = list_empty(&hose_list);
194 phb = pcibios_alloc_controller(dn); 145 phb = pcibios_alloc_controller(dn);
195 if (!phb) 146 if (!phb)
@@ -203,11 +154,59 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
203 eeh_add_device_tree_early(dn); 154 eeh_add_device_tree_early(dn);
204 155
205 scan_phb(phb); 156 scan_phb(phb);
206 pcibios_allocate_bus_resources(phb->bus); 157 pcibios_finish_adding_to_bus(phb->bus);
207 pcibios_fixup_new_pci_devices(phb->bus);
208 pci_bus_add_devices(phb->bus);
209 eeh_add_device_tree_late(phb->bus);
210 158
211 return phb; 159 return phb;
212} 160}
213EXPORT_SYMBOL_GPL(init_phb_dynamic); 161EXPORT_SYMBOL_GPL(init_phb_dynamic);
162
163/* RPA-specific bits for removing PHBs */
164int remove_phb_dynamic(struct pci_controller *phb)
165{
166 struct pci_bus *b = phb->bus;
167 struct resource *res;
168 int rc, i;
169
170 pr_debug("PCI: Removing PHB %04x:%02x... \n",
171 pci_domain_nr(b), b->number);
172
173 /* We cannot remove a root bus that has children */
174 if (!(list_empty(&b->children) && list_empty(&b->devices)))
175 return -EBUSY;
176
177 /* We -know- there aren't any child devices anymore at this stage
178 * and thus, we can safely unmap the IO space as it's not in use
179 */
180 res = &phb->io_resource;
181 if (res->flags & IORESOURCE_IO) {
182 rc = pcibios_unmap_io_space(b);
183 if (rc) {
184 printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
185 __func__, b->name);
186 return 1;
187 }
188 }
189
190 /* Unregister the bridge device from sysfs and remove the PCI bus */
191 device_unregister(b->bridge);
192 phb->bus = NULL;
193 pci_remove_bus(b);
194
195 /* Now release the IO resource */
196 if (res->flags & IORESOURCE_IO)
197 release_resource(res);
198
199 /* Release memory resources */
200 for (i = 0; i < 3; ++i) {
201 res = &phb->mem_resources[i];
202 if (!(res->flags & IORESOURCE_MEM))
203 continue;
204 release_resource(res);
205 }
206
207 /* Free pci_controller data structure */
208 pcibios_free_controller(phb);
209
210 return 0;
211}
212EXPORT_SYMBOL_GPL(remove_phb_dynamic);
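pci_dlpar.c also gains remove_phb_dynamic(), the tear-down counterpart to init_phb_dynamic(). A hypothetical sketch of how a DLPAR remove path might use it; the PCI_DN() lookup and the function name are assumptions, not part of this patch:

/* Hypothetical PHB hot-remove path; error handling trimmed. */
static int example_remove_phb(struct device_node *dn)
{
        struct pci_controller *phb = PCI_DN(dn)->phb;   /* assumed lookup */

        if (!phb)
                return -EINVAL;

        /* remove_phb_dynamic() refuses a busy bridge, so empty it first. */
        pcibios_remove_pci_devices(phb->bus);

        return remove_phb_dynamic(phb);
}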
diff --git a/arch/powerpc/platforms/pseries/phyp_dump.c b/arch/powerpc/platforms/pseries/phyp_dump.c
index edbc012c2ebc..6cf35cd8d0b5 100644
--- a/arch/powerpc/platforms/pseries/phyp_dump.c
+++ b/arch/powerpc/platforms/pseries/phyp_dump.c
@@ -130,6 +130,9 @@ static unsigned long init_dump_header(struct phyp_dump_header *ph)
130static void print_dump_header(const struct phyp_dump_header *ph) 130static void print_dump_header(const struct phyp_dump_header *ph)
131{ 131{
132#ifdef DEBUG 132#ifdef DEBUG
133 if (ph == NULL)
134 return;
135
133 printk(KERN_INFO "dump header:\n"); 136 printk(KERN_INFO "dump header:\n");
134 /* setup some ph->sections required */ 137 /* setup some ph->sections required */
135 printk(KERN_INFO "version = %d\n", ph->version); 138 printk(KERN_INFO "version = %d\n", ph->version);
@@ -411,6 +414,8 @@ static int __init phyp_dump_setup(void)
411 of_node_put(rtas); 414 of_node_put(rtas);
412 } 415 }
413 416
417 ibm_configure_kernel_dump = rtas_token("ibm,configure-kernel-dump");
418
414 print_dump_header(dump_header); 419 print_dump_header(dump_header);
415 dump_area_length = init_dump_header(&phdr); 420 dump_area_length = init_dump_header(&phdr);
416 /* align down */ 421 /* align down */
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index f4e55be2eea9..afad9f5ac0ac 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -208,6 +208,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
208 break; 208 break;
209 case ERR_TYPE_KERNEL_PANIC: 209 case ERR_TYPE_KERNEL_PANIC:
210 default: 210 default:
211 WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
211 spin_unlock_irqrestore(&rtasd_log_lock, s); 212 spin_unlock_irqrestore(&rtasd_log_lock, s);
212 return; 213 return;
213 } 214 }
@@ -227,6 +228,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
227 /* Check to see if we need to or have stopped logging */ 228 /* Check to see if we need to or have stopped logging */
228 if (fatal || !logging_enabled) { 229 if (fatal || !logging_enabled) {
229 logging_enabled = 0; 230 logging_enabled = 0;
231 WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
230 spin_unlock_irqrestore(&rtasd_log_lock, s); 232 spin_unlock_irqrestore(&rtasd_log_lock, s);
231 return; 233 return;
232 } 234 }
@@ -249,11 +251,13 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
249 else 251 else
250 rtas_log_start += 1; 252 rtas_log_start += 1;
251 253
254 WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
252 spin_unlock_irqrestore(&rtasd_log_lock, s); 255 spin_unlock_irqrestore(&rtasd_log_lock, s);
253 wake_up_interruptible(&rtas_log_wait); 256 wake_up_interruptible(&rtas_log_wait);
254 break; 257 break;
255 case ERR_TYPE_KERNEL_PANIC: 258 case ERR_TYPE_KERNEL_PANIC:
256 default: 259 default:
260 WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
257 spin_unlock_irqrestore(&rtasd_log_lock, s); 261 spin_unlock_irqrestore(&rtasd_log_lock, s);
258 return; 262 return;
259 } 263 }
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 424b335a71c8..84e058f1e1cc 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -579,7 +579,7 @@ static void xics_update_irq_servers(void)
579 int i, j; 579 int i, j;
580 struct device_node *np; 580 struct device_node *np;
581 u32 ilen; 581 u32 ilen;
582 const u32 *ireg, *isize; 582 const u32 *ireg;
583 u32 hcpuid; 583 u32 hcpuid;
584 584
585 /* Find the server numbers for the boot cpu. */ 585 /* Find the server numbers for the boot cpu. */
@@ -607,11 +607,6 @@ static void xics_update_irq_servers(void)
607 } 607 }
608 } 608 }
609 609
610 /* get the bit size of server numbers */
611 isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
612 if (isize)
613 interrupt_server_size = *isize;
614
615 of_node_put(np); 610 of_node_put(np);
616} 611}
617 612
@@ -682,6 +677,7 @@ void __init xics_init_IRQ(void)
682 struct device_node *np; 677 struct device_node *np;
683 u32 indx = 0; 678 u32 indx = 0;
684 int found = 0; 679 int found = 0;
680 const u32 *isize;
685 681
686 ppc64_boot_msg(0x20, "XICS Init"); 682 ppc64_boot_msg(0x20, "XICS Init");
687 683
@@ -701,6 +697,26 @@ void __init xics_init_IRQ(void)
701 if (found == 0) 697 if (found == 0)
702 return; 698 return;
703 699
700 /* get the bit size of server numbers */
701 found = 0;
702
703 for_each_compatible_node(np, NULL, "ibm,ppc-xics") {
704 isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
705
706 if (!isize)
707 continue;
708
709 if (!found) {
710 interrupt_server_size = *isize;
711 found = 1;
712 } else if (*isize != interrupt_server_size) {
713 printk(KERN_WARNING "XICS: "
714 "mismatched ibm,interrupt-server#-size\n");
715 interrupt_server_size = max(*isize,
716 interrupt_server_size);
717 }
718 }
719
704 xics_update_irq_servers(); 720 xics_update_irq_servers();
705 xics_init_host(); 721 xics_init_host();
706 722
@@ -728,9 +744,18 @@ static void xics_set_cpu_priority(unsigned char cppr)
728/* Have the calling processor join or leave the specified global queue */ 744/* Have the calling processor join or leave the specified global queue */
729static void xics_set_cpu_giq(unsigned int gserver, unsigned int join) 745static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
730{ 746{
731 int status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, 747 int index;
732 (1UL << interrupt_server_size) - 1 - gserver, join); 748 int status;
733 WARN_ON(status < 0); 749
750 if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
751 return;
752
753 index = (1UL << interrupt_server_size) - 1 - gserver;
754
755 status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
756
757 WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
758 GLOBAL_INTERRUPT_QUEUE, index, join, status);
734} 759}
735 760
736void xics_setup_cpu(void) 761void xics_setup_cpu(void)
diff --git a/arch/powerpc/sysdev/bestcomm/ata.c b/arch/powerpc/sysdev/bestcomm/ata.c
index 1f5258fb38c3..901c9f91e5dd 100644
--- a/arch/powerpc/sysdev/bestcomm/ata.c
+++ b/arch/powerpc/sysdev/bestcomm/ata.c
@@ -61,6 +61,9 @@ bcom_ata_init(int queue_len, int maxbufsize)
61 struct bcom_ata_var *var; 61 struct bcom_ata_var *var;
62 struct bcom_ata_inc *inc; 62 struct bcom_ata_inc *inc;
63 63
64 /* Prefetch breaks ATA DMA. Turn it off for ATA DMA */
65 bcom_disable_prefetch();
66
64 tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0); 67 tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
65 if (!tsk) 68 if (!tsk)
66 return NULL; 69 return NULL;
diff --git a/arch/powerpc/sysdev/bestcomm/ata.h b/arch/powerpc/sysdev/bestcomm/ata.h
index 10982769c465..0b2371811334 100644
--- a/arch/powerpc/sysdev/bestcomm/ata.h
+++ b/arch/powerpc/sysdev/bestcomm/ata.h
@@ -16,22 +16,15 @@
16 16
17struct bcom_ata_bd { 17struct bcom_ata_bd {
18 u32 status; 18 u32 status;
19 u32 dst_pa;
20 u32 src_pa; 19 u32 src_pa;
20 u32 dst_pa;
21}; 21};
22 22
23extern struct bcom_task * 23extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize);
24bcom_ata_init(int queue_len, int maxbufsize); 24extern void bcom_ata_rx_prepare(struct bcom_task *tsk);
25 25extern void bcom_ata_tx_prepare(struct bcom_task *tsk);
26extern void 26extern void bcom_ata_reset_bd(struct bcom_task *tsk);
27bcom_ata_rx_prepare(struct bcom_task *tsk); 27extern void bcom_ata_release(struct bcom_task *tsk);
28
29extern void
30bcom_ata_tx_prepare(struct bcom_task *tsk);
31
32extern void
33bcom_ata_reset_bd(struct bcom_task *tsk);
34
35 28
36#endif /* __BESTCOMM_ATA_H__ */ 29#endif /* __BESTCOMM_ATA_H__ */
37 30
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index 446c9ea85b30..378ebd9aac18 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -279,7 +279,6 @@ bcom_engine_init(void)
279 int task; 279 int task;
280 phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa; 280 phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
281 unsigned int tdt_size, ctx_size, var_size, fdt_size; 281 unsigned int tdt_size, ctx_size, var_size, fdt_size;
282 u16 regval;
283 282
284 /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */ 283 /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
285 tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt); 284 tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
@@ -331,10 +330,8 @@ bcom_engine_init(void)
331 out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS); 330 out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
332 331
333 /* Disable COMM Bus Prefetch on the original 5200; it's broken */ 332 /* Disable COMM Bus Prefetch on the original 5200; it's broken */
334 if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) { 333 if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
335 regval = in_be16(&bcom_eng->regs->PtdCntrl); 334 bcom_disable_prefetch();
336 out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
337 }
338 335
339 /* Init lock */ 336 /* Init lock */
340 spin_lock_init(&bcom_eng->lock); 337 spin_lock_init(&bcom_eng->lock);
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h
index c960a8b49655..23a95f80dfdb 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h
@@ -16,8 +16,19 @@
16#ifndef __BESTCOMM_H__ 16#ifndef __BESTCOMM_H__
17#define __BESTCOMM_H__ 17#define __BESTCOMM_H__
18 18
19struct bcom_bd; /* defined later on ... */ 19/**
20 20 * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
21 * @status: The current status of this buffer. Exact meaning depends on the
22 * task type
23 * @data: An array of u32 extra data. Size of array is task dependent.
24 *
25 * Note: Don't dereference a bcom_bd pointer as an array. The size of the
26 * bcom_bd is variable. Use bcom_get_bd() instead.
27 */
28struct bcom_bd {
29 u32 status;
30 u32 data[0]; /* variable payload size */
31};
21 32
22/* ======================================================================== */ 33/* ======================================================================== */
23/* Generic task management */ 34/* Generic task management */
@@ -84,17 +95,6 @@ bcom_get_task_irq(struct bcom_task *tsk) {
84/* BD based tasks helpers */ 95/* BD based tasks helpers */
85/* ======================================================================== */ 96/* ======================================================================== */
86 97
87/**
88 * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
89 * @status: The current status of this buffer. Exact meaning depends on the
90 * task type
91 * @data: An array of u32 whose meaning depends on the task type.
92 */
93struct bcom_bd {
94 u32 status;
95 u32 data[1]; /* variable, but at least 1 */
96};
97
98#define BCOM_BD_READY 0x40000000ul 98#define BCOM_BD_READY 0x40000000ul
99 99
100/** _bcom_next_index - Get next input index. 100/** _bcom_next_index - Get next input index.
@@ -140,15 +140,31 @@ bcom_queue_full(struct bcom_task *tsk)
140} 140}
141 141
142/** 142/**
143 * bcom_get_bd - Get a BD from the queue
144 * @tsk: The BestComm task structure
145 * index: Index of the BD to fetch
146 */
147static inline struct bcom_bd
148*bcom_get_bd(struct bcom_task *tsk, unsigned int index)
149{
150 /* A cast to (void*) so the address can be incremented by the
151 * real size instead of by sizeof(struct bcom_bd) */
152 return ((void *)tsk->bd) + (index * tsk->bd_size);
153}
154
155/**
143 * bcom_buffer_done - Checks if a BestComm 156 * bcom_buffer_done - Checks if a BestComm
144 * @tsk: The BestComm task structure 157 * @tsk: The BestComm task structure
145 */ 158 */
146static inline int 159static inline int
147bcom_buffer_done(struct bcom_task *tsk) 160bcom_buffer_done(struct bcom_task *tsk)
148{ 161{
162 struct bcom_bd *bd;
149 if (bcom_queue_empty(tsk)) 163 if (bcom_queue_empty(tsk))
150 return 0; 164 return 0;
151 return !(tsk->bd[tsk->outdex].status & BCOM_BD_READY); 165
166 bd = bcom_get_bd(tsk, tsk->outdex);
167 return !(bd->status & BCOM_BD_READY);
152} 168}
153 169
154/** 170/**
@@ -160,16 +176,21 @@ bcom_buffer_done(struct bcom_task *tsk)
160static inline struct bcom_bd * 176static inline struct bcom_bd *
161bcom_prepare_next_buffer(struct bcom_task *tsk) 177bcom_prepare_next_buffer(struct bcom_task *tsk)
162{ 178{
163 tsk->bd[tsk->index].status = 0; /* cleanup last status */ 179 struct bcom_bd *bd;
164 return &tsk->bd[tsk->index]; 180
181 bd = bcom_get_bd(tsk, tsk->index);
182 bd->status = 0; /* cleanup last status */
183 return bd;
165} 184}
166 185
167static inline void 186static inline void
168bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie) 187bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie)
169{ 188{
189 struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index);
190
170 tsk->cookie[tsk->index] = cookie; 191 tsk->cookie[tsk->index] = cookie;
171 mb(); /* ensure the bd is really up-to-date */ 192 mb(); /* ensure the bd is really up-to-date */
172 tsk->bd[tsk->index].status |= BCOM_BD_READY; 193 bd->status |= BCOM_BD_READY;
173 tsk->index = _bcom_next_index(tsk); 194 tsk->index = _bcom_next_index(tsk);
174 if (tsk->flags & BCOM_FLAGS_ENABLE_TASK) 195 if (tsk->flags & BCOM_FLAGS_ENABLE_TASK)
175 bcom_enable(tsk); 196 bcom_enable(tsk);
@@ -179,10 +200,12 @@ static inline void *
179bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd) 200bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd)
180{ 201{
181 void *cookie = tsk->cookie[tsk->outdex]; 202 void *cookie = tsk->cookie[tsk->outdex];
203 struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex);
204
182 if (p_status) 205 if (p_status)
183 *p_status = tsk->bd[tsk->outdex].status; 206 *p_status = bd->status;
184 if (p_bd) 207 if (p_bd)
185 *p_bd = &tsk->bd[tsk->outdex]; 208 *p_bd = bd;
186 tsk->outdex = _bcom_next_outdex(tsk); 209 tsk->outdex = _bcom_next_outdex(tsk);
187 return cookie; 210 return cookie;
188} 211}
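With bcom_bd now carrying a variable-length data[] tail, descriptors can no longer be indexed as a plain C array; bcom_get_bd() steps through the ring by the task's runtime bd_size instead. A small sketch of walking a ring that way; num_bd is assumed to be the ring-length field of struct bcom_task:

/* Sketch: dump the status word of every descriptor in a task's ring. */
static void example_dump_ring(struct bcom_task *tsk)
{
        unsigned int i;

        for (i = 0; i < tsk->num_bd; i++) {
                struct bcom_bd *bd = bcom_get_bd(tsk, i);

                pr_debug("bd[%u]: status=%08x\n", i, bd->status);
        }
}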
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
index 866a2915ef2f..eb0d1c883c31 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
@@ -198,8 +198,8 @@ struct bcom_task_header {
198#define BCOM_IPR_SCTMR_1 2 198#define BCOM_IPR_SCTMR_1 2
199#define BCOM_IPR_FEC_RX 6 199#define BCOM_IPR_FEC_RX 6
200#define BCOM_IPR_FEC_TX 5 200#define BCOM_IPR_FEC_TX 5
201#define BCOM_IPR_ATA_RX 4 201#define BCOM_IPR_ATA_RX 7
202#define BCOM_IPR_ATA_TX 3 202#define BCOM_IPR_ATA_TX 7
203#define BCOM_IPR_SCPCI_RX 2 203#define BCOM_IPR_SCPCI_RX 2
204#define BCOM_IPR_SCPCI_TX 2 204#define BCOM_IPR_SCPCI_TX 2
205#define BCOM_IPR_PSC3_RX 2 205#define BCOM_IPR_PSC3_RX 2
@@ -241,6 +241,22 @@ extern void bcom_set_initiator(int task, int initiator);
241 241
242#define TASK_ENABLE 0x8000 242#define TASK_ENABLE 0x8000
243 243
244/**
245 * bcom_disable_prefetch - Hook to disable bus prefetching
246 *
247 * ATA DMA and the original MPC5200 need this due to silicon bugs. At the
248 * moment disabling prefetch is a one-way street. There is no mechanism
249 * in place to turn prefetch back on after it has been disabled. There is
250 * no reason it couldn't be done, it would just be more complex to implement.
251 */
252static inline void bcom_disable_prefetch(void)
253{
254 u16 regval;
255
256 regval = in_be16(&bcom_eng->regs->PtdCntrl);
257 out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
258};
259
244static inline void 260static inline void
245bcom_enable_task(int task) 261bcom_enable_task(int task)
246{ 262{
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
index 2078f39e2f17..d3098ef1404a 100644
--- a/arch/powerpc/sysdev/dcr-low.S
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -11,14 +11,20 @@
 
 #include <asm/ppc_asm.h>
 #include <asm/processor.h>
+#include <asm/bug.h>
 
 #define DCR_ACCESS_PROLOG(table) \
+	cmpli	cr0,r3,1024;	\
 	rlwinm  r3,r3,4,18,27;	\
 	lis     r5,table@h;	\
 	ori     r5,r5,table@l;	\
 	add     r3,r3,r5;	\
+	bge-	1f;		\
 	mtctr   r3;		\
-	bctr
+	bctr;			\
+1:	trap;			\
+	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;	\
+	blr
 
 _GLOBAL(__mfdcr)
 	DCR_ACCESS_PROLOG(__mfdcr_table)
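The new cmpli/bge-/trap sequence range-checks the DCR number before branching through the generated stub table, so an out-of-range access now lands on a trap with a BUG table entry instead of jumping past the table. Roughly, in C terms (illustrative only; __dcr_stub_for() is a hypothetical stand-in for the mtctr/bctr dispatch):

	/* Rough C equivalent of the new DCR_ACCESS_PROLOG (illustrative only). */
	static unsigned long mfdcr_checked(unsigned int dcrn)
	{
		if (dcrn >= 1024)
			BUG();			/* was: silent jump off the end of the table */
		return __dcr_stub_for(dcrn);	/* hypothetical stand-in for the stub call */
	}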
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index a8ba9983dd5a..bb44aa9fd470 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(dcr_write_generic);
 
 #endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
 
-unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
+unsigned int dcr_resource_start(const struct device_node *np,
+				unsigned int index)
 {
 	unsigned int ds;
 	const u32 *dr = of_get_property(np, "dcr-reg", &ds);
@@ -136,7 +137,7 @@ unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
 }
 EXPORT_SYMBOL_GPL(dcr_resource_start);
 
-unsigned int dcr_resource_len(struct device_node *np, unsigned int index)
+unsigned int dcr_resource_len(const struct device_node *np, unsigned int index)
 {
 	unsigned int ds;
 	const u32 *dr = of_get_property(np, "dcr-reg", &ds);
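Both helpers only parse the node's "dcr-reg" property, so const-qualifying the node pointer lets callers holding a const device_node use them directly. The usual calling pattern looks roughly like the sketch below (not from this patch; the function name is illustrative):

	/* Sketch of typical dcr-reg usage (illustrative). */
	static int example_map_dcrs(struct device_node *np)
	{
		unsigned int base = dcr_resource_start(np, 0);
		unsigned int len = dcr_resource_len(np, 0);
		dcr_host_t host;

		if (!len)
			return -ENODEV;

		host = dcr_map(np, base, len);
		if (!DCR_MAP_OK(host))
			return -ENOMEM;

		dcr_write(host, 0, dcr_read(host, 0));	/* touch register 0 */
		dcr_unmap(host, len);
		return 0;
	}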
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 5b264eb4b1f7..d5f9ae0f1b75 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -187,7 +187,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
 		printk(KERN_WARNING "Can't get bus-range for %s, assume"
 		       " bus 0\n", dev->full_name);
 
-	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+	ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
 	hose = pcibios_alloc_controller(dev);
 	if (!hose)
 		return -ENOMEM;
@@ -300,7 +300,7 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
 		       " bus 0\n", dev->full_name);
 	}
 
-	ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+	ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
 	hose = pcibios_alloc_controller(dev);
 	if (!hose)
 		return -ENOMEM;
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 26ecb96f9731..115cb16351fd 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -207,236 +207,51 @@ static int __init of_add_fixed_phys(void)
 arch_initcall(of_add_fixed_phys);
 #endif /* CONFIG_FIXED_PHY */
 
-static int gfar_mdio_of_init_one(struct device_node *np)
-{
-	int k;
-	struct device_node *child = NULL;
-	struct gianfar_mdio_data mdio_data;
-	struct platform_device *mdio_dev;
-	struct resource res;
-	int ret;
-
-	memset(&res, 0, sizeof(res));
-	memset(&mdio_data, 0, sizeof(mdio_data));
-
-	ret = of_address_to_resource(np, 0, &res);
-	if (ret)
-		return ret;
-
-	/* The gianfar device will try to use the same ID created below to find
-	 * this bus, to coordinate register access (since they share). */
-	mdio_dev = platform_device_register_simple("fsl-gianfar_mdio",
-			res.start&0xfffff, &res, 1);
-	if (IS_ERR(mdio_dev))
-		return PTR_ERR(mdio_dev);
-
-	for (k = 0; k < 32; k++)
-		mdio_data.irq[k] = PHY_POLL;
-
-	while ((child = of_get_next_child(np, child)) != NULL) {
-		int irq = irq_of_parse_and_map(child, 0);
-		if (irq != NO_IRQ) {
-			const u32 *id = of_get_property(child, "reg", NULL);
-			mdio_data.irq[*id] = irq;
-		}
-	}
-
-	ret = platform_device_add_data(mdio_dev, &mdio_data,
-				sizeof(struct gianfar_mdio_data));
-	if (ret)
-		platform_device_unregister(mdio_dev);
-
-	return ret;
-}
-
-static int __init gfar_mdio_of_init(void)
-{
-	struct device_node *np = NULL;
-
-	for_each_compatible_node(np, NULL, "fsl,gianfar-mdio")
-		gfar_mdio_of_init_one(np);
-
-	/* try the deprecated version */
-	for_each_compatible_node(np, "mdio", "gianfar");
-		gfar_mdio_of_init_one(np);
-
-	return 0;
-}
-
-arch_initcall(gfar_mdio_of_init);
-
-static const char *gfar_tx_intr = "tx";
-static const char *gfar_rx_intr = "rx";
-static const char *gfar_err_intr = "error";
-
-static int __init gfar_of_init(void)
+#ifdef CONFIG_PPC_83xx
+static int __init mpc83xx_wdt_init(void)
 {
+	struct resource r;
 	struct device_node *np;
-	unsigned int i;
-	struct platform_device *gfar_dev;
-	struct resource res;
+	struct platform_device *dev;
+	u32 freq = fsl_get_sys_freq();
 	int ret;
 
-	for (np = NULL, i = 0;
-	     (np = of_find_compatible_node(np, "network", "gianfar")) != NULL;
-	     i++) {
-		struct resource r[4];
-		struct device_node *phy, *mdio;
-		struct gianfar_platform_data gfar_data;
-		const unsigned int *id;
-		const char *model;
-		const char *ctype;
-		const void *mac_addr;
-		const phandle *ph;
-		int n_res = 2;
-
-		if (!of_device_is_available(np))
-			continue;
-
-		memset(r, 0, sizeof(r));
-		memset(&gfar_data, 0, sizeof(gfar_data));
-
-		ret = of_address_to_resource(np, 0, &r[0]);
-		if (ret)
-			goto err;
-
-		of_irq_to_resource(np, 0, &r[1]);
-
-		model = of_get_property(np, "model", NULL);
-
-		/* If we aren't the FEC we have multiple interrupts */
-		if (model && strcasecmp(model, "FEC")) {
-			r[1].name = gfar_tx_intr;
-
-			r[2].name = gfar_rx_intr;
-			of_irq_to_resource(np, 1, &r[2]);
+	np = of_find_compatible_node(NULL, "watchdog", "mpc83xx_wdt");
 
-			r[3].name = gfar_err_intr;
-			of_irq_to_resource(np, 2, &r[3]);
-
-			n_res += 2;
-		}
-
-		gfar_dev =
-		    platform_device_register_simple("fsl-gianfar", i, &r[0],
-						    n_res);
-
-		if (IS_ERR(gfar_dev)) {
-			ret = PTR_ERR(gfar_dev);
-			goto err;
-		}
-
-		mac_addr = of_get_mac_address(np);
-		if (mac_addr)
-			memcpy(gfar_data.mac_addr, mac_addr, 6);
-
-		if (model && !strcasecmp(model, "TSEC"))
-			gfar_data.device_flags =
-			    FSL_GIANFAR_DEV_HAS_GIGABIT |
-			    FSL_GIANFAR_DEV_HAS_COALESCE |
-			    FSL_GIANFAR_DEV_HAS_RMON |
-			    FSL_GIANFAR_DEV_HAS_MULTI_INTR;
-		if (model && !strcasecmp(model, "eTSEC"))
-			gfar_data.device_flags =
-			    FSL_GIANFAR_DEV_HAS_GIGABIT |
-			    FSL_GIANFAR_DEV_HAS_COALESCE |
-			    FSL_GIANFAR_DEV_HAS_RMON |
-			    FSL_GIANFAR_DEV_HAS_MULTI_INTR |
-			    FSL_GIANFAR_DEV_HAS_CSUM |
-			    FSL_GIANFAR_DEV_HAS_VLAN |
-			    FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
-
-		ctype = of_get_property(np, "phy-connection-type", NULL);
-
-		/* We only care about rgmii-id. The rest are autodetected */
-		if (ctype && !strcmp(ctype, "rgmii-id"))
-			gfar_data.interface = PHY_INTERFACE_MODE_RGMII_ID;
-		else
-			gfar_data.interface = PHY_INTERFACE_MODE_MII;
-
-		if (of_get_property(np, "fsl,magic-packet", NULL))
-			gfar_data.device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
-
-		ph = of_get_property(np, "phy-handle", NULL);
-		if (ph == NULL) {
-			u32 *fixed_link;
-
-			fixed_link = (u32 *)of_get_property(np, "fixed-link",
-							   NULL);
-			if (!fixed_link) {
-				ret = -ENODEV;
-				goto unreg;
-			}
-
-			snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "0");
-			gfar_data.phy_id = fixed_link[0];
-		} else {
-			phy = of_find_node_by_phandle(*ph);
-
-			if (phy == NULL) {
-				ret = -ENODEV;
-				goto unreg;
-			}
-
-			mdio = of_get_parent(phy);
-
-			id = of_get_property(phy, "reg", NULL);
-			ret = of_address_to_resource(mdio, 0, &res);
-			if (ret) {
-				of_node_put(phy);
-				of_node_put(mdio);
-				goto unreg;
-			}
-
-			gfar_data.phy_id = *id;
-			snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "%llx",
-				 (unsigned long long)res.start&0xfffff);
+	if (!np) {
+		ret = -ENODEV;
+		goto nodev;
+	}
 
-			of_node_put(phy);
-			of_node_put(mdio);
-		}
+	memset(&r, 0, sizeof(r));
 
-		/* Get MDIO bus controlled by this eTSEC, if any. Normally only
-		 * eTSEC 1 will control an MDIO bus, not necessarily the same
-		 * bus that its PHY is on ('mdio' above), so we can't just use
-		 * that. What we do is look for a gianfar mdio device that has
-		 * overlapping registers with this device. That's really the
-		 * whole point, to find the device sharing our registers to
-		 * coordinate access with it.
-		 */
-		for_each_compatible_node(mdio, NULL, "fsl,gianfar-mdio") {
-			if (of_address_to_resource(mdio, 0, &res))
-				continue;
-
-			if (res.start >= r[0].start && res.end <= r[0].end) {
-				/* Get the ID the mdio bus platform device was
-				 * registered with. gfar_data.bus_id is
-				 * different because it's for finding a PHY,
-				 * while this is for finding a MII bus.
-				 */
-				gfar_data.mdio_bus = res.start&0xfffff;
-				of_node_put(mdio);
-				break;
-			}
-		}
+	ret = of_address_to_resource(np, 0, &r);
+	if (ret)
+		goto err;
 
-		ret =
-		    platform_device_add_data(gfar_dev, &gfar_data,
-					     sizeof(struct
-						    gianfar_platform_data));
-		if (ret)
-			goto unreg;
+	dev = platform_device_register_simple("mpc83xx_wdt", 0, &r, 1);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		goto err;
 	}
 
+	ret = platform_device_add_data(dev, &freq, sizeof(freq));
+	if (ret)
+		goto unreg;
+
+	of_node_put(np);
 	return 0;
 
 unreg:
-	platform_device_unregister(gfar_dev);
+	platform_device_unregister(dev);
 err:
+	of_node_put(np);
+nodev:
 	return ret;
 }
 
-arch_initcall(gfar_of_init);
+arch_initcall(mpc83xx_wdt_init);
+#endif
 
 static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
 {
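The system frequency returned by fsl_get_sys_freq() is attached to the new watchdog platform device as platform data. On the driver side it would be read back roughly as sketched here (illustrative; this probe function is not part of the patch):

	/* Illustrative consumer side: reading the frequency back out of the
	 * platform data attached by mpc83xx_wdt_init() above.
	 */
	static int example_wdt_probe(struct platform_device *pdev)
	{
		const u32 *freq = pdev->dev.platform_data;

		if (!freq || !*freq)
			return -EINVAL;
		/* ... derive the watchdog timeout from *freq ... */
		return 0;
	}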
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index d502927644c6..5da37c2f22ee 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -57,7 +57,7 @@ void __init setup_grackle(struct pci_controller *hose)
 {
 	setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
 	if (machine_is_compatible("PowerMac1,1"))
-		ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+		ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
 	if (machine_is_compatible("AAPL,PowerBook1998"))
 		grackle_set_loop_snoop(hose, 1);
 #if 0 /* Disabled for now, HW problems ??? */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 5d7f9f0c93c3..3e0d89dcdba2 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -661,17 +661,6 @@ static inline void mpic_eoi(struct mpic *mpic)
 	(void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
 }
 
-#ifdef CONFIG_SMP
-static irqreturn_t mpic_ipi_action(int irq, void *data)
-{
-	long ipi = (long)data;
-
-	smp_message_recv(ipi);
-
-	return IRQ_HANDLED;
-}
-#endif /* CONFIG_SMP */
-
 /*
  * Linux descriptor level callbacks
  */
@@ -1548,13 +1537,7 @@ unsigned int mpic_get_mcirq(void)
 void mpic_request_ipis(void)
 {
 	struct mpic *mpic = mpic_primary;
-	long i, err;
-	static char *ipi_names[] = {
-		"IPI0 (call function)",
-		"IPI1 (reschedule)",
-		"IPI2 (call function single)",
-		"IPI3 (debugger break)",
-	};
+	int i;
 	BUG_ON(mpic == NULL);
 
 	printk(KERN_INFO "mpic: requesting IPIs ... \n");
@@ -1563,17 +1546,10 @@ void mpic_request_ipis(void)
 		unsigned int vipi = irq_create_mapping(mpic->irqhost,
 						       mpic->ipi_vecs[0] + i);
 		if (vipi == NO_IRQ) {
-			printk(KERN_ERR "Failed to map IPI %ld\n", i);
-			break;
-		}
-		err = request_irq(vipi, mpic_ipi_action,
-				  IRQF_DISABLED|IRQF_PERCPU,
-				  ipi_names[i], (void *)i);
-		if (err) {
-			printk(KERN_ERR "Request of irq %d for IPI %ld failed\n",
-			       vipi, i);
-			break;
+			printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
+			continue;
 		}
+		smp_request_message_ipi(vipi, i);
 	}
 }
 
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index d3e4d61030b5..77fae5f64f2e 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -194,11 +194,41 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
  * 4xx PCI 2.x part
  */
 
+static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
+					   void __iomem *reg,
+					   u64 plb_addr,
+					   u64 pci_addr,
+					   u64 size,
+					   unsigned int flags,
+					   int index)
+{
+	u32 ma, pcila, pciha;
+
+	if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
+	    size < 0x1000 || (plb_addr & (size - 1)) != 0) {
+		printk(KERN_WARNING "%s: Resource out of range\n",
+		       hose->dn->full_name);
+		return -1;
+	}
+	ma = (0xffffffffu << ilog2(size)) | 1;
+	if (flags & IORESOURCE_PREFETCH)
+		ma |= 2;
+
+	pciha = RES_TO_U32_HIGH(pci_addr);
+	pcila = RES_TO_U32_LOW(pci_addr);
+
+	writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
+	writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
+	writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
+	writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
+
+	return 0;
+}
+
 static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
 					     void __iomem *reg)
 {
-	u32 la, ma, pcila, pciha;
-	int i, j;
+	int i, j, found_isa_hole = 0;
 
 	/* Setup outbound memory windows */
 	for (i = j = 0; i < 3; i++) {
@@ -213,28 +243,29 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
 			break;
 		}
 
-		/* Calculate register values */
-		la = res->start;
-		pciha = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
-		pcila = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
-
-		ma = res->end + 1 - res->start;
-		if (!is_power_of_2(ma) || ma < 0x1000 || ma > 0xffffffffu) {
-			printk(KERN_WARNING "%s: Resource out of range\n",
-			       hose->dn->full_name);
-			continue;
+		/* Configure the resource */
+		if (ppc4xx_setup_one_pci_PMM(hose, reg,
+					     res->start,
+					     res->start - hose->pci_mem_offset,
+					     res->end + 1 - res->start,
+					     res->flags,
+					     j) == 0) {
+			j++;
+
+			/* If the resource PCI address is 0 then we have our
+			 * ISA memory hole
+			 */
+			if (res->start == hose->pci_mem_offset)
+				found_isa_hole = 1;
 		}
-		ma = (0xffffffffu << ilog2(ma)) | 0x1;
-		if (res->flags & IORESOURCE_PREFETCH)
-			ma |= 0x2;
-
-		/* Program register values */
-		writel(la, reg + PCIL0_PMM0LA + (0x10 * j));
-		writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * j));
-		writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * j));
-		writel(ma, reg + PCIL0_PMM0MA + (0x10 * j));
-		j++;
 	}
+
+	/* Handle ISA memory hole if not already covered */
+	if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
+		if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
+					     hose->isa_mem_size, 0, j) == 0)
+			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+			       hose->dn->full_name);
 }
 
 static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
@@ -352,11 +383,52 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
  * 4xx PCI-X part
  */
 
+static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
+					    void __iomem *reg,
+					    u64 plb_addr,
+					    u64 pci_addr,
+					    u64 size,
+					    unsigned int flags,
+					    int index)
+{
+	u32 lah, lal, pciah, pcial, sa;
+
+	if (!is_power_of_2(size) || size < 0x1000 ||
+	    (plb_addr & (size - 1)) != 0) {
+		printk(KERN_WARNING "%s: Resource out of range\n",
+		       hose->dn->full_name);
+		return -1;
+	}
+
+	/* Calculate register values */
+	lah = RES_TO_U32_HIGH(plb_addr);
+	lal = RES_TO_U32_LOW(plb_addr);
+	pciah = RES_TO_U32_HIGH(pci_addr);
+	pcial = RES_TO_U32_LOW(pci_addr);
+	sa = (0xffffffffu << ilog2(size)) | 0x1;
+
+	/* Program register values */
+	if (index == 0) {
+		writel(lah, reg + PCIX0_POM0LAH);
+		writel(lal, reg + PCIX0_POM0LAL);
+		writel(pciah, reg + PCIX0_POM0PCIAH);
+		writel(pcial, reg + PCIX0_POM0PCIAL);
+		writel(sa, reg + PCIX0_POM0SA);
+	} else {
+		writel(lah, reg + PCIX0_POM1LAH);
+		writel(lal, reg + PCIX0_POM1LAL);
+		writel(pciah, reg + PCIX0_POM1PCIAH);
+		writel(pcial, reg + PCIX0_POM1PCIAL);
+		writel(sa, reg + PCIX0_POM1SA);
+	}
+
+	return 0;
+}
+
 static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
 					      void __iomem *reg)
 {
-	u32 lah, lal, pciah, pcial, sa;
-	int i, j;
+	int i, j, found_isa_hole = 0;
 
 	/* Setup outbound memory windows */
 	for (i = j = 0; i < 3; i++) {
@@ -371,36 +443,29 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
 			break;
 		}
 
-		/* Calculate register values */
-		lah = RES_TO_U32_HIGH(res->start);
-		lal = RES_TO_U32_LOW(res->start);
-		pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
-		pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
-		sa = res->end + 1 - res->start;
-		if (!is_power_of_2(sa) || sa < 0x100000 ||
-		    sa > 0xffffffffu) {
-			printk(KERN_WARNING "%s: Resource out of range\n",
-			       hose->dn->full_name);
-			continue;
+		/* Configure the resource */
+		if (ppc4xx_setup_one_pcix_POM(hose, reg,
+					      res->start,
+					      res->start - hose->pci_mem_offset,
+					      res->end + 1 - res->start,
+					      res->flags,
+					      j) == 0) {
+			j++;
+
+			/* If the resource PCI address is 0 then we have our
+			 * ISA memory hole
+			 */
+			if (res->start == hose->pci_mem_offset)
+				found_isa_hole = 1;
 		}
-		sa = (0xffffffffu << ilog2(sa)) | 0x1;
-
-		/* Program register values */
-		if (j == 0) {
-			writel(lah, reg + PCIX0_POM0LAH);
-			writel(lal, reg + PCIX0_POM0LAL);
-			writel(pciah, reg + PCIX0_POM0PCIAH);
-			writel(pcial, reg + PCIX0_POM0PCIAL);
-			writel(sa, reg + PCIX0_POM0SA);
-		} else {
-			writel(lah, reg + PCIX0_POM1LAH);
-			writel(lal, reg + PCIX0_POM1LAL);
-			writel(pciah, reg + PCIX0_POM1PCIAH);
-			writel(pcial, reg + PCIX0_POM1PCIAL);
-			writel(sa, reg + PCIX0_POM1SA);
-		}
-		j++;
 	}
+
+	/* Handle ISA memory hole if not already covered */
+	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
+		if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
+					      hose->isa_mem_size, 0, j) == 0)
+			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+			       hose->dn->full_name);
 }
 
 static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
@@ -1317,12 +1382,72 @@ static struct pci_ops ppc4xx_pciex_pci_ops =
 	.write = ppc4xx_pciex_write_config,
 };
 
+static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
+					     struct pci_controller *hose,
+					     void __iomem *mbase,
+					     u64 plb_addr,
+					     u64 pci_addr,
+					     u64 size,
+					     unsigned int flags,
+					     int index)
+{
+	u32 lah, lal, pciah, pcial, sa;
+
+	if (!is_power_of_2(size) ||
+	    (index < 2 && size < 0x100000) ||
+	    (index == 2 && size < 0x100) ||
+	    (plb_addr & (size - 1)) != 0) {
+		printk(KERN_WARNING "%s: Resource out of range\n",
+		       hose->dn->full_name);
+		return -1;
+	}
+
+	/* Calculate register values */
+	lah = RES_TO_U32_HIGH(plb_addr);
+	lal = RES_TO_U32_LOW(plb_addr);
+	pciah = RES_TO_U32_HIGH(pci_addr);
+	pcial = RES_TO_U32_LOW(pci_addr);
+	sa = (0xffffffffu << ilog2(size)) | 0x1;
+
+	/* Program register values */
+	switch (index) {
+	case 0:
+		out_le32(mbase + PECFG_POM0LAH, pciah);
+		out_le32(mbase + PECFG_POM0LAL, pcial);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
+		/* Note that 3 here means enabled | single region */
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
+		break;
+	case 1:
+		out_le32(mbase + PECFG_POM1LAH, pciah);
+		out_le32(mbase + PECFG_POM1LAL, pcial);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
+		/* Note that 3 here means enabled | single region */
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
+		break;
+	case 2:
+		out_le32(mbase + PECFG_POM2LAH, pciah);
+		out_le32(mbase + PECFG_POM2LAL, pcial);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
+		/* Note that 3 here means enabled | IO space !!! */
+		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, sa | 3);
+		break;
+	}
+
+	return 0;
+}
+
 static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
 					       struct pci_controller *hose,
 					       void __iomem *mbase)
 {
-	u32 lah, lal, pciah, pcial, sa;
-	int i, j;
+	int i, j, found_isa_hole = 0;
 
 	/* Setup outbound memory windows */
 	for (i = j = 0; i < 3; i++) {
@@ -1337,53 +1462,38 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
 			break;
 		}
 
-		/* Calculate register values */
-		lah = RES_TO_U32_HIGH(res->start);
-		lal = RES_TO_U32_LOW(res->start);
-		pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
-		pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
-		sa = res->end + 1 - res->start;
-		if (!is_power_of_2(sa) || sa < 0x100000 ||
-		    sa > 0xffffffffu) {
-			printk(KERN_WARNING "%s: Resource out of range\n",
-			       port->node->full_name);
-			continue;
-		}
-		sa = (0xffffffffu << ilog2(sa)) | 0x1;
-
-		/* Program register values */
-		switch (j) {
-		case 0:
-			out_le32(mbase + PECFG_POM0LAH, pciah);
-			out_le32(mbase + PECFG_POM0LAL, pcial);
-			dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
-			dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
-			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
-			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
-			break;
-		case 1:
-			out_le32(mbase + PECFG_POM1LAH, pciah);
-			out_le32(mbase + PECFG_POM1LAL, pcial);
-			dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
-			dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
-			dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
-			dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
-			break;
+		/* Configure the resource */
+		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+					       res->start,
+					       res->start - hose->pci_mem_offset,
+					       res->end + 1 - res->start,
+					       res->flags,
+					       j) == 0) {
+			j++;
+
+			/* If the resource PCI address is 0 then we have our
+			 * ISA memory hole
+			 */
+			if (res->start == hose->pci_mem_offset)
+				found_isa_hole = 1;
 		}
-		j++;
 	}
 
-	/* Configure IO, always 64K starting at 0 */
-	if (hose->io_resource.flags & IORESOURCE_IO) {
-		lah = RES_TO_U32_HIGH(hose->io_base_phys);
-		lal = RES_TO_U32_LOW(hose->io_base_phys);
-		out_le32(mbase + PECFG_POM2LAH, 0);
-		out_le32(mbase + PECFG_POM2LAL, 0);
-		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
-		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
-		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
-		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0xffff0000 | 3);
-	}
+	/* Handle ISA memory hole if not already covered */
+	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
+		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+					       hose->isa_mem_phys, 0,
+					       hose->isa_mem_size, 0, j) == 0)
+			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+			       hose->dn->full_name);
+
+	/* Configure IO, always 64K starting at 0. We hard wire it to 64K !
+	 * Note also that it -has- to be region index 2 on this HW
+	 */
+	if (hose->io_resource.flags & IORESOURCE_IO)
+		ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+					   hose->io_base_phys, 0,
+					   0x10000, IORESOURCE_IO, 2);
 }
 
 static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
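All three helpers encode a window's size the same way: a naturally aligned, power-of-two size becomes a high-order mask with bit 0 as the enable (and, for PCI 2.x, bit 1 marking prefetchable). A worked example for a 256 MB window (illustrative values only):

	/* Worked example (illustrative) of the MA/SA mask encoding used by
	 * ppc4xx_setup_one_pci_PMM(): a 256 MB window becomes 0xf0000001.
	 */
	static u32 example_pmm_ma(void)
	{
		u64 size = 0x10000000;				/* 256 MB, power of two */
		u32 ma = (0xffffffffu << ilog2(size)) | 1;	/* 0xf0000000 | enable */

		/* ma |= 2 would additionally mark the window prefetchable */
		return ma;					/* == 0xf0000001 */
	}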
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index b3b73ae57d6d..01bce3784b0a 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/param.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/bootmem.h>
@@ -38,6 +39,8 @@ static void qe_snums_init(void);
 static int qe_sdma_init(void);
 
 static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
 
 /* QE snum state */
 enum qe_snum_state {
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index 1d78071aad7d..ebb442ea1917 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -18,6 +18,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/stddef.h>
+#include <linux/spinlock.h>
 #include <linux/module.h>
 
 #include <asm/irq.h>
@@ -26,9 +27,6 @@
 #include <asm/qe.h>
 #include <asm/ucc.h>
 
-DEFINE_SPINLOCK(cmxgcr_lock);
-EXPORT_SYMBOL(cmxgcr_lock);
-
 int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
 {
 	unsigned long flags;
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 51d97588e762..9cb03b71b9d6 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -4,7 +4,7 @@ ifdef CONFIG_PPC64
 EXTRA_CFLAGS += -mno-minimal-toc
 endif
 
-obj-y			+= xmon.o setjmp.o start.o nonstdio.o
+obj-y			+= xmon.o start.o nonstdio.o
 
 ifdef CONFIG_XMON_DISASSEMBLY
 obj-y			+= ppc-dis.o ppc-opc.o
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 076368c8b8a9..8dfad7d9a004 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -41,6 +41,7 @@
 #include <asm/spu_priv1.h>
 #include <asm/firmware.h>
 #include <asm/setjmp.h>
+#include <asm/reg.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/hvcall.h>
@@ -159,8 +160,6 @@ static int xmon_no_auto_backtrace;
 extern void xmon_enter(void);
 extern void xmon_leave(void);
 
-extern void xmon_save_regs(struct pt_regs *);
-
 #ifdef CONFIG_PPC64
 #define REG		"%.16lx"
 #define REGS_PER_LINE	4
@@ -532,7 +531,7 @@ int xmon(struct pt_regs *excp)
 	struct pt_regs regs;
 
 	if (excp == NULL) {
-		xmon_save_regs(&regs);
+		ppc_save_regs(&regs);
 		excp = &regs;
 	}
 